blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
95739b4b7c481cc78667c0076d7f207a66c7e6ff | b0d094717759bf3176c30ab965cb2a89d39c0866 | /plip/pdb_lig_analysis.py | 08295d947c413942f44522ce261831ee6fd892f5 | [] | no_license | lituan/tools | 110d44659d09035d95092abbfb4bd1d5241d5112 | 8d65d47c2704a3f058ada54540efe04fb6f8ae4a | refs/heads/master | 2020-12-25T17:37:10.891146 | 2017-04-25T08:24:29 | 2017-04-25T08:24:29 | 42,118,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,350 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
analys pdb_lig_interactions and make graphs
"""
import os
import sys
import cPickle as pickle
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from collections import Counter
def get_pdb_chain_sequences(p):
    """Fetch (pdbid, chain, sequence) triples from the RCSB custom report.

    p: list of (pdbid, chain) pairs.  Only report rows whose (pdbid, chain)
    appears in p and that carry a non-empty sequence are returned.
    """
    # Fix: urllib/urllib2 are used below but were never imported anywhere
    # in this module, so calling this function raised NameError.  Imported
    # locally, Python 2 style, matching the file's cPickle/iteritems usage.
    import urllib
    import urllib2
    pdbids = ','.join([pdbid for pdbid, chain in p])
    url = 'http://www.rcsb.org/pdb/rest/customReport.csv?'
    data = {
        'pdbids': pdbids,
        'customReportColumns': 'structureId,uniprotAcc,entityId,resolution,chainLength,releaseDate,sequence',
        'service': 'wsfile',
        'format': 'csv',
    }
    data = urllib.urlencode(data)
    # POST the query and read the CSV answer
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    # normalise: strip line endings, drop blank lines, split columns and
    # remove the surrounding double quotes
    lines = [line.rstrip('\r\n') for line in response.readlines()]
    lines = [line for line in lines if line]
    lines = [line.split(',') for line in lines]
    lines = [[w.strip('"') for w in line] for line in lines]
    # keep (pdbid, chain, sequence) only for the requested chains
    lines = [(line[0], line[1], line[-1]) for line in lines if (line[0], line[1]) in p]
    lines = [line for line in lines if line[-1]]
    return lines
def patter_analysis(pdb_lig_interactions):
    """Plot amino-acid and residue-pattern frequencies of ligand contacts.

    pdb_lig_interactions: [(pdb, lig, [(interaction_type, [res_id, ...]), ...])]
    where each res_id looks like 'A_206_LYS'.  Saves <input>_aa_freq.png and
    <input>_pattern_freq.png (input name taken from sys.argv[-1]).
    """
    # union of all interacting residues per (pdb, lig) pair
    pdb_lig_interaction_res = []
    for pdb, lig, interactions in pdb_lig_interactions:
        inter_res = []
        for interaction_type, interaction_res in interactions:
            inter_res += interaction_res
        if inter_res:
            pdb_lig_interaction_res.append((pdb, lig, set(inter_res)))
    # keep only the residue-name part of each id.
    # Fix: the original comprehension unpacked to 'pbd' (typo) while
    # referencing the leaked loop variable 'pdb', so every tuple carried
    # the last pdb id from the loop above.
    patterns = [(pdb, lig, [r.split('_')[-1] for r in res])
                for pdb, lig, res in pdb_lig_interaction_res]
    patterns = [p[2] for p in patterns if len(p[2])]
    # patterns = [p[2] for p in patterns if len(p[2]) >= 3]
    # --- per-amino-acid frequency plot ---
    aminoacids = []
    for p in patterns:
        aminoacids += p
    aa_count = Counter(aminoacids)
    aa = []
    count = []
    for a, c in aa_count.iteritems():  # Python 2, consistent with cPickle use
        aa.append(a)
        count.append(c)
    fname = os.path.split(sys.argv[-1])[1].split('.')[0]
    df = pd.DataFrame({'AA': aa, 'Freq': count})
    df = df.sort_values('Freq', ascending=False)
    f, ax = plt.subplots()
    sns.set_style('whitegrid')
    sns.set_palette('pastel')
    sns.barplot(x='AA', y='Freq', data=df)
    plt.savefig(fname + '_aa_freq.png', dpi=300)
    # --- residue-pattern frequency plot ---
    # three-letter -> one-letter amino-acid codes (renamed from 'aa', which
    # shadowed the list built above)
    three_to_one = {'VAL': 'V', 'ILE': 'I', 'LEU': 'L', 'GLU': 'E', 'GLN': 'Q',
                    'ASP': 'D', 'ASN': 'N', 'HIS': 'H', 'TRP': 'W', 'PHE': 'F',
                    'TYR': 'Y', 'ARG': 'R', 'LYS': 'K', 'SER': 'S', 'THR': 'T',
                    'MET': 'M', 'ALA': 'A', 'GLY': 'G', 'PRO': 'P', 'CYS': 'C'}
    # one sorted one-letter string per contact set, e.g. 'KRY'
    patterns = [''.join(sorted(three_to_one[pi] for pi in p)) for p in patterns]
    patterns_count = Counter(patterns)
    pattern = []
    count = []
    for p, c in patterns_count.iteritems():
        pattern.append(p)
        count.append(c)
    df = pd.DataFrame({'Pattern': pattern, 'Freq': count})
    df = df.sort_values('Freq', ascending=False)
    f, ax = plt.subplots()
    sns.set_style('whitegrid')
    sns.set_palette('pastel')
    sns.barplot(y='Pattern', x='Freq', data=df)
    plt.savefig(fname + '_pattern_freq.png', dpi=300)
def main():
    """Load the pickled interaction list (path in argv[-1]) and analyse it.

    Entry format: [pdb, lig, chain, [(interaction_type, [residues...])]]
    """
    # Fix: close the input file instead of leaking the handle.
    with open(sys.argv[-1], 'r') as handle:
        pdb_lig_interactions = pickle.load(handle)
    patter_analysis(pdb_lig_interactions)
if __name__ == "__main__":
    main()
| [
"imlituan@gmail.com"
] | imlituan@gmail.com |
c4c936a9ef0411a0eb5a42e5ee27987673c84d95 | 73d39e10c0d8986280d50d8a089f8a12f263c4d0 | /fangyuta.py | 7bfcb4b8d55504c7097d86b6525986ce2f970025 | [] | no_license | ballshapesdsd/wzry | cf04ccea8c42da01581d4a2fd6f1840662f05412 | 6211e9772a5a70a213e159a783cd0ab398f15685 | refs/heads/master | 2021-07-18T19:52:41.061299 | 2017-10-27T07:30:19 | 2017-10-27T07:30:19 | 108,513,955 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 2,515 | py | #coding:gbk
import cv2
from PIL import Image
import os
import time
# im: banner text template for destroying an ENEMY tower; im2: banner text
# template for OUR tower being destroyed (both cropped from reference frames)
im=cv2.imread('fangyuta_difang.png',cv2.IMREAD_COLOR)[200:253,897:1048]
im2=cv2.imread('fangyuta_wofang.png',cv2.IMREAD_COLOR)[200:253,871:1022]
cap = cv2.VideoCapture('kog_00_19_25_00_33_30.mp4')
ret,frame = cap.read()
# frames-per-second derived from a known frame count over a known duration
fps=20361/(13*60+53)
# heroes holds the processed hero (and minion) portrait templates: the top
# half of each thumbnail, with everything outside the portrait circle blacked out
heroes=[]
for i in os.listdir('.\\heroes_thumbnail_jisha_1080'):
    heroes.append((cv2.imread('.\\heroes_thumbnail_jisha_1080\\'+i,cv2.IMREAD_COLOR)[:74],i))
xiaobing=cv2.imread('xiaobingfangyuta.jpg',cv2.IMREAD_COLOR)[104:147,381:440]
xiaobing=cv2.resize(xiaobing,(99,74),interpolation=cv2.INTER_CUBIC)
for i1 in range(74):
    for j1 in range(99):
        # black out pixels outside the circle centred at (49, 49)
        if (i1-49)**2+(j1-49)**2>49*49:
            xiaobing[i1,j1]=0
heroes.append((xiaobing,'xiaobing.png'))
# match the hero portrait against every template; best score wins
def match_heroes(frame):
    """Return the filename of the portrait template best matching *frame*."""
    t = []
    for idx,hero in enumerate(heroes):
        # higher res means a better match with TM_CCOEFF_NORMED
        res=cv2.matchTemplate(frame,hero[0],cv2.TM_CCOEFF_NORMED)[0][0]
        t.append((res,hero[1]))
    t.sort()
    t=t[::-1]
    # after the descending sort, t[0] is the (score, filename) maximum
    return t[0][1]
i=0        # current frame index
tt=None    # frame index of the previously detected tower event
while(1):
    if not ret:
        break
    # match the frame against both banner templates; with TM_SQDIFF_NORMED
    # a LOWER score means a closer match
    temp=frame[200:253,897:1048]
    res=cv2.matchTemplate(temp,im,cv2.TM_SQDIFF_NORMED)[0][0]
    temp=frame[200:253,871:1022]
    res1=cv2.matchTemplate(temp,im2,cv2.TM_SQDIFF_NORMED)[0][0]
    # acceptance threshold set to 0.2
    if res1<0.2 or res<0.2:
        # tt is the frame of the previous tower event, i the current frame;
        # a gap of more than 100 frames marks a new, distinct event
        if not tt or i-tt>100:
            tt=i
        # sample the portrait 4 frames after the banner first appears
        # (the icon can be distorted on its very first frame)
        elif i-tt==4:
            if res<0.2:
                print(str(int(i/fps/60)).zfill(2)+':'+str(i/fps-int(i/fps/60)*60)[:5],'cuihuidifangfangyuta')
            if res1<0.2:
                print(str(int(i/fps/60)).zfill(2)+':'+str(i/fps-int(i/fps/60)*60)[:5],'wofangfangyutabeicuihui')
            yingxiong=frame[174:248,643:742]
            # black out the background outside the circular hero portrait
            for i1 in range(74):
                for j1 in range(99):
                    if (i1-49)**2+(j1-49)**2>49*49:
                        yingxiong[i1,j1]=0
            print('yingxiong:',match_heroes(yingxiong))
    ret,frame = cap.read()
    i+=1
| [
"zhengchao@pset.suntec.net"
] | zhengchao@pset.suntec.net |
68086a20b526b6b1621d7f74c9063b179f1f9cbf | 6d03d2356c6c903c56f65ec5426906c6f293e3ab | /geneFinding.py | 167336ae17e26612806ca9ec837c095afb9af3e3 | [] | no_license | MaltheBisbo/ML_handInsCopy | 5efe3a1495f2555380b8896201f4b46d00c473e4 | 69b971c9e1a8fc185fef518b6e2364a9eaccbc86 | refs/heads/master | 2021-05-06T17:14:13.967358 | 2017-11-23T12:15:29 | 2017-11-23T12:15:29 | 111,805,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,040 | py | import numpy as np
import math
def read_fasta_file(filename):
    """
    Parse the FASTA file *filename* into a dict mapping sequence names
    to their concatenated sequence strings.

    Lines starting with ';' and blank lines are ignored; a line starting
    with '>' opens a new named sequence. Content before the first '>'
    is discarded.
    """
    buffers = {}
    active = None  # line buffer of the sequence currently being read
    with open(filename) as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped or stripped.startswith(';'):
                continue
            if stripped.startswith('>'):
                active = []
                buffers[stripped.lstrip('>')] = active
            elif active is not None:
                active.append(stripped)
    return {name: ''.join(parts) for name, parts in buffers.items()}
class hmm:
    """Plain container for HMM parameters.

    init_probs: initial state distribution (length K).
    trans_probs: K x K transition matrix (K = 43 elsewhere in this file).
    emission_probs: K x 4 emission matrix (symbols a,c,g,t mapped to 0..3).
    """
    def __init__(self, init_probs, trans_probs, emission_probs):
        self.init_probs = init_probs
        self.trans_probs = trans_probs
        self.emission_probs = emission_probs
def translate_indices_to_observations(indices):
    """Turn a sequence of 0-3 indices back into an 'acgt' string."""
    alphabet = 'acgt'
    return ''.join(alphabet[idx] for idx in indices)
def translate_path_to_indices(path):
    """Convert a digit string (or any digit sequence) into a list of ints."""
    return [int(digit) for digit in path]
def translate_indices_to_path(indices):
    """Join a sequence of state indices into a single digit string."""
    return ''.join(map(str, indices))
def translate_observations_to_indices(obs):
    """Map a DNA string (case-insensitive) to indices a=0, c=1, g=2, t=3."""
    lookup = {'a': 0, 'c': 1, 'g': 2, 't': 3}
    result = []
    for symbol in obs:
        result.append(lookup[symbol.lower()])
    return result
def translate_sequence_to_states_old(sequence):
    """Older annotation-free variant: infer 43-model states from codons only.

    Scans base by base for a start codon, then consumes whole codons until
    a matching stop-state triple is emitted.
    NOTE(review): loops past the sequence end (and can loop indefinitely)
    if a start codon is never followed by the corresponding stop codon.
    """
    N = len(sequence)
    states = np.array([])
    i = 0
    while i < N:
        # checkStart returns ([0], 1) for non-start codons, so the scan
        # advances one base at a time until a start codon is found
        nextS, lenA = checkStart(sequence[i: i + 3])
        states = np.append(states, nextS, axis = 0)
        i += lenA
        if states[-1] == 3 or states[-1] == 6 or states[-1] == 9:
            # forward gene opened: emit codon triples until a forward
            # stop-state triple (ending in 15, 18 or 21) appears
            while states[-1] != 15 and states[-1] != 18 and states[-1] != 21:
                states = np.append(states, checkEndF(sequence[i : i + 3]), axis = 0)
                i += 3
        if states[-1] == 24 or states[-1] == 27 or states[-1] == 30:
            # reverse gene opened: emit codon triples until a reverse
            # stop-state triple (ending in 36, 39 or 42) appears
            while states[-1] != 36 and states[-1] != 39 and states[-1] != 42:
                states = np.append(states, checkEndR(sequence[i : i + 3]), axis = 0)
                i += 3
    return states
### TEST FOR HMM 7 ###
def createZ7(annotation):
    """Translate an 'N'/'C'/'R' annotation string into hmm7 state indices.

    'N' maps to state 3, forward-coding codons to the triple 4,5,6 and
    reverse-coding codons to the triple 2,1,0.
    """
    total = len(annotation)
    Z = np.zeros(total)
    pos = 0
    while pos < total:
        if pos == 0:
            Z[pos] = 3
            pos += 1
        # consume whole forward-coding codons
        while annotation[pos:pos + 3] == 'CCC':
            Z[pos:pos + 3] = np.array([4, 5, 6])
            pos += 3
        # consume whole reverse-coding codons
        while annotation[pos:pos + 3] == 'RRR':
            Z[pos:pos + 3] = np.array([2, 1, 0])
            pos += 3
        Z[pos] = 3
        pos += 1
    return Z
def createA(Z_list):
    """Estimate the 43x43 transition matrix by counting consecutive state
    pairs over every path in Z_list, then row-normalising.

    Rows for states that never occur divide 0/0 and come out as NaN,
    exactly as in the original estimator.
    """
    A = np.zeros((43, 43))
    for Z in Z_list:
        for src, dst in zip(Z[:-1], Z[1:]):
            A[int(src), int(dst)] += 1
    for row in range(43):
        A[row] /= np.sum(A[row])
    return A
def createPi():
    """Initial state distribution for the 43-state model: all probability
    mass on state 0 (non-coding)."""
    initial = np.zeros(43)
    initial[0] = 1.0
    return initial
def createPhi(Z_list, sequence_list):
    """Estimate the 43x4 emission matrix by counting (state, symbol) pairs
    over paired (path, observation) sequences, then row-normalising.

    Rows for states that never occur divide 0/0 and come out as NaN,
    exactly as in the original estimator.
    """
    Phi = np.zeros((43, 4))
    for path, observed in zip(Z_list, sequence_list):
        for state, symbol in zip(path, observed):
            Phi[int(state), int(symbol)] += 1
    for state in range(43):
        Phi[state] /= np.sum(Phi[state])
    return Phi
### END TEST FOR HMM 7 ###
def log(x):
    """math.log that returns -inf for 0 instead of raising ValueError."""
    return float('-inf') if x == 0 else math.log(x)
def viterbi(A, Phi, Pi, sequence):
    """Most-likely 43-state path for *sequence* via log-space Viterbi.

    A: 43x43 transitions, Phi: 43x4 emissions, Pi: initial distribution,
    sequence: list of symbol indices 0..3.  Returns a float array of state
    indices.  Side effect: saves the full DP table to 'OmegaTest43.npy'.
    """
    N = len(sequence) # Number of steps in the markov chain
    K = 43 # Number of hidden states
    Omega = np.zeros((K, N))
    # NOTE(review): OmegaBack is allocated but never used in this function.
    OmegaBack = np.zeros((K, N))
    # First column
    for i in range(K):
        Omega[i, 0] = log(Pi[i]) + log(Phi[i, sequence[0]])
    # Probably need log to make this work
    for i in range(1, N): # Loop over the sequence
        if i % 10000 == 0:
            print('{} viterbi\r'.format(i), end='')
        # Omega[k, i] = best log-probability of any path ending in state k
        # at position i (emission term + best predecessor + transition)
        for k in range(K): # Loop over the hidden states
            Omega[k, i] = log(Phi[k, sequence[i]]) + np.max(Omega[:, i - 1] + np.log(A[:, k]))
    np.save('OmegaTest43.npy', Omega)
    # Backtracking
    Z = np.zeros(len(sequence))
    Z[-1] = np.argmax(Omega[:,-1])
    for i in reversed(range(0, N-1)):
        if i % 10000 == 0:
            print('{} backtracking\r'.format(i), end='')
        state = sequence[i+1]
        # pick the predecessor that maximises emission + path + transition
        # into the already-chosen state Z[i+1]
        Z[i] = np.argmax(log(Phi[int(Z[i+1]), int(state)]) + Omega[:,i] + np.log(A[:, int(Z[i+1])]))
    return Z
def translate_sequence_to_states(sequence, annotation):
    """Convert a DNA string plus its 'N'/'C'/'R' annotation into indices of
    the 43-state model.

    State 0 is non-coding; 1-21 cover forward-gene start/coding/stop codon
    triples; 22-42 the reverse-gene equivalents.  The annotation guides
    where genes begin and end; the sequence picks the concrete codon states.
    """
    N = len(sequence)
    states = np.zeros(N)
    i = 0
    while i < N:
        # forward gene: annotation flips to 'CCC' right after an N/R and the
        # codon at i is a forward start codon
        if (annotation[i-1: i + 3] == 'NCCC' or annotation[i-1: i + 3] == 'RCCC') and isStartF(sequence[i: i + 3]):
            states[i:i+3] = checkStart(sequence[i: i + 3])[0]
            i += 3
            # interior coding codons until the annotation ends the gene
            while not annotation[i: i + 4] == 'CCCN':
                states[i:i+3] = np.array([10, 11, 12])
                i += 3
            states[i:i+3] = checkEndF(sequence[i : i + 3])
            i += 3
        # reverse gene: same structure on the reverse strand
        if (annotation[i-1:i + 3] == 'NRRR' or annotation[i-1:i + 3] == 'CRRR') and isStartR(sequence[i:i+3]):
            states[i:i+3] = checkStart(sequence[i: i + 3])[0]
            i += 3
            while not annotation[i : i + 4] == 'RRRN':
                states[i:i+3] = np.array([31, 32, 33])
                i += 3
            states[i:i+3] = checkEndR(sequence[i : i + 3])
            i += 3
        # otherwise one non-coding position -- unless a reverse-to-forward
        # transition ('RCCC') starts at the next index, handled next pass
        if not annotation[i-1:i + 3] == 'RCCC':
            states[i] = 0
            i += 1
    return states
def isStartF(s):
    """True if codon *s* is a forward-strand start codon."""
    return s in ('ATG', 'GTG', 'TTG')
def isStartR(s):
    """True if codon *s* is a reverse-strand start codon."""
    return s in ('TTA', 'CTA', 'TCA')
def isStopF(s):
    """True if codon *s* is a forward-strand stop codon."""
    return s in ('TAG', 'TGA', 'TAA')
def isStopR(s):
    """True if codon *s* is a reverse-strand stop codon."""
    return s in ('CAT', 'CAC', 'CAA')
def checkStart(string):
    """Map a start codon to (state triple, advance length).

    Forward starts map to triples 1-9, reverse starts to 22-30; any other
    codon yields the single non-coding state ([0], 1).
    """
    codon_states = {
        'ATG': [1, 2, 3],
        'GTG': [4, 5, 6],
        'TTG': [7, 8, 9],
        'TTA': [22, 23, 24],
        'CTA': [25, 26, 27],
        'TCA': [28, 29, 30],
    }
    if string in codon_states:
        return np.array(codon_states[string]), 3
    return np.array([0]), 1
def checkEndF(string):
    """Forward-strand codon states: stop codons map to their stop triples,
    anything else to the interior-coding triple [10, 11, 12]."""
    stop_states = {
        'TAG': [13, 14, 15],
        'TGA': [16, 17, 18],
        'TAA': [19, 20, 21],
    }
    return np.array(stop_states.get(string, [10, 11, 12]))
def checkEndR(string):
    """Reverse-strand codon states: reverse stop codons map to their stop
    triples, anything else to the interior-coding triple [31, 32, 33]."""
    stop_states = {
        'CAT': [34, 35, 36],
        'CAC': [37, 38, 39],
        'CAA': [40, 41, 42],
    }
    return np.array(stop_states.get(string, [31, 32, 33]))
def calculateA(states):
    """Count-based estimate of the transition matrix from one state path.

    Fixes: the matrix was sized 42x42 while every other estimator in this
    file (createA/createPi/createPhi) uses 43 states (indices 0..42), so a
    path visiting state 42 raised IndexError; indices are now cast to int
    so float-valued state arrays (as produced by translate_sequence_to_states)
    index correctly.
    """
    A = np.zeros((43, 43))
    for i in range(states.shape[0] - 1):
        src, dst = int(states[i]), int(states[i + 1])
        A[src, dst] += 1
    # row-normalise; rows for unvisited states divide 0/0 and become NaN,
    # matching createA's behaviour
    for i in range(43):
        A[i] /= np.sum(A[i])
    return A
def calculatePi():
    """Initial-state counts for the 43-state model: four training paths
    start in state 0 and one in state 7.

    Fixes: the original built the vector but never returned it (the call
    evaluated to None); sized to 43 for consistency with createA/createPhi.
    NOTE(review): these look like raw counts, not probabilities --
    normalise before using as an initial distribution.
    """
    pi = np.zeros(43)
    pi[0] = 4
    pi[7] = 1
    return pi
def convert_Z_to_ann7(Z):
    """hmm7 state path -> annotation string: 3 = 'N', >3 = 'C', <3 = 'R'."""
    chars = []
    for state in Z:
        if state == 3:
            chars.append('N')
        elif state > 3:
            chars.append('C')
        else:
            chars.append('R')
    return ''.join(chars)
def convert_Z_to_ann(Z):
    """43-state path -> annotation string: 0 = 'N', 1..21 = 'C', 22..42 = 'R'.

    States outside those ranges contribute nothing, as in the original.
    """
    out = []
    for state in Z:
        if state == 0:
            out.append('N')
        elif 1 <= state <= 21:
            out.append('C')
        elif 22 <= state <= 42:
            out.append('R')
    return ''.join(out)
# --- training / prediction driver ---
# Load genomes 1-5 with their true annotations; train on genomes 1-4 and
# predict the annotation of the held-out genome 5.
genomes = {}
annotation = {}
Z = [None]*5
sequence_list = [None]*5
for i in range(1, 6):
    sequence = read_fasta_file('genome' + str(i) + '.fa')
    sequence_list[i - 1] = translate_observations_to_indices(sequence['genome' + str(i)])
    genomes['genome' + str(i)] = sequence['genome' + str(i)]
    ann = read_fasta_file('true-ann' + str(i) + '.fa')
    annotation['genome' + str(i)] = ann['true-ann' + str(i)]
    # Test for hmm7
    Z[i-1] = translate_sequence_to_states(genomes['genome' + str(i)], annotation['genome' + str(i)])
    # Z[i-1] = createZ7(annotation['genome' + str(i)])
    #print(Z[i-1][-10:])
# maximum-likelihood parameter estimates from the four training genomes
A = createA(Z[:4])
Phi = createPhi(Z[:4], sequence_list[:4])
Pi = createPi()
sequence = sequence_list[4]
#print('Transition probabilities are', A)
#print('Emission probabilities are', Phi)
# most likely state path for genome 5
Zml = viterbi(A, Phi, Pi, sequence)
#print(Zml[-100:])
np.save('Z_5.npy', Zml)
#states = translate_sequence_to_states(genomes['genome1'])
#np.save('genome1.npy', states)
#Omega = np.load('OmegaTest2.npy')
#print(Omega[:,-10:].T)
#Z2_7 = np.load('Ztest2_7.npy')
# write the predicted annotation for genome 5
# NOTE(review): 'file' shadows the builtin; left unchanged here.
ann = convert_Z_to_ann(Zml)
file = open("pred-ann5.fa", "w")
file.write(ann)
file.close()
| [
"30654168+MaltheBisbo@users.noreply.github.com"
] | 30654168+MaltheBisbo@users.noreply.github.com |
8d4658d1e3eb1a08880756b83ac5e8bbc743f5d6 | ea5928c99f32194add6e002b87f1f65d18f6f17a | /eventastic_project/settings.py | 719e38e827d749a299863af2de9fa6b6a3c32733 | [] | no_license | robbiemcgugan-18/Eventastic-Project | d39dff29c7e83a2ea437b029a345a2fd6334a647 | abc37cbd223d327a0e5639c372844dba216a4612 | refs/heads/main | 2023-03-27T23:00:09.283196 | 2021-04-06T14:07:40 | 2021-04-06T14:07:40 | 345,500,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | """
Django settings for eventastic_project project.
Generated by 'django-admin startproject' using Django 2.2.17.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_DIR = os.path.join(BASE_DIR, 'static')
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
MEDIA_DIR = os.path.join(BASE_DIR, 'media')
# Media Settings
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = '/media/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'bk&vb37c+h0qw^sncf)d+cykp&gpt-k5gzdo5o6t7_-jwj7e0-'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True exposes stack traces and settings in error pages.
DEBUG = True
# Hosts this site may be served from (PythonAnywhere deployment + local dev).
ALLOWED_HOSTS = ["robbiemcgugan.pythonanywhere.com", "127.0.0.1"]
# Named URL pattern unauthenticated users are redirected to for login.
LOGIN_URL = 'eventastic:login'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'eventastic',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'eventastic_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR, ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'eventastic_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR, ]
| [
"2523558M@student.gla.ac.uk"
] | 2523558M@student.gla.ac.uk |
19113aa6740bf744725d71d3b855c535fda8706e | f39d00f9f5a5dfe55f9f1bcc3e4fe15af2025323 | /equal-semantic-subset/SDL/timersim/binary.c/binaries/gui-GUI/sql_db/initdb.py | d1423aea1ae9224b9a519d0723539691b419747f | [] | no_license | taheruddin/transformation-stateflow-sdl | 04ccf2efff8067fb1e2bc1b4506f072b6431d2a7 | 3b43c205704d0a74247c75b29ccf82b2b8898683 | refs/heads/master | 2020-03-18T00:45:07.553587 | 2018-05-29T12:59:15 | 2018-05-29T12:59:15 | 134,110,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from sqlalchemy import create_engine
from timersim_model import Base
# Connect to the local 'timersim' PostgreSQL database and create every table
# declared on the shared declarative Base.
# NOTE(review): credentials are hard-coded in the URL -- load them from
# configuration/environment before sharing or deploying this script.
engine = create_engine(
    'postgresql+psycopg2://taste:tastedb@localhost/timersim', echo=False)
Base.metadata.create_all(engine)
| [
"ktaheruddin@gmail.com"
] | ktaheruddin@gmail.com |
e39a7b5bb07e5fc0379fcb65027bcb0ecb46fdaa | d55ff0cf82b78d6db32d5463cbefdcd194679030 | /realtime_spider/realtime_spider/mycelery.py | 68473601ba4b3b8a8766ecc70cbfdfe72c39bc2b | [] | no_license | zhangshanwen/realtime_news_crawler | 69c20747bb20bbfbc7e5d1ff1dde0ccbf9cec8fc | eeebfa2b7e85c49dc5968f6d7fb8e0e559f92dfa | refs/heads/master | 2022-04-13T16:04:43.908171 | 2020-03-20T10:26:42 | 2020-03-20T10:26:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'realtime_spider.settings')
app = Celery('realtime_spider')
# Using a string here means the worker don't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| [
"13340306507@163.com"
] | 13340306507@163.com |
fb1fa79cb27c7a6ce4a935e217688714206a1b88 | 32079a99520872be97e83ccbd3ae6f003f925006 | /devel/lib/python2.7/dist-packages/geographic_msgs/msg/_GeoPoseStamped.py | aaf7353b2780ed3961d18e3356795aab1a14a471 | [] | no_license | wndxwilson/Azimorph | a00fa8d34e664cc29cd9226ec378f93fa7df088e | 60b81694cadaaf30b9f640a4ed3bebd20ebc2f1a | refs/heads/master | 2023-02-16T12:55:26.046759 | 2021-01-08T22:09:30 | 2021-01-08T22:09:30 | 328,021,807 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,489 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from geographic_msgs/GeoPoseStamped.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geographic_msgs.msg
import geometry_msgs.msg
import std_msgs.msg
class GeoPoseStamped(genpy.Message):
_md5sum = "cc409c8ed6064d8a846fa207bf3fba6b"
_type = "geographic_msgs/GeoPoseStamped"
_has_header = True # flag to mark the presence of a Header object
_full_text = """Header header
geographic_msgs/GeoPose pose
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: geographic_msgs/GeoPose
# Geographic pose, using the WGS 84 reference ellipsoid.
#
# Orientation uses the East-North-Up (ENU) frame of reference.
# (But, what about singularities at the poles?)
GeoPoint position
geometry_msgs/Quaternion orientation
================================================================================
MSG: geographic_msgs/GeoPoint
# Geographic point, using the WGS 84 reference ellipsoid.
# Latitude [degrees]. Positive is north of equator; negative is south
# (-90 <= latitude <= +90).
float64 latitude
# Longitude [degrees]. Positive is east of prime meridian; negative is
# west (-180 <= longitude <= +180). At the poles, latitude is -90 or
# +90, and longitude is irrelevant, but must be in range.
float64 longitude
# Altitude [m]. Positive is above the WGS 84 ellipsoid (NaN if unspecified).
float64 altitude
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['header','pose']
_slot_types = ['std_msgs/Header','geographic_msgs/GeoPose']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,pose
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GeoPoseStamped, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geographic_msgs.msg.GeoPose()
else:
self.header = std_msgs.msg.Header()
self.pose = geographic_msgs.msg.GeoPose()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_7d = None
def _get_struct_7d():
global _struct_7d
if _struct_7d is None:
_struct_7d = struct.Struct("<7d")
return _struct_7d
| [
"you@example.com"
] | you@example.com |
14c760f37873d7f96c930d4eeec2a69dcfe1579b | b134072c848bc1c13efb6c0017f094d324c18e41 | /old/r_pics.py | 89b0441685a2562a26faa045bbd2f1d5bee0d4a0 | [] | no_license | Cash-coder/Amazon-Images-Downloader | 5a0de36f648d3bbc6777dc59a1c5f1482788ea46 | 2f64af714675de2bc057a0ac5032dfc6482487e5 | refs/heads/master | 2023-08-06T01:51:07.300368 | 2021-10-08T11:07:05 | 2021-10-08T11:07:05 | 402,003,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,625 | py | from twocaptcha import TwoCaptcha
from decimal import Decimal
from re import sub
from time import sleep
from bs4 import BeautifulSoup as bs4
from requests_html import HTMLSession
import re
import traceback
import xlsxwriter
from openpyxl.workbook.workbook import Workbook
from openpyxl import load_workbook
def make_query_url(item, attribute):
    """Build an Amazon.es Electronics search URL for "item attribute".

    Returns (url, query_t): url is the '+'-joined search URL; query_t keeps
    the spaces and is used for human-readable logging in the output file.
    Fix: the original concatenated item and attribute twice; the query is
    now derived once from query_t.
    """
    query_t = item + ' ' + attribute
    query = query_t.replace(' ', '+')
    # all-departments search (kept for reference, unused):
    # url = 'https://www.amazon.es/s?k=' + query + '&__mk_es_ES=%C3%85M%C3%85%C5%BD%C3%95%C3%91&ref=nb_sb_noss'
    # Electronics-department search
    url = 'https://www.amazon.es/s?k=' + query + '&i=electronics&__mk_es_ES=%C3%85M%C3%85%C5%BD%C3%95%C3%91&ref=nb_sb_noss_2'
    return url, query_t
#probably in disuse because now I lower() the prod_title
def make_match_data(item, attribute):
    """Lowercase the search terms for case-insensitive title matching.

    Fix: the original assigned item_p/attribute_p only inside truthiness
    checks, so an empty item or attribute raised UnboundLocalError at the
    return statement; empty values now pass through unchanged.
    (Marked "probably in disuse" upstream -- product titles are lower()ed
    directly elsewhere.)
    """
    item_p = item.lower() if item else item
    attribute_p = attribute.lower() if attribute else attribute
    return item_p, attribute_p
def make_request(url):
    """GET *url* through a proxy with a desktop user-agent.

    Returns the requests_html response on HTTP 200 -- retrying once if an
    Amazon captcha page is detected -- and None on any other status code.
    """
    import random  # local import: only used for proxy rotation
    sleep(0.5)  # throttle to avoid too many requests per second
    headers = {'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"}
    session = HTMLSession()
    # Fix: the original proxies dict repeated the 'http' key, so every
    # entry except the last was silently discarded.  Keep the pool as a
    # list and pick one proxy per request.
    proxy_pool = [
        '185.121.13.34',
        '193.43.119.89',
        '5.188.183.202',
        '103.80.87.224',
        '83.147.12.143',
        '185.238.229.88',
        '185.226.107.157',
        '185.206.249.118',
        '185.121.15.41',
        '185.121.12.141',
    ]
    proxies = {'http': random.choice(proxy_pool)}
    r = session.get(url, headers=headers, proxies=proxies, allow_redirects=True)
    if r.status_code == 200:
        captcha = r.html.xpath('//h4[contains(text(),"Introduce los caracteres que se muestran a continuación")]')
        if captcha:
            print('---------detected CAPTCHA !!----------------')
            # Fix: save_response() returns None, so the original
            # `save_response(r) and print(...)` never printed.
            save_response(r)
            print('saved response html')
            # retry once and hope the captcha is gone
            r = session.get(url, headers=headers, proxies=proxies, allow_redirects=True)
        return r
    else:
        print('bad request', r.status_code)
        # NOTE(review): falls through returning None on non-200 responses;
        # callers must handle a None result.
# running counter used to number the dumped response files
nr = 0
def save_response(r):
    """Write the raw bytes of response *r* to responseN.html, where N is
    the module-level counter nr (incremented after each dump)."""
    global nr
    target = 'response' + str(nr) + '.html'
    with open(target, 'wb') as out:
        out.write(r.content)
    nr += 1
    print('saved response')
def select(prod, title, query):
    """Accept a product result unless its title marks it as an accessory.

    Returns [[query, title, ' ', url]] on acceptance; on rejection logs the
    miss, records it via write_no_results(query) and returns None.
    """
    accessory_words = ('carcasa', 'funda', 'protector', 'soporte')
    if any(word in title for word in accessory_words):
        print('not found from SELECT:','query:', query,'----','prod_title',title)
        print('-----------')
        write_no_results(query)
        return None
    # absolute_links is a set like {'https://...'}; flatten it to a bare URL
    raw_link = str(prod.absolute_links)
    raw_link = raw_link.replace('{', '').replace('}', '').replace("'", '')
    entry = [query, str(title), ' ', str(raw_link)]
    print('---------this entry was accepted:')
    print(entry)
    return [entry]
def get_matched_links(url, item_p, attribute_p, query, response):
    """Scan the search-result page in *response* and collect accepted products.

    A product is accepted when its price is at least `min_price` and its
    lowercased title contains `attribute_p` plus every word of `item_p`.
    Accepted entries come from select(); rejections are logged via
    write_no_results().

    Bug fixes vs. the original:
    - the old code `return`ed from inside the product loop, so only the
      first parsed product was ever considered; we now inspect every product
      and return the accumulated list at the end;
    - the duplicated n==1..8 elif ladder (one branch per word count, which
      silently rejected items with more than 8 words) is collapsed into a
      single all() test that works for any word count.

    :param url: URL that produced *response* (used only in error messages).
    :param item_p: lowercased item name, words separated by spaces.
    :param attribute_p: lowercased attribute (e.g. a colour).
    :param query: original query string, stored in each accepted entry.
    :param response: requests_html response for the search page.
    :return: list with one select() result per accepted product.
    """
    print('inside get matched links')
    products = response.html.xpath('//div[@data-component-type="s-search-result"]')
    n_prods = len(products)
    print('founded {} products'.format(n_prods))
    words = item_p.split(' ')
    # Minimal plausible price for a real phone; cheaper hits are accessories.
    min_price = 80
    links = []
    for prod in products:
        try:
            title = prod.xpath('//div[@class="a-section a-spacing-none a-spacing-top-small"]/h2')[0].text
            price = prod.xpath('//span[@class="a-price-whole"]')[0].text
            title = title.lower()
            # Keep only the integer part (comma is the decimal separator on
            # the Spanish site) and strip the '.' thousands separator.
            price = price.split(',')[0]
            if '.' in price:
                price = price.replace('.', ',')
            price = int(Decimal(sub(r'[^\d.]', '', price)))
            print('//price:', price, ' //title:', title)
            if price < min_price:
                print('this -----PRICE----- is too low:', price)
                continue
            if attribute_p in title and all(word in title for word in words):
                links.append(select(prod, title, query))
            else:
                print('not found in get_MATCHED links:', 'n_prods:', n_prods, '---', 'query: ', query, '-----', 'prod_title:', title, '---', 'item: ', item_p, '---', 'attr: ', attribute_p)
                print('-----------')
                write_no_results(query)
        except Exception as e:
            # Products without a price/title node land here; skip them.
            print('error in assign title and price, this is the url:')
            print(url)
            print(e)
            print('-------This is the prod data with the problem:-----------')
            print(prod.text)
            continue
    return links
# Global row cursor for the matches spreadsheet.
row = 1
def write_excel(links):
    """Write one row (query, title, spacer, link) into matches.xlsx.

    NOTE(review): a fresh Workbook() is created on every call (the
    load_workbook line is commented out), so matches.xlsx is overwritten each
    time and only the most recent row survives despite the global `row`
    counter -- confirm whether load_workbook/append was intended.
    NOTE(review): `links` is indexed [0..3] as query/title/spacer/link, but
    the caller passes the list returned by get_matched_links, whose elements
    are nested entry lists -- verify the intended payload shape.
    Any failure is printed and swallowed (best-effort logging).
    """
    global row
    try:
        #wb = load_workbook(filename = 'matches.xlsx')
        wb = Workbook()
        ws = wb.active
        #query
        ws.cell(row=row,column=1,value=links[0])
        #title
        ws.cell(row=row,column=2,value=links[1])
        #white space
        ws.cell(row=row,column=3,value=links[2])
        #link
        ws.cell(row=row,column=4,value=links[3])
        #ws.append(entry)
        # #print(query,link,prod_title)
        # NOTE(review): `separator` is built but never used.
        separator = ('################################################','################################################','################################################','################################################')
        row += 1
        wb.save('matches.xlsx')
    except Exception as e:
        print(e)
        pass
# Global row cursor for the no-results spreadsheet.
no_row_no_results = 1
def write_no_results(query):
    """Record a query that produced no accepted products in no_results.xlsx.

    NOTE(review): like write_excel, this creates a fresh Workbook() per call
    (load_workbook is commented out), so the file is overwritten each time and
    only the last rejected query survives -- likely unintended.
    """
    global no_row_no_results
    #wb = load_workbook(filename = 'no_results.xlsx')
    wb = Workbook()
    ws = wb.active
    ws.cell(row= no_row_no_results,column=1,value=query)
    no_row_no_results += 1
    #entry = (query,'something_here')
    #ws.append(entry)
    wb.save('no_results.xlsx')
def get_item_attribute():
    """Read (item, attribute) pairs from phones_color_variations.xlsx.

    Each worksheet row contributes one dict: column A -> 'item',
    column B -> 'attribute'.

    :return: list of {'item': ..., 'attribute': ...} dicts.
    """
    workbook = load_workbook(filename='phones_color_variations.xlsx')
    sheet = workbook.active
    pairs = []
    for values in sheet.iter_rows(values_only=True):
        pairs.append({'item': values[0], 'attribute': values[1]})
    return pairs
# Main driver: for every (item, attribute) pair from the spreadsheet, build a
# search query, fetch the results page, keep matching product links, and log
# them to Excel.
item_attribute_list = get_item_attribute()
for element in item_attribute_list:
    item = element.get('item')
    attribute = element.get('attribute')
    #print('1',item,attribute)
    #process the item and the attribute to match Amazon's standart (Capitalization, iPhone,etc...)
    #This is used later to identify matches within the titles of the prods.
    # _p means processed: from 'iphone pro' to 'iPhone Pro'
    item_p,attribute_p = make_match_data(item,attribute)
    #print('2',item_p,attribute_p)
    #with the above data make the url and the query (used later in the excel)
    url, query = make_query_url(item_p,attribute_p)
    #make the request with the query
    # NOTE(review): bare `except` skips the pair on ANY failure (including
    # KeyboardInterrupt); `except Exception` would be safer.
    try:
        response = make_request(url)
    except:
        continue
    #extract the links of the products which titles matches the query
    #list of dicts with link , query, prod_title
    links_set = get_matched_links(item_p=item_p,attribute_p=attribute_p,query=query,url=url,response=response)
    # write excel with query , prod_title , selection, link
    #selection is if the human validate that url has the needed pictures
    write_excel(links_set)
| [
"vadymkozak3@gmail.com"
] | vadymkozak3@gmail.com |
6309ff456e24cef38c82f96bc276817fa9fed8b7 | 010de69f076fc82cb580eac93d564a9168ea31e2 | /keras_retinanet/utils/keras_version.py | cbfab965875f7be6d234431ffc6f24de45b6962e | [
"Apache-2.0"
] | permissive | SepidehAlassi/Math-Figure-Recognition | 8ba10214108b3683aded98007f18da5b1ac624fa | 46f96ee241894d8c2bb0b9560f4c8b73ac3141ff | refs/heads/master | 2020-03-24T00:34:13.278997 | 2018-10-05T09:26:28 | 2018-10-05T09:26:28 | 142,297,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | from __future__ import print_function
import keras
import sys
minimum_keras_version = 2, 0, 9
def keras_version():
    """Return the installed Keras version as a tuple of ints, e.g. (2, 0, 9)."""
    parts = keras.__version__.split('.')
    return tuple(int(part) for part in parts)
def keras_version_ok():
    """Return True iff the installed Keras meets `minimum_keras_version`."""
    return minimum_keras_version <= keras_version()
def assert_keras_version():
    """Raise AssertionError (with a helpful message) if Keras is too old."""
    detected = keras.__version__
    required = '.'.join(str(part) for part in minimum_keras_version)
    message = 'You are using keras version {}. The minimum required version is {}.'.format(detected, required)
    assert keras_version_ok(), message
def check_keras_version():
    """Exit with status 1 (reason on stderr) when the Keras version is too old."""
    try:
        assert_keras_version()
    except AssertionError as error:
        print(error, file=sys.stderr)
        sys.exit(1)
| [
"sepideh.alassi@gmail.com"
] | sepideh.alassi@gmail.com |
079956603181043e047fcfcd8ae48b9209a73544 | 596e92d0d484b6e7eee6d322e72e52748fdeaa5d | /sportsdata/mlb_projections/models/mlb_projections_dfs_slate_game.py | 4aadebaeb66acdeb4d93f89a1e1c5748361edf13 | [] | no_license | scottypate/sportsdata | f5f61ddc7eb482883f93737c6ce73dd814ed4336 | a07955ab50bf4fff1ce114ed9895095ff770c473 | refs/heads/main | 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,117 | py | # coding: utf-8
"""
MLB v3 Projections
MLB projections API. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MlbProjectionsDfsSlateGame(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
      and the value is attribute type.
      attribute_map (dict): The key is attribute name
      and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type (used by to_dict()).
    swagger_types = {
        'slate_game_id': 'int',
        'slate_id': 'int',
        'game_id': 'int',
        'game': 'MlbProjectionsGame',
        'operator_game_id': 'int',
        'removed_by_operator': 'bool'
    }
    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'slate_game_id': 'SlateGameID',
        'slate_id': 'SlateID',
        'game_id': 'GameID',
        'game': 'Game',
        'operator_game_id': 'OperatorGameID',
        'removed_by_operator': 'RemovedByOperator'
    }
    def __init__(self, slate_game_id=None, slate_id=None, game_id=None, game=None, operator_game_id=None, removed_by_operator=None):  # noqa: E501
        """MlbProjectionsDfsSlateGame - a model defined in Swagger"""  # noqa: E501
        self._slate_game_id = None
        self._slate_id = None
        self._game_id = None
        self._game = None
        self._operator_game_id = None
        self._removed_by_operator = None
        self.discriminator = None
        # Only explicitly supplied values go through the property setters.
        if slate_game_id is not None:
            self.slate_game_id = slate_game_id
        if slate_id is not None:
            self.slate_id = slate_id
        if game_id is not None:
            self.game_id = game_id
        if game is not None:
            self.game = game
        if operator_game_id is not None:
            self.operator_game_id = operator_game_id
        if removed_by_operator is not None:
            self.removed_by_operator = removed_by_operator
    @property
    def slate_game_id(self):
        """Gets the slate_game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :return: The slate_game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :rtype: int
        """
        return self._slate_game_id
    @slate_game_id.setter
    def slate_game_id(self, slate_game_id):
        """Sets the slate_game_id of this MlbProjectionsDfsSlateGame.
        :param slate_game_id: The slate_game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :type: int
        """
        self._slate_game_id = slate_game_id
    @property
    def slate_id(self):
        """Gets the slate_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :return: The slate_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :rtype: int
        """
        return self._slate_id
    @slate_id.setter
    def slate_id(self, slate_id):
        """Sets the slate_id of this MlbProjectionsDfsSlateGame.
        :param slate_id: The slate_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :type: int
        """
        self._slate_id = slate_id
    @property
    def game_id(self):
        """Gets the game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :return: The game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :rtype: int
        """
        return self._game_id
    @game_id.setter
    def game_id(self, game_id):
        """Sets the game_id of this MlbProjectionsDfsSlateGame.
        :param game_id: The game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :type: int
        """
        self._game_id = game_id
    @property
    def game(self):
        """Gets the game of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :return: The game of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :rtype: MlbProjectionsGame
        """
        return self._game
    @game.setter
    def game(self, game):
        """Sets the game of this MlbProjectionsDfsSlateGame.
        :param game: The game of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :type: MlbProjectionsGame
        """
        self._game = game
    @property
    def operator_game_id(self):
        """Gets the operator_game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :return: The operator_game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :rtype: int
        """
        return self._operator_game_id
    @operator_game_id.setter
    def operator_game_id(self, operator_game_id):
        """Sets the operator_game_id of this MlbProjectionsDfsSlateGame.
        :param operator_game_id: The operator_game_id of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :type: int
        """
        self._operator_game_id = operator_game_id
    @property
    def removed_by_operator(self):
        """Gets the removed_by_operator of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :return: The removed_by_operator of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :rtype: bool
        """
        return self._removed_by_operator
    @removed_by_operator.setter
    def removed_by_operator(self, removed_by_operator):
        """Sets the removed_by_operator of this MlbProjectionsDfsSlateGame.
        :param removed_by_operator: The removed_by_operator of this MlbProjectionsDfsSlateGame.  # noqa: E501
        :type: bool
        """
        self._removed_by_operator = removed_by_operator
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(MlbProjectionsDfsSlateGame, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # NOTE: defining __eq__ without __hash__ makes instances unhashable
        # (Python 3); acceptable for this generated data model.
        if not isinstance(other, MlbProjectionsDfsSlateGame):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"scotty.pate@auth0.com"
] | scotty.pate@auth0.com |
f06ee9937cf3a3588bcecabc5d6a9642a47b5842 | 910786e6fcc1021a523b71071225256f07444c8a | /env/lib/python3.8/genericpath.py | 1aa043536f9d08fdb0093f8d6b074bcf8f6a3714 | [] | no_license | Hugo-cruz/birdie-ps-webcrawler | c71c115b440252b53a9280b5b97c0205acb20bcc | a64399f0aa00e9391ab386dac44fb69beef235c3 | refs/heads/main | 2023-01-02T23:59:00.370237 | 2020-10-21T01:31:30 | 2020-10-21T01:31:30 | 304,638,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | /home/olodum/anaconda3/lib/python3.8/genericpath.py | [
"cruz@raccoon.ag"
] | cruz@raccoon.ag |
1ffb1db2c2066adb8f14c431dfcbf11087eb698d | f4ef2fb2880f558e804d89b3525ae1b7dac803b5 | /cdpcli/auth.py | 266e0f0b89a239f90b31da7b1c843391a1a409f5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | isabella232/cdpcli | c43a08b20e6f99263bd1eb531d813b8dfced6cb1 | 05ba488d7916aaed6b8ecfb51a05c4c0ad900ce5 | refs/heads/master | 2023-01-14T18:03:36.512253 | 2020-11-12T21:29:26 | 2020-11-12T21:29:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,949 | py | # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from base64 import b64decode, urlsafe_b64encode
from email.utils import formatdate
import logging
from asn1crypto import keys, pem
from cdpcli.compat import json
from cdpcli.compat import OrderedDict
from cdpcli.compat import urlsplit
from cdpcli.exceptions import NoCredentialsError
from pure25519 import eddsa
import rsa
LOG = logging.getLogger('cdpcli.auth')
class BaseSigner(object):
    """Abstract request signer; concrete signers must implement add_auth()."""

    def add_auth(self, request):
        """Attach authentication material to *request* (abstract)."""
        raise NotImplementedError("add_auth")
class V1Signer(object):
    """Shared machinery for Altus/CDP "v1" request signing.

    Subclasses supply the raw signing primitive (_raw_sign_string); this class
    builds the canonical string from the request, signs it, and injects the
    `x-altus-auth` header (plus `x-altus-date`) into the outgoing request.
    """
    # %-template for the user-facing error raised when the private key cannot
    # be parsed; '%s' is filled with the credential source (env/file/...).
    ERROR_MESSAGE = \
        "Failed to import private key from: '%s'. The private key is " \
        "corrupted or not in the right format. The private key " \
        "was extracted either from 'env' (environment variables), " \
        "'shared-credentials-file' (a profile in the shared " \
        "credential file, by default under ~/.cdp/credentials), or " \
        "'auth-config-file' (a file containing the credentials whose " \
        "location was supplied on the command line.)"
    def __init__(self, credentials, auth_method):
        self.credentials = credentials
        self.auth_method = auth_method
    def _raw_sign_string(self, string_to_sign):
        # Subclass hook: return the raw (unencoded) signature bytes.
        raise NotImplementedError("Implement _raw_sign_string")
    def _sign_string(self, string_to_sign):
        """
        Sign the supplied string using the credentials and return the base64
        encoded signature in UTF-8 format.
        :param string_to_sign: String to sign
        :return: Signature as string
        """
        signature = self._raw_sign_string(string_to_sign)
        return urlsafe_b64encode(signature).strip().decode('utf-8')
    def _canonical_standard_headers(self, headers):
        # Builds the header portion of the canonical string.
        # NOTE: mutates *headers* by inserting 'x-altus-date'; raises if the
        # caller already set it, since the signature must own that value.
        interesting_headers = ['content-type', 'x-altus-date']
        hoi = []
        if 'x-altus-date' in headers:
            raise Exception("x-altus-date found in headers!")
        headers['x-altus-date'] = self._get_date()
        for ih in interesting_headers:
            found = False
            for key in headers:
                lk = key.lower()
                if headers[key] is not None and lk == ih:
                    hoi.append(headers[key].strip())
                    found = True
            if not found:
                # Missing header still contributes an empty line.
                hoi.append('')
        return '\n'.join(hoi)
    def _canonical_string(self, method, split, headers):
        # Canonical string: METHOD \n headers \n path \n auth_method.
        cs = method.upper() + '\n'
        cs += self._canonical_standard_headers(headers) + '\n'
        cs += split.path + '\n'
        cs += self.auth_method
        return cs
    def _get_signature(self, method, split, headers):
        # Signs the canonical string for the given request pieces.
        string_to_sign = self._canonical_string(method, split, headers)
        LOG.debug('StringToSign:\n%s', string_to_sign)
        return self._sign_string(string_to_sign)
    def add_auth(self, request):
        # Entry point: compute the signature and attach the auth header.
        if self.credentials is None:
            raise NoCredentialsError
        LOG.debug("Calculating signature using %s." % self.auth_method)
        LOG.debug('HTTP request method: %s', request.method)
        split = urlsplit(request.url)
        signature = self._get_signature(request.method,
                                        split,
                                        request.headers)
        self._inject_signature(request, signature)
    def _get_date(self):
        # RFC 2822 date in GMT, as required by the signing scheme.
        return formatdate(usegmt=True)
    def _inject_signature(self, request, signature):
        # Refuses to overwrite an existing auth header.
        if 'x-altus-auth' in request.headers:
            raise Exception("x-altus-auth found in headers!")
        request.headers['x-altus-auth'] = self._get_signature_header(signature)
    def _get_signature_header(self, signature):
        # Header value is "<base64(metadata json)>.<signature>"; OrderedDict
        # keeps the JSON key order stable so the encoding is deterministic.
        auth_params = OrderedDict()
        auth_params['access_key_id'] = self.credentials.access_key_id
        auth_params['auth_method'] = self.auth_method
        encoded_auth_params = json.dumps(auth_params).encode('utf-8')
        return "%s.%s" % (
            urlsafe_b64encode(encoded_auth_params).strip().decode('utf-8'),
            signature)
class Ed25519v1Auth(V1Signer):
    """
    Ed25519 signing with a SHA-512 hash returning a base64 encoded signature.
    """
    AUTH_METHOD_NAME = 'ed25519v1'
    # Raw Ed25519 seed is 32 bytes; its base64 encoding is 44 characters.
    ED25519_SEED_LENGTH = 32
    ED25519_BASE64_SEED_LENGTH = 44
    def __init__(self, credentials):
        super(Ed25519v1Auth, self).__init__(credentials, self.AUTH_METHOD_NAME)
    @classmethod
    def detect_private_key(cls, key):
        """Heuristic: a 44-char key string is assumed to be an Ed25519 seed."""
        return len(key) == cls.ED25519_BASE64_SEED_LENGTH
    def _raw_sign_string(self, string_to_sign):
        """
        Sign the supplied string using the credentials and return the raw signature.
        :param string_to_sign: String to sign
        :return: Raw signature as string
        """
        try:
            # We expect the private key to be a base64 formatted string.
            seed = b64decode(self.credentials.private_key)
            if len(seed) != self.ED25519_SEED_LENGTH:
                raise Exception('Not an Ed25519 private key: %s' %
                                self.credentials.private_key)
            pk = eddsa.publickey(seed)
            signature = eddsa.signature(string_to_sign.encode('utf-8'), seed, pk)
            return signature
        except Exception:
            # Any parsing/signing failure is re-raised as a single friendly
            # error naming the credential source; details go to debug logs.
            message = self.ERROR_MESSAGE % self.credentials.method
            LOG.debug(message, exc_info=True)
            raise Exception(message)
class RSAv1Auth(V1Signer):
    """
    RSA signing with a SHA-256 hash returning a base64 encoded signature.
    """
    AUTH_METHOD_NAME = 'rsav1'
    def __init__(self, credentials):
        super(RSAv1Auth, self).__init__(credentials, self.AUTH_METHOD_NAME)
    def _raw_sign_string(self, string_to_sign):
        """
        Sign the supplied string using the credentials and return the raw signature.
        :param string_to_sign: String to sign
        :return: Raw signature as string
        """
        try:
            # We expect the private key to be the an PKCS8 pem formatted string.
            pem_bytes = self.credentials.private_key.encode('utf-8')
            if pem.detect(pem_bytes):
                _, _, der_bytes = pem.unarmor(pem_bytes)
                # In PKCS8 the key is wrapped in a container that describes it
                info = keys.PrivateKeyInfo.load(der_bytes, strict=True)
                # Directly unwrap the private key. The asn1crypto library stopped
                # offering an API call for this in their 1.0.0 release but their
                # official answer of using a separate native-code-dependent
                # library to do one line of work is unreasonable. Of course, this
                # line might break in the future...
                unwrapped = info['private_key'].parsed
                # The unwrapped key is equivalent to pkcs1 contents
                key = rsa.PrivateKey.load_pkcs1(unwrapped.dump(), 'DER')
            else:
                raise Exception('Not a PEM file')
        except Exception:
            # As in Ed25519v1Auth: collapse parsing failures into one friendly
            # error naming the credential source; details go to debug logs.
            message = self.ERROR_MESSAGE % self.credentials.method
            LOG.debug(message, exc_info=True)
            raise Exception(message)
        # We sign the hash.
        signature = rsa.sign(string_to_sign.encode('utf-8'), key, 'SHA-256')
        return signature
# Registry of supported auth method names -> signer implementation class.
AUTH_TYPE_MAPS = {
    Ed25519v1Auth.AUTH_METHOD_NAME: Ed25519v1Auth,
    RSAv1Auth.AUTH_METHOD_NAME: RSAv1Auth,
}
| [
"dev-kitchen@cloudera.com"
] | dev-kitchen@cloudera.com |
54210d170584c806c0b71de5287540965d1e2a9d | f098e333654dfcae9c571c29d6dc4bf9aeb45e91 | /prep_terrain_data.py | e430d3cc120ae5c1532ba5244d12a35f99496cd3 | [] | no_license | stephenoken/SVM | c05a0990c72200d613acd7bb5cd94c411f4c6724 | afc65a554cecf39a51ff82a4b68ed7fb07176036 | refs/heads/master | 2021-01-20T15:44:30.908768 | 2017-05-28T20:37:47 | 2017-05-28T20:37:47 | 90,791,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | #!/usr/bin/python
import random
def makeTerrainData(n_points=1000):
    """Generate the toy 2-feature terrain classification dataset.

    Features are (grade, bumpiness) pairs in [0, 1); the label is ~round of a
    noisy product, forced to 1.0 whenever grade or bumpiness exceeds 0.8.
    The RNG is re-seeded with 42 on every call, so output is deterministic.

    Cleanup: the original also rebuilt per-class feature lists and a
    `test_data` dict that were never returned (a leftover of the commented-out
    `return training_data, test_data`); that dead code is removed. The random
    draws and returned values are unchanged.

    :param n_points: total number of samples to generate.
    :return: (X_train, y_train, X_test, y_test) with a 75/25 split.
    """
    random.seed(42)  # fixed seed: the dataset is identical across calls
    # Draw order matters for reproducibility: grade, then bumpy, then error.
    grade = [random.random() for _ in range(n_points)]
    bumpy = [random.random() for _ in range(n_points)]
    error = [random.random() for _ in range(n_points)]
    y = [round(grade[i] * bumpy[i] + 0.3 + 0.1 * error[i]) for i in range(n_points)]
    # Extreme grade or bumpiness always means the "slow" class.
    for i in range(n_points):
        if grade[i] > 0.8 or bumpy[i] > 0.8:
            y[i] = 1.0
    ### split into train/test sets
    X = [[gg, ss] for gg, ss in zip(grade, bumpy)]
    split = int(0.75 * n_points)
    X_train = X[0:split]
    X_test = X[split:]
    y_train = y[0:split]
    y_test = y[split:]
    return X_train, y_train, X_test, y_test
"stephenoken@gmail.com"
] | stephenoken@gmail.com |
db41c6461ca60aec8131197475b23d84cca89170 | 90479144980baca82085252d68908b7bd8069166 | /photogur/admin.py | 38c785328993d77421cb30050cea3c5386c657d6 | [] | no_license | timurkurbanov/Photogur-Part-1 | 0c3bc5ce3c13dd086106ec3582afac675883fd68 | 9627cac1724a3ceb12747ebe40821b3d58983501 | refs/heads/master | 2020-05-27T02:41:43.845746 | 2019-06-22T12:58:12 | 2019-06-22T12:58:12 | 188,455,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | from django.contrib import admin
from photogur.models import Picture, Comment
# registering Picture with the admin back-end
admin.site.register(Picture)
# registering Comment with the admin back-end
admin.site.register(Comment)
| [
"1tkurbanov@gmail.com"
] | 1tkurbanov@gmail.com |
318cd859b70a41e212785c1596ffdf88353bce76 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_7/snxkai001/util.py | 217a94e3e61b1d0258092af7a9640f7e96345ae2 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | def create_grid (grid):
for u in range(4):
grid.append([])
for down in range(4):
grid[u].append(0)
def print_grid(grid):
    """Pretty-print the 4x4 grid inside a +---+ border; zeros render blank."""
    border = "+" + "-" * 20 + "+"
    cell = "{0:<5}"  # each tile left-aligned in a 5-character column
    print(border)
    for r in range(4):
        line = "|"
        for c in range(4):
            value = grid[r][c]
            line += cell.format(value if value != 0 else " ")
        print(line + "|")
    print(border)
def check_lost(grid):
    """Return True when no move is possible: board full, no adjacent equals."""
    board_full = all(grid[r][c] != 0 for r in range(4) for c in range(4))
    if not board_full:
        return False
    can_merge_vertically = any(
        grid[r][c] == grid[r + 1][c] for c in range(4) for r in range(3))
    can_merge_horizontally = any(
        grid[r][c] == grid[r][c + 1] for r in range(4) for c in range(3))
    return not (can_merge_vertically or can_merge_horizontally)
def check_won(grid):
    """Return True if any tile in the 4x4 grid has reached 32 or more."""
    return any(grid[r][c] >= 32 for r in range(4) for c in range(4))
def grid_equal(grid1, grid2):
    """Return True if the two 4x4 grids match cell-for-cell."""
    return all(grid1[r][c] == grid2[r][c] for r in range(4) for c in range(4))
def copy_grid(grid):
    """Return an independent (cell-by-cell) copy of the 4x4 grid."""
    return [[grid[r][c] for c in range(4)] for r in range(4)]
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
6e0e2f4e5216af5abfe44a895adf4eaeb32824e7 | 2acc8b42f3082cf4019fd8b6a080c34089e2c367 | /OTcl/simulation/Test1/sevenVehicles/Bandwidth | 96aab238ae3bc468c32ec1f8d8a72107418c027f | [] | no_license | zhenhua-zou/VANETContentDownload | fafa69845a51f4650ec2f0b9d4d8a8ce25f64fdb | bbc8f152463e933c2069660a8e72b6ae90f20468 | refs/heads/master | 2020-05-16T22:36:21.475191 | 2014-11-21T17:14:25 | 2014-11-21T17:14:25 | 26,964,527 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 719 | #!/usr/bin/python
import sys
import os
# Python 2 script: averages request->transfer time over logs log1..log10 and
# prints an estimated bandwidth figure for seven vehicles.
fw = open('output', 'w')  # NOTE(review): opened but never written to or closed
totalTime=0
for i in range(1,11):
 fname='log%d'%i
 flag_after_pkt=0  # NOTE(review): set but never used
 fr=open(fname, 'r')
 for line in fr:
  # Each branch keeps the LAST whitespace-separated field of the matching
  # log line; if a log lacks one of these markers, the float()/pkt_no use
  # below raises NameError -- presumably the logs always contain them.
  if line.find('sendRequest')!=-1:
   for field in line.split(' '):
    request_startTime = field
  if line.find('Transfer End')!=-1:
   for field in line.split(' '):
    transfer_endTime = field
  if line.find('Server transfered')!=-1:
   j=1
   for field in line.split(' '):
    if j==3:
     pkt_no=field
    j=j+1
 totalTime=float(transfer_endTime)-float(request_startTime)+totalTime
# Average over the ten runs.
totalTime=totalTime/10
# packets * 512 B -> KB/s over the useful window (30 s warm-up subtracted),
# scaled by the 7 vehicles -- TODO confirm the 30 and 7 constants.
bandwidth=float(pkt_no)*512/(totalTime-30)/1024*7
print bandwidth
| [
"zouzhenhua@gmail.com"
] | zouzhenhua@gmail.com | |
06262865278dafa18fe72b94439e6400df5b582d | 79e8505101a8b1c7d25f3b465e12431f55663bab | /Thread/threading_deom.py | e291ebaf2981b80969e96e37fdfd539e9401e1ca | [] | no_license | guokairong123/PythonBase | 4f0d7ec95c4d55371d0feb35073ac4280c9b09c4 | be3dd63a2181b047a017207c6e7137467df0571b | refs/heads/master | 2023-04-18T00:02:20.002234 | 2021-04-16T07:48:11 | 2021-04-16T07:48:11 | 355,809,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import logging
import threading
from time import sleep, ctime
logging.basicConfig(level=logging.INFO)
loops = [2, 4]
def loop(nloop, nsec):
    """Log a start timestamp, sleep *nsec* seconds, then log an end timestamp.

    Fixes: the original concatenated "start loop" + str(nloop) + "at ", which
    rendered as e.g. "start loop0at Mon ..." (missing spaces); and it built
    the message eagerly. Lazy %-style args are only rendered if the record is
    actually emitted.
    """
    logging.info("start loop %s at %s", nloop, ctime())
    sleep(nsec)
    logging.info("end loop %s at %s", nloop, ctime())
def main():
    """Spawn one worker thread per entry in `loops` and wait for all of them."""
    logging.info("start all at " + ctime())
    workers = []
    for index in range(len(loops)):
        worker = threading.Thread(target=loop, args=(index, loops[index]))
        workers.append(worker)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    logging.info("end all at " + ctime())
if __name__ == '__main__':
main()
| [
"992926186@qq.com"
] | 992926186@qq.com |
525009d829150075661e454a81a7c6ef0d95d838 | 5b8c51c6608dfe86c73b984b1fcd0f1f9dabfb92 | /painting/migrations/0005_auto_20200408_2020.py | 6966e1014cf5aa1d50ba2fba355e983aead061da | [] | no_license | DanilShchepelin/diplom | f4a831bf2b38bb326184fde2aed44b36a1755134 | 86cb1e0d333de6d41eedb26c209d4d0746b9cc22 | refs/heads/master | 2022-09-05T01:16:28.901155 | 2020-05-31T15:23:52 | 2020-05-31T15:23:52 | 267,684,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | # Generated by Django 3.0.3 on 2020-04-08 15:20
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds `created_date` (defaulting to
    # timezone.now) and a nullable `published_date` to the Pictures model.
    dependencies = [
        ('painting', '0004_pictures'),
    ]
    operations = [
        migrations.AddField(
            model_name='pictures',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='pictures',
            name='published_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| [
"d.shepelin@gmail.com"
] | d.shepelin@gmail.com |
e526208fd9c766b77367fc743d4ad6fd2a00093e | d569deb783553d2bafdea9935349b58f202f5af5 | /utils/MultimodalMinibatchLoaderCaption.py | cfe52638cd68fa0892f7c2022db3eaff49f2ddc9 | [] | no_license | HayeonLee/LDRFVD-CVPR16-pytorch | 79b017a1066ba14ca573dc1e867e3a9fee3aa57e | 767a48b02576be12bfdd158b119c7f666aff00cd | refs/heads/master | 2020-04-14T17:10:00.442387 | 2019-01-03T12:45:57 | 2019-01-03T12:45:57 | 163,971,327 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,206 | py | #from util import model_utils
import os
import random
import argparse
import torch
import torch.nn as nn
from termcolor import cprint
from torch.utils.serialization import load_lua
#data.Dataset
class MultimodalMinibatchLoaderCaption(nn.Module):
    """Minibatch loader pairing image features with character-encoded captions.

    Reads per-class Torch (.t7) files (CUB-style layout) and serves random
    (text, image, label) minibatches for training:

      1. <data_dir>/manifest.txt            -- one .t7 file name per class
      2. <data_dir>/<ids_file>              -- 1-based class ids used for training
      3. <data_dir>/<image_dir>/<class>.t7  -- [n_images, img_dim, n_views]
      4. <data_dir>/text_c<k>/<class>.t7    -- [n_images, doc_length, n_captions]
    """

    def __init__(self, config):
        # nn.Module subclasses must initialize the base class before
        # assigning attributes (previously missing).
        super(MultimodalMinibatchLoaderCaption, self).__init__()
        self.nclass = config.nclass
        self.batch_size = config.batch_size
        self.data_dir = config.data_dir
        self.img_dim = config.image_dim
        self.doc_length = config.doc_length
        self.randomize_pair = config.randomize_pair
        self.num_caption = config.num_caption
        self.image_dir = config.image_dir
        self.flip = config.flip
        self.ids_file = config.ids_file
        # Character vocabulary used for one-hot caption encoding (70 symbols).
        self.alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{} "
        self.dict = {ch: i for i, ch in enumerate(self.alphabet)}
        self.alphabet_size = len(self.alphabet)
        # manifest.txt lists one per-class .t7 file name per line; the
        # trailing newline is kept here and stripped at use time.
        # (Files are now closed deterministically via ``with``.)
        with open(os.path.join(self.data_dir, 'manifest.txt')) as f:
            self.files = f.readlines()
        # ids_file holds the 1-based class ids used for training.
        with open(os.path.join(self.data_dir, self.ids_file)) as f:
            self.trainids = [int(line) for line in f.readlines()]
        self.nclass_train = len(self.trainids)

    def next_batch(self):
        """Sample one random minibatch.

        Returns:
            txt:    FloatTensor [batch_size, doc_length, alphabet_size],
                    one-hot character encoding of one random caption.
            img:    FloatTensor [batch_size, img_dim], one random view of a
                    random image of the sampled class.
            labels: FloatTensor [batch_size], position index used as label.
        """
        sample_ix = torch.randperm(self.nclass_train).narrow(0, 0, self.batch_size)
        txt = torch.zeros(self.batch_size, self.doc_length, self.alphabet_size)
        img = torch.zeros(self.batch_size, self.img_dim)
        labels = torch.zeros(self.batch_size)
        for i in range(self.batch_size):
            # ids in ids_file are 1-based; files list is 0-based.
            cls_id = self.trainids[int(sample_ix[i])] - 1
            fname = self.files[cls_id][:-1]
            if self.image_dir in ['', None]:
                cls_imgs = load_lua(os.path.join(self.data_dir, 'images', fname))
            else:
                # cls_imgs: [n_images, img_dim, n_views]
                cls_imgs = load_lua(os.path.join(self.data_dir, self.image_dir, fname))
            # cls_sens: [n_images, doc_length, n_captions]
            cls_sens = load_lua(os.path.join(self.data_dir, 'text_c{}'.format(self.num_caption), fname))
            sen_ix = random.randint(0, cls_sens.size(2) - 1)  # random caption index
            ix = torch.randperm(cls_sens.size(0))[0]          # random image of the class
            ix_view = torch.randperm(cls_imgs.size(2))[0]     # random view of that image
            img[i] = cls_imgs[ix, :, ix_view]
            labels[i] = i
            # One-hot encode the chosen caption character by character.
            for j in range(cls_sens.size(1)):
                if cls_sens.size(0) == 1:
                    on_ix = int(cls_sens[0, j, sen_ix]) - 1
                else:
                    on_ix = int(cls_sens[ix, j, sen_ix]) - 1
                if on_ix == -1:  # encoded 0 marks end of caption
                    break
                if random.random() < self.flip:
                    # BUG FIX: the reversed index was ``size(1) - j + 1``,
                    # which overruns the tensor for small j. The mirrored
                    # position of j in [0, size(1)) is ``size(1) - 1 - j``.
                    txt[i, cls_sens.size(1) - 1 - j, on_ix] = 1
                else:
                    txt[i, j, on_ix] = 1
        return txt, img, labels
def vocab_mapping():
    """Stubbed-out vocabulary mapping.

    Eventually meant to (1) read a ``Vocab.t7`` file and (2) convert the
    given sentences using that vocabulary. Currently unimplemented and
    simply returns the placeholder value ``0``.
    """
    return 0
if __name__=="__main__":
print('*** Dataset loader for Testing (python version) ***')
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/home/cvpr19/scottreed/DATA/CUB', help='data directory.')
parser.add_argument('--nclass', type=int, default=200, help='number of classes')
parser.add_argument('--doc_length', type=int, default=201, help='document length')
parser.add_argument('--image_dim', type=int, default=1024, help='image feature dimension')
parser.add_argument('--batch_size',type=int, default=40, help='number of sequences to train on in parallel')
parser.add_argument('--randomize_pair', type=int, default=0, help='if 1, images and captions of the same class are randomly paired.')
#parser.add_argument('--ids_file', type=str, default='trainids.txt', help='file specifying which class labels are used for training. Can also be trainvalids.txt')
parser.add_argument('--ids_file', type=str, default='trainvalids.txt', help='file specifying which class labels are used for training. Can also be trainvalids.txt')
#parser.add_argument('--num_caption',type=int, default=5, help='number of captions per image to be used for training')
parser.add_argument('--num_caption',type=int, default=10, help='number of captions per image to be used for training')
# parser.add_argument('--image_dir', type=str, default='images_th3', help='image directory in data')
parser.add_argument('--image_dir', type=str, default='images', help='image directory in data')
parser.add_argument('--flip',type=int, default=0, help='flip sentence')
config = parser.parse_args()
loader = MultimodalMinibatchLoaderCaption(config)
txt, img, labels = loader.next_batch()
print('size of txt: [batch_size, doc_length, alphabet size]={}'.format(txt.size()))
print('size of img: [batch_size, 1d image dim]={}'.format(img.size()))
print('size of labels: {}'.format(labels.size()))
| [
"yeonhi926@gmail.com"
] | yeonhi926@gmail.com |
a3412162d035faa00ecbdafaf468944e5716c1ac | 86daa9ab0ff3a42e5dbfc97a7ca1beb85fb97be4 | /autoscaler/azure_api.py | 168fb80d9ae70a5c67bf9fb8c0a15a87b297ee3b | [
"MIT"
] | permissive | ellerbrock/kubernetes-ec2-autoscaler | 4c1b10a23ade8a3499e5f590b6a5e7c14f29a191 | 7efdfbc565fee2d50b0f34332985af200f834110 | refs/heads/master | 2020-03-19T05:52:15.563556 | 2018-05-30T17:25:44 | 2018-05-30T19:01:09 | 135,969,599 | 1 | 0 | MIT | 2018-06-04T04:14:09 | 2018-06-04T04:14:09 | null | UTF-8 | Python | false | false | 15,194 | py | import logging
import json
import re
from azure.monitor import MonitorClient
from azure.monitor.models import EventData
from copy import deepcopy
from datetime import datetime, timedelta
from threading import RLock, Condition
from typing import List, Tuple, MutableMapping, Mapping
import pytz
from abc import ABC
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.compute.models import VirtualMachineScaleSet, Sku
from azure.mgmt.resource import ResourceManagementClient
from autoscaler.utils import Future
logger = logging.getLogger(__name__)
PRIORITY_TAG = 'priority'
# Value should be a json map of NoSchedule taint key-values
NO_SCHEDULE_TAINTS_TAG = 'no_schedule_taints'
class AzureScaleSet:
    """Value object describing one Azure VM scale set.

    Instances compare and hash by value (see :meth:`_key`) so they can be
    kept in sets/dicts and diffed between polling cycles.

    :param location: Azure region name (e.g. ``eastus``).
    :param resource_group: resource group containing the scale set.
    :param name: scale set name.
    :param instance_type: VM SKU (e.g. ``Standard_D2_v2``).
    :param capacity: current instance count.
    :param provisioning_state: Azure provisioning state string.
    :param timeout_until: if set, the scale set is backed off until then
        (because of a recent provisioning failure).
    :param timeout_reason: human-readable reason for the backoff.
    :param priority: scheduling priority read from the ``priority`` tag.
    :param no_schedule_taints: NoSchedule taint key/values read from the
        ``no_schedule_taints`` tag.
    """
    def __init__(self, location: str, resource_group: str, name: str, instance_type: str, capacity: int,
                 provisioning_state: str, timeout_until: datetime = None, timeout_reason: str = None, priority: int = None,
                 no_schedule_taints: Mapping[str, str] = None) -> None:
        self.name = name
        self.instance_type = instance_type
        self.capacity = capacity
        self.provisioning_state = provisioning_state
        self.resource_group = resource_group
        self.location = location
        self.timeout_until = timeout_until
        self.timeout_reason = timeout_reason
        self.priority = priority
        # BUG FIX: the default used to be a literal ``{}`` -- a single dict
        # object shared by every call (mutable default argument pitfall).
        # ``None`` is now the sentinel; behavior for callers is unchanged.
        self.no_schedule_taints = no_schedule_taints if no_schedule_taints is not None else {}
    def __str__(self):
        return 'AzureScaleSet({}, {}, {}, {})'.format(self.name, self.instance_type, self.capacity, self.provisioning_state)
    def __repr__(self):
        return str(self)
    def _key(self):
        # Tuple of all identity-relevant fields; taints are converted to a
        # tuple of items so the key stays hashable.
        return (self.name, self.instance_type, self.capacity, self.provisioning_state, self.resource_group, self.location,
                self.timeout_until, self.timeout_reason, self.priority, tuple(self.no_schedule_taints.items()))
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, AzureScaleSet):
            return False
        return self._key() == other._key()
    def __hash__(self) -> int:
        return hash(self._key())
class AzureScaleSetInstance:
    """Value object for a single VM inside a scale set.

    Compares and hashes by (instance_id, vm_id, launch_time) so instances
    can be diffed between polling cycles.
    """
    def __init__(self, instance_id: str, vm_id: str, launch_time: datetime) -> None:
        self.instance_id = instance_id
        self.vm_id = vm_id
        self.launch_time = launch_time
    def _key(self):
        # All identity-relevant fields, used by equality and hashing.
        return (self.instance_id, self.vm_id, self.launch_time)
    def __str__(self):
        return 'AzureScaleSetInstance({0}, {1}, {2})'.format(self.instance_id, self.vm_id, self.launch_time)
    def __repr__(self):
        return self.__str__()
    def __eq__(self, other: object) -> bool:
        return isinstance(other, AzureScaleSetInstance) and self._key() == other._key()
    def __hash__(self) -> int:
        return hash(self._key())
class AzureApi(ABC):
    """Interface for the subset of the Azure API the autoscaler uses.

    Implemented by :class:`AzureWrapper` (talks to Azure) and decorated by
    :class:`AzureWriteThroughCachedApi` (write-through cache).

    NOTE(review): the methods are not marked ``@abstractmethod``, so this
    base class is technically instantiable and the methods return ``None``.
    """
    def list_scale_sets(self, resource_group_name: str) -> List[AzureScaleSet]:
        # Return all scale sets in the given resource group.
        pass
    def list_scale_set_instances(self, scale_set: AzureScaleSet) -> List[AzureScaleSetInstance]:
        # Return the VM instances of the given scale set.
        pass
    def update_scale_set(self, scale_set: AzureScaleSet, new_capacity: int) -> Future:
        # Set the scale set's capacity; completion is signalled via the Future.
        pass
    def terminate_scale_set_instances(self, scale_set: AzureScaleSet, instances: List[AzureScaleSetInstance]) -> Future:
        # Delete the given instances; completion is signalled via the Future.
        pass
    def get_remaining_instances(self, resource_group_name: str, sku: str) -> int:
        # Return how many more instances of ``sku`` fit in the core quota.
        pass
TIMEOUT_PERIOD = timedelta(minutes=15)
# Mangles a SKU name into the family name used for quotas
def _azure_sku_family(name: str) -> str:
match = re.match('Standard_(?P<family>[A-Z]{1,2})[0-9]{1,2}_?(?P<version>v[0-9])?', name)
if match is None:
raise ValueError("SKU not from a recognized family: " + name)
family = match.group('family')
result = "standard" + family
# Special case for one of Azure's new SKUs :(
if family == 'ND':
result += 'S'
if match.group('version') is not None:
result += match.group('version')
result += 'Family'
return result
class AzureWrapper(AzureApi):
    """Concrete :class:`AzureApi` backed by the Azure management SDK clients."""
    def __init__(self, compute_client: ComputeManagementClient, monitor_client: MonitorClient, resource_client: ResourceManagementClient) -> None:
        self._compute_client = compute_client
        self._monitor_client = monitor_client
        self._resource_client = resource_client
    def list_scale_sets(self, resource_group_name: str) -> List[AzureScaleSet]:
        """Return all scale sets in the resource group.

        Also scans the last TIMEOUT_PERIOD (15 min) of activity-log
        failures so each returned scale set carries a backoff window
        (``timeout_until``/``timeout_reason``) when recent provisioning
        attempts failed.
        """
        fifteen_minutes_ago = datetime.now(pytz.utc) - TIMEOUT_PERIOD
        filter_clause = "eventTimestamp ge '{}' and resourceGroupName eq '{}'".format(fifteen_minutes_ago, resource_group_name)
        select_clause = "authorization,status,subStatus,properties,resourceId,eventTimestamp"
        # Recent failure events grouped by the scale set they belong to.
        failures_by_scale_set: MutableMapping[str, List[EventData]] = {}
        for log in self._monitor_client.activity_logs.list(filter=filter_clause, select=select_clause):
            if (log.status and log.status.value == 'Failed') or (log.properties and log.properties.get('statusCode') == 'Conflict'):
                # Failures of delete operations are ignored -- they don't
                # indicate a capacity/provisioning problem.
                if log.authorization and log.authorization.action and 'delete' in log.authorization.action:
                    continue
                failures_by_scale_set.setdefault(log.resource_id, []).append(log)
        result = []
        for scale_set in self._compute_client.virtual_machine_scale_sets.list(resource_group_name):
            # Newest failure first; prefer the first event that carries a
            # detailed error message, otherwise fall back to the sub-status.
            failures = sorted(failures_by_scale_set.get(scale_set.id, []), key=lambda x: x.event_timestamp, reverse=True)
            timeout_until = None
            timeout_reason = None
            for failure in failures:
                status_message = json.loads(failure.properties.get('statusMessage', "{}")) if failure.properties else {}
                error_details = status_message.get('error', {})
                if 'message' in error_details:
                    timeout_until = failure.event_timestamp + TIMEOUT_PERIOD
                    timeout_reason = error_details['message']
                    # Stop if we found a message with details
                    break
                if timeout_until is None:
                    timeout_until = failure.event_timestamp + TIMEOUT_PERIOD
                    timeout_reason = failure.sub_status.localized_value
            # Scheduling hints are carried as tags on the scale set.
            priority = int(scale_set.tags[PRIORITY_TAG]) if PRIORITY_TAG in scale_set.tags else None
            no_schedule_taints = json.loads(scale_set.tags.get(NO_SCHEDULE_TAINTS_TAG, '{}'))
            result.append(AzureScaleSet(scale_set.location, resource_group_name, scale_set.name, scale_set.sku.name,
                                        scale_set.sku.capacity, scale_set.provisioning_state, timeout_until=timeout_until,
                                        timeout_reason=timeout_reason, priority=priority, no_schedule_taints=no_schedule_taints))
        return result
    def list_scale_set_instances(self, scale_set: AzureScaleSet) -> List[AzureScaleSetInstance]:
        """Return the scale set's VMs with their provisioning-success times."""
        result = []
        for instance in self._compute_client.virtual_machine_scale_set_vms.list(scale_set.resource_group, scale_set.name, expand="instanceView"):
            # If the instance hasn't reached ProvisioningState/succeeded
            # yet, treat "now" as its launch time.
            launch_time = datetime.now(pytz.utc)
            for status in instance.instance_view.statuses:
                if status.code == 'ProvisioningState/succeeded':
                    launch_time = status.time
                    break
            result.append(AzureScaleSetInstance(instance.instance_id, instance.vm_id, launch_time))
        return result
    def update_scale_set(self, scale_set: AzureScaleSet, new_capacity: int) -> Future:
        """Set the scale set's capacity; returns a Future for completion."""
        parameters = VirtualMachineScaleSet(scale_set.location, sku=Sku(name=scale_set.instance_type, capacity=new_capacity))
        azure_op = self._compute_client.virtual_machine_scale_sets.create_or_update(scale_set.resource_group, scale_set.name,
                                                                                    parameters=parameters)
        return AzureOperationPollerFutureAdapter(azure_op)
    def terminate_scale_set_instances(self, scale_set: AzureScaleSet, instances: List[AzureScaleSetInstance]) -> Future:
        """Delete the given instances; returns a Future for completion."""
        future = self._compute_client.virtual_machine_scale_sets.delete_instances(scale_set.resource_group, scale_set.name, [instance.instance_id for instance in instances])
        return AzureOperationPollerFutureAdapter(future)
    def get_remaining_instances(self, resource_group_name: str, sku: str):
        """Return how many more ``sku`` instances fit within the core quota.

        Returns 0 (with a warning) if the SKU metadata or its quota family
        cannot be found in the resource group's region.
        """
        resource_group = self._resource_client.resource_groups.get(resource_group_name)
        cores_per_instance = None
        for vm_size in self._compute_client.virtual_machine_sizes.list(location=resource_group.location):
            if vm_size.name == sku:
                cores_per_instance = vm_size.number_of_cores
        if cores_per_instance is None:
            logger.warn("No metadata found for sku: " + sku)
            return 0
        # Azure quotas are expressed in cores per SKU family; translate to
        # whole instances.
        for usage in self._compute_client.usage.list(location=resource_group.location):
            if usage.name.value == _azure_sku_family(sku):
                return (usage.limit - usage.current_value) // cores_per_instance
        logger.warn("No quota found matching: " + sku)
        return 0
class AzureWriteThroughCachedApi(AzureApi):
    """Write-through caching decorator around another :class:`AzureApi`.

    Reads are cached per resource group / scale set; writes (capacity
    updates, instance termination) invalidate the affected entries once
    the returned Future completes. All cache access is guarded by a
    single re-entrant lock, and cached values are deep-copied on the way
    out so callers cannot mutate the cache.
    """
    def __init__(self, delegate: AzureApi) -> None:
        self._delegate = delegate
        self._lock = RLock()
        # (resource_group, scale_set_name) -> instance list
        self._instance_cache: MutableMapping[Tuple[str, str], List[AzureScaleSetInstance]] = {}
        # resource_group -> scale set list
        self._scale_set_cache: MutableMapping[str, List[AzureScaleSet]] = {}
        # resource_group -> {sku -> remaining instance count}
        self._remaining_instances_cache: MutableMapping[str, MutableMapping[str, int]] = {}
    def invalidate_quota_cache(self, resource_group_name: str) -> None:
        # Drop cached quota numbers for the resource group (e.g. after a
        # quota change is suspected).
        with self._lock:
            if resource_group_name in self._remaining_instances_cache:
                del self._remaining_instances_cache[resource_group_name]
    def list_scale_sets(self, resource_group_name: str, force_refresh=False) -> List[AzureScaleSet]:
        """Return scale sets, from cache unless ``force_refresh`` is set."""
        if not force_refresh:
            with self._lock:
                if resource_group_name in self._scale_set_cache:
                    return deepcopy(self._scale_set_cache[resource_group_name])
        scale_sets = self._delegate.list_scale_sets(resource_group_name)
        with self._lock:
            old_scale_sets = dict((x.name, x) for x in self._scale_set_cache.get(resource_group_name, []))
            for scale_set in scale_sets:
                old_scale_set = old_scale_sets.get(scale_set.name)
                if not old_scale_set:
                    continue
                # Check if Scale Set was changed externally
                # (capacity drift means our instance list is stale too).
                if old_scale_set.capacity != scale_set.capacity:
                    if (resource_group_name, scale_set.name) in self._instance_cache:
                        del self._instance_cache[(resource_group_name, scale_set.name)]
            self._scale_set_cache[resource_group_name] = scale_sets
        return deepcopy(scale_sets)
    def list_scale_set_instances(self, scale_set: AzureScaleSet) -> List[AzureScaleSetInstance]:
        """Return the scale set's instances, cached when complete."""
        key = (scale_set.resource_group, scale_set.name)
        with self._lock:
            if key in self._instance_cache:
                return deepcopy(self._instance_cache[key])
        instances = self._delegate.list_scale_set_instances(scale_set)
        # Make sure we don't poison the cache, if our delegate is eventually consistent
        if len(instances) == scale_set.capacity:
            with self._lock:
                self._instance_cache[key] = instances
        return deepcopy(instances)
    def update_scale_set(self, scale_set: AzureScaleSet, new_capacity: int) -> Future:
        # Write-through: invalidate caches only after the operation finishes.
        future = self._delegate.update_scale_set(scale_set, new_capacity)
        future.add_done_callback(lambda _: self._invalidate(scale_set.resource_group, scale_set.name))
        return future
    def terminate_scale_set_instances(self, scale_set: AzureScaleSet, instances: List[AzureScaleSetInstance]) -> Future:
        # Write-through: invalidate caches only after the operation finishes.
        future = self._delegate.terminate_scale_set_instances(scale_set, instances)
        future.add_done_callback(lambda _: self._invalidate(scale_set.resource_group, scale_set.name))
        return future
    def get_remaining_instances(self, resource_group_name: str, sku: str):
        """Return cached remaining-quota count for ``sku``, fetching on miss."""
        with self._lock:
            if resource_group_name in self._remaining_instances_cache:
                cached = self._remaining_instances_cache[resource_group_name]
                if sku in cached:
                    return cached[sku]
        remaining = self._delegate.get_remaining_instances(resource_group_name, sku)
        with self._lock:
            self._remaining_instances_cache.setdefault(resource_group_name, {})[sku] = remaining
        return remaining
    def _invalidate(self, resource_group_name: str, scale_set_name: str) -> None:
        # Drop every cache entry a completed write could have made stale.
        with self._lock:
            if (resource_group_name, scale_set_name) in self._instance_cache:
                del self._instance_cache[(resource_group_name, scale_set_name)]
            if resource_group_name in self._scale_set_cache:
                del self._scale_set_cache[resource_group_name]
            if resource_group_name in self._remaining_instances_cache:
                del self._remaining_instances_cache[resource_group_name]
_AZURE_API_MAX_WAIT = 10*60
# Adapts an Azure async operation to behave like a Future
class AzureOperationPollerFutureAdapter(Future):
    """Adapts an Azure async operation poller to the ``Future`` interface.

    All state lives behind ``self._condition`` because
    ``AzureOperationPoller`` is not reentrant: a callback added with
    ``add_done_callback()`` could not call ``result()`` if we delegated
    everything to the poller directly.
    """
    def __init__(self, azure_operation):
        self._done = False
        self._result = None
        self._exception = None
        self._condition = Condition()
        self._callbacks = []
        self.azure_operation = azure_operation
        azure_operation.add_done_callback(self._handle_completion)
    def _handle_completion(self, result):
        """Invoked by the poller when the operation finishes."""
        with self._condition:
            self._done = True
            if self.azure_operation._exception is None:
                self._result = result
            else:
                self._exception = self.azure_operation._exception
            self._condition.notify_all()
            # BUG FIX: copy the callback list before clearing it.
            # ``callbacks = self._callbacks`` only aliased the list, so the
            # subsequent ``clear()`` emptied both names and no done-callback
            # was ever invoked.
            callbacks = list(self._callbacks)
            self._callbacks.clear()
            for callback in callbacks:
                callback(self)
    def result(self):
        """Block until completion (bounded by ``_AZURE_API_MAX_WAIT``).

        Returns the operation's result or raises its exception; raises
        ``TimeoutError`` if the wait expires.
        """
        callbacks = []
        try:
            with self._condition:
                if not self._done:
                    self._condition.wait(_AZURE_API_MAX_WAIT)
                if not self._done:
                    # We reached the timeout: mark done here so pending
                    # callbacks still fire exactly once (in ``finally``).
                    self._exception = TimeoutError()
                    self._done = True
                # Copy, don't alias -- see the fix in _handle_completion.
                callbacks = list(self._callbacks)
                self._callbacks.clear()
                if self._exception:
                    raise self._exception
                return self._result
        finally:
            for callback in callbacks:
                callback(self)
    def add_done_callback(self, fn):
        """Call ``fn(self)`` on completion; immediately if already done."""
        with self._condition:
            if self._done:
                fn(self)
            else:
                self._callbacks.append(fn)
| [
"christopherberner@gmail.com"
] | christopherberner@gmail.com |
39e716c97c55b1ae0ce73788baea20aa77976d3b | 9508879fcf1cff718f3fe80502baff8b82c04427 | /data_structures_domain/linked_lists/print_in_reverse.py | 9e70be3bfc61fc9bdc1e648101f1a043b9f0ec55 | [] | no_license | davidozhang/hackerrank | e37b4aace7d63c8be10b0d4d2bffb4d34d401d55 | bdc40d6ff3e603949eb294bbc02a1e24a4ba5b80 | refs/heads/master | 2021-05-04T11:31:59.110118 | 2017-11-15T09:17:27 | 2017-11-15T09:17:27 | 47,906,672 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | """
Print elements of a linked list in reverse order as standard output
head could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def ReversePrint(head):
    # Python 2 solution: recurse to the tail first, then print each node's
    # data on the way back so values appear in reverse order.
    # O(n) time, O(n) recursion depth.
    if not head:
        return
    ReversePrint(head.next)
    print head.data
'''
Cleaner implementation
October 1, 2016
'''
def ReversePrint(head):
    # Same recursive algorithm as above, folded into a single guard:
    # the recursive call happens before the print, yielding reverse order.
    if head is not None:
        ReversePrint(head.next)
        print head.data
| [
"davzee@hotmail.com"
] | davzee@hotmail.com |
8d663c12b81cd6cad9d98212bbc118f9002383e6 | ea2c203dcfef9a3a9c65b4bec1b4ab403c4fa852 | /Numpy/NP_Exer.02.py | 18aa7fa702e7a18793f6d379fc0eb1692c6afd9c | [] | no_license | PedroAlejandroUicabDiaz/Python | cdecf163f178b5d43150db6043818ea9a1dbc3e5 | 34af00ea037667bd2f883bd21ba6706cf7668e66 | refs/heads/master | 2022-11-18T11:24:35.765510 | 2020-07-28T03:39:03 | 2020-07-28T03:39:03 | 265,569,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | #create two matrices where the size is entered by the user, the elements of each matrix will be placed randomly and then perform the multiplication.
import numpy as np
def ask_for_values():
#Write code here
def create_matrix(r,c):
#Write code here
mult_matrix(matA,matB)
def mult_matrix(matA,matB):
#Write code here
print('---')
print(m)
#Write code here
| [
"1909180@upy.ed.mx"
] | 1909180@upy.ed.mx |
605fc99d0df1142b5a8d12e0e0789855849a8f3c | 98eb6c6b571308f08c12eb4bc09ad4b61c055bab | /RootCoreBin/bin/x86_64-slc6-gcc48-opt/checkxAOD.py | b14f41de268453926df40c8f3fcf51e4bd198d68 | [] | no_license | abrennan87/Arturo_CxAODframework | 8f1b2a18a267acc573dbd7f937bd134f38672eb4 | 04ba03c6589a9f6b7af6d1da7059541bbd0093bb | refs/heads/master | 2016-09-05T20:31:03.558531 | 2015-04-14T08:55:37 | 2015-04-14T08:55:37 | 33,920,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | /cvmfs/atlas.cern.ch/repo/sw/ASG/AnalysisBase/2.1.29/xAODRootAccess/scripts/checkxAOD.py | [
"mia.brennan87@gmail.com"
] | mia.brennan87@gmail.com |
2ba36e1719cbf15b2cb9501534717d6961417159 | 2a9a136296e3d2abebf3a3dbfbbb091076e9f15f | /env/Lib/site-packages/werkzeug/debug/__init__.py | e678589f38dc51ac239012e27c5b00b0d099ac27 | [] | no_license | Lisukod/planet-tracker | a865e3920b858000f5d3de3b11f49c3d158e0e97 | 6714e6332b1dbccf7a3d44430620f308c9560eaa | refs/heads/master | 2023-02-18T19:26:16.705182 | 2021-01-23T01:51:58 | 2021-01-23T01:51:58 | 328,032,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,561 | py | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import getpass
import hashlib
import json
import mimetypes
import os
import pkgutil
import re
import sys
import time
import uuid
from itertools import chain
from os.path import basename
from os.path import join
from .._compat import text_type
from .._internal import _log
from ..http import parse_cookie
from ..security import gen_salt
from ..wrappers import BaseRequest as Request
from ..wrappers import BaseResponse as Response
from .console import Console
from .tbtools import get_current_traceback
from .tbtools import render_console_html
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin):
    """Return a short, salted MD5 digest of the debugger pin.

    Text pins are encoded to UTF-8 first; the digest is truncated to 12
    hex characters for use inside the pin cookie value.
    """
    if isinstance(pin, text_type):
        pin = pin.encode("utf-8", "replace")
    digest = hashlib.md5(pin + b"shittysalt").hexdigest()
    return digest[:12]
_machine_id = None
def get_machine_id():
    """Return a best-effort stable identifier for this machine.

    Used to salt the debugger PIN so it differs between hosts. The value
    is computed once and cached in the module-level ``_machine_id``; it
    may be ``bytes``, ``str`` or ``None`` depending on which platform
    source succeeded.
    """
    global _machine_id
    if _machine_id is not None:
        return _machine_id

    def _generate():
        # Linux: combine machine-id (or boot_id) with cgroup info.
        linux = b""
        # machine-id is stable across boots, boot_id is not.
        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
            try:
                with open(filename, "rb") as f:
                    value = f.readline().strip()
            except IOError:
                continue
            if value:
                linux += value
                break
        # Containers share the same machine id, add some cgroup
        # information. This is used outside containers too but should be
        # relatively stable across boots.
        try:
            with open("/proc/self/cgroup", "rb") as f:
                linux += f.readline().strip().rpartition(b"/")[2]
        except IOError:
            pass
        if linux:
            return linux
        # On OS X, use ioreg to get the computer's serial number.
        try:
            # subprocess may not be available, e.g. Google App Engine
            # https://github.com/pallets/werkzeug/issues/925
            from subprocess import Popen, PIPE

            dump = Popen(
                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"],
                stdout=PIPE,
            ).communicate()[0]
            match = re.search(b'"serial-number" = <([^>]+)', dump)
            if match is not None:
                return match.group(1)
        except (OSError, ImportError):
            pass
        # On Windows, use winreg to get the machine guid.
        try:
            import winreg as wr
        except ImportError:
            try:
                # Python 2 fallback.
                import _winreg as wr
            except ImportError:
                wr = None
        if wr is not None:
            try:
                with wr.OpenKey(
                    wr.HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Microsoft\\Cryptography",
                    0,
                    wr.KEY_READ | wr.KEY_WOW64_64KEY,
                ) as rk:
                    guid, guid_type = wr.QueryValueEx(rk, "MachineGuid")
                    if guid_type == wr.REG_SZ:
                        return guid.encode("utf-8")
                    return guid
            except WindowsError:
                pass
        # NOTE: falls through to None when no source succeeded.

    _machine_id = _generate()
    return _machine_id
class _ConsoleFrame(object):
    """Helper class so that we can reuse the frame console code for the
    standalone console.

    Wraps an interactive :class:`Console` evaluating in ``namespace``;
    frame id 0 is reserved for the standalone console (see
    ``DebuggedApplication.display_console``).
    """

    def __init__(self, namespace):
        self.console = Console(namespace)
        self.id = 0
def get_pin_and_cookie_name(app):
    """Given an application object this returns a semi-stable 9 digit pin
    code and a random key. The hope is that this is stable between
    restarts to not make debugging particularly frustrating. If the pin
    was forcefully disabled this returns `None`.

    Second item in the resulting tuple is the cookie name for remembering.
    """
    # WERKZEUG_DEBUG_PIN can be "off" (disable), an explicit digit string,
    # or unset (derive a pin from machine/app identity below).
    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
    rv = None
    num = None

    # Pin was explicitly disabled
    if pin == "off":
        return None, None

    # Pin was provided explicitly
    if pin is not None and pin.replace("-", "").isdigit():
        # If there are separators in the pin, return it directly
        if "-" in pin:
            rv = pin
        else:
            num = pin

    modname = getattr(app, "__module__", app.__class__.__module__)

    try:
        # getuser imports the pwd module, which does not exist in Google
        # App Engine. It may also raise a KeyError if the UID does not
        # have a username, such as in Docker.
        username = getpass.getuser()
    except (ImportError, KeyError):
        username = None

    mod = sys.modules.get(modname)

    # This information only exists to make the cookie unique on the
    # computer, not as a security feature.
    probably_public_bits = [
        username,
        modname,
        getattr(app, "__name__", app.__class__.__name__),
        getattr(mod, "__file__", None),
    ]

    # This information is here to make it harder for an attacker to
    # guess the cookie name. They are unlikely to be contained anywhere
    # within the unauthenticated debug page.
    private_bits = [str(uuid.getnode()), get_machine_id()]

    # Hash all available bits together; both the cookie name and (if not
    # provided) the pin digits are derived from this digest.
    h = hashlib.md5()
    for bit in chain(probably_public_bits, private_bits):
        if not bit:
            continue
        if isinstance(bit, text_type):
            bit = bit.encode("utf-8")
        h.update(bit)
    h.update(b"cookiesalt")

    cookie_name = "__wzd" + h.hexdigest()[:20]

    # If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate out 9 digits
    if num is None:
        h.update(b"pinsalt")
        num = ("%09d" % int(h.hexdigest(), 16))[:9]

    # Format the pincode in groups of digits for easier remembering if
    # we don't have a result yet.
    if rv is None:
        for group_size in 5, 4, 3:
            if len(num) % group_size == 0:
                rv = "-".join(
                    num[x : x + group_size].rjust(group_size, "0")
                    for x in range(0, len(num), group_size)
                )
                break
        else:
            rv = num

    return rv, cookie_name
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
:param request_key: The key that points to the request object in ths
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
    def __init__(
        self,
        app,
        evalex=False,
        request_key="werkzeug.request",
        console_path="/console",
        console_init_func=None,
        show_hidden_frames=False,
        pin_security=True,
        pin_logging=True,
    ):
        # Normalize any falsy init function to None (checked later in
        # display_console).
        if not console_init_func:
            console_init_func = None
        self.app = app
        self.evalex = evalex
        # Frames and tracebacks captured from failures, keyed by id, so the
        # interactive debugger UI can refer back to them.
        self.frames = {}
        self.tracebacks = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # Per-process random secret used to authenticate requests coming
        # from the rendered traceback page.
        self.secret = gen_salt(20)
        self._failed_pin_auth = 0
        self.pin_logging = pin_logging
        if pin_security:
            # Print out the pin for the debugger on standard out.
            # (WERKZEUG_RUN_MAIN is only "true" in the reloader's child
            # process, so the pin is logged exactly once.)
            if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
                _log("warning", " * Debugger is active!")
                if self.pin is None:
                    _log(
                        "warning",
                        " * Debugger PIN disabled. DEBUGGER UNSECURED!",
                    )
                else:
                    _log("info", " * Debugger PIN: %s" % self.pin)
        else:
            self.pin = None
    @property
    def pin(self):
        # Lazily derived on first access (see get_pin_and_cookie_name) and
        # cached together with the cookie name.
        if not hasattr(self, "_pin"):
            self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
        return self._pin

    @pin.setter
    def pin(self, value):
        # Allows overriding the pin, or disabling it entirely with None.
        self._pin = value

    @property
    def pin_cookie_name(self):
        """The name of the pin cookie."""
        if not hasattr(self, "_pin_cookie"):
            self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
        return self._pin_cookie
    def debug_application(self, environ, start_response):
        """Run the application and conserve the traceback frames.

        Generator WSGI app: streams the wrapped app's response; on an
        exception it records the traceback (for the interactive debugger)
        and yields the rendered debugger page instead, when headers have
        not been sent yet.
        """
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            for item in app_iter:
                yield item
            if hasattr(app_iter, "close"):
                app_iter.close()
        except Exception:
            if hasattr(app_iter, "close"):
                app_iter.close()
            traceback = get_current_traceback(
                skip=1,
                show_hidden_frames=self.show_hidden_frames,
                ignore_system_exceptions=True,
            )
            # Keep frames/traceback addressable by id for the debugger UI.
            for frame in traceback.frames:
                self.frames[frame.id] = frame
            self.tracebacks[traceback.id] = traceback

            try:
                start_response(
                    "500 INTERNAL SERVER ERROR",
                    [
                        ("Content-Type", "text/html; charset=utf-8"),
                        # Disable Chrome's XSS protection, the debug
                        # output can cause false-positives.
                        ("X-XSS-Protection", "0"),
                    ],
                )
            except Exception:
                # if we end up here there has been output but an error
                # occurred.  in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ["wsgi.errors"].write(
                    "Debugging middleware caught exception in streamed "
                    "response at a point where response headers were already "
                    "sent.\n"
                )
            else:
                is_trusted = bool(self.check_pin_trust(environ))
                yield traceback.render_full(
                    evalex=self.evalex,
                    evalex_trusted=is_trusted,
                    secret=self.secret,
                ).encode("utf-8", "replace")

            traceback.log(environ["wsgi.errors"])
    def execute_command(self, request, command, frame):
        """Execute a command in a console (a frame's console or the
        standalone one) and return the HTML-rendered result."""
        return Response(frame.console.eval(command), mimetype="text/html")

    def display_console(self, request):
        """Display a standalone shell.

        Frame id 0 is reserved for this console; its namespace is seeded
        by ``console_init_func`` (if given) plus the wrapped ``app``.
        """
        if 0 not in self.frames:
            if self.console_init_func is None:
                ns = {}
            else:
                ns = dict(self.console_init_func())
            ns.setdefault("app", self.app)
            self.frames[0] = _ConsoleFrame(ns)
        is_trusted = bool(self.check_pin_trust(request.environ))
        return Response(
            render_console_html(secret=self.secret, evalex_trusted=is_trusted),
            mimetype="text/html",
        )

    def paste_traceback(self, request, traceback):
        """Paste the traceback and return a JSON response."""
        rv = traceback.paste()
        return Response(json.dumps(rv), mimetype="application/json")

    def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        # basename() strips any directory components, preventing path
        # traversal outside the shared folder.
        filename = join("shared", basename(filename))
        try:
            data = pkgutil.get_data(__package__, filename)
        except OSError:
            data = None
        if data is not None:
            mimetype = (
                mimetypes.guess_type(filename)[0] or "application/octet-stream"
            )
            return Response(data, mimetype=mimetype)
        return Response("Not Found", status=404)
    def check_pin_trust(self, environ):
        """Checks if the request passed the pin test. This returns `True` if the
        request is trusted on a pin/cookie basis and returns `False` if not.
        Additionally if the cookie's stored pin hash is wrong it will return
        `None` so that appropriate action can be taken.
        """
        if self.pin is None:
            # Pin security disabled: everything is trusted.
            return True
        # Cookie value format: "<unix timestamp>|<hash_pin(pin)>".
        val = parse_cookie(environ).get(self.pin_cookie_name)
        if not val or "|" not in val:
            return False
        ts, pin_hash = val.split("|", 1)
        if not ts.isdigit():
            return False
        if pin_hash != hash_pin(self.pin):
            # Stored hash doesn't match the current pin -> pin changed.
            return None
        # Trust expires PIN_TIME (one week) after the cookie was set.
        return (time.time() - PIN_TIME) < int(ts)

    def _fail_pin_auth(self):
        # Throttle brute-force attempts: 0.5s delay for the first five
        # failures, 5s afterwards.
        time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
        self._failed_pin_auth += 1
def pin_auth(self, request):
    """Authenticate a debugger client with the security pin.

    Returns a JSON response with ``auth`` (whether authentication
    succeeded) and ``exhausted`` (whether the client is locked out).
    On success a trust cookie ``timestamp|hash`` is set.
    """
    exhausted = False
    auth = False
    trust = self.check_pin_trust(request.environ)
    # If the trust return value is `None` it means that the cookie is
    # set but the stored pin hash value is bad. This means that the
    # pin was changed. In this case we count a bad auth and unset the
    # cookie. This way it becomes harder to guess the cookie name
    # instead of the pin as we still count up failures.
    bad_cookie = False
    if trust is None:
        self._fail_pin_auth()
        bad_cookie = True
    # If we're trusted, we're authenticated.
    elif trust:
        auth = True
    # If we failed too many times, then we're locked out.
    elif self._failed_pin_auth > 10:
        exhausted = True
    # Otherwise go through pin based authentication
    else:
        # BUGFIX: default to "" so a request without a ?pin= argument
        # does not crash with AttributeError on None.strip().
        entered_pin = request.args.get("pin", "")
        # Dashes are cosmetic separators, so ignore them when comparing.
        if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
            self._failed_pin_auth = 0
            auth = True
        else:
            self._fail_pin_auth()
    rv = Response(
        json.dumps({"auth": auth, "exhausted": exhausted}),
        mimetype="application/json",
    )
    if auth:
        # Remember the successful auth in a cookie: issue time and pin hash.
        rv.set_cookie(
            self.pin_cookie_name,
            "%s|%s" % (int(time.time()), hash_pin(self.pin)),
            httponly=True,
        )
    elif bad_cookie:
        rv.delete_cookie(self.pin_cookie_name)
    return rv
def log_pin_request(self):
    """Write the security pin to the server log when pin logging is enabled."""
    should_log = self.pin_logging and self.pin is not None
    if should_log:
        _log(
            "info",
            " * To enable the debugger you need to enter the security pin:",
        )
        _log("info", " * Debugger pin code: %s" % self.pin)
    return Response("")
def __call__(self, environ, start_response):
    """Dispatch the requests."""
    # important: don't ever access a function here that reads the incoming
    # form data! Otherwise the application won't have access to that data
    # any more!
    request = Request(environ)
    # Default: run the wrapped application under the debugger.
    response = self.debug_application
    if request.args.get("__debugger__") == "yes":
        # Debugger AJAX request: dispatch on the "cmd" argument.
        cmd = request.args.get("cmd")
        arg = request.args.get("f")
        secret = request.args.get("s")
        traceback = self.tracebacks.get(request.args.get("tb", type=int))
        frame = self.frames.get(request.args.get("frm", type=int))
        if cmd == "resource" and arg:
            # Static asset (JS/CSS/images) for the debugger UI.
            response = self.get_resource(request, arg)
        elif (
            cmd == "paste"
            and traceback is not None
            and secret == self.secret
        ):
            response = self.paste_traceback(request, traceback)
        elif cmd == "pinauth" and secret == self.secret:
            response = self.pin_auth(request)
        elif cmd == "printpin" and secret == self.secret:
            response = self.log_pin_request()
        elif (
            self.evalex
            and cmd is not None
            and frame is not None
            and self.secret == secret
            and self.check_pin_trust(environ)
        ):
            # Arbitrary code execution in a traceback frame: only allowed
            # with evalex enabled, a matching secret, and pin trust.
            response = self.execute_command(request, cmd, frame)
    elif (
        self.evalex
        and self.console_path is not None
        and request.path == self.console_path
    ):
        # Standalone console page (no __debugger__ argument).
        response = self.display_console(request)
    return response(environ, start_response)
| [
"45397160+Lisukod@users.noreply.github.com"
] | 45397160+Lisukod@users.noreply.github.com |
a0ffe00b966e93bc5a724035114f179b6a824fde | b5b09c2e9199b40f497c5885ed6b86de83c7bc3a | /bin/plugins/testplugin.py | e12032ccbf040a44adf3df2f303313c78820ea52 | [
"MIT"
] | permissive | Augmeneco/KBot6 | 68c85faeb9d0b129fb7e9fe5d47cf9764dfbed59 | 4ff57fc2db3eeb7765b2de1f00e6f5c00f7b2eb7 | refs/heads/master | 2020-07-25T13:18:09.982426 | 2019-11-04T19:49:24 | 2019-11-04T19:49:24 | 208,303,267 | 4 | 1 | null | 2019-09-18T03:51:03 | 2019-09-13T16:25:42 | Pascal | UTF-8 | Python | false | false | 384 | py | import kb
# Example plugin for the KBot framework: registers a no-op event handler
# and one chat command that calls the VK API.
def f():
    # Placeholder handler for the 'first1' event.
    pass
kb.reg_handler('first1', f)
class SuperCmd:
    # Minimum permission level required to run the command.
    level = 1
    # Trigger words that invoke handler() from chat messages.
    keywords = ['йцй']
    def handler(self, msg):
        # msg: incoming message dict; 'peer_id' is the chat to reply to.
        #print(msg)
        print(kb.vkapi('users.get',{'user_ids':1}))
        print(kb.vkapi('messages.send',{"message":'test',"peer_id":msg['peer_id']}))
kb.reg_command(SuperCmd())
#kb.log_write('sos')
#print(kb.vkapi('groups.getTokenPermissions'))
#print(config)
"lanode@mail.ru"
] | lanode@mail.ru |
f158e9c950e80a7ed4ef3f91319e30d83e02ba0c | b7ff8811358c29121d6f60d96c3d05fdf2466ac5 | /Array/IntersectionOfTwoArrays.py | b68c26c16e5c8bfc2f57570a9e1a2e201805fdab | [] | no_license | kevinvud/leet_code_python | e4882c5cf7dd6d7dec54462f3707b9c6dad493ce | 34f92f5b64d56fa4f8f1ff85d746b09725e23621 | refs/heads/master | 2020-07-15T07:38:03.249607 | 2019-09-08T23:03:32 | 2019-09-08T23:03:32 | 205,513,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | """
Input: nums1 = [1,2,2,1], nums2 = [2,2]
Output: [2]
Example 2:
Input: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
Output: [9,4]
Note:
Each element in the result must be unique.
The result can be in any order.
"""
def intersectionTwoArrays(nums1, nums2):
    """Return the unique values of ``nums1`` that also occur in ``nums2``.

    Output order follows the first appearance of each value in ``nums1``
    (same as the original implementation).  Sets are used for membership
    testing, turning the original O(n*m) scans into O(n + m).
    """
    candidates = set(nums2)   # O(1) membership instead of scanning nums2
    seen = set()              # values already emitted, to keep output unique
    output = []
    for value in nums1:
        if value in candidates and value not in seen:
            seen.add(value)
            output.append(value)
    return output
# Use Set
def intersectionTwoArraysWithSet(nums1, nums2):
    """Return the intersection of the two lists as a list of unique values (order unspecified)."""
    return list(set(nums1) & set(nums2))
# Demo driver: run both implementations on the examples from the docstring.
nums1 = [1, 2, 2, 1]
nums2 = [2, 2]
nums3 = [4,9,5]
nums4 = [9,4,9,8,4]
print(intersectionTwoArrays(nums1, nums2))
print(intersectionTwoArrays(nums3, nums4))
print(intersectionTwoArraysWithSet(nums1, nums2))
print(intersectionTwoArraysWithSet(nums3, nums4))
| [
"kevinvud@gmail.com"
] | kevinvud@gmail.com |
54601c3faba97921513238671d4defe422ee9d46 | d3eb732ffd738d3a624196f0971e4c29f85f6673 | /maptool.py | 57b5b053df938d8e44ecddd90a5bd11d4c5471b6 | [] | no_license | kailIII/mgrs-tools | c44aae9542e9883e9e1a395217b468bea4fb0788 | 3ac612bdf980f2d61f27d417c709115890af415f | refs/heads/master | 2021-01-15T16:57:14.768002 | 2015-04-01T12:15:10 | 2015-04-01T12:15:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | import mgrs
from qgis.core import *
from qgis.gui import *
from qgis.utils import iface
from PyQt4.QtCore import *
class MGRSMapTool(QgsMapTool):
    """QGIS map tool that shows the MGRS coordinate under the mouse cursor
    in the main window's status bar while the mouse moves over the canvas."""
    # Shared MGRS converter and the WGS84 CRS used as the MGRS datum.
    ct = mgrs.MGRS()
    epsg4326 = QgsCoordinateReferenceSystem("EPSG:4326")
    def __init__(self, canvas):
        QgsMapTool.__init__(self, canvas)
        self.setCursor(Qt.CrossCursor)
    def canvasMoveEvent(self, e):
        # Convert the mouse position to map coordinates in the canvas CRS,
        # then reproject to WGS84 for the MGRS conversion.
        pt = self.toMapCoordinates(e.pos())
        canvas = iface.mapCanvas()
        # NOTE(review): mapRenderer() is the QGIS 2.x API; QGIS 3 uses mapSettings().
        canvasCrs = canvas.mapRenderer().destinationCrs()
        transform = QgsCoordinateTransform(canvasCrs, self.epsg4326)
        pt4326 = transform.transform(pt.x(), pt.y())
        try:
            # toMGRS takes (latitude, longitude); clear the bar on failure
            # (e.g. cursor outside the valid MGRS latitude range).
            mgrsCoords = self.ct.toMGRS(pt4326.y(), pt4326.x())
            iface.mainWindow().statusBar().showMessage("MGRS Coordinate: " + mgrsCoords)
        except:
            iface.mainWindow().statusBar().showMessage("")
| [
"volayaf@gmail.com"
] | volayaf@gmail.com |
dfae23ff73b031ae845e4e5f051c29950f5ce46d | 626a9b4977df32c6107ab71ec36c3062ccb87625 | /articleflow/notification_setup.py | 77c6d288eec4c517e4bc42557ae390172586c2d1 | [
"Apache-2.0"
] | permissive | wesavetheworld/AI | 9790567a66e9fd2cf77d7f914eafa2510aef6b07 | 3057e6a38f45da8146fa68d72464a68ad01f6640 | refs/heads/master | 2021-01-18T17:33:43.517718 | 2014-10-23T23:06:28 | 2014-10-23T23:06:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | import notification.models as notification
def create_notification_types(verbosity=2):
print "Initializing notifications ..."
notification.NoticeType.create(\
label="new_urgent_web_correction",
display="New Urgent Web Corrections",
description="New article needing urgent web corrections",
default=2,
verbosity=verbosity)
notification.NoticeType.create(\
label="sent_back_to_production",
display="Sent back to production",
description="An article was sent back to production",
default=2,
verbosity=verbosity)
notification.NoticeType.create(\
label="revision_arrived",
display="Revision arrived",
description="An article revision has arrived",
default=2,
verbosity=verbosity)
notification.NoticeType.create(\
label="sent_back_to_pm",
display="Sent back to prepare manuscript",
description="An article was sent back to prepare manuscript",
default=2,
verbosity=verbosity)
notification.NoticeType.create(\
label="reassign",
display="Reassigned to you",
description="An article was reassigned to you",
default=2,
verbosity=verbosity)
| [
"brakit@gmail.com"
] | brakit@gmail.com |
0f1cace2164889b51bf10b23904bfcbb57aa3abc | b20d084ee24890c94fae9c1bf5c8f353f209e285 | /python/blind_aid/microbit_interfaces/record_data.py | 8a199092299b4a01b430bd32e1257b264b57d72f | [] | no_license | georgiosrizos/BlindAid | 78ae7999a4bc8a5dc1caad3c2ac10ba2384c353e | 7af6858355b9784c5ed51f822756b2ee6dbfadb0 | refs/heads/master | 2021-08-20T09:25:43.285658 | 2017-11-28T19:48:49 | 2017-11-28T19:48:49 | 112,354,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,771 | py | ########################################################################################################################
# Authors: Dan Iorga, Georgios Rizos, Georgios Theodorakis, Johannes Wiebe, Thomas Uriot
#
# BlindAid: HiPEDS CDT group project - cohort 2017 - Imperial College London
########################################################################################################################
########################################################################################################################
# We used this script for recording measurements during our training trials.
########################################################################################################################
import serial
import signal
import numpy as np
from python.blind_aid import utility
signal.signal(signal.SIGINT, utility.signal_handler)
PORT = "COM10"
BAUD = 115200
s = serial.Serial(PORT)
s.baudrate = BAUD
s.parity = serial.PARITY_NONE
s.databits = serial.EIGHTBITS
s.stopbits = serial.STOPBITS_ONE
file_string = "data_test22.csv"
Nm = 250
i = 0
meas_list = list()
curr_meas_chunk = np.ones((1, 17), dtype=np.int32) * -9999
meas_list.append(curr_meas_chunk)
checkpoint = 0
timestamp_to_id = dict()
id_to_timestamp = dict()
val_to_print = 6
lmin = 0
lmax = 3600
try:
s.reset_input_buffer()
while True:
# read a line from the microbit, decode it and
# strip the whitespace at the end
data = s.readline().rstrip()
data = data.decode("ascii")
# split the data
data_s = data.split("_")
if len(data_s) == 2:
timestamp = int(data_s[0])
if int(data_s[0]) == -1:
print("CHECKPOINT: " + data)
checkpoint = int(val)
continue
msg_id = timestamp_to_id.get(timestamp, len(timestamp_to_id))
timestamp_to_id[timestamp] = msg_id
id_to_timestamp[msg_id] = timestamp
data_ss = data_s[1].split(":")
val_id = int(data_ss[0])
val = data_ss[1]
if val_id == val_to_print:
print(val)
if msg_id >= len(meas_list):
offset = msg_id - len(meas_list) + 1
for oo, oo_msg_id in zip(range(offset), range(msg_id, msg_id + offset)):
meas_list.append(np.ones((1, 17), dtype=np.int32) * -9999)
meas_list[-1][0, 0] = checkpoint
meas_list[-1][0, -1] = id_to_timestamp[oo_msg_id]
meas_list[msg_id][0, val_id] = int(val)
finally:
s.close()
meas_list = np.vstack(meas_list)
# print(meas_list)
with open(file_string, "wb") as fp:
np.savetxt(fp, meas_list, fmt='%i', delimiter=",")
| [
"gr912@doc-gr912.lib.ic.ac.uk"
] | gr912@doc-gr912.lib.ic.ac.uk |
12bcd2322805aa4cae21a72531a8901387a1a269 | 32c5b77f74f7e86ea5db1eb34e7a178abd2cbe68 | /4 _repetitions/AndreynaDuo_EX06.py | 363979313363463514a0b4dafb6f5d6ad3ade601 | [] | no_license | andreynaduo/python-studies | ac52569fdc70ec6ee721af5fb219b5e6737ad9f0 | 7c5741f74b6046eb8a92dcffa34a74e4a76fe32d | refs/heads/main | 2023-04-10T03:25:33.998387 | 2021-04-19T21:29:09 | 2021-04-19T21:29:09 | 359,602,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | # Escreva um programa que leia dois números.
# Imprima a divisão inteira do primeiro pelo segundo, assim como o resto da divisão.
# Utilize apenas os operadores de soma e subtração para calcular o resultado.
# Lembre-se de que podemos entender o quociente da divisão de dois números
# como a quantidade de vezes que podemos retirar o divisor do dividendo.
# Logo,
# 20 ÷ 4 = 5
# uma vez que podemos subtrair 4 cinco vezes de 20.
# dividendo = número que será dividido
# divisor = número pelo qual será dividido
# quociente = resultado da divisão
dividendo = int(input("Insira o dividendo: "))
divisor = int(input("Insira o divisor: "))
quociente = 0
x = dividendo
while x >= divisor:
x -= divisor
quociente = quociente + 1
resto = x
print(f"{dividendo} % {divisor} = {quociente} resto: {resto}")
| [
"duoandreyna@gmail.com"
] | duoandreyna@gmail.com |
2e3bc7fe5c5ab5ee67da1ec75cca7bfbcc57f372 | 7672fe235826a3c0ebb6d94c9f78874cfe178f79 | /Week 2/Problem Set 2, Question2.py | 7802114a23d57e172111be4a90220f3339d0a8f2 | [] | no_license | Moly-malibu/edX-MITx-6.00.1x | 5b983f4eadbccc7f5a28a974f9be6a769b236c04 | f7dba6d4adfb2ef45cfd4dc345b78c4af21236ba | refs/heads/master | 2021-06-21T06:44:45.561695 | 2017-08-01T06:36:56 | 2017-08-01T06:36:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 15 21:13:57 2017
@author: mmonforte
Now write a program that calculates the minimum fixed monthly payment needed
in order pay off a credit card balance within 12 months.
By a fixed monthly payment, we mean a single number which does not change each month,
but instead is a constant amount that will be paid each month.
In this problem, we will not be dealing with a minimum monthly payment rate.
The following variables contain values as described below:
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
The program should print out one line: the lowest monthly payment
that will pay off all debt in under 1 year, for example:
Lowest Payment: 180
Assume that the interest is compounded monthly according to
the balance at the end of the month (after the payment for that month is made).
The monthly payment must be a multiple of $10 and is the same for all months.
Notice that it is possible for the balance to become negative
using this payment scheme, which is okay. A summary of the required math is found below:
Monthly interest rate = (Annual interest rate) / 12.0
Monthly unpaid balance = (Previous balance) - (Minimum fixed monthly payment)
Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance)
Test Case 1:
balance = 3329
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 310
Test Case 2:
balance = 4773
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 440
Test Case 3:
balance = 3926
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 360
"""
# Known inputs and search state; counter just tracks how many payment
# amounts were tried before finding the answer.
balance = 3329
annualInterestRate = 0.2
monthlyInterestRate = annualInterestRate / 12
monthlyPayment = 0
updatedBalance = balance
counter = 0

# Try fixed monthly payments in $10 steps until one clears the whole
# balance within 12 months.
while updatedBalance > 0:
    monthlyPayment += 10
    counter += 1
    # Replay a full year from the original balance with this payment.
    updatedBalance = balance
    for _ in range(12):
        # Pay first, then compound one month of interest on the remainder.
        updatedBalance = (updatedBalance - monthlyPayment) * (1 + monthlyInterestRate)
        if updatedBalance <= 0:
            break

print("Lowest Payment: ", monthlyPayment)
print("Number of iterations: ", counter)
print("Number of iterations: ", counter) | [
"marcjmonforte@gmail.com"
] | marcjmonforte@gmail.com |
24d149bb7dcc9eeea621f9f26dd3d75b9cd1e731 | 0691b303b57a1cffc7a550983ab39a8b5b341576 | /chemcat/migrations/0001_initial.py | 8f56fabeeae6eca0248138a99e16a24730f20a00 | [] | no_license | Chem3/djangotutorial | 4b6da58df094134fdafbb8bab1c9b67fee0dd398 | c07b336735c30ae942a77ae813ada1519e2d97bb | refs/heads/master | 2022-12-15T01:04:01.273443 | 2020-09-21T18:11:06 | 2020-09-21T18:11:06 | 297,146,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.2.16 on 2020-09-17 18:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the app: creates the ``Post`` model."""
    initial = True
    # Post.author references the configured user model, so the auth app's
    # migrations must run first.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"fmk@tuta.io"
] | fmk@tuta.io |
1641b9bd41fdd82f0415d23dd94e9ad183e5fb28 | 36196dc1bd7c80a6afccda0085fec32f2a6300b1 | /hx_controller/openai_model.py | 9bc0b7eb5d3c795ad86c117afb0ef0d5c76737c9 | [] | no_license | umb-hub/haxball-ai | 9eed9760992b60a997eeea7d188f617313a415a8 | 61659150ffa304495777d4e0b383afdd2f8b9c73 | refs/heads/master | 2023-07-14T18:33:02.209590 | 2020-01-19T15:07:35 | 2020-01-19T15:07:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,312 | py | from multiprocessing.dummy import Pool
from baselines.a2c.a2c import Model
import time
import functools
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common import tf_util
from baselines.common.policies import build_policy
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.runner import Runner
from baselines.common.runners import AbstractEnvRunner
from baselines.ppo2.ppo2 import safemean
from collections import deque
import numpy as np
from tensorflow import losses
from hx_controller.haxball_vecenv import HaxballSubProcVecEnv
class A2CModel(object):
    """
    We use this class to :
    __init__:
    - Creates the step_model
    - Creates the train_model
    train():
    - Make the training part (feedforward and retropropagation of gradients)
    save/load():
    - Save load the model

    This is the baselines A2C model with one extra twist: each instance
    builds its network inside a named variable scope (``model_name``) so
    several independently-trained players can coexist in one graph.
    """
    def __init__(self, policy, env, nsteps, model_name="a2c_model",
                 ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
                 alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):
        sess = tf_util.get_session()
        nenvs = env.num_envs
        nbatch = nenvs*nsteps
        # NOTE(review): original indentation was lost; the scope is assumed
        # to cover only the two policy networks, which is what
        # find_trainable_variables(model_name) below relies on -- confirm.
        with tf.variable_scope(model_name, reuse=tf.AUTO_REUSE):
            # step_model is used for sampling
            step_model = policy(None, 1, sess)
            # train_model is used to train our network
            train_model = policy(None, nsteps, sess)
        # Placeholders: taken actions, advantages, discounted returns, lr.
        A = tf.placeholder(train_model.action.dtype, train_model.action.shape)
        ADV = tf.placeholder(tf.float32, (None, ))
        R = tf.placeholder(tf.float32, (None, ))
        LR = tf.placeholder(tf.float32, [])
        # Calculate the loss
        # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
        # Policy loss
        neglogpac = train_model.pd.neglogp(A)
        # L = A(s,a) * -logpi(a|s)
        pg_loss = tf.reduce_mean(ADV * neglogpac)
        # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
        entropy = tf.reduce_mean(train_model.pd.entropy())
        # Value loss
        # vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R)
        vf_loss = losses.mean_squared_error(train_model.vf, R)
        loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef
        # Update parameters using loss
        # 1. Get the model parameters
        params = find_trainable_variables(model_name)
        # 2. Calculate the gradients
        grads = tf.gradients(loss, params)
        if max_grad_norm is not None:
            # Clip the gradients (normalize)
            grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        grads = list(zip(grads, params))
        # zip aggregate each gradient with parameters associated
        # For instance zip(ABCD, xyza) => Ax, By, Cz, Da
        # 3. Make op for one policy and value update step of A2C
        trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)
        _train = trainer.apply_gradients(grads)
        # Learning-rate schedule over the whole training run.
        lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
        def train(obs, states, rewards, masks, actions, values):
            # Here we calculate advantage A(s,a) = R + yV(s') - V(s)
            # rewards = R + yV(s')
            advs = rewards - values
            # Advance the lr schedule once per sample in the batch.
            for step in range(len(obs)):
                cur_lr = lr.value()
            td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
            if states is not None:
                # Recurrent policies also need their hidden state and masks.
                td_map[train_model.S] = states
                td_map[train_model.M] = masks
            policy_loss, value_loss, policy_entropy, _ = sess.run(
                [pg_loss, vf_loss, entropy, _train],
                td_map
            )
            return policy_loss, value_loss, policy_entropy
        self.train = train
        self.train_model = train_model
        self.step_model = step_model
        self.step = step_model.step
        self.value = step_model.value
        self.initial_state = step_model.initial_state
        self.save = functools.partial(tf_util.save_variables, sess=sess)
        self.load = functools.partial(tf_util.load_variables, sess=sess)
        tf.global_variables_initializer().run(session=sess)
class MultimodelRunner(AbstractEnvRunner):
    """Rollout runner for several models playing on a batch of fields.

    Each model steps its own observations concurrently via a thread pool;
    the collected experience is reshaped into training batches like the
    standard baselines A2C runner.
    """
    def __init__(self, env, models, nsteps=5, gamma=0.99):
        # The superclass needs *a* model; use the first one for shared
        # metadata (shapes, dtypes, value function for bootstrapping).
        super().__init__(env=env, model=models[0], nsteps=nsteps)
        self.models = models
        self.m = len(models)
        self.gamma = gamma
        # Action batch shape with None dims replaced by -1 for reshape().
        self.batch_action_shape = [x if x is not None else -1 for x in models[0].train_model.action.shape.as_list()]
        self.ob_dtype = models[0].train_model.X.dtype.as_numpy_dtype
        # One thread per model so all step() calls run in parallel.
        self.tp = Pool(len(self.models))
        # Round-robin pairing table: models_indexes[i] lists the field
        # slots model i participates in, over all ordered pairs (i, j),
        # i != j.  NOTE(review): built here but never used in run().
        self.models_indexes = [[] for _ in range(self.m)]
        l = 0
        for k in range(self.m ** 2):
            i = k // self.m
            j = k % self.m
            if i == j:
                continue
            self.models_indexes[i].append(l)
            self.models_indexes[j].append(l)
            l += 1
    def model_step(self, args):
        # Pool.map helper: unpack one (model, obs, states, dones) tuple.
        model, obs, states, dones = args
        return model.step(obs, S=states, M=dones)
    def run(self):
        # We initialize the lists that will contain the mb of experiences
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], []
        mb_states = self.states
        epinfos = []
        for n in range(self.nsteps):
            # Given observations, take action and value (V(s))
            # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init
            actions, values, states, _ = self.tp.map(self.model_step, zip(self.models, self.obs, self.states, self.dones))
            # actions, values, states, _ = self.model.step(self.obs, S=self.states, M=self.dones)
            # Append the experiences
            mb_obs.append(np.copy(self.obs))
            mb_actions.append(actions)
            mb_values.append(values)
            mb_dones.append(self.dones)
            # Take actions in env and look the results
            obs, rewards, dones, infos = self.env.step(actions)
            for info in infos:
                maybeepinfo = info.get('episode')
                if maybeepinfo: epinfos.append(maybeepinfo)
            self.states = states
            self.dones = dones
            self.obs = obs
            mb_rewards.append(rewards)
            mb_dones.append(self.dones)
        # Batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
        mb_actions = np.asarray(mb_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
        mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
        mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
        mb_masks = mb_dones[:, :-1]
        mb_dones = mb_dones[:, 1:]
        if self.gamma > 0.0:
            # Discount/bootstrap off value fn
            # NOTE(review): discount_with_dones is not imported at the top
            # of this file (only Scheduler and find_trainable_variables are)
            # so this branch raises NameError -- it needs
            # `from baselines.a2c.utils import discount_with_dones`.
            last_values = self.model.value(self.obs, S=self.states, M=self.dones).tolist()
            for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
                rewards = rewards.tolist()
                dones = dones.tolist()
                if dones[-1] == 0:
                    # Episode not finished: bootstrap with V(s_last).
                    rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.gamma)
                mb_rewards[n] = rewards
        mb_actions = mb_actions.reshape(self.batch_action_shape)
        mb_rewards = mb_rewards.flatten()
        mb_values = mb_values.flatten()
        mb_masks = mb_masks.flatten()
        return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, epinfos
if __name__ == '__main__':
    # Round-robin
    num_players = 4
    game_max_duration = 3  # minuti (match length in minutes)
    gamma = 0.99
    nsteps = 1
    total_timesteps = int(15e6)
    # One field per ordered pairing direction in the round-robin.
    num_fields = 2 * (num_players - 1)
    # 0.1s per tick -> ticks per match.
    env = HaxballSubProcVecEnv(num_fields=num_fields, max_ticks=int(60 * game_max_duration * (1 / 0.1)))
    # env = make_vec_env(env_id='PongNoFrameskip-v4', env_type=None, num_env=nenvs, seed=0)
    # policy = build_policy(env=env, policy_network='lstm')#, num_layers=4, num_hidden=128)
    policy = build_policy(env=env, policy_network='mlp', num_layers=4, num_hidden=256)
    models = []
    runners = []
    for i in range(num_players):
        # One independently-scoped model per player ("p0", "p1", ...).
        m = A2CModel(policy, env=env, model_name="p"+str(i), nsteps=nsteps, ent_coef=0.05, total_timesteps=total_timesteps)
        # runner = Runner(env, m, nsteps=nsteps, gamma=gamma)
        # runners.append(runner)
        models.append(m)
    runner = MultimodelRunner(env, models, nsteps=nsteps, gamma=gamma)
    # Calculate the batch_size
    nbatch = num_fields * nsteps
    for update in range(1, total_timesteps // nbatch + 1):
        # Get mini batch of experiences
        obs, states, rewards, masks, actions, values, epinfos = runner.run()
        # NOTE(review): `runners` is never populated (its creation is
        # commented out above), so this inner loop is a no-op and
        # policy_loss/value_loss/policy_entropy below are undefined.
        for runner, model in zip(runners, models):
            obs, states, rewards, masks, actions, values, epinfos = runner.run()
            # invert: augment the batch with mirrored states/actions so the
            # model also trains on the flipped side of the field.
            inv_obs = env.invert_states(obs)
            obs = np.vstack((obs, inv_obs))
            rewards = np.hstack((rewards, rewards))
            masks = np.hstack((masks, masks))
            inv_actions = env.invert_actions(actions)
            actions = np.hstack((actions, inv_actions))
            values = np.hstack((values, values))
            # policy_loss, value_loss, policy_entropy = model.train(inv_obs, states, rewards, masks, inv_actions, values)
            policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
        # NOTE(review): tstart, log_interval, epinfobuf and load_path are
        # never defined in this file -- this logging/saving section raises
        # NameError as written.
        nseconds = time.time() - tstart
        # last_rewards += list(rewards)
        # last_rewards = last_rewards[-20000:]
        # Calculate the fps (frame per second)
        fps = int((update * nbatch) / nseconds)
        if update % log_interval == 0 or update == 1:
            # Calculates if value function is a good predicator of the returns (ev > 1)
            # or if it's just worse than predicting nothing (ev =< 0)
            ev = explained_variance(values, rewards)
            logger.record_tabular("nupdates", update)
            logger.record_tabular("total_timesteps", update * nbatch)
            logger.record_tabular('rewards', np.mean(rewards))
            logger.record_tabular('values', np.mean(values))
            logger.record_tabular("fps", fps)
            logger.record_tabular("policy_entropy", float(policy_entropy))
            logger.record_tabular("value_loss", float(value_loss))
            logger.record_tabular("explained_variance", float(ev))
            logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
            logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
            logger.dump_tabular()
        if update % 500 == 0:
            model.save(load_path)
"orlov.van@gmail.com"
] | orlov.van@gmail.com |
ce728dc3c74a67adc86d9daaae24b1f228975aa5 | abacbbe1938b8259134e484cef858baa08123212 | /src/app/common/direct_config.py | a49aff1280ae042cbeb676e0b259c7e483173e9d | [
"MIT"
] | permissive | acatalfano/pub-sub-middleware | 268ae9e57af5096b66e3d9c6936c64f4d8f4d804 | 442510f14f2ae167b95a18bb458c2cf64f5caa87 | refs/heads/main | 2023-05-10T03:39:06.909025 | 2021-06-10T22:27:59 | 2021-06-12T19:42:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | REGISTER_SUB_PORT = '5555'
REGISTER_PUB_PORT = '5556'
DISSEMINATE_PUB_PORT = '5557'
PUBLISHER_PORT = '5558'
BROKER_IP = 'localhost' # TODO: change this when we're doing network/mininet testing
| [
"adam.m.catalfano@vanderbilt.edu"
] | adam.m.catalfano@vanderbilt.edu |
1cd24eda66903e5a0eac60429e04e7b8843351b6 | 2d692238c878bf6582168a573f4075415a7ccba3 | /analysis.py | 7a257d1caf63e2a2b76ecd81090db5048b7064f3 | [] | no_license | gelasamgautami/Analysis-of-Congestive_heart_failure | 3bd5d07a1f2f1ba6db007b61e52c11095c0e56a9 | 844ba5c3ce9578b0548dc48d6452d673067d852e | refs/heads/master | 2020-05-04T06:09:34.238024 | 2019-04-02T04:55:57 | 2019-04-02T04:55:57 | 178,999,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | import pandas as pd
import matplotlib.pyplot as plt
c=0
d=0
e=0
f=0
g=0
h=0
j=0
k=0
l=0
m=0
item=[]
df=pd.read_csv('Heart_failure_re_admission.csv')
x=df['Age'].value_counts()
y=df['Age'].value_counts().tolist()
for i in df['Age'].value_counts().keys():
if(0<=i<=10):
c=c+x[i]
if(11<=i<=20):
d=d+x[i]
if(21<=i<=30):
e=e+x[i]
if(31<=i<=40):
f=f+x[i]
if(41<=i<=50):
g=g+x[i]
if(51<=i<=60):
h=h+x[i]
if(61<=i<=70):
j=j+x[i]
if(71<=i<=80):
k=k+x[i]
if(81<=i<=90):
l=l+x[i]
if(91<=i<=100):
m=m+x[i]
item.append(c)
item.append(d)
item.append(e)
item.append(f)
item.append(g)
item.append(h)
item.append(j)
item.append(k)
item.append(l)
item.append(m)
print(item)
print(c)
print(d)
print(e)
print(f)
print(g)
print(h)
print(j)
print(k)
print(l)
print(m)
# x-coordinates of left sides of bars
left = ['0-10','11-20','21-30','31-40','41-50','51-60','61-70','71-80','81-90','91-100']
# heights of bars
height = item
# plotting a bar chart
plt.bar(left, height,
width = 0.8)
# naming the x-axis
plt.xlabel('Age')
# naming the y-axis
plt.ylabel('No. of people')
# plot title
'''plt.title('My bar chart!')'''
# function to show the plot
plt.show()
| [
"noreply@github.com"
] | gelasamgautami.noreply@github.com |
534d5ad5f235394e73e6a961207fb391d549118f | 3eeef04c924d779593c6fbe8a510d09f4ab3f9f3 | /index.py | c1cc4c3956592027f63e1924bdc97c31fce7c2d2 | [] | no_license | Jayve/SWU-CpDaily | 59490a60dd17ac25e64d88562fec8201114a11ed | 57ebb16afbd0b934648e0aea70b024a7aedd3bdc | refs/heads/master | 2023-05-31T10:01:26.749703 | 2021-06-17T15:12:26 | 2021-06-17T15:12:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,687 | py | # -*- coding: utf-8 -*-
import requests
from datetime import datetime, timedelta, timezone
from pyDes import des, CBC, PAD_PKCS5
import urllib.parse as up
from aip import AipOcr
import random
import base64
import sys
import json
import re
import time
import traceback
####################################################
# Single-user account information
####################################################
USERNAME = '你的学号'   # placeholder: your student number
PASSWORD = '你的密码'   # placeholder: your password
# Extra delay (seconds) after the task opens before signing in; default 0s.
DELAY = 0
####################################################
# Message push
####################################################
# Qmsg key for QQ push notifications; leave empty to disable pushes.
QMSG_KEY = ''
# Minimum log level that triggers a push.
PUSH_LEVEL = 1
######################################################
# Baidu OCR (captcha recognition)
#####################################################
# SWU normally shows no captcha; one only appears after three wrong
# password attempts, so these credentials can usually stay empty.
APP_ID = '你的APP_ID'
API_KEY = '你的API_KEY'
SECRET_KEY = '你的SECRET_KEY'
#######################################################
# DES encryption key used by the CpDaily app protocol
#######################################################
DESKEY = 'b3L26XNL'
APPVERSION = '9.0.0'
#######################################################
# Endpoints for fetching sign-in tasks
#######################################################
# Dorm-check ("查寝") endpoints are not included because they are not
# needed during the winter break.
API = {
    'Sign': {
        'GETTasks': 'https://{host}/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay',
        'GETDetail': 'https://{host}/wec-counselor-sign-apps/stu/sign/detailSignInstance',
        'GenInfo': 'https://{host}/wec-counselor-sign-apps/stu/sign/getStuSignInfosByWeekMonth',
        'PicUploadUrl': 'https://{host}/wec-counselor-sign-apps/stu/oss/getUploadPolicy',
        'GETPicUrl': 'https://{host}/wec-counselor-sign-apps/stu/sign/previewAttachment',
        'Submit': 'https://{host}/wec-counselor-sign-apps/stu/sign/submitSign'
    },
    'Attendance': {
        'GETTasks': 'https://{host}/wec-counselor-attendance-apps/student/attendance/getStuAttendacesInOneDay',
        'GETDetail': 'https://{host}/wec-counselor-attendance-apps/student/attendance/detailSignInstance',
        'GenInfo': 'https://{host}/wec-counselor-attendance-apps/student/attendance/getStuSignInfosByWeekMonth',
        'PicUploadUrl': 'https://{host}/wec-counselor-attendance-apps/student/attendance/getStsAccess',
        'GETPicUrl': 'https://{host}/wec-counselor-attendance-apps/student/attendance/previewAttachment',
        'Submit': 'https://{host}/wec-counselor-attendance-apps/student/attendance/submitSign'
    }
}
#######################################################
# Normally nothing below this line needs editing.
#######################################################
#######################################################
# Hot-update block: a cloud runner can inject settings by defining
# CLOUD* variables in this module's local scope before this point.
#######################################################
if 'CLOUDUSERNAME' in locals().keys():
    USERNAME = locals().get('CLOUDUSERNAME')
if 'CLOUDPASSWORD' in locals().keys():
    PASSWORD = locals().get('CLOUDPASSWORD')
if 'CLOUDDELAY' in locals().keys():
    DELAY = locals().get('CLOUDDELAY')
if 'CLOUDPUSHTOKEN' in locals().keys():
    QMSG_KEY = locals().get('CLOUDPUSHTOKEN')
if 'CLOUDAPP_ID' in locals().keys():
    APP_ID = locals().get('CLOUDAPP_ID')
if 'CLOUDAPI_KEY' in locals().keys():
    API_KEY = locals().get('CLOUDAPI_KEY')
if 'CLOUDSECRET_KEY' in locals().keys():
    SECRET_KEY = locals().get('CLOUDSECRET_KEY')
if 'CLOUDPUSH_LEVEL' in locals().keys():
    PUSH_LEVEL = locals().get('CLOUDPUSH_LEVEL')
######################################################
# End of hot-update block
######################################################
# Maximum number of captcha OCR attempts per login.
MAX_Captcha_Times = 20
class Util:  # Shared utility namespace
    """Stateless helpers: logging, portal login, crypto, header building, push."""
    # Accumulated HTML log (entries joined with <br>), seeded with the version tag.
    logs = 'V2021.6.17'
    # Lazily created Baidu OCR client (see captchaOCR).
    OCRclient = None

    @staticmethod
    def GetDate(Mod='%Y-%m-%d %H:%M:%S', offset=0):
        """Return the Beijing (UTC+8) date/time `offset` days in the past,
        formatted with strftime pattern `Mod`."""
        utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
        bj_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
        bj_dt = bj_dt - timedelta(days=offset)
        return bj_dt.strftime(Mod)

    @staticmethod
    def log(content, show=True):
        """Timestamp `content`, optionally print it, and append it to Util.logs."""
        Text = Util.GetDate() + ' ' + str(content)
        if show:
            print(Text)
        if Util.logs:
            Util.logs = Util.logs + '<br>' + Text
        else:
            Util.logs = Text
        sys.stdout.flush()

    @staticmethod
    def captchaOCR(image):
        """Recognize captcha text in `image` via Baidu OCR.

        Returns the recognized text with spaces stripped, or '' on any failure.
        """
        try:
            if Util.OCRclient is None:
                Util.OCRclient = AipOcr(APP_ID, API_KEY, SECRET_KEY)
            options = {
                'detect_direction': 'true',
                'language_type': 'CHN_ENG',
                'detect_language': 'false',
                # Fixed typo: was 'fasle', which the API silently ignored.
                'probability': 'false',
            }
            # Call the generic text-recognition endpoint.
            result = Util.OCRclient.basicGeneral(image, options)
            result = result['words_result'][0]
            text = result['words']
            text = text.replace(' ', '')
            return text
        except Exception:
            Util.log("百度OCR识别失败,请检查配置!")
            return ''

    @staticmethod
    def Login(user, School_Server_API):
        """Log `user` into the campusphere IAP portal.

        Returns an authenticated requests.Session on success, or None on any
        failure (server down, bad credentials, forced password reset, unknown
        error code, or captcha retry limit exceeded).
        """
        loginurl = School_Server_API['login-url']
        # Extract protocol and host from the login URL.
        info = re.findall('(.*?)://(.*?)/', loginurl)[0]
        protocol = info[0]
        host = info[1]
        headers = {
            'Host': host,
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.98 Mobile Safari/537.36',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,en-US;q=0.8',
            'X-Requested-With': 'com.wisedu.cpdaily'
        }
        # The session accumulates the final auth cookies.
        session = requests.Session()
        try:
            res = session.get(url=loginurl, headers=headers)
        except Exception:
            Util.log("学校登录服务器可能宕机了...")
            return None
        # Pull the login ticket (lt) out of the redirect URL.
        lt = re.findall('_2lBepC=(.*)&*', res.url)
        if len(lt) == 0:
            Util.log("获取lt失败")
            return None
        lt = lt[0]
        PostUrl = '{}://{}/iap/doLogin'.format(protocol, host)
        Params = {}
        Params['username'] = user['username']
        Params['password'] = user['password']
        Params['rememberMe'] = 'false'
        Params['mobile'] = ''
        Params['dllt'] = ''
        Params['captcha'] = ''
        ltUrl = '{}://{}/iap/security/lt'.format(protocol, host)
        LoginHeaders = headers
        LoginHeaders['Content-Type'] = 'application/x-www-form-urlencoded'
        res = session.post(url=ltUrl, data={'lt': lt}, headers=LoginHeaders)
        if res.status_code != 200:
            Util.log("申请lt失败")
            return None
        res = res.json()['result']
        Params['lt'] = res['_lt']
        # New-style captcha flow: POST first; the response says whether a
        # captcha is required.
        res = session.post(PostUrl, data=Params, headers=LoginHeaders, allow_redirects=False)
        if 'Location' not in res.headers:
            reason = res.json()['resultCode']
            if reason == 'FORCE_MOD_PASS':
                Util.log("请重置密码后重试!")
                return None
            elif reason == 'FAIL_UPNOTMATCH':
                Util.log("用户名或密码错误!")
                return None
            elif reason == 'CAPTCHA_NOTMATCH':
                # Captcha required: OCR it and retry up to MAX_Captcha_Times.
                captchaUrl = '{}://{}/iap/generateCaptcha?ltId={}'.format(protocol, host, Params['lt'])
                for i in range(MAX_Captcha_Times):
                    Captcha = session.get(url=captchaUrl, headers=headers)
                    code = Util.captchaOCR(Captcha.content)
                    # Respect the OCR API QPS limit.
                    time.sleep(0.5)
                    if len(code) != 5:
                        continue
                    Params['captcha'] = code
                    res = session.post(PostUrl, data=Params, headers=LoginHeaders, allow_redirects=False)
                    if 'Location' in res.headers:
                        # Captcha accepted (or password wrong with redirect).
                        break
                    elif res.json()['resultCode'] == 'FAIL_UPNOTMATCH':
                        Util.log("用户名或密码错误!")
                        return None
                if 'Location' not in res.headers:
                    # All attempts exhausted.  The original code fell through
                    # and crashed with a KeyError on the missing header (and
                    # mis-logged when the final attempt actually succeeded).
                    Util.log("验证码识别超过最大次数")
                    return None
            else:
                # Unknown failure code; previously crashed on the missing
                # Location header below.
                return None
        nexturl = res.headers['Location']
        headers['host'] = School_Server_API['host']
        res = session.post(url=nexturl, headers=headers)
        return session

    @staticmethod
    def DESEncrypt(s, Key=DESKEY):
        """DES-CBC encrypt `s` with a fixed IV and return it base64-encoded."""
        iv = b"\x01\x02\x03\x04\x05\x06\x07\x08"
        k = des(Key, CBC, iv, pad=None, padmode=PAD_PKCS5)
        encrypt_str = k.encrypt(s)
        return base64.b64encode(encrypt_str).decode()

    @staticmethod
    def GenHeadersWithExtension(user, School_Server_API):
        """Build request headers carrying the encrypted Cpdaily-Extension blob
        (device/location identity) expected by the submit endpoints."""
        extension = {
            "systemName": "android",
            "systemVersion": "7.1.1",
            "model": "MI 6",
            "deviceId": user['deviceId'],
            "appVersion": APPVERSION,
            "lon": user['lon'],
            "lat": user['lat'],
            "userId": user['username'],
        }
        headers = {
            'tenantId': '1019318364515869',  # SWU tenant
            'User-Agent': 'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.98 Mobile Safari/537.36 okhttp/3.12.4',
            'CpdailyStandAlone': '0',
            'Cpdaily-Extension': Util.DESEncrypt(json.dumps(extension)),
            'extension': '1',
            'Content-Type': 'application/json; charset=utf-8',
            'Host': School_Server_API['host'],
            'Connection': 'Keep-Alive',
            'Accept-Encoding': 'gzip',
        }
        return headers

    @staticmethod
    def GenNormalHears(School_Server_API):
        """Build plain JSON POST headers (name kept for compatibility;
        'Hears' is a historical misspelling of 'Headers')."""
        headers = {
            'Host': School_Server_API['host'],
            'Accept': 'application/json, text/plain, */*',
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.98 Mobile Safari/537.36 cpdaily/9.0.0 wisedu/9.0.0',
            'Content-Type': 'application/json',
            'Accept-Encoding': 'gzip,deflate',
            'Accept-Language': 'zh-CN,en-US;q=0.8',
        }
        return headers

    @staticmethod
    def TimeCheck(task):
        """Check whether `task` is currently inside its sign-in window.

        Returns 0 when inside the window, otherwise the number of seconds
        until the window opens (negative when it has already closed), or
        False when the task carries no parseable schedule.
        NOTE(review): callers compare the result numerically, and False
        compares equal to 0 — confirm that is the intended behavior.
        """
        try:
            begin_Day = re.findall(
                r'([\d]+-[\d]+-[\d]+)', task['rateSignDate'])
            begin = begin_Day[0] + ' ' + task['rateTaskBeginTime']
            end = begin_Day[0] + ' ' + task['rateTaskEndTime']
        except Exception:
            Util.log("未知任务" + '"' + task['taskName'] + '"')
            return False
        utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
        bj_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
        now = bj_dt.timetuple()
        begin = time.strptime(begin, "%Y-%m-%d %H:%M")
        end = time.strptime(end, "%Y-%m-%d %H:%M")
        if now >= begin and now <= end:
            return 0
        else:
            now = time.mktime(now)
            begin = time.mktime(begin)
            # Seconds until the window opens (negative if already passed).
            return begin - now

    # Push a message through the Qmsg channel.
    @staticmethod
    def SendMessage(title: str, content: str):
        """Send `title` + `content` via Qmsg; silently no-op when no key is
        configured (returns False in that case)."""
        if QMSG_KEY == '':
            Util.log("未配置QMSG酱,消息不会推送")
            return False
        data = {
            'token': QMSG_KEY,
            'msg': title + "\n" + content,
        }
        try:
            res = requests.post(
                url='https://qmsg.zendee.cn/send/{}'.format(QMSG_KEY), data=data)
        except Exception:
            Util.log('发送失败')

    @staticmethod
    def GenDeviceID(username):
        """Derive a stable pseudo device id from `username`.

        Seeding the RNG with the account name makes the id identical on every
        run, so the platform never sees a "new device" sign-in.
        """
        deviceId = ''
        random.seed(username.encode('utf-8'))
        for i in range(8):
            num = random.randint(97, 122)
            if (num * i + random.randint(1, 8)) % 3 == 0:
                deviceId = deviceId + str(num % 9)
            else:
                deviceId = deviceId + chr(num)
        deviceId = deviceId + 'XiaomiMI6'
        return deviceId
# Task template; both Sign and Attendance inherit from it.
class TaskModel:
    """Base class for one campusphere task type (sign-in / attendance)."""

    def __init__(self, TaskType, School_Server_API, Showname, session, userBaseInfo):
        self.API = API[TaskType]          # endpoint table for this task type
        self.Showname = Showname          # human-readable name used in logs
        self.School_Server_API = School_Server_API
        self.session = session
        self.userBaseInfo = userBaseInfo
        self.real_taskname = ''           # name of the task last submitted

    def UpdateInfo(self, session, userBaseInfo, School_Server_API=None):
        """Refresh the session/user data (and optionally the server API)."""
        self.session = session
        self.userBaseInfo = userBaseInfo
        if School_Server_API:
            # Fixed: the original assigned the parameter to itself, so the
            # new server API was silently dropped.
            self.School_Server_API = School_Server_API

    def GetTasks(self):
        """Fetch the current task lists for this task type.

        Returns the 'datas' payload dict on success, otherwise None.
        """
        res = self.session.post(
            url=self.API['GETTasks'].format(
                host=self.School_Server_API['host']),
            headers=Util.GenNormalHears(self.School_Server_API),
            data=json.dumps({})
        )
        res = res.json()
        if res['message'] == 'SUCCESS':
            return res['datas']
        else:
            Util.log('获取{}任务时出错,原因是'.format(self.Showname) + res['message'])
            return None

    def GetDetailTask(self, params):
        """Fetch the detail record for one task instance (by `params`).

        Returns the 'datas' payload dict on success, otherwise None.
        """
        res = self.session.post(
            url=self.API['GETDetail'].format(
                host=self.School_Server_API['host']),
            headers=Util.GenNormalHears(self.School_Server_API),
            data=json.dumps(params))
        res = res.json()
        if res['message'] == 'SUCCESS':
            return res['datas']
        else:
            Util.log('获取{}任务详情时出错,原因是'.format(self.Showname) + res['message'])
            return None

    def GetSignedInfo(self, day=1):
        """Return the detail records of tasks signed `day` days ago
        (default: yesterday), [] when that day has none, or None when the
        monthly summary cannot be fetched."""
        data = {"statisticYearMonth": Util.GetDate('%Y-%m', day)}
        headers = Util.GenNormalHears(self.School_Server_API)
        try:
            res = self.session.post(url=self.API['GenInfo'].format(
                host=self.School_Server_API['host']), data=json.dumps(data), headers=headers)
            signdays = res.json()['datas']['rows']
        except Exception:
            Util.log("获取昨天签到信息时出错")
            return None
        yesterday = Util.GetDate('%Y-%m-%d', day)
        if len(signdays) == 0:
            return []
        yesterday_info = None
        for signday in signdays:
            if signday['dayInMonth'] == yesterday:
                yesterday_info = signday
                break
        if yesterday_info is None:
            # No row for the target day at all (was an unhandled NameError).
            return []
        yesterday_signed = yesterday_info['signedTasks']
        params = {}
        signedTasksInfo = []
        for task in yesterday_signed:
            params['signInstanceWid'] = task['signInstanceWid']
            params['signWid'] = task['signWid']
            info = self.GetDetailTask(params)
            if info:
                signedTasksInfo.append(info)
        return signedTasksInfo

    def CheckSuccess(self):
        """Return True when the last-submitted task no longer appears in the
        unsigned list, i.e. the submission actually registered server-side."""
        all_tasks = self.GetTasks()
        if not all_tasks:
            # Could not re-fetch the task list; do not fail the post-submit
            # check on a transient fetch error.
            return True
        # Fixed: unSignedTasks holds task dicts, so the original string
        # membership test could never match and always reported success.
        unsigned = all_tasks.get('unSignedTasks') or []
        return all(t.get('taskName') != self.real_taskname for t in unsigned)

    # The methods below are task-type specific and implemented by subclasses.
    def GenConfig(self, signedTasksInfo):
        """Build a per-task config dict from past submission details."""
        pass

    def fillForm(self, task, config):
        """Build the submission form for `task` from `config`."""
        pass

    def submitForm(self, config):
        """Submit a filled form to the server (subclasses also take `form`)."""
        pass

    def Go(self, session=None, userBaseInfo=None, config=None, School_Server_API=None):
        """Run the whole flow for every pending task of this type."""
        pass
# Sign-in task
class Sign(TaskModel):
    """Daily sign-in flow: replays a previous day's submission for every
    currently open sign-in task."""

    def __init__(self, School_Server_API, session, userBaseInfo):
        super().__init__('Sign', School_Server_API, '签到', session, userBaseInfo)

    def GenConfig(self, signedTasksInfo):
        """Map taskName -> submission config extracted from past sign-ins."""
        config = {}
        # Guard against None (GetSignedInfo fetch failure).
        for info in signedTasksInfo or []:
            extra = {}
            for item in info['signedStuInfo']['extraFieldItemVos']:
                extra[item['extraTitle']] = [item['extraFieldItem']]
            config[info['taskName']] = {
                'address': info['signAddress'],
                'lon': info['longitude'],
                'lat': info['latitude'],
                'abnormalReason': '',
                'photo': info['signPhotoUrl'],
                'extra': extra
            }
        return config

    def fillForm(self, task, config):
        """Build the sign-in form for `task`; None when required data is
        missing (e.g. a photo is required but not configured)."""
        form = {}
        config = config[task['taskName']]
        # Does the task require a photo?
        if task['isPhoto'] == 1:
            if config['photo'] != '':
                form['signPhotoUrl'] = config['photo']
            else:
                Util.log('"{}"需要照片,但未配置'.format(task['taskName']))
                return None
        else:
            form['signPhotoUrl'] = ''
        # Does the task require extra form fields?
        if task['isNeedExtra'] == 1:
            extraFields = task['extraField']
            # Fill the form from the configured values.
            defaults = config['extra']
            extraFieldItemValues = []
            # For each extra field, the preset value must match one of the
            # field's options exactly.
            for extraField in extraFields:
                if extraField['title'] not in defaults:
                    Util.log('"{}"的选项"{}"配置出现问题,请检查"'.format(
                        task['taskName'], extraField['title']))
                    return None
                extraFieldItems = extraField['extraFieldItems']
                # Walk the options and pick the configured one.
                for extraFieldItem in extraFieldItems:
                    if extraFieldItem['content'] == defaults[extraField['title']][0]:
                        extraFieldItemValue = {'extraFieldItemValue': defaults[extraField['title']][0],
                                               'extraFieldItemWid': extraFieldItem['wid']}
                        extraFieldItemValues.append(extraFieldItemValue)
            # Attach the selected extra options.
            form['extraFieldItems'] = extraFieldItemValues
        form['longitude'] = config['lon']
        form['latitude'] = config['lat']
        form['isMalposition'] = task['isMalposition']
        form['abnormalReason'] = config['abnormalReason']
        form['signInstanceWid'] = task['signInstanceWid']
        form['position'] = config['address']
        form['uaIsCpadaily'] = True
        form['signVersion'] = '1.0.0'
        return form

    def submitForm(self, config, form):
        """Submit `form`, verify it registered, log and push the outcome."""
        res = self.session.post(
            url=self.API['Submit'].format(host=self.School_Server_API['host']),
            headers=Util.GenHeadersWithExtension(
                config, self.School_Server_API),
            data=json.dumps(form)
        )
        message = res.json()['message']
        if message == 'SUCCESS':
            if not self.CheckSuccess():
                message = '提交信息成功,但任务仍为未签到状态'
                Util.log(message)
                Util.SendMessage("今日校园自动签到失败", "自动签到失败,原因是:" +
                                 message + " 请手动签到,等待更新")
                return False
            Util.log('自动签到成功')
            if PUSH_LEVEL == 1:
                Util.SendMessage(
                    "自动签到成功", '"{}"已自动完成'.format(self.real_taskname))
            return True
        else:
            Util.log('自动签到失败,原因是:' + message)
            if PUSH_LEVEL < 2:
                Util.SendMessage("今日校园自动签到失败", "自动签到失败,原因是:" +
                                 message + " ,请手动签到,等待更新")
            return False

    # Values given in `config` override the automatically recovered ones.
    def Go(self, session=None, userBaseInfo=None, config=None, School_Server_API=None):
        """Run one sign-in pass over every open/leave task."""
        if session:
            self.UpdateInfo(session, userBaseInfo, School_Server_API)
        signedinfo = self.GetSignedInfo()
        autoconfig = self.GenConfig(signedinfo)
        if config:
            autoconfig.update(config)
        tasks = self.GetTasks()
        todotaskstype = []
        if len(tasks['unSignedTasks']) > 0:
            text = '未完成的签到任务:'
            for i, task in enumerate(tasks['unSignedTasks']):
                text = text + str(i + 1) + '.' + task['taskName'] + ' '
            Util.log(text)
            todotaskstype.append('unSignedTasks')
        if len(tasks['leaveTasks']) > 0:
            text = '请假的签到任务:'
            for i, task in enumerate(tasks['leaveTasks']):
                text = text + str(i + 1) + '.' + task['taskName'] + ' '
            Util.log(text)
            todotaskstype.append('leaveTasks')
        for todotype in todotaskstype:
            # Iterate tasks directly (was range(len(...)) with an index that
            # the inner rollback loop shadowed).
            for todoTask in tasks[todotype]:
                params = {
                    'signInstanceWid': todoTask['signInstanceWid'],
                    'signWid': todoTask['signWid']
                }
                taskDetail = self.GetDetailTask(params)
                # When yesterday holds no record for this task, walk further
                # back (up to a month) until a past submission is found.
                if taskDetail['taskName'] not in autoconfig:
                    Util.log('"{}"昨天不存在或未签到'.format(taskDetail['taskName']))
                    Util.log("开始回滚以获取签到信息")
                    for back in range(30):
                        Util.log("回滚{}天".format(str(back + 2)))
                        signedinfo = self.GetSignedInfo(back + 2)
                        autoconfig = self.GenConfig(signedinfo)
                        if taskDetail['taskName'] in autoconfig:
                            Util.log("获取到签到信息,继续进行签到")
                            break
                    if taskDetail['taskName'] not in autoconfig:
                        Util.log("回滚一月仍未获取到签到信息,可能是新发布的任务,跳过")
                        continue
                # Skip tasks whose window opens more than 60s from now.
                t = Util.TimeCheck(taskDetail)
                if t != 0 and t > 60:
                    Util.log('"' + taskDetail['taskName'] + '"' + "目前不在签到时间,跳过")
                    continue
                Form = self.fillForm(taskDetail, autoconfig)
                if Form is None:
                    continue
                submitinfo = {
                    'username': self.userBaseInfo['username'],
                    'lon': autoconfig[taskDetail['taskName']]['lon'],
                    'lat': autoconfig[taskDetail['taskName']]['lat'],
                    'deviceId': self.userBaseInfo['deviceId']
                }
                if t > 0:
                    t = t + DELAY
                    Util.log("休眠{}s后开始签到".format(str(t)))
                    time.sleep(t)
                self.real_taskname = taskDetail['taskName']
                self.submitForm(submitinfo, Form)
# Dormitory attendance task
class Attendance(TaskModel):
    """Dormitory attendance flow: replays a previous day's submission for
    every currently open attendance task."""

    def __init__(self, School_Server_API, session, userBaseInfo):
        super().__init__('Attendance', School_Server_API, '查寝', session, userBaseInfo)

    def GenConfig(self, signedTasksInfo):
        """Map taskName -> submission config extracted from past check-ins."""
        config = {}
        # Guard against None (GetSignedInfo fetch failure).
        for info in signedTasksInfo or []:
            config[info['taskName']] = {
                'address': info['signAddress'],
                'lon': info['longitude'],
                'lat': info['latitude'],
                'abnormalReason': '',
                'photo': info['signPhotoUrl'],
            }
        return config

    def fillForm(self, task, config):
        """Build the attendance form for `task`; None when a required photo
        is missing from the config."""
        config = config[task['taskName']]
        form = {}
        form['signInstanceWid'] = task['signInstanceWid']
        form['longitude'] = config['lon']
        form['latitude'] = config['lat']
        form['isMalposition'] = task['isMalposition']
        form['abnormalReason'] = config['abnormalReason']
        if task['isPhoto'] == 1:
            if config['photo'] != '':
                form['signPhotoUrl'] = config['photo']
            else:
                Util.log('"{}"需要照片,但未配置'.format(task['taskName']))
                return None
        else:
            form['signPhotoUrl'] = ''
        form['position'] = config['address']
        form['uaIsCpadaily'] = True
        return form

    def submitForm(self, config, form):
        """Submit `form`, verify it registered, log and push the outcome."""
        res = self.session.post(
            url=self.API['Submit'].format(host=self.School_Server_API['host']),
            headers=Util.GenHeadersWithExtension(
                config, self.School_Server_API),
            data=json.dumps(form)
        )
        message = res.json()['message']
        if message == 'SUCCESS':
            if not self.CheckSuccess():
                message = '提交信息成功,但任务仍为未签到状态'
                Util.log(message)
                Util.SendMessage("今日校园自动查寝失败", "自动查寝失败,原因是:" +
                                 message + " 请手动签到,等待更新")
                return False
            Util.log('自动查寝成功')
            if PUSH_LEVEL == 1:
                Util.SendMessage(
                    "自动查寝成功", '"{}"已自动完成'.format(self.real_taskname))
            return True
        else:
            Util.log('自动查寝失败,原因是:' + message)
            if PUSH_LEVEL < 2:
                Util.SendMessage("今日校园自动查寝失败", "自动查寝失败,原因是:" +
                                 message + " 请手动签到,等待更新")
            return False

    # Values given in `config` override the automatically recovered ones.
    def Go(self, session=None, userBaseInfo=None, config=None, School_Server_API=None):
        """Run one attendance pass over every open/leave task."""
        if session:
            self.UpdateInfo(session, userBaseInfo, School_Server_API)
        signedinfo = self.GetSignedInfo()
        autoconfig = self.GenConfig(signedinfo)
        if config:
            autoconfig.update(config)
        tasks = self.GetTasks()
        todotaskstype = []
        if len(tasks['unSignedTasks']) > 0:
            text = '未完成的查寝任务:'
            for i, task in enumerate(tasks['unSignedTasks']):
                text = text + str(i + 1) + '.' + task['taskName'] + ' '
            Util.log(text)
            todotaskstype.append('unSignedTasks')
        if len(tasks['leaveTasks']) > 0:
            text = '请假的查寝任务:'
            for i, task in enumerate(tasks['leaveTasks']):
                text = text + str(i + 1) + '.' + task['taskName'] + ' '
            Util.log(text)
            todotaskstype.append('leaveTasks')
        for todotype in todotaskstype:
            # Iterate tasks directly (was range(len(...)) with an index that
            # the inner rollback loop shadowed).
            for todoTask in tasks[todotype]:
                params = {
                    'signInstanceWid': todoTask['signInstanceWid'],
                    'signWid': todoTask['signWid']
                }
                taskDetail = self.GetDetailTask(params)
                if taskDetail['taskName'] not in autoconfig:
                    Util.log('"{}"昨天不存在或未签到,跳过'.format(taskDetail['taskName']))
                    Util.log("开始回滚以获取签到信息")
                    for back in range(30):
                        Util.log("回滚{}天".format(str(back + 2)))
                        signedinfo = self.GetSignedInfo(back + 2)
                        autoconfig = self.GenConfig(signedinfo)
                        if taskDetail['taskName'] in autoconfig:
                            Util.log("获取到签到信息,继续进行签到")
                            break
                    if taskDetail['taskName'] not in autoconfig:
                        Util.log("回滚一月仍未获取到签到信息,可能是新发布的任务,跳过")
                        continue
                # Skip tasks whose window opens more than 60s from now.
                t = Util.TimeCheck(taskDetail)
                if t != 0 and t > 60:
                    Util.log('"' + taskDetail['taskName'] + '"' + "目前不在签到时间,跳过")
                    continue
                Form = self.fillForm(taskDetail, autoconfig)
                if Form is None:
                    continue
                submitinfo = {
                    'username': self.userBaseInfo['username'],
                    'lon': autoconfig[taskDetail['taskName']]['lon'],
                    'lat': autoconfig[taskDetail['taskName']]['lat'],
                    'deviceId': self.userBaseInfo['deviceId']
                }
                if t > 0:
                    t = t + DELAY
                    Util.log("休眠{}s后开始签到".format(str(t)))
                    time.sleep(t)
                self.real_taskname = taskDetail['taskName']
                self.submitForm(submitinfo, Form)
def Do(School_Server_API, user):
    """Log in as `user` and run the sign-in and attendance flows once.

    Each flow runs in its own try/except so a failure in one does not
    prevent the other from running.  (Bare `except:` clauses narrowed to
    `except Exception:` so KeyboardInterrupt/SystemExit still propagate.)
    """
    session = Util.Login(user, School_Server_API)
    if session:
        Util.log('登陆成功')
        userBaseInfo = {
            'username': user['username'],
            'deviceId': Util.GenDeviceID(user['username'])
        }
        Signer = Sign(School_Server_API, session, userBaseInfo)
        Attendancer = Attendance(School_Server_API, session, userBaseInfo)
        try:
            Signer.Go()
        except Exception:
            Util.log("签到过程中出现异常")
            if PUSH_LEVEL < 2:
                Util.SendMessage("今日校园签到失败", "签到过程中出现异常,请手动签到")
        try:
            Attendancer.Go()
        except Exception:
            Util.log("查寝过程中出现异常")
            if PUSH_LEVEL < 2:
                Util.SendMessage("今日校园查寝失败", "查寝过程中出现异常,请手动签到")
    else:
        if PUSH_LEVEL < 2:
            Util.SendMessage("今日校园签到失败", "登录过程中出现错误,如若经常发生,请修改执行时间")
def main():
    """Assemble the school endpoints and credentials, run one full cycle,
    and push the accumulated log when the push level asks for it."""
    school_api = {
        'login-url': 'https://swu.campusphere.net/iap/login?service=https%3A%2F%2Fswu.campusphere.net%2Fportal%2Flogin',
        'host': 'swu.campusphere.net',
    }
    credentials = {'username': USERNAME, 'password': PASSWORD}
    Do(school_api, credentials)
    if PUSH_LEVEL > 1:
        Util.SendMessage('签到日志', Util.logs)
# Entry point invoked by the Tencent cloud-function runtime.
def main_handler(event, context):
    """Run main(); on failure log the traceback, push the log, and re-raise
    so the cloud runtime records the invocation as failed."""
    try:
        main()
    except Exception as e:
        Util.log(traceback.format_exc(), False)
        Util.SendMessage('出错了', Util.logs)
        raise e
    return 'success'
# Allow running the cloud-function entry point directly from the CLI.
if __name__ == '__main__':
    print(main_handler({}, {}))
| [
"1767306012@qq.com"
] | 1767306012@qq.com |
b0dce7f8558fb82dd6a9d88c322bc375a39a5fbd | 57e2725f3ad0b03b5ecd1648047c9b765aa08ce3 | /Project 4/classification/classification/answers.py | 665080ac6675e741fbdde53168576afbf9fbf7b0 | [] | no_license | asgamre/Pacman-AI | e25b87b1b39d849b7224d397ba3ce84a489d5b77 | 6ef803727fe7a0dba30ddc969986157adf13bb60 | refs/heads/master | 2021-01-19T19:50:40.672223 | 2017-04-17T00:36:55 | 2017-04-17T00:36:55 | 88,451,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | # answers.py
# ----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
def q2():
    """Answer to question 2."""
    answer = 'a'
    return answer
def q4():
    """Answer to question 4."""
    answer = 'a'
    return answer
| [
"Ameya Gamre"
] | Ameya Gamre |
5af11f662b713171f43dc2203aa05c28918272e5 | 8bf638167c699230a4cfaf55c6d7f1b7aeb72dc7 | /PointOS/system_setup.py | 828576fa3b7905558cd411ba6c7ed21d10448839 | [] | no_license | calebjohn24/PointOS | 6967b1ba8b33c598fa655504bf935345ef10f3fd | e91117b6ebac72ee35ac052be68526433bf048c5 | refs/heads/master | 2023-06-07T18:41:22.118084 | 2021-06-27T22:22:09 | 2021-06-27T22:22:09 | 335,115,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | import RPi.GPIO as GPIO
import json
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)
# Load the pin map; `with` closes the file (it was previously leaked open).
with open('/home/pi/PointOS/res/pinout.json') as pin_map_file:
    pin_map = dict(json.load(pin_map_file))
# Named aliases for the output pins (presumably imported by other modules —
# verify before removing any of them).
motor_ena = pin_map['output']['motor_ena']
motor_0 = pin_map['output']['motor_0']
motor_1 = pin_map['output']['motor_1']
motor_2 = pin_map['output']['motor_2']
r_dir = pin_map['output']['r_dir']
l_dir = pin_map['output']['l_dir']
r_step = pin_map['output']['r_step']
l_step = pin_map['output']['l_step']
laser = pin_map['output']['laser']
# Configure every output pin and start it in the LOW (safe) state.
for pin in pin_map['output']:
    GPIO.setup(pin_map['output'][pin], GPIO.OUT)
    GPIO.output(pin_map['output'][pin], GPIO.LOW)
| [
"cajohn0205@gmail.com"
] | cajohn0205@gmail.com |
c3af8fef67afd6550242c8ca323ebe060625aa59 | 0536e3c635c300a999764dba6f8cd766eeab95f2 | /uni_ticket/urls.py | 652787eb129ab484d29d304cbbaedde7ce73da93 | [
"Apache-2.0"
] | permissive | mspasiano/uniTicket | 57b7d4a6f2550529f37ecc6d685bd386e98590d3 | 1e8e4c2274293e751deea5b8b1fb4116136c5641 | refs/heads/master | 2020-12-02T20:28:47.297929 | 2020-01-10T11:03:43 | 2020-01-10T11:03:43 | 231,111,874 | 0 | 0 | Apache-2.0 | 2019-12-31T15:40:50 | 2019-12-31T15:40:49 | null | UTF-8 | Python | false | false | 15,976 | py | from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path, re_path
from django.utils.text import slugify
from django.views.generic import RedirectView
from . decorators import is_manager, is_operator, is_the_owner
from . settings import MANAGEMENT_URL_PREFIX
from . views import (datatables, generic, management,
manager, operator, user)
# Namespace used when reversing URLs ('uni_ticket:<name>').
app_name="uni_ticket"
_dashboard_name = 'dashboard'
# System/Generic URLs
ticket = 'ticket/<str:ticket_id>'
urlpatterns = [
    path('', RedirectView.as_view(url='/{}/'.format(_dashboard_name))),
    # Responsibility router on a structure (manager/operator/user)
    re_path(r'^manage/(?:(?P<structure_slug>[-\w]+))?$', generic.manage, name='manage'),
    # Attachments download
    path('{}/download/attachment/<str:attachment>/'.format(ticket), generic.download_attachment, name='download_attachment'),
    path('{}/reply/<str:reply_id>/download/attachment/'.format(ticket), generic.download_message_attachment, name='download_message_attachment'),
    path('{}/task/<str:task_id>/download/attachment/'.format(ticket), generic.download_task_attachment, name='download_task_attachment'),
    # Delete ticket message
    path('messages/delete/<str:ticket_message_id>/', generic.ticket_message_delete, name='message_delete'),
    path('email-notify/update/', generic.email_notify_change, name='email_notify_change'),
    path('print/ticket/<str:ticket_id>/', generic.ticket_detail_print, name='ticket_detail_print'),
]
# Datatables URLs
structure = '<str:structure_slug>'
urlpatterns += [
# User json
path('user_all_tickets.json', datatables.user_all_tickets, name='user_all_tickets_json'),
path('user_opened_ticket.json', datatables.user_opened_ticket, name='user_opened_ticket_json'),
path('user_closed_ticket.json', datatables.user_closed_ticket, name='user_closed_ticket_json'),
path('user_unassigned_ticket.json', datatables.user_unassigned_ticket, name='user_unassigned_ticket_json'),
# Manager json
path('{}/manager_unassigned_ticket.json'.format(structure), datatables.manager_unassigned_ticket, name='manager_unassigned_ticket_json'),
path('{}/manager_opened_ticket.json'.format(structure), datatables.manager_opened_ticket, name='manager_opened_ticket_json'),
path('{}/manager_closed_ticket.json'.format(structure), datatables.manager_closed_ticket, name='manager_closed_ticket_json'),
path('{}/manager_not_closed_ticket.json'.format(structure), datatables.manager_not_closed_ticket, name='manager_not_closed_ticket_json'),
# Operator json
path('{}/operator_unassigned_ticket.json'.format(structure), datatables.operator_unassigned_ticket, name='operator_unassigned_ticket_json'),
path('{}/operator_opened_ticket.json'.format(structure), datatables.operator_opened_ticket, name='operator_opened_ticket_json'),
path('{}/operator_closed_ticket.json'.format(structure), datatables.operator_closed_ticket, name='operator_closed_ticket_json'),
path('{}/operator_not_closed_ticket.json'.format(structure), datatables.operator_not_closed_ticket, name='operator_not_closed_ticket_json'),
]
# Management URLs (manager and operator)
base = 'manage/<str:structure_slug>'
tickets = '{}/tickets'.format(base)
ticket = '{}/ticket'.format(tickets)
ticket_id = '{}/<str:ticket_id>'.format(ticket)
task = '{}/task'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)
urlpatterns += [
# Ticket
path('{}/opened/'.format(tickets), management.manage_opened_ticket_url, name='manage_opened_ticket_url'),
path('{}/unassigned/'.format(tickets), management.manage_unassigned_ticket_url, name='manage_unassigned_ticket_url'),
path('{}/closed/'.format(tickets), management.manage_closed_ticket_url, name='manage_closed_ticket_url'),
path('{}/'.format(tickets), management.manage_not_closed_ticket_url, name='manage_not_closed_ticket_url'),
path('{}/'.format(ticket), management.manage_ticket_url, name='manage_ticket_url'),
path('{}/'.format(ticket_id), management.manage_ticket_url_detail, name='manage_ticket_url_detail'),
path('{}/messages/'.format(ticket_id), management.ticket_message_url, name='manage_ticket_message_url'),
path('{}/competence/add/'.format(ticket_id), management.ticket_competence_add_url, name='add_ticket_competence_url'),
path('{}/dependence/add/'.format(ticket_id), management.ticket_dependence_add_url, name='add_ticket_dependence_url'),
path('{}/dependence/remove/<str:master_ticket_id>/'.format(ticket_id), management.ticket_dependence_remove, name='remove_ticket_dependence'),
path('{}/take/'.format(ticket_id), management.ticket_take, name='prendi_ticket_in_carico'),
path('{}/close/'.format(ticket_id), management.ticket_close_url, name='close_ticket'),
path('{}/reopen/'.format(ticket_id), management.ticket_reopen, name='reopen_ticket'),
# Task
path('{}/add/'.format(task), management.task_add_new_url, name='add_ticket_task_url'),
path('{}/'.format(task_id), management.task_detail_url, name='manage_task_detail_url'),
path('{}/close/'.format(task_id), management.task_close_url, name='close_task'),
path('{}/delete/'.format(task_id), management.task_remove, name='task_remove'),
path('{}/riapri/'.format(task_id), management.task_reopen, name='reopen_task'),
path('{}/edit/remove-attachment/'.format(task_id), management.task_attachment_delete, name='manage_elimina_allegato_task'),
path('{}/edit/'.format(task_id), management.task_edit_url, name='edit_task'),
]
# Manager URLs
# Manager URLs: routes guarded by the is_manager decorator, covering
# tickets, tasks, offices, categories, input modules and conditions.
base = '{}/<str:structure_slug>'.format(slugify(MANAGEMENT_URL_PREFIX['manager']))
tickets = '{}/tickets'.format(base)
ticket_id = '{}/ticket/<str:ticket_id>'.format(tickets)
task = '{}/activities'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)
offices = '{}/offices'.format(base)
office = '{}/office'.format(offices)
office_id = '{}/<str:office_slug>'.format(office)
categories = '{}/categories'.format(base)
category = '{}/category'.format(categories)
category_id = '{}/<str:category_slug>'.format(category)
cat_input = '{}/input'.format(category_id)
cat_input_id = '{}/<int:module_id>'.format(cat_input)
condition = '{}/conditions/condition'.format(category_id)
condition_id = '{}/<int:condition_id>'.format(condition)
urlpatterns += [
    path('{}/{}/'.format(base, _dashboard_name), manager.dashboard, name='manager_dashboard'),
    # Ticket
    path('{}/opened/'.format(tickets), is_manager(generic.opened_ticket), name='manager_opened_ticket'),
    path('{}/unassigned/'.format(tickets), is_manager(generic.unassigned_ticket), name='manager_unassigned_ticket'),
    path('{}/closed/'.format(tickets), is_manager(generic.closed_ticket), name='manager_closed_ticket'),
    path('{}/'.format(tickets), is_manager(management.tickets), name='manager_tickets'),
    path('{}/'.format(ticket_id), is_manager(management.ticket_detail), name='manager_manage_ticket'),
    path('{}/messages/'.format(ticket_id), is_manager(management.ticket_message), name='manager_ticket_message'),
    path('{}/competence/add/'.format(ticket_id), is_manager(management.ticket_competence_add_new), name='manager_add_ticket_competence'),
    path('{}/competence/add/<str:str_slug>/'.format(ticket_id), is_manager(management.ticket_competence_add_final), name='manager_add_ticket_competence'),
    path('{}/dependence/add/'.format(ticket_id), is_manager(management.ticket_dependence_add_new), name='manager_add_ticket_dependence'),
    path('{}/close/'.format(ticket_id), is_manager(management.ticket_close), name='manager_close_ticket'),
    # Task
    path('{}/add/'.format(task), is_manager(management.task_add_new), name='manager_add_ticket_task'),
    path('{}/'.format(task_id), is_manager(management.task_detail), name='manager_task_detail'),
    path('{}/close/'.format(task_id), is_manager(management.task_close), name='manager_close_task'),
    path('{}/edit/'.format(task_id), is_manager(management.task_edit), name='manager_edit_task'),
    # Offices
    path('{}/new/'.format(office), manager.office_add_new, name='manager_office_add_new'),
    path('{}/'.format(office_id), manager.office_detail, name='manager_office_detail'),
    path('{}/edit/'.format(office_id), manager.office_edit, name='manager_office_edit'),
    path('{}/remove-operator/<int:employee_id>/'.format(office_id), manager.office_remove_operator, name='manager_remove_office_operator'),
    path('{}/add-category/'.format(office_id), manager.office_add_category, name='manager_add_office_category'),
    path('{}/remove-category/<str:category_slug>/'.format(office_id), manager.office_remove_category, name='manager_remove_office_category'),
    path('{}/disable/'.format(office_id), manager.office_disable, name='manager_disable_office'),
    path('{}/enable/'.format(office_id), manager.office_enable, name='manager_enable_office'),
    path('{}/delete/'.format(office_id), manager.office_delete, name='manager_delete_office'),
    path('{}/'.format(offices), manager.offices, name='manager_offices'),
    # Categories
    path('{}/'.format(categories), manager.categories, name='manager_categories'),
    path('{}/new/'.format(category), manager.category_add_new, name='manager_category_add_new'),
    path('{}/'.format(category_id), manager.category_detail, name='manager_category_detail'),
    path('{}/edit/'.format(category_id), manager.category_edit, name='manager_category_edit'),
    path('{}/disable/'.format(category_id), manager.category_disable, name='manager_disable_category'),
    path('{}/enable/'.format(category_id), manager.category_enable, name='manager_enable_category'),
    path('{}/delete/'.format(category_id), manager.category_delete, name='manager_delete_category'),
    path('{}/new/'.format(category_id).format(cat_input), manager.category_input_module_new, name='manager_category_new_input_module'),
    # Category input modules
    path('{}/'.format(cat_input_id), manager.category_input_module_details, name='manager_category_input_module'),
    path('{}/edit/'.format(cat_input_id), manager.category_input_module_edit, name='manager_category_input_module_edit'),
    path('{}/enable/'.format(cat_input_id), manager.category_input_module_enable, name='manager_category_input_module_enable'),
    path('{}/disable/'.format(cat_input_id), manager.category_input_module_disable, name='manager_category_input_module_disable'),
    path('{}/delete/'.format(cat_input_id), manager.category_input_module_delete, name='manager_category_input_module_delete'),
    path('{}/preview/'.format(cat_input_id), manager.category_input_module_preview, name='manager_category_input_module_preview'),
    path('{}/field/<int:field_id>/delete/'.format(cat_input_id), manager.category_input_field_delete, name='manager_category_input_field_delete'),
    path('{}/field/<int:field_id>/edit/'.format(cat_input_id), manager.category_input_field_edit, name='manager_category_input_field_edit'),
    # Category conditions
    path('{}/new/'.format(condition), manager.category_condition_new, name='manager_category_condition_new'),
    path('{}/edit/'.format(condition_id), manager.category_condition_edit, name='manager_category_condition_edit'),
    path('{}/delete/'.format(condition_id), manager.category_condition_delete, name='manager_category_condition_delete'),
    path('{}/disable/'.format(condition_id), manager.category_condition_disable, name='manager_category_condition_disable'),
    path('{}/enable/'.format(condition_id), manager.category_condition_enable, name='manager_category_condition_enable'),
    path('{}/'.format(condition_id), manager.category_condition_detail, name='manager_category_condition_detail'),
    path('{}/remove-office/<str:office_slug>/'.format(category_id), manager.category_remove_office, name='manager_remove_category_office'),
    path('{}/settings/'.format(base), is_manager(generic.user_settings), name='manager_user_settings'),
    path('{}/messages/'.format(base), is_manager(generic.ticket_messages), name='manager_messages'),
]
# Operator URLs
# Every operator route is scoped to a structure (<str:structure_slug>);
# the prefix comes from MANAGEMENT_URL_PREFIX['operator'].
base = '{}/<str:structure_slug>'.format(slugify(MANAGEMENT_URL_PREFIX['operator']))
tickets = '{}/tickets'.format(base)
ticket_id = '{}/ticket/<str:ticket_id>'.format(tickets)
task = '{}/activities'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)
urlpatterns += [
    path('{}/{}/'.format(base, _dashboard_name), operator.dashboard, name='operator_dashboard'),
    # Ticket
    path('{}/opened/'.format(tickets), is_operator(generic.opened_ticket), name='operator_opened_ticket'),
    path('{}/unassigned/'.format(tickets), is_operator(generic.unassigned_ticket), name='operator_unassigned_ticket'),
    path('{}/closed/'.format(tickets), is_operator(generic.closed_ticket), name='operator_closed_ticket'),
    path('{}/'.format(tickets), is_operator(management.tickets), name='operator_tickets'),
    path('{}/'.format(ticket_id), is_operator(management.ticket_detail), name='operator_manage_ticket'),
    path('{}/messages/'.format(ticket_id), is_operator(management.ticket_message), name='operator_ticket_message'),
    path('{}/competence/add/'.format(ticket_id), is_operator(management.ticket_competence_add_new), name='operator_add_ticket_competence'),
    # NOTE(review): this route reuses the name 'operator_add_ticket_competence'
    # from the route above; reverse() resolves by argument signature -- confirm intentional.
    path('{}/competence/add/<str:str_slug>/'.format(ticket_id), is_operator(management.ticket_competence_add_final), name='operator_add_ticket_competence'),
    path('{}/dependence/add/'.format(ticket_id), is_operator(management.ticket_dependence_add_new), name='operator_add_ticket_dependence'),
    path('{}/close/'.format(ticket_id), is_operator(management.ticket_close), name='operator_close_ticket'),
    # Task
    path('{}/add/'.format(task), is_operator(management.task_add_new), name='operator_add_ticket_task'),
    path('{}/'.format(task_id), is_operator(management.task_detail), name='operator_task_detail'),
    path('{}/close/'.format(task_id), is_operator(management.task_close), name='operator_close_task'),
    path('{}/edit/'.format(task_id), is_operator(management.task_edit), name='operator_edit_task'),
    path('{}/settings/'.format(base), is_operator(generic.user_settings), name='operator_user_settings'),
    path('{}/messages/'.format(base), is_operator(generic.ticket_messages), name='operator_messages'),
]
# User URLs
# End-user routes are not scoped to a structure; they live under a plain
# 'tickets/' prefix.
tickets = 'tickets'
ticket = '{}/ticket'.format(tickets)
ticket_id = '{}/<str:ticket_id>'.format(ticket)
urlpatterns += [
    path('{}/'.format(_dashboard_name), user.dashboard, name='user_dashboard'),
    path('{}/opened/'.format(tickets), generic.opened_ticket, name='user_opened_ticket'),
    path('{}/unassigned/'.format(tickets), generic.unassigned_ticket, name='user_unassigned_ticket'),
    path('{}/closed/'.format(tickets), generic.closed_ticket, name='user_closed_ticket'),
    path('{}/'.format(ticket), user.ticket_url, name='user_ticket_url'),
    # NOTE(review): 'new_ticket_preload' is used by both of the next two routes
    # (with and without struttura_slug); reverse() picks by args -- confirm intentional.
    path('{}/new/'.format(ticket), user.ticket_new_preload, name='new_ticket_preload'),
    path('{}/new/<str:struttura_slug>/'.format(ticket), user.ticket_new_preload, name='new_ticket_preload'),
    path('{}/new/<str:struttura_slug>/<str:categoria_slug>/'.format(ticket), user.ticket_add_new, name='add_new_ticket'),
    path('{}/messages/'.format(ticket_id), user.ticket_message, name='ticket_message'),
    path('{}/edit/'.format(ticket_id), user.ticket_edit, name='ticket_edit'),
    path('{}/edit/remove-attachment/<str:attachment>/'.format(ticket_id), user.delete_my_attachment, name='delete_my_attachment'),
    path('{}/delete/'.format(ticket_id), user.ticket_delete, name='elimina_ticket'),
    path('{}/close/'.format(ticket_id), user.ticket_close, name='user_close_ticket'),
    path('{}/activity/<str:task_id>/'.format(ticket_id), user.task_detail, name='task_detail'),
    # Catch-all ticket detail goes last so the more specific routes above win.
    path('{}/'.format(ticket_id), is_the_owner(user.ticket_detail), name='ticket_detail'),
    path('settings/', generic.user_settings, name='user_settings'),
    path('messages/', generic.ticket_messages, name='messages'),
]
| [
"francesco.filicetti@unical.it"
] | francesco.filicetti@unical.it |
b76e0be9b798084817428d24c322a4db18d3e390 | a9f160d9e2e2585e259661f94fff57f26692791d | /system.py | aae564d97c4bec42371d96a41ee85ac315cfe0ea | [] | no_license | zy-gao/deepSA2018 | 0d5d996e6451c83de47f86158d8311a46b960958 | 9ce810e84d236282d963fcfaa12b708372c7b55b | refs/heads/master | 2020-03-26T16:10:24.545492 | 2018-08-27T04:48:27 | 2018-08-27T04:48:27 | 145,085,767 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 33,082 | py | from __future__ import print_function
import numpy as np
from numpy import zeros, newaxis
from keras import regularizers
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Embedding, Input
from keras.layers import LSTM, SimpleRNN, GRU, RepeatVector, Permute, merge, Flatten, Lambda, Concatenate
from keras.layers import Bidirectional
from keras.utils import np_utils
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import optimizers
from keras.utils.np_utils import to_categorical
from keras import backend as K
import sys
import tensorflow as tf
import h5py
#------------------------------------------------------------------------------------------------------------
if len(sys.argv) < 5 :
    # Not enough arguments: print the usage banner and abort.
    print('[usage] python system.py [usage of data] [embedding] [class weights] [lexicons features]')
    print('usage of data : train-18, train-all, train')
    print('embedding : glove-t, glove-g, acl2015, word2vec, self')
    print('class weights : True / False')
    print('lexicons features : True / False')
    sys.exit(1)
batch_size = 32
# Unpack the four positional options in one go.
UsageOfData, Embedding, ClassWeights, LexiconsFeatures = sys.argv[1:5]
# Validate each option against its closed set of legal values.
if UsageOfData not in ('train-18', 'train-all', 'train') :
    print('The "usage of data" is wrong!!!')
    sys.exit(1)
if Embedding not in ('glove-t', 'glove-g', 'acl2015', 'word2vec', 'self') :
    print('The "embedding" is wrong!!!')
    sys.exit(1)
if ClassWeights not in ('True', 'False') :
    print('The "class weights" is wrong!!!')
    sys.exit(1)
if LexiconsFeatures not in ('True', 'False') :
    print('The "lexicons features" is wrong!!!')
    sys.exit(1)
#------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------')
print('Loading word list...')
# word_list maps token -> vocabulary index; the file stores one
# "<index> <token>" pair per line.
word_list = {}
if UsageOfData == 'train-18' :
    word_list_path = './Data/wordList/wordList-2018.txt'
else :
    word_list_path = './Data/wordList/wordList-2017-2018.txt'
# Context manager guarantees the file is closed even if parsing fails;
# iterating the handle avoids loading the whole file via readlines().
with open(word_list_path, 'r') as f:
    for line in f:
        values = line.split()
        coefs = values[0]
        word = values[1]
        word_list[word] = coefs
print('word list :', len(word_list))
#------------------------------------------------------------------------------------------------------------
if LexiconsFeatures == 'True' :
    print('--------------------------------------------------')
    print('Load lexicons...')
    # lexicon[i] maps token -> normalized sentiment score from the i-th resource.
    lexicon = [{}, {}, {}, {}]
    fileName = ['normalize_afinn_score.txt', 'normalize_Sentiment140_score.txt', 'normalize_sentistrength_score.txt', 'normalize_vader_score.txt']
    for i, fname in enumerate(fileName) :
        # Each file holds tab-separated "<token>\t<score>" lines.
        with open('./Lexicons/' + fname, 'r') as LexiconFile :
            for line in LexiconFile :
                token = line.split('\t')
                # Keep the first score seen for a token; ignore duplicates.
                if lexicon[i].get(token[0]) is None :
                    lexicon[i][token[0]] = float(token[1].split('\n')[0])
    print('AFINN lexicon :', len(lexicon[0]))
    print('Sentiment140 lexicon :', len(lexicon[1]))
    print('Sentistrength lexicon :', len(lexicon[2]))
    print('Vader lexicon :', len(lexicon[3]))
#------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------')
print('Loading Data...')
def LoadData(name, LexiconsFeatures) :
    """Read one tokenized tweet file and map tokens to vocabulary indices.

    Parameters
    ----------
    name : str
        File name under ./Data/processed/ (one whitespace-tokenized tweet per line).
    LexiconsFeatures : str
        'True' to also collect the four per-token lexicon scores.

    Returns
    -------
    X : np.ndarray
        Array of per-tweet index lists (tokens missing from word_list are dropped).
    Score : np.ndarray
        Per-token 4-dim lexicon score lists, aligned with X.
        Only returned when LexiconsFeatures == 'True'.
    """
    data = []
    score = []
    with open('./Data/processed/' + name, 'r') as f:
        for line in f:
            temp = []
            tempScore = []
            for word in line.split():
                # Tokens outside the vocabulary are silently dropped, so the
                # score list stays aligned with the index list.
                if word in word_list :
                    temp.append(int(word_list[word]))
                    if LexiconsFeatures == 'True' :
                        # One score per lexicon; 0.0 when the token is not listed.
                        tempScore.append([float(lex.get(word, 0.0)) for lex in lexicon])
            data.append(temp)
            if LexiconsFeatures == 'True' :
                score.append(tempScore)
    X = np.asarray(data)
    if LexiconsFeatures == 'True' :
        return X, np.asarray(score)
    return X
# Load the tokenized datasets (optionally with per-token lexicon scores)
# and zero-pad every sequence to a fixed maxlen.
if LexiconsFeatures == 'True' :
    XTrain18, ScoreTrain18 = LoadData('2018-Valence-oc-En-train-data.tok', LexiconsFeatures)
    if UsageOfData != 'train-18' :
        XTrain17, ScoreTrain17 = LoadData('2017-semEval-en-train-data.tok', LexiconsFeatures)
    XDev18, ScoreDev18 = LoadData('2018-Valence-oc-En-dev-data.tok', LexiconsFeatures)
    XTest18, ScoreTest18 = LoadData('2018-Valence-oc-En-test-data.tok', LexiconsFeatures)
else :
    # Placeholders so later word_embedding() calls can pass the names unchanged.
    ScoreTrain18 = ScoreTrain17 = ScoreDev18 = ScoreTest18 = 0
    XTrain18 = LoadData('2018-Valence-oc-En-train-data.tok', LexiconsFeatures)
    if UsageOfData != 'train-18' :
        ScoreTrain = 0
        XTrain17 = LoadData('2017-semEval-en-train-data.tok', LexiconsFeatures)
    XDev18 = LoadData('2018-Valence-oc-En-dev-data.tok', LexiconsFeatures)
    XTest18 = LoadData('2018-Valence-oc-En-test-data.tok', LexiconsFeatures)
print('Padding sequences...')
if UsageOfData != 'train-18' :
    # Combined 2017+2018 training set; the longer corpus needs maxlen 99.
    XTrain = np.concatenate((XTrain18, XTrain17), axis=0)
    maxlen = 99
    XTrain18 = sequence.pad_sequences(XTrain18, maxlen=maxlen)
    XTrain = sequence.pad_sequences(XTrain, maxlen=maxlen)
    XDev18 = sequence.pad_sequences(XDev18, maxlen=maxlen)
    XTest18 = sequence.pad_sequences(XTest18, maxlen=maxlen)
    if LexiconsFeatures == 'True' :
        ScoreTrain = np.concatenate((ScoreTrain18, ScoreTrain17), axis=0)
        ScoreTrain18 = sequence.pad_sequences(ScoreTrain18, maxlen=maxlen)
        ScoreTrain = sequence.pad_sequences(ScoreTrain, maxlen=maxlen)
        ScoreDev18 = sequence.pad_sequences(ScoreDev18, maxlen=maxlen)
        ScoreTest18 = sequence.pad_sequences(ScoreTest18, maxlen=maxlen)
else :
    # 2018-only data fits in maxlen 56.
    maxlen = 56
    XTrain18 = sequence.pad_sequences(XTrain18, maxlen=maxlen)
    XDev18 = sequence.pad_sequences(XDev18, maxlen=maxlen)
    XTest18 = sequence.pad_sequences(XTest18, maxlen=maxlen)
    if LexiconsFeatures == 'True' :
        ScoreTrain18 = sequence.pad_sequences(ScoreTrain18, maxlen=maxlen)
        ScoreDev18 = sequence.pad_sequences(ScoreDev18, maxlen=maxlen)
        ScoreTest18 = sequence.pad_sequences(ScoreTest18, maxlen=maxlen)
print('train18 data :', XTrain18.shape)
if UsageOfData != 'train-18' :
    print('trainAll data :', XTrain.shape)
print('dev data :', XDev18.shape)
print('test data :', XTest18.shape)
if LexiconsFeatures == 'True' :
    print('Score Train :', ScoreTrain18.shape)
    if UsageOfData != 'train-18' :
        print('Score TrainAll :', ScoreTrain.shape)
    print('Score Dev :', ScoreDev18.shape)
    print('Score Test :', ScoreTest18.shape)
#------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------')
print('Loading Label...')
YTrain18 = np.loadtxt('./Data/processed/2018-Valence-oc-En-train-label.txt')
YDev18 = np.loadtxt('./Data/processed/2018-Valence-oc-En-dev-label.txt')
YTest18 = np.loadtxt('./Data/processed/2018-Valence-oc-En-test-label.txt')
def _encode_label_sets(y) :
    """Build the five one-hot targets used by the five sub-models.

    y holds valence labels in [-3, 3]. Returns a 5-tuple:
      three:    negative / neutral / positive (3 classes)
      negative: -3 / -2 / -1 / other (4 classes)
      neutral:  neutral / other (2 classes)
      positive: other / 1 / 2 / 3 (4 classes)
      seven:    full seven-point scale shifted to [0, 6]
    """
    three = to_categorical([0 if x < 0 else 2 if x > 0 else 1 for x in y], num_classes=3)
    negative = to_categorical([3 if x > 0 else x+3 for x in y], num_classes=4)
    neutral = to_categorical([0 if x == 0 else 1 for x in y], num_classes=2)
    positive = to_categorical([0 if x < 0 else x for x in y], num_classes=4)
    seven = to_categorical([x+3 for x in y], num_classes=7)
    return three, negative, neutral, positive, seven
if UsageOfData == 'train-18' :
    YTrainThree, YTrainNegative, YTrainNeutral, YTrainPositive, YTrainSeven = _encode_label_sets(YTrain18)
else :
    YTrain17 = np.loadtxt('./Data/processed/2017-semEval-en-train-label.txt')
    # The labels of SemEval-2017 are [-3,0,3] in SemEval-2018
    YTrain17 = [-3 if x < 0 else 3 if x > 0 else 0 for x in YTrain17]
    YTrain = np.concatenate((YTrain18, YTrain17), axis=0)
    # The three-class target always uses the combined 2017+2018 data; the
    # finer-grained targets use it only in 'train-all' mode.
    YTrainThree = _encode_label_sets(YTrain)[0]
    _sub_source = YTrain if UsageOfData == 'train-all' else YTrain18
    _, YTrainNegative, YTrainNeutral, YTrainPositive, YTrainSeven = _encode_label_sets(_sub_source)
YDevThree, YDevNegative, YDevNeutral, YDevPositive, YDevSeven = _encode_label_sets(YDev18)
YTestThree, YTestNegative, YTestNeutral, YTestPositive, YTestSeven = _encode_label_sets(YTest18)
#------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------')
print('Loading word vectors...')
embeddings_index = {}
# Pick the pre-trained embedding file requested on the command line.
if Embedding == 'glove-t' :
    vector_path = './vector/glove.twitter.27B.200d.2017.2018.txt' #glove twitter 200d
elif Embedding == 'glove-g' :
    vector_path = './vector/glove.840B.300d-2017-2018.txt' #glove common crawl 300d
elif Embedding == 'acl2015' :
    vector_path = './vector/word2vec-2017-2018.txt' #ACL W-NUT 2015 400d
elif Embedding == 'word2vec' :
    vector_path = './vector/GoogleNews-vectors-negative300-2017-2018.txt' #GoogleNews 300
elif Embedding == 'self' :
    vector_path = './vector/selfWordVector.txt' #Self train word2vec 400d
with open(vector_path) as f:
    for line in f:
        values = line.split()
        # Skip short lines (e.g. a word2vec "<count> <dim>" header row).
        if len(values) < 30 :
            continue
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('word vectors :', len(embeddings_index))
# next(iter(...)) works on both Python 2 and 3; dict.values()[0] breaks on Python 3.
print('dimintion of word vectors :', len(next(iter(embeddings_index.values()))))
#------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------')
print('word embedding...')
#get the embedding dimension
# next(iter(...)) is Python 2/3-compatible; dict.values()[0] is Python-2-only.
EMBEDDING_DIM = len(next(iter(embeddings_index.values())))
# Row i holds the vector for vocabulary index i; rows for words without a
# pre-trained vector stay all-zero. Row 0 is reserved for the padding index.
embedding_matrix = np.zeros((len(word_list) + 1, EMBEDDING_DIM))
for word, i in word_list.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[int(i)] = embedding_vector
print('embedding_matrix :', embedding_matrix.shape)
#------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------')
print('input to embedding...')
def word_embedding(data, score, dim, LexiconsFeatures):
    """Replace padded index sequences with their embedding vectors.

    Parameters
    ----------
    data : 2-D array of vocabulary indices (padded sequences).
    score : per-token lexicon scores aligned with data; only read when
        LexiconsFeatures == 'True'.
    dim : unused; kept for call-site compatibility (the vector width comes
        from embedding_matrix itself).
    LexiconsFeatures : 'True' to append the 4 lexicon scores to each vector.

    Returns
    -------
    3-D np.ndarray of shape (samples, maxlen, EMBEDDING_DIM [+ 4]).
    """
    sentences = []
    for idx1, seq in enumerate(data):
        rows = []
        for idx2, token_id in enumerate(seq):
            vec = embedding_matrix[token_id]
            if LexiconsFeatures == 'True':
                vec = np.concatenate((vec, score[idx1][idx2]), axis=0)
            rows.append(vec)
        sentences.append(rows)
    return np.asarray(sentences)
# Convert every padded index matrix into dense embedding tensors in place.
XTrain18 = word_embedding(XTrain18, ScoreTrain18, EMBEDDING_DIM, LexiconsFeatures)
print('shape of train18 :', XTrain18.shape)
if UsageOfData != 'train-18' :
    XTrain = word_embedding(XTrain, ScoreTrain, EMBEDDING_DIM, LexiconsFeatures)
    print('shape of trainAll :', XTrain.shape)
XDev18 = word_embedding(XDev18, ScoreDev18, EMBEDDING_DIM, LexiconsFeatures)
print('shape of dev :', XDev18.shape)
XTest18 = word_embedding(XTest18, ScoreTest18, EMBEDDING_DIM, LexiconsFeatures)
print('shape of test :', XTest18.shape)
#------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------')
print('Training...')
def PCC(y_true, y_pred) :
    """Pearson correlation coefficient metric between targets and predictions.

    NOTE(review): this mixes numpy ops (np.dot, np.power) with Keras backend
    tensors; it appears to rely on the backend's operator overloading --
    confirm it behaves as intended on 2-D (batch, classes) tensors.
    """
    pred_mean = K.mean(y_pred)
    label_mean = K.mean(y_true)
    covariance = K.sum(np.dot(y_pred-pred_mean, y_true-label_mean))
    standard_deviation_pred = K.sqrt(K.sum(np.power(y_pred-pred_mean, 2)))
    standard_deviation_label = K.sqrt(K.sum(np.power(y_true-label_mean, 2)))
    pearson = covariance / (standard_deviation_pred * standard_deviation_label)
    return pearson
# H-Parameter
adamC = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, clipnorm=5.0)
_input = Input(shape=[XTrain18.shape[1], XTrain18.shape[2]], dtype='float32')
monitors = ['val_loss', 'val_loss', 'val_loss', 'val_loss', 'val_loss']
patiences = [10, 15, 15, 15, 15]
modelName = ['Three', 'Negative', 'Neural', 'Positive', 'Seven']
earlyStopping1 = EarlyStopping(monitor=monitors[0], min_delta=0, patience=patiences[0], verbose=0, mode='auto')
checkpoint1 = ModelCheckpoint('./saveModel/'+modelName[0]+'.hdf5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
cb1 = [earlyStopping1, checkpoint1]
earlyStopping2 = EarlyStopping(monitor=monitors[1], min_delta=0, patience=patiences[1], verbose=0, mode='auto')
checkpoint2 = ModelCheckpoint('./saveModel/'+modelName[1]+'.hdf5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
cb2 = [earlyStopping2, checkpoint2]
earlyStopping3 = EarlyStopping(monitor=monitors[2], min_delta=0, patience=patiences[2], verbose=0, mode='auto')
checkpoint3 = ModelCheckpoint('./saveModel/'+modelName[2]+'.hdf5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
cb3 = [earlyStopping3, checkpoint3]
earlyStopping4 = EarlyStopping(monitor=monitors[3], min_delta=0, patience=patiences[3], verbose=0, mode='auto')
checkpoint4 = ModelCheckpoint('./saveModel/'+modelName[3]+'.hdf5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
cb4 = [earlyStopping4, checkpoint4]
earlyStopping5 = EarlyStopping(monitor=monitors[4], min_delta=0, patience=patiences[4], verbose=0, mode='auto')
checkpoint5 = ModelCheckpoint('./saveModel/'+modelName[4]+'.hdf5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
cb5 = [earlyStopping5, checkpoint5]
if ClassWeights == 'True' :
    # Inverse-frequency class weights: weight[class] = K / class_count.
    kw1 = 14389.0
    three_class_weight = {c: kw1 / n for c, n in enumerate([9037.0, 18527.0, 15603.0])}
    kw2 = 175.0
    negative_class_weight = {c: kw2 / n for c, n in enumerate([129.0, 249.0, 78.0, 725.0])}
    kw3 = 495.0
    neutral_class_weight = {c: kw3 / n for c, n in enumerate([341.0, 840.0])}
    kw4 = 175.0
    positive_class_weight = {c: kw4 / n for c, n in enumerate([797.0, 167.0, 92.0, 125.0])}
    kw5 = 140.0
    seven_class_weight = {c: kw5 / n for c, n in enumerate([129.0, 249.0, 78.0, 341.0, 167.0, 92.0, 125.0])}
    print('three_class_weight = ', three_class_weight)
    print('negative_class_weight = ', negative_class_weight)
    print('neutral_class_weight = ', neutral_class_weight)
    print('positive_class_weight = ', positive_class_weight)
    print('seven_class_weight = ', seven_class_weight)
# Three class model : {-1, 0, 1}------------------------------------------------------------------------------------------
# Coarse polarity model. Its BiLSTM weights (w1) seed the first layer of
# every later sub-model (transfer learning).
ThreeLstm = Bidirectional(LSTM(200, dropout=0.5, return_sequences=True), merge_mode='concat', name='ThreeLstm')(_input)
# Mean-pool the per-timestep outputs into a single sentence vector.
ThreeLstmSum = Lambda(lambda xin: K.mean(xin, axis=1))(ThreeLstm)
ThreeDense = Dense(200, activation='tanh', name='ThreeRepre')(ThreeLstmSum)
ThreeOutput = Dense(3, activation='softmax')(ThreeDense)
model1 = Model(inputs=_input, outputs=ThreeOutput)
model1.summary()
model1.compile(loss='categorical_crossentropy', optimizer=adamC, metrics=['accuracy', PCC])
# 'train-18' uses only the 2018 data; otherwise the combined 2017+2018 set.
if ClassWeights == 'True' :
    if UsageOfData == 'train-18' :
        model1.fit(XTrain18, YTrainThree, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevThree), callbacks=cb1, verbose=1, class_weight=three_class_weight)
    else :
        model1.fit(XTrain, YTrainThree, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevThree), callbacks=cb1, verbose=1, class_weight=three_class_weight)
else :
    if UsageOfData == 'train-18' :
        model1.fit(XTrain18, YTrainThree, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevThree), callbacks=cb1, verbose=1)
    else :
        model1.fit(XTrain, YTrainThree, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevThree), callbacks=cb1, verbose=1)
# Reload the best checkpoint, then extract the BiLSTM weights for transfer.
model1.load_weights('./saveModel/' + modelName[0] + '.hdf5')
w1 = model1.get_layer(name='ThreeLstm').get_weights()
# Negative class model : {-3, -2, -1, other}----------------------------------------------------------------------------------
# Negative-intensity model; its first BiLSTM is initialized from the
# three-class model (w1) and its second BiLSTM (w2) is later transferred
# into the seven-class model.
NegativeLstm = Bidirectional(LSTM(200, dropout=0.5, return_sequences=True), merge_mode='concat', name='NegativeLstm1')(_input)
NegativeLstm = Bidirectional(LSTM(150, dropout=0.3, return_sequences=True), merge_mode='concat', name='NegativeLstm2')(NegativeLstm)
NegativeLstmSum = Lambda(lambda xin: K.mean(xin, axis=1))(NegativeLstm)
NegativeDense = Dense(200, activation='tanh', name='NegativeRepre')(NegativeLstmSum)
NegativeOutput = Dense(4, activation='softmax')(NegativeDense)
model2 = Model(inputs=_input, outputs=NegativeOutput)
model2.summary()
model2.get_layer(name='NegativeLstm1').set_weights(w1)
model2.compile(loss='categorical_crossentropy', optimizer=adamC, metrics=['accuracy', PCC])
# Sub-models train on 2018-only data in 'train-18' and 'train' modes.
if ClassWeights == 'True' :
    if UsageOfData == 'train-18' or UsageOfData == 'train' :
        model2.fit(XTrain18, YTrainNegative, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevNegative), callbacks=cb2, verbose=1, class_weight=negative_class_weight)
    else :
        model2.fit(XTrain, YTrainNegative, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevNegative), callbacks=cb2, verbose=1, class_weight=negative_class_weight)
else :
    if UsageOfData == 'train-18' or UsageOfData == 'train' :
        model2.fit(XTrain18, YTrainNegative, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevNegative), callbacks=cb2, verbose=1)
    else :
        model2.fit(XTrain, YTrainNegative, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevNegative), callbacks=cb2, verbose=1)
model2.load_weights('./saveModel/' + modelName[1] + '.hdf5')
w2 = model2.get_layer(name='NegativeLstm2').get_weights()
# Neutral class model : {0, other}-----------------------------------------------------------------------------------------
# Binary neutral-vs-other model; first BiLSTM seeded from w1, second BiLSTM
# (w3) later transferred into the seven-class model.
NeuralLstm = Bidirectional(LSTM(200, dropout=0.5, return_sequences=True), merge_mode='concat', name='NeuralLstm1')(_input)
NeuralLstm = Bidirectional(LSTM(150, dropout=0.3, return_sequences=True), merge_mode='concat', name='NeuralLstm2')(NeuralLstm)
NeuralLstmSum = Lambda(lambda xin: K.mean(xin, axis=1))(NeuralLstm)
NeuralDense = Dense(100, activation='tanh', name='NeuralRepre')(NeuralLstmSum)
NeuralOutput = Dense(2, activation='softmax')(NeuralDense)
model3 = Model(inputs=_input, outputs=NeuralOutput)
model3.summary()
model3.get_layer(name='NeuralLstm1').set_weights(w1)
model3.compile(loss='categorical_crossentropy', optimizer=adamC, metrics=['accuracy', PCC])
if ClassWeights == 'True' :
    if UsageOfData == 'train-18' or UsageOfData == 'train' :
        model3.fit(XTrain18, YTrainNeutral, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevNeutral), callbacks=cb3, verbose=1, class_weight=neutral_class_weight)
    else :
        model3.fit(XTrain, YTrainNeutral, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevNeutral), callbacks=cb3, verbose=1, class_weight=neutral_class_weight)
else :
    if UsageOfData == 'train-18' or UsageOfData == 'train' :
        model3.fit(XTrain18, YTrainNeutral, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevNeutral), callbacks=cb3, verbose=1)
    else :
        model3.fit(XTrain, YTrainNeutral, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevNeutral), callbacks=cb3, verbose=1)
model3.load_weights('./saveModel/' + modelName[2] + '.hdf5')
w3 = model3.get_layer(name='NeuralLstm2').get_weights()
# Positive class model : {1, 2, 3, other}-----------------------------------------------------------------------------------
# Positive-intensity model; first BiLSTM seeded from w1, second BiLSTM (w4)
# later transferred into the seven-class model.
PositiveLstm = Bidirectional(LSTM(200, dropout=0.5, return_sequences=True), merge_mode='concat', name='PositiveLstm1')(_input)
PositiveLstm = Bidirectional(LSTM(150, dropout=0.3, return_sequences=True), merge_mode='concat', name='PositiveLstm2')(PositiveLstm)
PositiveLstmSum = Lambda(lambda xin: K.mean(xin, axis=1))(PositiveLstm)
PositiveDense = Dense(200, activation='tanh', name='PositiveRepre')(PositiveLstmSum)
PositiveOutput = Dense(4, activation='softmax')(PositiveDense)
model4 = Model(inputs=_input, outputs=PositiveOutput)
model4.summary()
model4.get_layer(name='PositiveLstm1').set_weights(w1)
model4.compile(loss='categorical_crossentropy', optimizer=adamC, metrics=['accuracy', PCC])
if ClassWeights == 'True' :
    if UsageOfData == 'train-18' or UsageOfData == 'train' :
        model4.fit(XTrain18, YTrainPositive, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevPositive), callbacks=cb4, verbose=1, class_weight=positive_class_weight)
    else :
        model4.fit(XTrain, YTrainPositive, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevPositive), callbacks=cb4, verbose=1, class_weight=positive_class_weight)
else :
    if UsageOfData == 'train-18' or UsageOfData == 'train' :
        model4.fit(XTrain18, YTrainPositive, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevPositive), callbacks=cb4, verbose=1)
    else :
        model4.fit(XTrain, YTrainPositive, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevPositive), callbacks=cb4, verbose=1)
model4.load_weights('./saveModel/' + modelName[3] + '.hdf5')
w4 = model4.get_layer(name='PositiveLstm2').get_weights()
# Seven class model : {-3, -2, -1, 0, 1, 2, 3}---------------------------------------------------------------------------
# Final model: one shared BiLSTM (seeded from the three-class model) feeding
# three parallel BiLSTMs seeded from the negative / neutral / positive
# sub-models, merged and followed by a soft attention layer.
SevenLstm1 = Bidirectional(LSTM(200, dropout=0.5, return_sequences=True), merge_mode='concat', name='SevenLstm1')(_input)
SevenLstm2 = Bidirectional(LSTM(150, dropout=0.3, return_sequences=True), merge_mode='concat', name='SevenLstm2')(SevenLstm1)
SevenLstm3 = Bidirectional(LSTM(150, dropout=0.3, return_sequences=True), merge_mode='concat', name='SevenLstm3')(SevenLstm1)
SevenLstm4 = Bidirectional(LSTM(150, dropout=0.3, return_sequences=True), merge_mode='concat', name='SevenLstm4')(SevenLstm1)
# NOTE(review): merge(..., mode=...) and Dense(..., bias=False) are Keras 1
# style APIs mixed with Keras 2 functional calls -- confirm the installed
# Keras version supports this combination.
SevenLstm = merge([SevenLstm2, SevenLstm3, SevenLstm4], mode='concat')
SevenLstm = Bidirectional(LSTM(200, dropout=0.3, return_sequences=True), merge_mode='concat')(SevenLstm)
# Additive attention: score each timestep, softmax over time, then take the
# attention-weighted sum of the BiLSTM outputs as the sentence representation.
attention = Dense(200, activation='tanh')(SevenLstm) #200
attention = Dense(1, bias=False)(attention)
attention = Flatten()(attention)
attention = Activation('softmax')(attention)
attention = RepeatVector(400)(attention) #400
attention = Permute([2, 1])(attention)
representation = merge([SevenLstm, attention], mode='mul')
representation = Lambda(lambda xin: K.sum(xin, axis=1))(representation)
SevenDense = Dense(200, activation='tanh', name='SevenRepre')(representation) #200
SevenOutput = Dense(7, activation='softmax')(SevenDense)
model5 = Model(inputs=_input, outputs=SevenOutput)
model5.summary()
# Transfer the learned weights from all four auxiliary models.
model5.get_layer(name='SevenLstm1').set_weights(w1)
model5.get_layer(name='SevenLstm2').set_weights(w2)
model5.get_layer(name='SevenLstm3').set_weights(w3)
model5.get_layer(name='SevenLstm4').set_weights(w4)
model5.compile(loss='categorical_crossentropy', optimizer=adamC, metrics=['accuracy', PCC])
if ClassWeights == 'True' :
    if UsageOfData == 'train-18' or UsageOfData == 'train' :
        model5.fit(XTrain18, YTrainSeven, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevSeven), callbacks=cb5, verbose=1, class_weight=seven_class_weight)
    else :
        model5.fit(XTrain, YTrainSeven, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevSeven), callbacks=cb5, verbose=1, class_weight=seven_class_weight)
else :
    if UsageOfData == 'train-18' or UsageOfData == 'train' :
        model5.fit(XTrain18, YTrainSeven, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevSeven), callbacks=cb5, verbose=1)
    else :
        model5.fit(XTrain, YTrainSeven, batch_size=batch_size, epochs=200, validation_data=(XDev18, YDevSeven), callbacks=cb5, verbose=1)
#--------------predict--------------
# Restore each sub-model's best checkpoint, then predict on the test set.
predict = [[], [], [], [], []]
for _idx, _model in enumerate([model1, model2, model3, model4, model5]):
    _model.load_weights('./saveModel/' + modelName[_idx] + '.hdf5')
    predict[_idx] = _model.predict(XTest18, batch_size=batch_size)
# Save the seven-class probability distributions for later ensembling.
np.savetxt('./predict/systemPredictProbability_Data_'+ UsageOfData + '_Embedding_' + Embedding + '_ClassWeights_' + ClassWeights + '_LexiconsFeatures_' + LexiconsFeatures + '.txt', predict[4])
#--------------Metric--------------
# One confusion matrix per sub-model (rows = gold class, cols = predicted).
matrix = [np.zeros((3, 3)), np.zeros((4, 4)), np.zeros((2, 2)), np.zeros((4, 4)), np.zeros((7, 7))]
pred_str = []  # seven-class predictions as strings shifted back to [-3, 3]
pred = []  # same predictions as ints
#Calculate confusion matrix
for l in range(len(predict)) :
    # Pick the gold one-hot targets matching sub-model l.
    if l == 0 :
        Target = YTestThree
    elif l == 1 :
        Target = YTestNegative
    elif l == 2 :
        Target = YTestNeutral
    elif l == 3 :
        Target = YTestPositive
    elif l == 4 :
        Target = YTestSeven
    for i, (tar, Label) in enumerate( zip(Target, predict[l]) ) :
        m = np.max(Label)
        # Scan for the argmax class j of the predicted distribution
        # (first index matching the max value).
        for j, value in enumerate(Label) :
            if value == m :
                if l == 4 :
                    # Seven-class model: also record the final prediction.
                    pred_str.append(str(j - 3))
                    pred.append(int(j - 3))
                # Gold class k is the index of the 1 in the one-hot vector.
                for k, num in enumerate(tar) :
                    if num == 1 :
                        matrix[l][k][j] += 1
                        break
                break
#--------------Save Predict--------------
f = open('./predict/systemPredict_Data_'+ UsageOfData + '_Embedding_' + Embedding + '_ClassWeights_' + ClassWeights + '_LexiconsFeatures_' + LexiconsFeatures + '.txt', 'w')
f.write('\n'.join(pred_str))
f.close()
#------------------------------------------------------------------------------------------------------------
# Per-task metrics, computed uniformly from each confusion matrix:
#   per-class recall  = diagonal / row sum
#   average recall    = mean of per-class recalls (macro average)
#   accuracy          = trace / number of test samples
# This replaces five hand-expanded copies of the same arithmetic; the printed
# values and their order are unchanged.
average_recall = [np.zeros((3)), np.zeros((4)), np.zeros((2)), np.zeros((4)), np.zeros((7))]
ar = [0, 0, 0, 0, 0]
acc = [0, 0, 0, 0, 0]
# Test-set sizes, in the same task order as `matrix`.
test_sizes = [len(YTestThree), len(YTestNegative), len(YTestNeutral), len(YTestPositive), len(YTestSeven)]
for i in range(len(matrix)) :
    # Row sums are the per-class support; the diagonal holds correct predictions.
    average_recall[i] = matrix[i].diagonal() / matrix[i].sum(axis=1)
    ar[i] = average_recall[i].mean()
    acc[i] = np.trace(matrix[i]) / test_sizes[i]
    print('--------------------------------'+ modelName[i] +'---------------------------------------')
    print('Average Recall : ', ar[i])
    print('Acc. : ', acc[i])
#------------------------------------------------------------------------------------------------------------
# Pearson correlation between the seven-way predictions (`pred`, ints -3..3)
# and the gold scores.  NOTE(review): assumes YTest18 is a 1-D numeric array
# aligned with `pred` — confirm against where it is loaded.
print('--------------------------------------------------------------------------')
print('pearson correlation coefficient')
# numpy broadcasting converts the plain Python list `pred` when combined
# with the np.float64 mean below.
pred_mean = np.mean(pred, axis=0)
label_mean = np.mean(YTest18, axis=0)
print('pred_mean = ', pred_mean)
print('label_mean = ', label_mean)
# Unnormalised covariance: sum of element-wise products of the centred series.
covariance = np.sum(np.dot(pred-pred_mean, YTest18-label_mean))
print('covariance = ', covariance)
standard_deviation_pred = np.sqrt(np.sum(np.power(pred-pred_mean, 2)))
standard_deviation_label = np.sqrt(np.sum(np.power(YTest18-label_mean, 2)))
print('standard_deviation_pred = ', standard_deviation_pred)
print('standard_deviation_label = ', standard_deviation_label)
pearson = covariance / (standard_deviation_pred * standard_deviation_label)
print('pearson = ', pearson)
| [
"noreply@github.com"
] | zy-gao.noreply@github.com |
7b0b29af11b03bd65e8653c0064728abc6eb399b | 17aa8cce13452e55572de46acfa9c97eedf01a4c | /agents/dqn.py | 53b3d4e226d35fe740d5342499f0e3d2def50184 | [] | no_license | tocom242242/deep_qlearning_sample | b19992ec74df779405dda9d68d563fad82160711 | a930fc140d2acbd4ad9f340c5cde5c31619fcff3 | refs/heads/master | 2020-05-03T21:28:21.349648 | 2019-04-13T02:44:31 | 2019-04-13T02:44:31 | 178,824,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,050 | py | import tensorflow as tf
import numpy as np
from copy import deepcopy
from agents.network import Network
from abc import ABCMeta, abstractmethod
from collections import deque, namedtuple
class Agent(metaclass=ABCMeta):
    """Abstract base class for reinforcement-learning agents.

    Subclasses must implement act(), get_reward() and observe().
    """
    def __init__(self, id=None, name=None, training=None, policy=None):
        self.id = id
        self.name = name
        # When truthy, learning updates are applied (see subclasses).
        self.training = training
        # Action-selection policy object (e.g. epsilon-greedy).
        self.policy = policy
        # Rewards received so far, appended by get_reward().
        self.reward_history = []

    @abstractmethod
    def act(self):
        """Choose and return an action."""
        pass

    @abstractmethod
    def get_reward(self, reward):
        """Receive the reward for the last action."""
        pass

    @abstractmethod
    def observe(self, next_state):
        """Record the next observation from the environment."""
        pass
class DQNAgent(Agent):
    """
    DQN agent (Deep Q-Network, TF1 graph-mode implementation).

    Maintains an online Q-network and a target network; experiences are
    stored in `memory` and replayed in minibatches.
    """
    def __init__(self, gamma=0.99, alpha_decay_rate=0.999, actions=None, memory=None, memory_interval=1,train_interval=1,
                 batch_size=32, update_interval=10, nb_steps_warmup=100, observation=None,
                 input_shape=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.actions = actions
        self.gamma = gamma  # discount factor for future rewards
        self.state = observation
        self.alpha_decay_rate = alpha_decay_rate
        self.recent_observation = observation
        self.update_interval = update_interval  # steps between target-network syncs
        self.memory = memory  # replay buffer (append/sample interface)
        self.memory_interval = memory_interval
        self.batch_size = batch_size
        self.recent_action_id = 0
        self.nb_steps_warmup = nb_steps_warmup  # steps before training starts
        self.sess = tf.InteractiveSession()
        self.net = Network(self.sess)
        # Online network and target network share the same architecture.
        self.model_inputs, self.model_outputs, self.model_max_outputs, self.model = self.build_model(input_shape, len(self.actions))
        self.target_model_inputs, self.target_model_outputs, self.target_model_max_outputs, self.target_model= self.build_model(input_shape, len(self.actions))
        target_model_weights = self.target_model.trainable_weights
        model_weights = self.model.trainable_weights
        # Assign ops that copy online weights into the target network.
        self.update_target_model = [target_model_weights[i].assign(model_weights[i]) for i in range(len(target_model_weights))]
        self.train_interval = train_interval
        self.step = 0

    def build_model(self, input_shape, nb_output):
        """Build a 3x16-unit MLP mapping a state to one Q-value per action.

        Returns (input placeholder, Q-value outputs, max-Q outputs, keras model).
        """
        model = tf.keras.models.Sequential()
        inputs = tf.placeholder(dtype=tf.float32, shape = [None,]+input_shape, name="input")
        model.add(tf.keras.layers.Dense(16, activation="relu", input_shape =[None,]+input_shape))
        model.add(tf.keras.layers.Dense(16, activation="relu"))
        model.add(tf.keras.layers.Dense(16, activation="relu"))
        model.add(tf.keras.layers.Dense(nb_output))
        outputs = model(inputs)
        max_outputs = tf.reduce_max(outputs, reduction_indices=1)
        return inputs, outputs, max_outputs, model

    def compile(self, optimizer=None):
        """Build the training ops: masked Q prediction, clipped loss, optimizer."""
        self.targets = tf.placeholder(dtype=tf.float32, shape=[None, len(self.actions)], name="target_q")
        self.inputs= tf.placeholder(dtype=tf.int32, shape=[None], name="action")
        # One-hot mask so only the taken action's Q-value contributes.
        mask = tf.one_hot(indices=self.inputs, depth=len(self.actions), on_value=1.0, off_value=0.0, name="action_one_hot")
        self.pred_q = tf.multiply(self.model_outputs, mask)
        self.delta = tf.pow(self.targets - self.pred_q, 2)
        # huber loss
        # NOTE(review): the huber transform is applied to the SQUARED error
        # (delta is already (target - pred)^2), not the raw TD error — verify
        # this is intended.
        self.clipped_error = tf.where(self.delta < 1.0,
                                      0.5 * tf.square(self.delta),
                                      self.delta - 0.5, name="clipped_error")
        self.loss = tf.reduce_mean(self.clipped_error, name="loss")
        if optimizer is None:
            optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
        else:
            optimizer = optimizer
        self.train = optimizer.minimize(self.loss)
        self.sess.run(tf.global_variables_initializer())

    def update_target_model_hard(self):
        """ copy q-network to target network """
        self.sess.run(self.update_target_model)

    def train_on_batch(self, state_batch, action_batch, targets):
        """Run one gradient step on a replay minibatch."""
        self.sess.run(self.train, feed_dict={self.model_inputs:state_batch, self.inputs:action_batch, self.targets:targets})

    def predict_on_batch(self, state1_batch):
        """Return max_a Q_target(s', a) for each next state in the batch."""
        q_values = self.sess.run(self.target_model_max_outputs, feed_dict={self.target_model_inputs:state1_batch})
        return q_values

    def compute_q_values(self, state):
        """Return the online network's Q-values for a single state."""
        q_values = self.sess.run(self.model_outputs, feed_dict={self.model_inputs:[state]})
        return q_values[0]

    def get_reward(self, reward, terminal):
        """Record the reward, learn from it when training, and advance the step."""
        self.reward_history.append(reward)
        if self.training:
            self._update_q_value(reward, terminal)
            self.policy.decay_eps_rate()
        self.step += 1

    def _update_q_value(self, reward, terminal):
        self.backward(reward, terminal)

    def backward(self, reward, terminal):
        """Store the transition and, past warmup, replay a minibatch (DQN update)."""
        if self.step % self.memory_interval == 0:
            """ store experience """
            self.memory.append(self.recent_observation, self.recent_action_id, reward, terminal=terminal, training=self.training)

        if (self.step > self.nb_steps_warmup) and (self.step % self.train_interval == 0):
            experiences = self.memory.sample(self.batch_size)
            state0_batch = []
            reward_batch = []
            action_batch = []
            state1_batch = []
            terminal_batch = []
            for e in experiences:
                state0_batch.append(e.state0)
                state1_batch.append(e.state1)
                reward_batch.append(e.reward)
                action_batch.append(e.action)
                # 0 for terminal transitions so their future reward is zeroed below.
                terminal_batch.append(0. if e.terminal else 1.)

            reward_batch = np.array(reward_batch)
            target_q_values = np.array(self.predict_on_batch(state1_batch))    # compute maxQ'(s')
            targets = np.zeros((self.batch_size, len(self.actions)))
            discounted_reward_batch = (self.gamma * target_q_values)
            discounted_reward_batch *= terminal_batch
            Rs = reward_batch + discounted_reward_batch  # target = r + γ maxQ'(s')
            for idx, (target, R, action) in enumerate(zip(targets, Rs, action_batch)):
                target[action] = R
            self.train_on_batch(state0_batch, action_batch, targets)

        if self.step % self.update_interval == 0:
            """ update target network """
            self.update_target_model_hard()

    def act(self):
        """Pick an action id via forward() and translate it to an action."""
        action_id = self.forward()
        action = self.actions[action_id]
        return action

    def forward(self):
        """Select an action id for the current observation via the policy."""
        state = self.recent_observation
        q_values = self.compute_q_values(state)
        if self.training:
            action_id = self.policy.select_action(q_values=q_values)
        else:
            action_id = self.policy.select_greedy_action(q_values=q_values)
        self.recent_action_id = action_id
        return action_id

    def observe(self, next_state):
        self.recent_observation = next_state

    def reset(self):
        self.recent_observation = None
        self.recent_action_id = None
| [
"tcom242242@gmail.com"
] | tcom242242@gmail.com |
beda4e4ace5c63142acbc11fa23e11d9014908d4 | dac3134e67e5b28a4f46ed93d2485781d9765d2e | /record2app/migrations/0012_auto_20180424_1503.py | e03d804aa9632ac72d2e87d3aa515e85fb8f2f8b | [] | no_license | cyhsieh/record2 | 3a5974a552e904129966f9f9458e0b1579d84789 | 505e8b024851bdd4d2e523bc7fd59632e33b91c5 | refs/heads/master | 2021-01-24T12:41:54.338971 | 2018-05-30T17:20:58 | 2018-05-30T17:20:58 | 123,147,607 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | # Generated by Django 2.0.4 on 2018-04-24 15:03
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the TobuyItem model and updates Record.purch_date.

    Do not edit the schema operations of an applied migration.
    """

    dependencies = [
        ('record2app', '0011_auto_20180415_1611'),
    ]

    operations = [
        migrations.CreateModel(
            name='TobuyItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('itemname', models.CharField(max_length=20, verbose_name='名稱')),
                ('budget', models.IntegerField(verbose_name='預算')),
                ('addtime', models.DateTimeField(auto_now_add=True, verbose_name='加入時間')),
            ],
        ),
        migrations.AlterField(
            model_name='record',
            name='purch_date',
            # Fixed default date captured when the migration was generated.
            field=models.DateField(default=datetime.date(2018, 4, 24), verbose_name='日期'),
        ),
    ]
| [
"aabbabc12345@gmail.com"
] | aabbabc12345@gmail.com |
252daa037db6199516554602023f04a582b048e1 | 6c69a10294fe1b271fe1c1a36d61e326e184a93a | /nnet/101_from_scratch.py | c0aa207044117a3550b1eb6f8e0591e09ad03167 | [] | no_license | rahasayantan/Investigaciones | 95d18c106e34829b9c936b5ae50ad87fec533076 | be4d09430665addf29889bc881c75cf113d056ce | refs/heads/master | 2021-01-01T15:52:11.778378 | 2017-04-19T21:02:36 | 2017-04-19T21:02:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | #
# A Neural Network in 11 lines of Python (Part 1)
# http://iamtrask.github.io/2015/07/12/basic-python-network/
#
import numpy as np
# sigmoid function
def nonlin(x, deriv=False):
    """Sigmoid activation (element-wise; works on scalars and numpy arrays).

    With deriv=False, returns sigmoid(x) = 1 / (1 + e^-x).
    With deriv=True, x is assumed to ALREADY be a sigmoid output s and the
    derivative s * (1 - s) is returned — this is how the training loop below
    uses it.
    """
    if deriv:  # idiomatic truth test instead of '== True'
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
# input dataset: four 3-bit examples (last column is a constant bias input)
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])

# output dataset: target equals the first input column
y = np.array([[0,
               0,
               1,
               1]]).T

# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)

# initialize weights randomly with mean 0, in [-1, 1)
syn0 = 2 * np.random.random((3, 1)) - 1

# NOTE: 'iter' shadows the builtin; kept as-is from the tutorial source.
for iter in range(10000):
    # forward propagation
    l0 = X
    l1 = nonlin(np.dot(l0, syn0))

    # how much did we miss?
    l1_error = y - l1

    # multiply how much we missed by the
    # slope of the sigmoid at the values in l1
    l1_delta = l1_error * nonlin(l1, True)
    # debug trace: prints the delta on every one of the 10000 iterations
    print(l1_delta)

    # update weights (gradient step, learning rate 1)
    syn0 += np.dot(l0.T, l1_delta)

print("Output After Training:")
print(l1)
"ruoho.ruotsi@gmail.com"
] | ruoho.ruotsi@gmail.com |
3d41cdce890ba47e72e799ab7ba5db2afbf380d3 | a0dc38f2f9cd7f8b1584aba8d7814803da1f7a95 | /tmlanguage_2_json/tmlanguage_2_json.py | c6bc2b1670e1e883d676b69ce0ec92ffa5277d5a | [] | no_license | jfthuong/vscode-ocaml-tools | ce8114426b9cf18090f9ba5fd59159343470533b | bf62acb4d321989ce96ca6849194f35d1b3f3366 | refs/heads/master | 2022-04-22T01:07:43.488761 | 2020-04-19T14:38:13 | 2020-04-19T14:38:13 | 257,020,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,866 | py | from argparse import ArgumentParser
import json
from pathlib import Path
from pprint import pprint
import logging
from typing import Any, Dict, Generator, List, Tuple, Union
from xml.etree import ElementTree
try:
import yaml
except ImportError:
print("WARNING: 'yaml' module not found. Cannot export to YAML")
# TODO: improve typing for Grammar (find out how to do cyclic types)
Value_Dict = Dict[str, Any]
Value = Union[str, List[Value_Dict], Value_Dict]
Grammar = Dict[str, Any]
Element = ElementTree.Element
class GrammarError(Exception):
    """Raised when a .tmLanguage plist does not match the expected schema."""
class Parser:
def __init__(self, tml_path: Path):
self.tml = tml_path
self.grammar: Grammar = dict()
@classmethod
def parse_string(cls, string_r: Element, location: str = None) -> str:
"""Parse a string"""
loc = f" (in {location})" if location else ""
if not string_r.text:
raise GrammarError(f"Tag '{string_r.tag}' has no text{loc}")
return string_r.text
@classmethod
def parse_array(cls, array_r: Element, location: str = None) -> List[Any]:
"""Parse an array"""
loc = f" (in {location})" if location else ""
list_values: List[Value] = list()
for child in array_r:
if child.tag == "string":
list_values.append(cls.parse_string(child))
elif child.tag == "dict":
list_values.append(cls.parse_dict(child))
elif child.tag == "array":
list_values.append(cls.parse_array(child))
else:
raise GrammarError(f"Tag '{child.tag}' not supported in arrays{loc}")
return list_values
@classmethod
def parse_dict(cls, dict_r: Element, location: str = None) -> Dict[str, Any]:
"""Parse a dictionary"""
loc = f" (in {location})" if location else ""
def get_key_val(e: Element) -> Generator[Tuple[str, Value], None, None]:
children = iter(e)
while True:
# We will check that we have a key with text
# and either a value, an array, or (maybe) a dict
key_xml = next(children)
k_tag = key_xml.tag
if k_tag != "key":
raise GrammarError(f"Key {k_tag} shall be 'key'")
key = cls.parse_string(key_xml)
value_xml = next(children)
v_tag = value_xml.tag
value: Value
if v_tag == "string":
value = cls.parse_string(value_xml, location=f"key {key}")
elif v_tag == "array":
value = cls.parse_array(value_xml, location=f"key {key}")
elif v_tag == "dict":
value = cls.parse_dict(value_xml, location=f"key {key}")
else:
raise GrammarError(f"Incorrect value type ({v_tag}) in {key}")
yield key, value
grammar: Grammar = dict()
try:
for key, value in get_key_val(dict_r):
grammar[key] = value
except (RuntimeError, StopIteration):
pass
return grammar
def parse(self) -> "Parser":
"""Convert a tmLanguage using XML syntax to a dictionary"""
with self.tml.open() as f:
tree = ElementTree.parse(f)
root_dict = tree.getroot().find("./dict")
if root_dict is None:
raise GrammarError(f"No Top-Level Dictionary found in {self.tml}")
try:
self.grammar = self.parse_dict(root_dict)
except GrammarError as e:
logging.error(f"Error while parsing TextMate Grammar {self.tml}: {e}")
print(f"Finished parsing TextMate Grammar {self.tml}")
return self
def to_json(self, path: Path) -> "Parser":
"""Export to JSON"""
with path.open("w") as f:
json.dump(self.grammar, f)
print(f"Exported TextMate Grammar to JSON in {path}")
return self
def to_yaml(self, path: Path) -> "Parser":
"""Export to YAML"""
with path.open("w") as f:
yaml.dump(self.grammar, f)
print(f"Exported TextMate Grammar to YAML in {path}")
return self
def main(tml_path: Union[str, Path], out_path: str = None, to_json=True, to_yaml=False):
    """Export a TextMate to the desired format.

    tml_path: input .tmLanguage (XML plist) file.
    out_path: optional output base path; its suffix is replaced with .json
              (and .yaml when to_yaml is set).  Defaults to the input name.
    """
    # xml_path: Path = Path(__file__).parent.absolute() / tml_path # to CHDIR
    xml_path = Path(tml_path)
    parser = Parser(xml_path).parse()
    if out_path is None:
        # e.g. foo.tmLanguage -> foo.tmLanguage.json
        json_path = xml_path.with_suffix(".tmLanguage.json")
    else:
        json_path = Path(out_path).with_suffix(".json")
    if to_json:
        parser.to_json(json_path)
    if to_yaml:
        parser.to_yaml(json_path.with_suffix(".yaml"))
if __name__ == "__main__":
    descr = (
        "Program to convert a TextMate Grammar .tmLanguage (XML flavor) "
        "to either JSON format or YAML form."
    )
    usage = "If no format is specified, JSON output format is selected"
    p = ArgumentParser("tmlanguage_2_json", description=descr, epilog=usage)
    p.add_argument("xml_path", help="Path to XML .tmLanguage")
    p.add_argument("--json", "-j", action="store_true", help="Store to JSON")
    p.add_argument("--yaml", "-y", action="store_true", help="Store to YAML")
    p.add_argument(
        "--output", "-o", help="Path to output file (default: based on input file)"
    )
    args = p.parse_args()

    # BUG FIX: honour the --json/--yaml flags.  Previously both locals were
    # hard-coded to False (args.json/args.yaml were never read), so --yaml
    # silently did nothing.  The 'usgage' typo variable is now used as epilog.
    to_json, to_yaml = args.json, args.yaml
    if not to_json and not to_yaml:
        to_json = True  # default to JSON when neither flag is given
    main(args.xml_path, args.output, to_json, to_yaml)
| [
"jfthuong@gmail.com"
] | jfthuong@gmail.com |
08b0a728944265f677ec74dadd71c4ada25f038e | c86cd75be4f5b4eef605fb0f40743406ae19685f | /asdl/typed_arith_parse.py | 3153714975fcab1c9e004bd1f6229ff6f08b8ae4 | [
"Apache-2.0"
] | permissive | jyn514/oil | 3de53092c81e7f9129c9d12d51a8dfdbcacd397b | 42adba6a1668ff30c6312a6ce3c3d1f1acd529ec | refs/heads/master | 2022-02-23T08:12:48.381272 | 2019-03-15T08:54:31 | 2019-03-15T08:54:31 | 176,316,917 | 0 | 0 | Apache-2.0 | 2019-03-18T15:36:14 | 2019-03-18T15:36:13 | null | UTF-8 | Python | false | false | 8,508 | py | #!/usr/bin/env python
"""
typed_arith_parse.py: Parse shell-like and C-like arithmetic.
"""
from __future__ import print_function
import sys
from _devbuild.gen.typed_arith_asdl import (
arith_expr, arith_expr_e, arith_expr_t,
arith_expr__Binary, arith_expr__FuncCall, arith_expr__Const)
from typing import Dict, List, Optional, Union, cast
from asdl import tdop
from asdl.tdop import Parser
from asdl.tdop import ParserSpec
Token = tdop.Token
#
# Null Denotation -- token that takes nothing on the left
#
def NullConstant(p,  # type: Parser
                 token,  # type: Token
                 bp,  # type: int
                 ):
  # type: (...) -> arith_expr_t
  """Nud for leaf tokens: integer literal or variable name."""
  if token.type == 'number':
    return arith_expr.Const(int(token.val))
  # We have to wrap a string in some kind of variant.
  if token.type == 'name':
    return arith_expr.Var(token.val)
  raise AssertionError(token.type)
def NullParen(p,  # type: Parser
              token,  # type: Token
              bp,  # type: int
              ):
  # type: (...) -> arith_expr_t
  """ Arithmetic grouping: '(' expr ')' — returns the inner expression. """
  r = p.ParseUntil(bp)
  p.Eat(')')
  return r
def NullPrefixOp(p, token, bp):
  # type: (Parser, Token, int) -> arith_expr_t
  """Prefix operator.

  Low precedence: return, raise, etc.
    return x+y is return (x+y), not (return x) + y

  High precedence: logical negation, bitwise complement, etc.
    !x && y is (!x) && y, not !(x && y)
  """
  r = p.ParseUntil(bp)
  return arith_expr.Unary(token.val, r)
def NullIncDec(p, token, bp):
  # type: (Parser, Token, int) -> arith_expr_t
  """ Pre-increment/decrement: ++x or ++x[1].  Only lvalues are allowed. """
  right = p.ParseUntil(bp)
  if not isinstance(right, (arith_expr.Var, arith_expr.Index)):
    raise tdop.ParseError("Can't assign to %r" % right)
  return arith_expr.Unary(token.val, right)
#
# Left Denotation -- token that takes an expression on the left
#
def LeftIncDec(p,  # type: Parser
               token,  # type: Token
               left,  # type: arith_expr_t
               rbp,  # type: int
               ):
  # type: (...) -> arith_expr_t
  """ Post-increment/decrement: i++ and i--.  Only lvalues are allowed. """
  if not isinstance(left, (arith_expr.Var, arith_expr.Index)):
    raise tdop.ParseError("Can't assign to %r" % left)
  # Distinguish postfix from prefix by rewriting the token type in place.
  token.type = 'post' + token.type
  return arith_expr.Unary(token.val, left)
def LeftIndex(p, token, left, unused_bp):
  # type: (Parser, Token, arith_expr_t, int) -> arith_expr_t
  """ Indexing f[x+1] and slicing f[a:b] — only variables can be indexed. """
  # f[x] or f[x][y]
  if not isinstance(left, arith_expr.Var):
    raise tdop.ParseError("%s can't be indexed" % left)
  index = p.ParseUntil(0)
  if p.AtToken(':'):
    p.Next()
    end = p.ParseUntil(0)  # type: Union[arith_expr_t, None]
  else:
    end = None

  p.Eat(']')

  # TODO: support a step as well (both end and step optional):
  #   1:4
  #   1:4:2
  # Currently Slice's step field is always None.
  if end:
    return arith_expr.Slice(left, index, end, None)
  else:
    return arith_expr.Index(left, index)
def LeftTernary(p,  # type: Parser
                token,  # type: Token
                left,  # type: arith_expr_t
                bp,  # type: int
                ):
  # type: (...) -> arith_expr_t
  """ Ternary conditional, e.g. a > 1 ? x : y (left is the condition). """
  true_expr = p.ParseUntil(bp)
  p.Eat(':')
  false_expr = p.ParseUntil(bp)
  return arith_expr.Ternary(left, true_expr, false_expr)
def LeftBinaryOp(p,  # type: Parser
                 token,  # type: Token
                 left,  # type: arith_expr_t
                 rbp,  # type: int
                 ):
  # type: (...) -> arith_expr__Binary
  """ Normal binary operator like 1+2 or 2*3, etc. """
  return arith_expr.Binary(token.val, left, p.ParseUntil(rbp))
def LeftAssign(p,  # type: Parser
               token,  # type: Token
               left,  # type: arith_expr_t
               rbp,  # type: int
               ):
  # type: (...) -> arith_expr__Binary
  """ Assignment operators (=, +=, ...); the left side must be an lvalue. """
  # x += 1, or a[i] += 1
  if not isinstance(left, (arith_expr.Var, arith_expr.Index)):
    raise tdop.ParseError("Can't assign to %r" % left)
  node = arith_expr.Binary(token.val, left, p.ParseUntil(rbp))
  # For TESTING spid (source position id) propagation only.
  node.spids.append(42)
  node.spids.append(43)
  return node
# For overloading of , inside function calls: arguments are parsed at this
# precedence so ',' acts as a separator, not a sequence operator.
COMMA_PREC = 1

def LeftFuncCall(p, token, left, unused_bp):
  # type: (Parser, Token, arith_expr_t, int) -> arith_expr__FuncCall
  """ Function call f(a, b).  Only a plain variable can be called. """
  args = []
  # f(x) or f[i](x)
  if not isinstance(left, arith_expr.Var):
    raise tdop.ParseError("%s can't be called" % left)
  func_name = left.name  # get a string

  while not p.AtToken(')'):
    # We don't want to grab the comma, e.g. it is NOT a sequence operator. So
    # set the precedence to 5.
    args.append(p.ParseUntil(COMMA_PREC))
    if p.AtToken(','):
      p.Next()
  p.Eat(")")
  return arith_expr.FuncCall(func_name, args)
def MakeShellParserSpec():
  # type: () -> ParserSpec
  """
  Create a parser spec registering nud/led handlers per token and precedence.

  Compare the code below with this table of C operator precedence:
  http://en.cppreference.com/w/c/language/operator_precedence
  """
  spec = tdop.ParserSpec()

  spec.Left(31, LeftIncDec, ['++', '--'])
  spec.Left(31, LeftFuncCall, ['('])
  spec.Left(31, LeftIndex, ['['])

  # 29 -- binds to everything except function call, indexing, postfix ops
  spec.Null(29, NullIncDec, ['++', '--'])
  spec.Null(29, NullPrefixOp, ['+', '!', '~', '-'])

  # Right associative: 2 ** 3 ** 2 == 2 ** (3 ** 2)
  spec.LeftRightAssoc(27, LeftBinaryOp, ['**'])
  spec.Left(25, LeftBinaryOp, ['*', '/', '%'])

  spec.Left(23, LeftBinaryOp, ['+', '-'])
  spec.Left(21, LeftBinaryOp, ['<<', '>>'])
  spec.Left(19, LeftBinaryOp, ['<', '>', '<=', '>='])
  spec.Left(17, LeftBinaryOp, ['!=', '=='])

  spec.Left(15, LeftBinaryOp, ['&'])
  spec.Left(13, LeftBinaryOp, ['^'])
  spec.Left(11, LeftBinaryOp, ['|'])
  spec.Left(9, LeftBinaryOp, ['&&'])
  spec.Left(7, LeftBinaryOp, ['||'])

  spec.LeftRightAssoc(5, LeftTernary, ['?'])

  # Right associative: a = b = 2 is a = (b = 2)
  spec.LeftRightAssoc(3, LeftAssign, [
      '=',
      '+=', '-=', '*=', '/=', '%=',
      '<<=', '>>=', '&=', '^=', '|='])

  spec.Left(COMMA_PREC, LeftBinaryOp, [','])

  # 0 precedence -- doesn't bind until )
  spec.Null(0, NullParen, ['('])  # for grouping

  # -1 precedence -- never used
  spec.Null(-1, NullConstant, ['name', 'number'])
  spec.Null(-1, tdop.NullError, [')', ']', ':', 'eof'])

  return spec
def MakeParser(s):
  # type: (str) -> Parser
  """Used by tests: build a Parser over the tokenized input string."""
  spec = MakeShellParserSpec()
  lexer = tdop.Tokenize(s)
  p = tdop.Parser(spec, lexer)
  return p
def ParseShell(s, expected=None):
  # type: (str, Optional[str]) -> arith_expr_t
  """Used by tests: parse s; optionally assert the tree's repr equals expected."""
  p = MakeParser(s)
  tree = p.Parse()

  sexpr = repr(tree)
  if expected is not None:
    assert sexpr == expected, '%r != %r' % (sexpr, expected)

  #print('%-40s %s' % (s, sexpr))
  return tree
class Evaluator(object):
  """Demo evaluator: two equivalent dispatch styles over the ASDL sum type."""

  def __init__(self):
    # type: () -> None
    # Variable store (unused by the arithmetic below, reserved for Var nodes).
    self.mem = {}  # type: Dict[str, int]

  def Eval(self, node):
    # type: (arith_expr_t) -> int
    """Use the isinstance() style for comparison."""

    if isinstance(node, arith_expr__Const):
      assert node.i is not None
      return node.i

    if isinstance(node, arith_expr__Binary):
      assert node.left is not None
      assert node.right is not None
      left = self.Eval(node.left)
      right = self.Eval(node.right)
      op = node.op
      if op == '+':
        return left + right

    # NOTE(review): placeholder fallback — any unhandled node/op evaluates to 3.
    return 3

  def Eval2(self, node):
    # type: (arith_expr_t) -> int
    """Tag-dispatch style: switch on node.tag with explicit casts."""
    tag = node.tag
    if tag == arith_expr_e.Const:
      n = cast(arith_expr__Const, node)
      assert n.i is not None
      return n.i

    if tag == arith_expr_e.Binary:
      n2 = cast(arith_expr__Binary, node)
      assert n2.left is not None
      assert n2.right is not None
      left = self.Eval(n2.left)
      right = self.Eval(n2.right)
      op = n2.op
      if op == '+':
        return left + right

    # NOTE(review): same placeholder fallback as Eval().
    return 3
def main(argv):
  # type: (List[str]) -> int
  """CLI entry point: parse EXPRESSION and either print or evaluate it.

  Returns a process exit code: 0 on success, 2 on usage or parse errors.
  """
  try:
    action = argv[1]
    s = argv[2]
  except IndexError:
    print('Usage: ./arith_parse.py ACTION EXPRESSION')
    return 2

  try:
    node = ParseShell(s)
  except tdop.ParseError as e:
    print('Error parsing %r: %s' % (s, e), file=sys.stderr)
    # BUG FIX: previously execution fell through after the error message and
    # hit a NameError on the undefined 'node'; bail out with an error code.
    return 2

  if action == 'parse':
    print(node)
  elif action == 'eval':
    ev = Evaluator()
    result = ev.Eval(node)
    print(node)
    print(' => ')
    print(result)
  else:
    print('Invalid action %r' % action)
    return 2

  return 0
| [
"andy@oilshell.org"
] | andy@oilshell.org |
7ceceed258eb306cbc6fee57056ca756971ba8da | df1cb33bfe99a1e72cf75931749163b7c8731757 | /stages/stage3.py | 012d626c02d661dbc7a2f17848fc0e501c06bcb9 | [] | no_license | orf/wikilink_py | 2d6ae9dd64264fdf17995980ed8a4a960c199c5b | 6643397e220970a93dab1e50e120748bfdc3bf19 | refs/heads/master | 2021-01-22T11:55:16.906965 | 2014-01-08T20:49:38 | 2014-01-08T20:49:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,415 | py | from lib.progress import run_with_progressbar
from lib.formatters.Neo4jFormatter import Neo4jFormatter
from lib.formatters.CSVFormatter import MultiCSVFormatter
import functools
import os
import logging
import sys
import itertools
import __pypy__
import json
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
STAGE3_TITLES_TO_ID = {}
STAGE3_ID_TO_DATA = {}
FLAG_REDIRECT = 1
FLAG_SEEN = 2
def handle_stage1_line(line):
    """Parse one 'id|title|is_redirect' line from stage1.csv into the two
    module-level lookup tables (title -> id and id -> (title, flags))."""
    # There is one page in stage1.csv whose title is a unicode NEXT_LINE character (\x85).
    # As such we have to decode each line individually (Python 2 `unicode`).
    # https://en.wikipedia.org/w/api.php?action=query&prop=info&pageids=28644448&inprop=url
    page_id, page_title, is_redirect = unicode(line.strip("\n"), "utf-8").split("|")
    flags = FLAG_REDIRECT if is_redirect == "1" else 0
    STAGE3_TITLES_TO_ID[page_title] = int(page_id)
    STAGE3_ID_TO_DATA[int(page_id)] = (page_title, flags)
    #yield (page_title, flags), int(page_id)
def get_ids_from_titles(titles_list, get_none=False):
    """
    Map a list of page titles to their integer IDs.

    Unknown titles resolve to 0 (a falsy pseudo-ID, NOT None).  With the
    default get_none=False, unknown titles are silently dropped; with
    get_none=True the 0 placeholders are kept so the output stays aligned
    with the input titles.
    """
    returner = []
    for title in titles_list:
        page_id = STAGE3_TITLES_TO_ID.get(title, 0)
        # BUG FIX: compare by value, not identity — 'x is not 0' only works
        # by accident of small-int caching and is an illegal idiom.
        if page_id != 0 or get_none:
            returner.append(page_id)  # uniform element types may help PyPy dict/list strategies
    return returner
def get_page_data_from_id(page_id, update_seen=True):
    """
    I take a page ID and I return its (title, flags) tuple, or None if the
    ID is unknown.  The flags word carries FLAG_REDIRECT and FLAG_SEEN.

    When update_seen is True, FLAG_SEEN is OR-ed into the stored flags so a
    later lookup can tell this page has already been queried.  Note the
    returned tuple is the PRE-update one.
    """
    p_data = STAGE3_ID_TO_DATA.get(page_id, None)
    if p_data is None:
        return None
    if update_seen:
        STAGE3_ID_TO_DATA[page_id] = (p_data[0], p_data[1] | FLAG_SEEN)
    return p_data
def set_page_redirect(title, to):
    """
    I replace a page title's mapping with the ID of the page it redirects to,
    so future title lookups resolve straight to the redirect target.
    """
    STAGE3_TITLES_TO_ID[title] = to
def delete_page(title, page_id):
    """
    I delete a page from the registries: the title -> ID entry when `title`
    is truthy, and the ID -> data entry when `page_id` is truthy.  Either
    argument may be falsy to skip that side.
    """
    if title:
        del STAGE3_TITLES_TO_ID[title]
    if page_id:
        del STAGE3_ID_TO_DATA[page_id]
def split_page_info(line, update_seen=True, get_none=False, get_links=True):
    """
    I take a 'id|link|link|...' line from Stage2 and return
    (page_id, page_links, page_info), or (None, None, None) when the ID is
    unknown.  With get_links=True the links are resolved to IDs; otherwise
    an iterator of raw link titles is returned.
    """
    line = line.rstrip("\n")
    split_line = line.split("|")
    page_id = int(split_line[0])
    page_info = get_page_data_from_id(page_id, update_seen=update_seen)
    if page_info is None:
        return None, None, None
    # Using islice like this keeps memory down by avoiding creating another list, it also doesn't need a len() call
    # so it might be faster (sys.maxint: this is Python 2 / PyPy code).
    page_links = itertools.islice(split_line, 1, sys.maxint)

    return page_id, get_ids_from_titles(page_links, get_none) if get_links else page_links, page_info
def stage3_pre(line):
    """
    We need to sort out redirects so they point to the correct pages. We do this by
    loading stage2.csv which contains ID|link_title|link_title... and get the ID's of the links.

    For each redirect page: repoint its title at its target's ID and drop its
    own ID entry; if the target can't be resolved, drop the redirect entirely.
    """
    page_id, page_links, page_info = split_page_info(unicode(line, "utf-8"), update_seen=False, get_links=False)
    if page_info and page_info[1] & FLAG_REDIRECT:  # Are we a redirect?
        # get_none=True keeps a falsy 0 placeholder for unresolvable titles.
        page_links = get_ids_from_titles(page_links, True)
        page_title = page_info[0]
        if len(page_links) > 1 and page_links[0]:
            # Point the redirect page to the ID of the page it redirects to
            set_page_redirect(page_title, page_links[0])
            delete_page(None, page_id)
        else:
            # The page we are redirecting to cannot be found, remove the redirect page.
            delete_page(page_title, page_id)
def stage3(line, output_format="neo"):
    """
    Combine the results from the previous stages into a single cohesive file.

    Parses one stage2 line and emits the page plus its outgoing links:
    for output_format="neo" returns (node_row_or_None, links_rows) as
    tab-separated text; otherwise writes an SQL UPDATE (already-seen
    pages) or returns a pipe-delimited CSV row.  Returns None for
    redirects and unknown pages.
    """
    global STAGE3_ROW_COUNTER  # kept for compatibility; not modified here
    page_id, page_links, page_info = split_page_info(unicode(line.strip("\n"), "utf-8"), get_links=False)
    if page_info is None:  # Unknown page (e.g. a redirect collapsed in the pre-pass)
        return None
    page_title, flags = page_info
    # Bug fix: is_redirect was previously referenced on the CSV branch
    # without ever being defined, raising NameError there.
    is_redirect = bool(flags & FLAG_REDIRECT)
    if not is_redirect:
        page_links = get_ids_from_titles(page_links, False)
        if flags & FLAG_SEEN:
            # Already visited this page before, output to an SQL file instead
            if output_format == "neo":
                return None, "\n".join(["%s\t%s" % (page_id, link_id) for link_id in set(page_links)])
            else:
                with open('stage3.sql', 'a') as fd:
                    fd.write("UPDATE pages SET links = uniq(array_cat(links, ARRAY[%s]::integer[])) WHERE id = %s;\n" %
                             (",".join(map(str, set(page_links))), page_id))
        else:
            # CSV output
            # id, title, is_redirect, links_array
            if output_format == "neo":
                return ("%s\t%s\n" % (page_id, page_title)).encode("utf-8"),\
                       "%s\n" % "\n".join(["%s\t%s" % (page_id, link_id) for link_id in set(page_links)])
            else:
                return "%s|%s|%s|{%s}\n" % (page_id, page_title, is_redirect,
                                            ",".join(map(str, set(page_links))))
if __name__ == "__main__":
    # Pass 1: load the page registry (titles -> IDs, IDs -> info) from stage1.
    logger.info("Loading stage1.csv into memory")
    with open("stage1.csv", 'rb', buffering=1024*1024) as csv_fd:
        run_with_progressbar(csv_fd, None, handle_stage1_line, os.path.getsize("stage1.csv"))
    logger.info("Loaded %s/%s page infos. Strategies: %s and %s" % (len(STAGE3_TITLES_TO_ID), len(STAGE3_ID_TO_DATA),
                                                                    __pypy__.dictstrategy(STAGE3_ID_TO_DATA),
                                                                    __pypy__.dictstrategy(STAGE3_TITLES_TO_ID)))
    # Pass 2: collapse redirect pages so titles map straight to target IDs.
    with open("stage2.csv", "rb", buffering=1024*1024) as input_fd:
        run_with_progressbar(input_fd, None, stage3_pre, os.path.getsize("stage2.csv"))
    logger.info("Have %s/%s page infos. Strategies: %s and %s" % (len(STAGE3_TITLES_TO_ID), len(STAGE3_ID_TO_DATA),
                                                                  __pypy__.dictstrategy(STAGE3_ID_TO_DATA),
                                                                  __pypy__.dictstrategy(STAGE3_TITLES_TO_ID)))
    logger.info("Starting dump")
    # Pass 3: emit node and link TSV files for bulk import (neo4j format).
    with open('stage2.csv', "rb", buffering=1024*1024*8) as input_fd: # , encoding="utf-8", buffering=1024*8
        with open('stage3.nodes', mode="wb", buffering=1024*1024*8) as nodes_fd:
            with open('stage3.links', mode="wb", buffering=1024*1024*20) as links_fd:
                formatter = MultiCSVFormatter(((nodes_fd, ("id:int:node_id", "title:string")),
                                               (links_fd, ("id:int:node_id", "id:int:node_id"))))
                run_with_progressbar(input_fd, None,
                                     functools.partial(stage3, output_format="neo"),
                                     os.path.getsize("stage2.csv"),
                                     formatter=formatter)
"tom@tomforb.es"
] | tom@tomforb.es |
92df4a82b4256ff8f683501f22e0c09dbea8b0c0 | b89df6019163d7b18a8ecb4003939f6235b5de85 | /mnist/cnn_mnist.py | 0f8dd40e176c805f08e1a65e10cdad7e16b51923 | [] | no_license | liketheflower/tf_practise | fdd22b608ca7d513a4972497466e3fc7a12762b6 | 2725b52169b2f0044d20b3c33c86485336e65483 | refs/heads/master | 2020-03-19T23:21:16.467649 | 2018-06-19T03:56:07 | 2018-06-19T03:56:07 | 137,003,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,709 | py | #opyright 2016 iThe TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
  """Model function for CNN.

  Builds a two-conv-layer network for 28x28 single-channel MNIST digits
  and returns the tf.estimator.EstimatorSpec for the given mode
  (PREDICT, TRAIN or EVAL).

  Args:
    features: dict whose "x" entry holds a batch of flattened images.
    labels: integer class labels (used in TRAIN/EVAL modes).
    mode: a tf.estimator.ModeKeys value.
  """
  # Input Layer
  # Reshape X to 4-D tensor: [batch_size, width, height, channels]
  # MNIST images are 28x28 pixels, and have one color channel
  input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
  # Convolutional Layer #1
  # Computes 32 features using a 5x5 filter with ReLU activation.
  # Padding is added to preserve width and height.
  # Input Tensor Shape: [batch_size, 28, 28, 1]
  # Output Tensor Shape: [batch_size, 28, 28, 32]
  conv1 = tf.layers.conv2d(
      inputs=input_layer,
      filters=32,
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #1
  # First max pooling layer with a 2x2 filter and stride of 2
  # Input Tensor Shape: [batch_size, 28, 28, 32]
  # Output Tensor Shape: [batch_size, 14, 14, 32]
  pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
  # Convolutional Layer #2
  # Computes 64 features using a 5x5 filter.
  # Padding is added to preserve width and height.
  # Input Tensor Shape: [batch_size, 14, 14, 32]
  # Output Tensor Shape: [batch_size, 14, 14, 64]
  conv2 = tf.layers.conv2d(
      inputs=pool1,
      filters=64,
      kernel_size=[5, 5],
      padding="same",
      activation=tf.nn.relu)
  # Pooling Layer #2
  # Second max pooling layer with a 2x2 filter and stride of 2
  # Input Tensor Shape: [batch_size, 14, 14, 64]
  # Output Tensor Shape: [batch_size, 7, 7, 64]
  pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
  # Flatten tensor into a batch of vectors
  # Input Tensor Shape: [batch_size, 7, 7, 64]
  # Output Tensor Shape: [batch_size, 7 * 7 * 64]
  pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
  # Dense Layer
  # Densely connected layer with 1024 neurons
  # Input Tensor Shape: [batch_size, 7 * 7 * 64]
  # Output Tensor Shape: [batch_size, 1024]
  dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
  # Add dropout operation; 0.6 probability that element will be kept
  # (dropout is only active in TRAIN mode).
  dropout = tf.layers.dropout(
      inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
  # Logits layer
  # Input Tensor Shape: [batch_size, 1024]
  # Output Tensor Shape: [batch_size, 10]
  logits = tf.layers.dense(inputs=dropout, units=10)
  predictions = {
      # Generate predictions (for PREDICT and EVAL mode)
      "classes": tf.argmax(input=logits, axis=1),
      # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
      # `logging_hook`.
      "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
  }
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
  # Calculate Loss (for both TRAIN and EVAL modes)
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  # Configure the Training Op (for TRAIN mode)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(
        loss=loss,
        global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
  # Add evaluation metrics (for EVAL mode)
  eval_metric_ops = {
      "accuracy": tf.metrics.accuracy(
          labels=labels, predictions=predictions["classes"])}
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
  """Train the MNIST CNN estimator for 20k steps, then evaluate it."""
  # Load training and eval data
  mnist = tf.contrib.learn.datasets.load_dataset("mnist")
  train_data = mnist.train.images  # Returns np.array
  train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
  eval_data = mnist.test.images  # Returns np.array
  eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
  # Create the Estimator (checkpoints/summaries go to model_dir)
  mnist_classifier = tf.estimator.Estimator(
      model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
  # Set up logging for predictions
  # Log the values in the "Softmax" tensor with label "probabilities"
  tensors_to_log = {"probabilities": "softmax_tensor"}
  logging_hook = tf.train.LoggingTensorHook(
      tensors=tensors_to_log, every_n_iter=50)
  # Train the model (num_epochs=None: loop over the data indefinitely,
  # bounded by the steps argument below)
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": train_data},
      y=train_labels,
      batch_size=100,
      num_epochs=None,
      shuffle=True)
  mnist_classifier.train(
      input_fn=train_input_fn,
      steps=20000,
      hooks=[logging_hook])
  # Evaluate the model and print results (one pass, unshuffled)
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": eval_data},
      y=eval_labels,
      num_epochs=1,
      shuffle=False)
  eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
  print(eval_results)
if __name__ == "__main__":
  # tf.app.run parses flags and then calls main().
  tf.app.run()
| [
"jim.morris.shen@gmail.com"
] | jim.morris.shen@gmail.com |
d89b26a0c2aa42dccc501acbb07ac7e597b9047a | 102b67d83e12219f3bf4bea6ed691ddd9c2e69f1 | /ad/templatetags/ads.py | 7e6251780e534773006f27332ae6205e14bdccc8 | [
"BSD-3-Clause"
] | permissive | nicksergeant/snipt-old | 2cb6bec629d798dd83fc39f0105828f1fd40a51a | f2f1e9f183fb69bcc0fabbc25059bfd1c60527e2 | refs/heads/master | 2021-01-18T14:03:01.426851 | 2012-09-19T00:09:48 | 2012-09-19T00:09:48 | 865,573 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | from tagging.models import TaggedItem
from snipt.ad.models import Ad
from django import template
# Template-tag registry; Django discovers this module-level name automatically.
register = template.Library()
@register.simple_tag
def ad(tag):
    """
    Render a random Amazon book ad, preferring ads tagged with *tag*.

    Falls back to any random ad (with an empty tag label) when no tagged
    ad exists, and to an empty string when there are no ads at all
    (previously this raised an uncaught IndexError in the template).
    """
    try:
        ads = TaggedItem.objects.get_by_model(Ad.objects.order_by('?'), tag)
        ad = ads[0]
    except Exception:  # was a bare except:; keep the fallback without masking SystemExit etc.
        try:
            ads = Ad.objects.order_by('?')
            ad = ads[0]
            tag = ''
        except IndexError:
            # No ads in the database at all.
            return ''
    return """
    <h1 style="margin-bottom: 20px; padding-top: 15px;">A good %s read</h1>
    <div class="amazon-book clearfix">
        <div class="amazon-title">
            <a href="%s" rel="nofollow" class="clearfix">
                <img src="/media/%s" alt="%s" title="%s" />
                %s
            </a>
        </div>
    </div>
    """ % (tag,
           ad.url,
           ad.image,
           ad.title,
           ad.title,
           ad.title)
| [
"nick@nicksergeant.com"
] | nick@nicksergeant.com |
6fafb7d67282fac8e3f366a73152feb59b6eb480 | 468a66a94c55fadc60fbb351ec587dfd6cd697e3 | /twitter/tweepy/streaming.py | 9d56e76eb42bfc18bdb0f8aaf1a85850ce6852a4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | info-beamer/package-modules | 3d6de1d8dded60552a008a8f7f2939ce5ed0f3c9 | eae869a9d54e4f8969b0ebb5e8a05cbea3991efb | refs/heads/master | 2021-01-01T16:01:27.612431 | 2017-12-22T16:32:47 | 2017-12-22T16:32:47 | 97,757,172 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 9,884 | py | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import logging
import httplib
from socket import timeout
from threading import Thread
from time import sleep
import ssl
from tweepy.models import Status
from tweepy.api import API
from tweepy.error import TweepError
from tweepy.utils import import_simplejson, urlencode_noplus
# Resolve the fastest available JSON implementation once at import time.
json = import_simplejson()
STREAM_VERSION = '1.1'  # Streaming API version used when building endpoint URLs
class StreamListener(object):
    """Receives parsed messages from a Stream.

    Subclass and override the on_* hooks; returning False from a hook
    stops the stream and closes the connection.
    """

    def __init__(self, api=None):
        self.api = api or API()

    def on_connect(self):
        """Called once connected to streaming server.

        This will be invoked once a successful response
        is received from the server. Allows the listener
        to perform some work prior to entering the read loop.
        """
        pass

    def on_data(self, raw_data):
        """Called when raw data is received from connection.

        Override this method if you wish to manually handle
        the stream data. Return False to stop stream and close connection.
        """
        data = json.loads(raw_data)
        # Dispatch on the message shape: statuses carry
        # 'in_reply_to_status_id'; control messages carry a single
        # distinguishing top-level key.
        if 'in_reply_to_status_id' in data:
            status = Status.parse(self.api, data)
            if self.on_status(status) is False:
                return False
        elif 'delete' in data:
            delete = data['delete']['status']
            if self.on_delete(delete['id'], delete['user_id']) is False:
                return False
        elif 'limit' in data:
            if self.on_limit(data['limit']['track']) is False:
                return False
        elif 'disconnect' in data:
            if self.on_disconnect(data['disconnect']) is False:
                return False
        else:
            logging.error("Unknown message type: " + str(raw_data))

    def on_status(self, status):
        """Called when a new status arrives"""
        return

    def on_exception(self, exception):
        """Called when an unhandled exception occurs."""
        return

    def on_delete(self, status_id, user_id):
        """Called when a delete notice arrives for a status"""
        return

    def on_limit(self, track):
        """Called when a limitation notice arrives"""
        return

    def on_error(self, status_code):
        """Called when a non-200 status code is returned"""
        return False

    def on_timeout(self):
        """Called when stream connection times out"""
        return

    def on_disconnect(self, notice):
        """Called when twitter sends a disconnect notice

        Disconnect codes are listed here:
        https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect
        """
        return
class Stream(object):
    """Maintains a connection to the Twitter Streaming API.

    Opens a long-lived HTTP(S) POST, reads length-delimited JSON
    messages and forwards them to a StreamListener, reconnecting with
    back-off on HTTP errors (exponential, capped) and on time-outs
    (linear snooze, capped).  This is Python 2 code (httplib, unicode).
    """

    host = 'stream.twitter.com'

    def __init__(self, auth, listener, **options):
        self.auth = auth
        self.listener = listener
        self.running = False
        self.timeout = options.get("timeout", 300.0)
        self.retry_count = options.get("retry_count")
        self.retry_time_start = options.get("retry_time", 10.0)
        self.retry_time_cap = options.get("retry_time_cap", 240.0)
        self.snooze_time_start = options.get("snooze_time", 0.25)
        self.snooze_time_cap = options.get("snooze_time_cap", 16)
        self.buffer_size = options.get("buffer_size", 1500)
        if options.get("secure", True):
            self.scheme = "https"
        else:
            self.scheme = "http"
        self.api = API()
        self.headers = options.get("headers") or {}
        self.parameters = None
        self.body = None
        self.retry_time = self.retry_time_start
        self.snooze_time = self.snooze_time_start

    def _run(self):
        """Connection loop: connect, read, and reconnect with back-off."""
        # Authenticate
        url = "%s://%s%s" % (self.scheme, self.host, self.url)
        # Connect and process the stream
        error_counter = 0
        conn = None
        exception = None
        while self.running:
            if self.retry_count is not None and error_counter > self.retry_count:
                # quit if error count greater than retry count
                break
            try:
                if self.scheme == "http":
                    conn = httplib.HTTPConnection(self.host, timeout=self.timeout)
                else:
                    conn = httplib.HTTPSConnection(self.host, timeout=self.timeout)
                self.auth.apply_auth(url, 'POST', self.headers, self.parameters)
                conn.connect()
                conn.request('POST', self.url, self.body, headers=self.headers)
                resp = conn.getresponse()
                if resp.status != 200:
                    # Non-200: give the listener a veto, then back off exponentially.
                    if self.listener.on_error(resp.status) is False:
                        break
                    error_counter += 1
                    sleep(self.retry_time)
                    self.retry_time = min(self.retry_time * 2, self.retry_time_cap)
                else:
                    # Successful connect resets all back-off state.
                    error_counter = 0
                    self.retry_time = self.retry_time_start
                    self.snooze_time = self.snooze_time_start
                    self.listener.on_connect()
                    self._read_loop(resp)
            except (timeout, ssl.SSLError), exc:
                # If it's not time out treat it like any other exception
                if isinstance(exc, ssl.SSLError) and not (exc.args and 'timed out' in str(exc.args[0])):
                    exception = exc
                    break
                if self.listener.on_timeout() == False:
                    break
                if self.running is False:
                    break
                conn.close()
                # Time-outs use a gentler linear snooze instead of the
                # exponential retry_time.
                sleep(self.snooze_time)
                self.snooze_time = min(self.snooze_time+0.25, self.snooze_time_cap)
            except Exception, exception:
                # any other exception is fatal, so kill loop
                break
        # cleanup
        self.running = False
        if conn:
            conn.close()
        if exception:
            # call a handler first so that the exception can be logged.
            self.listener.on_exception(exception)
            raise

    def _data(self, data):
        """Forward one message to the listener; False stops the stream."""
        if self.listener.on_data(data) is False:
            self.running = False

    def _read_loop(self, resp):
        """Read 'delimited=length' framed messages until stopped/closed."""
        while self.running and not resp.isclosed():
            # Note: keep-alive newlines might be inserted before each length value.
            # read until we get a digit...
            c = '\n'
            while c == '\n' and self.running and not resp.isclosed():
                c = resp.read(1)
            delimited_string = c
            # read rest of delimiter length..
            d = ''
            while d != '\n' and self.running and not resp.isclosed():
                d = resp.read(1)
                delimited_string += d
            # read the next twitter status object
            if delimited_string.strip().isdigit():
                next_status_obj = resp.read( int(delimited_string) )
                self._data(next_status_obj)
        if resp.isclosed():
            self.on_closed(resp)

    def _start(self, async):
        """Run the connection loop, optionally on a background thread."""
        self.running = True
        if async:
            Thread(target=self._run).start()
        else:
            self._run()

    def on_closed(self, resp):
        """ Called when the response has been closed by Twitter """
        pass

    def userstream(self, count=None, async=False, secure=True):
        """Connect to the authenticated user's stream."""
        self.parameters = {'delimited': 'length'}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/2/user.json?delimited=length'
        self.host='userstream.twitter.com'
        self._start(async)

    def firehose(self, count=None, async=False):
        """Connect to the full firehose (requires elevated access)."""
        self.parameters = {'delimited': 'length'}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/statuses/firehose.json?delimited=length' % STREAM_VERSION
        if count:
            self.url += '&count=%s' % count
        self._start(async)

    def retweet(self, async=False):
        """Connect to the retweet stream."""
        self.parameters = {'delimited': 'length'}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/statuses/retweet.json?delimited=length' % STREAM_VERSION
        self._start(async)

    def sample(self, count=None, async=False):
        """Connect to the random status sample stream."""
        self.parameters = {'delimited': 'length'}
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/statuses/sample.json?delimited=length' % STREAM_VERSION
        if count:
            self.url += '&count=%s' % count
        self._start(async)

    def filter(self, follow=None, track=None, async=False, locations=None,
               count = None, stall_warnings=False, languages=None):
        """Connect to the filtered stream (by users, keywords, locations)."""
        self.parameters = {}
        self.headers['Content-type'] = "application/x-www-form-urlencoded"
        if self.running:
            raise TweepError('Stream object already connected!')
        self.url = '/%s/statuses/filter.json?delimited=length' % STREAM_VERSION
        if follow:
            self.parameters['follow'] = ','.join(map(str, follow))
        if track:
            self.parameters['track'] = ','.join(map(str, track))
        if locations and len(locations) > 0:
            # Locations come as flat lon/lat bounding boxes: 4 values each.
            assert len(locations) % 4 == 0
            self.parameters['locations'] = ','.join(['%.2f' % l for l in locations])
        if count:
            self.parameters['count'] = count
        if stall_warnings:
            self.parameters['stall_warnings'] = stall_warnings
        if languages:
            self.parameters['language'] = ','.join(map(str, languages))
        self.body = urlencode_noplus(self.parameters)
        self.parameters['delimited'] = 'length'
        self._start(async)

    def disconnect(self):
        """Ask the read loop to stop; no-op when not running."""
        if self.running is False:
            return
        self.running = False
| [
"fw@dividuum.de"
] | fw@dividuum.de |
be950409e040460df54a3b7b7fad78d04b5b8938 | e2265493d2c94c32834541b5b3c1a937c1d25450 | /venv/Scripts/django-admin.py | e8276242db9681b7dd0cb334b3e385fcf499627c | [] | no_license | BekturMuratov/todoist_api | a209665eb6154c47c501c296cfcecac29d2e2fb5 | 35df8cb4a867e4ba123b0f30a4d7426792ba6ace | refs/heads/master | 2023-08-13T02:35:20.379169 | 2021-10-04T15:27:30 | 2021-10-04T15:27:30 | 413,477,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | #!d:\joseph\projects\petprojects\todoist_api\venv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django 4.0 removed both django-admin.py and this warning class;
    # fail with an actionable message instead of a bare ImportError.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn about the deprecated entry point, then delegate to the normal CLI.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"Muratov225@gmail.com"
] | Muratov225@gmail.com |
66125500fbe356cb06825a08b1650df14f795299 | 62fa57586e51e9e58b4eb028d039dc518242b614 | /stagger/__init__.py | 4b9ce0cc3efeb9e691d96b0b7320234b4d315a15 | [
"BSD-2-Clause"
] | permissive | beckjake/stagger | fd6724efe6172ad8744a8b327a08d769ad0f6889 | 639f2dd5922b4140fabf70afff83c4e51a8f4ba7 | refs/heads/master | 2020-06-03T04:05:17.147157 | 2014-09-29T02:15:12 | 2014-09-29T02:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | #
# __init__.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import stagger.frames
import stagger.tags
import stagger.id3
import stagger.util
from stagger.errors import *
from stagger.frames import Frame, ErrorFrame, UnknownFrame, TextFrame, URLFrame
from stagger.tags import read_tag, decode_tag, delete_tag, Tag22, Tag23, Tag24
from stagger.id3v1 import Tag1
version = (0, 4, 3)
# Dotted string form of the version tuple, e.g. "0.4.3".
versionstr = ".".join((str(v) for v in version))
# Tag class used by default when creating new tags.
default_tag = Tag24
# Fail fast on unsupported Python versions.
stagger.util.python_version_check()
| [
"jbeck@colorado.edu"
] | jbeck@colorado.edu |
10b4b8805396bcd61f81f8afa822a2c638bc990d | 3a3a911c472ca5fd091c9736c60c844fa6f678bd | /bin/pyspice | f2f7f43d9ecdd02e53403d316a617ebf0610c6d6 | [
"MIT"
] | permissive | SleepBook/pySpice | 35c2c88de49f4dca40827471c20d89c56198b205 | b1a468820379ce8aeaff91ddfa0113a0accb07f3 | refs/heads/master | 2021-01-13T10:37:13.032705 | 2016-10-17T02:08:18 | 2016-10-17T02:08:18 | 69,954,679 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | #!/usr/bin/python
#the executable scripts for pySpice
from pySpice.top import *
from pySpice.exhibitor.plotter import *
import sys, getopt
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], 'hpo:', ['help','print','output='])
print_flag = 0
out_name = 'out.ls'
for k,v in opts:
if k in ('-h','--help'):
print "THis is a Ciruit Simulator Implement in Python"
print "Use 'pyspice netlistname' to simulate the circuit"
print "Use '-p/--print' to direct print the result after simulation"
print "Use '-o/--output' outfile name' to appoint the output filename. By default, the output file name would be out.ls"
exit(0)
elif k in ('-p','--print'):
print_flag = 1
elif k in ('-o', '--output'):
out_name = v
if len(args) != 1:
print "Invalid Pararmeter, Expecting One Argument of the Netlist File Name"
exit(-1)
solve_circuit(args[0],out_name)
if print_flag:
plot(out_name)
exit(0)
| [
"oar.yin@sjtu.edu.cn"
] | oar.yin@sjtu.edu.cn | |
9020f3b938a2353773520a86f17648964a5271c3 | a5b091d79cacb3c2c69fd7f8769f68eb5c752041 | /filter/migrations/0001_initial.py | 34c19e3f108f50b7205c547920c699cd6b6ec97b | [] | no_license | dn-dev-gis/compendium | 237e511555ca7614e6769dc0201a1caf46821a51 | 0a61d23c8697752623ea932434c024b33470fa8b | refs/heads/master | 2023-08-01T13:22:19.249187 | 2020-06-30T07:20:53 | 2020-06-30T07:20:53 | 276,026,694 | 0 | 0 | null | 2021-09-22T19:27:31 | 2020-06-30T07:18:37 | Python | UTF-8 | Python | false | false | 813 | py | # Generated by Django 3.0.1 on 2020-06-25 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the FinOption model
    (all text fields optional/blank)."""

    # First migration for this app; no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='FinOption',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('thematicArea', models.CharField(blank=True, max_length=300)),
                ('eligibility', models.CharField(blank=True, max_length=300)),
                ('option', models.CharField(blank=True, max_length=300)),
                ('title', models.CharField(blank=True, max_length=200)),
                ('additional', models.CharField(blank=True, max_length=200)),
            ],
        ),
    ]
| [
"dnickel.se@gmail.com"
] | dnickel.se@gmail.com |
bb8103580c7adf23fe6a2ce1266cb5301e981485 | b72e0cdfbd1ef0472ae0f773487421ac5b83a5ee | /simulation_entry_delayCSI.py | 7a45bb6f672802b846cdafea481a0c60b904607f | [] | no_license | zhiwei-roy-0803/RAV2X | e3e12c90612d21fba65ad1589d8a9b2367b0c677 | b6d760410690a980409c6879eb37d4edbbfb52c2 | refs/heads/master | 2023-07-31T12:26:56.892173 | 2021-09-27T14:40:56 | 2021-09-27T14:40:56 | 326,129,874 | 16 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,201 | py | from ResourceAllocator import DelayedCSIAllocator
import numpy as np
from tqdm import tqdm
import os
import argparse
# simulation parameter
# Number of Monte-Carlo drops averaged per data point.
max_run = 1000
# Hyper-parameter configuration
# NOTE(review): names suggest dBm power caps (dB_*), shadowing std-devs
# (stdV2V/stdV2I), carrier frequency, cell radius/heights in metres and
# DUE/CUE counts -- confirm units against DelayedCSIAllocator.
config = {"dB_Pd_max":23,
          "dB_Pc_max":23,
          "stdV2V":3,
          "stdV2I":8,
          "freq":2,
          "radius":500,
          "bsHgt":25,
          "disBstoHwy":35,
          "bsAntGain":8,
          "bsNoiseFigure":5,
          "vehHgt":1.5,
          "vehAntGain":3,
          "vehNoiseFigure":9,
          "numLane":6,
          "laneWidth":4,
          "r0":0.5,
          "dB_gamma0":5,
          "p0":0.001,
          "dB_sigma2":-114,
          "numDUE":20,
          "numCUE":20}
def run_different_feedbacktime(obj, v=50):
    """Sweep the CSI feedback period and save the averaged capacities.

    For each period T in [0.2, 1.2] (6 points), runs up to ``max_run``
    Monte-Carlo drops at vehicle speed *v*, skips infeasible drops
    (negative minimum capacity) and writes the averaged sum/min
    capacity arrays under ./results/DelayCSI/different_feedback/.

    :param obj: objective label used only in the output path (e.g. "MaxSum").
    :param v: vehicle speed passed to the allocator (km/h per naming -- TODO confirm).
    """
    print("Run Different FeedBack Period")
    feedbacktime = np.linspace(0.2, 1.2, 6)
    allocator = DelayedCSIAllocator(config)
    sum_capacity_array = []
    min_capacity_array = []
    for t in feedbacktime:
        total_sum = 0
        total_min = 0
        pbar = tqdm(range(max_run))
        valid_cnt = 0
        for _ in pbar:
            sum_capacity, min_capacity = allocator.run_allocation(v=v, T=t)
            if min_capacity < 0:
                # Infeasible allocation: skip this drop entirely.
                continue
            total_sum += sum_capacity
            total_min += min_capacity
            valid_cnt += 1
            pbar.set_description("SumCapcity={:2f}, MinCapacity={:.2f}".format(sum_capacity, min_capacity))
        # Bug fix: averages previously divided by max_run even though
        # infeasible drops were skipped; divide by the number of valid
        # drops instead, consistent with run_different_speed, and guard
        # against the all-infeasible case.
        if valid_cnt > 0:
            avg_sum = total_sum / valid_cnt
            avg_min = total_min / valid_cnt
        else:
            avg_sum = float('nan')
            avg_min = float('nan')
        print("Feedback Period = {:.3f} ms, Avg_SumCapacity = {:.3f}, Avg_MinCapacity = {:.3f}".format(t, avg_sum, avg_min))
        sum_capacity_array.append(avg_sum)
        min_capacity_array.append(avg_min)
    # Save Statistics
    save_dir = "./results/DelayCSI/different_feedback/{:s}/Velocity={:d}".format(obj, v)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    sum_capacity_path = os.path.join(save_dir, "SumCapacity")
    min_capacity_path = os.path.join(save_dir, "MinCapacity")
    sum_capacity_array = np.array(sum_capacity_array)
    min_capacity_array = np.array(min_capacity_array)
    np.savetxt(sum_capacity_path, sum_capacity_array, fmt='%.4f', delimiter='\n')
    np.savetxt(min_capacity_path, min_capacity_array, fmt='%.4f', delimiter='\n')
def run_different_speed(obj, t=1.0):
    """Sweep vehicle speed and save the averaged capacities.

    For each speed in 60..140 km/h (step 10), runs up to ``max_run``
    Monte-Carlo drops with feedback period *t*, skips infeasible drops
    (negative minimum capacity) and writes the averaged sum/min
    capacity arrays under ./results/DelayCSI/different_speed/.

    :param obj: objective label used only in the output path (e.g. "MaxSum").
    :param t: CSI feedback period passed to the allocator.
    """
    print("Run Different Speed")
    speed = np.arange(60, 150, 10)
    allocator = DelayedCSIAllocator(config)
    sum_capacity_array = []
    min_capacity_array = []
    for v in speed:
        total_sum = 0
        total_min = 0
        pbar = tqdm(range(max_run))
        valid_cnt = 0
        for _ in pbar:
            sum_capacity, min_capacity = allocator.run_allocation(v=v, T=t)
            if min_capacity < 0:
                # Infeasible allocation: skip this drop entirely.
                continue
            total_sum += sum_capacity
            total_min += min_capacity
            valid_cnt += 1
            pbar.set_description("SumCapcity={:.3f}, MinCapacity={:.3f}".format(sum_capacity, min_capacity))
        # Robustness fix: avoid ZeroDivisionError when every drop is
        # infeasible at a given speed.
        if valid_cnt > 0:
            avg_sum = total_sum / valid_cnt
            avg_min = total_min / valid_cnt
        else:
            avg_sum = float('nan')
            avg_min = float('nan')
        # Format fix: "{:3f}" (width 3) was presumably meant to be "{:.3f}".
        print("Speed = {:.3f} km/h, Avg_SumCapacity = {:.3f}, Avg_MinCapacity = {:.3f}".format(v, avg_sum, avg_min))
        sum_capacity_array.append(avg_sum)
        min_capacity_array.append(avg_min)
    # Save Statistics
    save_dir = "./results/DelayCSI/different_speed/{:s}/FeedBackPeriod={:.1f}".format(obj, t)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    sum_capacity_path = os.path.join(save_dir, "SumCapacity")
    min_capacity_path = os.path.join(save_dir, "MinCapacity")
    sum_capacity_array = np.array(sum_capacity_array)
    min_capacity_array = np.array(min_capacity_array)
    np.savetxt(sum_capacity_path, sum_capacity_array, fmt='%.4f', delimiter='\n')
    np.savetxt(min_capacity_path, min_capacity_array, fmt='%.4f', delimiter='\n')
if __name__ == "__main__":
    # CLI entry point: -v sets vehicle speed for the feedback sweep
    # (currently commented out), -feedback sets the CSI feedback period
    # for the speed sweep.
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", type=int, default=50)
    parser.add_argument("-feedback", type=float, default=1.0)
    args = parser.parse_args()
    v = args.v  # only used by the commented-out feedback sweep below
    fbPeriod = args.feedback
    # run_different_feedbacktime(obj="MaxSum", v=v)
    run_different_speed(obj="MaxSum", t=fbPeriod)
| [
"775719904@qq.com"
] | 775719904@qq.com |
30e9269bd68ea6585bc6ca8c7881801bdd8446d4 | 9567a07b3209c6c5e4a2493fcc26768275d7e120 | /platformDetermin.py | 02b4adeaeea0f44f4f17f9bd74de07878191501b | [] | no_license | NewAlice/python-code | 66a9ed3f7a6a62e6e0bbcd5553c7c47630f2fe9d | 46be3bc7b15f4cb0eedf90c51bbea4240be8d4bc | refs/heads/master | 2021-01-23T00:07:27.316821 | 2017-11-07T15:47:58 | 2017-11-07T15:47:58 | 85,702,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | import sys
# Print the current platform and the matching path-handling module.
print(sys.platform)
if sys.platform == "win32":
    import ntpath
    pathmodule = ntpath
else:
    # Bug fix: on non-Windows platforms pathmodule was never bound,
    # so the print below raised NameError.
    import posixpath
    pathmodule = posixpath
print(pathmodule)
"noreply@github.com"
] | NewAlice.noreply@github.com |
4c8eedc6b3bbce72c30e473e8e3aa635ce5a15c8 | 76384c306a1fe04bc71a871069955d554b0e1591 | /11948/11948.py | 185f27359de1968af0d376075863f4e2f352b5dd | [] | no_license | potato179/BOJSolution | 0feebd080bf1bcb302830891a25eb924f57daa91 | 7d3f9620d479b20499b5db0d7d6c0a2fcc6376e1 | refs/heads/master | 2022-06-09T16:19:10.216532 | 2022-05-26T11:53:10 | 2022-05-26T11:53:10 | 247,057,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | a = []
# Read four science and two language scores; report the best possible
# total: the top three science scores plus the better language score.
scores = [int(input()) for _ in range(6)]
science, language = scores[:4], scores[4:]
print(sum(science) - min(science) + max(language))
"44293278+potato179@users.noreply.github.com"
] | 44293278+potato179@users.noreply.github.com |
974c0c7fd25b0de5202f8adde919a1f585b0a4ed | aa45f6f5106517c582b21691ce22ad808339ec64 | /borax/calendars/birthday.py | aea5997b9e454ee9eaf8a2861a068b38780a781c | [
"MIT"
] | permissive | kinegratii/borax | 86b1a87c686f9b74db8d919afe30761497888368 | 06407958a6ba3115d783ed6457c2e7355a3f237c | refs/heads/master | 2023-03-11T06:09:20.040607 | 2022-11-15T02:39:43 | 2022-11-15T02:39:43 | 126,959,349 | 67 | 8 | MIT | 2022-11-15T02:39:44 | 2018-03-27T09:07:08 | Python | UTF-8 | Python | false | false | 1,151 | py | from datetime import date
from .lunardate import LunarDate, LCalendars
def nominal_age(birthday, today=None):
    """Return the East-Asian nominal age: one at birth, plus one per
    lunar calendar year boundary crossed up to *today* (defaults to the
    current lunar date)."""
    birth_ld = LCalendars.cast_date(birthday, LunarDate)
    ref_ld = LCalendars.cast_date(today, LunarDate) if today else LunarDate.today()
    return ref_ld.year - birth_ld.year + 1
def actual_age_solar(birthday, today=None):
    """Return the western (solar-calendar) age in completed years.

    One year is counted only after the birthday anniversary has passed
    in the reference year.  Based on
    https://stackoverflow.com/questions/2217488/age-from-birthdate-in-python/9754466#9754466
    """
    birth_d = LCalendars.cast_date(birthday, date)
    ref_d = LCalendars.cast_date(today, date) if today else date.today()
    had_anniversary = (ref_d.month, ref_d.day) >= (birth_d.month, birth_d.day)
    return ref_d.year - birth_d.year - (0 if had_anniversary else 1)
def actual_age_lunar(birthday, today=None):
    """Return the age in completed lunar years, comparing (month, leap,
    day) tuples so leap months are ordered correctly."""
    birth_ld = LCalendars.cast_date(birthday, LunarDate)
    ref_ld = LCalendars.cast_date(today, LunarDate) if today else LunarDate.today()
    before_anniversary = (
        (ref_ld.month, ref_ld.leap, ref_ld.day)
        < (birth_ld.month, birth_ld.leap, birth_ld.day)
    )
    return ref_ld.year - birth_ld.year - int(before_anniversary)
| [
"kinegratii@gmail.com"
] | kinegratii@gmail.com |
02f5fa0b6f13ef4066dda1bbef4657c37e7474c0 | b18de1e0e54c8559a4bb19ed246ec6760966e6f7 | /tubers/youtubers/migrations/0009_alter_youtuber_photo.py | b8f2ba010a00126a6b33c0fa0e2b742b075c66ad | [] | no_license | anulrajeev/Ytubers | 596f6f41e1c01d7637792a7ea407785ce0c1313a | c23b59cdccb47cbbbfdc59c4668d9082b3c86b80 | refs/heads/master | 2023-06-22T05:10:04.112740 | 2021-07-17T05:16:14 | 2021-07-17T05:16:14 | 334,475,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | # Generated by Django 3.2.5 on 2021-07-15 11:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Youtuber.photo optional with a
    placeholder default image."""

    dependencies = [
        ('youtubers', '0008_alter_youtuber_photo'),
    ]
    operations = [
        migrations.AlterField(
            model_name='youtuber',
            name='photo',
            field=models.ImageField(blank=True, default='media/dummy.png', null=True, upload_to='media/ytubers/%Y/%m'),
        ),
    ]
| [
"anulrajeev@gmail.com"
] | anulrajeev@gmail.com |
101f1b7a76d6628d5f626c3eb34c42418d542ca2 | 8c4a080cf8f4a57ff765d3e10e3d3c15510ea7f5 | /src/mover.py | c9fa13f9b33a3e796b727dc5e39ccdc9994595c5 | [] | no_license | EKrukov1989/cars_marker | d6e62a6b611c8814a4f8d0d105410b621260d13a | e946d619cd8c801d6b541f7a8d9e169f1f422860 | refs/heads/master | 2023-01-18T21:50:33.042564 | 2020-11-17T18:34:37 | 2020-11-17T18:34:37 | 307,647,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,425 | py | """Module for Mover class."""
class Mover():
    """Translate arrow-key events on a Tk widget into movement callbacks.

    Binds press/release handlers for the Left, Right, Up and Down keys on
    *root*.  The first press in a direction moves slowly; repeated press
    events (key auto-repeat, counter > 1) move fast.  Releasing a key
    resets that direction's counter, stopping movement along it.
    """

    __SLOW = 1
    __FAST = 5

    __root = None
    __callback = None
    __left_press_counter = 0
    __right_press_counter = 0
    __up_press_counter = 0
    __down_press_counter = 0

    def __init__(self, root, callback):
        """Bind arrow-key press/release events on *root*; movement reaches *callback*."""
        self.__root = root
        self.__callback = callback
        bindings = (
            ("<Left>", self.__left_press),
            ("<Right>", self.__right_press),
            ("<Down>", self.__down_press),
            ("<Up>", self.__up_press),
            ("<KeyRelease-Left>", self.__left_release),
            ("<KeyRelease-Right>", self.__right_release),
            ("<KeyRelease-Down>", self.__down_release),
            ("<KeyRelease-Up>", self.__up_release),
        )
        # Bind the handler as a default argument so each lambda keeps its own.
        for sequence, handler in bindings:
            self.__root.bind(sequence, lambda e, h=handler: h())

    def __left_press(self):
        self.__left_press_counter += 1
        self.__move()

    def __right_press(self):
        self.__right_press_counter += 1
        self.__move()

    def __up_press(self):
        self.__up_press_counter += 1
        self.__move()

    def __down_press(self):
        self.__down_press_counter += 1
        self.__move()

    def __left_release(self):
        self.__left_press_counter = 0

    def __right_release(self):
        self.__right_press_counter = 0

    def __up_release(self):
        self.__up_press_counter = 0

    def __down_release(self):
        self.__down_press_counter = 0

    def __move(self):
        """Fire the callback with the current shift unless it is (0, 0)."""
        dx, dy = self.__compute_current_shift()
        if (dx, dy) != (0, 0):
            self.__callback((dx, dy))

    def __compute_current_shift(self):
        """Return (dx, dy): +x is right, +y is up, speed from press counters."""
        def speed(press_count):
            # 0 presses -> idle, first press -> slow, held/repeated -> fast.
            if press_count == 0:
                return 0
            return self.__SLOW if press_count == 1 else self.__FAST

        dx = speed(self.__right_press_counter) - speed(self.__left_press_counter)
        dy = speed(self.__up_press_counter) - speed(self.__down_press_counter)
        return (dx, dy)
| [
"evgenykryukov@mail.ru"
] | evgenykryukov@mail.ru |
16fe5e9d2e8a04bb97077627ec563090161e27e7 | cbab23d22792a20c847639c062e7093a223258fb | /01_django入门项目/02_V/test4/booktest/migrations/0004_blog_cre_time.py | a886a843de9adcea0c2a380aa7986ba6e222022d | [
"Apache-2.0"
] | permissive | labor55/django-test | 18b0b53647dd1adca27dc2f8ec4ce47bf7a50ee7 | 9faf16eebd6f373089eca83d54a640f0461377c8 | refs/heads/master | 2022-01-10T16:09:43.242043 | 2019-06-06T09:43:29 | 2019-06-06T09:43:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('booktest', '0003_remove_blog_created_time'),
]
operations = [
migrations.AddField(
model_name='blog',
name='cre_time',
field=models.DateField(default=0, auto_now_add=True),
preserve_default=False,
),
]
| [
"labzijin1314@sina.com"
] | labzijin1314@sina.com |
5760cd3684bc449519f036670f2eb1086095dd94 | 9da0ba811059f9e5960e712b9220b18efdaad46d | /Assignment4/car.py | ef2032e524721ee77611ea2e5cfb7e4646581063 | [] | no_license | JenySadadia/MIT-assignments-Python | cde0fae2556c06bfb980751aa02d0ab3e2d7150c | c422649ede9a6dc8bb9be7db4e87a261e22f1f21 | refs/heads/master | 2021-01-18T12:51:57.755235 | 2017-08-27T09:49:51 | 2017-08-27T09:49:51 | 100,366,296 | 0 | 5 | null | 2017-08-21T04:58:07 | 2017-08-15T10:23:17 | Python | UTF-8 | Python | false | false | 812 | py | ''' Drawing the car'''
from graphics import *
from wheel import *
class Car:
    """A simple drawable car: two wheels plus a rectangular body."""

    def __init__(self, center1, radius1, center2, radius2, height):
        """Build the wheels at *center1*/*center2* and the body spanning them.

        Each wheel's inner radius is 60% of its outer radius.  The body
        rectangle extends *height* beyond each wheel horizontally and
        *height* along the y axis (presumably upward in window coordinates
        where y grows downward — confirm against the graphics library).
        """
        self.wheel_1 = Wheel(center1, 0.6 * radius1, radius1)
        self.wheel_2 = Wheel(center2, 0.6 * radius2, radius2)
        corner_a = Point(center1.getX() - height, center1.getY())
        corner_b = Point(center2.getX() + height, center2.getY() - height)
        self.rect = Rectangle(corner_a, corner_b)

    def draw(self, win):
        """Draw every part of the car on window *win*."""
        for part in (self.wheel_1, self.wheel_2, self.rect):
            part.draw(win)
def main():
    """Open a window, draw one demo car, and close on the first mouse click."""
    window = GraphWin("A Car", 700, 300)
    demo_car = Car(Point(50, 50), 15, Point(100, 50), 15, 40)
    demo_car.draw(window)
    window.getMouse()
    window.close()


main()
| [
"noreply@github.com"
] | JenySadadia.noreply@github.com |
6d4c466cd7d4dfb911739b94e24f68ad582d7595 | 1dfc3f3cac3d64cdee8d9709820ff8dd154e294d | /oracle_update.py | bbb8fe2964f385265c271697c9610861b237eb0b | [] | no_license | tanlull/python-django-api | a399a615dde33e8cda65c31fe5e178756e756939 | 2dc3ef5079ac17881193349617fa5862c75c0923 | refs/heads/master | 2022-12-20T00:46:28.068144 | 2020-10-06T15:09:09 | 2020-10-06T15:09:09 | 300,804,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | import cx_Oracle as ora
def updateTable(id, newid):
    """Change rows in the ``source`` table whose ``id`` equals *id* to *newid*.

    Connects to Oracle, runs a parameterized UPDATE, commits, and prints the
    number of affected rows.  Errors are reported rather than raised; the
    cursor and connection are always released.
    """
    # NOTE: parameter name ``id`` shadows the builtin but is kept so existing
    # keyword callers (updateTable(id=..., newid=...)) keep working.
    conn = None
    cursor = None
    try:
        conn = ora.connect('train00', 'train00', 'dboda-scan.rubber.co.th/testrac')
        cursor = conn.cursor()
        # Bind variables (:1, :2) avoid SQL injection and statement re-parsing.
        sql_update = "Update source set id = :1 where id = :2"
        cursor.execute(sql_update, (newid, id))
        conn.commit()
        count = cursor.rowcount
        print(count, "Record Updated successfully ")
    except (Exception, ora.Error) as error:
        print("Error in update operation", error)
    finally:
        # Fix: the original referenced ``conn``/``cursor`` unconditionally here,
        # raising UnboundLocalError whenever ora.connect() itself failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
            print("ORacle conn is closed")


updateTable(5, 10)
"tanlull@gmail.com"
] | tanlull@gmail.com |
689a7bcf9a17e9920971e0f75dbeae77f831658a | 65b9a63e8c132f32aeb56961968f5e363bd9a087 | /20190708_python识别中文车牌windows/同样的参数训练结果不同/09_last0.6937/keras_train_test.py | 8cb1811604a268356d28d0685ff1158985f6c64e | [] | no_license | 346644054/examples2019 | e70f13cfb56c3478fc6e335c730e0e70e70a6226 | 5f9777e7a887e635971156354f56ce065fa3f41e | refs/heads/master | 2022-04-09T03:52:52.973414 | 2020-02-28T03:05:02 | 2020-02-28T03:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,425 | py | # -*- coding: utf-8 -*-
"""
Vehicle plate recognition
using keras
Author: elesun
https://cloud.tencent.com/developer/article/1005199
# -*- coding: utf-8 -*-
"""
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from keras.models import Sequential,Input,Model
from keras.layers import Conv2D,MaxPooling2D,Dense,Dropout,Activation,Flatten
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.models import load_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
import cv2
#os.environ["CUDA_VISIBLE_DEVICES"] = "0" #"1,0"
#####################车牌数据生成器,################################################
#用于深度神经网络的数据输入
#开源的车牌生成器,随机生成的车牌达到以假乱真的效果
#国内机动车车牌7位,第一位是各省的汉字,第二位是 A-Z 的大写字母,3-7位则是数字、字母混合
from genplate import *
# All symbols that can appear on a plate: 31 province abbreviations followed
# by digits 0-9 and the letters A-Z without I and O (65 classes in total).
chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
             "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
             "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X",
             "Y", "Z"
             ]
# Character -> class-index lookup used when encoding training labels.
M_strIdx = dict(zip(chars, range(len(chars))))
#print("M_strIdx\n",M_strIdx)
# Synthetic plate generator; fonts and background images live next to the script.
Ge = GenPlate("./font/platech.ttf",'./font/platechar.ttf',"./NoPlates")
# Checkpoints are written here; create the directory on first run.
model_dir = "./model"
if not os.path.isdir(model_dir):
    os.makedirs(model_dir)
def gen(batch_size=32):
    """Infinite generator of (images, labels) batches of synthetic plates.

    Yields:
        X: uint8 array of plate images (272x72 plates, matching the model's
           Input((72, 272, 3))).
        y: list of 7 one-hot arrays, each (batch_size, len(chars)) — one
           per plate character position (heads c1..c7 of the model).
    """
    # Fix: the original also built an unused ``temp2`` structure every batch
    # (a leftover Python-2 byte-joining workaround) and carried a large amount
    # of commented-out debug printing; both removed.
    while True:
        l_plateStr, l_plateImg = Ge.genBatch(batch_size, 2, range(31, 65), "plate", (272, 72))
        X = np.array(l_plateImg, dtype=np.uint8)
        # Encode each plate string as a sequence of class indices.
        ytmp = np.array([[M_strIdx[ch] for ch in plate] for plate in l_plateStr],
                        dtype=np.uint8)
        # One-hot encode: y[position, sample, class_index] = 1.
        y = np.zeros([ytmp.shape[1], batch_size, len(chars)])
        for batch in range(batch_size):
            for idx, row_i in enumerate(ytmp[batch]):
                y[idx, batch, row_i] = 1
        yield X, [yy for yy in y]
#########################定义网络并训练###########################################
def model_build_train(lr=0.001, epochs=25, batch_size=32, model_name="model_best.h5"):
    """Build the shared-trunk / 7-head plate classifier and train it.

    The trunk is three blocks of two 3x3 ReLU convolutions plus 2x2
    max-pooling, doubling the filter count each block (32, 64, 128).
    Seven softmax heads named c1..c7 each predict one plate character.
    The checkpoint with the lowest validation loss is kept under
    ``model_dir``/*model_name*.
    """
    print("building network ...")
    input_tensor = Input((72, 272, 3))
    net = input_tensor
    for filters in (32, 64, 128):
        net = Conv2D(filters, (3, 3), activation='relu')(net)
        net = Conv2D(filters, (3, 3), activation='relu')(net)
        net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Flatten()(net)
    net = Dropout(0.25)(net)
    n_class = len(chars)
    # One independent classification head per plate position.
    heads = [Dense(n_class, activation='softmax', name='c%d' % (i + 1))(net)
             for i in range(7)]
    model = Model(inputs=input_tensor, outputs=heads)
    model.summary()
    print("save network picture")
    print("training network ...")
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=lr),
                  metrics=['accuracy'])
    best_model = ModelCheckpoint(os.path.join(model_dir, model_name), monitor='val_loss',
                                 verbose=0, save_best_only=True)
    model.fit_generator(gen(batch_size), steps_per_epoch=200, epochs=epochs,
                        validation_data=gen(batch_size), validation_steps=20,
                        verbose=2, callbacks=[best_model])
#########################读取测试车牌图片###########################################
def load_plate_data(data_dir="./recognize_samples"):
    """Load 72x272 RGB test plate images from *data_dir*.

    Returns:
        (names, images): plate strings taken from the file names with the
        last four characters (the extension) stripped, and the matching RGB
        images.  Unreadable files and files of the wrong size are skipped
        with a message.
    """
    print("loading plate data ...")
    plateStr = []
    plateImg = []
    # sorted() makes the load order deterministic across filesystems.
    for filename in sorted(os.listdir(data_dir)):
        path = os.path.join(data_dir, filename)
        image = cv2.imread(path)
        # Fix: cv2.imread returns None for unreadable/non-image files; the
        # original then crashed with AttributeError on image.shape.
        if image is None:
            print("picture %s could not be read, skipping!" % (filename,))
            continue
        if image.shape != (72, 272, 3):
            print("picture %s size error, maybe resize before load !"%(filename))
            continue
        # OpenCV loads BGR; convert to RGB for matplotlib display.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        plateStr.append(filename[:-4])
        plateImg.append(image)
    return plateStr, plateImg
##########################展示模型预测结果########################################
def model_load_predict_plt(model_name, test_Img):
    """Load the trained model, recognize each plate image, and plot results.

    Args:
        model_name: checkpoint file name inside ``model_dir``.
        test_Img: list of 72x272x3 RGB images.

    Shows up to 12 images titled with their predicted characters.
    """
    print('load the trained model')
    model = load_model(os.path.join(model_dir, model_name))
    print("###############model predict###############")
    # Raw output is 7 heads x (num, 65); argmax over classes, then transpose
    # so each row holds the 7 predicted character indices of one plate.
    results = np.argmax(np.array(model.predict(np.array(test_Img))), axis=2).T
    # Fix: invert the char->index mapping once instead of scanning the whole
    # dict for every predicted cell (O(cells * alphabet) in the original).
    idx_to_char = {index: ch for ch, index in M_strIdx.items()}
    predict_plate_str = [[idx_to_char[index] for index in row] for row in results]
    print('predict_plate_str\n', predict_plate_str)
    print("###############plt results###############")
    myfont = FontProperties(fname='./font/Lantinghei.ttc')
    # SimHei so the Chinese province character renders in figure titles.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False  # render minus sign correctly
    fig = plt.figure(figsize=(12, 12))
    for idx, img in enumerate(test_Img[0:12]):
        ax = fig.add_subplot(4, 3, idx + 1)
        ax.imshow(img)
        ax.set_title(predict_plate_str[idx], fontproperties=myfont)
        ax.set_axis_off()
    plt.show()
if __name__ == "__main__":
    # Train from scratch, then recognize the sample images with the best
    # checkpoint and display the predictions.
    model_name = "model_best.h5"
    model_build_train(lr=0.0001, epochs=30, batch_size=16, model_name="model_best.h5")
    test_data_dir = "./recognize_samples"
    test_name, test_Img = load_plate_data(test_data_dir)
    print("test_name",test_name)
    model_load_predict_plt(model_name, test_Img)
| [
"elesun2018@gmail.com"
] | elesun2018@gmail.com |
d4a7222871aa531e146b380f3770209093fa40aa | 64f533b4b8755d19eca18f9d707dc4fa89abecec | /assig/settings.py | cbafc3bb3f3a3d788015074214ac7a1206f09d94 | [] | no_license | munriver/pb-inventm-assig | 0ec041986aa064dcb3c6233eca0aa438fde5dd93 | 1d803a4bfedd1bab7fd09db1fbb91adaf5c36a9c | refs/heads/master | 2020-04-03T09:08:50.409862 | 2018-10-29T05:22:10 | 2018-10-29T05:22:10 | 155,155,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,265 | py | """
Django settings for assig project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control - rotate it and
# load it from the environment before any non-local deployment.
SECRET_KEY = 'is=l!o457wbro3dzzgi9(24llq4msg_tz1kfkk@al)h^klz5kc'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'inventm.apps.InventmConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

# Django 1.9-era setting name (newer Django versions use MIDDLEWARE).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'assig.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): absolute path tied to one developer's machine -
        # consider os.path.join(BASE_DIR, 'inventm', 'templates') instead.
        'DIRS': [
            '/home/sr4wc/pinkblue/assig/inventm/templates/',
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'assig.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'
| [
"sr4wc@debian"
] | sr4wc@debian |
d5ca95b83ff2a58c6652e81391a582f1b204d73c | 77dc121d0321a4bf653bb1298854bf5cd7bc1b03 | /c29_add_user_web/simpledu/simpledu/forms.py | 6a5a6c8e26516ef36c7383fad188c8f5762086d8 | [] | no_license | kinglion580/stu_python | 50ef7ddc80a90989508ce1290f1701dc6f7ec2cf | dde588090631b5526493c57b56d048f68cb0c3c3 | refs/heads/master | 2020-04-17T00:00:45.954911 | 2019-01-16T12:18:35 | 2019-01-16T12:18:35 | 166,034,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, ValidationError
from wtforms.validators import Length, Email, EqualTo, Required
from simpledu.models import db, User
class RegisterForm(FlaskForm):
    """Sign-up form: unique username and e-mail, confirmed password."""

    username = StringField('username', validators=[Required(), Length(3, 24)])
    email = StringField('email', validators=[Required(), Email()])
    # Fix: use PasswordField (already imported but unused) so the password
    # inputs are masked in the browser instead of rendered as plain text.
    password = PasswordField('password', validators=[Required(), Length(6, 24)])
    repeat_password = PasswordField('repeat password', validators=[Required(), EqualTo('password')])
    submit = SubmitField('submit')

    def create_user(self):
        """Persist a new User built from the submitted data and return it."""
        user = User()
        user.username = self.username.data
        user.email = self.email.data
        user.password = self.password.data
        db.session.add(user)
        db.session.commit()
        return user

    def validate_username(self, field):
        """Reject usernames that are already taken."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('username is already exist')

    def validate_email(self, field):
        """Reject e-mail addresses that are already registered."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('email is already exist')
class LoginForm(FlaskForm):
    """Login form: a registered e-mail plus its matching password."""

    email = StringField('email', validators=[Required(), Email()])
    # Fix: mask the password input (PasswordField instead of StringField).
    password = PasswordField('password', validators=[Required(), Length(6, 24)])
    remember_me = BooleanField('remember me')
    submit = SubmitField('submit')

    def validate_email(self, field):
        """Fail when the e-mail is non-empty but not registered."""
        if field.data and not User.query.filter_by(email=field.data).first():
            raise ValidationError('email not register')

    def validate_password(self, field):
        """Check the password against the stored hash for this e-mail."""
        user = User.query.filter_by(email=self.email.data).first()
        if user and not user.check_password(field.data):
            raise ValidationError('password wrong')
| [
"1609019405@qq.com"
] | 1609019405@qq.com |
f279c1cde1dc3f959ae5f6c4dde3667c22710674 | e6237b6bb940a7cf64d8b8c5c9a34a741dc1e0c7 | /main/forms.py | b1b47b72253d8894cfeb4fb57b9bb6890c16c8cf | [] | no_license | Daniel-Keiser/TCC-IFC-em-Django | e3985a42f8b02239b126b65d48aadad712b60234 | 0625e2cb643b490faea5285e454dffe2e02255a2 | refs/heads/master | 2023-04-27T07:16:14.230332 | 2019-12-08T01:08:25 | 2019-12-08T01:08:25 | 226,595,568 | 3 | 0 | null | 2023-04-21T20:41:59 | 2019-12-08T00:47:29 | HTML | UTF-8 | Python | false | false | 2,632 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from.models import Profile, Evento, Almossom, Banda, ProgramacaoAlmossom
class UserRegisterForm(UserCreationForm):
    """Registration form extending Django's UserCreationForm with an e-mail."""

    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']

    def __init__(self, *args, **kwargs):
        # Fix: the original called super(UserCreationForm, self), which skips
        # UserCreationForm.__init__ in the MRO; super() must start from our
        # own class so the parent form initializes correctly.
        super(UserRegisterForm, self).__init__(*args, **kwargs)
        # Portuguese labels for the rendered fields.
        self.fields['username'].label = 'Usuário'
        self.fields['password1'].label = 'Senha'
        self.fields['password2'].label = 'Repita a senha'
class UserUpdateForm(forms.ModelForm):
    """Edit form for the built-in User: username and e-mail only."""

    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['username', 'email']

    def __init__(self, *args, **kwargs):
        super(UserUpdateForm, self).__init__(*args, **kwargs)
        # Portuguese label for the rendered username field.
        username_field = self.fields['username']
        username_field.label = 'Usuário'
class ProfileUpdateForm(forms.ModelForm):
    """Edit a user's Profile: description text and image."""

    class Meta:
        model = Profile
        fields = ['descricao', 'image']

    def __init__(self, *args, **kwargs):
        super(ProfileUpdateForm, self).__init__(*args, **kwargs)
        # Portuguese labels for the rendered fields.
        for name, label in (('descricao', 'Descrição'), ('image', 'Imagem')):
            self.fields[name].label = label
class EventUpdateForm(forms.ModelForm):
    """Create/edit an Evento: title, description, image and date."""

    class Meta:
        model = Evento
        fields = ['titulo', 'descricao', 'image', 'data']

    def __init__(self, *args, **kwargs):
        super(EventUpdateForm, self).__init__(*args, **kwargs)
        # Portuguese labels for the rendered fields.
        for name, label in (('titulo', 'Titulo'),
                            ('descricao', 'Descrição'),
                            ('data', 'Data')):
            self.fields[name].label = label
class AlmoUpdateForm(forms.ModelForm):
    """Create/edit an Almossom entry: date, image, info and description."""

    class Meta:
        model = Almossom
        fields = ['data', 'image', 'info', 'descricao']

    def __init__(self, *args, **kwargs):
        super(AlmoUpdateForm, self).__init__(*args, **kwargs)
        # Portuguese labels for the rendered fields.
        for name, label in (('data', 'Data'),
                            ('info', 'Informações'),
                            ('descricao', 'Descrição')):
            self.fields[name].label = label
class BandaRegisterForm(forms.ModelForm):
    """Register a Banda: leader, name and description."""

    class Meta:
        model = Banda
        fields = ['lider', 'nome', 'descricao']

    def __init__(self, *args, **kwargs):
        super(BandaRegisterForm, self).__init__(*args, **kwargs)
        # Portuguese label for the rendered description field.
        descricao_field = self.fields['descricao']
        descricao_field.label = 'Descrição'
self.fields['descricao'].label = 'Descrição'
class ProgramacaoAlmo(forms.ModelForm):
    """Schedule a Banda for an Almossom event."""

    class Meta:
        # Fix: the original wrote ``model: ProgramacaoAlmossom`` — a bare
        # annotation — so Meta.model was never assigned and the ModelForm
        # could not be used.  ``=`` assigns it properly.
        model = ProgramacaoAlmossom
        fields = ['banda', 'almossom']

    def __init__(self, *args, **kwargs):
        super(ProgramacaoAlmo, self).__init__(*args, **kwargs)
| [
"36968016+Daniel-Keiser@users.noreply.github.com"
] | 36968016+Daniel-Keiser@users.noreply.github.com |
a0f1f2557839af7ed23dfb81c8ff5bea64a59bc4 | e4c25590298b084e3fb44b0b325a05699fac4202 | /Kattis/sevenwonders.py | 5a96568a7cc25485bbe157259a725421d500474b | [] | no_license | shakib609/competitive-programming | 520028bd1147e7e43e708875b6390e1a7d65a94b | 5090d5d3650b8055e16651ed9de5380cc7fdb7aa | refs/heads/master | 2022-12-09T12:33:20.167332 | 2022-12-07T17:28:30 | 2022-12-07T17:28:30 | 67,289,210 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | s = input().strip()
t, c, g = [0, 0, 0]
for ch in s:
if ch == 'T':
t += 1
elif ch == 'C':
c += 1
else:
g += 1
result = t ** 2 + c ** 2 + g ** 2
result += min([t, c, g]) * 7
print(result)
| [
"shakib609@gmail.com"
] | shakib609@gmail.com |
fc30f03e2dbc7420a2b05aa2b872666f66ccb477 | a01792b26a0313b8dee14f57eac278f617de0ec6 | /cellscrape.py | 993c36e51f55a26b9de1fd79200738511caa062a | [] | no_license | metcalf/cellscrape | 91fc4184258775783ea4232f07b807796c5f508b | b2d0ff6625f08e0ed2b5f84471c9b7375af5c6ca | refs/heads/master | 2021-01-22T13:51:51.777332 | 2013-06-26T22:00:46 | 2013-06-26T22:00:46 | 10,980,226 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,535 | py | import urllib2
import simplejson
import optparse
import sys
import csv
import urllib
import time
import re
from collections import defaultdict, OrderedDict
from bs4 import BeautifulSoup
GAPI_KEY = "AIzaSyB8fwNPwYjTeEbZU0TT1ZvaMes2_dqvGyo"
CUSTOM_SEARCH = "https://www.googleapis.com/customsearch/v1?key=%s&cx=015988297936601976182:uvoaw7yqzou"%GAPI_KEY
PIXEL_RE = re.compile(r"(\d+)\s?x\s?(\d+)", re.IGNORECASE)
SIZE_RE = re.compile(r"(\d+(\.\d+)?)\s?inches", re.IGNORECASE)
MIDP_RE = re.compile(r"MIDP (\d+(\.\d+)?)", re.IGNORECASE)
VER_RE = re.compile(r"(\d+(\.\d+)?)", re.IGNORECASE)
HTML_RE = re.compile(r"(^|[^x])HTML", re.IGNORECASE)
def search(name):
    """Query the custom search engine for *name*'s phone-spec page URL.

    Uses Google Custom Search restricted to the exact phrase
    "Full Phone Specifications" and trusts only a first hit whose title
    matches the spec-page pattern (presumably GSMArena — confirm against
    the custom search engine configuration).  Returns "" when no confident
    match is found.  (Python 2 / urllib2 code.)
    """
    result = ""
    url = "".join((CUSTOM_SEARCH, "&q=",urllib.quote_plus(name),
                   "&exactTerms=", "Full+Phone+Specifications"))
    # Referer header required by the API key's referrer restriction.
    request = urllib2.Request(
        url, None, {'Referer': "http://kenya.throughawall.com"})
    response = urllib2.urlopen(request)
    data = simplejson.load(response)
    items = data.get("items")
    if items:
        if "- Full phone specifications" in items[0]["title"]:
            result = items[0]["link"]
        else:
            print "Title doesn't appear to be valid: %s"%items[0]["title"]
    else:
        print "No items returned for %s"%name
    print "%s: %s"%(name, result)
    return result
def scrape(uri):
results = []
request = urllib2.Request(uri)
response = urllib2.urlopen(request)
# Extract div#specs-list
# For each table
# Read table name from th in first tr
# For each tr
# Read key-value pairs from td.ttl and td.nfo
soup = BeautifulSoup(response, "html5lib")
specs = soup.find(id="specs-list")
if not specs:
print "No specs for %s"%uri
return None
for table in specs.find_all("table"):
try:
category = unicode(table.find("th").string)
except Exception as e:
print "Error parsing table for %s: %s"(uri, e)
continue
for row in table.find_all("tr"):
if not row.find("td", class_="ttl"):
continue # Skip blanks
try:
results.append({
"category": category.strip(),
"subcategory": unicode(row.find("td", class_="ttl").string).strip(),
"value": unicode(row.find("td", class_="nfo").string).strip()
})
except Exception as e:
print "Error parsing row for %s: %s\n%s"%(uri, row, e)
print "Scraped %s"%uri
return results
def parse(datum):
    """Flatten one scraped phone record into an ordered feature dict.

    *datum* is {"metadata": {...}, "raw": [row dicts from scrape()] or None}.
    Metadata columns are copied first; derived columns (Network, Data, GPS,
    Video, camera/display properties, Java, messaging, OS, Browser, raw
    echoes) follow.  When "raw" is missing only the metadata is returned.
    """
    result = OrderedDict(datum["metadata"])
    if not datum.get("raw"):
        return result
    # Index rows as tree[category][subcategory] -> value (case-folded keys).
    tree = defaultdict(dict)
    for row in datum["raw"]:
        tree[row["category"].lower().strip()][row["subcategory"].lower()] = row["value"]
    general = tree["general"]
    data = tree["data"]
    features = tree["features"]

    # Network generation: the site reports "No" in the "Nx network" row when
    # the phone lacks that generation.
    if "No" not in general.get("4g network", "No"):
        result["Network"] = "4G"
    # Fix: this was a plain "if" in the original, so any phone that had both
    # 4G and 3G rows was relabeled "3G"; "elif" keeps the best generation.
    elif "No" not in general.get("3g network", "No"):
        result["Network"] = "3G"
    elif "No" not in general.get("2g network", "No"):
        result["Network"] = "2G"
    else:
        result["Network"] = "Other"

    # Best data technology explicitly named in the speed row, else a generic
    # bucket derived from the network generation.
    for tech in ("lte", "dc-hsdpa", "hsdpa", "ev-do", "hsupa"):
        if tech in data.get("speed", "").lower():
            result["Data"] = tech.upper()
            break
    else:
        if result["Network"] == "4G":
            result["Data"] = "Other 4G"
        elif result["Network"] == "3G":
            result["Data"] = "Other 3G"
        elif "No" not in data.get("edge", "No"):
            result["Data"] = "EDGE"
        elif "No" not in data.get("gprs", "No"):
            result["Data"] = "GPRS"
        else:
            result["Data"] = "None"

    result["GPS"] = "Yes" if ("Yes" in features.get("gps", "")) else "No"
    result["Video"] = "Yes" if ("Yes" in tree["camera"].get("video", "")) else "No"

    if "No" in tree["camera"].get("primary", "No"):
        result["Camera"] = "No"
    else:
        c_match = PIXEL_RE.search(tree["camera"].get("primary", ""))
        result["Camera"] = c_match.group(0).replace(" ", "") if c_match else "Yes"

    # Fix: the display "size" row may be absent; the original indexed
    # tree["display"]["size"] directly and raised KeyError on such pages.
    display_size = tree["display"].get("size", "")
    dr_match = PIXEL_RE.search(display_size)
    result["Display Resolution"] = dr_match.group(0).replace(" ", "") if dr_match else "Unknown"
    ds_match = SIZE_RE.search(display_size)
    result["Display Size (inches)"] = ds_match.group(1) if ds_match else "Unknown"

    if "No" in features.get("java", "No"):
        result["Java"] = "No"
    else:
        midp_match = MIDP_RE.search(features["java"])
        result["Java"] = midp_match.group(0) if midp_match else "Yes"

    # Fix: the messaging row may be absent (direct indexing raised KeyError).
    msging = features.get("messaging", "").lower().replace("instant messaging", "im")
    for msg in ("sms", "mms", "mail", "im"):
        result[msg.upper()] = "Yes" if msg in msging else "No"

    result["OS"] = ""
    result["OS Version"] = ""
    # Renamed the loop variable from "os", which shadowed the os module.
    for os_name in ("Android", "iOS", "Symbian", "Blackberry", "Windows"):
        if os_name.lower() in features.get("os", "").lower():
            result["OS"] = os_name
            osv_match = VER_RE.search(features["os"])
            if osv_match:
                result["OS Version"] = osv_match.group(0)
            break

    if "No" not in features.get("browser", "No"):
        if HTML_RE.search(features["browser"]):
            result["Browser"] = "HTML"
        elif "wap" in features["browser"].lower():
            result["Browser"] = "WAP"
        else:
            result["Browser"] = "Other"
    else:
        result["Browser"] = "No"

    # Echo a couple of raw values for manual inspection downstream.
    keys = (
        ("features", "os"),
        ("features", "browser"),
    )
    for cat, sub in keys:
        result["RAW %s - %s"%(cat, sub)] = tree[cat].get(sub, "")
    return result
def main():
    """CLI driver: run one pipeline stage per invocation.

    Modes (first positional argument):
      search - look up a spec-page URL per phone name; append a "uri" column.
      scrape - fetch each uri and dump the raw spec rows as JSON.
      parse  - flatten the scraped JSON into a CSV of derived features.
    Options: -i/-o input/output paths; -s offset and -c count select a slice
    of the input rows to process (count < 1 means "to the end").
    """
    parser = optparse.OptionParser(usage='%prog [mode] [options]')
    parser.add_option("-i", "--input", action='store', dest="infilename",
                      help="Input file path")
    parser.add_option("-o", "--output", action='store', dest="outfilename", type="string",
                      help="Output file path")
    parser.add_option("-s", "--offset", action='store', dest="offset", type="int",
                      help="Start offset")
    parser.add_option("-c", "--count", action='store', dest="count", type="int",
                      help="Records to process")
    parser.set_defaults(count=0, offset=0)
    options, args = parser.parse_args()
    infile = open(options.infilename, "r")
    outfile = open(options.outfilename, "w")
    if args[0] == "search":
        reader = csv.reader(infile)
        writer = csv.writer(outfile)
        header = reader.next()
        if not "uri" in header:
            header.append("uri")
        uri_col = header.index("uri")
        name_col = header.index("name")
        writer.writerow(header)
        for i, row in enumerate(reader):
            # Only rows inside the [offset, offset+count) window are searched;
            # all rows are copied through to the output either way.
            if i >= options.offset and (options.count < 1 or i < (options.offset+options.count)):
                res = search(row[name_col])
                time.sleep(1.1)  # stay under the search API rate limit
                if len(row) < len(header):
                    row.append(None)
                row[uri_col] = res
            writer.writerow(row)
    elif args[0] == "scrape":
        reader = csv.reader(infile)
        results = []
        header = reader.next()
        uri_col = header.index("uri")
        name_col = header.index("name")
        for i, row in enumerate(reader):
            result = {
                "metadata": dict(zip(header, row)),
                "raw": None
            }
            # Scrape only rows with a non-empty uri inside the slice window.
            if (len(row) > uri_col and row[uri_col] and
                i >= options.offset and
                (options.count < 1 or i < (options.offset+options.count))):
                result["raw"] = scrape(row[uri_col])
            results.append(result)
        simplejson.dump(results, outfile)
    elif args[0] == "parse":
        indata = simplejson.load(infile)
        writer = csv.writer(outfile)
        header = ["name", "Subscribers", "uri"]
        rows = []
        for datum in indata:
            parsed = parse(datum)
            # Grow the header as new derived columns appear.
            # NOTE(review): rows built before a column first appears are
            # shorter than the final header - verify downstream tolerates it.
            for k in parsed:
                if k not in header:
                    header.append(k)
            row = [""]*len(header)
            for k, v in parsed.iteritems():
                row[header.index(k)] = v
            rows.append(row)
        writer.writerow(header)
        writer.writerows(rows)


if __name__ == '__main__':
    main()
| [
"narced133@github"
] | narced133@github |
4021f8f8a813d586632e89c11d8e49348a455081 | 043a2babd5856ff3ecb2af19c31d749e7836674a | /main1.py | 34ec34fe492f8d754e465ce38a44268f4a8d882f | [
"MIT"
] | permissive | jSm449g4d/AX-4 | 98cd76bac1f968b4d8a7d3c230601681deddc1de | 481f42ef4d6165a8b4c7107f15cce2079c974d5a | refs/heads/master | 2020-08-22T11:17:05.785523 | 2019-11-19T08:28:00 | 2019-11-19T08:28:00 | 216,040,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,386 | py | #9th
import numpy as np
import cv2
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Dense,Dropout,Conv2D,Conv2DTranspose,\
ReLU,Softmax,Flatten,Reshape,UpSampling2D,Input,Activation,LayerNormalization
from tqdm import tqdm
import random
from ARutil import mkdiring,rootYrel
# Load MNIST and scale pixel values from [0, 255] into [0, 1).
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train,x_test= (x_train.astype(np.float32)/ 256,x_test.astype(np.float32)/ 256)
def tf2img(tfs,dir="./",name="",epoch=0,ext=".png"):
    """Write a batch of [0, 1)-valued images (tensor or ndarray) as files.

    Files are named ``<name>_epoch-num_<epoch>-<i><ext>`` inside *dir*
    (created if missing).  ``dir`` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    mkdiring(dir)
    # Accept both TF tensors and numpy arrays (isinstance, not type ==).
    if not isinstance(tfs, np.ndarray):
        tfs = tfs.numpy()
    # Fix: clip before casting - a value of exactly 1.0 became 256 and
    # wrapped around to 0 under uint8 in the original code.
    tfs = np.clip(tfs * 256, 0, 255).astype(np.uint8)
    for i in range(tfs.shape[0]):
        cv2.imwrite(rootYrel(dir, name + "_epoch-num_" + str(epoch) + "-" + str(i) + ext), tfs[i])
def tf_ini():#About GPU resources
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for k in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[k], True)
if len(physical_devices)==0:print("GPU failed!")
return len(physical_devices)
tf_ini()
class AE(tf.keras.Model):
def __init__(self,trials={},opt=keras.optimizers.Adam(1e-3)):
super().__init__()
self.layer1=[Flatten(),
Dense(128,activation="elu"),
Dense(32,activation="elu"),
Dropout(0.1),
Dense(12,activation="sigmoid")
]
self.layer2=[Dense(32,activation="elu"),
Dense(128,activation="elu"),
Dropout(0.1),
Dense(28*28,activation="sigmoid"),
Reshape((28,28))
]
self.opt=opt
@tf.function
def call(self,mod):
for i in range(len(self.layer1)):mod=self.layer1[i](mod)
for i in range(len(self.layer2)):mod=self.layer2[i](mod)
return mod
@tf.function
def pred(self,mod):
for i in range(len(self.layer2)):mod=self.layer2[i](mod)
return mod
batch=16
def objective(trial):
model = AE()
model.build(input_shape=(batch,28,28))
model.summary()
optimizer =keras.optimizers.Adam(1e-3)
for epoch in tqdm(range(30000)):
ii=random.randint(0,x_train.shape[0]-batch)
with tf.GradientTape() as tape:
loss=tf.reduce_mean(keras.losses.binary_crossentropy(
x_train[ii:ii+batch],model(x_train[ii:ii+batch])))
gradients = tape.gradient(loss, model.trainable_variables)
gradients,_ = tf.clip_by_global_norm(gradients, 15)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
if epoch % 250 == 0:
iy=random.randint(0,x_test.shape[0]-batch)
loss=tf.reduce_mean(keras.losses.binary_crossentropy(
x_test[iy:iy+batch],model(x_test[iy:iy+batch])))
print("epoch:"+str(epoch)+" loss:"+str(float(loss)))
loss_for_return=float(loss)
tf2img(model(x_test[iy:iy+batch]),"./output1_p",epoch=epoch)
tf2img(x_test[iy:iy+batch],"./output1_t",epoch=epoch)
tf2img(model.pred(np.random.rand(batch,12).astype(np.float32)),"./output2_p",epoch=epoch)
return loss_for_return
objective(0)
| [
"noreply@github.com"
] | jSm449g4d.noreply@github.com |
0302276bcd5a478d894c7ab087ffeebb24c1b103 | ff896734043212b2da6f69471435a2dcda31612e | /src/resources/achievements/achievements.py | 7f410430d6ffdeedc53bc01f85b1afd6de5d33e3 | [
"MIT"
] | permissive | kirill-kundik/CinemaChallengeBackend | 2d77ff2776a1ad9d109738cddbb9ecebcfaebc54 | aea4ac801a9a5c907f36f07b67df162b4bd85044 | refs/heads/master | 2023-01-09T23:08:04.630520 | 2020-11-21T08:42:01 | 2020-11-21T08:42:01 | 314,540,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | """
Define the REST verbs relative to the users
"""
from flask_restful import Resource
from flask_restful.reqparse import Argument
from repositories import AchievementRepository
from util import render_resource, parse_params
class AchievementsResources(Resource):
""" Verbs relative to the users """
@staticmethod
def get(achievement_id, **_kwargs):
""" Return an user key information based on his name """
return render_resource(AchievementRepository.get(achievement_id))
@staticmethod
@parse_params(
Argument("name", help="Name of the achievement", location="json"),
Argument("short_description", help="Short description of the achievement", location="json"),
Argument("long_description", help="Long description of the achievement", location="json"),
Argument("difficulty", type=int, help="Difficulty of the achievement", location="json"),
Argument("image_src", help="Image of the achievement", location="json"),
Argument("bg_image_src", help="Bg image of the achievement", location="json"),
)
def put(achievement_id, name, short_description, long_description, difficulty, image_src, bg_image_src, **_kwargs):
return render_resource(AchievementRepository.update(
achievement_id,
name,
short_description,
long_description,
difficulty,
image_src,
bg_image_src
))
# @staticmethod
# @parse_params(
# Argument("age", location="json", required=True, help="The age of the user.")
# )
# @swag_from("../swagger/user/POST.yml")
# def post(last_name, first_name, age):
# """ Create an user based on the sent information """
# user = UserRepository.create(
# last_name=last_name, first_name=first_name, age=age
# )
# return jsonify({"user": user.json})
#
# @staticmethod
# @parse_params(
# Argument("age", location="json", required=True, help="The age of the user.")
# )
# @swag_from("../swagger/user/PUT.yml")
# def put(last_name, first_name, age):
# """ Update an user based on the sent information """
# repository = UserRepository()
# user = repository.update(last_name=last_name, first_name=first_name, age=age)
# return jsonify({"user": user.json})
| [
"goliathusua@gmail.com"
] | goliathusua@gmail.com |
52afe556959590049b64feb71a30c5fce7fedaf1 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/containerregistry/v20190501/get_webhook.py | 7948e368ab3b2de549dbfecb516f227ee8cca61a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,711 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebhookResult',
'AwaitableGetWebhookResult',
'get_webhook',
'get_webhook_output',
]
@pulumi.output_type
class GetWebhookResult:
"""
An object that represents a webhook for a container registry.
"""
def __init__(__self__, actions=None, id=None, location=None, name=None, provisioning_state=None, scope=None, status=None, tags=None, type=None):
if actions and not isinstance(actions, list):
raise TypeError("Expected argument 'actions' to be a list")
pulumi.set(__self__, "actions", actions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if scope and not isinstance(scope, str):
raise TypeError("Expected argument 'scope' to be a str")
pulumi.set(__self__, "scope", scope)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def actions(self) -> Sequence[str]:
"""
The list of actions that trigger the webhook to post notifications.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The location of the resource. This cannot be changed after the resource is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the webhook at the time the operation was called.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
"""
The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
The status of the webhook at the time the operation was called.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetWebhookResult(GetWebhookResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebhookResult(
actions=self.actions,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
scope=self.scope,
status=self.status,
tags=self.tags,
type=self.type)
def get_webhook(registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
webhook_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebhookResult:
"""
An object that represents a webhook for a container registry.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
:param str webhook_name: The name of the webhook.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
__args__['webhookName'] = webhook_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry/v20190501:getWebhook', __args__, opts=opts, typ=GetWebhookResult).value
return AwaitableGetWebhookResult(
actions=__ret__.actions,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
scope=__ret__.scope,
status=__ret__.status,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_webhook)
def get_webhook_output(registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
webhook_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebhookResult]:
"""
An object that represents a webhook for a container registry.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
:param str webhook_name: The name of the webhook.
"""
...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
b4cb6b650396f272e17879ab0ae5704357b257f3 | ce564f0a9b6f261e5303779ab95f8c1629487ac7 | /django_mysql_fix/version.py | e7cea81e8496619ab8dc010d38b5a71077b6eb17 | [
"MIT"
] | permissive | frol/django-mysql-fix | 192e334cb94c0fdf14516383022d6c5d4486c1d8 | 96d1e960b49ab686ea6d8d766bb4d86edb806e47 | refs/heads/master | 2021-01-19T14:09:38.956874 | 2014-05-03T16:07:11 | 2014-05-03T16:07:11 | 18,802,306 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | VERSION = (0, 1, 6)
__version__ = '.'.join(unicode(x) for x in VERSION)
| [
"frolvlad@gmail.com"
] | frolvlad@gmail.com |
98c8c17c2ddd71a74a4084a69a48368772ca6ab0 | e66e75cfeecdcc832c4b07e0a40b821b57170496 | /apps/organization/migrations/0007_courseorg_tag.py | eee412637daa8e93871df4ffed1c0b6cddf67974 | [] | no_license | chenkeren1994/mxonline | bd94783555c0445a7946f8c0eb604cb8c9d5ebf9 | 00af4a5a89e6a9d1324e6e2c18bc26515dbde0df | refs/heads/master | 2020-12-02T21:14:56.487657 | 2017-07-07T12:36:12 | 2017-07-07T12:36:12 | 96,279,389 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-07-05 11:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization', '0006_teacher_age'),
]
operations = [
migrations.AddField(
model_name='courseorg',
name='tag',
field=models.CharField(default='\u5168\u56fd\u77e5\u540d', max_length=10, verbose_name='\u673a\u6784\u6807\u7b7e'),
),
]
| [
"seal@sealdeMacBook-Pro.local"
] | seal@sealdeMacBook-Pro.local |
5d1ff2249d14c248fe7903d781b51ba405023c40 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/a2602090981a65652199423a185e3c2bd8b2c356-<merge_bgp_peer_af_other>-bug.py | 3f4c944a4a6b2f086abda5e8ebe56efc68a702a4 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,936 | py | def merge_bgp_peer_af_other(self, **kwargs):
' merge_bgp_peer_af_other '
module = kwargs['module']
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
conf_str = (CE_MERGE_BGP_PEER_AF_HEADER % (vrf_name, af_type, remote_address))
cmds = []
advertise_irb = module.params['advertise_irb']
if (advertise_irb != 'no_use'):
conf_str += ('<advertiseIrb>%s</advertiseIrb>' % advertise_irb)
if (advertise_irb == 'true'):
cmd = ('peer %s advertise irb' % remote_address)
else:
cmd = ('undo peer %s advertise irb' % remote_address)
cmds.append(cmd)
advertise_arp = module.params['advertise_arp']
if (advertise_arp != 'no_use'):
conf_str += ('<advertiseArp>%s</advertiseArp>' % advertise_arp)
if (advertise_arp == 'true'):
cmd = ('peer %s advertise arp' % remote_address)
else:
cmd = ('undo peer %s advertise arp' % remote_address)
cmds.append(cmd)
advertise_remote_nexthop = module.params['advertise_remote_nexthop']
if (advertise_remote_nexthop != 'no_use'):
conf_str += ('<advertiseRemoteNexthop>%s</advertiseRemoteNexthop>' % advertise_remote_nexthop)
if (advertise_remote_nexthop == 'true'):
cmd = ('peer %s advertise remote-nexthop' % remote_address)
else:
cmd = ('undo peer %s advertise remote-nexthop' % remote_address)
cmds.append(cmd)
advertise_community = module.params['advertise_community']
if (advertise_community != 'no_use'):
conf_str += ('<advertiseCommunity>%s</advertiseCommunity>' % advertise_community)
if (advertise_community == 'true'):
cmd = ('peer %s advertise-community' % remote_address)
else:
cmd = ('undo peer %s advertise-community' % remote_address)
cmds.append(cmd)
advertise_ext_community = module.params['advertise_ext_community']
if (advertise_ext_community != 'no_use'):
conf_str += ('<advertiseExtCommunity>%s</advertiseExtCommunity>' % advertise_ext_community)
if (advertise_ext_community == 'true'):
cmd = ('peer %s advertise-ext-community' % remote_address)
else:
cmd = ('undo peer %s advertise-ext-community' % remote_address)
cmds.append(cmd)
discard_ext_community = module.params['discard_ext_community']
if (discard_ext_community != 'no_use'):
conf_str += ('<discardExtCommunity>%s</discardExtCommunity>' % discard_ext_community)
if (discard_ext_community == 'true'):
cmd = ('peer %s discard-ext-community' % remote_address)
else:
cmd = ('undo peer %s discard-ext-community' % remote_address)
cmds.append(cmd)
allow_as_loop_enable = module.params['allow_as_loop_enable']
if (allow_as_loop_enable != 'no_use'):
conf_str += ('<allowAsLoopEnable>%s</allowAsLoopEnable>' % allow_as_loop_enable)
if (allow_as_loop_enable == 'true'):
cmd = ('peer %s allow-as-loop' % remote_address)
else:
cmd = ('undo peer %s allow-as-loop' % remote_address)
cmds.append(cmd)
allow_as_loop_limit = module.params['allow_as_loop_limit']
if allow_as_loop_limit:
conf_str += ('<allowAsLoopLimit>%s</allowAsLoopLimit>' % allow_as_loop_limit)
if (allow_as_loop_enable == 'true'):
cmd = ('peer %s allow-as-loop %s' % (remote_address, allow_as_loop_limit))
else:
cmd = ('undo peer %s allow-as-loop' % remote_address)
cmds.append(cmd)
keep_all_routes = module.params['keep_all_routes']
if (keep_all_routes != 'no_use'):
conf_str += ('<keepAllRoutes>%s</keepAllRoutes>' % keep_all_routes)
if (keep_all_routes == 'true'):
cmd = ('peer %s keep-all-routes' % remote_address)
else:
cmd = ('undo peer %s keep-all-routes' % remote_address)
cmds.append(cmd)
nexthop_configure = module.params['nexthop_configure']
if nexthop_configure:
conf_str += ('<nextHopConfigure>%s</nextHopConfigure>' % nexthop_configure)
if (nexthop_configure == 'local'):
cmd = ('peer %s next-hop-local' % remote_address)
cmds.append(cmd)
elif (nexthop_configure == 'invariable'):
cmd = ('peer %s next-hop-invariable' % remote_address)
cmds.append(cmd)
preferred_value = module.params['preferred_value']
if preferred_value:
conf_str += ('<preferredValue>%s</preferredValue>' % preferred_value)
cmd = ('peer %s preferred-value %s' % (remote_address, preferred_value))
cmds.append(cmd)
public_as_only = module.params['public_as_only']
if (public_as_only != 'no_use'):
conf_str += ('<publicAsOnly>%s</publicAsOnly>' % public_as_only)
if (public_as_only == 'true'):
cmd = ('peer %s public-as-only' % remote_address)
else:
cmd = ('undo peer %s public-as-only' % remote_address)
cmds.append(cmd)
public_as_only_force = module.params['public_as_only_force']
if (public_as_only_force != 'no_use'):
conf_str += ('<publicAsOnlyForce>%s</publicAsOnlyForce>' % public_as_only_force)
if (public_as_only_force == 'true'):
cmd = ('peer %s public-as-only force' % remote_address)
else:
cmd = ('undo peer %s public-as-only force' % remote_address)
cmds.append(cmd)
public_as_only_limited = module.params['public_as_only_limited']
if (public_as_only_limited != 'no_use'):
conf_str += ('<publicAsOnlyLimited>%s</publicAsOnlyLimited>' % public_as_only_limited)
if (public_as_only_limited == 'true'):
cmd = ('peer %s public-as-only limited' % remote_address)
else:
cmd = ('undo peer %s public-as-only limited' % remote_address)
cmds.append(cmd)
public_as_only_replace = module.params['public_as_only_replace']
if (public_as_only_replace != 'no_use'):
conf_str += ('<publicAsOnlyReplace>%s</publicAsOnlyReplace>' % public_as_only_replace)
if (public_as_only_replace == 'true'):
cmd = ('peer %s public-as-only force replace' % remote_address)
else:
cmd = ('undo peer %s public-as-only force replace' % remote_address)
cmds.append(cmd)
public_as_only_skip_peer_as = module.params['public_as_only_skip_peer_as']
if (public_as_only_skip_peer_as != 'no_use'):
conf_str += ('<publicAsOnlySkipPeerAs>%s</publicAsOnlySkipPeerAs>' % public_as_only_skip_peer_as)
if (public_as_only_skip_peer_as == 'true'):
cmd = ('peer %s public-as-only force include-peer-as' % remote_address)
else:
cmd = ('undo peer %s public-as-only force include-peer-as' % remote_address)
cmds.append(cmd)
route_limit = module.params['route_limit']
if route_limit:
conf_str += ('<routeLimit>%s</routeLimit>' % route_limit)
cmd = ('peer %s route-limit %s' % (remote_address, route_limit))
cmds.append(cmd)
route_limit_percent = module.params['route_limit_percent']
if route_limit_percent:
conf_str += ('<routeLimitPercent>%s</routeLimitPercent>' % route_limit_percent)
cmd = ('peer %s route-limit %s %s' % (remote_address, route_limit, route_limit_percent))
cmds.append(cmd)
route_limit_type = module.params['route_limit_type']
if route_limit_type:
conf_str += ('<routeLimitType>%s</routeLimitType>' % route_limit_type)
if (route_limit_type == 'alertOnly'):
cmd = ('peer %s route-limit %s %s alert-only' % (remote_address, route_limit, route_limit_percent))
cmds.append(cmd)
elif (route_limit_type == 'idleForever'):
cmd = ('peer %s route-limit %s %s idle-forever' % (remote_address, route_limit, route_limit_percent))
cmds.append(cmd)
elif (route_limit_type == 'idleTimeout'):
cmd = ('peer %s route-limit %s %s idle-timeout' % (remote_address, route_limit, route_limit_percent))
cmds.append(cmd)
route_limit_idle_timeout = module.params['route_limit_idle_timeout']
if route_limit_idle_timeout:
conf_str += ('<routeLimitIdleTimeout>%s</routeLimitIdleTimeout>' % route_limit_idle_timeout)
cmd = ('peer %s route-limit %s %s idle-timeout %s' % (remote_address, route_limit, route_limit_percent, route_limit_idle_timeout))
cmds.append(cmd)
rt_updt_interval = module.params['rt_updt_interval']
if rt_updt_interval:
conf_str += ('<rtUpdtInterval>%s</rtUpdtInterval>' % rt_updt_interval)
cmd = ('peer %s route-update-interval %s' % (remote_address, rt_updt_interval))
cmds.append(cmd)
redirect_ip = module.params['redirect_ip']
if (redirect_ip != 'no_use'):
conf_str += ('<redirectIP>%s</redirectIP>' % redirect_ip)
redirect_ip_validation = module.params['redirect_ip_validation']
if (redirect_ip_validation != 'no_use'):
conf_str += ('<redirectIPVaildation>%s</redirectIPVaildation>' % redirect_ip_validation)
reflect_client = module.params['reflect_client']
if (reflect_client != 'no_use'):
conf_str += ('<reflectClient>%s</reflectClient>' % reflect_client)
if (reflect_client == 'true'):
cmd = ('peer %s reflect-client' % remote_address)
else:
cmd = ('undo peer %s reflect-client' % remote_address)
cmds.append(cmd)
substitute_as_enable = module.params['substitute_as_enable']
if (substitute_as_enable != 'no_use'):
conf_str += ('<substituteAsEnable>%s</substituteAsEnable>' % substitute_as_enable)
if (substitute_as_enable == 'true'):
cmd = ('peer %s substitute-as' % remote_address)
else:
cmd = ('undo peer %s substitute-as' % remote_address)
cmds.append(cmd)
import_rt_policy_name = module.params['import_rt_policy_name']
if import_rt_policy_name:
conf_str += ('<importRtPolicyName>%s</importRtPolicyName>' % import_rt_policy_name)
cmd = ('peer %s route-policy %s import' % (remote_address, import_rt_policy_name))
cmds.append(cmd)
export_rt_policy_name = module.params['export_rt_policy_name']
if export_rt_policy_name:
conf_str += ('<exportRtPolicyName>%s</exportRtPolicyName>' % export_rt_policy_name)
cmd = ('peer %s route-policy %s export' % (remote_address, export_rt_policy_name))
cmds.append(cmd)
import_pref_filt_name = module.params['import_pref_filt_name']
if import_pref_filt_name:
conf_str += ('<importPrefFiltName>%s</importPrefFiltName>' % import_pref_filt_name)
cmd = ('peer %s filter-policy %s import' % (remote_address, import_pref_filt_name))
cmds.append(cmd)
export_pref_filt_name = module.params['export_pref_filt_name']
if export_pref_filt_name:
conf_str += ('<exportPrefFiltName>%s</exportPrefFiltName>' % export_pref_filt_name)
cmd = ('peer %s filter-policy %s export' % (remote_address, export_pref_filt_name))
cmds.append(cmd)
import_as_path_filter = module.params['import_as_path_filter']
if import_as_path_filter:
conf_str += ('<importAsPathFilter>%s</importAsPathFilter>' % import_as_path_filter)
cmd = ('peer %s as-path-filter %s import' % (remote_address, import_as_path_filter))
cmds.append(cmd)
export_as_path_filter = module.params['export_as_path_filter']
if export_as_path_filter:
conf_str += ('<exportAsPathFilter>%s</exportAsPathFilter>' % export_as_path_filter)
cmd = ('peer %s as-path-filter %s export' % (remote_address, export_as_path_filter))
cmds.append(cmd)
import_as_path_name_or_num = module.params['import_as_path_name_or_num']
if import_as_path_name_or_num:
conf_str += ('<importAsPathNameOrNum>%s</importAsPathNameOrNum>' % import_as_path_name_or_num)
cmd = ('peer %s as-path-filter %s import' % (remote_address, import_as_path_name_or_num))
cmds.append(cmd)
export_as_path_name_or_num = module.params['export_as_path_name_or_num']
if export_as_path_name_or_num:
conf_str += ('<exportAsPathNameOrNum>%s</exportAsPathNameOrNum>' % export_as_path_name_or_num)
cmd = ('peer %s as-path-filter %s export' % (remote_address, export_as_path_name_or_num))
cmds.append(cmd)
import_acl_name_or_num = module.params['import_acl_name_or_num']
if import_acl_name_or_num:
conf_str += ('<importAclNameOrNum>%s</importAclNameOrNum>' % import_acl_name_or_num)
cmd = ('peer %s filter-policy %s import' % (remote_address, import_acl_name_or_num))
cmds.append(cmd)
export_acl_name_or_num = module.params['export_acl_name_or_num']
if export_acl_name_or_num:
conf_str += ('<exportAclNameOrNum>%s</exportAclNameOrNum>' % export_acl_name_or_num)
cmd = ('peer %s filter-policy %s export' % (remote_address, export_acl_name_or_num))
cmds.append(cmd)
ipprefix_orf_enable = module.params['ipprefix_orf_enable']
if (ipprefix_orf_enable != 'no_use'):
conf_str += ('<ipprefixOrfEnable>%s</ipprefixOrfEnable>' % ipprefix_orf_enable)
if (ipprefix_orf_enable == 'true'):
cmd = ('peer %s capability-advertise orf ip-prefix' % remote_address)
else:
cmd = ('undo peer %s capability-advertise orf ip-prefix' % remote_address)
cmds.append(cmd)
is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
if (is_nonstd_ipprefix_mod != 'no_use'):
conf_str += ('<isNonstdIpprefixMod>%s</isNonstdIpprefixMod>' % is_nonstd_ipprefix_mod)
if (is_nonstd_ipprefix_mod == 'true'):
if (ipprefix_orf_enable == 'true'):
cmd = ('peer %s capability-advertise orf non-standard-compatible' % remote_address)
else:
cmd = ('undo peer %s capability-advertise orf non-standard-compatible' % remote_address)
cmds.append(cmd)
else:
if (ipprefix_orf_enable == 'true'):
cmd = ('peer %s capability-advertise orf' % remote_address)
else:
cmd = ('undo peer %s capability-advertise orf' % remote_address)
cmds.append(cmd)
orftype = module.params['orftype']
if orftype:
conf_str += ('<orftype>%s</orftype>' % orftype)
orf_mode = module.params['orf_mode']
if orf_mode:
conf_str += ('<orfMode>%s</orfMode>' % orf_mode)
if (ipprefix_orf_enable == 'true'):
cmd = ('peer %s capability-advertise orf ip-prefix %s' % (remote_address, orf_mode))
else:
cmd = ('undo peer %s capability-advertise orf ip-prefix %s' % (remote_address, orf_mode))
cmds.append(cmd)
soostring = module.params['soostring']
if soostring:
conf_str += ('<soostring>%s</soostring>' % soostring)
cmd = ('peer %s soo %s' % (remote_address, soostring))
cmds.append(cmd)
cmd = ''
default_rt_adv_enable = module.params['default_rt_adv_enable']
if (default_rt_adv_enable != 'no_use'):
conf_str += ('<defaultRtAdvEnable>%s</defaultRtAdvEnable>' % default_rt_adv_enable)
if (default_rt_adv_enable == 'true'):
cmd += ('peer %s default-route-advertise' % remote_address)
else:
cmd += ('undo peer %s default-route-advertise' % remote_address)
cmds.append(cmd)
default_rt_adv_policy = module.params['default_rt_adv_policy']
if default_rt_adv_policy:
conf_str += ('<defaultRtAdvPolicy>%s</defaultRtAdvPolicy>' % default_rt_adv_policy)
cmd = (' route-policy %s' % default_rt_adv_policy)
cmds.append(cmd)
default_rt_match_mode = module.params['default_rt_match_mode']
if default_rt_match_mode:
conf_str += ('<defaultRtMatchMode>%s</defaultRtMatchMode>' % default_rt_match_mode)
if (default_rt_match_mode == 'matchall'):
cmd += ' conditional-route-match-all'
elif (default_rt_match_mode == 'matchany'):
cmd += ' conditional-route-match-any'
if cmd:
cmds.append(cmd)
add_path_mode = module.params['add_path_mode']
if add_path_mode:
conf_str += ('<addPathMode>%s</addPathMode>' % add_path_mode)
if (add_path_mode == 'receive'):
cmd += ' add-path receive'
elif (add_path_mode == 'send'):
cmd += ' add-path send'
elif (add_path_mode == 'both'):
cmd += ' add-path both'
if cmd:
cmds.append(cmd)
adv_add_path_num = module.params['adv_add_path_num']
if adv_add_path_num:
conf_str += ('<advAddPathNum>%s</advAddPathNum>' % adv_add_path_num)
cmd += (' advertise add-path path-number %s' % adv_add_path_num)
if cmd:
cmds.append(cmd)
origin_as_valid = module.params['origin_as_valid']
if (origin_as_valid != 'no_use'):
conf_str += ('<originAsValid>%s</originAsValid>' % origin_as_valid)
vpls_enable = module.params['vpls_enable']
if (vpls_enable != 'no_use'):
conf_str += ('<vplsEnable>%s</vplsEnable>' % vpls_enable)
vpls_ad_disable = module.params['vpls_ad_disable']
if (vpls_ad_disable != 'no_use'):
conf_str += ('<vplsAdDisable>%s</vplsAdDisable>' % vpls_ad_disable)
update_pkt_standard_compatible = module.params['update_pkt_standard_compatible']
if (update_pkt_standard_compatible != 'no_use'):
conf_str += ('<updatePktStandardCompatible>%s</updatePktStandardCompatible>' % update_pkt_standard_compatible)
conf_str += CE_MERGE_BGP_PEER_AF_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if ('<ok/>' not in recv_xml):
module.fail_json(msg='Error: Merge bgp peer address family other failed.')
return cmds | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
c3caa449cb549e5f87a3fd862b5752ed0787987c | dc8995b097fb2c064bb6d8ef4e078733a3623311 | /one.py | ec454695a1003fab8f706cceb689b3b0f432e573 | [] | no_license | kannisko/passwords | 5ea228abe7698ddd28685e3ccdc413843898fca4 | 9d37552ad233ccfd33b962909a6d3774721104c6 | refs/heads/master | 2020-11-30T23:58:15.862737 | 2019-12-29T17:20:48 | 2019-12-29T17:20:48 | 230,515,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | def ff(a,b="defauldB"):
print("a:"+a+ " b:"+b)
ff(b="b",a="aaa") | [
"kannisko@gmail.com"
] | kannisko@gmail.com |
b9bc9a1d8da56368d27e235aee52fc65c1c03ff4 | a5b62f1b80eab5ed4179efdd1a8cbfcb43fb8be7 | /handlers/handler.py | e94cf2316ab8e6a0b9ec4f7c45f3bcf75839ec32 | [
"BSD-3-Clause"
] | permissive | pawelszydlo/alert_broker | 4c473e7f1dfd07b9150a204d201ac28028ff1403 | b99439d01a43da28c89677b170fff8b315938e3b | refs/heads/master | 2020-07-22T11:38:10.259345 | 2019-09-09T00:29:53 | 2019-09-09T00:29:53 | 207,188,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | """Handler class. All handlers must inherit from it."""
class Handler:
def __init__(self, alert: str):
self.broker = None
self.alert = alert
def alert_on(self):
"""Will be run when alert pops up."""
pass
def alert_off(self):
"""Will be run when alert disappears."""
pass
def alert_ongoing(self):
"""Will be run every second when the alert is up."""
pass
| [
"pawelszydlo@gmail.com"
] | pawelszydlo@gmail.com |
22bf78aec17ef9a40c59bc9d641bf6bde47b595f | 53eb0a066fd73b6996b9c547536699865d499686 | /TestcaseAdmin/Usermg&Group&Authmg/test_Usergpcase.py | 4b32583f45c873dbd735901f2d163a291de7bb2c | [] | no_license | zhangliwen1112/HoliEBR-UI | 862e2aeda7b2884df2aa586f4cf630b50b91a1af | c755c978d2c977f4962a3f4426e93524fd5a5d4f | refs/heads/master | 2023-05-07T02:12:54.662392 | 2021-05-26T08:47:36 | 2021-05-26T08:47:36 | 360,793,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | #coding=utf-8
"""
Created on 2020/9/8
@usergpor: lianxiujuan
@desc: 用户组
"""
import pytest
import sys
from src.pageobjectAdmin.pageUsergp import *
from DataAdmin.UsergrpData import *
from src.public.common.Login import *
from src.public.common.Select_Item import *
class Test_Usergp:
    """UI test suite for the user-group admin pages: login, add, set
    permissions, edit and delete (in that execution order)."""
    def test_usergp_login(self):
        # Log in and navigate to the user-group management page.
        login_usergp()
        sleep(1)
    # Create a new user group
    def test_add_usergp(self):
        log.info("开始执行用例%s" % sys._getframe().f_code.co_name)
        usergp_add(addcodedata, addnamedata)
        time.sleep(2)
        # The freshly added group name should appear in the page source.
        assert new_page_source(addnamedata)
    # Assign permissions to the group
    def test_setauth_usergp(self):
        log.info("开始执行用例%s" % sys._getframe().f_code.co_name)
        select_item(addnamedata)
        usergp_setauth()
        time.sleep(2)
        # Called twice: open the permission dialog, then confirm/close it.
        usergp_setauth()
    # Edit the user group
    def test_edit_usergp(self):
        log.info("开始执行用例%s" % sys._getframe().f_code.co_name)
        select_item(addnamedata)
        usergp_edit(editnamedata)
        time.sleep(2)
        assert new_page_source(editnamedata)
    # Delete the user group
    def test_delete_usergp(self):
        log.info("开始执行用例%s" % sys._getframe().f_code.co_name)
        # NOTE(review): selects the original name even though the edit test
        # renamed the group to editnamedata -- confirm usergp_edit restores it.
        select_item(addnamedata)
        usergp_delete()
        time.sleep(2)
        # The deleted group must no longer be present.
        assert new_page_source(addnamedata) == False
        new_click(authmg)
| [
"411454954@qq.com"
] | 411454954@qq.com |
d504b3f82cd91bbfd23af3e8247f8d84915a2cfb | 198ca6124635e71c382237d68984e51f1f925829 | /exerc_315.py | 775e3819592a8b2e353d86881b07018224e7d8fa | [] | no_license | fabianomalves/python_introduction | ab7922ef9fbedb92209592422b8b93a3afdfd413 | 5c23b146318064173fcf3f8047d2fd08544873a7 | refs/heads/master | 2021-10-28T20:12:32.633450 | 2019-04-24T22:49:22 | 2019-04-24T22:49:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | """
Calculate the reduce life time for a smoke person.
How many cigarettes per day and how many years smoking.
Less 10 minutes life after one cigarette.
How many days are loose.
"""
# Ask for consumption, then convert the 10-minutes-per-cigarette loss
# into whole days of life lost.
cigs_per_day = int(input('How many cigarettes per day? '))
years_smoking = float(input('How many years smoking? '))
minutes_lost = years_smoking * 365 * cigs_per_day * 10
days_lost = minutes_lost / (60 * 24)
print('You have %5.2f days' % days_lost)
| [
"fabiano.moreira.alves@gmail.com"
] | fabiano.moreira.alves@gmail.com |
8e8ee505088ce6ccc522a671e8d93dc13c4cfbb6 | 284a184343dc492ccac57e92e0766cb559e2404f | /omcp/diagnoses/register_status.py | 83a97fab7b922accbe3591b4efbeeea73b6ec31b | [
"MIT"
] | permissive | shutogeorgio/omcp-service | 298299390bdab4755ddd4b353e1be3d65677ee1f | 4d4c2943d3393c77019a780a0caa1457e14b7d8d | refs/heads/main | 2023-01-23T13:11:08.578901 | 2020-12-10T23:54:48 | 2020-12-10T23:54:48 | 317,579,158 | 1 | 1 | NOASSERTION | 2020-12-12T07:31:58 | 2020-12-01T15:09:30 | Python | UTF-8 | Python | false | false | 165 | py | from django.db import models
class RegisterStatus(models.TextChoices):
    """Lifecycle states of a diagnosis registration (stored as text)."""
    UNREGISTERED = "UNREGISTERED"
    REGISTERED = "REGISTERED"
    COMPLETED = "COMPLETED"
| [
"dev.shudiscrete@gmail.com"
] | dev.shudiscrete@gmail.com |
95c0947ee3556cf7aa6e680a014dc723a0e669aa | 80946175e7498b4d0d549a8a8d0a6d8eecb1c146 | /tvb/command.py | f5f59fe0c6acc2ebead92474bcfc62e587ae5859 | [
"Apache-2.0"
] | permissive | zlyq/tvb | 732c78f62e92e275cf28cb6a847ab014d43cc14d | 098936b7f054fbdd46e7509cdfdbe21914e6dcb8 | refs/heads/master | 2023-03-19T19:58:44.322854 | 2016-02-24T06:07:07 | 2016-02-24T06:07:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,490 | py | # encoding: utf-8
'''
@author: Juncheng Chen
@copyright: 1999-2015 Alibaba.com. All rights reserved.
@license: Apache Software License 2.0
@contact: juncheng.cjc@outlook.com
'''
import os
from datetime import datetime
import copy
import logging
logger = logging.getLogger(__name__)
class Command(object):
    """Base class for a named adb shell command.

    Holds the command line, an optional cleanup command line and the handle
    of the currently running subprocess (if any).
    """
    def __init__(self, name, command=None, clean_command=None):
        self.name = name
        self.command = command
        self.clean_command = clean_command
        # Handle of the last spawned subprocess; None when nothing is running.
        self.process = None
    def new(self, device, args):
        """Bind a device and CLI args, then return an independent deep copy."""
        self.device = device
        self.args = args
        return copy.deepcopy(self)
    def kill(self):
        """Terminate the running subprocess (if any) and wait for it to exit."""
        if self.process:
            self.process.kill()
            self.process.wait()
            self.process = None
            logger.debug('kill %s %s' % (self.name, self.command))
    def is_done(self):
        """Return True when no subprocess is running or it has already exited."""
        if self.process:
            return self.process.poll() is not None
        return True
    def execute(self):
        """Run the command; subclasses must override this."""
        raise Exception('%s not implement execute' % self.__class__)
    def clean(self):
        """Optional cleanup hook; subclasses override when needed."""
        pass
class LastCommand(Command):
    """Command executed once; its whole output is appended to <name>.txt."""
    def execute(self):
        if self.command:
            logger.debug('execute single command %s' % self.command)
            with open(os.path.join(self.device.log_dir, '%s.txt' % self.name), 'a') as f:
                self.process = self.device.shell(self.command)
                f.write(self.device.get_process_stdout(self.process))
class LoopCommand(Command):
    """Command executed on every polling tick; each run's output is appended
    to <name>.txt with a timestamp header."""
    def execute(self):
        if self.command:
            logger.debug('execute loop command %s' % self.command)
            with open(os.path.join(self.device.log_dir, '%s.txt' % self.name), 'a') as f:
                self.process = self.device.shell(self.command)
                f.write(">>%s>>\n%s\n" % (datetime.now().strftime('%m/%d %H:%M:%S'), self.device.get_process_stdout(self.process)))
class AnrLoopCommand(LoopCommand):
    """Watches the ANR trace file's listing; when it changes, snapshots
    /data/anr/traces.txt into a timestamped log file."""
    def execute(self):
        logger.debug('execute loop command %s' % self.command)
        # First run: remember the baseline `ls -l` output of the trace file.
        if not hasattr(self, 'timestamp'):
            self.process = self.device.shell(self.command)
            self.timestamp = self.device.get_process_stdout(self.process)
        self.process = self.device.shell(self.command)
        timestamp = self.device.get_process_stdout(self.process)
        if timestamp != self.timestamp:
            # Listing changed -> a new ANR happened; capture the traces.
            self.timestamp = timestamp
            with open(os.path.join(self.device.log_dir, '%s_%s.txt' % (self.name, datetime.now().strftime('%Y%m%d%H%M%S'))), 'w') as f:
                self.process = self.device.shell('cat /data/anr/traces.txt')
                f.write(self.device.get_process_stdout(self.process))
class MemdetailLoopCommand(LoopCommand):
    """Loop command dumping detailed meminfo for the first monitored process;
    disabled (command is None) when no process name was supplied."""
    def new(self, device, args):
        targets = args.process_names
        self.command = ('dumpsys meminfo -a %s' % targets[0]) if targets else None
        return LoopCommand.new(self, device, args)
class ShowMapLoopCommand(LoopCommand):
    """Loop command running showmap against the pid of the first monitored
    process; disabled (command is None) when no process name was supplied."""
    def new(self, device, args):
        targets = args.process_names
        self.command = ("ps | grep %s | awk '{print $2}' | xargs showmap" % targets[0]) if targets else None
        return LoopCommand.new(self, device, args)
class DumpheapLoopCommand(LoopCommand):
    """Roughly once per hour: triggers `am dumpheap` for the first monitored
    process, then pulls the resulting .hprof from the device two ticks later."""
    def new(self, device, args):
        # Number of polling ticks per hour.
        self.delay = 3600 / args.interval
        self.hprof = '/sdcard/dumpheap.hprof'
        self.clean_command = 'rm -f %s' % self.hprof
        if args.process_names:
            self.command = "am dumpheap %s %s" % (args.process_names[0], self.hprof)
        else:
            self.command = None
        self.i = 0
        return LoopCommand.new(self, device, args)
    def execute(self):
        if self.command:
            if self.i == (self.delay - 2):
                # Two ticks before the pull: clear the old dump and start a new one.
                logger.debug('execute loop command %s' % self.command)
                self.clean()
                self.device.get_process_stdout(self.device.shell(self.command))
            elif self.i == self.delay:
                # Dump had two ticks to finish; pull it and restart the cycle.
                self.i = 0
                self.device.adb('pull %s %s' % (self.hprof, os.path.join(self.device.log_dir, '%s_%s.hprof' % (self.name, datetime.now().strftime('%Y%m%d_%H%M%S')))))
            self.i += 1
    def clean(self):
        """Remove the previous heap dump file from the device."""
        if self.clean_command:
            logger.debug('execute loop clean command %s' % self.clean_command)
            self.device.get_process_stdout(self.device.shell(self.clean_command))
class DurableCommand(Command):
    """A long-running shell command (e.g. logcat) whose output streams to a
    timestamped log file; it is restarted whenever it is found to have exited."""
    def execute(self):
        if self.command and self.is_done():
            logger.debug('execute durable command %s' % self.command)
            self.clean()
            self.process = self.device.shell(self.command, os.path.join(self.device.log_dir, '%s_%s.txt' % (self.name, datetime.now().strftime('%Y%m%d_%H%M%S'))))
    def clean(self):
        """Run the configured cleanup command, then wait for the old process."""
        if self.clean_command:
            # Bug fix: previously logged self.command here, which misreported
            # which command was being run (DumpheapLoopCommand.clean logs the
            # clean command, as done here).
            logger.debug('execute durable clean command %s' % self.clean_command)
            self.device.get_process_stdout(self.device.shell(self.clean_command))
        if self.process:
            self.process.wait()
# On-device paths used to feed the monkey tool.
MONKEYBLACKLIST = '/mnt/sdcard/tvb_monkey_blacklist.txt'
MONKEYSCRIPT = '/mnt/sdcard/tvb_monkey_script.txt'
# Header lines that must precede the body of a monkey script file.
MONKEYSCRIPTTITLE = ['type = tvb_user', 'count = 1', 'speed = 1.0', 'start data >>']
# Base monkey invocation: verbose and resilient to app errors.
MONKEYCMD = 'monkey -v -v -v --ignore-crashes --ignore-timeouts --ignore-security-exceptions --kill-process-after-error --monitor-native-crashes'
# Effectively "run forever": the event count passed to monkey.
MONKEYCOUNT = 1200000000
# Default event-type distribution (percentages); zero entries are skipped.
MONKEYPCT = {'pct-touch': 0, 'pct-motion': 0, 'pct-trackball': 5, 'pct-nav': 55, 'pct-majornav': 15, 'pct-syskeys': 15, 'pct-appswitch': 9, 'pct-anyevent': 1}
class MonkeyDurableCommand(DurableCommand):
    """Base class for monkey runners; kills any leftover monkey before (re)start."""
    def new(self, device, args):
        self.clean_command = 'busybox killall com.android.commands.monkey'
        return DurableCommand.new(self, device, args)
    def get_monkey_percent(self, args):
        """Build the --pct-* options from CLI args, falling back to the
        non-zero MONKEYPCT defaults when no override is given."""
        percent = []
        for pct in MONKEYPCT:
            # NOTE(review): MONKEYPCT keys contain dashes ('pct-touch') while
            # argparse normally stores underscored attribute names -- confirm
            # that args really exposes dashed attributes.
            if hasattr(args, pct):
                value = getattr(args, pct)
                if value:
                    percent.append('--%s %s' % (pct, value))
        if percent:
            return ' '.join(percent)
        return ' '.join(['--%s %s' % (k, v) for k, v in MONKEYPCT.iteritems() if v])
class AppMonkeyDurableCommand(MonkeyDurableCommand):
    """Runs monkey restricted to the packages listed on the CLI (-p ...)."""
    def new(self, device, args):
        extra = ''
        if args.monkey:
            extra = '-p ' + ' -p '.join(args.monkey)
        self.command = '%s %s %s --throttle %s %s' % (MONKEYCMD, self.get_monkey_percent(args), extra, args.throttle, MONKEYCOUNT)
        return MonkeyDurableCommand.new(self, device, args)
class BlacklistMonkeyDurableCommand(MonkeyDurableCommand):
    """Runs monkey with a package blacklist that is first written to the device."""
    def new(self, device, args):
        if args.blacklist:
            # Write the blacklist file on the device, one package per line.
            cmd = "echo '%s' > %s" % ('\\n'.join(args.blacklist), MONKEYBLACKLIST)
            device.shell(cmd)
            extra = '--pkg-blacklist-file %s' % MONKEYBLACKLIST
            self.command = '%s %s %s --throttle %s %s' % (MONKEYCMD, self.get_monkey_percent(args), extra, args.throttle, MONKEYCOUNT)
        else:
            self.command = None
        return MonkeyDurableCommand.new(self, device, args)
class ScriptMonkeyDurableCommand(MonkeyDurableCommand):
    """Runs monkey driven by a user-provided script file pushed to the device."""
    def new(self, device, args):
        if args.script:
            with open(args.script, 'r') as f:
                # Prepend the mandatory script header before writing to the device.
                cmd = "echo '%s' > %s" % ('\\n'.join(MONKEYSCRIPTTITLE + f.read().splitlines()), MONKEYSCRIPT)
            device.shell(cmd)
            extra = '-f %s ' % MONKEYSCRIPT
            self.command = '%s %s --throttle %s %s' % (MONKEYCMD, extra, args.throttle, MONKEYCOUNT)
        else:
            self.command = None
        return MonkeyDurableCommand.new(self, device, args)
# Registry of repeatable/looping commands, keyed by CLI name.
COMMAND_CONFIG = {
    'top': LoopCommand('top', 'top -n 1'),
    'meminfo': LoopCommand('meminfo', 'dumpsys meminfo'),
    'cpuinfo': LoopCommand('cpuinfo', 'dumpsys cpuinfo'),
    'mali': LoopCommand('mali', 'librank -P /dev/mali'),
    'activity': LoopCommand('activity', 'dumpsys activity'),
    'oom': LoopCommand('activity_oom', 'dumpsys activity oom'),
    'processes': LoopCommand('activity_processes', 'dumpsys activity processes'),
    'procstats': LoopCommand('activity_procstats', 'dumpsys activity procstats'),
    'temp0': LoopCommand('temperature_zone0', 'cat /sys/class/thermal/thermal_zone0/temp'),
    'temp1': LoopCommand('temperature_zone1', 'cat /sys/class/thermal/thermal_zone1/temp'),
    'anr': AnrLoopCommand('anr', 'ls -l /data/anr/traces.txt'),
    'memdetail': MemdetailLoopCommand('memdetail'),
    'showmap': ShowMapLoopCommand('showmap'),
    'dumpheap': DumpheapLoopCommand('dumpheap'),
    'logcat': DurableCommand('logcat', 'logcat -v threadtime', 'busybox killall logcat'),
    'event': DurableCommand('logcat_event', 'logcat -v threadtime -b events'),
    'monkey': AppMonkeyDurableCommand('monkey'),
    'blacklist': BlacklistMonkeyDurableCommand('monkey'),
    'script': ScriptMonkeyDurableCommand('monkey'),
}
# Commands run exactly once at the end of a session.
LAST_COMMAND_CONFIG = {
    'bugreport': LastCommand('bugreport', 'bugreport'),
    'usagestats': LastCommand('usagestats', 'dumpsys usagestats')
}
# Monkey variants are selected via dedicated CLI flags, not the generic list.
excluded = ['monkey', 'blacklist', 'script']
support_commands = sorted([key for key in COMMAND_CONFIG.keys() if key not in excluded] + LAST_COMMAND_CONFIG.keys())
default_commands = sorted(['top', 'cpuinfo', 'meminfo', 'logcat', 'anr', 'bugreport'])
| [
"juncheng.cjc@alibaba-inc.com"
] | juncheng.cjc@alibaba-inc.com |
df9dd24400578916c3d14c13ccc9926eddfabb48 | 38eb57300418e6f10433630437388f779ce50e09 | /cookie_and_session/app02_session/views.py | 25a4bbc4abf9387fc8de2e70f90c22b5c03e8db7 | [] | no_license | SelfShadows/Django-Flask | f37839f763133f0d62bffad3128171c426a1c038 | 13e32d1c8aac1532b43323e1891c423fe78f2813 | refs/heads/master | 2021-01-04T12:31:18.018508 | 2020-02-14T16:29:27 | 2020-02-14T16:29:27 | 240,550,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | from django.shortcuts import render ,redirect
from functools import wraps
from django import views
# Django提供的工具,把函数装饰器转变为方法装饰器
from django.utils.decorators import method_decorator
from app02_session import models
def check_login(func):
    """View decorator: redirect anonymous users to the login page, preserving
    the originally requested path in the ?next= query parameter."""
    @wraps(func) # preserve the wrapped view's metadata
    def inner(request, *args, **kwargs):
        # Read the login flag from the session.
        ret = request.session.get("is_login")
        # Under the hood Django: 1. reads the random session key from the
        # cookie; 2. loads/decrypts session_data from the DB into a dict;
        # 3. looks up is_login in that dict.
        if ret == "1":
            # Already logged in: run the wrapped view.
            return func(request, *args, **kwargs)
        else:
            # Not logged in: bounce to the login page with a return URL.
            next_url = request.path_info
            return redirect("/app02/login/?next={}".format(next_url))
    return inner
def login(request):
    """Login view: on successful POST sets session flags and redirects to
    ?next= (or the home page); otherwise renders the login form."""
    if request.method == "POST":
        user = request.POST.get("user")
        pwd = request.POST.get("pwd")
        # Pull the post-login destination from the URL.
        next_url = request.GET.get("next")
        # Purge all sessions whose expiry date is already in the past.
        request.session.clear_expired()
        # NOTE(review): plaintext password comparison against the DB --
        # consider hashing.
        have_user = models.Person.objects.filter(username=user, password=pwd)
        if have_user:
            # Login succeeded: tell the browser to store the session cookie.
            if next_url:
                ret = redirect(next_url)
            else:
                ret = redirect("/app02/home/")
            # Populate the session.
            request.session["is_login"] = "1"
            request.session["user_id"] = have_user[0].id
            # Set the session timeout.
            request.session.set_expiry(5) # expires after 5 seconds
            return ret
    return render(request, "app02/login.html")
# Logout view.
def logout(request):
    """Drop the session (server-side data AND the cookie) and go to login."""
    # request.session.delete() would remove only the server-side session data;
    # flush() removes the session data and the cookie.
    request.session.flush()
    return redirect("/app02/login/")
@check_login
def home(request):
    """Home page: show the logged-in person, or a placeholder ("匿名用户",
    anonymous user) when the session's user_id no longer matches a Person."""
    user_id = request.session.get("user_id")
    user_obj = models.Person.objects.filter(id=user_id)
    if user_obj:
        return render(request, "app02/home.html", {"user_obj": user_obj[0]})
    else:
        return render(request, "app02/home.html", {"user_obj": "匿名用户"})
@check_login
def index(request):
    """Login-protected index page."""
    return render(request, "app02/index.html")
class UserInfo(views.View):
    """Login-protected class-based view for the user info page."""
    # Convert the function decorator into a method decorator.
    @method_decorator(check_login)
    def get(self, request):
return render(request, "app02/userinfo.html") | [
"870670791@qq.com"
] | 870670791@qq.com |
df5ae065301feb09b49a290e10a7d73cfe0ea9d6 | a19a9036257fff2598390eb952b7f571a74ab35f | /Day 26/01 SQLAlchemy/load_authors.py | 7014e7e0d73b82d30bbdf84a646cec6087bcb0aa | [] | no_license | k-sheikh/bnta_cohort1 | 76420af4edae858acb5764862ce2f47575c61b4d | 9bdb52c4d6332a357c6d9fb0392b89741a598149 | refs/heads/master | 2023-04-16T08:50:18.732412 | 2021-04-28T11:11:14 | 2021-04-28T11:11:14 | 339,024,067 | 0 | 0 | null | 2021-04-28T11:11:15 | 2021-02-15T09:22:41 | Jupyter Notebook | UTF-8 | Python | false | false | 1,051 | py | from models import Author, Book, session
def load_lines(filename):
    """Read ``../data/<filename>`` and return its non-blank lines, stripped.

    Bug fix: the f-string previously contained no placeholder, so the
    *filename* argument was silently ignored and a literal path was opened.
    """
    with open(f'../data/{filename}') as input_file:
        lines = input_file.readlines()
    # Drop blank lines and surrounding whitespace.
    return [line.strip() for line in lines if line.strip()]
def load_data(session):
    """Wipe and repopulate the Author/Book tables from the pipe-delimited
    files authors.txt and books.txt under ../data/."""
    s = session()
    # Start from a clean slate.
    s.query(Author).delete()
    s.query(Book).delete()
    # e.g. Ursula K. Le Guin|1929
    author_lines = load_lines('authors.txt')
    for line in author_lines:
        name, year = line.split('|')
        name, year = name.strip(), int(year)
        author = Author(name=name, year_of_birth=year)
        s.add(author)
    # e.g. Ursula K. Le Guin|A Wizard of Earthsea|1968
    book_lines = load_lines('books.txt')
    for line in book_lines:
        author_name, title, year = line.split('|')
        author_name, title, year = author_name.strip(), title.strip(), int(year)
        # Each book's author must already exist; .one() raises otherwise.
        author = s.query(Author).filter(Author.name == author_name).one()
        book = Book(title=title, year_of_publication=year, author=author)
        s.add(book)
    s.commit()
# Populate the database on import.
load_data(session)
| [
"micheledicosmo@vascosmi.com"
] | micheledicosmo@vascosmi.com |
b71d360380405669e46f743526e37cf89a8813b1 | 18d78944d4733a8b19aef1acb67527641fc60d18 | /penning/seq/__init__.py | c491ae5b3d6ae54a0213c37f65de6fde9170450a | [] | no_license | iontrapimperial/penning_analysis | ee6f84a8770ab7803d20e3b567851b4d0c890666 | 76c2941d3f7f58624f65c131b8d48d2fb7fdd35c | refs/heads/master | 2020-03-23T05:39:24.929708 | 2019-06-06T10:29:44 | 2019-08-06T13:15:19 | 141,158,672 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | """
Module for creation of sequences of pulses for experiments. This module can
write out XML files for viewing the sequences in the Spectroscopy Contoller, but
more importantly it can directly write out FPGA hex files (though these still
need to be uploaded).
The creation and file writing functions are `create_{}()` and `write_{}()`
respectively, where the `{}` can be either `xml` or `hex`.
The building blocks of the pulse sequences are in the `elements` module, where
more help is available. Typically you might want to do `from elements import *`
- this will only put the building block elements in your global namespace.
"""
from .api import *
from . import elements
from . import api as _api
__all__ = ['elements'] + _api.__all__
| [
"jakelishman@gmail.com"
] | jakelishman@gmail.com |
3cecfe1047038234f1eec23f88841561a0718e0e | 1fd04a2d5bbde366635d020f6019d9e7b55fd29a | /code for fun/dice.py | ce31db83390ce613bf4533f9246573393eb638a0 | [] | no_license | bkravitz/climatecode | d03d6a898e63a358921a111692592ac6938587bd | 989e254bb540dfc67c78f3247e921b10a427fa55 | refs/heads/main | 2023-01-23T23:54:01.674690 | 2020-11-15T23:05:27 | 2020-11-15T23:05:27 | 303,485,070 | 0 | 1 | null | 2020-10-13T12:50:46 | 2020-10-12T18:54:14 | MATLAB | UTF-8 | Python | false | false | 1,277 | py | import random
print "Enter dice you want to roll"
print "Examples: 1d6, 2d12, 2d8+1d4, etc."
print "You can also add modifiers, e.g., 3d6+5"
diceroll=raw_input("> ")
randarray=[]
mod=0
dicevals=0
results=[]
if '+' in diceroll:
diceroll2=diceroll.split('+')
for n in range(len(diceroll2)):
temp=diceroll2[n]
if 'd' in temp:
temp2=temp.split('d')
numtimes=int(temp2[0])
dieval=int(temp2[1])
while numtimes>0:
randarray.append(dieval)
numtimes=numtimes-1
else:
mod=mod+int(temp)
else:
if 'd' in diceroll:
diceroll2=diceroll.split('d')
numtimes=int(diceroll2[0])
dieval=int(diceroll2[1])
while numtimes>0:
randarray.append(dieval)
numtimes=numtimes-1
else:
mod=mod+int(diceroll)
for k in range(len(randarray)):
rollval=random.randrange(1,randarray[k]+1,1)
if randarray[k]>9:
spaces=' '
else:
spaces=' '
outputstr='1d'+str(randarray[k])+':'+spaces+str(rollval)
results.append(outputstr)
dicevals=dicevals+rollval
totalvals=dicevals+mod
print "Rolls:"
for j in range(len(results)):
print results[j]
print "\nTotal: " + str(totalvals) | [
"bkravitz@iu.edu"
] | bkravitz@iu.edu |
7090fa32019a45880af6aa27f0beadc311ba15b9 | a5b1aa462055b26008b694dfd24297e53f53df7f | /scripts/timer.py | 558cce8b2516919751af0a6b09c8bdb771563a50 | [] | no_license | rsaarelm/config | bc78b2f540b723f6ea467837347ea18f6cf9c27c | 94ae14f816fde82e11ac9323a8bbf3ff348bd0ac | refs/heads/master | 2021-01-20T11:47:08.722757 | 2012-03-29T16:59:01 | 2012-03-29T19:53:10 | 97,229 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,829 | py | #!/usr/bin/env python
"""Shows a timer on the command line.
Used for project work, start the timer and keep it running for at least a
given time until stopping work. """
from time import *
import sys
import os
import pickle
# Timestamp format used in the log file.
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# Pop up a message when a countdown (break) finishes.
SHOW_BREAK_END_MSG = True
BREAK_END_CMD = "xmessage Break is over"
# Untested stuff: Windows kbhit, adjusting today's time for tasks that go
# over midnight.
try:
    # If we're on Windows, we might get a ready-made kbhit.
    from msvcrt import kbhit
except ImportError:
    # Define a kbhit under unix, from Python FAQ.
    import termios, os, fcntl
    def kbhit():
        """Non-blocking check whether a key has been pressed on stdin."""
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)
        new = termios.tcgetattr(fd)
        # Switch the terminal to non-canonical, no-echo mode...
        new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, new)
        termios.tcsetattr(fd, termios.TCSADRAIN, new)
        # ...and make stdin reads non-blocking.
        oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
        try:
            try:
                sys.stdin.read(1)
                return True
            except IOError:
                # Nothing buffered: no key was hit.
                return False
        finally:
            # Always restore the original terminal settings and file flags.
            termios.tcsetattr(fd, termios.TCSADRAIN, old)
            termios.tcsetattr(fd, termios.TCSAFLUSH, old)
            fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
def time_str(seconds):
    """Format a second count as a zero-padded HH:MM:SS string."""
    minutes, secs = divmod(int(seconds), 60)
    hours, minutes = divmod(minutes, 60)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
def print_time(seconds, today, total):
    """Redraw the one-line status: session time, today's total, overall total.

    The trailing comma suppresses the newline so '\r' can overwrite the line.
    """
    print ("\rTime: %s\tToday: %s\tTotal: %s " %
        (time_str(seconds), time_str(seconds + today), time_str(seconds + total))),
    sys.stdout.flush()
def day_interval(second_within_day):
# Zero time
time = [0] * 9
# Set date.
time[:3] = localtime(second_within_day)[:3]
day_start = mktime(time)
day_end = day_start + 86400
return (day_start, day_end)
class Records:
    """Holds work-time records as (start_epoch_secs, duration_secs, message)
    tuples and answers aggregate queries about them."""
    def __init__(self):
        # Data is a list of (start_secs, duration_secs, message_string) tuples
        self._data = []

    def save(self, file):
        """Write one 'start|duration|message' line per record to *file*."""
        for start, duration, msg in self._data:
            assert '\n' not in msg
            # Equivalent to the old ``print >> file`` but also valid Python 3.
            file.write("%s|%s|%s\n" % (strftime(TIME_FORMAT, localtime(start)), duration, msg))

    def load(self, file):
        """Replace the current records with those parsed from *file*."""
        self._data = []
        for line in file:
            line = line.strip() # Get rid of trailing newline
            start, duration, msg = line.split('|', 2)
            start = mktime(strptime(start, TIME_FORMAT))
            duration = int(duration)
            self.add_entry(start, duration, msg)

    def add_entry(self, start, duration, msg):
        """Append a record; entries shorter than one second are discarded."""
        if duration < 1:
            return
        assert('\n' not in msg)
        msg = msg.strip()
        self._data.append((int(start), int(duration), msg))

    def total_time(self):
        """Total recorded seconds over all entries."""
        return sum([duration for (_, duration, _) in self._data])

    def time_in_interval(self, min_sec, max_sec):
        """Seconds of recorded work overlapping [min_sec, max_sec).

        Bug fix: the old branch chain contributed nothing for an entry that
        completely spanned the interval (start < min_sec and
        start + duration >= max_sec). Clipping every entry to the interval
        handles all cases uniformly.
        """
        result = 0
        for start, duration, _ in self._data:
            overlap = min(start + duration, max_sec) - max(start, min_sec)
            if overlap > 0:
                result += overlap
        return result

    def time_for_day(self, second_within_day):
        """Seconds recorded during the local calendar day of the timestamp."""
        day_start, day_end = day_interval(second_within_day)
        return self.time_in_interval(day_start, day_end)

    def time_for_today(self):
        """Seconds recorded during the current local day."""
        return self.time_for_day(time())

    def daily_hours(self):
        """Return [(local_day_struct_time, seconds_that_day)] sorted by day."""
        result = {}
        for start, duration, msg in self._data:
            day_start, day_end = day_interval(start)
            day_start = localtime(day_start)
            if day_start not in result:
                result[day_start] = self.time_for_day(start)
        # sorted() works identically in Python 2 and 3 (dict.items().sort()
        # did not).
        return sorted(result.items())
# Operating modes selected from the command line.
MODE_TIMER = 1
MODE_REPORT = 2

def parse_time(time_str):
    """Parse a duration like '2h', '30m', '45s' or plain seconds into an
    int second count; malformed input raises ValueError, as before."""
    unit_scale = {'h': 3600, 'm': 60, 's': 1}
    suffix = time_str[-1:]
    if suffix in unit_scale:
        return int(float(time_str[:-1]) * unit_scale[suffix])
    return int(float(time_str))
def usage():
    """Print command-line help to stdout."""
    print "Usage: %s [options] [file]" % sys.argv[0]
    print "options: -r print report of log file"
    print " -h print this help"
    print " -c [time] count down for [time] seconds before starting"
    print " use [time]m for minutes and [time]h for hours"
def countdown(seconds):
    """Show a countdown on screen. Stop when the user presses a key or when time
    runs out."""
    end_time = time() + seconds
    while not kbhit() and time() < end_time:
        seconds = end_time - time()
        # '\r' plus trailing comma keeps redrawing the same line.
        print "\rBreak left: %s " % time_str(seconds),
        sys.stdout.flush()
        sleep(0.1)
    # Blank out the line once the countdown ends.
    print "\r ",
def timer(records, filename=None):
    """Show a timer on screen until the user presses a key. Record the amount of
    time elapsed."""
    begin = time()
    total = records.total_time()
    today = records.time_for_today()
    # Use adjust_today when crossing midnight and today's total is actually less than
    # session total.
    adjust_today = 0
    seconds = 0
    DATE_FORMAT = "%Y%m%d"
    date = strftime(DATE_FORMAT)
    while not kbhit():
        try:
            seconds = int(time() - begin)
            print_time(seconds, today - adjust_today, total)
            sleep(0.1)
            newdate = strftime(DATE_FORMAT)
            # The day has changed. Save last day's time.
            if newdate != date:
                adjust_today += seconds - adjust_today
                date = newdate
        except KeyboardInterrupt:
            # Ctrl-C aborts without saving anything.
            print "\nSpatiotemporal anomaly detected. Memory of the current session will be purged."
            return
    if filename:
        # Ask for a log message and persist all records back to the file.
        entry = raw_input("\nLog entry: ")
        records.add_entry(begin, seconds, entry)
        file = open(filename, 'wb')
        records.save(file)
        file.close()
def report(records):
    """Print a per-day hours table plus mean/median/total statistics."""
    days = records.daily_hours()
    print "Date\t\tHours"
    for date, seconds in days:
        print "%s\t%.3f" % (strftime("%Y-%m-%d", date), (seconds / 3600.0))
    # Sort days by seconds for median.
    daily_hours = [seconds / 3600.0 for (date, seconds) in days]
    daily_hours.sort()
    if len(daily_hours) % 2 == 1:
        median_hours = daily_hours[len(daily_hours) / 2]
    else:
        # If there is an even number of entries, interpolate between the two
        # middle entries.
        median_hours = (daily_hours[len(daily_hours) / 2] / 2 +
                        daily_hours[len(daily_hours) / 2 - 1] / 2)
    total_hours = records.total_time() / 3600.0
    print "-" * 32
    print
    print "Daily mean: %.3f h" % (total_hours / len(days))
    print "Daily median: %.3f h" % median_hours
    print "Total hours: %.3f h" % total_hours
    todays_time = records.time_for_today()
    if todays_time > 0:
        print "Today's time: ", time_str(todays_time)
def main():
    """Parse CLI options, load the optional log file, then run the timer
    or print a report depending on the selected mode."""
    records = Records()
    filename = None
    countdown_secs = 0
    mode = MODE_TIMER
    # Need to use a crude loop since we can manipulate i from within it.
    i = 1
    while i < len(sys.argv):
        param = sys.argv[i]
        if param.startswith('-'):
            if param == '-r':
                mode = MODE_REPORT
            elif param == '-h':
                usage()
                return
            elif param == '-c':
                # -c consumes the next argument as the countdown duration.
                try:
                    i += 1
                    countdown_secs = parse_time(sys.argv[i])
                    assert countdown_secs >= 0
                except:
                    usage()
                    return
            else:
                print "Unknown option '%s'" % param
                usage()
                return 1
        else:
            # First non-option argument is the log file.
            if filename is None:
                filename = param
        i += 1
    if filename is not None:
        if os.path.exists(filename):
            file = open(filename, 'rb')
            records.load(file)
            file.close()
    if mode == MODE_TIMER:
        if countdown_secs > 0:
            # Take the break first, optionally announcing when it ends.
            countdown(countdown_secs)
            if SHOW_BREAK_END_MSG:
                os.system(BREAK_END_CMD)
        timer(records, filename)
    elif mode == MODE_REPORT:
        if filename is None:
            print "No file to generate report from."
            return 1
        else:
            report(records)
if __name__ == '__main__':
    main()
| [
"risto.saarelma@iki.fi"
] | risto.saarelma@iki.fi |
18cf3691b5004b6df5d94c8a24498181fe58b546 | 477d4a2fc068c930bd6a8289429c226f23affca3 | /limited-tests/autograder_limited.py | 6ff040f638eb32cfa6564d47c97010a0b846dd20 | [] | no_license | vivianliu/Logisim-Processor | 2e08f6100f61c6c946a30d3fea2cd4a32fcf0859 | 2a317c9bb8dd9ef52dd31978647371273b785469 | refs/heads/master | 2021-01-22T13:38:13.611876 | 2011-11-29T04:33:52 | 2011-11-29T04:33:52 | 4,028,063 | 10 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | #!/usr/bin/env python
import autograder_base
import os.path
from autograder_base import file_locations, AbsoluteTestCase, FractionalTestCase, main
tests = [
("lui test",AbsoluteTestCase(os.path.join(file_locations,'lui-test.circ'),os.path.join(file_locations,'lui.out'),1)),
("ori test",AbsoluteTestCase(os.path.join(file_locations,'ori-test.circ'),os.path.join(file_locations,'ori.out'),1)),
("add test",AbsoluteTestCase(os.path.join(file_locations,'add-test.circ'),os.path.join(file_locations,'add.out'),1)),
("addi test",AbsoluteTestCase(os.path.join(file_locations,'addi-test.circ'),os.path.join(file_locations,'addi.out'),1)),
("slt test",AbsoluteTestCase(os.path.join(file_locations,'slt-test.circ'),os.path.join(file_locations,'slt.out'),1)),
("disp test",AbsoluteTestCase(os.path.join(file_locations,'disp-test.circ'),os.path.join(file_locations,'disp.out'),1)),
("branches test",AbsoluteTestCase(os.path.join(file_locations,'branches-test.circ'),os.path.join(file_locations,'branches.out'),1)),
("fibonacci test",AbsoluteTestCase(os.path.join(file_locations,'fibonacci-test.circ'),os.path.join(file_locations,'fibonacci.out'),1)),
]
if __name__ == '__main__':
main(tests)
| [
"alechoey@gmail.com"
] | alechoey@gmail.com |
e2e081e324e998a37d2a94a4d1659f2fbfec36c3 | dd3b3fc3cbb9a48d5056f39969f3e2be0e6abbaf | /venv/Scripts/pip3-script.py | cb3d85e6d3895a84278dc67a8e5d53ce243a4847 | [] | no_license | Pactortester/QDS_phone | c0c323dd44c22924d36a1c9fe8b13db354192c81 | 9844242e5a71de89c3cb994e70c40d3dfd7b0f35 | refs/heads/master | 2020-04-10T16:19:00.264023 | 2019-04-03T09:15:48 | 2019-04-03T09:15:48 | 161,141,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | #!G:\QDS_phone\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script wrapper: normalize argv[0]
    # (strip -script.py/.exe suffix) and dispatch to pip's entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"1456470136@qq.com"
] | 1456470136@qq.com |
70fe1823f5c5652194350c79d30ed6c1fa5d33df | dd4ea5ac482a8db52454be718172b8395cb7a6ed | /virtual/lib/python3.6/site-packages/Token/generated/provider/models/create_token_request_payee.py | e6dcc457438d4529008d6fd5130809a06c94ab71 | [
"MIT"
] | permissive | osman2491/hood | 06d0021bc9510c5c279b364421c5d90ea81ba0db | 55343a8960db8f2772b0c9ae9b615cefac11dae5 | refs/heads/master | 2022-12-10T13:53:09.458913 | 2020-02-24T07:52:46 | 2020-02-24T07:52:46 | 242,658,606 | 0 | 0 | MIT | 2021-09-08T01:41:38 | 2020-02-24T06:00:42 | Python | UTF-8 | Python | false | false | 3,365 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class CreateTokenRequestPayee(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        CreateTokenRequestPayee - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'alias_code': 'str'
        }

        self.attribute_map = {
            'alias_code': 'aliasCode'
        }

        self._alias_code = None

    @property
    def alias_code(self):
        """
        Gets the alias_code of this CreateTokenRequestPayee.

        :return: The alias_code of this CreateTokenRequestPayee.
        :rtype: str
        """
        return self._alias_code

    @alias_code.setter
    def alias_code(self, alias_code):
        """
        Sets the alias_code of this CreateTokenRequestPayee.

        :param alias_code: The alias_code of this CreateTokenRequestPayee.
        :type: str
        """
        self._alias_code = alias_code

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: comparing against an object without a ``__dict__`` (str,
        int, None, ...) previously raised AttributeError; returning
        NotImplemented lets Python fall back to the standard "not equal"
        result instead.
        """
        if not isinstance(other, CreateTokenRequestPayee):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"osman67239121@gmail.com"
] | osman67239121@gmail.com |
e717b78965b6a020305b2da272f299fa7a0fce3f | d21a3768b12084a7d143106d2bfccfc27016c93e | /principal.py | 5fdc995ea9e7ecf022f86e1f41e299064d5cfef0 | [] | no_license | JoaquinNMusriG/Ejercicio-8-U3 | 4b7d406bc8979d6d37f40690c0f6ff182b31a410 | 78aed18011203ee64059ed5c6e235a82b9cfa410 | refs/heads/master | 2022-10-04T15:38:12.119369 | 2020-06-08T10:56:43 | 2020-06-08T10:56:43 | 270,604,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | from claseColeccion import Coleccion
from claseMenu import Menu
if __name__ == '__main__':
cant = input('Ingrese la cantidad de empleados a cargar: ')
if cant.isdigit():
empleados = Coleccion(int(cant))
menu = Menu()
salir = False
while not salir:
print("""
0 Salir
1 Registrar horas
2 Total de tarea
3 Ayuda
4 Calcular sueldo
5 Ingresar usuario""")
op = int(input('Ingrese una opcion: '))
menu.opcion(op,empleados)
salir = op == 0
else:
print('Valor inválido.')
| [
"joaquinmusrigomez@gmail.com"
] | joaquinmusrigomez@gmail.com |
ab3a19f73e5bcd44ab43a4e12b2bd85a9aee47d6 | fd8ad626e402ab65ce79329b25749be031af7e6c | /visualizer/url.py | 2468b2b897413c410f5bfb12ea4c7a2564974593 | [] | no_license | mastinux/tebRotarapmoc | b022069706a66f1c2af94fc64dcdddc91183b9d1 | df60f97564a3a75c9345f4707751b4db66750b89 | refs/heads/master | 2020-04-06T14:19:01.083785 | 2016-09-28T06:19:40 | 2016-09-28T06:19:40 | 52,157,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | from django.conf.urls import url
from . import views
# URL table for the visualizer app: three variants of the index view.
urlpatterns = [
    # '' -> views.index (named 'public_page')
    url(r'^$', views.index, name='public_page'),
    # '2' -> views.index_2 (named 'refreshed_page')
    url(r'^2$', views.index_2, name='refreshed_page'),
    # '3' -> views.index_3 (named 'listed_page')
    url(r'^3$', views.index_3, name='listed_page'),
]
| [
"andrea.pantaleo.93@gmail.com"
] | andrea.pantaleo.93@gmail.com |
c7a40903999118527299e8f9c26851b8181911bb | 5a60e4d280dae9e1145885588c70d6efff0b528e | /CH2/listcomp_adv.py | 1ea080259cbc4b99bf1b5447b4d76b8369a088e2 | [] | no_license | ninja-22/HOPTWP | f1fab85b00c154ffbe77d7ba3b15d9f4f69c6173 | 4335d00c324a8a717222a13e842af1939a73a76c | refs/heads/master | 2022-04-15T00:59:23.254787 | 2020-04-04T16:11:59 | 2020-04-04T16:11:59 | 247,283,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | #!/usr/local/bin/python
l1 = [1, 2, 3, 4]
l2 = [5, 6, 7, 8]
sq_even = [x**2 for x in l1 if x%2 == 0]
l_sum = [x + y for x in l1 for y in l2]
sq_values = [{x: x**2} for x in l1]
print(f"Even squares: {sq_even}")
print(f"Sum: {l_sum}")
print(f"Square dictionary: {sq_values}")
| [
"uthman.eqbal@portswigger.net"
] | uthman.eqbal@portswigger.net |
ffd730a8712503bf27743663db5ee9c8673e7e6c | b99a8795ea737f96c504edfb20475c7bec40882e | /tests/test_multiprocessing_utils.py | adbf5191e3cb9df2e9ecf90705a30833350bfa9a | [
"MIT"
] | permissive | HBS-HBX/django-elastic-migrations | 26efa62e81e6fe7b16277cca1cd21e9c581ee255 | 8b33c3dd73f01b9199d1da70e6c5f557b74c699b | refs/heads/master | 2023-08-03T09:24:00.340935 | 2023-07-13T11:26:08 | 2023-07-13T11:26:08 | 141,747,944 | 5 | 6 | MIT | 2023-07-13T11:26:12 | 2018-07-20T19:00:53 | Python | UTF-8 | Python | false | false | 1,012 | py | from multiprocessing import cpu_count
from unittest import skip
from django.test import TestCase
from django_elastic_migrations.utils.multiprocessing_utils import DjangoMultiProcess
def add_1(num):
    """Return a small job record pairing *num* with its successor."""
    return dict(job_id=num, result=num + 1)
@skip("AttributeError: Can't pickle local object 'threadwrapper.<locals>.wrapper'")
class TestMultiprocessingUtils(TestCase):
    """Smoke tests for DjangoMultiProcess that stay off the database."""

    def test_basic_multiprocessing(self):
        """
        Fan ``add_1`` out over 1..9 in a worker pool and verify that
        every job's result is its id plus one.
        """
        jobs = range(1, 10)
        pool = DjangoMultiProcess(cpu_count(), log_debug_info=3)
        with pool:
            pool.map(add_1, jobs)
        for outcome in pool.results():
            self.assertEqual(outcome.get('job_id') + 1, outcome.get('result'))
"pnore@hbs.edu"
] | pnore@hbs.edu |
6f2b3299583e3da4f22584eb39d43be19e2b6dd6 | 778c35fd5cf09e01557bd9eeb543e559bdac1137 | /django_cleanup/testapp/models/integration.py | a37d8d9284168c7e1068a82d732bf8c37080e86f | [
"MIT"
] | permissive | avallbona/django-cleanup | 1009384469df1dd94bc63399c03d831a8ee24c0e | 6dd155f49caf885c5bf9830c3946d0072842c15d | refs/heads/master | 2022-03-01T04:42:27.810687 | 2019-10-13T15:39:51 | 2019-10-13T15:39:51 | 219,780,640 | 0 | 0 | NOASSERTION | 2019-11-05T16:24:42 | 2019-11-05T15:37:55 | null | UTF-8 | Python | false | false | 572 | py | # coding: utf-8
from __future__ import unicode_literals
from easy_thumbnails.fields import ThumbnailerImageField
from sorl.thumbnail import ImageField
from .app import ProductAbstract
class ProductIntegrationAbstract(ProductAbstract):
    """Abstract product model adding third-party thumbnail image fields.

    Extends ``ProductAbstract`` with a sorl-thumbnail ``ImageField`` and
    an easy-thumbnails ``ThumbnailerImageField``; both are optional and
    upload into the ``testapp`` directory.
    """
    # sorl-thumbnail backed image (optional).
    sorl_image = ImageField(upload_to='testapp', blank=True)
    # easy-thumbnails backed image (optional).
    easy_image = ThumbnailerImageField(upload_to='testapp', blank=True)
    class Meta:
        abstract = True
class ProductIntegration(ProductIntegrationAbstract):
    """Concrete (non-abstract) realization of ProductIntegrationAbstract."""
    pass
def sorl_delete(**kwargs):
    """Delete the sorl-thumbnail data for the file passed in ``kwargs``.

    Expects the removed file object under the ``'file'`` keyword —
    presumably the payload of a django-cleanup signal; confirm against
    the signal registration site.
    """
    from sorl.thumbnail import delete
    delete(kwargs['file'])
| [
"mario@dwaiter.com"
] | mario@dwaiter.com |
327103fffaee38361c968fc092fc289178f3d9e7 | f219324bca81da9046eef2641c367eb8f158785a | /0x1C-makefiles/5-island_perimeter.py | 64e3d714970f0e0b06329d79d169f0ecc6d86a71 | [] | no_license | AbdurahmanAb/alx-low_level_programming-1 | 1cb21251ca0da0e1e1544379fbb857e7555fdaf6 | 31eb26772c6317c13d44985fbc038f235a0c5361 | refs/heads/master | 2023-07-24T12:48:45.997792 | 2021-09-02T22:07:44 | 2021-09-02T22:07:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | #!/usr/bin/python3
"""
function island_perimeter
returns the perimeter of the island described in grid
"""
def island_perimeter(grid):
""" returns the perimeter of the island """
aux = 0
for y in range(len(grid)):
for x in range(len(grid[y])):
if grid[y][x] is 1:
aux += 4
if x > 0 and grid[y][x - 1]:
aux -= 2
if y > 0 and grid[y - 1][x]:
aux -= 2
return aux
| [
"nshimyumukizachristian@gmail.com"
] | nshimyumukizachristian@gmail.com |
6f42046e26a53d45a6b0e199f1b66b160ac34a3f | 99d7765da35926279c4a4fd7313d55908786f4b8 | /0/2/2739/2739.py | 32df89b38143b4cce88cb8125277af2ebf5543fb | [
"MIT"
] | permissive | chr0m3/boj-codes | b8294c5d4d10a5af25b5276427bccd74d0866ef5 | d71d0a22d0a3ae62c225f382442461275f56fe8f | refs/heads/master | 2021-08-16T15:24:57.733088 | 2021-03-22T13:13:10 | 2021-03-22T13:13:10 | 91,523,558 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | a = input()
for i in range(0, 9):
print("%d * %d = %d" % (int(a), i + 1, int(a) * (i + 1)))
| [
"chr0m3@users.noreply.github.com"
] | chr0m3@users.noreply.github.com |
9d2cd1f61430081aa4a65d8e29b28e23f51b088f | 85f6de6e3ef680cd717312233fd03c636c606550 | /src/two/rolling_a_dice.py | faf4234c08ca6aa9dc9b3cb20192a6fdd631a5dc | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Guillermogsjc/dissecting-reinforcement-learning | f8956455ffda22445ecc11fc6938da40ed4948e2 | 8a2751efa6d4a733df81c272c503b8061c70c04f | refs/heads/master | 2021-01-11T20:41:02.216522 | 2017-01-15T11:32:27 | 2017-01-15T11:32:27 | 79,168,192 | 1 | 0 | null | 2017-01-16T23:14:54 | 2017-01-16T23:14:53 | null | UTF-8 | Python | false | false | 611 | py | import numpy as np
# Throwing a dice N times and evaluating the expectation: by the law of
# large numbers the sample mean converges to the true expectation (3.5)
# as the number of rolls grows.  One loop replaces five copy-pasted
# roll/print pairs; the output is identical.
for n_rolls in (3, 10, 100, 1000, 100000):
    dice = np.random.randint(low=1, high=7, size=n_rolls)
    print("Expectation (" + str(n_rolls) + " times): " + str(np.mean(dice)))
| [
"massimiliano.patacchiola@gmail.com"
] | massimiliano.patacchiola@gmail.com |
b1df5923fa5ee618e437894f6af7403d70ca086a | 5aa18412806e4900c6da0930c53a992da1bf72c6 | /Macro_Nutrient_And_Calorie_Tracker/Macro_Nutrient_And_Calorie_Tracker/settings.py | ab14f16781685b64704eac560b407c3f5bac9f43 | [] | no_license | Piotr-Zielinski-PZ/python_django_11 | 43351773567a727b6f1f58297b831c071b467362 | f5d24624d7588768428a7a49f648d600cbe15759 | refs/heads/master | 2023-08-10T23:29:11.032194 | 2021-09-14T11:47:58 | 2021-09-14T11:47:58 | 405,640,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,339 | py | """
Django settings for Macro_Nutrient_And_Calorie_Tracker project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded dev key — load from an environment variable
# before deploying.
SECRET_KEY = 'django-insecure-bc_gdn+1fpr2)fh5jl+a$s)qbn)3o@jxq!%q*r%zn5i23)@5z6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS only works while DEBUG is True.
ALLOWED_HOSTS = []
# Application definition
# 'food_app' is the project's own application; the rest are Django
# contrib defaults.
INSTALLED_APPS = [
    'food_app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Macro_Nutrient_And_Calorie_Tracker.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Macro_Nutrient_And_Calorie_Tracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Development default: a SQLite file next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"piotrzet00@outlook.com"
] | piotrzet00@outlook.com |
fad178dd1abbc0aba4a67188e92de989771a12d2 | f13c0a8e70983ec0e3759a975dc357e7b0c99c4a | /pythonchallenge/challenge6.py | d13e362592f45b4840efd76fee4077f465dbdc96 | [] | no_license | cam-x/Python-exercises | daf5062d44c3780552fed427064d76645a1e7601 | eeacac4fd59c93a753475608d765e1fc9329da28 | refs/heads/master | 2020-12-24T17:27:09.143647 | 2014-10-08T09:25:34 | 2014-10-08T09:25:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | import requests
import zipfile
__author__ = 'Jianqiao'
""" Get the file Zip from url
url = 'http://www.pythonchallenge.com/pc/def/channel.zip'
r = requests.get(url)
with open("channel.zip","wb") as code: # 'w' - open for writing ; 'b' - binary mode
code.write(r.content)
"""
# Walk the chain of "<number>.txt" members inside channel.zip: each file
# names the next one in its last word; the puzzle answer is spelled out
# by the per-member zip comments collected along the way.
with zipfile.ZipFile('channel.zip') as my_zip:  # ZipFile is a context manager
    print(my_zip.namelist())
    text = my_zip.read('readme.txt')  # read() returns the member's bytes
    print(text.decode())  # .decode() turns the bytes into a str
    number = '90052'
    next_txt = number + '.txt'
    comment_list = []
    while True:
        try:
            text = my_zip.read(next_txt)
            # ZipInfo instances come from ZipFile.getinfo(); their
            # .comment attribute holds the per-member archive comment.
            comment_list.append(my_zip.getinfo(next_txt).comment.decode())
            number = text.decode().split()[-1]
            next_txt = number + '.txt'
        except KeyError:
            # read()/getinfo() raise KeyError when the "next" name is
            # not in the archive, i.e. the chain has ended.  The old
            # bare `except:` also swallowed KeyboardInterrupt and any
            # real bug in the loop body.
            print('You should check the txt before this one')
            print('This is the comments:', ''.join(comment_list))
            break
"""
import os
print(os.getcwdb()) # Get the directory you are working on
print(os.listdir()) # Get the list of all your directory name
os.remove('code3.zip') # Remove a file
"""
| [
"xujianqiao127@gmail.com"
] | xujianqiao127@gmail.com |
fac85c5c169eaf142355c0655ac86fcd5f74fc09 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/surrounded_20200617223518.py | 233debe26db46593e2dfe08e99e70eb47ac5cf87 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | def surronded(board):
# dfs
# untouched
# in progress
# finished
rows = len(board)
if rows == 0:
return
cols = len(board[0])
if cols == 0:
return
state = [[0]* cols for _ in range(rows)]
def canReachOutside(x,y,pending):
pending.append(x,y)
canReach = False
directions = [(1,0),(-1,0),(0,1),(0,-1)]
for dx,dy in directions:
nextX,nextY = dx+x,dy+y
if nextX < 0 or nextX >= rows or nextY < 0 or nextY >= cols:
canReach = True
continue
if board[nextX][nextY] == 'O' and state[nextX][nextY] == 0:
state[nextX][nextY] = 1
canReach != canReachOutside(nextX,nextY,pending)
return canReach
for x in range(rows):
for y in range(cols):
if [x][y] == '0' and state[x][y] == 0:
pending = []
if canReachOutside(x,y,pending):
# process states to change from o to x
pass
else:
# regulary process states
pass
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
67aab2ae3ec58cf14d525735cdded2675e3e08d3 | d91ee9cc689ed049cce5077db8eded0584e05a00 | /dg/ivr_admin.py | ebda64f87bbcf101338a0c21b25b6bda02630955 | [] | no_license | digitalgreenorg/loop | 3368b521cd4bf331cda2e16b3f7938312bb1104c | 27ddcc13be4075954b1cf1c73a6fa871734eb78a | refs/heads/master | 2020-03-15T11:48:12.153246 | 2018-05-04T10:54:49 | 2018-05-04T10:54:49 | 132,128,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # from django.contrib.admin.sites import AdminSite
#
# from ivr.models import Call, Broadcast, Audio
# from ivr.admin import CallAdmin, BroadcastAdmin, AudioAdmin
#
# class IvrAdmin(AdminSite):
# pass
#
# ivr_admin = IvrAdmin(name="ivrsadmin")
#
# ivr_admin.register(Call, CallAdmin)
# ivr_admin.register(Broadcast, BroadcastAdmin)
# ivr_admin.register(Audio, AudioAdmin)
| [
"alodha21051992@gmail.com"
] | alodha21051992@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.