blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e562c5bc2e2ac231d89eefd48aacd0e007fc83e | b2caf572fc8765e7b1a7a381e0a2a9e3bcc27788 | /3.朴素贝叶斯/6_朴素贝叶斯之新浪新闻分类器_文本切割.py | 2263ad772502b28d007b698a7a6fb21911080b53 | [] | no_license | NaLaEur/ML_by_Python | 5fe42e4d54ef7f1a9ddc92dfceb6cd3f8d716fef | 5b57e52620e13f370d1d0a8e8a92b926f8e924b4 | refs/heads/master | 2020-04-20T05:55:58.351456 | 2019-02-02T14:16:17 | 2019-02-02T14:16:17 | 168,669,522 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | import os
import jieba
def TextProcessing(folder_path):
    """Tokenize every text file under ``folder_path`` with jieba.

    ``folder_path`` must contain one sub-folder per class; every file in a
    sub-folder is read as UTF-8, segmented with jieba, and recorded together
    with its sub-folder name as the class label.

    Returns:
        (data_list, class_list): parallel lists where ``data_list[i]`` is the
        token list of one document and ``class_list[i]`` its class label.
        Both lists are also printed, preserving the original script output.
    """
    # 1. Each entry directly under folder_path is one class folder.
    folder_list = os.listdir(folder_path)
    # 2. Training data: documents (as token lists) and their labels.
    data_list = []
    class_list = []
    # 3. Walk every class folder.
    for folder in folder_list:
        # 3.1 Absolute path of this class folder.
        new_folder_path = os.path.join(folder_path, folder)
        # 3.2 All files belonging to this class.
        files = os.listdir(new_folder_path)
        # 3.3 Open each file and cut it into words with jieba.
        for file in files:
            with open(os.path.join(new_folder_path, file), 'r', encoding='utf-8') as f:
                raw = f.read()
            # Precise mode: returns a lazily evaluated generator of tokens.
            word_cut = jieba.cut(raw, cut_all=False)
            word_list = list(word_cut)
            # 3.4 Record the document and its label.
            data_list.append(word_list)
            class_list.append(folder)
    print(data_list)
    print(class_list)
    # Returning the data is backward compatible (callers previously got
    # None and ignored it) and makes the function reusable.
    return data_list, class_list
if __name__ == '__main__':
#文本预处理
folder_path = 'Sample' #训练集存放地址
TextProcessing(folder_path) | [
"289282571@qq.com"
] | 289282571@qq.com |
61121edcd06686489edcfafd1d34247f516e7ee2 | 0ff05247425b7d9693c74ef0a86a45abcfff4331 | /com/dudu/image/matrix.py | 5fd1d06d34361ce4f83d68a3ca4404a1cd18c5df | [] | no_license | duyangfan/base_numpy | e0aa6693e3fba222e1d4513e81190be99fdcaf56 | 53ae28bcfb0cb8ba00c7ce9c6333a56b8def211b | refs/heads/master | 2023-04-14T00:10:50.227415 | 2021-04-25T01:25:33 | 2021-04-25T01:25:33 | 316,435,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time : 2020/10/16 9:57
#@Author: dudu
#@File : matrix.py
from PIL import Image
from scipy import signal
import numpy as np
'''
转置
'''
def matrix_t(types):
    """Experimental image-matrix playground.

    NOTE(review): despite the name ("transpose" in the original Chinese
    docstring), the transpose is commented out below; the function actually
    returns a Gaussian-blurred copy of the green channel of apple.jpg.
    ``types`` is unused, and ``arr``/``r_arr`` are computed but do not affect
    the returned value -- confirm whether that dead work can be removed.
    """
    # Random noise matrix with the image's dimensions; zero its diagonal.
    arr = np.random.randint(0,255,(429,500))
    i=0
    j=0
    for i in range(0,429,1):
        for j in range(0, 500, 1):
            if i==j:
                arr[i][j]=0
    img=Image.open("D://imageInfo//apple.jpg")
    r,g,b=img.split()
    r_arr=np.array(r)
    r2_arr=np.array(g)
    i=0
    j=0
    # Threshold the red channel to a binary (0/255) mask.
    for i in range(0,429,1):
        for j in range(0, 500, 1):
            if r_arr[i][j]>245:
                r_arr[i][j]=255
                continue
            r_arr[i][j]=0
            arr[i][j]=0
    # Save the thresholded red channel for inspection.
    matrix_show(np.matrix(r_arr),"r_add")
    print(r_arr)
    arr=arr+r_arr;
    print(r_arr.shape)
    # 3x3 Gaussian blur kernel (weights sum to 1).
    core_arr=np.matrix([[1/16,2/16,1/16],
                        [2/16,4/16,2/16],
                        [1/16,2/16,1/16]])
    # FFT-based convolution of the green channel; 'same' keeps the shape.
    con_arr=signal.fftconvolve(r2_arr,core_arr,'same')
    #np.absolute(con_arr)
    #arr=arr.T
    return np.matrix(con_arr)
def matrix_show(matrix, name):
    """Print *matrix* and save it as D://imageInfo//<name>.jpg."""
    print(matrix)
    as_bytes = matrix.astype(np.uint8)
    Image.fromarray(as_bytes).save("D://imageInfo//" + name + ".jpg")
def matrix_convonlve():
    """Convolve the red channel of D://imageInfo//b.jpg with a Laplacian
    edge-detection kernel and save the result via matrix_show.

    The original body assigned four kernels in a row (Gaussian blur, two
    Prewitt variants, Laplacian); only the last one was ever used, so the
    dead assignments were removed without changing behaviour.
    """
    img = Image.open("D://imageInfo//b.jpg")
    r, g, b = img.split()
    img_arr = np.array(r)
    # 3x3 Laplacian kernel: responds to edges in every direction.
    core_arr = np.matrix([[1, 1, 1],
                          [1, -8, 1],
                          [1, 1, 1]])
    # FFT-based convolution; 'same' keeps the original image shape.
    con_arr = signal.fftconvolve(img_arr, core_arr, 'same')
    img_mat = np.matrix(con_arr)
    matrix_show(img_mat, "createImage_matrix")
if __name__ =='__main__':
# matrix=matrix_t(500)
# matrix_show(matrix,"res_img")
matrix_convonlve() | [
"599588568@qq.com"
] | 599588568@qq.com |
07cee0c3dcf983d18fb59617d5c31dbcd8af863c | 6fb10cf0015d9f8e39bac14d9898dcbce934068c | /carts/serializers.py | be1738ed7f939e73812ee8e93d370e0e68e2a594 | [] | no_license | dmarquina/Nudocord | 7ae481f2dac576105e8cc1b9dee502cd9d263856 | bf94f298724cde9d945cbedb523e33eda87d93dc | refs/heads/master | 2021-01-17T12:49:45.267097 | 2016-06-14T07:19:47 | 2016-06-14T07:19:47 | 56,170,458 | 1 | 0 | null | 2016-06-14T07:19:47 | 2016-04-13T17:01:28 | HTML | UTF-8 | Python | false | false | 358 | py | from deliverplaces.models import Deliverplace
from rest_framework import serializers
class PlacesnameSerializer(serializers.ModelSerializer):
    """DRF serializer exposing only the id and name of a Deliverplace."""
    class Meta:
        model = Deliverplace
        fields = ('id', 'name')
class PlacesdateSerializer(serializers.ModelSerializer):
    """DRF serializer exposing only the id and delivery date of a Deliverplace."""
    class Meta:
        model = Deliverplace
        fields = ('id', 'date')
| [
"mrdiego0892@gmail.com"
] | mrdiego0892@gmail.com |
22a5082162b8e3e3900e02a08ce7e3931b946ac7 | f6faeb43b394bebb2c66b270ece4a5422cece0f6 | /Input.py | 0a45338c9ddf096ffbf6f1a13214ef459aedce03 | [] | no_license | Khun-Cho-Lwin/Programming-Basic-with-Python | a57b6445d0fdfca23017aa691208899935fcf5e7 | 1e8cc924143771b7737bb54ad8f04ae5b88c1e81 | refs/heads/master | 2022-11-13T05:56:11.881552 | 2020-06-29T21:58:29 | 2020-06-29T21:58:29 | 267,246,983 | 0 | 4 | null | 2020-06-29T08:00:57 | 2020-05-27T07:10:11 | Python | UTF-8 | Python | false | false | 166 | py | input1 = int(input("Please enter first number:"))
input2 = int(input("Please enter second number:"))
result = input1 + input2
print(input1,"+",input2,"=",result)
| [
"khuncholwin.2019@gmail.com"
] | khuncholwin.2019@gmail.com |
dcea085cb7842830d5dfcc959fbeb3c62d396f90 | eb20c4726f3241e4cd86e9603a2b6af0f7df1154 | /pocket2notion.py | 1e8c02710c024a9d3b94c7b5e6e50bd1ff14c9b9 | [
"MIT"
] | permissive | PrabhaSahiti/PickPocket | 066065bacb0379e07cc1175a844f1861432b308d | 5c10becdf57efd28063a7b70166be96c7c5fb088 | refs/heads/master | 2022-12-01T15:30:29.871082 | 2020-08-18T06:09:06 | 2020-08-18T06:09:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,084 | py | from bs4 import BeautifulSoup
from random import choice
from uuid import uuid1
from datetime import datetime
from notion.client import NotionClient
from notion.collection import NotionDate
PATH_POCKET_FILE = ""
NOTION_TOKEN = ""
NOTION_TABLE_ID = ""
class PocketListItem:
    """One article exported from Pocket.

    Attributes:
        title: article title as shown in Pocket.
        url: article URL (used elsewhere as the de-duplication key).
        tags: list of tag strings attached to the article.
        addedOn: Unix timestamp (seconds) when the article was saved.
        readStatus: True if the article is in the archive (read),
            False if it is in the unread list.
    """

    def __init__(self, title, url, tags, addedOn, readStatus):
        # The original class-level defaults were removed: in particular
        # ``tags = []`` was a mutable attribute shared by every instance.
        # Every attribute is always assigned here, so the defaults only
        # risked accidental cross-instance sharing.
        self.title = title
        self.url = url
        self.tags = tags
        self.addedOn = addedOn
        self.readStatus = readStatus
def retrieveAllPocketItems():
    """Parse the Pocket HTML export and return a list of PocketListItem.

    The export contains two <h1>-delimited sections: the unread list
    (anchors before the second <h1>, readStatus False) followed by the
    archive (anchors after it, readStatus True).  The two near-identical
    parsing loops of the original were folded into one helper.
    """
    with open(PATH_POCKET_FILE, encoding='utf8', errors='ignore') as fp:
        soup = BeautifulSoup(fp, 'html.parser')

    def _parse(anchors, readStatus):
        # Convert a sequence of <a> tags into PocketListItem objects.
        items = []
        for anchor in anchors:
            items.append(PocketListItem(
                anchor.get_text(),
                anchor['href'],
                anchor['tags'].split(','),
                int(anchor['time_added']),
                readStatus))
        return items

    # The second <h1> separates the unread list from the archive.
    itemList = soup.h1.find_next("h1")
    allPocketListItems = []
    # Items from the user's (unread) Pocket list.
    allPocketListItems.extend(_parse(itemList.find_all_previous("a"), False))
    # Items from the user's archive (already read).
    allPocketListItems.extend(_parse(itemList.find_all_next("a"), True))
    return allPocketListItems
def itemAlreadyExists(item):
    """Return True if an item with the same URL is in allPocketListItems.

    NOTE(review): when ``item`` itself comes from ``allPocketListItems``
    (as in addToNotion) it always matches its own URL, so this returns
    True for every item and the caller skips everything -- confirm the
    intended comparison target (e.g. rows already present in Notion).
    """
    index = 0  # redundant: enumerate already tracks the position
    for index, eachItem in enumerate(allPocketListItems):
        index += 1
        # print(f"Checking for {eachItem.url}")
        if item.url == eachItem.url:
            return True
    return False
# Notion select-option palette; a random color is picked for each new tag.
colors = ['default', 'gray', 'brown', 'orange', 'yellow', 'green', 'blue', 'purple', 'pink', 'red']
def addNewTag(cv, schema, prop, tag):
    """Append *tag* as a new select option to the Tags property *prop*
    and push the updated *schema* back to the Notion collection *cv*.

    Raises ValueError if an option with the same value already exists.
    """
    dupe = next(
        (o for o in prop["options"] if o["value"] == tag), None
    )
    if dupe:
        raise ValueError(f'{tag} already exists in the schema!')
    # New option gets a fresh UUID and a randomly chosen color.
    prop["options"].append(
        {"id": str(uuid1()), "value": tag, "color": choice(colors)}
    )
    try:
        cv.collection.set("schema", schema)
    # NOTE(review): these errors are deliberately swallowed -- the schema
    # write appears to succeed despite them; confirm against notion-py.
    except (RecursionError, UnicodeEncodeError):
        pass
def setTag(page, cv, prop, new_values):
    """Set the 'Tags' property of *page* to *new_values*, creating any
    select options that do not yet exist in the collection schema.

    An item with no tags arrives as [''] and is ignored (returns []).
    NOTE(review): the ``prop`` argument is never read -- it is immediately
    overwritten with the Tags property looked up from the live schema.
    """
    schema = cv.collection.get("schema")
    new_values_set = set(new_values)
    if new_values == ['']:
        return []
    # Locate the Tags property in the schema (ignores the passed-in prop).
    prop = next(
        (v for k, v in schema.items() if v["name"] == 'Tags'), None
    )
    if "options" not in prop: prop["options"] = []
    current_options_set = set(
        [o["value"] for o in prop["options"]]
    )
    # Register every tag that is not yet a select option.
    intersection = new_values_set.intersection(current_options_set)
    if len(new_values_set) > len(intersection):
        difference = [v for v in new_values_set if v not in intersection]
        for d in difference:
            addNewTag(cv, schema, prop, d)
    page.set_property('Tags', new_values)
def addToNotion():
    """Insert every Pocket item as a row of the Notion table.

    Duplicate URLs within the export (e.g. an article present in both the
    unread list and the archive) are inserted only once.  The previous
    implementation delegated to itemAlreadyExists(), which always matched
    an item against itself in allPocketListItems and therefore skipped
    every single row; a local seen-URL set fixes that.
    """
    seen_urls = set()
    total = len(allPocketListItems)
    for index, eachItem in enumerate(allPocketListItems, start=1):
        if eachItem.url in seen_urls:
            continue
        seen_urls.add(eachItem.url)
        row = cv.collection.add_row()
        row.title = eachItem.title
        row.url = eachItem.url
        setTag(row, cv, 'prop', eachItem.tags)
        row.added_on = NotionDate(datetime.fromtimestamp(eachItem.addedOn))
        row.read = eachItem.readStatus
        print(f"{index}/{total} added")
# Connect to Notion and open the target table's collection view.
client = NotionClient(token_v2= NOTION_TOKEN)
cv = client.get_collection_view(NOTION_TABLE_ID)
print(cv.parent.views)
# Parse the Pocket HTML export into memory...
print("Retreiving all items from Pocket")
allPocketListItems = retrieveAllPocketItems()
print("Retreival done")
# ...then push each item as a row of the Notion table.
print("Inserting items as table entries in Notion database")
addToNotion()
print(cv.collection.get('rows'))
print("Transfer successfully completed") | [
"jeffreysamjacob@gmail.com"
] | jeffreysamjacob@gmail.com |
9959508f69633b29c762f2285fb116a501e0c961 | 1baeb727b5c8572a6e923e88deb4a59ee8d6ebca | /.metadata/.plugins/org.eclipse.core.resources/.history/37/704b143a99ba00161259f1a1b49e744f | f8c0f3a56b13a0ba016333293c1468803944edcc | [
"MIT"
] | permissive | gemoore-uw/pycog | a02e1ca2f745573e165f005b49db7f80748aeff1 | f6e4015116d6ff83771518c05ade00a43f865ca5 | refs/heads/master | 2020-06-12T10:10:38.420800 | 2016-12-05T05:07:46 | 2016-12-05T05:07:46 | 75,589,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | #! /usr/bin/env python
"""
Reproduce every figure in the paper from scratch.
Notes
-----
* Running this script in its entirety will take some time.
* We run a fair number of trials to get pretty psychometric curves, and this is done
in one big chunk of memory. You may need to change this to run more trials, depending
on your setup.
* Training converged for all the seeds we tried but we picked the ones that produced
the prettiest plots for the paper -- we hope that's understandable!
"""
from __future__ import division
import argparse
import datetime
import os
import subprocess
import sys
from os.path import join
import numpy as np
from pycog.utils import get_here, mkdir_p
#=========================================================================================
# Command line
#=========================================================================================
p = argparse.ArgumentParser()
p.add_argument('-g', '--gpus', nargs='?', type=int, const=1, default=0)
p.add_argument('-s', '--simulate', action='store_true', default=False)
p.add_argument('args', nargs='*')
a = p.parse_args()
# GPUs
gpus = a.gpus
simulate = a.simulate
args = a.args
if not args:
args = [
'rdm', # Fig. 2
'structure', # Fig. 3
'mante', # Fig. 4
'mante_areas', 'connectivity', # Fig. 5
'multisensory', # Fig. 6
'romo', # Fig. 7
'lee', # Fig. 8
'performance' # Fig. 9
]
#=========================================================================================
# Shared steps
#=========================================================================================
here = get_here(__file__)
#base = os.path.abspath(join(here, os.pardir))
# NOTE(review): the portable base computation above was replaced by a
# hard-coded absolute path -- this only works on that one machine.
base = '/home/gemoore/Documents/pycog'
examplespath = join(base, 'examples')
modelspath = join(examplespath, 'models')
analysispath = join(examplespath, 'analysis')
paperpath = join(base, 'paper')
paperfigspath = join(paperpath, 'figs')
timespath = join(paperpath, 'times')
# Make paths
mkdir_p(paperfigspath)
mkdir_p(timespath)
def call(s):
    """Run the shell command *s*, or just echo it in simulate mode.

    A nonzero return code aborts the whole script via sys.exit(1).
    """
    if not simulate:
        rv = subprocess.call(s.split())
        if rv != 0:
            sys.stdout.flush()
            print("Something went wrong (return code {}).".format(rv)
                  + " We're probably out of memory.")
            sys.exit(1)
    else:
        print(3*' ' + s)
def clean(model):
    """Remove all generated files for *model* via the do.py driver."""
    cmd = "python {} {} clean".format(join(examplespath, 'do.py'),
                                      join(modelspath, model))
    call(cmd)
def train(model, seed=None):
    """Train *model* via the do.py driver and record wall-clock minutes."""
    # Optional RNG seed is forwarded to the driver as ' -s <seed>'.
    seed_flag = '' if seed is None else ' -s {}'.format(seed)
    tstart = datetime.datetime.now()
    call("python {} {} train{} -g{}"
         .format(join(examplespath, 'do.py'), join(modelspath, model),
                 seed_flag, gpus))
    tend = datetime.datetime.now()
    # Persist the elapsed minutes for the paper's timing table.
    elapsed_minutes = int((tend - tstart).total_seconds()/60)
    np.savetxt(join(timespath, model + '_time.txt'), [elapsed_minutes], fmt='%d')
def train_seeds(model, start_seed=1, ntrain=5):
    """Clean and train *model* once per seed in [start_seed, start_seed+ntrain),
    saving each run's wall-clock minutes under a '_s<seed>' suffix.

    Note: ``xrange`` keeps this Python 2 only, matching the rest of the file.
    """
    for seed in xrange(start_seed, start_seed+ntrain):
        suffix = '_s{}'.format(seed)
        s = ' --seed {} --suffix {}'.format(seed, suffix)
        tstart = datetime.datetime.now()
        call("python {} {} clean{}"
             .format(join(examplespath, 'do.py'), join(modelspath, model), s))
        call("python {} {} train{} -g{}"
             .format(join(examplespath, 'do.py'), join(modelspath, model), s, gpus))
        tend = datetime.datetime.now()
        # Save training time
        totalmins = int((tend - tstart).total_seconds()/60)
        timefile = join(timespath, model + suffix + '_time.txt')
        np.savetxt(timefile, [totalmins], fmt='%d')
def trials(model, ntrials, analysis=None, args=''):
    """Generate *ntrials* trials of *model* with the chosen analysis script
    (defaults to the analysis script named after the model)."""
    chosen_analysis = model if analysis is None else analysis
    cmd = "python {} {} run {} trials {} {}".format(join(examplespath, 'do.py'),
                                                    join(modelspath, model),
                                                    join(analysispath, chosen_analysis),
                                                    ntrials, args)
    call(cmd)
def do_action(model, action, analysis=None):
    """Run *action* of the given analysis script on *model*
    (defaults to the analysis script named after the model)."""
    chosen_analysis = model if analysis is None else analysis
    cmd = "python {} {} run {} {}".format(join(examplespath, 'do.py'),
                                          join(modelspath, model),
                                          join(analysispath, chosen_analysis),
                                          action)
    call(cmd)
def figure(fig):
    """Run the paper plotting script that produces figure *fig*."""
    script = join(paperpath, fig + '.py')
    call('python ' + script)
#=========================================================================================
if 'structure' in args:
print("=> Perceptual decision-making task (structure)")
antagonist_levels = 1-np.array([0,1,2,5,10,20,50])/100
#models = ['rdm_nodale', 'rdm_dense', 'rdm_fixed']
models = ['rdm_dense']
#seeds = [None, 101, 1001] # Pick out the prettiest
seeds = [101]
for m, seed in zip(models, seeds):
for antagonist_level in antagonist_levels:
ant_lvl_str = '--ant_lvl '+str(antagonist_level)
trials(m, 10, 'rdm', args=ant_lvl_str)
do_action(m, 'selectivity', 'rdm')
figure('fig_structure') | [
"gemoore@uw.edu"
] | gemoore@uw.edu | |
5a4ce6f1374cd32b69c79e797f88f6f63773ad45 | 44d9c8e31546e781b2654056d8ad597a70e1fb1d | /problem49.py | c5a76eefee62575ecb698d2ef21535f4a287da05 | [] | no_license | msg430/Project-Euler | 2fbca0472127e62f9cdf931d6a029a6da51089b1 | 8c9bc55b639952a6681a35ac6bdc0a4f6c3dbe2b | refs/heads/master | 2020-05-30T10:25:50.475807 | 2019-11-06T19:31:22 | 2019-11-06T19:31:22 | 189,672,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py |
def getPrimes():
    """Return all four-digit primes (1000..9999) in increasing order.

    Rewritten with a sieve of Eratosthenes: the original trial-divided
    every candidate by every known prime (quadratic work) and relied on a
    possibly-unbound ``hold`` variable for the slicing step.  The returned
    list is identical.
    """
    limit = 10000
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            # Multiples below p*p were already crossed out by smaller primes.
            for multiple in range(p * p, limit, p):
                is_prime[multiple] = False
    return [n for n in range(1000, limit) if is_prime[n]]
def perms(num1, num2):
    """Return True if the decimal digits of num1 and num2 are permutations
    of each other (same multiset of digits)."""
    return sorted(str(num1)) == sorted(str(num2))
if __name__ == '__main__':
    # Project Euler 49: find arithmetic progressions of 4-digit primes that
    # are digit-permutations of one another.
    primes = getPrimes()
    progressions = []
    # Partition the primes into groups of mutual digit-permutations.
    while True:
        try:
            current = primes.pop(0)
        except IndexError:
            break
        thisProgression = []
        for p in primes:
            if perms(current, p):
                thisProgression.append(p)
        # Remove the matched primes so each group is built only once.
        for p in thisProgression:
            primes.remove(p)
        thisProgression.append(current)
        # Only groups of 3+ members can contain a 3-term progression.
        if len(thisProgression) > 2:
            progressions.append(thisProgression)
    candidates = []
    for p in progressions:
        # All pairwise absolute differences with their member indices.
        pairs = []
        for a in range(len(p)-1):
            for b in range(a+1, len(p)):
                pairs.append((abs(p[b]-p[a]), a,b))
        # Count how often each difference occurs.
        dictionary = dict()
        for a in pairs:
            if a[0] in dictionary:
                dictionary[a[0]] += 1
            else:
                dictionary[a[0]] = 1
        # Differences appearing at least twice may form a progression.
        poss = []
        for a in dictionary.keys():
            if dictionary[a] >= 2:
                poss.append(a)
        for k in poss:
            # Collect all members involved in pairs with this difference.
            hold = set()
            for a in pairs:
                if a[0] == k:
                    hold.add(p[a[1]])
                    hold.add(p[a[2]])
            hold = list(hold)
            hold.sort()
            length = len(hold)
            base = hold[1]-hold[0]
            # NOTE(review): ``bad`` is inverted -- True means the sequence IS
            # an arithmetic progression; also the inner loop variable ``k``
            # shadows the difference ``k`` of the outer loop.
            bad = True
            for k in range(2,length):
                if hold[k]-hold[k-1] != base:
                    bad = False
                    break
            if bad:
                candidates.append(hold)
    print(candidates)
| [
"msg430@mac.com"
] | msg430@mac.com |
01a356f1fac842936aef6aadf37335b90cd1c87b | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /enas_lm/src/controller.py | a737235c5997a2e2944bca765b591e48869fbeda | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 9,675 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ENAS controller."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow
tf = tensorflow.compat.v1
flags = tf.app.flags
FLAGS = flags.FLAGS
# Command-line defaults for the controller hyper-parameters; they are
# copied onto the params object by _set_default_params() below.
flags.DEFINE_float('controller_baseline_dec', 0.999, '')
flags.DEFINE_float('controller_entropy_weight', 1e-5, '')
flags.DEFINE_float('controller_temperature', 5., '')
flags.DEFINE_float('controller_tanh_constant', 2.25, '')
flags.DEFINE_float('controller_learning_rate', 5e-5, '')
flags.DEFINE_integer('controller_num_layers', 9, '')
# Reward scale: reward = REWARD_CONSTANT / validation perplexity
# (see Controller.build_trainer).
REWARD_CONSTANT = 80.0
def _build_train_op(loss, tf_vars, learning_rate, train_step, num_aggregate):
  """Build training ops from `loss` tensor.

  Args:
    loss: scalar loss tensor to minimize.
    tf_vars: list of variables to differentiate against.
    learning_rate: Adam learning rate.
    train_step: global-step variable incremented by apply_gradients.
    num_aggregate: number of gradients aggregated per update by the
      SyncReplicasOptimizer wrapper.

  Returns:
    (train_op, optimizer, grad_norm) where grad_norm is the global norm
    of the raw (unclipped) gradients.
  """
  optim = tf.train.AdamOptimizer(learning_rate)
  # Aggregate `num_aggregate` gradients before each update (single replica).
  optim = tf.train.SyncReplicasOptimizer(
      optim, replicas_to_aggregate=num_aggregate, total_num_replicas=1)
  grads = tf.gradients(loss, tf_vars)
  train_op = optim.apply_gradients(zip(grads, tf_vars), global_step=train_step)
  grad_norm = tf.global_norm(grads)
  return train_op, optim, grad_norm
def _lstm(x, prev_c, prev_h, w_lstm):
  """Single LSTM step: returns the next (cell, hidden) state pair."""
  # One fused matmul produces the input/forget/output/candidate
  # pre-activations in a single [1, 4*hidden] tensor.
  gates = tf.matmul(tf.concat([x, prev_h], axis=1), w_lstm)
  i_gate, f_gate, o_gate, g_cand = tf.split(gates, 4, axis=1)
  i_gate = tf.sigmoid(i_gate)
  f_gate = tf.sigmoid(f_gate)
  o_gate = tf.sigmoid(o_gate)
  g_cand = tf.tanh(g_cand)
  next_c = f_gate * prev_c + i_gate * g_cand
  next_h = o_gate * tf.tanh(next_c)
  return next_c, next_h
def _set_default_params(params):
  """Add controller's default params.

  Values come either from the module FLAGS (tunable on the command line)
  or from fixed defaults; the mutated `params` object is returned.
  """
  params.add_hparam('controller_hidden_size', 64)
  params.add_hparam('controller_num_layers', FLAGS.controller_num_layers)
  params.add_hparam('controller_num_functions', 4) # tanh, relu, sigmoid, iden
  params.add_hparam('controller_baseline_dec', FLAGS.controller_baseline_dec)
  params.add_hparam('controller_entropy_weight',
                    FLAGS.controller_entropy_weight)
  params.add_hparam('controller_temperature', FLAGS.controller_temperature)
  params.add_hparam('controller_tanh_constant', FLAGS.controller_tanh_constant)
  params.add_hparam('controller_learning_rate', FLAGS.controller_learning_rate)
  params.add_hparam('controller_num_aggregate', 10)
  params.add_hparam('controller_num_train_steps', 25)
  return params
class Controller(object):
  """ENAS controller. Samples architectures and creates training ops."""
  def __init__(self, params, name='controller'):
    print('-' * 80)
    print('Create a controller')
    self.params = _set_default_params(params)
    self.name = name
    self._build_params()
    self._build_sampler()
  def _build_params(self):
    """Create TF parameters."""
    initializer = tf.random_uniform_initializer(minval=-0.01, maxval=0.01)
    num_funcs = self.params.controller_num_functions
    hidden_size = self.params.controller_hidden_size
    with tf.variable_scope(self.name, initializer=initializer):
      with tf.variable_scope('lstm'):
        self.w_lstm = tf.get_variable('w', [2 * hidden_size, 4 * hidden_size])
      with tf.variable_scope('embedding'):
        self.g_emb = tf.get_variable('g', [1, hidden_size])
        self.w_emb = tf.get_variable('w', [num_funcs, hidden_size])
      with tf.variable_scope('attention'):
        self.attn_w_1 = tf.get_variable('w_1', [hidden_size, hidden_size])
        self.attn_w_2 = tf.get_variable('w_2', [hidden_size, hidden_size])
        self.attn_v = tf.get_variable('v', [hidden_size, 1])
    num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()
                      if v.name.startswith(self.name)])
    print('Controller has {0} params'.format(num_params))
  def _build_sampler(self):
    """Build the sampler ops and the log_prob ops.

    For each of the `num_layers` cell nodes the controller LSTM makes two
    sequential decisions: (1) which previous node to connect to
    (attention over all earlier hidden states), and (2) which activation
    function to use (softmax over the `w_emb` function embeddings).
    Both choices are appended to `arc_seq`.
    """
    hidden_size = self.params.controller_hidden_size
    num_layers = self.params.controller_num_layers
    arc_seq = []
    sample_log_probs = []
    sample_entropy = []
    all_h = [tf.zeros([1, hidden_size], dtype=tf.float32)]
    all_h_w = [tf.zeros([1, hidden_size], dtype=tf.float32)]
    # sampler ops
    inputs = self.g_emb
    prev_c = tf.zeros([1, hidden_size], dtype=tf.float32)
    prev_h = tf.zeros([1, hidden_size], dtype=tf.float32)
    inputs = self.g_emb
    for layer_id in range(1, num_layers+1):
      # --- decision 1: previous node to connect to (attention scores) ---
      next_c, next_h = _lstm(inputs, prev_c, prev_h, self.w_lstm)
      prev_c, prev_h = next_c, next_h
      all_h.append(next_h)
      all_h_w.append(tf.matmul(next_h, self.attn_w_1))
      query = tf.matmul(next_h, self.attn_w_2)
      query = query + tf.concat(all_h_w[:-1], axis=0)
      query = tf.tanh(query)
      logits = tf.matmul(query, self.attn_v)
      logits = tf.reshape(logits, [1, layer_id])
      if self.params.controller_temperature:
        logits /= self.params.controller_temperature
      if self.params.controller_tanh_constant:
        logits = self.params.controller_tanh_constant * tf.tanh(logits)
      # Penalize connections to distant layers (quadratic in the distance).
      diff = tf.to_float(layer_id - tf.range(0, layer_id)) ** 2
      logits -= tf.reshape(diff, [1, layer_id]) / 6.0
      skip_index = tf.multinomial(logits, 1)
      skip_index = tf.to_int32(skip_index)
      skip_index = tf.reshape(skip_index, [1])
      arc_seq.append(skip_index)
      log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=skip_index)
      sample_log_probs.append(log_prob)
      entropy = log_prob * tf.exp(-log_prob)
      sample_entropy.append(tf.stop_gradient(entropy))
      # Feed the chosen node's hidden state (distance-scaled) back in.
      inputs = tf.nn.embedding_lookup(
          tf.concat(all_h[:-1], axis=0), skip_index)
      inputs /= (0.1 + tf.to_float(layer_id - skip_index))
      # --- decision 2: activation function for this node ---
      next_c, next_h = _lstm(inputs, prev_c, prev_h, self.w_lstm)
      prev_c, prev_h = next_c, next_h
      logits = tf.matmul(next_h, self.w_emb, transpose_b=True)
      if self.params.controller_temperature:
        logits /= self.params.controller_temperature
      if self.params.controller_tanh_constant:
        logits = self.params.controller_tanh_constant * tf.tanh(logits)
      func = tf.multinomial(logits, 1)
      func = tf.to_int32(func)
      func = tf.reshape(func, [1])
      arc_seq.append(func)
      log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=func)
      sample_log_probs.append(log_prob)
      entropy = log_prob * tf.exp(-log_prob)
      sample_entropy.append(tf.stop_gradient(entropy))
      inputs = tf.nn.embedding_lookup(self.w_emb, func)
    arc_seq = tf.concat(arc_seq, axis=0)
    self.sample_arc = arc_seq
    self.sample_log_probs = tf.concat(sample_log_probs, axis=0)
    self.ppl = tf.exp(tf.reduce_mean(self.sample_log_probs))
    sample_entropy = tf.concat(sample_entropy, axis=0)
    self.sample_entropy = tf.reduce_sum(sample_entropy)
    self.all_h = all_h
  def build_trainer(self, child_model):
    """Build the train ops by connecting Controller with a Child.

    REINFORCE: reward = REWARD_CONSTANT / child validation perplexity
    (plus an optional entropy bonus), with an exponential-moving-average
    baseline for variance reduction.
    """
    # actor
    self.valid_loss = tf.to_float(child_model.rl_loss)
    self.valid_loss = tf.stop_gradient(self.valid_loss)
    self.valid_ppl = tf.exp(self.valid_loss)
    self.reward = REWARD_CONSTANT / self.valid_ppl
    if self.params.controller_entropy_weight:
      self.reward += self.params.controller_entropy_weight * self.sample_entropy
    # or baseline
    self.sample_log_probs = tf.reduce_sum(self.sample_log_probs)
    self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
    # EMA update: baseline <- baseline - (1 - dec) * (baseline - reward).
    baseline_update = tf.assign_sub(self.baseline,
                                    ((1 - self.params.controller_baseline_dec) *
                                     (self.baseline - self.reward)))
    with tf.control_dependencies([baseline_update]):
      self.reward = tf.identity(self.reward)
    # Policy-gradient surrogate loss.
    self.loss = self.sample_log_probs * (self.reward - self.baseline)
    self.train_step = tf.Variable(
        0, dtype=tf.int32, trainable=False, name='train_step')
    tf_vars = [var for var in tf.trainable_variables()
               if var.name.startswith(self.name)]
    self.train_op, self.optimizer, self.grad_norm = _build_train_op(
        loss=self.loss,
        tf_vars=tf_vars,
        learning_rate=self.params.controller_learning_rate,
        train_step=self.train_step,
        num_aggregate=self.params.controller_num_aggregate)
  def train(self, sess, reset_op, log_every=10):
    """Train the controller for `num_steps`.

    `reset_op` is run after every step (e.g. to reset the child's state);
    progress is logged every `log_every` steps.
    """
    print('-' * 80)
    print('Training controller')
    num_steps = (self.params.controller_num_aggregate *
                 self.params.controller_num_train_steps)
    run_ops = [self.sample_arc,
               self.sample_entropy,
               self.reward,
               self.baseline,
               self.train_op]
    for step in range(num_steps):
      arc, ent, reward, baseline, _ = sess.run(run_ops)
      sess.run(reset_op)
      if step % log_every == 0:
        log_string = 'step={0:<5d}'.format(step)
        log_string += ' ent={0:<7.3f}'.format(ent)
        log_string += ' ppl={0:<7.2f}'.format(REWARD_CONSTANT / reward)
        log_string += ' rw={0:<7.4f}'.format(reward)
        log_string += ' bl={0:<7.4f}'.format(baseline)
        log_string += ' arc=[{0}]'.format(' '.join([str(v) for v in arc]))
        print(log_string)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
355eedb74edbb8dc1eb5c11bf7aef7b1b6fbb9e9 | 3d5a669f0878a4594893e5136849faa0915aea71 | /src/ghost_eval/expe_tools.py | 79c4814123de987a4cc2b387b145a45835bd3353 | [] | no_license | mmoussallam/audio-sketch | 6829449a8c6fd0d303a9f813664c32b8d286801f | 486bc79ca3b4039f3b00665a67cf451f3d6ca367 | refs/heads/master | 2018-10-31T17:55:18.091308 | 2014-05-01T22:56:10 | 2014-05-01T22:56:10 | 7,933,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | '''
ghost_eval.expe_tools - Created on Dec 5, 2013
@author: M. Moussallam
'''
import sys, os
sys.path.append(os.environ['SKETCH_ROOT'])
from src.settingup import *
def learn(fgptsystem, filenames, sparsity, debug=0):
""" Given a fingerprinting system (sparsifier + landmark builder)
analyse all files in the list and populate the berkeley db object"""
(sparsifier, dbhandler) = fgptsystem
for fileIndex, filename in enumerate(filenames):
print fileIndex
# sparsify
sparsifier.recompute(filename,**{'sig_name':filename})
sparsifier.sparsify(sparsity)
# and build the landmarks
dbhandler.populate(sparsifier.fgpt(), sparsifier.params,
fileIndex, offset=0, debug=debug)
def test(fgptsystem, target, sparsity, nbfiles):
    """Sparsify *target* and query the fingerprint DB over *nbfiles* files."""
    sparsifier, dbhandler = fgptsystem
    sparsifier.recompute(target)
    sparsifier.sparsify(sparsity)
    fingerprint = sparsifier.fgpt()
    return dbhandler.retrieve(fingerprint, sparsifier.params, 0, nbfiles)
def testratio(sketchifier,fgptsystem, file_names, testratio, sparsity, refsparsity):
    """Evaluate how recognizable sketch resyntheses are.

    Picks ``testratio * len(file_names)`` random files, sketches and
    resynthesizes each one, then compares the fingerprint score of the
    resynthesis on the true file against the best score on any other file.
    Returns a list of normalized margins; -100 is a sentinel for a zero
    self-score (complete recognition failure).
    """
    (sparsifier, fgpthandle) = fgptsystem
    rndindexes = np.random.random_integers(0,len(file_names)-1,testratio*len(file_names))
    metric = []
    for rndidx in rndindexes:
        print "Working on ",file_names[rndidx]
        sketchifier.recompute(file_names[rndidx])
        sketchifier.sparsify(sparsity)
        print np.count_nonzero(sketchifier.sp_rep)
        resynth = sketchifier.synthesize(sparse=True)
        # now test the KOR
        # reference ?
        refhist = test((sparsifier,fgpthandle),file_names[rndidx], refsparsity, len(file_names))
        testhist = test((sparsifier,fgpthandle),resynth, refsparsity, len(file_names))
        # Normalize all candidate scores by the true file's reference score.
        scores = np.sum(testhist, axis=0)/np.sum(refhist[:,rndidx])
        print scores
        # the masked array will use all elements EXCEPT the one where the mask is TRUE
        masked_scores = np.ma.array(scores, mask=False)
        masked_scores.mask[rndidx] = True
        score = scores[rndidx]
        if score>0:
            # Margin between the true file's score and its best competitor.
            metric.append((score - np.max(masked_scores))/score)
        else:
            metric.append(-100)
        print "Score of ",metric[-1]
    return metric
def comparekeys(sketchifier, fgpthandle, ref, sparsityref, target, sparsitytarget):
    """Overlay the fingerprint keys of *ref* and *target* on *target*'s spectrogram.

    Reference keys are drawn in magenta and target keys in yellow so the
    overlapping landmarks can be compared visually.

    Fixed: the body previously referenced ``sparsifier``, which is not
    defined in this function -- it only worked if a module-level
    ``sparsifier`` happened to exist, and the ``sketchifier`` argument was
    never used.  The parameter is now used as intended.
    """
    fs = target.fs
    target.spectrogram(2048,256,ax=plt.gca(),order=0.5,log=False,cbar=False,
                       cmap=cm.bone_r, extent=[0,target.get_duration(),0, fs/2])
    # Keys of the reference signal (magenta).
    sketchifier.recompute(ref)
    sketchifier.sparsify(sparsityref)
    fgpthandle._build_pairs(sketchifier.fgpt(), sketchifier.params, display=True,
                            ax=plt.gca(), color='m')
    # Keys of the target signal (yellow).
    sketchifier.recompute(target)
    sketchifier.sparsify(sparsitytarget)
    fgpthandle._build_pairs(sketchifier.fgpt(), sketchifier.params, display=True,
                            ax=plt.gca(), color='y')
| [
"manuel.moussallam@gmail.com"
] | manuel.moussallam@gmail.com |
604335eaf619ffd9afb91d9ace631e45d4947b06 | 94155211e34fbd3cea8b5c0e72b1201708d53d7c | /sharematcher.py | 041fe51ade1b0a8ae1cf2e453302641793008813 | [] | no_license | michmay/share-database | b317f06f6eb3f9fd0dd1620f4024be337477eab3 | 75465c8af49ec11eb3893d6bd0448e3021e6f3dc | refs/heads/master | 2021-01-20T22:10:08.091586 | 2016-06-30T08:58:59 | 2016-06-30T08:58:59 | 62,284,444 | 0 | 0 | null | 2016-07-02T00:17:52 | 2016-06-30T06:14:15 | Python | UTF-8 | Python | false | false | 4,211 | py | import shelve
import os
os.chdir('C:\\python')
def inshares(self):
    """Print every client whose share list contains the ticker *self*,
    then print the number of matching clients.

    ``self`` is a ticker string (e.g. 'abc'), not an instance; the
    misleading name is kept only for compatibility with existing calls.
    """
    count = 0
    # Context manager guarantees the shelve file is closed after the scan
    # (the original left the handle open).
    with shelve.open('clientshares') as sharelist:
        for client in list(sharelist.keys()):
            if str(self.upper()) in sharelist[client]:
                print(client)
                count += 1
    print('\nCOUNTED '+str(count)+ ' CLIENTS\n')
def allclients():
    """Print every client name in upper case, then the total client count.

    The shelve handle is now closed after reading (the original leaked it).
    """
    with shelve.open('clientshares') as sharelist:
        names = list(sharelist.keys())
    for name in names:
        print(name.upper())
    print('COUNTED '+str(len(names))+' CLIENTS\n')
def allshares():
    """Return the de-duplicated, upper-cased union of all clients' shares.

    Fixed: the original appended ``j.upper()`` but tested membership with
    the original-case ``j``, so the same ticker written in different cases
    could be added more than once.  The shelve handle is also closed now.
    """
    allsharelist = []
    with shelve.open('clientshares') as sharelist:
        for client in list(sharelist.keys()):
            for share in sharelist[client]:
                ticker = share.upper()
                if ticker not in allsharelist:
                    allsharelist.append(ticker)
    return allsharelist
# Ask once at start-up whether to fetch live prices; any answer other than
# 'y' installs a stub shareprice() that always reports 'N/A'.
print('Use live share data? y/n')
live = input()
if live == 'y':
    import sharescraper
else:
    def shareprice(share):
        # Offline fallback: no price available.
        return 'N/A'
print('.....-----/////||||| GOLDEN GOOSE |||||\\\\\-----.....')
print('-\n-\n')
print('Prices are last ASX\n')
# Interactive menu: 'client' lists one client's holdings, 'ticker' searches
# clients by stock.  An empty answer inside a sub-menu returns here.
# NOTE(review): both branches call sharescraper.shareprice() directly, so
# when live data was declined the fallback shareprice() above is never used
# -- the bare excepts below then report 'NO_CONN'/'unavail' instead.
while True:
    print('Type \'client\' for individual client holdings mode, or \'ticker\' for share search:')
    x = input()
    if x.upper() == 'CLIENT':
        while True:
            print('\nEnter client name, or type All for a full list')
            y = input()
            if y == '':
                break
            y = y.upper()
            if y == 'ALL': #print all clients
                print('\n')
                allclients()
            else:
                #convert to uppercase (dict should have upper case keys)
                try:
                    sharelist = shelve.open('clientshares')
                    clientlist = sharelist[y] #see if theres a key by the name y
                    for i in range(len(clientlist)):
                        try:
                            iprice = sharescraper.shareprice(clientlist[i])
                        except:
                            iprice = 'NO_CONN'
                        print(clientlist[i]+':\t$'+iprice)
                    print('\nCOUNTED '+str(len(clientlist))+ ' SHARES\n')
                except:
                    # Unknown key (or any other failure) -- prompt again.
                    print('\ndid you spell the client name wrong?\n')
    if x.upper() == 'TICKER': #enter share selection mode
        while True:
            print('\nEnter stock ticker, eg ABC, to display clients holding that stock, or listall:\n')
            z = input()
            print('\n')
            if z == '':
                break
            z = z.upper()
            if z == 'LISTALL':
                print('\n')
                # Every distinct ticker across all clients, with prices.
                allsh = allshares()
                allsh.sort()
                for i in allsh:
                    try:
                        curprice = sharescraper.shareprice(i)
                    except:
                        curprice = 'unavail'
                    print(str(i)+'\t$'+str(curprice))
                print('\n')
                print(str(len(allsh))+' Total')
                print('\n\n')
            elif len(z) == 3:
                try:
                    curprice = sharescraper.shareprice(z)
                except:
                    curprice = 'NO_CONN'
                print('\n'+z+':\t$'+curprice)
                inshares(z) #searches clients to match with input
            else:
                print('ticker must be 3 letters, ***')
| [
"noreply@github.com"
] | michmay.noreply@github.com |
f4c540cb5a00c49f76b3c5fbaad4e403a6469b06 | cb9bf719224692e9f9533b1e04f8cde526a42acc | /tutorial-7:loop_and_range.py | 65e9061ed31a15aa0542e9735553c367e326d9e0 | [] | no_license | CoderTag/Python | c58d619bf0d267916b0a6328403ba2641bda0283 | 7c0797c17d1dd4e371d2196d005152addfde122d | refs/heads/master | 2022-12-28T01:17:17.624585 | 2020-10-05T01:17:01 | 2020-10-05T01:17:01 | 301,259,865 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | for i in range(5):
print(i)
# Start at 1 instead of 0: prints 1, 2, 3, 4 (range excludes the stop value 5).
for i in range(1,5):
    print(i)
| [
"sarkar.kaushik.2000@gmail.com"
] | sarkar.kaushik.2000@gmail.com |
f55b9299cbeb66e8bcb5b38ff0bd71c23a83a6ba | c546d31ff026a4b4b31cbd8cac15e148a58a21ef | /genomic_neuralnet/common/in_temp_dir.py | e1e0a5022b3c5604a55afa6b265af185c4c44bc5 | [
"MIT"
] | permissive | yzare/genomic-neuralnet | fbc48e2e08711f6503a8e39f393aa876fef584cf | 67ed4f55dc8d5fefb1d9e3a9fc920a0b643fe9c9 | refs/heads/master | 2020-04-01T02:31:58.463382 | 2016-10-30T19:43:56 | 2016-10-30T19:43:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from __future__ import print_function
from functools import wraps
import os
import shutil
from tempfile import mkdtemp
def in_temp_dir(func):
    """Decorator: run *func* inside a fresh temporary directory.

    A new directory is created for every call, made the current working
    directory for the duration of the call, and removed afterwards.
    Exceptions raised by *func* are printed (full traceback) and
    swallowed, preserving the original best-effort contract; in that
    case the wrapper returns None.
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):
        original_dir = os.getcwd()
        temp_dir = None
        try:
            temp_dir = mkdtemp('.neuralnet')
            os.chdir(temp_dir)
            return func(*args, **kwargs)
        except Exception:
            # Match the original behaviour: report and swallow.
            import traceback
            traceback.print_exc()
        finally:
            # Bug fixes vs. the original:
            #  * restore the caller's working directory (the original left
            #    the process chdir'd into the just-deleted temp dir);
            #  * guard temp_dir, which was unbound (NameError in finally)
            #    whenever mkdtemp itself failed.
            os.chdir(original_dir)
            if temp_dir is not None and os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)
    return wrapped_func
| [
"mcdori02@luther.edu"
] | mcdori02@luther.edu |
e122bb3d3368c573a15280ed853fb491d887ace3 | 9202070d54abdaf08ae168fd24746a67cca7e143 | /Domain/reservations.py | 88f776274c924db6c120a864fd110ca5004b952d | [] | no_license | NataliaPAS/Hotel_Management_Project | 942ca89cd26e5b5f227b7879348fbcbf1da958b4 | 7877d7abed3f5704597de31bdea356ffb2d6034b | refs/heads/main | 2023-04-11T18:13:43.592571 | 2021-04-15T16:23:51 | 2021-04-15T16:23:51 | 356,899,939 | 0 | 0 | null | 2021-04-11T15:26:29 | 2021-04-11T15:07:45 | null | UTF-8 | Python | false | false | 818 | py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, ForeignKey, Date
from Domain.clients import Client
from Domain.rooms import Rooms
Base = declarative_base()
class Reservations(Base):
    """ORM model: one room reservation made by a client for a date range."""
    __tablename__ = "reservations"
    # Surrogate primary key, assigned by the database.
    reservation_id = Column(Integer, primary_key=True, autoincrement=True)
    # Client who made the reservation (FK to the clients table).
    client_id = Column(Integer, ForeignKey(Client.client_id), nullable=False)
    # Reserved room (FK to the rooms table).
    room_number = Column(Integer, ForeignKey(Rooms.room_number), nullable=False)
    # Stay interval; both endpoints are required.
    start_date = Column(Date, nullable=False)
    end_date = Column(Date, nullable=False)
    def __str__(self):
        """Multi-line, human-readable summary of the reservation."""
        return (f'client_id: {self.client_id}\n'
                f'room_number: {self.room_number}\n'
                f'start_date: {self.start_date}\n'
                f'end_date: {self.end_date}\n')
| [
"natalia.pasare@gmail.com"
] | natalia.pasare@gmail.com |
0a0cd400dfd3fd9f0fb209fc40db6e334d6e89f5 | 9e4c1ab15df780194de08edeb68faa2054754a6d | /project/axf/app/apps.py | dd0d07dca0351f3cd6ef70c3b7f4922c8189a19d | [] | no_license | lywade123456/axf | 5a8a4d449b82f4a8c01cc04cbd31080f61bde343 | 3c5dfdc5dcb4c3b6d3d02f45be9663078741ea84 | refs/heads/master | 2020-05-20T12:48:54.309577 | 2018-05-16T08:50:48 | 2018-05-16T08:50:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from django.apps import AppConfig
class UauthConfig(AppConfig):
    """Django application configuration for the ``app`` package.

    NOTE(review): the class is named ``UauthConfig`` but registers the
    application name ``app`` -- confirm this matches INSTALLED_APPS.
    """
    name = 'app'  # dotted module path of the application this config applies to
| [
"1192567449@qq.com"
] | 1192567449@qq.com |
b4a8e037ef1b4853caa44d3efe1a1766cf25fc40 | 30764d288385e3f232e0f0d6fcaf1f43dde35d91 | /lesson3/4.py | dc7ed131ac79986f4235078f2df1736d259f1ea0 | [] | no_license | MaksimVlasenko2006/git_python | e9f21eab8a22ac55e728070954cad4d18babc426 | 7bad099f545f8e69d87c7a6cef061650eb5bfe50 | refs/heads/main | 2023-08-20T15:10:54.450056 | 2021-10-12T14:18:14 | 2021-10-12T14:18:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | p=1
# NOTE(review): the bound is astronomically large (~10**99), so this loop is
# effectively infinite; it prints a running factorial forever.  The preceding
# line (p=1) seeds the product.
for y in range(1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000):
    y+=1  # shift to 1-based counting by rebinding the loop variable
    p*=y  # running product: p == y! after this statement
    print("y=",y,"p=",p)
| [
"maksim.vlasenok@gmail.com"
] | maksim.vlasenok@gmail.com |
aee4ca93a1ac8b33461aab3ff14b4125e1e07852 | 6a8cd2128dee0cb348cf09054557df052b5d30a1 | /examples/ggplot/line.py | 70af3d6d95ae6283771749cddbdf20e427b92e20 | [] | no_license | amreenshaikh/bokeh | 0304700d93909f75330ea322b8a1d604dd00edaa | 81fbb8d4b9e85cfad02eb0c58f0b682a802d5dae | refs/heads/master | 2021-01-15T13:23:31.171831 | 2014-05-07T11:31:26 | 2014-05-07T11:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from ggplot import *
from bokeh import pyplot
import matplotlib.pyplot as plt
# Build the grammar-of-graphics object: beef price over time, as a line.
g = ggplot(aes(x='date', y='beef'), data=meat) + \
    geom_line()
# Render through matplotlib, then hand the current figure to Bokeh.
g.draw()
plt.title("Line ggplot-based plot in Bokeh.")
# Converts the matplotlib figure to a Bokeh document saved as line.html.
pyplot.show_bokeh(plt.gcf(), filename="line.html")
| [
"damianavila@gmail.com"
] | damianavila@gmail.com |
bcd2c7e9609e8220daa20bb688620b26c45ef2b3 | e0cbea0cb68f0ba5dba837dbe60067eb88e3d151 | /BeautifulSoup/soup_env/lib/python3.6/site-packages/urllib3/util/timeout.py | 601f7fc9dafaae86b17dc8da5fc0b56b5511cab2 | [] | no_license | arossbrian/my_short_scripts | 74fb689ac25feaffb14437496902ee1a0dcc5b60 | a34923f7ecbf027d8a0704400fcfb3e71ed662fd | refs/heads/master | 2023-05-24T16:45:51.050321 | 2023-05-11T16:20:30 | 2023-05-11T16:20:30 | 195,057,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,132 | py | from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
# Use time.monotonic if available.
current_time = getattr(time, "monotonic", time.time)
class Timeout(object):
    """Container for the connect/read/total timeout settings of a request.

    ``connect`` bounds how long a single connection attempt to the server
    may take, ``read`` bounds the gap between consecutive socket read
    operations, and ``total`` is an overall budget shared by both phases
    (whatever connecting consumes is subtracted from the time left for
    reading).  When both a phase value and ``total`` are given, the
    smaller limit applies.

    Each setting accepts a positive int or float number of seconds,
    ``None`` to disable the limit entirely, or :attr:`DEFAULT_TIMEOUT`
    to fall back to the interpreter's global socket default.

    A Timeout may be attached to a pool as its default or supplied per
    request.  Note that it restricts individual socket operations only:
    DNS resolution, CPU starvation, or a server that trickles one byte
    at a time can make a request last far longer than any value here
    without ever tripping the timeout.  Use a separate watchdog thread
    if a hard wall-clock cut-off is required.
    """

    #: Sentinel meaning "use the system-wide default socket timeout".
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT

    def __init__(self, total=None, connect=_Default, read=_Default):
        self._connect = self._validate_timeout(connect, "connect")
        self._read = self._validate_timeout(read, "read")
        self.total = self._validate_timeout(total, "total")
        # Set by start_connect(); None means the timer has not run yet.
        self._start_connect = None

    def __str__(self):
        return "%s(connect=%r, read=%r, total=%r)" % (
            type(self).__name__,
            self._connect,
            self._read,
            self.total,
        )

    @classmethod
    def _validate_timeout(cls, value, name):
        """Return *value* if it is a usable timeout, else raise ValueError.

        Acceptable values are the default sentinel, ``None``, and
        positive numbers; *name* only appears in error messages.
        """
        def _not_a_number():
            return ValueError(
                "Timeout value %s was %s, but it must be an "
                "int, float or None." % (name, value)
            )

        if value is _Default:
            return cls.DEFAULT_TIMEOUT

        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value

        # bool would pass the float() probe below, so reject it first.
        if isinstance(value, bool):
            raise ValueError(
                "Timeout cannot be a boolean value. It must "
                "be an int, float or None."
            )

        try:
            float(value)
        except (TypeError, ValueError):
            raise _not_a_number()

        try:
            if value <= 0:
                raise ValueError(
                    "Attempted to set %s timeout to %s, but the "
                    "timeout cannot be set to a value less "
                    "than or equal to 0." % (name, value)
                )
        except TypeError:
            # The value does not compare with 0 at all (Python 3).
            raise _not_a_number()

        return value

    @classmethod
    def from_float(cls, timeout):
        """Build a Timeout applying one legacy *timeout* number to both
        the connect and the read phase, mirroring how httplib used a
        single value for every socket operation.
        """
        return Timeout(read=timeout, connect=timeout)

    def clone(self):
        """Return a fresh, unstarted Timeout with identical settings.

        Copying field by field is deliberate: copy.deepcopy would also
        duplicate the DEFAULT_TIMEOUT sentinel and break the identity
        checks used to recognise it.
        """
        return Timeout(connect=self._connect, read=self._read, total=self.total)

    def start_connect(self):
        """Start the connect-phase clock and return the start timestamp.

        :raises urllib3.exceptions.TimeoutStateError: if already started.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect

    def get_connect_duration(self):
        """Return the seconds elapsed since :meth:`start_connect`.

        :raises urllib3.exceptions.TimeoutStateError: if the clock was
            never started.
        """
        if self._start_connect is None:
            raise TimeoutStateError(
                "Can't get connect duration for timer " "that has not started."
            )
        return current_time() - self._start_connect

    @property
    def connect_timeout(self):
        """Effective limit for the connection attempt: a positive number,
        ``None``, or :attr:`DEFAULT_TIMEOUT`.
        """
        connect = self._connect
        if self.total is None:
            return connect
        if connect is None or connect is self.DEFAULT_TIMEOUT:
            return self.total
        return min(connect, self.total)

    @property
    def read_timeout(self):
        """Effective limit for socket reads, charging whatever the connect
        phase already spent against ``total``.

        :raises urllib3.exceptions.TimeoutStateError: when only ``total``
            is configured and :meth:`start_connect` was never called.
        """
        total_set = self.total is not None and self.total is not self.DEFAULT_TIMEOUT
        read_set = self._read is not None and self._read is not self.DEFAULT_TIMEOUT
        if total_set and read_set:
            if self._start_connect is None:
                # Connecting never happened; the whole read budget remains.
                return self._read
            remaining = self.total - self.get_connect_duration()
            return max(0, min(remaining, self._read))
        if total_set:
            return max(0, self.total - self.get_connect_duration())
        return self._read
| [
"arossbrian@gmail.com"
] | arossbrian@gmail.com |
7e0f7f6ab87df090558f38fbee426d78cfdc7802 | 3b88944b10566a490d6e4dadf8f59b0495bcbb7f | /user.py | a095665d199f5cfe0a13f252b95896d3512048bb | [] | no_license | polisettikrishnaveni/Backend_for_Freshworks | aa9988d96b0275a0f5b728e88ffcfe06840c2ebf | baee0d42fb1b8cc5482e9f4c71a1423f8dfe0add | refs/heads/main | 2023-01-22T17:07:20.683354 | 2020-12-01T05:50:08 | 2020-12-01T05:50:08 | 317,436,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | import main as x
#"main" is the module that implements the key-value store API used below
x.Create("krishnaveni",50)
#Creates a key with the given name and value and no time-to-live property
x.Create("polisetti",80,3000)
#Creates a key with the given name and value and a time-to-live of 3000 seconds
x.Read("krishnaveni")
#Returns the value of the key in JSON-object format: 'key_name:value'
x.Read("krishna veni")
#Returns an error message because the key name contains a space (invalid format)
x.Read("polisetti")
#Returns the value in JSON-object format if the time-to-live has not expired; otherwise returns an error
x.Create("krishnaveni",60)
#Returns an error because the key name already exists in the database
x.Delete("krishnaveni")
#Deletes the key and its value from the database (the memory is freed as well)
x.Delete("kri$hnaveni")
#Returns an error message because the key name contains a special character
#The code also returns other errors, such as:
#  "invalidkey" if the key is longer than 32 characters or the name contains numeric/special characters
#  "key doesnot exist" if the key name was misspelt or the key was deleted earlier
#  "File memory limit reached" if the backing file exceeds 1GB
| [
"noreply@github.com"
] | polisettikrishnaveni.noreply@github.com |
19f70b0e9c40ef334a81a982960c149ec1c19e55 | f7dba3ef3410f62bf387dc827adb728be424b033 | /es18.py | af3baeb90e6924f17eefefb80d6e863d3d1ca96d | [] | no_license | Noobly321/javascript- | 60d892b64a2fda6c9b1cce04541282ff13b687bc | 1f1e4399f55a83724d9d5e6cba19f3460cef68bb | refs/heads/master | 2020-03-18T15:01:04.188070 | 2018-05-29T16:32:32 | 2018-05-29T16:32:32 | 134,881,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | # Look at A22 to read from an array
import collections
def dice_poker(input_fn=input):
    """Score a series of Yacht dice rolls read line by line.

    Reads the number of rolls first, then one line of five
    space-separated die faces per roll, and finally prints every roll's
    category on one line below an ``answer:`` header.

    :param input_fn: zero-argument callable returning the next input
        line; defaults to the built-in ``input`` so the original
        interactive usage (``dice_poker()``) is unchanged.
    """
    rolls = int(input_fn())
    answer = []
    for _ in range(rolls):
        roll = input_fn().split()  # five die faces as strings
        # Multiset shape of the roll, e.g. a full house gives [2, 3].
        values = sorted(collections.Counter(roll).values())
        ordered = sorted(roll)
        # Straights need the actual faces, not the multiset shape: ANY
        # five distinct faces give values == [1, 1, 1, 1, 1], which is
        # why the original mislabelled rolls like 1 2 3 4 6 as straights.
        if ordered == ['2', '3', '4', '5', '6']:
            answer.append('big-straight')
        elif ordered == ['1', '2', '3', '4', '5']:
            # Bug fix: only 1-5 is a small straight (label hyphenated for
            # consistency with 'big-straight'); other distinct-face rolls
            # now fall through to 'none'.
            answer.append('small-straight')
        elif values == [5]:
            answer.append('yacht')
        elif values == [1, 4]:
            answer.append('four')
        elif values == [1, 1, 3]:
            answer.append('three')
        elif values == [1, 1, 1, 2]:
            answer.append('pair')
        elif values == [2, 3]:
            answer.append('full-house')
        elif values == [1, 2, 2]:
            answer.append('two-pairs')
        else:
            answer.append('none')
    print('answer:')
    print(' '.join(answer))
dice_poker()  # interactive entry point: reads the roll count, then one roll per line
| [
"noreply@github.com"
] | Noobly321.noreply@github.com |
edec8b7d3661ff60282310b7940a414e207bd4f5 | bba0bc795e176c33b3985e5145ecbc354f20a4ac | /app/tests/testsv2/__init__.py | ef2d79651aad785de8062c84bff480c71c167ee1 | [] | no_license | Kibetchirchir/send-itv2 | d134a55d8a4b5634e72137006ba43898ea1ab02b | 8209729391a07d9fd573b03d94728a612e23e2e8 | refs/heads/develop | 2022-06-07T18:12:25.847843 | 2020-02-17T08:39:01 | 2020-02-17T08:39:01 | 158,283,089 | 0 | 0 | null | 2022-05-25T02:11:23 | 2018-11-19T20:04:47 | Python | UTF-8 | Python | false | false | 1,082 | py | import unittest
from app import create_app
from ...db_config import destroy_tables
class BaseClass(unittest.TestCase):
    """Shared fixture base for the test cases: app instance plus payloads."""
    def setUp(self):
        """Define test variables and initialize app."""
        # Fresh application instance in the "testing" configuration.
        self.app = create_app("testing")
        self.client = self.app.test_client
        # Canned request payloads reused by the concrete test cases.
        # NOTE(review): "recepient" is misspelt, but these keys must match
        # whatever the API under test expects -- confirm before renaming.
        self.user = {"name": "chirchir Kibet",
                     "email": "langatchirchir@gmail.com",
                     "role": "user",
                     "password": "kevin12345"}
        self.admin = {"name": "admin",
                      "email": "admin@gmail.com",
                      "role": "admin",
                      "password": "admin"}
        self.parcel = {"parcel_type": "box",
                       "recepient_number": "254715428709",
                       "recepient_name": "chirchir",
                       "drop_off_location": "dgfgf",
                       "status": "not-picked",
                       "weight": "5",
                       "pick_up_location": "df"}
    def tearDown(self):
        # Drop all tables so every test starts from a clean database.
        destroy_tables()
| [
"langatchirchir@gmail.com"
] | langatchirchir@gmail.com |
da1194f756c753a394e07dceba88b8c5de66dc67 | ccc75588ae015a5e77759427c9b7b778678c991f | /.venv/bin/django-admin.py | b9270c0a6716b908fd65e34b4332cfbc6714a4fd | [] | no_license | will3685/NAPPY-APP | 1a68b82942b9248be537cd84ee4270317232e9d5 | 7b7ea7b9e59b1d885217147ea250f78951ad3b87 | refs/heads/master | 2023-08-16T22:24:29.186147 | 2021-10-11T22:54:58 | 2021-10-11T22:54:58 | 415,320,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | #!/home/willtheard/code/will3685/UBUNTU/.venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
    # Only Django 3.1 through 3.x ships this warning class; its absence
    # means the installed Django no longer supports the legacy alias.
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn on every invocation, then defer to the normal management CLI.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"lavardwilltheard@yahoo.com"
] | lavardwilltheard@yahoo.com |
9c665f2636f0506c2191a10ad99da160b277c34e | eda12fedf7db9ba55c0f5819eb0df90e9889060b | /33_Type_C_TF_acrobot_discrete/03_TF_type_bc1_acrobot_a2c_GREEN.py | 340490526f43de0b81072517749f3a05058bff52 | [] | no_license | RichardMinsooGo-RL-Gym/TF1_4_Reinforcement_Learning_A3C_TF_Gym | 1e816ffc747367d8e40100a64f332d6406738a4a | 75c88b86a2d001b67971bafb37dbfd097a59932a | refs/heads/master | 2022-12-12T14:19:54.537542 | 2020-09-13T07:29:14 | 2020-09-13T07:29:14 | 277,669,264 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,441 | py | import os
import sys
import gym
import pylab
import numpy as np
import time
import tensorflow as tf
env_name = "Acrobot-v1"
env = gym.make(env_name)
# env.seed(1) # reproducible, general Policy gradient has high variance
# np.random.seed(123)
# tf.set_random_seed(456) # reproducible
# Strip gym's TimeLimit wrapper so episodes are not cut off automatically.
env = env.unwrapped
# get size of state and action from environment
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
# Hard cap on steps per episode (also used as the initial "worst" score).
MAX_EP_STEP = 3000
# Weight of the entropy bonus that encourages exploration.
ENTROPY_BETA = 0.001
# Learning rate for the shared Adam optimizer.
model_lr = 0.005
# Output folders for checkpoints and training curves, created on demand.
model_path = os.path.join(os.getcwd(), 'save_model')
graph_path = os.path.join(os.getcwd(), 'save_graph')
if not os.path.isdir(model_path):
    os.mkdir(model_path)
if not os.path.isdir(graph_path):
    os.mkdir(graph_path)
# Network for the Actor Critic
class A2C_agent(object):
    """TF1 advantage actor-critic (A2C) agent for a discrete action space.

    Builds, inside one variable scope, a softmax policy head (actor), a
    scalar state-value head (critic), and a single Adam training op over
    the combined loss.  Episode transitions are buffered and the network
    is updated once per finished episode via train_model().
    """
    def __init__(self, sess, scope):
        self.sess = sess
        # get size of state and action
        self.action_size = action_size
        self.state_size = state_size
        self.value_size = 1
        # these is hyper parameters for the ActorCritic
        self.discount_factor = 0.99 # decay rate
        # NOTE(review): hidden2 is never used -- only one hidden layer is built.
        self.hidden1, self.hidden2 = 128, 128
        self.scope = scope
        # create model for actor and critic network
        with tf.variable_scope(self.scope):
            self._init_input()
            self.build_model()
            self._init_op()
    def _init_input(self):
        # Graph inputs: batched states, chosen actions, and the bootstrapped
        # discounted returns used as the critic's regression target.
        # with tf.variable_scope('input'):
        self.state = tf.placeholder(tf.float32, [None, self.state_size], name='state')
        self.action = tf.placeholder(tf.int32, [None, ], name='action')
        self.q_target = tf.placeholder(tf.float32, name="q_target")
    def _init_op(self):
        # Losses and the shared training op.
        # with tf.variable_scope('td_error'):
        # A_t = R_t - V(S_t)
        # self.td_error = tf.subtract(self.q_target, self.value, name='td_error')
        self.td_error = self.q_target - self.value
        # with tf.variable_scope('critic_loss'):
        # Value loss
        # self.critic_loss = tf.reduce_mean(tf.square(self.td_error))
        self.critic_loss = tf.reduce_mean(tf.square(self.value - self.q_target), axis=1)
        # with tf.variable_scope('actor_loss'):
        # Policy-gradient term: log pi(a|s), weighted below by the advantage
        # (gradient is stopped so the critic is not trained through it).
        log_prob = tf.reduce_sum(tf.log(self.policy + 1e-5) * tf.one_hot(self.action, self.action_size, dtype=tf.float32), axis=1, keep_dims=True)
        exp_v = log_prob * tf.stop_gradient(self.td_error)
        entropy = -tf.reduce_sum(self.policy * tf.log(self.policy + 1e-5),
                                 axis=1, keep_dims=True) # encourage exploration
        self.exp_v = ENTROPY_BETA * entropy + exp_v
        self.actor_loss = tf.reduce_mean(-self.exp_v)
        self.loss_total = self.actor_loss + self.critic_loss
        # with tf.variable_scope('train'):
        self.train_op = tf.train.AdamOptimizer(model_lr).minimize(self.loss_total)
    # neural network structure of the actor and critic
    def build_model(self):
        """Create the actor (softmax policy) and critic (state value) heads."""
        w_init, b_init = tf.random_normal_initializer(.0, .3), tf.constant_initializer(0.1)
        with tf.variable_scope("actor"):
            actor_hidden = tf.layers.dense(self.state, self.hidden1, tf.nn.tanh, kernel_initializer=w_init,
                                           bias_initializer=b_init)
            self.actor_predict = tf.layers.dense(actor_hidden, self.action_size, kernel_initializer=w_init,
                                                 bias_initializer=b_init)
            self.policy = tf.nn.softmax(self.actor_predict)
        with tf.variable_scope("critic"):
            critic_hidden = tf.layers.dense(inputs=self.state, units = self.hidden1, activation=tf.nn.tanh, # tanh activation
                                            kernel_initializer=w_init, bias_initializer=b_init, name='fc1_c')
            critic_predict = tf.layers.dense(inputs=critic_hidden, units = self.value_size, activation=None,
                                             kernel_initializer=w_init, bias_initializer=b_init, name='fc2_c')
            self.value = critic_predict
    # get action from policy network
    def get_action(self, state):
        """
        Sample an action index from the current policy.
        Arguments:
            state: 1-D array of features for a single observation
        Returns: index of the sampled action
        """
        # Reshape observation to a (1, num_features) batch.
        state_t = state[np.newaxis, :]
        # Run forward propagation to get softmax probabilities
        prob_weights = self.sess.run(self.policy, feed_dict={self.state: state_t})
        # Select action using a biased sample
        # this will return the index of the action we've sampled
        action = np.random.choice(range(prob_weights.shape[1]), p=prob_weights.ravel())
        return action
    # save <s, a ,r> of each step
    # this is used for calculating discounted rewards
    # NOTE(review): the buffer_* lists are created externally (in main),
    # not in __init__ -- append_sample fails if called before that setup.
    def append_sample(self, state, action, reward):
        self.buffer_state.append(state)
        self.buffer_action.append(action)
        self.buffer_reward.append(reward)
    # update policy network and value network every episode
    def train_model(self, next_state, done):
        # Bootstrap from the critic's value estimate unless terminal.
        if done:
            value_next_state = 0 # terminal
        else:
            value_next_state = self.sess.run(self.value, {self.state: next_state[np.newaxis, :]})[0][0]
        # Walk the rewards backwards to accumulate discounted returns.
        for reward in self.buffer_reward[::-1]: # reverse buffer r
            value_next_state = reward + self.discount_factor * value_next_state
            self.buffer_q_target.append(value_next_state)
        self.buffer_q_target.reverse()
        feed_dict={
            self.state: np.vstack(self.buffer_state),
            self.action: np.array(self.buffer_action),
            self.q_target: np.vstack(self.buffer_q_target)
        }
        self.sess.run(self.train_op, feed_dict)
        # Clear the episode buffers for the next rollout.
        self.buffer_state, self.buffer_action, self.buffer_reward = [], [], []
        self.buffer_q_target = []
def main():
    """Train the A2C agent on Acrobot until time runs out or it converges."""
    with tf.Session() as sess:
        agent = A2C_agent(sess, "model")
        agent.sess.run(tf.global_variables_initializer())
        train_steps = 0
        # Episode buffers live on the agent and are filled step by step.
        agent.buffer_state, agent.buffer_action, agent.buffer_reward = [], [], []
        agent.buffer_q_target = []
        scores, episodes = [], []
        episode = 0
        avg_score = MAX_EP_STEP
        start_time = time.time()
        # Stop after 5 minutes of wall-clock time, or once the 30-episode
        # moving average of steps-per-episode drops to 90 or below.
        while time.time() - start_time < 5 * 60 and avg_score > 90:
            done = False
            score = 0
            state = env.reset()
            while not done and score < MAX_EP_STEP:
                # every time step we do train from the replay memory
                score += 1
                # fresh env
                # if agent.render:
                #     env.render()
                train_steps += 1
                # get action for the current state and go one step in environment
                action = agent.get_action(state)
                # make step in environment
                next_state, reward, done, _ = env.step(action)
                # save the sample <state, action, reward> to the memory
                agent.append_sample(state, action, reward)
                # if train_steps % 10 == 0 or done: # update global and assign to local net
                #     agent.train_model(next_state, done)
                # swap observation
                state = next_state
                # train when episode finished (or the step cap is reached)
                if done or score == MAX_EP_STEP:
                    episode += 1
                    agent.train_model(next_state, done)
                    # every episode, record the play time (steps survived)
                    scores.append(score)
                    episodes.append(episode)
                    avg_score = np.mean(scores[-min(30, len(scores)):])
                    print("episode :{:5d}".format(episode), "/ score :{:5d}".format(score))
                    break
        # NOTE(review): the figure name still says "Cartpole" although the
        # environment is Acrobot-v1, and "Elasped" is a typo inside an output
        # string; both are runtime strings and left untouched here.
        pylab.plot(episodes, scores, 'b')
        pylab.savefig("./save_graph/Cartpole_PG_TF.png")
        e = int(time.time() - start_time)
        print('Elasped time :{:02d}:{:02d}:{:02d}'.format(e // 3600, (e % 3600 // 60), e % 60))
        sys.exit()
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | RichardMinsooGo-RL-Gym.noreply@github.com |
3a37408e6701ce8a433e048bb81c974008209282 | f8caeb5b7f3e6f1d8c50dcc204dd170044b55671 | /Summarize/violin_gen.py | 8296781d01bba74429baba0920efd5e2b2444033 | [
"MIT"
] | permissive | jgh9094/CohortLexicase | 23c51d9f46fc22b81b4aea19007071ee0697194a | 5179a3c0db6dcf0c2cae79fcfd08b4b919c9269d | refs/heads/master | 2020-04-16T17:01:23.092772 | 2019-02-06T01:50:30 | 2019-02-06T01:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,584 | py | #Will list all of the incomplete id's that need to finish running
#SEED_ORIGINAL & TREATMENTS will need to be adjusted based on the problems, treatments, and seeds that the project requires.
#Will also need to handle RANGE if different from the expected results!
#
#Input 1: file directory along with name, expecting Alex's solution_summary file
#Input 2: Directory where the data will be placed
#
#Output : create a csv for each treatment we are looking at!
#
#python3
# Treatment token (as it appears in summary rows) -> display name.
# NOTE(review): SELECTION is not referenced in this script; confirm whether a
# sibling script still imports it before removing.
SELECTION = {'DOWN_SAMPLE_TESTS':'Down Sample', 'TRUNCATED':'Truncated', 'PROG_ONLY_COHORT_LEX':'Prog-Only Cohort', 'COHORT_LEX':'Cohort-Lexicase', 'FULL_LEXICASE':'Lexicase'}
# Cohort count -> "cnN_csM" (cohort number / cohort size) label used on plots.
DIMS = {1:"cn1_cs512", 16:'cn16_cs32', 128:'cn128_cs4', 256:'cn256_cs2', 2:'cn2_cs256', 32:'cn32_cs16', 4:'cn4_cs128', 64:'cn64_cs8', 8:'cn8_cs64'}
# Column positions inside each summary row.
POS_TREATMENT=0
POS_SOLUTION=4
import argparse
import pandas as pd
import matplotlib.pyplot as plt
def main():
    """Aggregate solution-summary rows into per-(problem, selection) CSVs.

    Reads the summary CSV named on the command line, groups the update
    (generation) at which a solution appeared by problem, selection
    scheme and cohort configuration, and writes one
    ``<problem>__<selection>__violin__gens.csv`` per group into the dump
    directory (input data for violin plots).  Runs marked ``"NONE"``
    (no solution found) are excluded from the output.
    """
    parser = argparse.ArgumentParser(description="Data aggregation script.")
    parser.add_argument("data_directory", type=str, help="Target experiment directory.")
    parser.add_argument("dump_directory", type=str, help="Target dump directory")
    args = parser.parse_args()
    data_directory = args.data_directory
    write_directory = args.dump_directory.strip()

    rows = pd.read_csv(data_directory).values.tolist()

    # count[problem][selection][cohort_count] -> list of update values.
    count = {}
    for row in rows:
        # Treatment string looks like: PROBLEM_<p>__SEL_<s>__CN_<n>__...
        treat = row[POS_TREATMENT].split('__')
        update = row[8]  # update/generation at which a solution appeared
        prob = treat[0][len('PROBLEM_'):]
        sel = treat[1][4:]  # drop the 'SEL_' prefix
        cn = int(treat[2].strip('CN_'))
        if prob == 'sum-of squares':
            # NOTE(review): probably a typo for 'sum-of-squares'; kept as-is
            # so behaviour is unchanged (the output loop below filters the
            # correctly spelt name anyway).
            continue
        # The original duplicated this insert across four nested branches;
        # chained setdefault collapses them with identical behaviour.
        count.setdefault(prob, {}).setdefault(sel, {}).setdefault(cn, []).append(update)

    for prob in count.keys():
        if prob == 'sum-of-squares':
            continue
        print(prob)
        for sel in count[prob].keys():
            dims = []
            gens = []
            for cn, cnt in count[prob][sel].items():
                for up in cnt:
                    if up != "NONE":  # "NONE" marks runs with no solution
                        dims.append(DIMS[cn])
                        gens.append(int(float(up)))
            print(sel)
            print(dims, len(dims))
            print(gens, len(gens))
            print()
            raw_data = {'dims': dims, 'gens': gens}
            df = pd.DataFrame(raw_data, columns=['dims', 'gens'])
            df.to_csv(write_directory + prob + '__' + sel + '__violin__gens.csv')
# Run only when executed as a script (keeps the module importable).
if __name__ == "__main__":
    main()
"josexavi06@gmail.com"
] | josexavi06@gmail.com |
181c1720d8cf8d3b896e53e57481503f6ebdea00 | 8ec1c9d30c14ec2acabb0554848b102f03d132f5 | /tests/unit/common/query/test_cell_query_results_reader.py | 901fd5008a67045d474fbc2a5bdcc769897ae952 | [
"MIT"
] | permissive | Deepbody-me/matrix-service | abafe115c5a9d7a8fcccc3373d1b08c343138af1 | ed9a23d407cce89127b8f0b662c3d2ef2e8fec77 | refs/heads/master | 2022-11-30T04:38:05.466495 | 2020-01-10T18:59:51 | 2020-01-10T18:59:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | import mock
import unittest
import pandas
from matrix.common.query.cell_query_results_reader import CellQueryResultsReader
class TestCellQueryResultsReader(unittest.TestCase):
@mock.patch("matrix.common.query.cell_query_results_reader.CellQueryResultsReader.load_slice")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test_load_results(self, mock_parse_manifest, mock_load_slice):
mock_parse_manifest.return_value = {
"columns": ["a", "b", "c"],
"part_urls": ["A", "B", "C"],
"record_count": 5
}
test_df = pandas.DataFrame()
mock_load_slice.return_value = test_df
reader = CellQueryResultsReader("test_manifest_key")
reader.load_results()
expected_calls = [mock.call(0), mock.call(1), mock.call(2)]
mock_load_slice.assert_has_calls(expected_calls)
@mock.patch("pandas.read_csv")
@mock.patch("s3fs.S3FileSystem.open")
def test_load_slice(self, mock_open, mock_pd_read_csv):
manifest_file_path = "tests/functional/res/cell_metadata_manifest"
with open(manifest_file_path) as f:
mock_open.return_value = f
reader = CellQueryResultsReader("test_manifest_key")
reader.load_slice(3)
pandas_args = mock_pd_read_csv.call_args[-2]
pandas_kwargs = mock_pd_read_csv.call_args[-1]
self.assertIn("project.project_core.project_short_name", pandas_kwargs["names"])
self.assertTrue(pandas_args[0].startswith("s3://"))
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test_load_empty_results(self, mock_parse_manifest):
mock_parse_manifest.return_value = {"record_count": 0}
cell_query_results_reader = CellQueryResultsReader("test_manifest_key")
results = cell_query_results_reader.load_results()
self.assertEqual(results.shape, (0, 0))
| [
"noreply@github.com"
] | Deepbody-me.noreply@github.com |
a3653df962d4ea083d29737aa7d5f6bbe3271377 | 408b00c1c2390ddec6fca41bfb5636130817c42b | /process_corpus.py | 439a42924dbaadfa783d1569a12d62efb90fed8a | [
"MIT"
] | permissive | wang1128/email-formality-detection | eb9a375049419695eed9ec6d99f683f151cd6e6e | fec44bdf370de2ac3fe54e85c6efe0549863c2f7 | refs/heads/mysql | 2021-01-22T21:27:45.475266 | 2015-12-07T15:42:40 | 2015-12-07T15:42:40 | 85,432,555 | 1 | 0 | null | 2017-03-18T21:17:04 | 2017-03-18T21:17:04 | null | UTF-8 | Python | false | false | 7,557 | py | #!/usr/bin/env python
# -*- coding: utf_8 -*-
"""
Dan O'Day
Robert Hinh
Upasita Jain
Sangmi Shin
Penghao Wang
Purdue University
CNIT499 Natural Language Technologies
Main corpus processor.
"""
__author__ = "Dan O'Day"
__copyright__ = "Copyright 2014, Dan O'Day, Purdue University"
__credits__ = ["Dan O'Day", "Robert Hinh", "Upasita Jain", "Sangmi Shin", "Penghao Wang", "Julia Taylor"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Dan O'Day"
__email__ = "doday@purdue.edu"
__status__ = "Development"
import csv
from data.models import Corpus
from features.bagofwords import extract_words_as_features
from features.simple_counts import average_syllables_per_word, character_count, syllable_count, word_count, \
is_forward, is_reply, subject_line_counts, count_verbs
from features.netlingo import find_netlingo
from features.contractionFeature import contraction
from features.timeFeature import weekend, day, time
from features.closing import closing
from features.count_recipients import count_recipients
from features.capitalization import ratio_incorrect_first_capitalization, punctRatio, \
incorrect_first_person_pronoun_capitalization_count, ratio_cap_letters, create_text_from_body
from features.spelling import ratio_misspelled_words
def populate_word_features(all=True):
    """Collect unigram (bag-of-words) features from the corpus.

    :param all: when True, scan every email; when False, only emails whose
        ``classification`` column is 'classified'.
    :return: list of word features (may contain duplicates) drawn from the
        body text of each email's most recent message.
    """
    # NOTE(review): this module is Python 2 (print statements).
    c = Corpus()
    all_word_features = []
    print 'Preprocessing unigram features...'
    if all:
        email_generator = c.fetch_all_emails()
    else:
        email_generator = c.fetch_all_emails(column='classification', query='classified')
    for email in email_generator:
        email.get_current_message()  # make sure only dealing with most recent message
        text = create_text_from_body(email)
        email_features = extract_words_as_features(text)
        all_word_features.extend(email_features)
    return all_word_features
def process_features(email, wf):
    """Compute every feature for one email and register it on the email.

    :param email: corpus email object; its most recent message should already
        be selected by the caller (``get_current_message``).
    :param wf: iterable of unigram word features to test for presence.
    :return: ``(feature_dictionary, classifier_to_write_to_file)`` where
        feature_dictionary maps feature id -> human-readable name, and the
        classifier string is "0" (informal), "1" (formal), "2" (unclassified)
        or "" when the classification letter is unrecognised.

    The original body repeated the register-a-feature boilerplate twenty
    times; this version drives the same registrations (same ids, same order,
    same values) from a table.
    """
    # classification letter -> label written to the output files
    label_map = {'I': "0", 'F': "1", 'U': "2"}
    classifier_to_write_to_file = label_map.get(email.classification, "")

    print('Processing email #' + str(email.id))

    # (name, value) pairs in the exact order of the original feature ids 1..20
    scalar_features = [
        ("Character Count", character_count(email)),
        ("Syllable Count", syllable_count(email)),
        ("Average # Syllables per Word", average_syllables_per_word(email)),
        ("Word Count", word_count(email)),
        ("Net Lingo Term Count", find_netlingo(email)),
        ("Contractions to All Words Ratio", contraction(email)),
        ("Weekend", weekend(email)),
        ("Day", day(email)),
        ("Time", time(email)),
        ("Closing Present", closing(email)),
        ("Recipients Count", count_recipients(email)),
        ("Ratio of Sentences Not Beginning With Capital Letters", ratio_incorrect_first_capitalization(email)),
        ("Ratio of Excessive Punctuation to Normal Punctuation", punctRatio(email)),
        ("Incorrect Capitalization of 'i' Count", incorrect_first_person_pronoun_capitalization_count(email)),
        ("Ratio of contiguous capital letters to total letters", ratio_cap_letters(email)),
        ("Ratio of misspelled words to total words", ratio_misspelled_words(email)),
        ("Is Forward?", is_forward(email)),
        ("Is Reply?", is_reply(email)),
        ("Subject Line Reply Count", subject_line_counts(email, 'reply')),
        ("Subject Line Forward Count", subject_line_counts(email, 'forward')),
        # ("Verb Count", count_verbs(email)),  # disabled in the original code
    ]

    feature_dictionary = {}  # stores human-readable names for each feature
    i = 1
    for name, value in scalar_features:
        feature_dictionary[i] = name
        email.add_feature(i, value)
        i += 1

    # word features (unigrams only): binary presence of each supplied word
    email_text = create_text_from_body(email)
    email_word_features = extract_words_as_features(email_text)
    for feature in wf:
        feature = feature.lower()
        feature_dictionary[i] = feature
        email.add_feature(i, 1 if feature in email_word_features else 0)
        i += 1

    return feature_dictionary, classifier_to_write_to_file
def write_libsvm_file(wf, all=True):
    """Write one libsvm-format line per email to ``features.libsvm``.

    Each line is ``<label> <id>:<value> <id>:<value> ...``.

    :param wf: unigram word features forwarded to process_features.
    :param all: when False, restrict to emails classified as 'classified'.
    """
    with open('features.libsvm', 'w') as ff:
        c = Corpus()
        if all:
            email_generator = c.fetch_many_emails()
        else:
            email_generator = c.fetch_many_emails(column='classification', query='classified')
        for email in email_generator:
            email.get_current_message()  # make sure only dealing with most recent message
            feature_dictionary, classifier_to_write_to_file = process_features(email, wf)
            # write feature set for this sample to file
            string_builder = ""
            string_builder += classifier_to_write_to_file
            for f in email.feature_set.items():
                # f is a (feature_id, value) tuple, formatted as "id:value"
                string_builder += " %s:%s" % f
            # ff.write(" # email id: " + str(email.id)) # add comment to libsvm file with unique id for sample
            try:
                ff.write(string_builder + '\n')
            except IOError:
                # deliberate best-effort: skip a line that fails to write
                pass
def write_csv_file(wf, all=True):
    """Write one CSV row per email to ``features.csv``.

    The first data-producing iteration also emits a header row built from the
    feature-name dictionary returned by process_features.

    :param wf: unigram word features forwarded to process_features.
    :param all: when False, restrict to emails classified as 'classified'.

    Fix: the original opened ``features.csv`` with a bare ``open`` and never
    closed it (resource leak); the handle is now managed by ``with``.
    """
    c = Corpus()
    if all:
        email_generator = c.fetch_many_emails()
    else:
        email_generator = c.fetch_many_emails(column='classification', query='classified')

    features = []
    for i, email in enumerate(email_generator):
        email.get_current_message()  # make sure only dealing with most recent message
        feature_dictionary, classifier_to_write_to_file = process_features(email, wf)
        if i == 0:
            # header row: fixed columns followed by the feature names
            header = ['Email ID#', 'Classification']
            header.extend(feature_dictionary.values())
            features.append(header)
        row = [email.id, classifier_to_write_to_file]
        row.extend(email.feature_set.values())
        features.append(row)

    # 'with' guarantees the file is flushed and closed
    with open('features.csv', 'w') as ff:
        csv.writer(ff).writerows(features)
def main():
    """Extract features for the classified subset and write both output formats."""
    word_features = populate_word_features(all=False)
    write_libsvm_file(word_features, all=False)
    write_csv_file(word_features, all=False)
if __name__ == '__main__':
main() | [
"digitaloday@gmail.com"
] | digitaloday@gmail.com |
fbcf4dff0606fafa97cc778c0778a49cc9e1a8e6 | 8830831a87f35ff2628f379d8230928ec6b5641a | /Homedepot/code/stem2.py | 3a3c13edf6434f0161556c5b49e294bd64829972 | [] | no_license | nickmcadden/Kaggle | e5882c9d68a81700d8d969328d91c059a0643868 | cbc5347dec90e4bf64d4dbaf28b8ffb362efc64f | refs/heads/master | 2019-07-18T08:09:40.683168 | 2018-01-26T14:35:38 | 2018-01-26T14:35:38 | 40,735,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | import pandas as pd
import numpy as np
from nltk.stem.snowball import EnglishStemmer
from nltk.tokenize import wordpunct_tokenize
import sys
import csv

# NOTE(review): Python 2 only -- reload() and sys.setdefaultencoding() do not
# exist in Python 3.  Forces Latin-1 so mixed-encoding product text decodes.
reload(sys)
sys.setdefaultencoding('ISO-8859-1')

stemmer = EnglishStemmer()

print("Reading data\n")
train = pd.read_csv('./input/train.csv', encoding="ISO-8859-1")
test = pd.read_csv('./input/test.csv', encoding="ISO-8859-1")
desc = pd.read_csv('./input/product_descriptions.csv', encoding="ISO-8859-1")

# Stem every token of the title and search-term columns in place.
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas.
print("Stemming train file\n")
for index, row in train.iterrows():
    train.ix[index,'product_title'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['product_title'])])
    train.ix[index,'search_term'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['search_term'])])
    if index % 1000 == 0:
        print(index)  # progress indicator
train.to_csv('./input/train_stemmed_snowball.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)

print("\nStemming test file\n")
for index, row in test.iterrows():
    test.ix[index,'product_title'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['product_title'])])
    test.ix[index,'search_term'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['search_term'])])
    if index % 1000 == 0:
        print(index)
test.to_csv('./input/test_stemmed_snowball.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)

# Description stemming kept disabled, as in the original.
'''
print("\nStemming description file\n")
for index, row in desc.iterrows():
    desc.ix[index,'product_description'] = " ".join([stemmer.stem(word.lower()) for word in wordpunct_tokenize(row['product_description'])])
    if index % 1000 == 0:
        print(index)
desc.to_csv('./input/desc_stemmed_snowball.csv', index=False, quoting=csv.QUOTE_NONNUMERIC)
'''
| [
"nmcadden@globalpersonals.co.uk"
] | nmcadden@globalpersonals.co.uk |
24d300f8fd9d5585af8f9d63a6ebd616f06615cd | 6d4c25b19babd1de9d8fbc08d87bbf7eab47d47a | /155.min-stack.py | 7844fd2c1f961fcad97622795cc0a52e7dc8667f | [] | no_license | Olament/Leetcode | ccb7474bc0ca96e813b25d84eba40ce3aba0a4a4 | 383fb3d59a11a6d1ad047b67292a7fafc23248b2 | refs/heads/master | 2020-09-03T03:16:31.765745 | 2019-11-26T05:10:20 | 2019-11-26T05:10:20 | 219,372,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | #
# @lc app=leetcode id=155 lang=python3
#
# [155] Min Stack
#
# @lc code=start
class MinStack:
    """Stack supporting push/pop/top and constant-time getMin.

    Fix: the original rescanned the entire stack after every pop and sliced
    the list to remove the top element (both O(n)), defeating the purpose of
    a min-stack.  This version keeps a parallel stack of running minima so
    push, pop, top and getMin are all O(1).
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.stack = []   # stored values
        self._mins = []   # _mins[i] == min(self.stack[:i + 1])

    def push(self, x: int) -> None:
        self.stack.append(x)
        # running minimum: the smaller of x and the previous minimum
        self._mins.append(min(x, self._mins[-1]) if self._mins else x)

    def pop(self) -> None:
        # no-op on an empty stack, matching the original behavior
        if not self.stack:
            return
        self.stack.pop()
        self._mins.pop()

    def update(self) -> int:
        """Return the index of the first minimum element (0 when empty).

        Retained for backward compatibility; no longer used internally.
        """
        if not self.stack:
            return 0
        return self.stack.index(min(self.stack))

    def top(self) -> int:
        return self.stack[-1]

    def getMin(self) -> int:
        return self._mins[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
# @lc code=end
| [
"i@zxguo.me"
] | i@zxguo.me |
5a603e34acfb4d65cae42bbdd0c5911361c6cae4 | 30fae401c969adf3ddd3cd1adef972728cfafd75 | /docs/source/conf.py | ba6b942ea83d631d17f2ff4e44d2011303a8e23c | [
"Apache-2.0"
] | permissive | ktbyers/nornir_utils | 699c7d0236baffc593994363bedbbb16be1a8c03 | 6dd653e4359b03f0abcac7231134412b0fc147db | refs/heads/master | 2022-11-06T05:07:36.076999 | 2020-05-03T12:05:56 | 2020-05-03T12:05:56 | 267,370,721 | 2 | 0 | Apache-2.0 | 2020-05-27T16:28:10 | 2020-05-27T16:28:09 | null | UTF-8 | Python | false | false | 1,983 | py | import sys
from typing import List

# Make the package root importable so autodoc can find nornir_utils.
sys.path.insert(0, "../")

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = "nornir_utils"
copyright = "2020, David Barroso"
author = "David Barroso"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# nbsphinx renders Jupyter notebooks; sphinx_issues adds GitHub issue roles.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "nbsphinx", "sphinx_issues"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[str] = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| [
"noreply@github.com"
] | ktbyers.noreply@github.com |
b217ba63eaddc9616214a06e614c6246f5c30edf | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/restore_request.py | e662315f9bdfdbec34fe2249cdb69996c797c338 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,055 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RestoreRequest(Model):
    """Base class for restore request. Workload-specific restore requests are
    derived from this class.

    NOTE: auto-generated by AutoRest; manual edits may be overwritten on
    regeneration.

    :param object_type: Polymorphic Discriminator
    :type object_type: str
    """

    # msrest validation rules: the discriminator must always be present.
    _validation = {
        'object_type': {'required': True},
    }

    # Maps Python attribute names to wire-format JSON keys and types.
    _attribute_map = {
        'object_type': {'key': 'objectType', 'type': 'str'},
    }

    # Polymorphic dispatch: the deserializer selects the subclass named by
    # the objectType value on the wire.
    _subtype_map = {
        'object_type': {'IaasVMRestoreRequest': 'IaasVMRestoreRequest'}
    }

    def __init__(self):
        # Concrete subclasses set the discriminator to their own type name.
        self.object_type = None
| [
"dheeru.rathor14@gmail.com"
] | dheeru.rathor14@gmail.com |
a515d9e2eea2d03c7095f1a8cf8ea63c09fd61dc | 396c7fdf44b663b5f7f53c82ede1128a8adb71f7 | /photo-organizer.py | 7c3de9eda62864b21874f8081871db0bb6c7b169 | [] | no_license | RAJ66/OrganizePhoto | 09058e0f4b57eb583b95b7b20331416360bbce8c | 6b815d66eb91db396da17d16de22d011a7c16299 | refs/heads/master | 2020-07-07T18:57:37.802858 | 2019-08-27T13:42:34 | 2019-08-27T13:42:34 | 203,445,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import os
from PIL import Image
from datetime import datetime
import shutil
# 5
class PhotoOrganizer:
    """Sort photos in the current directory into ``YYYY/YYYY-MM-DD`` folders
    based on their shooting date."""

    # file extensions treated as photos
    extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']

    def folderPathFromPhotoDate(self, file):
        """Return the destination folder ('YYYY/YYYY-MM-DD') for *file*."""
        date = self.photoShotingData(file)
        return date.strftime('%Y') + '/' + date.strftime('%Y-%m-%d')

    def photoShotingData(self, file):
        """Return the shooting datetime of *file*.

        Prefers the EXIF DateTimeOriginal tag (36867) and falls back to the
        file's modification time when EXIF data or that tag is missing.

        Fix: the original left ``date`` unbound (UnboundLocalError) when EXIF
        data existed but tag 36867 was absent.
        """
        photo = Image.open(file)
        info = photo._getexif()
        if info and 36867 in info:
            return datetime.strptime(info[36867], '%Y:%m:%d %H:%M:%S')
        return datetime.fromtimestamp(os.path.getmtime(file))

    def movePhoto(self, file):
        """Move *file* into its date folder, creating the folder if needed."""
        newFolder = self.folderPathFromPhotoDate(file)
        if not os.path.exists(newFolder):
            os.makedirs(newFolder)
        shutil.move(file, newFolder + '/' + file)
        print('||' + file + '||' + ' Complete||')

    def organize(self):
        """Move every photo in the current directory into its date folder."""
        photos = [
            filename for filename in os.listdir('.')
            if any(filename.endswith(ext) for ext in self.extensions)
        ]
        for name in photos:
            self.movePhoto(name)
# execute
PO = PhotoOrganizer()
PO.organize()
| [
"vitorbarbosa1998@hotmail.com"
] | vitorbarbosa1998@hotmail.com |
4b7ad1257588f9d861614a07ee2bc059ad96ebde | b34f07d217cdda9f59e7f58f89dad17fae1ee132 | /malaya_speech/model/frame.py | 95fde8af773361726a61fb74e10e57b9e3e60f0e | [
"MIT"
] | permissive | Ariffleng/malaya-speech | 965cea504e364c77ca513d43bf340fc122b97672 | 4343c409340c608a426cc6f0926fbe2c1661783e | refs/heads/master | 2023-08-12T23:23:39.983006 | 2021-10-02T09:14:52 | 2021-10-02T09:14:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,386 | py | import numpy as np
from dataclasses import dataclass
SEGMENT_PRECISION = 1e-6
class Frame:
    """Container pairing an array of samples with its start time and length.

    :param array: sample data; converted to ``np.ndarray`` unless it already
        is one (an existing ndarray is stored as-is, without copying).
    :param timestamp: start time of the frame.
    :param duration: length of the frame.
    """

    def __init__(self, array, timestamp, duration):
        self.array = array if isinstance(array, np.ndarray) else np.array(array)
        self.timestamp = timestamp
        self.duration = duration
@dataclass(frozen=True, order=True)
class Segment:
    """A time interval [start, end] in seconds.

    frozen+order makes instances hashable and sortable chronologically
    (by start, then end)."""

    # boundaries in seconds
    start: float = 0.0
    end: float = 0.0

    def __bool__(self):
        # a segment is "truthy" only when its duration exceeds SEGMENT_PRECISION
        return bool((self.end - self.start) > SEGMENT_PRECISION)

    @property
    def duration(self) -> float:
        """
        Segment duration (read-only)
        """
        # empty/degenerate segments report zero duration
        return self.end - self.start if self else 0.0

    @property
    def middle(self) -> float:
        """Segment mid-time (read-only)"""
        return 0.5 * (self.start + self.end)

    def __contains__(self, other: 'Segment'):
        """Inclusion
        >>> segment = Segment(start=0, end=10)
        >>> Segment(start=3, end=10) in segment:
        True
        >>> Segment(start=5, end=15) in segment:
        False
        """
        return (self.start <= other.start) and (self.end >= other.end)

    def __and__(self, other):
        """
        Intersection
        >>> segment = Segment(0, 10)
        >>> other_segment = Segment(5, 15)
        >>> segment & other_segment
        <Segment(5, 10)>
        Note
        ----
        When the intersection is empty, an empty segment is returned:
        >>> segment = Segment(0, 10)
        >>> other_segment = Segment(15, 20)
        >>> intersection = segment & other_segment
        >>> if not intersection:
        ...    # intersection is empty.
        """
        start = max(self.start, other.start)
        end = min(self.end, other.end)
        return Segment(start=start, end=end)

    def intersects(self, other: 'Segment') -> bool:
        """
        Check whether two segments intersect each other
        Parameters
        ----------
        other : Segment
            Other segment
        Returns
        -------
        intersect : bool
            True if segments intersect, False otherwise
        """
        # overlap must exceed SEGMENT_PRECISION unless the starts coincide
        return (
            (
                self.start < other.start
                and other.start < self.end - SEGMENT_PRECISION
            )
            or (
                self.start > other.start
                and self.start < other.end - SEGMENT_PRECISION
            )
            or (self.start == other.start)
        )

    def overlaps(self, t: float):
        """
        Check if segment overlaps a given time
        Parameters
        ----------
        t : float
            Time, in seconds.
        Returns
        -------
        overlap: bool
            True if segment overlaps time t, False otherwise.
        """
        return self.start <= t and self.end >= t

    def __or__(self, other):
        """
        Union
        >>> segment = Segment(0, 10)
        >>> other_segment = Segment(5, 15)
        >>> segment | other_segment
        <Segment(0, 15)>
        Note
        ----
        When a gap exists between the segment, their union covers the gap as well:
        >>> segment = Segment(0, 10)
        >>> other_segment = Segment(15, 20)
        >>> segment | other_segment
        <Segment(0, 20)
        """
        # union with an empty segment is the other operand unchanged
        if not self:
            return other
        if not other:
            return self
        start = min(self.start, other.start)
        end = max(self.end, other.end)
        return Segment(start=start, end=end)

    def __xor__(self, other):
        """
        Gap
        >>> segment = Segment(0, 10)
        >>> other_segment = Segment(15, 20)
        >>> segment ^ other_segment
        <Segment(10, 15)
        Note
        ----
        The gap between a segment and an empty segment is not defined.
        >>> segment = Segment(0, 10)
        >>> empty_segment = Segment(11, 11)
        >>> segment ^ empty_segment
        ValueError: The gap between a segment and an empty segment is not defined.
        """
        if (not self) or (not other):
            raise ValueError(
                'The gap between a segment and an empty segment '
                'is not defined.'
            )
        # gap spans from the earlier end to the later start
        start = min(self.end, other.end)
        end = max(self.start, other.start)
        return Segment(start=start, end=end)

    def _str_helper(self, seconds: float):
        # Format a time in seconds as [-]HH:MM:SS.mmm.
        # NOTE(review): currently unused -- __str__'s docstring suggests it
        # was meant to produce the "[ HH:MM:SS --> HH:MM:SS]" form; confirm
        # intended behavior before removing.
        from datetime import timedelta

        negative = seconds < 0
        seconds = abs(seconds)
        td = timedelta(seconds=seconds)
        seconds = td.seconds + 86400 * td.days
        microseconds = td.microseconds
        hours, remainder = divmod(seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return '%s%02d:%02d:%02d.%03d' % (
            '-' if negative else ' ',
            hours,
            minutes,
            seconds,
            microseconds / 1000,
        )

    def __str__(self):
        """
        Human-readable representation
        >>> print(Segment(1337, 1337 + 0.42))
        [ 00:22:17.000 --> 00:22:17.420]
        Note
        ----
        Empty segments are printed as "[]"
        """
        # NOTE(review): implementation returns the repr form, not the
        # timestamped form shown in the docstring above.
        return '<Segment(%g, %g)>' % (self.start, self.end)

    def __repr__(self):
        """
        Computer-readable representation
        >>> Segment(1337, 1337 + 0.42)
        <Segment(1337, 1337.42)>
        """
        return '<Segment(%g, %g)>' % (self.start, self.end)
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
56d61b52a986db759e27b224e6f9af02a912baf9 | b2605c93db0c5b3dd0ac7f7cfa80674e82ff9439 | /sandbox/filter-max255.py | ee9f129f2aff3855cfde263f3b5c214ef661e5e1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | adnbsr/khmer | 76728708b60a5662e93b83c6559502d31b92445d | 64612c1140d17c0988fa01f3c6c627913b509700 | refs/heads/master | 2021-01-18T13:20:23.385284 | 2013-08-01T21:13:42 | 2013-08-01T21:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import sys, screed.fasta, os
import khmer
from khmer.thread_utils import ThreadedSequenceProcessor, verbose_fastq_iter
# k-mer size and counting-hash dimensions for khmer
K = 32
HT_SIZE=4e9          # bytes per hash table
N_HT=4               # number of hash tables
WORKER_THREADS=8
GROUPSIZE=100        # reads per work unit

###

def main():
    """Count k-mers from one FASTA file, then filter a second file, dropping
    reads whose maximum k-mer count is saturated (>= 255).

    Usage: filter-max255.py <repfile> <infile> [<outfile>]
    """
    # NOTE(review): Python 2 only (print statements).
    repfile = sys.argv[1]
    infile = sys.argv[2]

    outfile = os.path.basename(infile) + '.fno255'
    if len(sys.argv) >= 4:
        outfile = sys.argv[3]

    print 'file to count from: %s' % repfile
    print 'input file to filter: %s' % infile
    print 'filtering to output:', outfile

    print '-- settings:'
    print 'K', K
    print 'N THREADS', WORKER_THREADS
    print '--'

    print 'making hashtable'
    ht = khmer.new_counting_hash(K, HT_SIZE, N_HT)

    print 'consuming input', repfile
    ht.consume_fasta(repfile)

    outfp = open(outfile, 'w')

    def process_fn(record, ht=ht):
        # Return (name, seq) to keep a read, (None, None) to discard it.
        name = record['name']
        seq = record['sequence']
        if 'N' in seq:
            return None, None
        if len(seq) < K:
            return None, None
        # 255 is the counting hash's saturation value -> highly repetitive
        if ht.get_max_count(seq) >= 255:
            return None, None
        return name, seq

    tsp = ThreadedSequenceProcessor(process_fn, WORKER_THREADS, GROUPSIZE)

    ###

    tsp.start(verbose_fastq_iter(infile), outfp)

if __name__ == '__main__':
    main()
| [
"titus@idyll.org"
] | titus@idyll.org |
096b9871f6727145b9f830ca94e89cd862e87ea7 | df49441ead1e7224742f440c9bd09fd4e043a637 | /Lambda Deployment/GetReviews/get_reviews.py | c69f3551475e7de94e6fb533d4721df2e60a0517 | [] | no_license | AmolMavuduru/SuperRestaurantBot | 76b567704830fb06c67f9317d8883d3e42b24730 | 71357af33ef47016ff198afbbbf9f293067ba69f | refs/heads/master | 2021-01-01T06:55:50.339603 | 2019-01-06T23:11:48 | 2019-01-06T23:11:48 | 97,547,970 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,177 | py | from googleplaces import GooglePlaces, types, lang
import boto3
import json
YOUR_API_KEY = '<insert key here>' # My private API Key is not provided here for security reasons
google_places = GooglePlaces(YOUR_API_KEY)
def lambda_handler(event, context):
    """Amazon Lex fulfillment handler: look up a restaurant near a city via
    Google Places and return its reviews (or a list of candidates).

    Responses are Lex "Close" dialog actions; the 640-character checks below
    keep the message under Lex's content length limit.
    """
    slots = event['currentIntent']['slots']
    location = slots['Location']
    restaurant_name = slots['RestaurantName']
    try:
        query_result = google_places.nearby_search(
            location=location, keyword=restaurant_name,
            radius=20000, types=[types.TYPE_FOOD])
    # NOTE(review): bare except swallows every error, not just bad-city
    # lookups; consider catching the googleplaces exception type.
    except:
        return {  # If an error occurs it is probably because the user provided an invalid city.
            "dialogAction": {
                "type": "Close",
                "fulfillmentState": "Fulfilled",
                "message": {
                    "contentType": "PlainText",
                    "content": "Sorry I could not find anything with those details. The city you provided seems invalid."
                }
            }
        }

    response = ""
    if len(query_result.places) == 0:
        return {
            "dialogAction": {
                "type": "Close",
                "fulfillmentState": "Fulfilled",
                "message": {
                    "contentType": "PlainText",
                    "content": "Sorry I could not find any results for that place."
                }
            }
        }
    elif len(query_result.places) == 1:
        # Single match: summarize its rating and as many reviews as fit.
        for place in query_result.places:  # This iteration wil only occur once since there is one item in the list
            place.get_details()
            reviews_string = "Here is what people had to say about {} : ".format(place.name)
            response += "I found {0} in {1}.".format(place.name, location)
            sum_ratings = 0
            num_reviews = 0
            if (place.details['reviews']):
                for review in place.details['reviews']:  # Will iterate through all reviews and and calculate the average
                    sum_ratings += review['rating']
                    num_reviews += 1
                    new_review = "Review {0}: {1} ..... ".format(num_reviews, review['text'])
                    if(len(reviews_string) + len(response) + len(new_review) + 45 >= 640):  # If there is a risk of exceeding limit return the response
                        avg_rating = float(sum_ratings) / float(num_reviews)
                        response += ' {0} received an average rating of: {1} out of 5 '.format(place.name, avg_rating)
                        response += reviews_string
                        return {
                            "dialogAction": {
                                "type": "Close",
                                "fulfillmentState": "Fulfilled",
                                "message": {
                                    "contentType": "PlainText",
                                    "content": response
                                }
                            }
                        }
                    reviews_string += "Review {0}: {1} ..... ".format(num_reviews, review['text'])
                # NOTE(review): integer division under Python 2 would truncate
                # here (the other branches wrap operands in float()).
                avg_rating = sum_ratings / num_reviews
                response += ' {0} received an average rating of: {1} out of 5'.format(place.name, avg_rating)
                response += reviews_string  # Adds the text of all of the reviews to the response
    else:
        # Multiple matches: either detail an exact name match, or list
        # candidates with their average ratings.
        response += "I found the following results for {} : ".format(restaurant_name)
        exact_match = False
        for place in query_result.places:  # This iteration will occur multiple times since the list has multiple items
            if(place.name == restaurant_name):  # If there is an exact match
                exact_match = True
                place.get_details()
                sum_ratings = 0
                num_reviews = 0
                reviews_string = "Here is what people had to say about {} : ".format(place.name)
                if(place.details['reviews']):
                    for review in place.details['reviews']:
                        sum_ratings += review['rating']
                        num_reviews += 1
                        new_review = "Review {0}: {1} ..... ".format(num_reviews, review['text'])
                        if(len(reviews_string) + len(response) + len(new_review) + 20 >= 640):  # If there is a risk of exceeding limit return the response
                            avg_rating = float(sum_ratings) / float(num_reviews)
                            response += ' {0} received an average rating of: {1} out of 5 '.format(place.name, avg_rating)
                            response += reviews_string
                            return {
                                "dialogAction": {
                                    "type": "Close",
                                    "fulfillmentState": "Fulfilled",
                                    "message": {
                                        "contentType": "PlainText",
                                        "content": response
                                    }
                                }
                            }
                        reviews_string += new_review
                    avg_rating = sum_ratings / num_reviews
                    response += ' {0} which received an average rating of {1} out of 5. '.format(place.name, avg_rating)
                    response += reviews_string
                # NOTE(review): an exact match with no reviews returns the
                # bare "I found..." text, with no rating information.
                return {
                    "dialogAction": {
                        "type": "Close",
                        "fulfillmentState": "Fulfilled",
                        "message": {
                            "contentType": "PlainText",
                            "content": response
                        }
                    }
                }
            elif(not exact_match):  # If the current place is not an exact match
                place.get_details()
                sum_ratings = 0
                num_reviews = 0
                if(place.details['reviews']):
                    for review in place.details['reviews']:
                        sum_ratings += review['rating']
                        num_reviews += 1
                    avg_rating = float(sum_ratings) / float(num_reviews)
                # NOTE(review): if a place has no reviews, avg_rating below is
                # unbound (UnboundLocalError) or stale from a previous loop
                # iteration -- confirm against the original indentation.
                if(len(response) + len(' {0} (average rating : {1} out of 5) ..... '.format(place.name, avg_rating)) >= 640):
                    return {
                        "dialogAction": {
                            "type": "Close",
                            "fulfillmentState": "Fulfilled",
                            "message": {
                                "contentType": "PlainText",
                                "content": response
                            }
                        }
                    }
                response += ' {0} (average rating : {1} out of 5) ..... '.format(place.name, avg_rating)

    # Fall-through: single-match summary, or candidate list with no exact match.
    return {
        "dialogAction": {
            "type": "Close",
            "fulfillmentState": "Fulfilled",
            "message": {
                "contentType": "PlainText",
                "content": response
            }
        }
    }
| [
"rovingrook09@yahoo.com"
] | rovingrook09@yahoo.com |
8edbb85fa816b4531504f71141e248419a278ba9 | 7e401f3da2f8e7c65cfd374d9f434687cd5eb950 | /src/dispatch/logging.py | 350b0b383cbd05534482d9ce856bb70cded629e4 | [
"Apache-2.0"
] | permissive | stefanm8/dispatch | 75b6b099b495440af9c720e83d58018591a40a24 | a7fe52f870a5deec8a161ca7395ca869aaf8f2c9 | refs/heads/master | 2021-08-05T03:54:02.473395 | 2021-08-02T15:54:28 | 2021-08-02T15:54:28 | 248,351,389 | 2 | 0 | Apache-2.0 | 2020-03-18T21:54:25 | 2020-03-18T21:54:24 | null | UTF-8 | Python | false | false | 409 | py | import logging
from dispatch.config import LOG_LEVEL
def configure_logging():
    """Configure the root logger from the LOG_LEVEL setting.

    At DEBUG level, each record carries its full origin:
    level:message:full module path:function invoked:line number.
    """
    kwargs = {"level": LOG_LEVEL}
    if LOG_LEVEL == "DEBUG":
        kwargs["format"] = "%(levelname)s:%(message)s:%(pathname)s:%(funcName)s:%(lineno)d"
    logging.basicConfig(**kwargs)
| [
"kglisson@netflix.com"
] | kglisson@netflix.com |
8d3090c08f5639721c316747bfd207a1fdaa128e | 7019b48eee182b5a592a0f13d2e9b764613c0ded | /deblurring/MPRNet.py | eb24ab75979fbd5f15a5602b678e7a3d0d4f6e44 | [] | no_license | claragarciamoll/Super-Resolution | 0a14e6cfdce547e6d706f7dd05dc4fa33943bc1c | bcf0407b3e7199080893ec5660bd3de554faaaab | refs/heads/main | 2023-08-25T03:26:49.350297 | 2021-10-25T11:51:00 | 2021-10-25T11:51:00 | 419,435,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,910 | py | """
## Multi-Stage Progressive Image Restoration
## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao
## https://arxiv.org/abs/2102.02808
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pdb import set_trace as stx
##########################################################################
def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
    """Convenience Conv2d whose symmetric padding preserves spatial size
    (for odd kernel sizes at stride 1)."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     stride=stride, padding=padding, bias=bias)
##########################################################################
## Channel Attention Layer
class CALayer(nn.Module):
    """Channel attention: squeeze each feature map with global average
    pooling, excite through a bottleneck of 1x1 convs, and rescale the
    input channels by the resulting (0, 1) weights.

    Submodule names (avg_pool, conv_du) are kept so state_dict keys match
    existing checkpoints.
    """

    def __init__(self, channel, reduction=16, bias=False):
        super(CALayer, self).__init__()
        # squeeze: global average pooling collapses each map to one value
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # excite: channel downscale -> ReLU -> upscale -> sigmoid weights
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),
            nn.Sigmoid()
        )

    def forward(self, x):
        # reweight input channels by their attention scores
        return x * self.conv_du(self.avg_pool(x))
##########################################################################
## Channel Attention Block (CAB)
class CAB(nn.Module):
    """Channel Attention Block: conv -> activation -> conv, channel
    attention, then a residual connection back to the input."""

    def __init__(self, n_feat, kernel_size, reduction, bias, act):
        super(CAB, self).__init__()
        # Build the conv branch first, then CA, matching the original
        # module creation order (keeps parameter init sequence identical).
        branch = [
            conv(n_feat, n_feat, kernel_size, bias=bias),
            act,
            conv(n_feat, n_feat, kernel_size, bias=bias),
        ]
        self.CA = CALayer(n_feat, reduction, bias=bias)
        self.body = nn.Sequential(*branch)

    def forward(self, x):
        # residual: attention-weighted branch output added onto the input
        return x + self.CA(self.body(x))
##########################################################################
## Supervised Attention Module
class SAM(nn.Module):
    """Supervised Attention Module.

    Produces (refined features, restored image): a stage-level image
    estimate is formed from the features, and an attention mask derived
    from that image gates the features before a residual add.
    """

    def __init__(self, n_feat, kernel_size, bias):
        super(SAM, self).__init__()
        self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
        self.conv2 = conv(n_feat, 3, kernel_size, bias=bias)
        self.conv3 = conv(3, n_feat, kernel_size, bias=bias)

    def forward(self, x, x_img):
        # restored RGB estimate at this stage
        img = self.conv2(x) + x_img
        # per-pixel attention mask computed from the restored image
        mask = torch.sigmoid(self.conv3(img))
        feats = self.conv1(x) * mask
        # residual connection back to the incoming features
        return feats + x, img
##########################################################################
## U-Net
class Encoder(nn.Module):
    """Three-level U-Net encoder built from pairs of Channel Attention Blocks.

    Channel width grows by ``scale_unetfeats`` per level while spatial
    resolution is halved by ``DownSample``.  With ``csff=True`` the encoder
    also fuses the previous stage's encoder/decoder features at each level
    (Cross Stage Feature Fusion) through 1x1 projection convs.
    """
    def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats, csff):
        super(Encoder, self).__init__()
        # Two CABs per level; channel counts: n_feat, +scale, +2*scale.
        self.encoder_level1 = [CAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(2)]
        self.encoder_level2 = [CAB(n_feat + scale_unetfeats, kernel_size, reduction, bias=bias, act=act) for _ in
                               range(2)]
        self.encoder_level3 = [CAB(n_feat + (scale_unetfeats * 2), kernel_size, reduction, bias=bias, act=act) for _ in
                               range(2)]
        self.encoder_level1 = nn.Sequential(*self.encoder_level1)
        self.encoder_level2 = nn.Sequential(*self.encoder_level2)
        self.encoder_level3 = nn.Sequential(*self.encoder_level3)
        self.down12 = DownSample(n_feat, scale_unetfeats)
        self.down23 = DownSample(n_feat + scale_unetfeats, scale_unetfeats)
        # Cross Stage Feature Fusion (CSFF)
        if csff:
            # 1x1 convs projecting the previous stage's per-level features before fusion.
            self.csff_enc1 = nn.Conv2d(n_feat, n_feat, kernel_size=1, bias=bias)
            self.csff_enc2 = nn.Conv2d(n_feat + scale_unetfeats, n_feat + scale_unetfeats, kernel_size=1, bias=bias)
            self.csff_enc3 = nn.Conv2d(n_feat + (scale_unetfeats * 2), n_feat + (scale_unetfeats * 2), kernel_size=1,
                                       bias=bias)
            self.csff_dec1 = nn.Conv2d(n_feat, n_feat, kernel_size=1, bias=bias)
            self.csff_dec2 = nn.Conv2d(n_feat + scale_unetfeats, n_feat + scale_unetfeats, kernel_size=1, bias=bias)
            self.csff_dec3 = nn.Conv2d(n_feat + (scale_unetfeats * 2), n_feat + (scale_unetfeats * 2), kernel_size=1,
                                       bias=bias)
    def forward(self, x, encoder_outs=None, decoder_outs=None):
        """Return the per-level feature list [enc1, enc2, enc3].

        encoder_outs/decoder_outs must be passed together (and require the
        csff attributes, i.e. csff=True at construction) or both be None.
        """
        enc1 = self.encoder_level1(x)
        if (encoder_outs is not None) and (decoder_outs is not None):
            enc1 = enc1 + self.csff_enc1(encoder_outs[0]) + self.csff_dec1(decoder_outs[0])
        x = self.down12(enc1)
        enc2 = self.encoder_level2(x)
        if (encoder_outs is not None) and (decoder_outs is not None):
            enc2 = enc2 + self.csff_enc2(encoder_outs[1]) + self.csff_dec2(decoder_outs[1])
        x = self.down23(enc2)
        enc3 = self.encoder_level3(x)
        if (encoder_outs is not None) and (decoder_outs is not None):
            enc3 = enc3 + self.csff_enc3(encoder_outs[2]) + self.csff_dec3(decoder_outs[2])
        return [enc1, enc2, enc3]
class Decoder(nn.Module):
    """Three-level U-Net decoder mirroring Encoder, with attention-gated skips."""
    def __init__(self, n_feat, kernel_size, reduction, act, bias, scale_unetfeats):
        super(Decoder, self).__init__()
        # Channel widths per level, matching the encoder.
        widths = (n_feat, n_feat + scale_unetfeats, n_feat + (scale_unetfeats * 2))
        self.decoder_level1 = nn.Sequential(
            *[CAB(widths[0], kernel_size, reduction, bias=bias, act=act) for _ in range(2)])
        self.decoder_level2 = nn.Sequential(
            *[CAB(widths[1], kernel_size, reduction, bias=bias, act=act) for _ in range(2)])
        self.decoder_level3 = nn.Sequential(
            *[CAB(widths[2], kernel_size, reduction, bias=bias, act=act) for _ in range(2)])
        # Skip connections pass through a CAB before being merged upward.
        self.skip_attn1 = CAB(widths[0], kernel_size, reduction, bias=bias, act=act)
        self.skip_attn2 = CAB(widths[1], kernel_size, reduction, bias=bias, act=act)
        self.up21 = SkipUpSample(n_feat, scale_unetfeats)
        self.up32 = SkipUpSample(n_feat + scale_unetfeats, scale_unetfeats)
    def forward(self, outs):
        """Decode [enc1, enc2, enc3] into per-level features [dec1, dec2, dec3]."""
        enc1, enc2, enc3 = outs
        dec3 = self.decoder_level3(enc3)
        dec2 = self.decoder_level2(self.up32(dec3, self.skip_attn2(enc2)))
        dec1 = self.decoder_level1(self.up21(dec2, self.skip_attn1(enc1)))
        return [dec1, dec2, dec3]
##########################################################################
##---------- Resizing Modules ----------
class DownSample(nn.Module):
    """Halve spatial resolution (bilinear) and widen channels by s_factor (1x1 conv)."""
    def __init__(self, in_channels, s_factor):
        super(DownSample, self).__init__()
        self.down = nn.Sequential(
            nn.Upsample(scale_factor=0.5, mode='bilinear', align_corners=False),
            nn.Conv2d(in_channels, in_channels + s_factor, 1, stride=1, padding=0, bias=False))
    def forward(self, x):
        return self.down(x)
class UpSample(nn.Module):
    """Double spatial resolution (bilinear) and shrink channels by s_factor (1x1 conv)."""
    def __init__(self, in_channels, s_factor):
        super(UpSample, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2d(in_channels + s_factor, in_channels, 1, stride=1, padding=0, bias=False))
    def forward(self, x):
        return self.up(x)
class SkipUpSample(nn.Module):
    """UpSample x, then add the skip-connection tensor y element-wise."""
    def __init__(self, in_channels, s_factor):
        super(SkipUpSample, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            nn.Conv2d(in_channels + s_factor, in_channels, 1, stride=1, padding=0, bias=False))
    def forward(self, x, y):
        return self.up(x) + y
##########################################################################
## Original Resolution Block (ORB)
class ORB(nn.Module):
    """Original Resolution Block: num_cab CABs, a closing conv, and a long residual."""
    def __init__(self, n_feat, kernel_size, reduction, act, bias, num_cab):
        super(ORB, self).__init__()
        # The original bound modules_body to [] and immediately rebound it to the
        # comprehension below; the dead first assignment has been removed.
        modules_body = [CAB(n_feat, kernel_size, reduction, bias=bias, act=act) for _ in range(num_cab)]
        modules_body.append(conv(n_feat, n_feat, kernel_size))
        self.body = nn.Sequential(*modules_body)
    def forward(self, x):
        """Return body(x) + x (residual learning at original resolution)."""
        res = self.body(x)
        res += x
        return res
##########################################################################
class ORSNet(nn.Module):
    """Original-Resolution Subnetwork: three ORBs, each followed by fusion with
    the previous stage's encoder/decoder features (upsampled to full resolution
    and projected to n_feat + scale_orsnetfeats channels by 1x1 convs)."""
    def __init__(self, n_feat, scale_orsnetfeats, kernel_size, reduction, act, bias, scale_unetfeats, num_cab):
        super(ORSNet, self).__init__()
        self.orb1 = ORB(n_feat + scale_orsnetfeats, kernel_size, reduction, act, bias, num_cab)
        self.orb2 = ORB(n_feat + scale_orsnetfeats, kernel_size, reduction, act, bias, num_cab)
        self.orb3 = ORB(n_feat + scale_orsnetfeats, kernel_size, reduction, act, bias, num_cab)
        # Level-2 features need one upsample, level-3 features need two chained ones.
        self.up_enc1 = UpSample(n_feat, scale_unetfeats)
        self.up_dec1 = UpSample(n_feat, scale_unetfeats)
        self.up_enc2 = nn.Sequential(UpSample(n_feat + scale_unetfeats, scale_unetfeats),
                                     UpSample(n_feat, scale_unetfeats))
        self.up_dec2 = nn.Sequential(UpSample(n_feat + scale_unetfeats, scale_unetfeats),
                                     UpSample(n_feat, scale_unetfeats))
        # 1x1 projections from n_feat to the ORSNet channel width.
        self.conv_enc1 = nn.Conv2d(n_feat, n_feat + scale_orsnetfeats, kernel_size=1, bias=bias)
        self.conv_enc2 = nn.Conv2d(n_feat, n_feat + scale_orsnetfeats, kernel_size=1, bias=bias)
        self.conv_enc3 = nn.Conv2d(n_feat, n_feat + scale_orsnetfeats, kernel_size=1, bias=bias)
        self.conv_dec1 = nn.Conv2d(n_feat, n_feat + scale_orsnetfeats, kernel_size=1, bias=bias)
        self.conv_dec2 = nn.Conv2d(n_feat, n_feat + scale_orsnetfeats, kernel_size=1, bias=bias)
        self.conv_dec3 = nn.Conv2d(n_feat, n_feat + scale_orsnetfeats, kernel_size=1, bias=bias)
    def forward(self, x, encoder_outs, decoder_outs):
        """Refine x with three ORBs, fusing stage-2 features after each one."""
        x = self.orb1(x)
        x = x + self.conv_enc1(encoder_outs[0]) + self.conv_dec1(decoder_outs[0])
        x = self.orb2(x)
        x = x + self.conv_enc2(self.up_enc1(encoder_outs[1])) + self.conv_dec2(self.up_dec1(decoder_outs[1]))
        x = self.orb3(x)
        x = x + self.conv_enc3(self.up_enc2(encoder_outs[2])) + self.conv_dec3(self.up_dec2(decoder_outs[2]))
        return x
##########################################################################
class MPRNet(nn.Module):
    """Multi-stage progressive restoration network.

    Stage 1 runs a U-Net (Encoder/Decoder) on four image quadrants,
    stage 2 runs it on the two image halves with cross-stage feature
    fusion from stage 1, and stage 3 refines the full-resolution image
    with ORSNet.  SAM modules bridge consecutive stages and emit one
    intermediate restored image per stage.
    """
    def __init__(self, in_c=3, out_c=3, n_feat=96, scale_unetfeats=48, scale_orsnetfeats=32, num_cab=8, kernel_size=3,
                 reduction=4, bias=False):
        super(MPRNet, self).__init__()
        act = nn.PReLU()
        # One shallow feature extractor (conv + CAB) per stage.
        self.shallow_feat1 = nn.Sequential(conv(in_c, n_feat, kernel_size, bias=bias),
                                           CAB(n_feat, kernel_size, reduction, bias=bias, act=act))
        self.shallow_feat2 = nn.Sequential(conv(in_c, n_feat, kernel_size, bias=bias),
                                           CAB(n_feat, kernel_size, reduction, bias=bias, act=act))
        self.shallow_feat3 = nn.Sequential(conv(in_c, n_feat, kernel_size, bias=bias),
                                           CAB(n_feat, kernel_size, reduction, bias=bias, act=act))
        # Cross Stage Feature Fusion (CSFF)
        self.stage1_encoder = Encoder(n_feat, kernel_size, reduction, act, bias, scale_unetfeats, csff=False)
        self.stage1_decoder = Decoder(n_feat, kernel_size, reduction, act, bias, scale_unetfeats)
        self.stage2_encoder = Encoder(n_feat, kernel_size, reduction, act, bias, scale_unetfeats, csff=True)
        self.stage2_decoder = Decoder(n_feat, kernel_size, reduction, act, bias, scale_unetfeats)
        self.stage3_orsnet = ORSNet(n_feat, scale_orsnetfeats, kernel_size, reduction, act, bias, scale_unetfeats,
                                    num_cab)
        # Supervised attention bridges between stages 1->2 and 2->3.
        self.sam12 = SAM(n_feat, kernel_size=1, bias=bias)
        self.sam23 = SAM(n_feat, kernel_size=1, bias=bias)
        self.concat12 = conv(n_feat * 2, n_feat, kernel_size, bias=bias)
        self.concat23 = conv(n_feat * 2, n_feat + scale_orsnetfeats, kernel_size, bias=bias)
        self.tail = conv(n_feat + scale_orsnetfeats, out_c, kernel_size, bias=bias)
    def forward(self, x3_img):
        """Run all three stages on a full-resolution image batch.

        Returns:
            list: [stage-3 output (residual added to the input image),
            stage-2 image, stage-1 image] -- one restored estimate per
            stage, suitable for deep supervision.
        """
        # Original-resolution Image for Stage 3
        H = x3_img.size(2)
        W = x3_img.size(3)
        # Multi-Patch Hierarchy: Split Image into four non-overlapping patches
        # Two Patches for Stage 2
        x2top_img = x3_img[:, :, 0:int(H / 2), :]
        x2bot_img = x3_img[:, :, int(H / 2):H, :]
        # Four Patches for Stage 1
        x1ltop_img = x2top_img[:, :, :, 0:int(W / 2)]
        x1rtop_img = x2top_img[:, :, :, int(W / 2):W]
        x1lbot_img = x2bot_img[:, :, :, 0:int(W / 2)]
        x1rbot_img = x2bot_img[:, :, :, int(W / 2):W]
        ##-------------------------------------------
        ##-------------- Stage 1---------------------
        ##-------------------------------------------
        ## Compute Shallow Features
        x1ltop = self.shallow_feat1(x1ltop_img)
        x1rtop = self.shallow_feat1(x1rtop_img)
        x1lbot = self.shallow_feat1(x1lbot_img)
        x1rbot = self.shallow_feat1(x1rbot_img)
        ## Process features of all 4 patches with Encoder of Stage 1
        feat1_ltop = self.stage1_encoder(x1ltop)
        feat1_rtop = self.stage1_encoder(x1rtop)
        feat1_lbot = self.stage1_encoder(x1lbot)
        feat1_rbot = self.stage1_encoder(x1rbot)
        ## Concat deep features
        feat1_top = [torch.cat((k, v), 3) for k, v in zip(feat1_ltop, feat1_rtop)]
        feat1_bot = [torch.cat((k, v), 3) for k, v in zip(feat1_lbot, feat1_rbot)]
        ## Pass features through Decoder of Stage 1
        res1_top = self.stage1_decoder(feat1_top)
        res1_bot = self.stage1_decoder(feat1_bot)
        ## Apply Supervised Attention Module (SAM)
        x2top_samfeats, stage1_img_top = self.sam12(res1_top[0], x2top_img)
        x2bot_samfeats, stage1_img_bot = self.sam12(res1_bot[0], x2bot_img)
        ## Output image at Stage 1
        stage1_img = torch.cat([stage1_img_top, stage1_img_bot], 2)
        ##-------------------------------------------
        ##-------------- Stage 2---------------------
        ##-------------------------------------------
        ## Compute Shallow Features
        x2top = self.shallow_feat2(x2top_img)
        x2bot = self.shallow_feat2(x2bot_img)
        ## Concatenate SAM features of Stage 1 with shallow features of Stage 2
        x2top_cat = self.concat12(torch.cat([x2top, x2top_samfeats], 1))
        x2bot_cat = self.concat12(torch.cat([x2bot, x2bot_samfeats], 1))
        ## Process features of both patches with Encoder of Stage 2
        feat2_top = self.stage2_encoder(x2top_cat, feat1_top, res1_top)
        feat2_bot = self.stage2_encoder(x2bot_cat, feat1_bot, res1_bot)
        ## Concat deep features
        feat2 = [torch.cat((k, v), 2) for k, v in zip(feat2_top, feat2_bot)]
        ## Pass features through Decoder of Stage 2
        res2 = self.stage2_decoder(feat2)
        ## Apply SAM
        x3_samfeats, stage2_img = self.sam23(res2[0], x3_img)
        ##-------------------------------------------
        ##-------------- Stage 3---------------------
        ##-------------------------------------------
        ## Compute Shallow Features
        x3 = self.shallow_feat3(x3_img)
        ## Concatenate SAM features of Stage 2 with shallow features of Stage 3
        x3_cat = self.concat23(torch.cat([x3, x3_samfeats], 1))
        x3_cat = self.stage3_orsnet(x3_cat, feat2, res2)
        stage3_img = self.tail(x3_cat)
        return [stage3_img + x3_img, stage2_img, stage1_img]
| [
"claragarcia@MacBook-Pro-de-Clara.local"
] | claragarcia@MacBook-Pro-de-Clara.local |
e4ff6115024035a576ffcb2759f1ae5b8b9b6f6c | cb945ba66906b930f773835ff77530208f5205a6 | /Codewars/6 kyu/Sum of Digits_Digital Root.py | cd4f542bde2ef40462172ea8194848de16064976 | [] | no_license | Tchomasek/Codewars | a2f63ca088f53a5747763840abd66dcfb5917641 | 015f4797fd550327a8aec11abfbc88b51bd0aaac | refs/heads/master | 2021-06-13T04:35:03.957272 | 2020-08-31T18:09:01 | 2020-08-31T18:09:01 | 254,423,329 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | def digital_root(n):
while True:
if n > 9:
n_str=str(n)
n=0
for num in n_str:
n+=int(num)
else:
return n
print(digital_root(456))
| [
"46090576+Tchomasek@users.noreply.github.com"
] | 46090576+Tchomasek@users.noreply.github.com |
97d23fdb9293035257f2b63f7223884d29f25b32 | 3034e86347c71bf7e7af9e5f7aa44ab5ad61e14b | /mongodb/day04/grid.py | 6a756f9c08f808f46ec135295e3d86b64827d34a | [] | no_license | jason12360/AID1803 | bda039b82f43d6609aa8028b0d9598f2037c23d5 | f0c54a3a2f06881b3523fba7501ab085cceae75d | refs/heads/master | 2020-03-17T00:43:42.541761 | 2018-06-29T10:07:44 | 2018-06-29T10:07:44 | 133,127,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #用来获取数据库中gridfs存储文件
# Fetch a file stored via GridFS in the local MongoDB "grid" database
# and copy it back to the filesystem in 64-byte chunks.
from pymongo import MongoClient
# gridfs ships with (and is bound to) pymongo
import gridfs
conn = MongoClient('localhost',27017)
db = conn.get_database('grid')
# Obtain a GridFS handle over the database
fs = gridfs.GridFS(db)
files = fs.find()
for file in files:
    if file.filename =='./生日快乐歌.mp3':
        with open(file.filename,'wb') as f:
            while True:
                # file.read(n) returns the next chunk of the stored file's content
                data = file.read(64)
                if not data:
                    break
                f.write(data)
conn.close()
| [
"370828117@qq.com"
] | 370828117@qq.com |
55ac847e9dceee23fc10cf00d7684b8f264007bd | 6d16118f6a0aa2113a570261f2912c18117472ad | /app.py | 31a7154a2633c812f65e4b70f7bc61d79d126f84 | [] | no_license | elCaxper2/repo1 | 50738f1b2c8f374255c3cbd598095bbd37e5ee7f | 830a662a2ed4b04669f10e52acdcdd838ca0cbdb | refs/heads/master | 2020-09-24T06:30:25.957337 | 2019-12-04T01:03:39 | 2019-12-04T01:03:39 | 225,688,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | from flask import Flask
from flask import render_template
# WSGI application object; the routes below are registered against it.
app = Flask(__name__)
@app.route('/')
def hello():
    """Root endpoint: plain-text greeting."""
    return "Hello World!"
@app.route("/template")
def index():
    """Render the Jinja template at templates/index.html."""
    return render_template("index.html")
@app.route("/about")
def about():
    """Inline HTML snippet (red heading)."""
    return "<h1 style='color: red;'>GGPR WEB IN DOCKER!</h1>"
@app.route('/<name>')
def hello_name(name):
    """Greet the caller using the name captured from the URL path."""
    return "Hello {}!".format(name)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0') | [
"gplaza@gmv.com"
] | gplaza@gmv.com |
68190c6ba1e1051a720ec7e4063c40f1e6245e1e | 5bba6d90b3c393c43d26d66a84cb446ecdb9f58a | /tools/__init__.py | 36dee53ee5a63665e4bf0fb3ab7d8d61238fb03b | [] | no_license | xxftop1/interfaceDemo | 794cd5ada71838c874d1ee53a37df0da563e26ef | 972d7fef826453a6d2fb735be0842bb3d037bdc9 | refs/heads/master | 2023-01-03T18:13:19.532756 | 2020-10-28T14:30:19 | 2020-10-28T14:30:19 | 308,042,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName :__init__.py.py
# @Time :2020/10/26 21:36
# @Author :xxf
| [
"13842855651@163.com"
] | 13842855651@163.com |
5da559408f83dcb63e49ca6efcd79cb43fd97b44 | 649150d2a78841991d6c6b5bb38201dc9f168775 | /machine_learning_with_python/regression4.py | cf3c2e5b8797a1c6d2e2431051a37b7af19eb5fd | [
"Apache-2.0"
] | permissive | universityofprofessorex/machine-learning-with-python | 86e05fe209402af14cf54b00bfa98970e8c0732b | 88cf5951ccb6ef4ee7a6e0f818b67bb82984aff8 | refs/heads/main | 2023-07-14T06:56:09.885003 | 2021-08-29T00:49:19 | 2021-08-29T00:49:19 | 400,615,507 | 0 | 0 | Apache-2.0 | 2021-08-29T02:03:22 | 2021-08-27T19:26:48 | Python | UTF-8 | Python | false | false | 4,550 | py | import math
import os
import time
# import Quandl
import pathlib
import pandas as pd
import pyarrow.parquet as pq
from machine_learning_with_python.utils.file_functions import get_dataframe_from_csv
# deprecated: sklearn's old cross_validation module handled data-set splitting
# svm = support vector machine; SVR can also perform regression
# from sklearn import preprocessing, cross_validation, svm
from sklearn.model_selection import train_test_split
from sklearn import preprocessing, svm
from sklearn.linear_model import LinearRegression
import numpy as np
HERE = os.path.abspath(os.path.dirname(__file__))
_dir = pathlib.Path(HERE).resolve()
print(_dir.parent)
csv_file = f"{_dir.parent}/data/WIKI_PRICES_212b326a081eacca455e13140d7bb9db.csv"
parquet_file = f"{_dir.parent}/data/WIKI_PRICES_212b326a081eacca455e13140d7bb9db.parquet"
# Load the price history from Parquet (pandas + pyarrow engine) -- much faster
# than re-parsing the CSV with get_dataframe_from_csv(csv_file).
t1 = time.time()
df = pd.read_parquet(parquet_file, engine='pyarrow')
t2 = time.time()
delta_t = round((t2 - t1), 3)
print(f"Time it took = {delta_t} seconds\n")
# Keep only the adjusted OHLCV columns needed for the regression features.
df = df[
    [
        "adj_open",
        "adj_high",
        "adj_low",
        "adj_close",
        "adj_volume",
    ]
]
# High-minus-close spread as a percentage of the close (volatility proxy).
df["HL_PCT"] = (df["adj_high"] - df["adj_close"]) / df["adj_close"] * 100.0
# Daily percent change from open to close.
df["PCT_change"] = (df["adj_close"] - df["adj_open"]) / df["adj_open"] * 100.0
# Final feature frame.
df = df[["adj_close", "HL_PCT", "PCT_change", "adj_volume"]]
forecast_col = "adj_close"
# Replace NaNs with a sentinel instead of dropping rows outright.
df.fillna(-99999, inplace=True)
# Forecast horizon: 10% of the dataframe's length.
forecast_out = int(math.ceil(0.1 * len(df)))
# Label = the forecast column shifted forecast_out rows into the future.
df["label"] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)  # tail rows have no label after the shift
# Features X = everything except the label; labels y.
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
# Scale features together (prediction-time data must be scaled alongside).
X = preprocessing.scale(X)
# 80/20 train/test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Classifier definition and fit.
clf = LinearRegression(n_jobs=-1)  # choice A
# clf = svm.SVR()  # choice B: support-vector regression
clf.fit(X_train, y_train)  # train
# BUGFIX: evaluate on the held-out split, not the training data.  The original
# scored clf on (X_train, y_train) even though its own comment said to "use
# different data for this" -- that inflates the reported accuracy.
accuracy = clf.score(X_test, y_test)
print(f"accuracy = {accuracy}\n")
# we are now ready to test, train, and predict
| [
"bossjones@theblacktonystark.com"
] | bossjones@theblacktonystark.com |
f88ae87863b688f952fb45af536df47b94e1c31b | 4cc416ae03f85752fce81ac9a29ffdc112ff7e75 | /Experiment/timeDuration.py | cf13daf9ee0d28823021f9882a62ce8dbb676000 | [] | no_license | GuiMarion/BenchmarkSourceSeparation | 3c492603cb248282c2d62cb0df39bd9c0255eb61 | 284905eff8e23568482c6aca66acee93725e5841 | refs/heads/master | 2020-04-21T01:34:54.275688 | 2019-02-12T16:55:25 | 2019-02-12T16:55:25 | 169,228,284 | 0 | 1 | null | 2019-02-12T12:14:59 | 2019-02-05T11:11:43 | Python | UTF-8 | Python | false | false | 1,731 | py | import datetime
from optparse import OptionParser
def getTime(n, K, k2, k3, k4, k5=0):
    """Total experiment duration in seconds.

    Each of the *n* mixtures costs 16*K seconds of audio plus the pauses
    4*k2 + 4*k3 + 8*k4; *k5* is the pause between consecutive mixtures,
    which therefore occurs n-1 times.
    """
    per_mixture = 16 * K + 4 * k2 + 4 * k3 + 8 * k4
    between_mixtures = (n - 1) * k5
    return n * per_mixture + between_mixtures
if __name__ == "__main__":
    # CLI front-end for getTime(); prints the duration as H:MM:SS.
    # NOTE(review): the CLI options k1..k4 feed getTime's parameters k2..k5
    # (shifted by one) -- confirm the naming offset is intentional.
    usage = "usage: %prog [options] <path to database>"
    parser = OptionParser(usage)
    parser.add_option("-n", "--mixtures", type="int",
                      help="Number of mixture to pass on.",
                      dest="n")
    parser.add_option("-k", "--length", type="float",
                      help="Duration of the musical extract.",
                      dest="k")
    parser.add_option("-a", "--k1", type="float",
                      help="Pause between the mix and the extracted sources.",
                      dest="k1")
    parser.add_option("-b", "--k2", type="float",
                      help="Pause between two algorithm results.",
                      dest="k2")
    parser.add_option("-c", "--k3", type="float",
                      help="Pause between two source listening.",
                      dest="k3")
    parser.add_option("-d", "--k4", type="float",
                      help="Pause between two mixtures.",
                      dest="k4")
    options, arguments = parser.parse_args()
    # All six options supplied: include the inter-mixture pause.
    if options.n is not None and options.k is not None and options.k1 is not None and \
        options.k2 is not None and options.k3 is not None and options.k4 is not None:
        print(str(datetime.timedelta(seconds=getTime(options.n, options.k, options.k1, options.k2, options.k3, options.k4))))
    # Five options: only valid for a single mixture (no inter-mixture pause needed).
    elif options.n is not None and options.k is not None and options.k1 is not None and \
        options.k2 is not None and options.k3 is not None:
        if options.n > 1:
            parser.error("You must provide a pause value between two mixtures.")
        else:
            print(str(datetime.timedelta(seconds=getTime(options.n, options.k, options.k1, options.k2, options.k3))))
    else:
        parser.error("You have to specify at least the first 5 options.")
| [
"33119610+GuiMarion@users.noreply.github.com"
] | 33119610+GuiMarion@users.noreply.github.com |
47ea5536fef76e148509268ae8ba25d0729005a2 | 641de482cd30b7de00a711fcc078a72862117b90 | /apps/task/models.py | 69c6674ea24c5c578bf64a71a4ac52e2755d8029 | [] | no_license | SamBarFu/To-doList | 9c27a9947e2e0dd8d4c80990e24e72c876a7a3ea | a0e60609e154ba51e4e68decb452dddc2a460254 | refs/heads/master | 2021-02-17T02:42:49.730187 | 2020-03-31T23:53:26 | 2020-03-31T23:53:26 | 245,064,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from django.db import models
from django.contrib.auth.models import User
from datetime import date
# Create your models here.
class Task(models.Model):
    """A to-do item owned by a user, with a due date and a completion state."""
    # Completion choices stored as 'C'/'NC' in the state column.
    STATE = (
        ('C','Completed'),
        ('NC', 'Not Completed')
    )
    # Owning user; deleting the user cascades to their tasks.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    name_task = models.CharField(max_length=20)
    description = models.CharField(max_length=254)
    # Due date; dateNow() compares against it to flag overdue tasks.
    max_date = models.DateField()
    # Set when the task is completed; nullable while pending.
    finish_date = models.DateTimeField(blank=True, null=True)
    state = models.CharField(max_length=2, choices=STATE, default='NC')
    def __str__(self):
        return self.name_task
    def dateNow(self):
        """Return an inline HTML style attribute: red when the task is overdue."""
        if date.today() > self.max_date:
            return 'style=color:crimson;'
        else:
return 'style=color:black;' | [
"samuelbarberena12@gmail.com"
] | samuelbarberena12@gmail.com |
82ed4678d079a7b15ccaa434b5e7277f38ab356a | 4d77f3da849bed4846b2517b387aad94bb09992b | /myThesis/evFleet/fleetEnv.py | 7ec548e9f76da4324fe1e4fd12492b1dee91d64e | [] | no_license | Gwiradus/myRepository | dc7d5caac5fac4ce00a0c6e7f756d3f70e9aee97 | 3303ecc45bcbbaeb69832a19266e5c0c9e514ff3 | refs/heads/master | 2020-09-21T17:23:31.795583 | 2020-04-19T15:58:36 | 2020-04-19T15:58:36 | 224,864,445 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,377 | py | """Environment - EV Fleet Model"""
import numpy as np
from scipy.stats import truncnorm
np.random.seed(0)
class FleetEnvironment:
    """Implements the environment for the EV Fleet problem."""
    def __init__(self, ev_number, t_opening_hours=(7.0, 18.0), ad_means_sd=(8, 17, 1.5), charging_power=3.3):
        """Initialization for the environment.
        Args:
            ev_number: Number of simulated electric vehicles.
            t_opening_hours: Opening and closing hours of the EV charging parking lot.
            ad_means_sd: Mean and standard deviation of arrival and departure times, respectively.
            charging_power: Power rating of the EV chargers [kW].
        """
        self.ev_number = ev_number
        self.t_opening_hours = t_opening_hours
        self.ad_means_sd = ad_means_sd
        self.charging_power = charging_power
        self.start_state = 0.0
        # Per-event histories, appended to on every step.
        self.min_energy = []
        self.max_energy = []
        self.current_energy = []
        self.spot_price = []
        self.connected_evs = []
        self.delta = []
        self.current_time = None
        self.arrival_times = None
        self.departure_times = None
        self.required_energy = None
        self.current_evs = None
        self.current_state = None
        self.current_action = None
        self.initialize_fleet()
        reward = 0.0
        soc = 0.0
        termination = False
        # [reward, observation (fleet energy state), is_terminal]
        self.reward_obs_term = [reward, soc, termination]
    def initialize_fleet(self, distance_mean=70, distance_sd=20, fuel_economy=0.174):
        """Setup for the environment called when the class is created. Initializes the fleet parameters.
        Args:
            distance_mean: Mean of the distance driven by the vehicles [km].
            distance_sd: Standard deviation of the distance driven by the vehicles [km].
            fuel_economy: Fuel economy of the vehicles [kWH/km].
        """
        t_open = self.t_opening_hours[0]
        t_close = self.t_opening_hours[1]
        arrivals_mean = self.ad_means_sd[0]
        departures_mean = self.ad_means_sd[1]
        sd = self.ad_means_sd[2]
        # Arrivals drawn from a truncated normal on [t_open, 12]; departures on [12, t_close].
        self.arrival_times = np.around(truncnorm((t_open - arrivals_mean) / sd, (12 - arrivals_mean) / sd,
                                                 loc=arrivals_mean, scale=sd).rvs(size=self.ev_number), 2)
        self.departure_times = np.around(truncnorm((12 - departures_mean) / sd, (t_close - departures_mean) / sd,
                                                   loc=departures_mean, scale=sd).rvs(size=self.ev_number), 2)
        travelled_distance = np.random.normal(distance_mean, distance_sd, self.ev_number)
        # Energy each EV needs to recover [kWh] = distance * consumption per km.
        self.required_energy = travelled_distance * fuel_economy
        self.current_time = t_open
        # 0/1 mask of which EVs are currently plugged in.
        self.current_evs = np.zeros(self.ev_number)
        self.current_state = self.start_state
    def get_reward_state(self, time):
        """Gets the reward and new state after taking the previous action.
        Args:
            time: The new event time.
        """
        is_terminal = False
        old_soc = self.current_state
        old_time = self.current_time
        self.current_time = time
        # update current_state with the action
        delta_t = self.current_time - old_time
        self.define_boundaries()
        # New energy state = old state + (hours elapsed) * (connected EVs) * power * action,
        # clipped into the feasible [min_energy, max_energy] band just computed.
        self.current_state = round(
            old_soc + delta_t * sum(self.current_evs) * self.charging_power * self.current_action, 2)
        self.current_state = np.clip(self.current_state, self.min_energy[-1], self.max_energy[-1])
        self.current_energy.append(self.current_state)
        self.connected_evs.append(sum(self.current_evs))
        self.delta.append(delta_t)
        # calculate reward: spot price at the old event time times the energy charged
        reward = spot_price(old_time) * (self.current_state - old_soc)
        self.spot_price.append(spot_price(old_time))
        # terminate if goal is reached
        if self.current_time == self.t_opening_hours[1]:
            is_terminal = True
        self.reward_obs_term = [reward, self.current_state, is_terminal]
    def env_step(self, action, next_time):
        """A step taken by the environment.
        Args:
            action: The action taken by the agent.
            next_time: The time of the next event.
        """
        self.current_action = action
        self.get_reward_state(next_time)
    def define_boundaries(self):
        """Function to find the energy boundaries of an episode."""
        energy = self.ev_fleet_boundary()
        self.min_energy.append(energy[0])
        self.max_energy.append(energy[1])
    def ev_fleet_boundary(self):
        """Function to find the energy boundaries of the fleet of EVs.
        Returns:
            [int, int]: A list with the lower and upper energy boundaries for the fleet of EVs.
        """
        e_min = 0
        e_max = 0
        for ev in range(self.ev_number):
            # Update the connected mask as this EV arrives/departs, then sum its bounds.
            if self.arrival_times[ev] < self.current_time:
                self.current_evs[ev] = 1
            if self.departure_times[ev] < self.current_time:
                self.current_evs[ev] = 0
            energy_vector = self.ev_single_boundary([self.arrival_times[ev], self.departure_times[ev]],
                                                    self.required_energy[ev])
            e_min += energy_vector[0]
            e_max += energy_vector[1]
        return [e_min, e_max]
    def ev_single_boundary(self, time_vector, required_energy):
        """Function to find the energy boundaries of a single EV.
        Returns:
            [int, int]: A list with the lower and upper energy boundaries for a single EV.
        """
        time = self.current_time
        arrival_time = time_vector[0]
        departure_time = time_vector[1]
        # Before arrival: nothing charged; after departure: full requirement fixed.
        if time < arrival_time:
            return [0, 0]
        elif arrival_time <= time <= departure_time:
            # Min: what must already be charged to still finish by departure.
            # Max: what could have been charged at full power since arrival.
            e_min = max(required_energy - self.charging_power * (departure_time - time), 0)
            e_max = min(required_energy, self.charging_power * (time - arrival_time))
            return np.around([e_min, e_max], 2)
        else:
            return np.around([required_energy, required_energy], 2)
def spot_price(time):
    """Day-ahead electricity price for the hour containing *time*.

    Returns:
        float: 25 plus a sinusoid of amplitude 8 evaluated on the integer
        hour (fractions of an hour are truncated).
    """
    hour = int(time)
    return 25 + 8 * np.sin(12 * np.pi * hour / 22)
| [
"51299865+Gwiradus@users.noreply.github.com"
] | 51299865+Gwiradus@users.noreply.github.com |
701069f9258cad782bf153ae6b3f47942bb0fb9c | 3dbdc06187995ecb99ca48e8707d7942b7fc21d4 | /examples/ttgo/axp202_adc_brightness.py | bfab1298869ed1be3868627a5d9dca72a1a625b9 | [
"MIT"
] | permissive | y0no/lilygo-ttgo-twatch-2020-micropython | 1ab0725c15088e9a798402c80f3cd428724f8d57 | de1fff5d8c1335f7d42c16bafb0d5e083c17c222 | refs/heads/master | 2022-12-04T12:00:28.549254 | 2020-08-30T19:54:47 | 2020-08-30T19:54:47 | 289,098,460 | 2 | 0 | MIT | 2020-08-20T19:59:02 | 2020-08-20T19:59:01 | null | UTF-8 | Python | false | false | 2,364 | py | import lvgl as lv
import ttgo
from axp_constants import *
# Hardware handles: the watch object, its display (tft) and its AXP202 PMU.
watch = ttgo.Watch()
tft = watch.tft
power = watch.pmu
def init():
    """Enable the PMU ADC channels (VBUS/battery voltage and current) and start LVGL."""
    power.adc1Enable(AXP202_VBUS_VOL_ADC1
                     | AXP202_VBUS_CUR_ADC1 |
                     AXP202_BATT_CUR_ADC1 | AXP202_BATT_VOL_ADC1, True)
    watch.lvgl_begin()
def interface():
    """Build a two-tab LVGL screen (USB / Battery) with live PMU readouts
    and a backlight-brightness slider; refreshed once per second."""
    def update_task(task):
        # Periodic LVGL task: refresh all labels from the PMU readings.
        set_usb_voltage(power.getVbusVoltage())
        set_usb_pwr(power.getVbusCurrent())
        if power.isBatteryConnect():
            set_batt_voltage(power.getBattVoltage())
            if power.isChargeing():
                set_batt_pwr("Charging", power.getBattChargeCurrent())
                batt_percent.set_hidden(True)
            else:
                set_batt_pwr("Discharging", power.getBattDischargeCurrent())
                set_batt_per(power.getBattPercentage())
                batt_percent.set_hidden(False)
    def event_cb(obj, event):
        # Slider callback: map the slider value (0-100) to backlight level.
        if event == lv.EVENT.VALUE_CHANGED:
            tft.set_backlight_level(obj.get_value())
    # Label-formatting helpers (one per readout).
    def set_usb_pwr(pwr):
        usb_pwr.set_text("USB current: {} mA".format(pwr))
    def set_usb_voltage(volts):
        usb_voltage.set_text("USB voltage: {} mV".format(volts))
    def set_batt_voltage(volts):
        batt_voltage.set_text("Battery voltage: {} mV".format(volts))
    def set_batt_pwr(mode, pwr):
        batt_pwr.set_text("{} battery \ncurrent: {} mA".format(mode, pwr))
    def set_batt_per(per):
        batt_percent.set_text("Battery percent: {}%".format(per))
    # Screen layout: a tabview with a USB tab and a Battery tab.
    scr = lv.obj()
    tabview = lv.tabview(scr)
    vbus_tab = tabview.add_tab("USB")
    usb_voltage = lv.label(vbus_tab)
    usb_pwr = lv.label(vbus_tab)
    usb_pwr.set_y(usb_voltage.get_y() + 20)
    la = lv.switch(vbus_tab)
    batt_tab = tabview.add_tab("Battery")
    batt_voltage = lv.label(batt_tab)
    batt_pwr = lv.label(batt_tab)
    batt_pwr.set_y(batt_voltage.get_y() + 20)
    batt_percent = lv.label(batt_tab)
    batt_percent.set_y(batt_pwr.get_y() + 40)
    # Brightness slider pinned to the bottom of the Battery tab.
    brightness_slider = lv.slider(batt_tab)
    brightness_slider.align(None, lv.ALIGN.IN_BOTTOM_MID, 0, 0)
    brightness_slider.set_range(0, 100)
    brightness_slider.set_value(100, 1)
    brightness_slider.set_event_cb(event_cb)
    update_task(None)
    lv.scr_load(scr)
    watch.tft.backlight_fade(100)
    # Re-run update_task every 1000 ms at priority 5.
    lv.task_create(update_task, 1000, 5, None)
# Entry point: configure the PMU ADCs, then build and show the UI.
init()
interface()
| [
"29470622+ophoperhpo@users.noreply.github.com"
] | 29470622+ophoperhpo@users.noreply.github.com |
2b2ea73e64b32ed92d82f2014115fdb21cf15ae7 | a437e573fc086b4181b682f3666acb9da5daa59a | /longest_substring/144ms.py | ebe15691f7068908dd58e2a1095d7303f476c00d | [] | no_license | rghv404/leet_code | aa0d2ad1158f13de3dbcde2e6a0d52cbfa17581c | 1a961afb22f482e6b920256fa5d14154e5e1a940 | refs/heads/master | 2021-06-29T05:27:16.932772 | 2020-10-12T15:42:04 | 2020-10-12T15:42:04 | 167,116,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | class Solution:
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
sub_count = 0
n = len(s)
a = set()
i = 0; j = 0
if len(s) == 1:
return len(s)
while i < n and j < n:
if s[j] not in a:
a.add(s[j])
j+=1
sub_count = max(sub_count, j - i)
else:
a.remove(s[i])
i+=1
return sub_count
| [
"rghu_93@hotmail.com"
] | rghu_93@hotmail.com |
a783bdb2cbac71f57900c83b05288050df71ca1a | a161999b8a9009b6bf961288b68d651541882f2d | /process_news.py | e653f8d7622888988beeeccb4c26faee2e2b6d09 | [] | no_license | kkb-Projects/P1-news-summarization | 788896460aa11712812a86eaf7c7c066c5028d0b | 85122968d92b84741fd2fa8dbb81410e807c7eac | refs/heads/master | 2021-01-09T14:39:09.941508 | 2020-03-19T02:44:17 | 2020-03-19T02:44:17 | 242,340,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | # -*- coding:utf8 -*-
# author:yaolinxia
# datetime:2020/3/11
# software: PyCharm
import random
import re
import pandas as pd
from collections import Counter
import jieba
from functools import reduce
"""
汉语新闻语料库处理
"""
def token(string):
    """Return every ``\\w+`` run (word token) found in *string*."""
    # Raw string literal: '\w' in a plain string is an invalid escape
    # sequence (DeprecationWarning, SyntaxError in newer Pythons).
    return re.findall(r'\w+', string)
# Persist the cleaned article text, one article per line.
def to_txt(articles_clean, outpath='news_articles.txt'):
    """Write each string of *articles_clean* to *outpath*, one per line.

    The file is written as UTF-8: the articles contain Chinese text, and
    relying on the platform default codec (e.g. GBK on Windows) could
    raise ``UnicodeEncodeError``.
    """
    with open(outpath, 'w', encoding='utf-8') as f:
        for a in articles_clean:
            f.write(a + '\n')
# Word segmentation helper.
def cut(string):
    """Segment *string* with jieba and return the tokens as a list."""
    return [word for word in jieba.cut(string)]
# Store the tokens in an index -> token dict and persist it to disk.
def to_dict(Token, out_path='news_articles_dict.txt'):
    """Map each token of *Token* to its position and write the dict to *out_path*.

    Fixes two defects in the original: the (growing) dict was re-written
    to the file on every loop iteration (quadratic output), and a stray
    debug ``print(line_dict[2])`` crashed with ``KeyError`` whenever
    fewer than three tokens were supplied.
    """
    line_dict = {}
    for i, line in enumerate(Token):
        line_dict[i] = line
    with open(out_path, 'w') as f:
        f.write(str(line_dict))
def seg2txt(Token, out_path='news_articles_cut.txt'):
    """Write the tokens of *Token* to *out_path*, separated (and
    terminated) by single spaces."""
    with open(out_path, 'w') as f:
        for word in Token:
            f.write(word + ' ')
# Frequency count over a segmented-text file.
def seg2num(cut_txt):
    """Print the two most frequent non-whitespace characters in *cut_txt*.

    Note: iterating over ``lines.strip()`` yields individual characters,
    so this counts characters rather than words.
    """
    c = Counter()
    with open(cut_txt, 'r') as f:
        # The original wrapped this in ``for i in range(2)``; the second
        # pass read nothing because the file handle was already
        # exhausted, so a single pass is behaviourally identical.
        for lines in f.readlines():
            for l in lines.strip():
                c[l] += 1
    for (k, v) in c.most_common(2):  # two most frequent characters
        print("%s:%d" % (k, v))
if __name__ == '__main__':
    # Input corpora; only the wiki character-count step is active below.
    filename = 'data/sqlResult_1558435.csv'
    wiki_file = "data/wiki_00"
    wiki_out = "data/output/wiki_less.txt"
    # The triple-quoted block below is disabled pipeline code (CSV load,
    # cleaning, segmentation); kept verbatim for reference.
    """
    outpath = 'news_articles.txt'
    content = pd.read_csv(filename, encoding='gb18030')
    articles = content['content'].tolist()
    articles_clean = [''.join(token(str(a))) for a in articles]
    Token = []
    Token = cut(open(outpath).read())
    print("Token", Token)
    # to_dict(Token)
    seg2txt(Token)
    """
    # Print the two most frequent characters of the segmented wiki dump.
    seg2num("data/output/wiki_cut.txt")
| [
"18860976931@163.com"
] | 18860976931@163.com |
27bd8867afe8e9922cc0d962845a763b2d8d2c36 | 5a4daf245b2d2a124023c2a9ea1d37ad73a44fb3 | /RefStep.py | d99fe9362f9fbeadab9ba3aa4cf3a6de805c2167 | [] | no_license | AtillaTheFun/RefStep | 9d2bbdc4ce13bdc2ddf43d01fff3f2b979a24cd6 | ec9c493326b6e77606e0fc961ce4ab42db58b5db | refs/heads/master | 2021-09-12T09:56:54.621271 | 2018-04-16T04:28:24 | 2018-04-16T04:28:24 | 114,570,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,536 | py | """
Main thread for controlling the buttons of the ref-step algorithm.
Information is collected here and sent to other objects for handling.
"""
import wx, wx.html
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import GTC
import csv
import os
import sys
import visa
import time
import PyPDF2
import docx
import modules.noname as noname
import modules.visa2 as visa2 # this is the simulation version of visa
import modules.GridMaker as GridMaker
import modules.pywxgrideditmixin as pywxgrideditmixin
import modules.tables as tables
import modules.analysis as analysis
import modules.gpib_data as gpib_data
import modules.gpib_inst as gpib_inst
import modules.stuff as stuff
import modules.ReportBuilder as ReportBuilder
class GraphFrame(noname.MyFrame1):
def __init__(self, parent):
noname.MyFrame1.__init__(self, parent)
#the mixin below offers better ctrl c ctr v cut and paste than the basic wxgrid
wx.grid.Grid.__bases__ += (pywxgrideditmixin.PyWXGridEditMixin,)
self.m_grid3.__init_mixin__()
self.m_grid21.__init_mixin__()
self.m_grid2.__init_mixin__()
self.m_grid4.__init_mixin__()
# self.m_grid8.__init_mixin__()
self.number_plots = 1
self.ranges=[]
self.paused = True
self.Show(True)
self.Validate = False
self.EVT_RESULT_ID_1 = wx.NewId() #used for GPIB data 1 thread
self.worker1 = None # for data source 1
stuff.EVT_RESULT(self, self.OnResult1, self.EVT_RESULT_ID_1)
log = self.m_textCtrl81 # where stdout will be redirected
redir = stuff.RedirectText(log)
sys.stdout = redir #print statements, note to avoid 'print' if callafter delay is an issue
sys.stderr = redir #python errors
self.data = stuff.SharedList(None) #no data on start up
self.cwd = os.getcwd() #identifies working directory at startup.
iconFile = os.path.join(self.cwd, 'testpoint.ico')
icon1 = wx.Icon(iconFile, wx.BITMAP_TYPE_ICO)
self.dirname = 'xxx'
self.SetIcon(icon1)
self.inst_bus = visa # can be toggled (OnSimulate) to visa 2 for simulation
self.START_TIME = 0 #to be overidden when worker thread is called
self.filled_grid = False #was grid sucessfuly filled
self.loaded_dict = False
self.loaded_ranges = False
self.OverideSafety = False
col_names = ['Min','Max','# Readings','Pre-reading delay','Inter-reading delay','# Repetitions','# steps']
for i in range(len(col_names)):
self.m_grid21.SetColLabelValue(i, col_names[i])
#Murray wanted a popup window with info?
self.OnAbout(None)
def OnCreateReport(self, event):
"""
Uses the Report builder to output a Calibration Report.
"""
CalRep = ReportBuilder.CalReport()
CalRep.init(self)
CalRep.BuildReport()
    def OnSource(self, event):
        """
        Respond to checkbox events.
        """
        # NOTE(review): by the time this handler fires the click has
        # already toggled the checkbox, so inverting it here undoes the
        # user's change -- presumably intentional (locking the box?),
        # but confirm against the UI design.
        if self.m_checkBox1.GetValue():
            self.m_checkBox1.SetValue(False)
        else:
            self.m_checkBox1.SetValue(True)
    def OnMeter(self, event):
        """
        Respond to checkbox events.
        """
        # NOTE(review): same inverted-toggle pattern as OnSource above;
        # this re-flips the value the click just set -- verify intent.
        if self.m_checkBox2.GetValue():
            self.m_checkBox2.SetValue(False)
        else:
            self.m_checkBox2.SetValue(True)
def dcStop(self, event):
"""
Flags the worker thread to stop running if it exists.
"""
# Flag the worker thread to stop if running
if self.worker1:
print('Halting GPIB data gathering')
self.worker1.abort()
def dcStart(self, event):
"""
Creates the instruments, and calls the doStart function.
"""
log = self.m_textCtrl91 # where stdout will be redirected
redir = stuff.RedirectText(log)
sys.stdout = redir #print statements, note to avoid 'print' if callafter delay is an issue
sys.stderr = redir #python errors
if self.filled_grid == True:
instruments = self.dcCreateMeter()
self.dcDoStart(instruments)
else:
print("Input grids changed, generate a table again to continue")
def dcMakeSafe(self, event):
"""
Flags all threads to stop.
"""
if self.worker1:
self.worker1.MakeSafe()
self.doStop() #stop main data gathering
self.paused = True
#self.m_button2.SetLabel("Plot")
def dcCreateMeter(self):
"""
Reads the dictionary uploaded to the grid, and creates gpib_inst.INSTRUMENT accordingly.
Instruments must be the meter on the left, source S in the middle, source X on the right.
"""
def sim(s):
"""Nested function used only here, returns a simplified string"""
s = s.replace('\\r','\r')
s = s.replace('\\n','\n')
#Other simplifications might be necessary in the future I suppose.
return s
dicts = self.m_grid2
dm={} #Meter dictionary
dx={} #X dictionary
ds={} #S dictionary
rows = dicts.GetNumberRows()
for row in range(rows):
dm.update({sim(dicts.GetCellValue(row, 0)):sim(dicts.GetCellValue(row, 1))})
ds.update({sim(dicts.GetCellValue(row, 2)):sim(dicts.GetCellValue(row, 3))})
dx.update({sim(dicts.GetCellValue(row, 4)):sim(dicts.GetCellValue(row, 5))})
#Unpack the dictionaries to each respective instrument.
self.meter = gpib_inst.INSTRUMENT(self.inst_bus, 'M', address=self.Meteraddress.GetValue(), **dm)
return [self.meter]
def dcDoStart(self, instruments):
"""
Starts the algorithm, sends the created instruments to the wroker thread.
"""
self.meter = instruments[0]
#first read essential setup info from the control grid (self.m_grid3).
grid = self.m_grid91
grid.EnableEditing(False)
#int(float()) is needed because the grid has loaded,e.g. 2.0 instead of 2
dvm_range_col = 0
self.START_TIME = time.localtime()
#DISABLE BUTTONS
for button in [self.m_menuItem21,self.m_menuItem11,self.m_menuItem111,\
self.m_menuItem2,self.m_menuItem1,self.m_menuItem25,\
self.m_menuItem26,self.m_button15,self.m_button16]:
button.Enable(False)
#now call the thread
if not self.worker1:
self.worker1 = gpib_data.GPIBThreadDC(self, self.EVT_RESULT_ID_1,\
[self.inst_bus, grid, self.meter,\
dvm_range_col, self.Analysis_file_name,self.m_textCtrl92.GetValue(),self.m_textCtrl93.GetValue()],\
self.data,self.START_TIME,self.OverideSafety,self)
#It has a huge list of useful things that it needs.
def OnAddRowDC(self,event):
"""Add another row to the ranges table, this is necessary as it requires manual inputting."""
self.m_grid91.AppendRows(1, True)
self.m_grid91.Layout()
def OnOpenData(self, event):
"""
from MIEcalculator, graph_gui.py.
"""
# In this case, the dialog is created within the method because
# the directory name, etc, may be changed during the running of the
# application. In theory, you could create one earlier, store it in
# your frame object and change it when it was called to reflect
# current parameters / values
wildcard = "Poject source (*.csv; *.xls; *.xlsx; *.xlsm)|*.csv;*.xls; *.xlsx; *.xlsm|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(self, "Choose a data file", self.dirname, "",
wildcard, wx.OPEN | wx.MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
self.data_file = os.path.join(dirname, filename)
#remember the project working directory
self.FillData()
self.m_textCtrl187.Clear()
self.m_textCtrl187.WriteText(filename) # update text field with current data file.
dlg.Destroy()
def OnOpenDCData(self, event):
"""
from MIEcalculator, graph_gui.py.
"""
# In this case, the dialog is created within the method because
# the directory name, etc, may be changed during the running of the
# application. In theory, you could create one earlier, store it in
# your frame object and change it when it was called to reflect
# current parameters / values
wildcard = "Poject source (*.csv; *.xls; *.xlsx; *.xlsm)|*.csv;*.xls; *.xlsx; *.xlsm|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(self, "Choose a data file", self.dirname, "",
wildcard, wx.OPEN | wx.MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
self.DCdata_file = os.path.join(dirname, filename)
self.FillDCData()
self.m_textCtrl187b.Clear()
self.m_textCtrl187b.WriteText(filename) # update text field with current data file.
dlg.Destroy()
def FillData(self):
"""
Loads data to create a report. Requires a results sheet named "Results".
Uses tables.TABLES for a excel-to-grid object.
"""
datagrid = tables.TABLES(self)
self.data_grid = datagrid.excel_to_grid(self.data_file, 'Results', self.m_grid44)
self.data_grid = datagrid.excel_to_grid(self.data_file, 'Gain Ratios', self.m_grid42)
self.data_grid = datagrid.excel_to_grid(self.data_file, 'Linearity Ratios', self.m_grid43)
meter_row = 0
for i in range(0, self.m_grid42.GetNumberRows()):
if self.m_grid42.GetCellValue(i,0) == 'Meter Gain Ratios' :
meter_row = i
if self.m_checkBox1.GetValue():
self.m_grid42.DeleteRows(0,meter_row)
else:
self.m_grid42.DeleteRows(meter_row-1, self.m_grid42.GetNumberRows()-1)
def FillDCData(self):
"""
Loads data to create a report. Requires a results sheet named "Results".
Uses tables.TABLES for a excel-to-grid object.
"""
datagrid = tables.TABLES(self)
self.data_grid = datagrid.excel_to_grid(self.DCdata_file, 'Sheet', self.m_grid41)
def OnChooseAnalysisFile(self, event):
wildcard = "Poject source (*.csv; *.xls; *.xlsx; *.xlsm)|*.csv;*.xls; *.xlsx; *.xlsm|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(self, "Choose a data file", self.dirname, "",
wildcard, wx.OPEN | wx.MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
self.Analysis_file_name.Clear()
self.Analysis_file_name.WriteText(filename) # update text field with current data file.
dlg.Destroy()
def OnClearResultsDC(self, event):
"""
Clears the Calibration Ranges table in the DC Offset Measurement tab to allow additional test runs.
Reloads the ranges into the table once it is clear.
"""
self.m_grid91.ClearGrid()
print('Clear Grid')
self.FillGrid()
def OnLive(self, event):
"""
Chooses visa for live instruments
"""
if self.m_menuItem2.IsChecked():
self.inst_bus = visa #default for real instruments
def OnSimulate(self, event):
"""
Chooses visa2 for simulated (poorly) GPIB.
"""
if self.m_menuItem1.IsChecked():
self.inst_bus = visa2 #choose visa2 for simulation
else:
self.inst_bus = visa
def OnOpenDict(self, event):
"""
from MIEcalculator, graph_gui.py.
"""
# In this case, the dialog is created within the method because
# the directory name, etc, may be changed during the running of the
# application. In theory, you could create one earlier, store it in
# your frame object and change it when it was called to reflect
# current parameters / values
wildcard = "Poject source (*.csv; *.xls; *.xlsx; *.xlsm)|*.csv;*.xls; *.xlsx; *.xlsm|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(self, "Choose a project file", self.dirname, "",
wildcard, wx.OPEN | wx.MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
self.proj_file = os.path.join(dirname, filename)
self.projwd = dirname #remember the project working directory
dlg.Destroy()
self.FillGrid()
def OnLoadTable(self, event):
"""Immediatly calls the FillGrid function, so it can be used without the event too."""
self.FillGrid()
def FillGrid(self):
"""
Loads self.proj_file to the grid. Requires a dictionary sheet named "Dict" and
a control sheet named "Sheet 1". Uses tables.TABLES for a excel-to-grid object.
"""
controlgrid = tables.TABLES(self)
self.filled_grid = controlgrid.excel_to_grid(self.proj_file, 'Control', self.m_grid3)
if self.filled_grid == True:
grid = self.m_grid3
if int(float(grid.GetCellValue(3,3)))>int(grid.GetNumberRows()):
#int(float(is needed as it cant seem to cast straight to an int
print("Final row needed to be updated in grid")
grid.SetCellValue(3,3,str(grid.GetNumberRows()))
self.m_grid3.Layout()
else:
print("no sheet named 'Control' found")
self.loaded_dict = controlgrid.excel_to_grid(self.proj_file, 'Dict', self.m_grid2)
if self.loaded_dict == True:
col_names = ['Key words','Meter','Key words','Source S','Key words','Source X']
for i in range(len(col_names)):
self.m_grid2.SetColLabelValue(i, col_names[i])
self.m_grid2.Layout()
else:
print("no sheet named 'Dict' found, can not run")
self.loaded_ranges = controlgrid.excel_to_grid(self.proj_file, 'Ranges', self.m_grid21)
if self.loaded_ranges == True:
col_names = ['Min','Max','# Readings','Pre-reading delay','Inter-reading delay','# Repetitions','# steps']
for i in range(len(col_names)):
self.m_grid21.SetColLabelValue(i, col_names[i])
self.m_grid21.Layout()
#Copy ranges from m_grid21 to m_grid91
rows = self.m_grid21.GetNumberRows()
rows = range(0,int(rows))
for x in rows:
self.m_grid91.SetCellValue(x+1,0, self.m_grid21.GetCellValue(x,1))
else:
print("no sheet named 'Ranges' found")
#wip
i = 0
while(isinstance(self.m_grid21.GetCellValue(1,i), (int, long, float))):
self.m_grid91.SetCellValue(0,i,self.m_grid21.GetCellValue(1,i))
i = i+1
def OnAddRow(self,event):
"""Add another row to the ranges table, this is necessary as it requires manual inputting."""
self.m_grid21.AppendRows(1, True)
self.m_grid21.Layout()
def OnAddRange(self,event):
"""Add a range to the Calibration Ranges table """
ranges = self.m_grid21.GetNumberRows()
i = 0
while i < ranges:
cell = self.m_grid21.GetCellValue(i,0)
if cell == '':
row = i
i = ranges
if i == (ranges-1):
self.m_grid21.AppendRows(1, True)
ranges = self.m_grid21.GetNumberRows()
i = i +1
self.m_grid21.SetCellValue(row,0,self.m_textCtrl50y.GetValue())
self.m_grid21.SetCellValue(row,1,self.m_textCtrl50z.GetValue())
self.m_grid21.SetCellValue(row,2,self.m_textCtrl50a.GetValue())
self.m_grid21.SetCellValue(row,3,self.m_textCtrl50b.GetValue())
self.m_grid21.SetCellValue(row,4,self.m_textCtrl50c.GetValue())
self.m_grid21.SetCellValue(row,5,self.m_textCtrl50d.GetValue())
self.m_grid21.SetCellValue(row,6,self.m_textCtrl50e.GetValue())
def OnClearRange(self, event):
"""Remove the last range from the Calibration Ranges table"""
ranges = self.m_grid21.GetNumberRows()
i = 0
while i < ranges:
cell = self.m_grid21.GetCellValue(i,0)
if cell == '':
row = i-1
i = ranges
if i == (ranges-1):
row = i
i = i +1
if row>-1:
for i in range(0,7):
self.m_grid21.SetCellValue(row,i,'')
def ProgressUpdate(self,Label,Status,colour):
"""
Update the status tracker in the Run tab. Parameters are Label of the heading to update,
the string to update to and the colour of the text as a wx.colour
"""
if Status == 'Error':
obj = self.LastObj
colour = wx.Colour(255,0,0)
if Label == "Creating Instruments:":
obj = self.m_staticText123b
self.m_staticText124b.SetValue(' ')
self.m_staticText125b.SetValue(' ')
elif Label == "Safety Checks:":
obj = self.m_staticText124b
self.m_staticText125b.SetValue(' ')
elif Label == "Data Collection:":
obj = self.m_staticText125b
#obj.SetDefaultStyle(wx.TextAttr(wx.NullColour, colour))
obj.SetForegroundColour(colour)
obj.SetValue(Status)
self.LastObj = obj
def OnGenerateTable(self,event):
"""
If a table has been loaded, calls CreateInstrumets and then GenerateTable.
"""
#check that the ranges are inputed correctly?
if self.loaded_dict == True:
instruments = self.CreateInstruments()
self.GenerateTable(instruments)
else: print("Load instrument dictionaries")
def GenerateTable(self,instruments):
"""
Generate table according to the calibration ranges table. The ranges
entered into the table are drawn from the insruments given as parameters
"""
grid = self.m_grid3 #The grid to be used.
#Make the grid 0 by 0, so it enlarges to exactly the right size when data is inputted.
if grid.GetNumberRows()>0:
grid.DeleteRows(0,grid.GetNumberRows() ,True)
if grid.GetNumberCols()>0:
grid.DeleteCols(0,grid.GetNumberCols() ,True)
range_table = self.m_grid21 #Table containing the ranges for the calibration.
cal_ranges = []
for row in range(range_table.GetNumberRows()):
info = [str(range_table.GetCellValue(row,i)) for i in range(7)]
if all(info): #Checks if ALL elements of info are non-empty/non-zero.
cal_ranges.append(info)
ranges = []
for inst in instruments:
ranges.append(inst.range) #Collects the physical ranges of the instrument.
#Recall that those could be different to the calibration ranges, eg:
#Instrument can have a range (0,12) but we want to do a buildup on (0,10).
rm,rs,rx = ranges #split up into range meter, range source, range sourceX.
GridFactory = GridMaker.GridPrinter(self,grid)
full_cols = GridFactory.ColMaker(rm,rs,rx,cal_ranges) #Thats it, grid is made. All the previous stuff was colelcting info.
for col,i in zip(full_cols, range(1,10)):
GridFactory.PrintCol(col,i,8)
#This prints the column to the table, since GridFactory has acess to the table in the GUI.
#This is because this instance of the class was sent to the grid maker.
#Just a long list of headers:
titles = ["X Range", "X Settings (V)","S Range","S Settings (V)","DVM Range","Nominal reading",\
"#Readings","Delay (S)","DVM pause","DVM status","S status","X status","Mean","STD"]
GridFactory.PrintRow(titles,1,7)
info = ["Start Row",8,"Stop Row",grid.GetNumberRows()]
GridFactory.PrintRow(info,0,4)
info = ["instruments:","Meter: "+str(self.meter.label),"S: "+str(self.sourceS.label),"X: "+str(self.sourceX.label)]
GridFactory.PrintRow(info,0,3)
self.filled_grid = True #Flag that the grid was sucessfully filled up.
self.m_grid3.Layout()
def OnAnalysisFile(self, event):
"""
from MIEcalculator, graph_gui.py.
"""
# In this case, the dialog is created within the method because
# the directory name, etc, may be changed during the running of the
# application. In theory, you could create one earlier, store it in
# your frame object and change it when it was called to reflect
# current parameters / values
wildcard = "Poject source (*.csv; *.xls; *.xlsx; *.xlsm)|*.csv;*.xls; *.xlsx; *.xlsm|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(self, "Choose a project file", self.dirname, "",
wildcard, wx.OPEN | wx.MULTIPLE)
analysis_file = None
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
print(filename)
dirname = dlg.GetDirectory()
print(dirname)
analysis_file = os.path.join(dirname, filename)
dlg.Destroy()
if analysis_file: self.Analysis_file_name.SetLabel(analysis_file)
def OnAnalyse(self,event):
"""
Reads the name of the file to analyse,
and sends it to the analysis object analysis.Analyser
"""
#read a text box to get the name of file to analyse
#create analysis object, send it the name of the file
#it updates the file, adding the ratios.
#call fill grid to load the new file up and replace the old one.
xcel_name = str(self.Analysis_file_name.GetValue())#'Book1.xlsx'
print(xcel_name)
xcel_sheet = 'Sheet'
analyser = analysis.Analyser(xcel_name,xcel_sheet)
analyser.analysis()
analyser.Save(xcel_name)
controlgrid = tables.TABLES(self)
printed_results = controlgrid.excel_to_grid(xcel_name, 'Results', self.m_grid4)
printed_results_ratios = controlgrid.excel_to_grid(xcel_name, 'Gain Ratios', self.m_grid4b)
#Perhaps find a good place to put back to the table.
def OnSaveTables(self,event):
"""
from MIEcalculator, graph_gui.py.
"""
# In this case, the dialog is created within the method because
# the directory name, etc, may be changed during the running of the
# application. In theory, you could create one earlier, store it in
# your frame object and change it when it was called to reflect
# current parameters / values
wildcard = "Poject source (*.csv; *.xls; *.xlsx; *.xlsm)|*.csv;*.xls; *.xlsx; *.xlsm|" \
"All files (*.*)|*.*"
dlg = wx.FileDialog(self, "Choose a project file", self.dirname, "",
wildcard, wx.OPEN | wx.MULTIPLE)
save_file = None
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
save_file = os.path.join(dirname, filename)
dlg.Destroy()
if save_file: self.SaveGrid(save_file)
def SaveGrid(self,path):
"""
Saves using the tables.TABLES object
"""
controlgrid = tables.TABLES(self)
controlgrid.grid_to_excel(path, [(self.m_grid3,"Control"),(self.m_grid2,"Dict"),(self.m_grid21,"Ranges")])
def on_grid_edited(self, event):
"""When one of the two input grids is edited, disable the run button."""
self.filled_grid = False
#This prevents the user from running the program until they press
#the fill grid button, and update the grid.
def DoReset(self, event):
"""
Resets by clearing the data, and clearing the on screen text feedback.
Also sets the Make Safe button back to green.
"""
#reset the data, and clear the text box
self.doStop()
self.data.reset_list()
self.m_textCtrl81.Clear()
self.m_button12.SetBackgroundColour(wx.Colour(0, 255, 0))
def OnStart(self, event):
"""
Creates the instruments, and calls the doStart function.
"""
log = self.m_textCtrl81 # where stdout will be redirected
redir = stuff.RedirectText(log)
sys.stdout = redir #print statements, note to avoid 'print' if callafter delay is an issue
sys.stderr = redir #python errors
if self.filled_grid == True:
if self.m_checkBox3.IsChecked():
instruments = self.CreateInstruments()
self.doStart(instruments)
else:
self.CheckCalibration()
else:
print("Input grids changed, generate a table again to continue")
def CheckCalibration(self):
"""
Generate and show confirmation window to check DC Zero Calibration.`
"""
popup = Confirmation()
popup.Show()
def CreateInstruments(self):
"""
Reads the dictionary uploaded to the grid, and creates gpib_inst.INSTRUMENT accordingly.
Instruments must be the meter on the left, source S in the middle, source X on the right.
"""
def sim(s):
"""Nested function used only here, returns a simplified string"""
s = s.replace('\\r','\r')
s = s.replace('\\n','\n')
#Other simplifications might be necessary for additional instruments.
return s
dicts = self.m_grid2
dm={} #Meter dictionary
dx={} #X dictionary
ds={} #S dictionary
rows = dicts.GetNumberRows()
for row in range(rows):
dm.update({sim(dicts.GetCellValue(row, 0)):sim(dicts.GetCellValue(row, 1))})
ds.update({sim(dicts.GetCellValue(row, 2)):sim(dicts.GetCellValue(row, 3))})
dx.update({sim(dicts.GetCellValue(row, 4)):sim(dicts.GetCellValue(row, 5))})
#Unpack the dictionaries to each respective instrument.
self.meter = gpib_inst.INSTRUMENT(self.inst_bus, 'M', address=self.Meteraddress.GetValue(), **dm)
self.sourceS = gpib_inst.INSTRUMENT(self.inst_bus, 'S', address=self.Saddress.GetValue(), **ds)
self.sourceX = gpib_inst.INSTRUMENT(self.inst_bus, 'X', address=self.Xaddress.GetValue(), **dx)
return [self.meter, self.sourceS, self.sourceX]
def OnOverideSafety(self,event):
self.OverideSafety = True
def OnCompleteChecks(self,event):
self.OverideSafety = False
def doStart(self,instruments):
"""
Starts the algorithm, sends the created instruments to the wroker thread.
"""
self.meter,self.sourceS,self.sourceX = instruments
#first read essential setup info from the control grid (self.m_grid3).
grid = self.m_grid3
grid.EnableEditing(False)
#int(float()) is needed because the grid has loaded,e.g. 2.0 instead of 2
start_row = int(float(grid.GetCellValue(3,1)))-1#wxgrid starts at zero
stop_row = int(float(grid.GetCellValue(3,3)))-1
sX_range_col = 1#source X range column
sX_setting_col = 2#source X setting column
sS_range_col = 3
sS_setting_col = 4
dvm_range_col = 5
dvm_nominal_col = 6
dvm_nordgs_col = 7
delay_col = 8
self.START_TIME = time.localtime()
#DISABLE BUTTONS
for button in [self.m_menuItem21,self.m_menuItem11,self.m_menuItem111,\
self.m_menuItem2,self.m_menuItem1,self.m_menuItem25,\
self.m_menuItem26,self.m_button15,self.m_button16]:
button.Enable(False)
#now call the thread
if not self.worker1:
self.worker1 = gpib_data.GPIBThreadF(self, self.EVT_RESULT_ID_1,\
[self.inst_bus, grid, start_row, stop_row, dvm_nordgs_col, self.meter,\
dvm_range_col, self.sourceX, sX_range_col, sX_setting_col,self.sourceS,\
sS_range_col, sS_setting_col,delay_col, self.Analysis_file_name],\
self.data,self.START_TIME,self.OverideSafety,self)
#It has a huge list of useful things that it needs.
def OnStop(self, event):
self.doStop()
def doStop(self):
"""
Flags the worker thread to stop running if it exists.
"""
# Flag the worker thread to stop if running
if self.worker1:
print('Halting GPIB data gathering')
self.worker1.abort()
def OnMakeSafe(self, event):
"""
Flags all threads to stop.
"""
if self.worker1:
self.worker1.MakeSafe()
self.doStop() #stop main data gathering
self.paused = True
#self.m_button2.SetLabel("Plot")
# next run a gpib thread that sets sources to zero and standby and meter to autorange HV?
def OnResult1(self, event):
"""Show Result status, event for termination of gpib thread"""
#ENABLE BUTTONS
for button in [self.m_menuItem21,self.m_menuItem11,self.m_menuItem111,\
self.m_menuItem2,self.m_menuItem1,self.m_menuItem25,\
self.m_menuItem26,self.m_button15,self.m_button16]:
button.Enable(True)
if event.data is None:
# Thread aborted (using our convention of None return)
print('GPIB data aborted'), time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
else:
# Process results here
print 'GPIB Result: %s'%event.data, time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
if event.data == "UNSAFE":
self.m_button12.SetBackgroundColour(wx.Colour(255, 0, 0))
# In either event, the worker is done
self.worker1 = None
self.m_grid3.EnableEditing(True)
def OnGetInstruments(self, event):
rm = self.inst_bus.ResourceManager()#new Visa
try:
#check = self.inst_bus.get_instruments_list()
check = rm.list_resources()#new Visa
except self.inst_bus.VisaIOError:
check = "visa error"
self.m_textCtrl8.SetValue(repr(check))
self.m_panel7.SendSizeEvent()#forces textCtrl8 to resize to content
def OnRefreshInstruments(self, event):
"""
Adds all active instrument addresses to the drop down selection for the instruments.
"""
rm = self.inst_bus.ResourceManager()#new Visa
try:
resources = rm.list_resources()
self.Meteraddress.Clear()
self.Saddress.Clear()
self.Xaddress.Clear()
for address in resources:
self.Meteraddress.Append(address)
self.Saddress.Append(address)
self.Xaddress.Append(address)
except self.inst_bus.VisaIOError:
resources = "visa error"
#self.m_textCtrl8.SetValue(repr(check))
def OnInterfaceClear(self, event):
rm = self.inst_bus.ResourceManager()#new Visa
#self.inst_bus.Gpib().send_ifc()
bus = rm.open_resource('GPIB::INTFC')#opens the GPIB interface
bus.send_ifc()
def OnSendTestCommand(self, event):
"""
Sends a test command to the selected instrument using doSend.
"""
name = self.m_comboBox8.GetValue()
if name == 'Meter':
address = self.Meteraddress.GetValue()
self.doOnSend(address)
elif name == 'Reference source (S)' :
address = self.Saddress.GetValue()
self.doOnSend(address)
elif name == 'To calibrate (X)':
address = self.Xaddress.GetValue()
self.doOnSend(address)
else:
self.m_textCtrl23.AppendText('select instrument\n')
def doOnSend(self,address):
""" sends the commend to the address specified,
creates a new visa resource manager."""
try:
command = self.m_textCtrl18.GetValue()
rm = self.inst_bus.ResourceManager()#new Visa
instrument = rm.open_resource(address)
instrument.write(command)
self.m_textCtrl23.AppendText(command+'\n')
except self.inst_bus.VisaIOError:
self.m_textCtrl23.AppendText('Failed to send\n')
def OnReadTestCommand(self, event):
"""
Reads from whatever instrument is selected using doRead.
Will fail if it finds nothing on the instrument bus.
"""
instrument = self.m_comboBox8.GetValue()
if instrument == 'Meter':
address = self.Meteraddress.GetValue()
self.doRead(address)
elif instrument == 'Reference source (S)' :
address = self.Saddress.GetValue()
self.doRead(address)
elif instrument == 'To calibrate (X)':
address = self.Xaddress.GetValue()
self.doRead(address)
else:
self.m_textCtrl23.AppendText('select instrument\n')
def doRead(self,address):
"""reads from the specified address"""
rm = self.inst_bus.ResourceManager()#new Visa
instrument = rm.open_resource(address)
try:
value = instrument.read_raw()
self.m_textCtrl23.AppendText(repr(value)+'\n')
return
except self.inst_bus.VisaIOError:
self.m_textCtrl23.WriteText('Failed to read\n')
return
def OnHelp(self,event):
dlg = HelpBox(None)
html = dlg.m_htmlWin1
name = 'Manual.html'
html.LoadPage(name)
dlg.Show()
def OnVal(self,event):
if self.Validate == False:
self.Validate = True
def OnNoVal(self, event):
if self.Validate == True:
self.Validate = False
def OnAbout(self,event):
info = wx.AboutDialogInfo()
info = wx.AboutDialogInfo()
info.SetName('Ref step')
info.SetVersion('2.0.0')
info.SetDescription("description")
info.SetCopyright('(C) 2017-2018 Measurement Standards Laboratory of New Zealand')
info.SetWebSite('http://www.measurement.govt.nz/')
info.SetLicence("Use it well")
info.AddDeveloper('some code monkey(s)')
wx.AboutBox(info)
def OnClose(self, event):
"""
Make sure threads not running if frame is closed before stopping everything.
Seems to generate errors, but at least the threads do get stopped!
The delay after stopping the threads need to be longer than the time for
a thread to normally complete? Since thread needs to be able to
post event back to the main frame.
"""
if self.worker1: #stop main GPIB thread
self.worker1.abort()
time.sleep(0.3)
self.Destroy()
class HelpBox (wx.Frame):
    """Simple frame holding a single HtmlWindow; used by OnHelp to
    display the HTML manual."""
    def __init__(self, parent):
        wx.Frame.__init__ (self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size(500,300), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL)
        self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)
        # One-cell grid-bag sizer containing only the HTML viewer.
        gbSizer4 = wx.GridBagSizer(0, 0)
        gbSizer4.SetFlexibleDirection(wx.BOTH)
        gbSizer4.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
        self.m_htmlWin1 = wx.html.HtmlWindow(self, wx.ID_ANY, wx.DefaultPosition, wx.Size(1500,1250), wx.html.HW_SCROLLBAR_AUTO)
        gbSizer4.Add(self.m_htmlWin1, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL, 5)
        self.SetSizer(gbSizer4)
        self.Layout()
        self.Centre(wx.BOTH)
    def __del__(self):
        pass
class Confirmation (wx.Frame):
    """
    Creates window to remind user to perform a DC Zero calibration on the instruments.
    User can either confirm that instruments are calibrated or can decide it is not
    neccesary for the next run of tests.
    """
    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY, 'Sources not calibrated?',size=(800,130))
        panel=wx.Panel(self, -1)
        msg = "Instruments need to undergo DC Zero Calibration. If already calibrated, ensure 'Instruments are Zero Calibrated' checkbox is filled."
        instructions = wx.StaticText(panel, label=msg)
        # NOTE(review): both buttons are bound to the same onClose
        # handler, so the user's choice is not recorded anywhere --
        # confirm that is intentional.
        closeBtn = wx.Button(panel, label="Instruments have been DC zero calibrated")
        closeBtn.Bind(wx.EVT_BUTTON, self.onClose)
        closeBtn2= wx.Button(panel, label="DC zero calibration is not required")
        closeBtn2.Bind(wx.EVT_BUTTON, self.onClose)
        # Vertical layout: message on top, then the two buttons.
        sizer = wx.BoxSizer(wx.VERTICAL)
        flags = wx.ALL|wx.CENTER
        sizer.Add(instructions, 0, flags, 5)
        sizer.Add(closeBtn, 0, flags, 5)
        sizer.Add(closeBtn2, 0, flags, 5)
        panel.SetSizer(sizer)
    def onClose(self, event):
        # Shared handler: simply dismiss the window.
        self.Close()
if __name__ == "__main__":
app = wx.App()
GraphFrame(None)
app.MainLoop()
| [
"HamishGibb@ihug.co.nz"
] | HamishGibb@ihug.co.nz |
0941d9add42c709ce40745c1df7036b78044b5fb | 08bac92b1741c0b2e106935bab47ff65b309123c | /0x0B-python-input_output/8-load_from_json_file.py | aee2a9dbcc538407ae1ce39d4af4c0c30fd1af7a | [] | no_license | RoMalms10/holbertonschool-higher_level_programming | 5702dbcc17156b66b472df79eddb55baac2613aa | aebff20e55c7fe07e9e3fb1ff33dd65d17d8ee1f | refs/heads/master | 2021-09-14T10:10:56.680309 | 2018-05-11T17:59:40 | 2018-05-11T17:59:40 | 113,100,806 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | #!/usr/bin/python3
""" My Module for converting from JSON to Python
"""
import json
def load_from_json_file(filename):
""" Load JSON into Python Object from a file
Args:
filename (str): filename to read from
"""
with open(filename, "r", encoding="UTF-8") as f:
return(json.load(f))
| [
"156@holbertonschool.com"
] | 156@holbertonschool.com |
54a555871897be0c8edfc959c7d6677b4d5b256e | 8245a3702d11d0c4fdf1073a873803b8b8463306 | /pymclevel/items.py | 0683f5dfed314e85a6d71a3d944e2dbe72e09d14 | [
"ISC",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | conicalflask/minecraft.print | 5cb7fa6c9c7498a3939d9789af2c317d4718a657 | ec2e8d1c376da131e71b1514c622d21731754800 | refs/heads/master | 2021-01-18T07:25:04.596718 | 2012-02-17T10:48:01 | 2012-02-17T10:48:01 | 3,468,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,154 | py |
items_txt = """
:version 18
# Blocks
# ID NAME FILE CORDS DAMAGE
1 Stone terrain.png 1,0
2 Grass terrain.png 3,0
3 Dirt terrain.png 2,0
4 Cobblestone terrain.png 0,1
5 Wooden_Planks terrain.png 4,0
6 Sapling terrain.png 15,0 0
6 Spruce_Sapling terrain.png 15,3 1
6 Birch_Sapling terrain.png 15,4 2
7 Bedrock terrain.png 1,1
8 Water terrain.png 15,13
9 Still_Water terrain.png 15,13
10 Lava terrain.png 15,15
11 Still_Lava terrain.png 15,15
12 Sand terrain.png 2,1
13 Gravel terrain.png 3,1
14 Gold_Ore terrain.png 0,2
15 Iron_Ore terrain.png 1,2
16 Coal_Ore terrain.png 2,2
17 Wood terrain.png 4,1 0
17 Dark_Wood terrain.png 4,7 1
17 Birch_Wood terrain.png 5,7 2
18 Leaves terrain.png 4,3
19 Sponge terrain.png 0,3
20 Glass terrain.png 1,3
21 Lapis_Lazuli_Ore terrain.png 0,10
22 Lapis_Lazuli_Block terrain.png 0,9
23 Dispenser terrain.png 14,2
24 Sandstone terrain.png 0,12
25 Note_Block terrain.png 10,4
26 Bed_Block terrain.png 6,8
27 Powered_Rail terrain.png 3,10
28 Detector_Rail terrain.png 3,12
35 Wool terrain.png 0,4 0
35 Orange_Wool terrain.png 2,13 1
35 Magenta_Wool terrain.png 2,12 2
35 Light_Blue_Wool terrain.png 2,11 3
35 Yellow_Wool terrain.png 2,10 4
35 Lime_Wool terrain.png 2,9 5
35 Pink_Wool terrain.png 2,8 6
35 Gray_Wool terrain.png 2,7 7
35 Light_Gray_Wool terrain.png 1,14 8
35 Cyan_Wool terrain.png 1,13 9
35 Purple_Wool terrain.png 1,12 10
35 Blue_Wool terrain.png 1,11 11
35 Brown_Wool terrain.png 1,10 12
35 Green_Wool terrain.png 1,9 13
35 Red_Wool terrain.png 1,8 14
35 Black_Wool terrain.png 1,7 15
37 Flower terrain.png 13,0
38 Rose terrain.png 12,0
39 Brown_Mushroom terrain.png 13,1
40 Red_Mushroom terrain.png 12,1
41 Block_of_Gold terrain.png 7,1
42 Block_of_Iron terrain.png 6,1
43 Double_Stone_Slab terrain.png 5,0 0
43 Double_Sandstone_Slab terrain.png 0,12 1
43 Double_Wooden_Slab terrain.png 4,0 2
43 Double_Stone_Slab terrain.png 0,1 3
44 Stone_Slab special.png 0,0 0
44 Sandstone_Slab special.png 1,2 1
44 Wooden_Slab special.png 2,2 2
44 Stone_Slab special.png 3,2 3
45 Brick terrain.png 7,0
46 TNT terrain.png 8,0
47 Bookshelf terrain.png 3,2
48 Moss_Stone terrain.png 4,2
49 Obsidian terrain.png 5,2
50 Torch terrain.png 0,5
51 Fire special.png 1,0
52 Monster_Spawner terrain.png 1,4
53 Wooden_Stairs special.png 3,0
54 Chest terrain.png 11,1
55 Redstone_Dust terrain.png 4,5
56 Diamond_Ore terrain.png 2,3
57 Block_of_Diamond terrain.png 8,1
58 Workbench terrain.png 12,3
59 Crops terrain.png 15,5
60 Farmland terrain.png 7,5
61 Furnace terrain.png 12,2
62 Lit_Furnace terrain.png 13,3
63 Sign_Block terrain.png 0,0
64 Wooden_Door_Block terrain.png 1,6
65 Ladder terrain.png 3,5
66 Rail terrain.png 0,8
67 Stone_Stairs special.png 4,0
68 Wall_Sign terrain.png 4,0
69 Lever terrain.png 0,6
70 Stone_Pressure_Plate special.png 0,1
71 Iron_Door_Block terrain.png 2,6
72 Wooden_Pressure_Plate special.png 1,1
73 Redstone_Ore terrain.png 3,3
74 Glowing_Redstone_Ore terrain.png 3,3
75 Redstone_Torch_(off) terrain.png 3,7
76 Redstone_Torch terrain.png 3,6
77 Button special.png 2,1
78 Snow_Layer special.png 3,1
79 Ice terrain.png 3,4
80 Snow terrain.png 2,4
81 Cactus terrain.png 6,4
82 Clay terrain.png 8,4
83 Sugar_Cane terrain.png 9,4
84 Jukebox terrain.png 10,4
85 Fence special.png 2,0
86 Pumpkin terrain.png 7,7
87 Netherrack terrain.png 7,6
88 Soul_Sand terrain.png 8,6
89 Glowstone terrain.png 9,6
90 Portal special.png 4,1
91 Jack-o'-lantern terrain.png 8,7
92 Cake special.png 0,2
93 Repeater_Block_(off) terrain.png 3,8
94 Repeater_Block terrain.png 3,9
# Items
# ID NAME FILE CORDS DAMAGE
256 Iron_Shovel items.png 2,5 +250
257 Iron_Pickaxe items.png 2,6 +250
258 Iron_Axe items.png 2,7 +250
259 Flint_and_Steel items.png 5,0 +64
260 Apple items.png 10,0 x1
261 Bow items.png 5,1
262 Arrow items.png 5,2
263 Coal items.png 7,0 0
263 Charcoal items.png 7,0 1
264 Diamond items.png 7,3
265 Iron_Ingot items.png 7,1
266 Gold_Ingot items.png 7,2
267 Iron_Sword items.png 2,4 +250
268 Wooden_Sword items.png 0,4 +59
269 Wooden_Shovel items.png 0,5 +59
270 Wooden_Pickaxe items.png 0,6 +59
271 Wooden_Axe items.png 0,7 +59
272 Stone_Sword items.png 1,4 +131
273 Stone_Shovel items.png 1,5 +131
274 Stone_Pickaxe items.png 1,6 +131
275 Stone_Axe items.png 1,7 +131
276 Diamond_Sword items.png 3,4 +1561
277 Diamond_Shovel items.png 3,5 +1561
278 Diamond_Pickaxe items.png 3,6 +1561
279 Diamond_Axe items.png 3,7 +1561
280 Stick items.png 5,3
281 Bowl items.png 7,4 x1
282 Mushrom_Stew items.png 8,4 x1
283 Golden_sword items.png 4,4 +32
284 Golden_shovel items.png 4,5 +32
285 Golden_pickaxe items.png 4,6 +32
286 Golden_axe items.png 4,7 +32
287 String items.png 8,0
288 Feather items.png 8,1
289 Gunpowder items.png 8,2
290 Wooden_Hoe items.png 0,8 +59
291 Stone_Hoe items.png 1,8 +131
292 Iron_Hoe items.png 2,8 +250
293 Diamond_Hoe items.png 3,8 +1561
294 Golden_hoe items.png 4,8 +32
295 Seeds items.png 9,0
296 Wheat items.png 9,1
297 Bread items.png 9,2 x1
298 Leather_Cap items.png 0,0 +34
299 Leather_Tunic items.png 0,1 +48
300 Leather_Pants items.png 0,2 +46
301 Leather_Boots items.png 0,3 +40
302 Chainmail_Helmet items.png 1,0 +68
303 Chainmail_Chestplate items.png 1,1 +96
304 Chainmail_Leggings items.png 1,2 +92
305 Chainmail_Boots items.png 1,3 +80
306 Iron_Helmet items.png 2,0 +136
307 Iron_Chestplate items.png 2,1 +192
308 Iron_Leggings items.png 2,2 +184
309 Iron_Boots items.png 2,3 +160
310 Diamond_Helmet items.png 3,0 +272
311 Diamond_Chestplate items.png 3,1 +384
312 Diamond_Leggings items.png 3,2 +368
313 Diamond_Boots items.png 3,3 +320
314 Golden_Helmet items.png 4,0 +68
315 Golden_Chestplate items.png 4,1 +96
316 Golden_Leggings items.png 4,2 +92
317 Golden_Boots items.png 4,3 +80
318 Flint items.png 6,0
319 Raw_Porkchop items.png 7,5 x1
320 Porkchop items.png 8,5 x1
321 Painting items.png 10,1 x1
322 Golden_Apple items.png 11,0 x1
323 Sign items.png 10,2 x1
324 Wooden_Door items.png 11,2 x1
325 Bucket items.png 10,4 x1
326 Water_Bucket items.png 11,4 x1
327 Lava_Bucket items.png 12,4 x1
328 Minecart items.png 7,8 x1
329 Saddle items.png 8,6 x1
330 Iron_Door items.png 12,2 x1
331 Redstone items.png 8,3
332 Snowball items.png 14,0 x16
333 Boat items.png 8,8 x1
334 Leather items.png 7,6
335 Milk items.png 13,4
336 Brick items.png 6,1
337 Clay items.png 9,3
338 Sugar_Cane items.png 11,1
339 Paper items.png 10,3
340 Book items.png 11,3
341 Slimeball items.png 14,1
342 Minecart_with_Chest items.png 7,9 x1
343 Minecart_with_Furnace items.png 7,10 x1
344 Egg items.png 12,0
345 Compass items.png 6,3 x1
346 Fishing_Rod items.png 5,4 +64
347 Clock items.png 6,4 x1
348 Glowstone_Dust items.png 9,4
349 Raw_Fish items.png 9,5 x1
350 Cooked_Fish items.png 10,5 x1
351 Ink_Sack items.png 14,4 0
351 Rose_Red items.png 14,5 1
351 Cactus_Green items.png 14,6 2
351 Coco_Beans items.png 14,7 3
351 Lapis_Lazuli items.png 14,8 4
351 Purple_Dye items.png 14,9 5
351 Cyan_Dye items.png 14,10 6
351 Light_Gray_Dye items.png 14,11 7
351 Gray_Dye items.png 15,4 8
351 Pink_Dye items.png 15,5 9
351 Lime_Dye items.png 15,6 10
351 Dandelion_Yellow items.png 15,7 11
351 Light_Blue_Dye items.png 15,8 12
351 Magenta_Dye items.png 15,9 13
351 Orange_Dye items.png 15,10 14
351 Bone_Meal items.png 15,11 15
352 Bone items.png 12,1
353 Sugar items.png 13,0
354 Cake items.png 13,1 x1
355 Bed items.png 13,2 x1
356 Redstone_Repeater items.png 6,5
357 Cookie items.png 12,5 x8
2256 Gold_Music_Disk items.png 0,15 x1
2257 Green_Music_Disk items.png 1,15 x1
# Groups
# NAME ICON ITEMS
~ Blocks 2 1,2,3,12,24,44~1,13,82,4,48,67,44~3,17,5,53,44~2,47,20,44~0,49,79,80,78,7,45,19,87,88
~ Plants 6 18,81,86,91,6,37,38,39,40
~ Tools 257 269,270,271,290,273,274,275,291,256,257,258,292,277,278,279,293,284,285,286,294,259,346
~ Weapons 267 268,272,267,276,283,261,262,332
~ Armor 303 298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317
~ Food 297 260,322,281,282,295,296,297,319,320,344,349,350,353,354,357
~ Ores 56 16,15,14,56,73,21,263,265,266,264,42,41,57,22,89,348
~ Special 54 8,10,90,58,54,61,23,25,46,52,84,2256,2257,51,50,85,321,323,324,330,355,325,326,327,335,345,347
~ Items 341 280,287,288,289,334,341,318,337,336,338,339,340,352
~ Movement 328 65,66,27,28,328,342,343,333,329
~ Logic 76 331,76,356,69,70,72,77
~ Wool 35 35,351
"""
class ItemType (object):
def __init__(self, id, name, imagefile = None, imagecoords = None, maxdamage = 0, damagevalue = 0, stacksize = 64):
self.id=id
self.name=name
self.imagefile=imagefile
self.imagecoords=imagecoords
self.maxdamage=maxdamage
def __repr__(self):
return "ItemType({0}, '{1}')".format(self.id, self.name)
def __str__(self):
return "ItemType {0}: {1}".format(self.id, self.name)
class Items (object):
items_txt = items_txt
def __init__(self, filename = None):
if filename is None:
items_txt = self.items_txt
else:
try:
with file(filename) as f:
items_txt = f.read()
except Exception, e:
print "Error reading items.txt: ", e;
print "Using internal data."
items_txt = self.items_txt
self.itemtypes = {};
for line in items_txt.split("\n"):
try:
line = line.strip()
if len(line) == 0: continue
if line[0] == "#": continue;
if line[0] == "~": continue; #groups
stacksize = 64
damagevalue = None
maxdamage = 0
fields = line.split();
if len(fields) >= 4:
maxdamage = None;
id, name, imagefile, imagecoords = fields[0:4]
if len(fields) > 4:
info = fields[4]
if info[0] == 'x':
stacksize = int(info[1:])
elif info[0] == '+':
maxdamage = int(info[1:])
else:
damagevalue = int(info)
id = int(id);
name = name.replace("_", " ");
imagecoords = imagecoords.split(",");
self.itemtypes[(id, damagevalue)] = ItemType(id, name, imagefile, imagecoords, maxdamage, damagevalue, stacksize)
except Exception, e:
print "Error reading line:", e
print "Line: ", line
print
self.names = dict((item.name, item.id) for item in self.itemtypes.itervalues())
def findItem(self, id=0, damage=None):
item = self.itemtypes.get((id, damage))
if item: return item
item = self.itemtypes.get((id, None))
if item: return item
item = self.itemtypes.get((id, 0))
if item: return item
raise ItemNotFound, "Item {0}:{1} not found".format(id, damage)
class ItemNotFound(KeyError): pass
items = Items();
| [
"codysumter@gmail.com"
] | codysumter@gmail.com |
e6c5af9fa763a645f25924e9752c8bc791ce93e8 | f7a45dae004686c3a5f40c838986b10b869b5ed4 | /vnpy/trader/constant.py | 4d81760fdc4f5e2259460a8b77ab4a4e2d55bfe3 | [
"MIT"
] | permissive | ralex1975/vnpy2.1.3 | 31da87527136be3a7dc8b4ce03bc52d00b9f811d | 728f9bf5baa304e13fd8c344c106b5eea82f9b49 | refs/heads/master | 2022-11-12T03:16:54.600977 | 2020-07-02T08:49:12 | 2020-07-02T08:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,773 | py | """
General constant string used in VN Trader.
"""
from enum import Enum
class Direction(Enum):
"""
Direction of order/trade/position.
"""
LONG = "多"
SHORT = "空"
NET = "净"
class Offset(Enum):
"""
Offset of order/trade.
"""
NONE = ""
OPEN = "开"
CLOSE = "平"
CLOSETODAY = "平今"
CLOSEYESTERDAY = "平昨"
class Status(Enum):
"""
Order status.
"""
SUBMITTING = "提交中"
NOTTRADED = "未成交"
PARTTRADED = "部分成交"
ALLTRADED = "全部成交"
CANCELLED = "已撤销"
REJECTED = "拒单"
class Product(Enum):
"""
Product class.
"""
EQUITY = "股票"
FUTURES = "期货"
OPTION = "期权"
INDEX = "指数"
FOREX = "外汇"
SPOT = "现货"
ETF = "ETF"
BOND = "债券"
WARRANT = "权证"
SPREAD = "价差"
FUND = "基金"
class OrderType(Enum):
"""
Order type.
"""
LIMIT = "限价"
MARKET = "市价"
STOP = "STOP"
FAK = "FAK"
FOK = "FOK"
RFQ = "询价"
class OptionType(Enum):
"""
Option type.
"""
CALL = "看涨期权"
PUT = "看跌期权"
class Exchange(Enum):
"""
Exchange.
"""
# Chinese
CFFEX = "CFFEX" # China Financial Futures Exchange
SHFE = "SHFE" # Shanghai Futures Exchange
CZCE = "CZCE" # Zhengzhou Commodity Exchange
DCE = "DCE" # Dalian Commodity Exchange
INE = "INE" # Shanghai International Energy Exchange
SSE = "SSE" # Shanghai Stock Exchange
SZSE = "SZSE" # Shenzhen Stock Exchange
SGE = "SGE" # Shanghai Gold Exchange
WXE = "WXE" # Wuxi Steel Exchange
CFETS = "CFETS" # China Foreign Exchange Trade System
# Global
SMART = "SMART" # Smart Router for US stocks
NYSE = "NYSE" # New York Stock Exchnage
NASDAQ = "NASDAQ" # Nasdaq Exchange
NYMEX = "NYMEX" # New York Mercantile Exchange
COMEX = "COMEX" # a division of theNew York Mercantile Exchange
GLOBEX = "GLOBEX" # Globex of CME
IDEALPRO = "IDEALPRO" # Forex ECN of Interactive Brokers
CME = "CME" # Chicago Mercantile Exchange
ICE = "ICE" # Intercontinental Exchange
SEHK = "SEHK" # Stock Exchange of Hong Kong
HKFE = "HKFE" # Hong Kong Futures Exchange
HKSE = "HKSE" # Hong Kong Stock Exchange
SGX = "SGX" # Singapore Global Exchange
CBOT = "CBT" # Chicago Board of Trade
CBOE = "CBOE" # Chicago Board Options Exchange
CFE = "CFE" # CBOE Futures Exchange
DME = "DME" # Dubai Mercantile Exchange
EUREX = "EUX" # Eurex Exchange
APEX = "APEX" # Asia Pacific Exchange
LME = "LME" # London Metal Exchange
BMD = "BMD" # Bursa Malaysia Derivatives
TOCOM = "TOCOM" # Tokyo Commodity Exchange
EUNX = "EUNX" # Euronext Exchange
KRX = "KRX" # Korean Exchange
OANDA = "OANDA" # oanda.com
# CryptoCurrency
BITMEX = "BITMEX"
OKEX = "OKEX"
HUOBI = "HUOBI"
BITFINEX = "BITFINEX"
BINANCE = "BINANCE"
BYBIT = "BYBIT" # bybit.com
COINBASE = "COINBASE"
DERIBIT = "DERIBIT"
GATEIO = "GATEIO"
BITSTAMP = "BITSTAMP"
# Special Function
LOCAL = "LOCAL" # For local generated data
class Currency(Enum):
"""
Currency.
"""
USD = "USD"
HKD = "HKD"
CNY = "CNY"
class Interval(Enum):
"""
Interval of bar data.
"""
MINUTE = "1m"
HOUR = "1h"
DAILY = "d"
WEEKLY = "w"
| [
"53517184+AITrading2020@users.noreply.github.com"
] | 53517184+AITrading2020@users.noreply.github.com |
d909b0ffaf5b5eea94ac25fcc48980690ad3d34b | 255e3561a1b06e870ad99f17cd77af0e793db003 | /get_img.py | a904e9818e77a765fb825384de2cb4cf85bad60c | [] | no_license | dtowstar/flask-vue-crub | 2bb423ee92efe1a3e7774ce54ca85f49fe6ed29b | 662699e3f8f942fce3b798ebd0c11a1456df1686 | refs/heads/master | 2022-12-26T13:55:45.740048 | 2019-06-17T14:34:02 | 2019-06-17T14:34:02 | 183,728,645 | 4 | 9 | null | 2022-12-09T21:39:21 | 2019-04-27T04:37:55 | Python | UTF-8 | Python | false | false | 723 | py | from bs4 import BeautifulSoup
import requests
import shutil
def save_img(url):
# url: https://tixcraft.com/activity/detail/19_ERIC'
img_src = get_img_src(url)
if(img_src == ""):
return img_src
else:
res = requests.get(img_src, stream=True)
if res.status_code == 200:
print(img_src)
return img_src
def get_img_src(url):
# url: https://tixcraft.com/activity/detail/19_ERIC'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
try:
img_src = soup.find('img', alt='示意圖僅供參考示意1')['src']
except:
img_src = ""
return img_src
# save_img('https://tixcraft.com/activity/detail/19_TPENEPAL')
| [
"kenca16358@gmail.com"
] | kenca16358@gmail.com |
a4d7ea1a9844d84ee9f32f7a4f49aa8dd0e16d96 | f1c7e33a41d3ac5876bf02bd192f05d47e6385b9 | /static/libs/PigeonDataCrawler/bin/flask | a57f7f2df89556abc790c36346a4c62435a403e6 | [] | no_license | ro1963855/pigeon_racing | 424e0343fbc948398935f2d1fbab6e4676b50931 | f9fd0483faeefaebbdd709dd1b6cd904e5c4b6a1 | refs/heads/master | 2021-01-02T08:44:40.074193 | 2017-09-07T01:38:05 | 2017-09-07T01:38:05 | 99,059,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/home/dandy/pigeon_racing/static/libs/PigeonDataCrawler/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"ro1963855@gmail.com"
] | ro1963855@gmail.com | |
93c7727d6a7ad4e295b49329ba5f87a0195089d5 | f3a22cc6b2db1af61f478721b1faf933ce2bf9cd | /migrations/versions/89d7c991271c_.py | d85a675e1ba4ac0f58bdc440e1f289241985685e | [] | no_license | soumya707/wishnet-in | 712897b08bf9616c9fd6efa660e0ea47053d8c2e | 87802ca93e003ee376e50461fe4fb8af197a69a7 | refs/heads/master | 2020-12-30T06:09:18.957667 | 2020-01-28T20:36:55 | 2020-01-28T20:36:55 | 238,886,212 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,122 | py | """empty message
Revision ID: 89d7c991271c
Revises: 801012041cf3
Create Date: 2019-09-08 13:17:46.299705
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '89d7c991271c'
down_revision = '801012041cf3'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def upgrade_users():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('customer_login', sa.Column('password_hash', sa.String(length=100), nullable=True))
op.drop_column('customer_login', 'password')
# ### end Alembic commands ###
def downgrade_users():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('customer_login', sa.Column('password', mysql.VARCHAR(length=100), nullable=True))
op.drop_column('customer_login', 'password_hash')
# ### end Alembic commands ###
def upgrade_assets():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_assets():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def upgrade_connection():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_connection():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def upgrade_recharge():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_recharge():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| [
"synchonmandal@gmail.com"
] | synchonmandal@gmail.com |
46abac533c1ec9a572a565d59cc930bd692ad94d | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L66/66-77_MD_NVT_rerun/set_7.py | 9c5ad84b39e109861815092ca2f3a6a6735a91e4 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L66/MD_NVT_rerun/ti_one-step/66_77/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_7.in'
temp_pbs = filesdir + 'temp_7.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_7.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_7.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
a9cad12e0ab2aaafb4dab18f953262b068081272 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_virtual_network_taps_operations.py | aa759ad03ff8ad071e1733e6df02900161dfbadb | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 29,365 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkTapsOperations:
"""VirtualNetworkTapsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
tap_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
tap_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified virtual network tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of the virtual network tap.
:type tap_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
tap_name=tap_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
async def get(
self,
resource_group_name: str,
tap_name: str,
**kwargs
) -> "_models.VirtualNetworkTap":
"""Gets information about the specified virtual network tap.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param tap_name: The name of virtual network tap.
:type tap_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkTap, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.VirtualNetworkTap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
tap_name: str,
parameters: "_models.VirtualNetworkTap",
**kwargs
) -> "_models.VirtualNetworkTap":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkTap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'tapName': self._serialize.url("tap_name", tap_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkTap')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'} # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    tap_name: str,
    parameters: "_models.VirtualNetworkTap",
    **kwargs
) -> AsyncLROPoller["_models.VirtualNetworkTap"]:
    """Creates or updates a Virtual Network Tap.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param tap_name: The name of the virtual network tap.
    :type tap_name: str
    :param parameters: Parameters supplied to the create or update virtual network tap operation.
    :type parameters: ~azure.mgmt.network.v2018_11_01.models.VirtualNetworkTap
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualNetworkTap or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkTap]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO-specific options before **kwargs is forwarded to the initial call.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTap"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Not resuming: issue the initial PUT. `cls=lambda x,y,z: x` keeps the
        # raw pipeline response so the poller can drive the operation itself.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            tap_name=tap_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These were consumed by the initial call; remove them so the polling
    # method does not receive them again via **kwargs.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the final response, honoring a custom `cls` if given.
        deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'tapName': self._serialize.url("tap_name", tap_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Select the polling strategy: ARM long-running-operation polling by
    # default, no polling, or a caller-provided polling method instance.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore
async def _update_tags_initial(
    self,
    resource_group_name: str,
    tap_name: str,
    tap_parameters: "_models.TagsObject",
    **kwargs
) -> "_models.VirtualNetworkTap":
    """Send the PATCH request that updates the tags on a virtual network tap
    and deserialize the service response into a VirtualNetworkTap model."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTap"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Fill the URL template with the URL-encoded path arguments.
    url = self._client.format_url(
        self._update_tags_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        tapName=self._serialize.url("tap_name", tap_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    # Query string and headers for the PATCH request.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }

    request = self._client.patch(
        url,
        query_parameters,
        header_parameters,
        content=self._serialize.body(tap_parameters, 'TagsObject'),
    )
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore
async def begin_update_tags(
    self,
    resource_group_name: str,
    tap_name: str,
    tap_parameters: "_models.TagsObject",
    **kwargs
) -> AsyncLROPoller["_models.VirtualNetworkTap"]:
    """Updates the tags of a VirtualNetworkTap.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param tap_name: The name of the tap.
    :type tap_name: str
    :param tap_parameters: Parameters supplied to update VirtualNetworkTap tags.
    :type tap_parameters: ~azure.mgmt.network.v2018_11_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VirtualNetworkTap or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkTap]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Pop LRO-specific options before **kwargs is forwarded to the initial call.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTap"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Not resuming: issue the initial PATCH. `cls=lambda x,y,z: x` keeps
        # the raw pipeline response so the poller can drive the operation.
        raw_result = await self._update_tags_initial(
            resource_group_name=resource_group_name,
            tap_name=tap_name,
            tap_parameters=tap_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These were consumed by the initial call; remove them so the polling
    # method does not receive them again via **kwargs.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the final response, honoring a custom `cls` if given.
        deserialized = self._deserialize('VirtualNetworkTap', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'tapName': self._serialize.url("tap_name", tap_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Select the polling strategy: ARM long-running-operation polling by
    # default, no polling, or a caller-provided polling method instance.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps/{tapName}'}  # type: ignore
def list_all(
    self,
    **kwargs
) -> AsyncIterable["_models.VirtualNetworkTapListResult"]:
    """Gets all the VirtualNetworkTaps in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkTapListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTapListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the templated
        # URL plus the api-version query parameter; later pages use the
        # service-supplied next_link verbatim (it already embeds its query).
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_all.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    async def extract_data(pipeline_response):
        # Deserialize one page; return (link to the next page or None, items).
        deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)
    async def get_next(next_link=None):
        # Fetch one page, raising an ARM-formatted error on non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworkTaps'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["_models.VirtualNetworkTapListResult"]:
    """Gets all the VirtualNetworkTaps in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualNetworkTapListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.VirtualNetworkTapListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkTapListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-11-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the templated
        # URL plus the api-version query parameter; later pages use the
        # service-supplied next_link verbatim (it already embeds its query).
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    async def extract_data(pipeline_response):
        # Deserialize one page; return (link to the next page or None, items).
        deserialized = self._deserialize('VirtualNetworkTapListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)
    async def get_next(next_link=None):
        # Fetch one page, raising an ARM-formatted error on non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkTaps'}  # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
6be5f68a574383e5d0e1d0d97d99a5a031b8915e | 360fd4e74f752dca1e915cde7d7638e51d8e27ee | /problem51.py | 2b700a0a061b24a4847d6f396cfbdca546b1f4be | [] | no_license | dsimpson1980/project_euler | 69718e516038093a34f7f0d0e9d9dc213d658bdc | d71c4739af41846a2821b568730c99271cb26eee | refs/heads/master | 2021-01-10T12:22:19.511550 | 2015-09-16T18:08:22 | 2015-09-16T18:08:22 | 36,770,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | """Prime digit replacements
Problem 51
By replacing the 1st digit of the 2-digit number *3, it turns out that six of
the nine possible values: 13, 23, 43, 53, 73, and 83, are all prime.
By replacing the 3rd and 4th digits of 56**3 with the same digit, this 5-digit
number is the first example having seven primes among the ten generated numbers,
yielding the family: 56003, 56113, 56333, 56443, 56663, 56773, and 56993.
Consequently 56003, being the first member of this family, is the smallest prime
with this property.
Find the smallest prime which, by replacing part of the number (not necessarily
adjacent digits) with the same digit, is part of an eight prime value family."""
from itertools import combinations
from tools import gen_primes
primes = gen_primes(1e6)

def replace(x, replace_pattern):
    """Count the primes in the digit-replacement family generated by `x`.

    The digits of the 3-digit number `x` are written at the positions given in
    `replace_pattern`; the remaining three of the six positions are all filled
    with the same digit j, for j in 0..9. Returns `(count, first_prime)`: the
    number of primes among the candidates and the smallest such prime.
    """
    x = str(x)
    count = 0
    first_prime = None
    for j in range(10):
        y = [j] * 6
        for n, idx in enumerate(replace_pattern):
            y[idx] = int(x[n])
        # Bug fix: a candidate with a leading 0 collapses to a 5-digit number
        # (int("020303") == 20303), so it is not a member of the 6-digit
        # family. Counting such a value when it happens to be prime would
        # inflate `count` past 8 and break the driver's family test.
        if y[0] == 0:
            continue
        y = int(''.join(map(str, y)))
        if y in primes:
            if first_prime is None:
                first_prime = y
            count += 1
        # Early exit: after trying digits 0..j we have j + 1 candidates and
        # `count` primes; more than two misses rules out an 8-prime family.
        # (The original `j - count > 2` undercounted misses by one.)
        if (j + 1) - count > 2:
            break
    return count, first_prime
first_prime = None
for pattern in combinations(range(6), 3):
for x in range(100, 999):
count, prime = replace(x, pattern)
if count == 8:
first_prime = prime
break
else:
continue
break
print 'first prime = %s' % first_prime
| [
"mapdes@gmail.com"
] | mapdes@gmail.com |
96fd673efea8bb48c5c92814a44410aaf13922cf | f1684396de5fc1a69259231a7103b42518db59fe | /allennlp/tango/step.py | b2f41ff3fb90fb09056898c92e24470bc2510715 | [
"Apache-2.0"
] | permissive | vikigenius/allennlp | 8eb3dad8aad4e8b0d40235cf8092bec2c25e92d9 | 3552842f4a804529ec221201bf8dd9163434bc13 | refs/heads/main | 2021-12-19T11:39:58.639380 | 2021-12-17T23:24:43 | 2021-12-17T23:24:43 | 193,206,207 | 0 | 0 | Apache-2.0 | 2021-12-16T13:04:31 | 2019-06-22T07:39:12 | Python | UTF-8 | Python | false | false | 28,977 | py | """
*AllenNLP Tango is an experimental API and parts of it might change or disappear
every time we release a new version.*
"""
import collections
import copy
import itertools
import json
import logging
import random
import re
import weakref
from abc import abstractmethod
from os import PathLike
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import (
Optional,
Any,
Set,
List,
Dict,
Type,
Union,
cast,
TypeVar,
Generic,
Iterable,
Tuple,
MutableMapping,
Iterator,
MutableSet,
OrderedDict,
Callable,
)
from allennlp.common.det_hash import det_hash
try:
from typing import get_origin, get_args
except ImportError:
def get_origin(tp): # type: ignore
return getattr(tp, "__origin__", None)
def get_args(tp): # type: ignore
return getattr(tp, "__args__", ())
from allennlp.common import Registrable, Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.from_params import (
pop_and_construct_arg,
infer_method_params,
infer_constructor_params,
)
from allennlp.common.logging import AllenNlpLogger
from allennlp.tango.format import Format, DillFormat
logger = logging.getLogger(__name__)
_version_re = re.compile("""^[a-zA-Z0-9]+$""")
T = TypeVar("T")
class StepCache(Registrable):
    """A mapping from instances of `Step` to the cached results of running those steps."""

    def __contains__(self, step: object) -> bool:
        """Generic membership test built on top of `__getitem__`.

        Subclasses that can answer this more cheaply should override it.
        """
        if isinstance(step, Step):
            try:
                self[step]
            except KeyError:
                return False
            return True
        return False

    @abstractmethod
    def __getitem__(self, step: "Step") -> Any:
        """Return the cached result for `step`."""
        raise NotImplementedError()

    @abstractmethod
    def __setitem__(self, step: "Step", value: Any) -> None:
        """Store the result for `step`; raises if the step is already cached."""
        raise NotImplementedError()

    @abstractmethod
    def __len__(self) -> int:
        """Return how many results this cache currently holds."""
        raise NotImplementedError()

    def path_for_step(self, step: "Step") -> Optional[Path]:
        """Return a location where a restartable step (e.g. an interrupted
        training job) may save intermediate state, or `None` when this cache
        cannot provide one."""
        return None
@StepCache.register("memory")
class MemoryStepCache(StepCache):
    """A `StepCache` that keeps step results in an in-memory dictionary."""

    def __init__(self):
        # Maps a step's unique ID to its computed result.
        self.cache: Dict[str, Any] = {}

    def __getitem__(self, step: "Step") -> Any:
        return self.cache[step.unique_id()]

    def __setitem__(self, step: "Step", value: Any) -> None:
        if step in self:
            raise ValueError(f"{step.unique_id()} is already cached! Will not overwrite.")
        if not step.cache_results:
            # Respect the step's cacheability flag instead of storing anyway.
            logger.warning("Tried to cache step %s despite being marked as uncacheable.", step.name)
            return
        self.cache[step.unique_id()] = value

    def __contains__(self, step: object):
        return isinstance(step, Step) and step.unique_id() in self.cache

    def __len__(self) -> int:
        return len(self.cache)


default_step_cache = MemoryStepCache()
@StepCache.register("directory")
class DirectoryStepCache(StepCache):
    """This is a `StepCache` that stores its results on disk, in the location given in `dir`.

    Every cached step gets a directory under `dir` with that step's `unique_id()`. In that
    directory we store the results themselves in some format according to the step's `FORMAT`,
    and we also write a `metadata.json` file that stores some metadata. The presence of
    `metadata.json` signifies that the cache entry is complete and has been written successfully.
    """

    # Maximum number of results kept in the in-memory strong (LRU) cache.
    LRU_CACHE_MAX_SIZE = 8

    def __init__(self, dir: Union[str, PathLike]):
        self.dir = Path(dir)
        self.dir.mkdir(parents=True, exist_ok=True)
        # We keep an in-memory cache as well so we don't have to de-serialize stuff
        # we happen to have in memory already.
        self.weak_cache: MutableMapping[str, Any] = weakref.WeakValueDictionary()
        # Not all Python objects can be referenced weakly, and even if they can they
        # might get removed too quickly, so we also keep an LRU cache.
        self.strong_cache: OrderedDict[str, Any] = collections.OrderedDict()

    def _add_to_cache(self, key: str, o: Any) -> None:
        # Insert `o` into both in-memory caches, evicting least-recently-used
        # strong entries beyond LRU_CACHE_MAX_SIZE.
        if hasattr(o, "__next__"):
            # We never cache iterators, because they are mutable, storing their current position.
            return
        self.strong_cache[key] = o
        self.strong_cache.move_to_end(key)
        while len(self.strong_cache) > self.LRU_CACHE_MAX_SIZE:
            # The first key in an OrderedDict is the least recently used one.
            del self.strong_cache[next(iter(self.strong_cache))]
        try:
            self.weak_cache[key] = o
        except TypeError:
            pass  # Many native Python objects cannot be referenced weakly, and they throw TypeError when you try

    def _get_from_cache(self, key: str) -> Optional[Any]:
        # Check the strong cache first (also refreshing its LRU position),
        # then fall back to the weak cache.
        result = self.strong_cache.get(key)
        if result is not None:
            self.strong_cache.move_to_end(key)
            return result
        try:
            return self.weak_cache[key]
        except KeyError:
            return None

    def __contains__(self, step: object) -> bool:
        # A step counts as cached if it is in either in-memory cache, or if its
        # metadata.json marker (written last, so it marks a complete entry)
        # exists on disk.
        if isinstance(step, Step):
            key = step.unique_id()
            if key in self.strong_cache:
                return True
            if key in self.weak_cache:
                return True
            metadata_file = self.path_for_step(step) / "metadata.json"
            return metadata_file.exists()
        else:
            return False

    def __getitem__(self, step: "Step") -> Any:
        key = step.unique_id()
        result = self._get_from_cache(key)
        if result is None:
            if step not in self:
                raise KeyError(step)
            # In-memory miss: deserialize from disk using the step's format,
            # and remember the result for next time.
            result = step.format.read(self.path_for_step(step))
            self._add_to_cache(key, result)
        return result

    def __setitem__(self, step: "Step", value: Any) -> None:
        location = self.path_for_step(step)
        location.mkdir(parents=True, exist_ok=True)
        metadata_location = location / "metadata.json"
        if metadata_location.exists():
            raise ValueError(f"{metadata_location} already exists! Will not overwrite.")
        temp_metadata_location = metadata_location.with_suffix(".temp")
        try:
            step.format.write(value, location)
            metadata = {
                "step": step.unique_id(),
                "checksum": step.format.checksum(location),
            }
            with temp_metadata_location.open("wt") as f:
                json.dump(metadata, f)
            self._add_to_cache(step.unique_id(), value)
            # Publish the entry last: the rename makes metadata.json appear
            # only after the results and checksum were written successfully.
            temp_metadata_location.rename(metadata_location)
        except:  # noqa: E722
            # Remove the partial marker so the entry is not mistaken for complete.
            temp_metadata_location.unlink(missing_ok=True)
            raise

    def __len__(self) -> int:
        # Count only complete entries, i.e. those with a metadata.json marker.
        return sum(1 for _ in self.dir.glob("*/metadata.json"))

    def path_for_step(self, step: "Step") -> Path:
        # Each step gets its own subdirectory named after its unique ID.
        return self.dir / step.unique_id()
class Step(Registrable, Generic[T]):
"""
This class defines one step in your experiment. To write your own step, just derive from this class
and overwrite the `run()` method. The `run()` method must have parameters with type hints.
`Step.__init__()` takes all the arguments we want to run the step with. They get passed
to `Step.run()` (almost) as they are. If the arguments are other instances of `Step`, those
will be replaced with the step's results before calling `run()`. Further, there are four special
parameters:
* `step_name` contains an optional human-readable name for the step. This name is used for
error messages and the like, and has no consequence on the actual computation.
* `cache_results` specifies whether the results of this step should be cached. If this is
`False`, the step is recomputed every time it is needed. If this is not set at all,
we cache if the step is marked as `DETERMINISTIC`, and we don't cache otherwise.
* `step_format` gives you a way to override the step's default format (which is given in `FORMAT`).
* `only_if_needed` specifies whether we can skip this step if no other step depends on it. The
default for this setting is to set it for all steps that don't have an explicit name.
"""
default_implementation = "ref"
DETERMINISTIC: bool = False
"""This describes whether this step can be relied upon to produce the same results every time
when given the same inputs. If this is `False`, the step can't be cached, and neither can any
step that depends on it."""
CACHEABLE: Optional[bool] = None
"""This provides a direct way to turn off caching. For example, a step that reads a HuggingFace
dataset doesn't need to be cached, because HuggingFace datasets already have their own caching
mechanism. But it's still a deterministic step, and all following steps are allowed to cache.
If it is `None`, the step figures out by itself whether it should be cacheable or not."""
VERSION: Optional[str] = None
"""This is optional, but recommended. Specifying a version gives you a way to tell AllenNLP that
a step has changed during development, and should now be recomputed. This doesn't invalidate
the old results, so when you revert your code, the old cache entries will stick around and be
picked up."""
FORMAT: Format = DillFormat("gz")
"""This specifies the format the results of this step will be serialized in. See the documentation
for `Format` for details."""
def __init__(
self,
step_name: Optional[str] = None,
cache_results: Optional[bool] = None,
step_format: Optional[Format] = None,
only_if_needed: Optional[bool] = None,
**kwargs,
):
self.logger = cast(AllenNlpLogger, logging.getLogger(self.__class__.__name__))
if self.VERSION is not None:
assert _version_re.match(
self.VERSION
), f"Invalid characters in version '{self.VERSION}'"
self.kwargs = kwargs
if step_format is None:
self.format = self.FORMAT
if isinstance(self.format, type):
self.format = self.format()
else:
self.format = step_format
self.unique_id_cache: Optional[str] = None
if step_name is None:
self.name = self.unique_id()
else:
self.name = step_name
if cache_results is True:
if not self.CACHEABLE:
raise ConfigurationError(
f"Step {self.name} is configured to use the cache, but it's not a cacheable step."
)
if not self.DETERMINISTIC:
logger.warning(
f"Step {self.name} is going to be cached despite not being deterministic."
)
self.cache_results = True
elif cache_results is False:
self.cache_results = False
elif cache_results is None:
c = (self.DETERMINISTIC, self.CACHEABLE)
if c == (False, None):
self.cache_results = False
elif c == (True, None):
self.cache_results = True
elif c == (False, False):
self.cache_results = False
elif c == (True, False):
self.cache_results = False
elif c == (False, True):
logger.warning(
f"Step {self.name} is set to be cacheable despite not being deterministic."
)
self.cache_results = True
elif c == (True, True):
self.cache_results = True
else:
assert False, "Step.DETERMINISTIC or step.CACHEABLE are set to an invalid value."
else:
raise ConfigurationError(
f"Step {self.name}'s cache_results parameter is set to an invalid value."
)
if step_name is None:
self.only_if_needed = True
else:
self.only_if_needed = not self.cache_results
if only_if_needed is not None:
self.only_if_needed = only_if_needed
self.work_dir_for_run: Optional[
Path
] = None # This is set only while the run() method runs.
@classmethod
def from_params(
cls: Type["Step"],
params: Params,
constructor_to_call: Callable[..., "Step"] = None,
constructor_to_inspect: Union[Callable[..., "Step"], Callable[["Step"], None]] = None,
existing_steps: Optional[Dict[str, "Step"]] = None,
step_name: Optional[str] = None,
**extras,
) -> "Step":
# Why do we need a custom from_params? Step classes have a run() method that takes all the
# parameters necessary to perform the step. The __init__() method of the step takes those
# same parameters, but each of them could be wrapped in another Step instead of being
# supplied directly. from_params() doesn't know anything about these shenanigans, so
# we have to supply the necessary logic here.
if constructor_to_call is not None:
raise ConfigurationError(
f"{cls.__name__}.from_params cannot be called with a constructor_to_call."
)
if constructor_to_inspect is not None:
raise ConfigurationError(
f"{cls.__name__}.from_params cannot be called with a constructor_to_inspect."
)
if existing_steps is None:
existing_steps = {}
if isinstance(params, str):
params = Params({"type": params})
if not isinstance(params, Params):
raise ConfigurationError(
"from_params was passed a `params` object that was not a `Params`. This probably "
"indicates malformed parameters in a configuration file, where something that "
"should have been a dictionary was actually a list, or something else. "
f"This happened when constructing an object of type {cls}."
)
as_registrable = cast(Type[Registrable], cls)
choice = params.pop_choice(
"type", choices=as_registrable.list_available(), default_to_first_choice=True
)
subclass, constructor_name = as_registrable.resolve_class_name(choice)
if not issubclass(subclass, Step):
# This can happen if `choice` is a fully qualified name.
raise ConfigurationError(
f"Tried to make a Step of type {choice}, but ended up with a {subclass}."
)
parameters = infer_method_params(subclass, subclass.run)
del parameters["self"]
init_parameters = infer_constructor_params(subclass)
del init_parameters["self"]
del init_parameters["kwargs"]
parameter_overlap = parameters.keys() & init_parameters.keys()
assert len(parameter_overlap) <= 0, (
f"If this assert fails it means that you wrote a Step with a run() method that takes one of the "
f"reserved parameters ({', '.join(init_parameters.keys())})"
)
parameters.update(init_parameters)
kwargs: Dict[str, Any] = {}
accepts_kwargs = False
for param_name, param in parameters.items():
if param.kind == param.VAR_KEYWORD:
# When a class takes **kwargs we store the fact that the method allows extra keys; if
# we get extra parameters, instead of crashing, we'll just pass them as-is to the
# constructor, and hope that you know what you're doing.
accepts_kwargs = True
continue
explicitly_set = param_name in params
constructed_arg = pop_and_construct_arg(
subclass.__name__,
param_name,
param.annotation,
param.default,
params,
existing_steps=existing_steps,
**extras,
)
# If the param wasn't explicitly set in `params` and we just ended up constructing
# the default value for the parameter, we can just omit it.
# Leaving it in can cause issues with **kwargs in some corner cases, where you might end up
# with multiple values for a single parameter (e.g., the default value gives you lazy=False
# for a dataset reader inside **kwargs, but a particular dataset reader actually hard-codes
# lazy=True - the superclass sees both lazy=True and lazy=False in its constructor).
if explicitly_set or constructed_arg is not param.default:
kwargs[param_name] = constructed_arg
if accepts_kwargs:
kwargs.update(params)
else:
params.assert_empty(subclass.__name__)
return subclass(step_name=step_name, **kwargs)
@abstractmethod
def run(self, **kwargs) -> T:
"""This is the main method of a step. Overwrite this method to define your step's action."""
raise NotImplementedError()
def _run_with_work_dir(self, cache: StepCache, **kwargs) -> T:
if self.work_dir_for_run is not None:
raise ValueError("You can only run a Step's run() method once at a time.")
logger.info("Starting run for step %s of type %s", self.name, self.__class__.__name__)
if self.DETERMINISTIC:
random.seed(784507111)
try:
import numpy
numpy.random.seed(784507111)
except ImportError:
pass
try:
import torch
torch.manual_seed(784507111)
except ImportError:
pass
step_dir = cache.path_for_step(self)
if step_dir is None:
work_dir = TemporaryDirectory(prefix=self.unique_id() + "-", suffix=".work")
self.work_dir_for_run = Path(work_dir.name)
try:
return self.run(**kwargs)
finally:
self.work_dir_for_run = None
work_dir.cleanup()
else:
self.work_dir_for_run = step_dir / "work"
try:
self.work_dir_for_run.mkdir(exist_ok=True, parents=True)
return self.run(**kwargs)
finally:
# No cleanup, as we want to keep the directory for restarts or serialization.
self.work_dir_for_run = None
def work_dir(self) -> Path:
"""
Returns a work directory that a step can use while its `run()` method runs.
This directory stays around across restarts. You cannot assume that it is empty when your
step runs, but you can use it to store information that helps you restart a step if it
got killed half-way through the last time it ran."""
if self.work_dir_for_run is None:
raise ValueError("You can only call this method while the step is running.")
return self.work_dir_for_run
@classmethod
def _replace_steps_with_results(cls, o: Any, cache: StepCache):
if isinstance(o, Step):
return o.result(cache)
elif isinstance(o, list):
return [cls._replace_steps_with_results(i, cache) for i in o]
elif isinstance(o, tuple):
return tuple(cls._replace_steps_with_results(list(o), cache))
elif isinstance(o, set):
return {cls._replace_steps_with_results(i, cache) for i in o}
elif isinstance(o, dict):
return {key: cls._replace_steps_with_results(value, cache) for key, value in o.items()}
else:
return o
    def result(self, cache: Optional[StepCache] = None) -> T:
        """Returns the result of this step. If the results are cached, it returns those. Otherwise it
        runs the step and returns the result from there."""
        if cache is None:
            cache = default_step_cache
        if self in cache:
            return cache[self]

        # Resolve dependency steps to their concrete results before running.
        kwargs = self._replace_steps_with_results(self.kwargs, cache)
        result = self._run_with_work_dir(cache, **kwargs)
        if self.cache_results:
            cache[self] = result
            if hasattr(result, "__next__"):
                assert isinstance(result, Iterator)
                # Caching the iterator will consume it, so we write it to the cache and then read from the cache
                # for the return value.
                return cache[self]
        return result
    def ensure_result(self, cache: Optional[StepCache] = None) -> None:
        """This makes sure that the result of this step is in the cache. It does
        not return the result.

        Raises ``ValueError`` if the step was configured not to cache results.
        """
        if not self.cache_results:
            raise ValueError(
                "It does not make sense to call ensure_result() on a step that's not cacheable."
            )
        if cache is None:
            cache = default_step_cache
        if self in cache:
            return

        # Note: unlike result(), this has no special handling for iterator
        # results; it only writes the value into the cache.
        kwargs = self._replace_steps_with_results(self.kwargs, cache)
        result = self._run_with_work_dir(cache, **kwargs)
        cache[self] = result
    def det_hash_object(self) -> Any:
        # Hash steps by their unique ID so equal steps hash identically inside det_hash().
        return self.unique_id()
    def unique_id(self) -> str:
        """Returns the unique ID for this step.

        Unique IDs are of the shape `$class_name-$version-$hash`, where the hash is the hash of the
        inputs for deterministic steps, and a random string of characters for non-deterministic ones."""
        if self.unique_id_cache is None:
            # Compute once and memoize: the ID must never change for the lifetime of the step.
            self.unique_id_cache = self.__class__.__name__
            if self.VERSION is not None:
                self.unique_id_cache += "-"
                self.unique_id_cache += self.VERSION

            self.unique_id_cache += "-"
            if self.DETERMINISTIC:
                # Hash the format's identity/version together with the kwargs, so
                # identical configurations map to the same ID.
                self.unique_id_cache += det_hash(
                    (
                        (self.format.__class__.__module__, self.format.__class__.__qualname__),
                        self.format.VERSION,
                        self.kwargs,
                    )
                )[:32]
            else:
                # Non-deterministic steps get a random, effectively unique suffix.
                self.unique_id_cache += det_hash(random.getrandbits((58 ** 32).bit_length()))[:32]

        return self.unique_id_cache
    def __hash__(self):
        # Hash on the unique ID so steps work as dict/set keys, consistently with __eq__.
        return hash(self.unique_id())
def __eq__(self, other):
if isinstance(other, Step):
return self.unique_id() == other.unique_id()
else:
return False
def _ordered_dependencies(self) -> Iterable["Step"]:
def dependencies_internal(o: Any) -> Iterable[Step]:
if isinstance(o, Step):
yield o
elif isinstance(o, str):
return # Confusingly, str is an Iterable of itself, resulting in infinite recursion.
elif isinstance(o, Iterable):
yield from itertools.chain(*(dependencies_internal(i) for i in o))
elif isinstance(o, dict):
yield from dependencies_internal(o.values())
else:
return
return dependencies_internal(self.kwargs.values())
    def dependencies(self) -> Set["Step"]:
        """Returns a set of steps that this step depends on.

        Does not return recursive dependencies."""
        # Deduplicate the ordered dependency stream into a set.
        return set(self._ordered_dependencies())
def recursive_dependencies(self) -> Set["Step"]:
"""Returns a set of steps that this step depends on.
This returns recursive dependencies."""
seen = set()
steps = list(self.dependencies())
while len(steps) > 0:
step = steps.pop()
if step in seen:
continue
seen.add(step)
steps.extend(step.dependencies())
return seen
@Step.register("ref")
class _RefStep(Step[T], Generic[T]):
def run(self, *, ref: str) -> T: # type: ignore
raise ConfigurationError(
f"Step {self.name} is a RefStep (referring to {ref}). RefSteps cannot be executed. "
"They are only useful while parsing an experiment."
)
def ref(self) -> str:
return self.kwargs["ref"]
def det_hash_object(self) -> Any:
# If we're using a RefStep to compute a unique ID, something has gone wrong. The unique ID would
# change once the RefStep is replaced with the actual step. Unique IDs are never supposed to
# change.
raise ValueError("Cannot compute hash of a _RefStep object.")
class MissingStepError(Exception):
    """Raised when a step reference cannot be resolved to an existing step."""

    def __init__(self, ref: str):
        # Pass a message to Exception so str(err) is informative instead of empty.
        super().__init__(f"Could not resolve step reference {ref!r}")
        self.ref = ref
def step_graph_from_params(params: Dict[str, Params]) -> Dict[str, Step]:
    """Given a mapping from strings to `Params` objects, this parses each `Params` object
    into a `Step`, and resolved dependencies between the steps. Returns a dictionary
    mapping step names to instances of `Step`."""
    # This algorithm for resolving step dependencies is O(n^2). Since we're
    # anticipating the number of steps to be in the dozens at most, we choose
    # simplicity over cleverness.
    unparsed_steps: Dict[str, Params] = params
    next_unparsed_steps: Dict[str, Params] = {}
    parsed_steps: Dict[str, Step] = {}
    steps_parsed = 0
    while len(unparsed_steps) > 0 or len(next_unparsed_steps) > 0:
        if len(unparsed_steps) <= 0:
            # A full pass over the remaining steps made no progress, so the
            # outstanding references can never resolve.
            if steps_parsed <= 0:
                raise ConfigurationError(
                    f"Cannot parse steps {','.join(next_unparsed_steps.keys())}. Do you have a "
                    f"circle in your steps, or are you referring to a step that doesn't exist?"
                )
            unparsed_steps = next_unparsed_steps
            next_unparsed_steps = {}
            steps_parsed = 0
        step_name, step_params = unparsed_steps.popitem()
        if step_name in parsed_steps:
            raise ConfigurationError(f"Duplicate step name {step_name}")

        # Parsing consumes the Params object, so keep a copy for a later retry.
        step_params_backup = copy.deepcopy(step_params)
        try:
            parsed_steps[step_name] = Step.from_params(
                step_params, existing_steps=parsed_steps, step_name=step_name
            )
            steps_parsed += 1
        except _RefStep.MissingStepError:
            # Referenced step has not been parsed yet; retry on the next pass.
            # NOTE(review): assumes MissingStepError is nested inside _RefStep — confirm.
            next_unparsed_steps[step_name] = step_params_backup

    # Sanity-check the graph
    for step in parsed_steps.values():
        if step.cache_results:
            nondeterministic_dependencies = [
                s for s in step.recursive_dependencies() if not s.DETERMINISTIC
            ]
            if len(nondeterministic_dependencies) > 0:
                nd_step = nondeterministic_dependencies[0]
                logger.warning(
                    f"Task {step.name} is set to cache results, but depends on non-deterministic "
                    f"step {nd_step.name}. This will produce confusing results."
                )
                # We show this warning only once.
                break

    return parsed_steps
def tango_dry_run(
    step_or_steps: Union[Step, Iterable[Step]], step_cache: Optional[StepCache]
) -> List[Tuple[Step, bool]]:
    """
    Returns the list of steps that will be run, or read from cache, if you call
    a step's `result()` method.

    Steps come out as tuples `(step, read_from_cache)`, so you can see which
    steps will be read from cache, and which have to be run.
    """
    if isinstance(step_or_steps, Step):
        steps = [step_or_steps]
    else:
        steps = list(step_or_steps)

    cached_steps: MutableSet[Step]
    if step_cache is None:
        cached_steps = set()
    else:
        # Also treat steps already present in the given cache as cached, in
        # addition to the ones this dry run marks as produced.
        class SetWithFallback(set):
            def __contains__(self, item):
                return item in step_cache or super().__contains__(item)

        cached_steps = SetWithFallback()

    result = []
    seen_steps = set()
    steps.reverse()
    while len(steps) > 0:
        step = steps.pop()
        if step in seen_steps:
            continue
        dependencies = [s for s in step._ordered_dependencies() if s not in seen_steps]
        if len(dependencies) <= 0:
            # All dependencies handled: this step runs (or is read) now.
            result.append((step, step in cached_steps))
            cached_steps.add(step)
            seen_steps.add(step)
        else:
            # Revisit this step after its dependencies have been processed.
            steps.append(step)
            steps.extend(dependencies)
    return result
| [
"noreply@github.com"
] | vikigenius.noreply@github.com |
dab70b08223dede2f62d09b88702580110fcf525 | 67704c864ca56d836dd4090e42b97abe515972d7 | /test_shelf.py | 85787a86405dc6673a4186fc311cb3e0899180d6 | [] | no_license | KirillGu/Test_YaD_shelf | 7aef1b91ce1a836daef631b77f87620e72ccbb0c | 50b55c81f242504ee364090c51f45cba3fe2d980 | refs/heads/master | 2023-03-19T19:26:58.993684 | 2021-03-21T16:42:11 | 2021-03-21T16:42:11 | 350,057,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | import unittest
from shelf import add_new_doc, delete_doc
class DocuTests(unittest.TestCase):
    """Unit tests for the shelf document-management helpers."""

    def test_ap(self):
        # add_new_doc returns the shelf number the document was stored on.
        self.assertEqual(add_new_doc('11', 'doc', 'kirill', '3'), '3')

    def test_del(self):
        # delete_doc returns the removed document number and a success flag.
        self.assertEqual(delete_doc('11-2'), ('11-2', True))
if __name__ == "__main__":
unittest.main()
| [
"kirillgusev@MacBook-Air-Kirill.local"
] | kirillgusev@MacBook-Air-Kirill.local |
4604889cb01d7545f85b808794374af3ef1f3966 | 14a913fce4b538b22f28409645cd6abe3455808f | /iam/api-client/grantable_roles_test.py | 87af3564b9305be6d29d1725a407034abf8131c5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | iamLoi/Python-Random-Number-Generator | 8da7dbd37cb13a01232c8ed49b9df35a99c63d73 | 7579e8b15130802aaf519979e475c6c75c403eda | refs/heads/master | 2022-08-29T19:05:32.649931 | 2019-09-14T14:48:58 | 2019-09-14T14:48:58 | 208,454,877 | 2 | 1 | Apache-2.0 | 2022-08-05T21:57:49 | 2019-09-14T14:51:05 | Python | UTF-8 | Python | false | false | 897 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import grantable_roles
def test_service_accounts(capsys):
    """Smoke-test view_grantable_roles against the configured GCP project.

    Requires the GCLOUD_PROJECT environment variable and live API access.
    """
    project = os.environ['GCLOUD_PROJECT']
    # Fully-qualified resource name, as required by the IAM API.
    resource = '//cloudresourcemanager.googleapis.com/projects/' + project
    grantable_roles.view_grantable_roles(resource)
    out, _ = capsys.readouterr()
    assert 'Title:' in out
| [
"andrew.gorcester@gmail.com"
] | andrew.gorcester@gmail.com |
6a1d676cb150a7d20d5e2d342ee5fe1d9c7d53cc | b944843f0bf3105fa8ee62186b25bc2ff2102a5b | /base64_encrypter_main.py | 4654fa9cc8bb7cc94ff1859a9ad96c1b9e8060af | [] | no_license | tame12/Base64-encoding | 46b2e22993142f9608977d4c3acbde191f434661 | 1b68b60c9d390967b1f2f6a8e1ef2968612df4a0 | refs/heads/master | 2021-01-24T20:14:12.099794 | 2018-02-28T08:06:45 | 2018-02-28T08:06:45 | 123,246,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | list_64 = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',
'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',
'0','1','2','3','4','5','6','7','8','9',
'+','/']
list_ASCII = [None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,
' ','!','"','#','$','%','&',"'",'(',')','*','+',',','-','.','/',
'0','1','2','3','4','5','6','7','8','9',
':',';','<','=','>','?',"@",
'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',
'[',None, #backslash giving problems... '\\' may work.?
']','^','_','`',
'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',
'{','|','}','~','\n']
#actuall ASCII dosent have \n at last index, i added that just to make this program better...
from number_to_binary import binary_to_num
from number_to_binary import num_to_binary
def encrypt_to_base64(text):
    """Encode *text* into Base64 using the module's hand-built alphabet tables.

    Each character is looked up in ``list_ASCII`` (its index is the ASCII code),
    written as 8 bits, the combined bit string is padded with ``00`` pairs to a
    multiple of six bits, and every 6-bit group is mapped through ``list_64``.
    One ``=`` is appended per ``00`` pair, matching standard Base64 padding counts.
    """
    text_hold = list(str(text))
    binary_string = ""
    output = ""
    padding = 0
    for i in range(0,len(text_hold)): #convert each letter to binary of length 8, concatinate in binary_string
        index_ASCII = list_ASCII.index(text_hold[i])
        binary_string += num_to_binary(index_ASCII,8)
    while (len(binary_string)%6)!= 0: #add 00 to complete 6 bit -> every 00 add 1 padding later
        binary_string += "00"
        padding += 1
    binary_list = list(binary_string)
    while len(binary_list) > 0: #minus away from binary list
        binary_6bit = ""
        for a in range(0,6):
            binary_6bit += binary_list[0]
            binary_list.pop(0)
        index_64 = binary_to_num(binary_6bit)
        output += list_64[index_64]
    for b in range(0,padding): #add padding
        output += "="
    return output
    #end
#end
def decrypt_from_base64(text):
    """Decode a string produced by ``encrypt_to_base64`` back to its original text.

    Strips trailing ``=`` padding, maps each Base64 character through ``list_64``
    to 6 bits, drops two bits per padding character, and converts every 8-bit
    group back to a character via ``list_ASCII``.
    """
    text_hold = list(str(text))
    padding = 0
    binary_string = ""
    output = ""
    while text_hold[len(text_hold)-1] == "=": #find num padding & remove it
        text_hold.pop()
        padding += 1
    for i in range(0,len(text_hold)): #change text to binary
        index_64 = list_64.index(text_hold[i])
        binary_string += num_to_binary(index_64,6)
    binary_list = list(binary_string)
    for a in range(0,padding*2): #for every padding remove 2 0s
        binary_list.pop()
    while len(binary_list) > 0: #minus away from binary list
        binary_8bit = ""
        for a in range(0,8):
            binary_8bit += binary_list[0]
            binary_list.pop(0)
        index_ASCII = binary_to_num(binary_8bit)
        # NOTE(review): list_ASCII holds None for control codes and backslash;
        # decoding such an index would raise a TypeError here — confirm inputs.
        output += list_ASCII[index_ASCII]
    return output
    #end
#end
| [
"noreply@github.com"
] | tame12.noreply@github.com |
b4f598bb7e606e584899aaf66f8c72decb8fa123 | c60c071bc5cf72917883cddbcb5b6a42b6e71f2b | /ja_craiglist_djo/manage.py | 36d599e26d9d291d582787680bf01232cfa9b030 | [] | no_license | CyborgVillager/ja-django-git | bbf17927c8b2d3e774dc4d3bc363f96f3ec49216 | 7755d5996f91ecb5014ae720a4212b44a2e863ef | refs/heads/master | 2020-09-25T19:41:34.297549 | 2019-12-05T16:18:09 | 2019-12-05T16:18:09 | 226,074,493 | 0 | 0 | null | 2019-12-05T16:07:51 | 2019-12-05T10:20:53 | Python | UTF-8 | Python | false | false | 548 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django management entry point: select the settings module, then
    # dispatch the command-line arguments (runserver, migrate, ...) to Django.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ja_craiglist_djo.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"almawijonathan@gmail.com"
] | almawijonathan@gmail.com |
c0e3f33560d87b12dfd4c8a1b7dbed40257625b5 | 01c33443db4c4ac74087d220a2a3a6967ee3930f | /ccxt/async_support/bitflyer.py | 3aec9da4036433720f813bbe283bea39e187d4cb | [] | no_license | arques-changhwan/ccxt | 74de1790ab2e2cc07fa55f418817c988b3af6a28 | ac26599695af742aaffc16a8fd4dda4f8cb63588 | refs/heads/master | 2022-09-05T11:31:35.903127 | 2020-05-26T06:36:33 | 2020-05-26T06:36:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,801 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import OrderNotFound
class bitflyer(Exchange):
    def describe(self):
        """Return the static exchange description: ids, URLs, endpoints, capabilities, and fees."""
        return self.deep_extend(super(bitflyer, self).describe(), {
            'id': 'bitflyer',
            'name': 'bitFlyer',
            'countries': ['JP'],
            'version': 'v1',
            'rateLimit': 1000,  # their nonce-timestamp is in seconds...
            'has': {
                'CORS': False,
                'withdraw': True,
                'fetchMyTrades': True,
                'fetchOrders': True,
                'fetchOrder': 'emulated',
                'fetchOpenOrders': 'emulated',
                'fetchClosedOrders': 'emulated',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/28051642-56154182-660e-11e7-9b0d-6042d1e6edd8.jpg',
                'api': 'https://api.bitflyer.jp',
                'www': 'https://bitflyer.jp',
                'doc': 'https://lightning.bitflyer.com/docs?lang=en',
            },
            'api': {
                'public': {
                    'get': [
                        'getmarkets/usa',  # new(wip)
                        'getmarkets/eu',  # new(wip)
                        'getmarkets',  # or 'markets'
                        'getboard',  # ...
                        'getticker',
                        'getexecutions',
                        'gethealth',
                        'getboardstate',
                        'getchats',
                    ],
                },
                'private': {
                    'get': [
                        'getpermissions',
                        'getbalance',
                        'getbalancehistory',
                        'getcollateral',
                        'getcollateralhistory',
                        'getcollateralaccounts',
                        'getaddresses',
                        'getcoinins',
                        'getcoinouts',
                        'getbankaccounts',
                        'getdeposits',
                        'getwithdrawals',
                        'getchildorders',
                        'getparentorders',
                        'getparentorder',
                        'getexecutions',
                        'getpositions',
                        'gettradingcommission',
                    ],
                    'post': [
                        'sendcoin',
                        'withdraw',
                        'sendchildorder',
                        'cancelchildorder',
                        'sendparentorder',
                        'cancelparentorder',
                        'cancelallchildorders',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.2 / 100,
                    'taker': 0.2 / 100,
                },
                'BTC/JPY': {
                    'maker': 0.15 / 100,
                    'taker': 0.15 / 100,
                },
            },
        })
    async def fetch_markets(self, params={}):
        """Fetch and merge market listings from the JP, US, and EU endpoints."""
        jp_markets = await self.publicGetGetmarkets(params)
        us_markets = await self.publicGetGetmarketsUsa(params)
        eu_markets = await self.publicGetGetmarketsEu(params)
        markets = self.array_concat(jp_markets, us_markets)
        markets = self.array_concat(markets, eu_markets)
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = self.safe_string(market, 'product_code')
            currencies = id.split('_')
            baseId = None
            quoteId = None
            base = None
            quote = None
            numCurrencies = len(currencies)
            if numCurrencies == 1:
                # Plain 6-letter code like "BTCJPY": first three letters are the base.
                baseId = id[0:3]
                quoteId = id[3:6]
            elif numCurrencies == 2:
                baseId = currencies[0]
                quoteId = currencies[1]
            else:
                # Prefixed ids like "FX_BTC_JPY": currency codes follow the prefix.
                baseId = currencies[1]
                quoteId = currencies[2]
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = (base + '/' + quote) if (numCurrencies == 2) else id
            fees = self.safe_value(self.fees, symbol, self.fees['trading'])
            maker = self.safe_value(fees, 'maker', self.fees['trading']['maker'])
            taker = self.safe_value(fees, 'taker', self.fees['trading']['taker'])
            spot = True
            future = False
            type = 'spot'
            if ('alias' in market) or (currencies[0] == 'FX'):
                # Aliases and FX_ products are margin/futures markets with no trading fee.
                type = 'future'
                future = True
                spot = False
                maker = 0.0
                taker = 0.0
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'maker': maker,
                'taker': taker,
                'type': type,
                'spot': spot,
                'future': future,
                'info': market,
            })
        return result
    async def fetch_balance(self, params={}):
        """Fetch account balances and map them into the unified ccxt balance structure."""
        await self.load_markets()
        response = await self.privateGetGetbalance(params)
        #
        #     [
        #         {
        #             "currency_code": "JPY",
        #             "amount": 1024078,
        #             "available": 508000
        #         },
        #         {
        #             "currency_code": "BTC",
        #             "amount": 10.24,
        #             "available": 4.12
        #         },
        #         {
        #             "currency_code": "ETH",
        #             "amount": 20.48,
        #             "available": 16.38
        #         }
        #     ]
        #
        result = {'info': response}
        for i in range(0, len(response)):
            balance = response[i]
            currencyId = self.safe_string(balance, 'currency_code')
            code = self.safe_currency_code(currencyId)
            account = self.account()
            account['total'] = self.safe_float(balance, 'amount')
            account['free'] = self.safe_float(balance, 'available')
            result[code] = account
        return self.parse_balance(result)
    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for *symbol* (the `limit` argument is not supported by the API)."""
        await self.load_markets()
        request = {
            'product_code': self.market_id(symbol),
        }
        orderbook = await self.publicGetGetboard(self.extend(request, params))
        return self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'size')
    async def fetch_ticker(self, symbol, params={}):
        """Fetch the current ticker for *symbol*; bitFlyer only supplies bid/ask/last/volume."""
        await self.load_markets()
        request = {
            'product_code': self.market_id(symbol),
        }
        ticker = await self.publicGetGetticker(self.extend(request, params))
        timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
        last = self.safe_float(ticker, 'ltp')  # ltp = last traded price
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': None,
            'low': None,
            'bid': self.safe_float(ticker, 'best_bid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'best_ask'),
            'askVolume': None,
            'vwap': None,
            'open': None,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': self.safe_float(ticker, 'volume_by_product'),
            'quoteVolume': None,
            'info': ticker,
        }
    def parse_trade(self, trade, market=None):
        """Convert a raw bitFlyer execution dict into the unified ccxt trade structure."""
        side = self.safe_string_lower(trade, 'side')
        if side is not None:
            if len(side) < 1:
                side = None
        order = None
        if side is not None:
            # Executions carry the order id under '<side>_child_order_acceptance_id'.
            id = side + '_child_order_acceptance_id'
            if id in trade:
                order = trade[id]
        if order is None:
            order = self.safe_string(trade, 'child_order_acceptance_id')
        timestamp = self.parse8601(self.safe_string(trade, 'exec_date'))
        price = self.safe_float(trade, 'price')
        amount = self.safe_float(trade, 'size')
        cost = None
        if amount is not None:
            if price is not None:
                cost = price * amount
        id = self.safe_string(trade, 'id')
        symbol = None
        if market is not None:
            symbol = market['symbol']
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'order': order,
            'type': None,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': None,
        }
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public executions for *symbol*."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'product_code': market['id'],
        }
        response = await self.publicGetGetexecutions(self.extend(request, params))
        return self.parse_trades(response, market, since, limit)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a child order; returns the acceptance id as the order id."""
        await self.load_markets()
        request = {
            'product_code': self.market_id(symbol),
            'child_order_type': type.upper(),
            'side': side.upper(),
            'price': price,
            'size': amount,
        }
        result = await self.privatePostSendchildorder(self.extend(request, params))
        # {"status": - 200, "error_message": "Insufficient funds", "data": null}
        id = self.safe_string(result, 'child_order_acceptance_id')
        return {
            'info': result,
            'id': id,
        }
    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel a child order by its acceptance id; *symbol* is required by the API."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelOrder() requires a `symbol` argument')
        await self.load_markets()
        request = {
            'product_code': self.market_id(symbol),
            'child_order_acceptance_id': id,
        }
        return await self.privatePostCancelchildorder(self.extend(request, params))
    def parse_order_status(self, status):
        """Map a bitFlyer child_order_state to a unified ccxt order status.

        Unknown states are passed through unchanged.
        """
        statuses = {
            'ACTIVE': 'open',
            'COMPLETED': 'closed',
            'CANCELED': 'canceled',
            'EXPIRED': 'canceled',
            'REJECTED': 'canceled',
        }
        return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
timestamp = self.parse8601(self.safe_string(order, 'child_order_date'))
amount = self.safe_float(order, 'size')
remaining = self.safe_float(order, 'outstanding_size')
filled = self.safe_float(order, 'executed_size')
price = self.safe_float(order, 'price')
cost = price * filled
status = self.parse_order_status(self.safe_string(order, 'child_order_state'))
type = self.safe_string_lower(order, 'child_order_type')
side = self.safe_string_lower(order, 'side')
symbol = None
if market is None:
marketId = self.safe_string(order, 'product_code')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
fee = None
feeCost = self.safe_float(order, 'total_commission')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
'rate': None,
}
id = self.safe_string(order, 'child_order_acceptance_id')
return {
'id': id,
'clientOrderId': None,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
'average': None,
'trades': None,
}
    async def fetch_orders(self, symbol=None, since=None, limit=100, params={}):
        """Fetch child orders for *symbol* (required), newest first, up to *limit*."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrders() requires a `symbol` argument')
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'product_code': market['id'],
            'count': limit,
        }
        response = await self.privateGetGetchildorders(self.extend(request, params))
        orders = self.parse_orders(response, market, since, limit)
        if symbol is not None:
            orders = self.filter_by(orders, 'symbol', symbol)
        return orders
    async def fetch_open_orders(self, symbol=None, since=None, limit=100, params={}):
        """Emulated: fetch_orders filtered to the ACTIVE state."""
        request = {
            'child_order_state': 'ACTIVE',
        }
        return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
    async def fetch_closed_orders(self, symbol=None, since=None, limit=100, params={}):
        """Emulated: fetch_orders filtered to the COMPLETED state."""
        request = {
            'child_order_state': 'COMPLETED',
        }
        return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
    async def fetch_order(self, id, symbol=None, params={}):
        """Emulated: fetch all orders for *symbol* and pick the one with the given id."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchOrder() requires a `symbol` argument')
        orders = await self.fetch_orders(symbol)
        ordersById = self.index_by(orders, 'id')
        if id in ordersById:
            return ordersById[id]
        raise OrderNotFound(self.id + ' No order found with id ' + id)
    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's own executions for *symbol* (required)."""
        if symbol is None:
            raise ArgumentsRequired(self.id + ' fetchMyTrades requires a `symbol` argument')
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'product_code': market['id'],
        }
        if limit is not None:
            request['count'] = limit
        response = await self.privateGetGetexecutions(self.extend(request, params))
        return self.parse_trades(response, market, since, limit)
    async def withdraw(self, code, amount, address, tag=None, params={}):
        """Withdraw fiat (JPY/USD/EUR only) to a registered bank account.

        Crypto withdrawals are not supported by this endpoint.
        """
        self.check_address(address)
        await self.load_markets()
        if code != 'JPY' and code != 'USD' and code != 'EUR':
            raise ExchangeError(self.id + ' allows withdrawing JPY, USD, EUR only, ' + code + ' is not supported')
        currency = self.currency(code)
        request = {
            'currency_code': currency['id'],
            'amount': amount,
            # 'bank_account_id': 1234,
        }
        response = await self.privatePostWithdraw(self.extend(request, params))
        id = self.safe_string(response, 'message_id')
        return {
            'info': response,
            'id': id,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request URL, body, and HMAC-SHA256 auth headers for an API call."""
        request = '/' + self.version + '/'
        if api == 'private':
            request += 'me/'
        request += path
        if method == 'GET':
            if params:
                request += '?' + self.urlencode(params)
        url = self.urls['api'] + request
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            # The signature is computed over nonce + method + path(+body).
            auth = ''.join([nonce, method, request])
            if params:
                if method != 'GET':
                    body = self.json(params)
                    auth += body
            headers = {
                'ACCESS-KEY': self.apiKey,
                'ACCESS-TIMESTAMP': nonce,
                'ACCESS-SIGN': self.hmac(self.encode(auth), self.encode(self.secret)),
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
| [
"insukim@arques.com"
] | insukim@arques.com |
ac242bd9428a8e8c909b8ceebdba6c1129a468c2 | f2673cd07770dca1bc5017341e8293aebbfd66c7 | /models/attention/encoders/pyramidal_blstm_encoder.py | fc78e88348e5a7cb29ead0d799c4e24a19c25a9f | [
"MIT"
] | permissive | xiao2mo/tensorflow_end2end_speech_recognition | 52d2c8d32b2f6e9f9f11dfaf8ddf434da16ff2ea | 9b4bdcacd9d73c3db19205b74f4d48419584834d | refs/heads/master | 2020-06-03T04:54:34.127500 | 2017-06-12T02:47:51 | 2017-06-12T02:47:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Pyramidal Bidirectional LSTM Encoder class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from .encoder_base import EncoderOutput, EncoderBase
class PyramidalBLSTMEncoder(EncoderBase):
    """Pyramidal Bidirectional LSTM Encoder.

    Currently a stub: construction stores hyperparameters via EncoderBase,
    and ``_build`` is not implemented yet.

    Args:
        num_units:
        num_layer:
        keep_prob_input:
        keep_prob_hidden:
        parameter_init:
        clip_activation:
        num_proj:
    """

    def __init__(self,
                 num_units,
                 num_layer,
                 keep_prob_input=1.0,
                 keep_prob_hidden=1.0,
                 parameter_init=0.1,
                 clip_activation=50,
                 num_proj=None,
                 name='pblstm_encoder'):
        # All hyperparameters are stored by the shared EncoderBase initializer.
        EncoderBase.__init__(self, num_units, num_layer, keep_prob_input,
                             keep_prob_hidden, parameter_init, clip_activation,
                             num_proj, name)

    def _build(self, inputs, inputs_seq_len):
        """Construct Pyramidal Bidirectional LSTM encoder.

        Args:
            inputs:
            inputs_seq_len:
        Returns:
            EncoderOutput: A tuple of
                `(outputs, final_state,
                        attention_values, attention_values_length)`
                outputs:
                final_state:
                attention_values:
                attention_values_length:
        """
        self.inputs = inputs
        self.inputs_seq_len = inputs_seq_len
        raise NotImplementedError
| [
"hiro.mhbc@gmail.com"
] | hiro.mhbc@gmail.com |
152553eda650901c21d5c57c5c78ebcc75106dfa | 0b16b44e4fc8c98c9ea3f9d4b8b470f4f62f918d | /Core/migrations/0002_auto_20201101_2120.py | 26e849d4a31082849e63d44fac1fcb8360cb5f66 | [] | no_license | AthifSaheer/DipakNiroula-Django-Ecom | 342eece90211fe80c41ba72bf69a50e63c5ea901 | 94ead608919c5bb076387e26f396e6c38319433e | refs/heads/main | 2023-02-05T06:52:24.204206 | 2020-12-24T13:19:13 | 2020-12-24T13:19:13 | 324,160,212 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | # Generated by Django 2.2.14 on 2020-11-01 15:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rename ``Product.return_POlicy`` to ``return_Policy`` and add the ``Admin`` model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Core', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='return_POlicy',
            new_name='return_Policy',
        ),
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='admins')),
                ('mobile', models.CharField(max_length=20)),
                # One admin profile per Django auth user.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"liteboook@gmail.com"
] | liteboook@gmail.com |
c36fdb2b62304120a4bc8ff3d374431f09ba3ad6 | fa35e8857e1fb8782d044bc3bca9de5df2eccb34 | /src/pyconversations/reader/chan.py | 7d02dae1585920faea89135df2f842aa2e3ed70b | [
"MIT"
] | permissive | hunter-heidenreich/pyconversations | 6c602414bce7a1e68dbed19197161fe431aa9593 | 62d69bf8b2609926566cb87ba8de17f38a19269a | refs/heads/master | 2023-07-13T13:19:07.972226 | 2021-08-23T21:13:53 | 2021-08-23T21:13:53 | 342,770,039 | 0 | 1 | MIT | 2021-08-23T21:13:54 | 2021-02-27T04:27:49 | Python | UTF-8 | Python | false | false | 1,882 | py | import json
from glob import glob
from tqdm import tqdm
from ..convo import Conversation
from ..message import ChanPost
from .base import BaseReader
class ChanReader(BaseReader):
    """
    Reader class for reading and converting raw 4chan data
    """

    @staticmethod
    def read(path_pattern, ld=True):
        """
        Function for reading an entire file/directory of conversations.

        Parameters
        ----------
        path_pattern : str
            The path to file or directory containing Conversation data

        ld : bool
            Whether or not language detection should be activated. (Default: True)

        Raises
        ------
        NotImplementedError
        """
        raise NotImplementedError

    @staticmethod
    def iter_read(path_pattern, ld=True):
        """
        Function for iteratively reading an entire file/directory of conversations.
        Currently expects a `path_pattern` that points to a directory of JSON files
        enumerated from 00 to 99.

        Parameters
        ----------
        path_pattern : str
            The path to file or directory containing Conversation data

        ld : bool
            Whether or not language detection should be activated. (Default: True)

        Yields
        ------
        2-tuple(int, Conversation)
            A tuple containing which chunk (in 0..99) this Conversation originated from as well as a Conversation segment.
        """
        for chunk in range(100):
            print(f'Parsing chunk {chunk+1}/100...')
            # Accumulate every post in this chunk into one conversation graph.
            convo = Conversation()
            for f in glob(path_pattern + f'{chunk:02d}.json'):
                for post in tqdm(json.load(open(f)).values()):
                    px = ChanPost.parse_raw(post, lang_detect=ld)
                    if px:
                        convo.add_post(px)
            # segment() splits the accumulated posts into connected conversations.
            yield chunk, convo.segment()
| [
"hunter.scott.heidenreich@gmailcom"
] | hunter.scott.heidenreich@gmailcom |
2104a8dfb0c25a65f48367f59a2c39e31fa10ca8 | 9e779ba1d3c8db305a3bda2adf03aec6eea80810 | /adsilib/__init__.py | cc2526c6aee1c7983d65662aaeab911becfe19ff | [
"MIT"
] | permissive | atsiaras/adsilib | e70d213802f81cec74cc3c77f8d3dd8551db41f3 | 0408baab937812bb0862f6186ad936806fe670a4 | refs/heads/master | 2021-04-07T00:39:38.267838 | 2019-01-24T16:56:49 | 2019-01-24T16:56:49 | 125,230,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = '2.0.0'
from .__run__ import run_app
from .build_my_library import *
def __get_abspath__():
import os
return os.path.abspath(os.path.dirname(__file__))
| [
"aggelostsiaras@gmail.com"
] | aggelostsiaras@gmail.com |
1fd66354281cf71c1dfef3890a6dce31375494ca | 670a3b0a986a954f9ad0686073e548dc64c57207 | /day1/asgi.py | ff5540b08dda38a9f9b2828df6f502d496ab73f7 | [] | no_license | udaykiran96186/first-git | d7d4967cc984b153003ef8a59fd3f73483523b98 | eeaf9c548f05785d6b513ce96883a5029c66095a | refs/heads/master | 2023-08-30T10:57:14.741397 | 2021-11-16T16:00:03 | 2021-11-16T16:00:03 | 428,311,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for day1 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day1.settings')

# Module-level ASGI callable that ASGI servers (daphne, uvicorn, ...) import.
application = get_asgi_application()
| [
"udaykiran96186@gmail.com"
] | udaykiran96186@gmail.com |
009e53c59746e5e95ef1681b709b7a2b28c2339c | 267aafa3826d216f70a0197369c334bc542aee40 | /setup.py | a4b523deefdb6153c1331d6b30709c5c10b95b35 | [] | no_license | research-core/core-orders | 7ccc199e6b89e6cd86affd4d8e5bab4fe845589b | 37566b742b1423d30f9dc8e67641d828dc22e4a6 | refs/heads/master | 2020-06-29T02:37:00.250110 | 2019-08-26T17:10:48 | 2019-08-26T17:10:48 | 200,413,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
version, license = None, None
with open('orders/__init__.py', 'r') as fd:
content = fd.read()
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
license = re.search(r'^__license__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
if version is None: raise RuntimeError('Cannot find version information')
if license is None: raise RuntimeError('Cannot find license information')
with open('README.md', 'r') as fd:
long_description = fd.read()
setup(
name='core-orders',
version=version,
description='Research CORE ERM - orders module',
author='Ricardo Ribeiro, Hugo Cachitas',
author_email='ricardojvr@gmail.com, hugo.cachitas@research.fchampalimaud.org',
url='https://github.com/research-core/core-orders',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
license=license,
)
| [
"ricardojvr@gmail.com"
] | ricardojvr@gmail.com |
a9682c31beb5aa6a6e2cacc7e42da087c161cd63 | 7ec04fc867d0a48fffc05c65bff9217cfe211fe7 | /HW/统计字符串/teachers.py | f3e81a089bc6a999b09cf50c7dafa2466777ca3b | [] | no_license | Cherry93/pythonPractic | 3b9d1f99803503073bbb2f3a58009665338bd278 | 2889183af6c9a01ab47895b23e2d6ce8c288fd4d | refs/heads/master | 2021-08-31T16:41:56.655989 | 2017-12-22T03:53:18 | 2017-12-22T03:53:18 | 115,008,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | '''
定义教师类,属性包括姓名、职称、工资,创建1000个实例,使用pickle写入teachers.dat并再次读出;
'''
import pickle
from tkinter import filedialog
class Teachers:
    """A teacher record holding a name, a professional title (call) and a salary (rmb)."""

    def __init__(self, name, call, rmb):
        self.name = name
        self.call = call
        self.rmb = rmb

    def __str__(self):
        # Same concatenated layout as the serialized records expect.
        return "name:{}call:{}rmb:{}".format(self.name, self.call, self.rmb)
c = Teachers("王小星","高级",1000)
#print(c)
def writeDemo():
    """Ask the user for a target file and append 10 pickled copies of the
    module-level sample teacher record ``c`` to it."""
    save_path = filedialog.asksaveasfilename()
    # `with` guarantees the handle is closed even if pickling fails; the
    # original kept a module-global `file` handle and closed it manually.
    with open(save_path, mode="ab") as f:
        for _ in range(10):
            pickle.dump(c, f)
writeDemo()
def readMode():
    """Ask the user for a file and print the 10 pickled teacher records in it."""
    # The original printed `data` before it was ever assigned, raising a
    # NameError on every call; it also rebound a module-global `file` name.
    with open(filedialog.askopenfilename(), mode="rb") as f:
        for _ in range(10):
            record = pickle.load(f)
            print(record)
readMode()
| [
"358544104@qq.com"
] | 358544104@qq.com |
eb3d54dc1db886b98008f3a576109aa33e101d6d | 5e734cd4e071272688ab635243290936c5c2db40 | /lib/paths.py | 26971a871946a307647c399e9c700320a62ab114 | [
"MIT"
] | permissive | jwilk/i18nspector | a2a4aecee00de9cfb8d9a0354614f7413e19f1b9 | d9762416937399b81abaedc9ddcdc36dbda1c318 | refs/heads/master | 2023-09-04T12:32:35.255101 | 2023-08-22T08:41:50 | 2023-08-22T08:41:50 | 29,258,684 | 2 | 3 | MIT | 2022-06-27T19:04:57 | 2015-01-14T18:22:23 | Python | UTF-8 | Python | false | false | 1,388 | py | # Copyright © 2013 Jakub Wilk <jwilk@jwilk.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
paths to code and data
'''
import os
# Root of the source tree: the parent of this module's directory.
basedir = os.path.normpath(
    os.path.join(os.path.dirname(__file__), os.path.pardir, '')
)
# Data directory shipped alongside the code (trailing separator preserved).
datadir = os.path.join(basedir, 'data', '')

def check():
    """Raise OSError early if either expected directory is missing."""
    for required in (basedir, datadir):
        os.stat(required)
# vim:ts=4 sts=4 sw=4 et
| [
"jwilk@jwilk.net"
] | jwilk@jwilk.net |
8a1feda92bbe3aa7c8ea93a88590748b2de2c784 | 08871111acfec5049c3d4b48b400f84146a29b06 | /littlelambocoin/types/weight_proof.py | 8313a845fd96b19c16af331d5932b4a571194cb3 | [
"Apache-2.0"
] | permissive | AndreAndris/littlelambocoin | 8d7705b64c018b503bea2c64cec4e15fc4a438ef | ffbf98d5d43ae248586aadbb68316c6ed43da7cb | refs/heads/main | 2023-06-29T15:33:33.167616 | 2021-08-05T06:46:16 | 2021-08-05T06:46:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | from dataclasses import dataclass
from typing import List, Optional
from littlelambocoin.types.blockchain_format.proof_of_space import ProofOfSpace
from littlelambocoin.types.blockchain_format.reward_chain_block import RewardChainBlock
from littlelambocoin.types.blockchain_format.sized_bytes import bytes32
from littlelambocoin.types.blockchain_format.vdf import VDFInfo, VDFProof
from littlelambocoin.types.end_of_slot_bundle import EndOfSubSlotBundle
from littlelambocoin.types.header_block import HeaderBlock
from littlelambocoin.util.ints import uint8, uint32, uint64, uint128
from littlelambocoin.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class SubEpochData(Streamable):
    # Per-sub-epoch summary carried inside a weight proof.
    # NOTE: field order defines the streamable wire format -- do not reorder.
    reward_chain_hash: bytes32
    num_blocks_overflow: uint8
    new_sub_slot_iters: Optional[uint64]
    new_difficulty: Optional[uint64]
# number of challenge blocks
# Average iters for challenge blocks
# |--A-R----R-------R--------R------R----R----------R-----R--R---| Honest difficulty 1000
# 0.16
# compute total reward chain blocks
# |----------------------------A---------------------------------| Attackers chain 1000
# 0.48
# total number of challenge blocks == total number of reward chain blocks
@dataclass(frozen=True)
@streamable
class SubSlotData(Streamable):
    # Proof/VDF data for a single sub-slot of a challenge segment.
    # NOTE: field order defines the streamable wire format -- do not reorder.

    # set only when this sub-slot holds an infused (challenge) block
    proof_of_space: Optional[ProofOfSpace]
    # VDF to the signage point
    cc_signage_point: Optional[VDFProof]
    # VDF from the signage point to the infusion point
    cc_infusion_point: Optional[VDFProof]
    icc_infusion_point: Optional[VDFProof]
    cc_sp_vdf_info: Optional[VDFInfo]
    signage_point_index: Optional[uint8]
    # VDF spanning the whole slot when nothing was infused,
    # or from the infusion point to the end of the slot otherwise
    cc_slot_end: Optional[VDFProof]
    icc_slot_end: Optional[VDFProof]
    # info from finished slots
    cc_slot_end_info: Optional[VDFInfo]
    icc_slot_end_info: Optional[VDFInfo]
    cc_ip_vdf_info: Optional[VDFInfo]
    icc_ip_vdf_info: Optional[VDFInfo]
    total_iters: Optional[uint128]

    def is_challenge(self) -> bool:
        # A sub-slot with a proof of space contains an infused challenge block.
        return self.proof_of_space is not None

    def is_end_of_slot(self) -> bool:
        # Slot-end VDF info is only present for non-infused (slot-end) entries.
        return self.cc_slot_end_info is not None
@dataclass(frozen=True)
@streamable
class SubEpochChallengeSegment(Streamable):
    # One sampled challenge segment of a sub-epoch.
    # NOTE: field order defines the streamable wire format -- do not reorder.
    sub_epoch_n: uint32
    sub_slots: List[SubSlotData]
    rc_slot_end_info: Optional[VDFInfo]  # in first segment of each sub_epoch
@dataclass(frozen=True)
@streamable
# this is used only for serialization to database
class SubEpochSegments(Streamable):
    # Container for all sampled challenge segments of the proof.
    challenge_segments: List[SubEpochChallengeSegment]
@dataclass(frozen=True)
@streamable
# this is used only for serialization to database
class RecentChainData(Streamable):
    # The recent chain tail (header blocks) included with a weight proof.
    recent_chain_data: List[HeaderBlock]
@dataclass(frozen=True)
@streamable
class ProofBlockHeader(Streamable):
    # Minimal header info for a block referenced by a weight proof.
    # NOTE: field order defines the streamable wire format -- do not reorder.
    finished_sub_slots: List[EndOfSubSlotBundle]
    reward_chain_block: RewardChainBlock
@dataclass(frozen=True)
@streamable
class WeightProof(Streamable):
    # Full weight proof: sub-epoch summaries, sampled challenge segments and
    # the recent chain tail. Field order defines the streamable wire format.
    sub_epochs: List[SubEpochData]
    sub_epoch_segments: List[SubEpochChallengeSegment]  # sampled sub epoch
    recent_chain_data: List[HeaderBlock]
| [
"kevin.vercauteren@gmail.com"
] | kevin.vercauteren@gmail.com |
de35b092deb9a81164caa6ea340b019c2e8b3aa1 | 59961cd3842f2a46803c96f1d4448e8fea49c6db | /device/actuator/ACDCI_UC8/act_server/actuator.py | e94c7e5c359c17c14caa028e9b91c817b0edcbec | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | seanbrhn3/openc2-oif-device | 69a8c59e8075b842a1fdda3d6c115c0540f9aea2 | 805c55e44f59f7c86c5bf347a6fc3787e6903df4 | refs/heads/master | 2021-03-05T12:58:55.917321 | 2020-04-22T08:35:17 | 2020-04-22T08:35:17 | 246,123,536 | 0 | 0 | Apache-2.0 | 2020-03-09T19:26:13 | 2020-03-09T19:26:12 | null | UTF-8 | Python | false | false | 306 | py | from .utils import ActuatorBase
from .actions import (
Delete,
Query
)
class Actuator(ActuatorBase):
    """Actuator exposing the Delete and Query action handlers."""

    def __init__(self, *args, **kwargs):
        super(Actuator, self).__init__(*args, **kwargs)
        # Register every action handler this actuator supports.
        for action in (Delete, Query):
            self._dispatch.register_dispatch(action)
| [
"jerome.czachor@g2-inc.com"
] | jerome.czachor@g2-inc.com |
9b3d001951b24200fcdb3bd49fa67280cf2503c4 | 6659f860ddbb7550f66ea712753d3d2aab1cc6ff | /Note_3/Example_36.py | 2671dcc8e106d4ba64273a5f63c1cda83dfc50f5 | [] | no_license | ianhom/Python-Noob | adf077bee78727eac43da2804a90528ace6c38a6 | e12f0159d68d7c4962cafa3cb8b68a8761037f21 | refs/heads/master | 2020-12-08T12:06:01.909463 | 2018-07-03T00:42:41 | 2018-07-03T00:42:41 | 67,806,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
题目:求100之内的素数。
'''
# Read the inclusive range to scan for primes.
lower = int(input("输入区间最小值: "))
upper = int(input("输入区间最大值: "))

for num in range(lower, upper + 1):
    # Primes are greater than 1 -- skip everything else early.
    if num <= 1:
        continue
    # Trial division only needs to test divisors up to sqrt(num)
    # (the original tested every value below num: O(n) per candidate).
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            break
    else:
        # for-else runs only when no divisor was found -> num is prime
        print(num)
# result
'''
输入区间最小值: 2
输入区间最大值: 78
2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
59
61
67
71
73
'''
| [
"noreply@github.com"
] | ianhom.noreply@github.com |
4581172461ca6e272ba66f94b453f7e3627ebeb2 | e617affbb9292944465969a7f7a6a02b1c88f10a | /offer_algri/数组中出现次数超过一半的数字/p.py | 2be851787656e28518166bb8ce3645d671b6563e | [] | no_license | darr/offer_algri | 92904d02c7bbd721aa47b4836f2190c3e9407f24 | 724fd689cfe7bd2f8aaed19ef912eecbf00a2df3 | refs/heads/master | 2020-03-25T04:18:40.491916 | 2018-09-07T08:52:39 | 2018-09-07T08:52:39 | 143,388,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#####################################
# File name : p.py
# Create date : 2018-07-23 08:49
# Modified date : 2018-07-23 13:04
# Author : DARREN
# Describe : not set
# Email : lzygzh@126.com
#####################################
class Solution:
    """Boyer-Moore majority vote: find the value occurring more than half the time."""
    # run:32ms memory:5624k (original submission stats)

    def MoreThanHalfNum_Solution(self, numbers):
        """Return the element of `numbers` appearing more than len(numbers)/2
        times, or 0 when `numbers` is None/empty or no such element exists."""
        # Guard BEFORE calling len(): the original evaluated len(numbers) first
        # and crashed with TypeError when numbers was None.
        if numbers is None or len(numbers) == 0:
            return 0
        lenN = len(numbers)
        # Phase 1: candidate selection.
        num = numbers[0]
        times = 1
        for i in range(1, lenN):
            if times == 0:
                # Adopt a new candidate AND restart its count. The original
                # omitted `times = 1`, which could elect a non-majority
                # candidate (e.g. [1, 2, 1, 1, 3] wrongly returned 0).
                num = numbers[i]
                times = 1
            elif numbers[i] == num:
                times += 1
            else:
                times -= 1
        # Phase 2: verify the candidate truly exceeds half of the elements.
        count = 0
        for i in range(lenN):
            if numbers[i] == num:
                count += 1
        return num if count > lenN / 2 else 0
"lzygzh@126.com"
] | lzygzh@126.com |
e6319b6d8def275c9cca76ac7f504569eba57cd8 | 3193ad5646eed71ac636f4f2617e303fa6dc9884 | /main.py | 4a8cbfe45d8ca92d0a2647b8ce7339698aa61e4e | [] | no_license | RAJ-SUDHARSHAN/Turtle-Crossing-game | a31128fdf6251097c9d46915b69023d2e445bb7a | 65b52fb4ca66e3e88a3d68b9ac11011786952243 | refs/heads/master | 2023-05-31T01:29:44.942839 | 2021-07-01T10:29:33 | 2021-07-01T10:29:33 | 377,066,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | import time
from turtle import Screen, Turtle
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard
# Window configuration: fixed 600x600 playfield.
screen = Screen()
screen.setup(width=600, height=600)
screen.tracer(0)  # disable auto-refresh; we call screen.update() each frame

# Game actors.
player = Player()
car_manager = CarManager()
scoreboard = Scoreboard()

# The Up arrow is the only control: move the turtle forward.
screen.listen()
screen.onkey(player.move_up, "Up")

game_is_on = True
while game_is_on:
    # ~10 frames per second.
    time.sleep(0.1)
    screen.update()

    car_manager.create_car()
    car_manager.move_car()

    # Collision check: any car within 20 units ends the game.
    for car in car_manager.all_cars:
        if car.distance(player) < 20:
            game_is_on = False
            scoreboard.game_over()

    # Reaching the far side starts the next, faster level.
    if player.is_at_finish_line():
        player.go_to_start()
        car_manager.level_up()
        scoreboard.increase_level()

screen.exitonclick()
| [
"sudharshann05@gmail.com"
] | sudharshann05@gmail.com |
217939a4ec285f58d2024b374a9fa8ccd9a8ac80 | 6369730d24b0e38238e157750994607f7735290d | /InternTracker/InternTracker/spiders/goldmansachs.py | d192518b10b5c3799ba63b38b2f5500e726d4098 | [] | no_license | kailash360/InternTracker | 72a616663380b7465f4f79279f5a85fc04c76b7d | 5016e35c94b31cf33ae75d5b382ee61570e5853f | refs/heads/master | 2023-06-26T21:51:27.663325 | 2021-07-24T19:43:55 | 2021-07-24T19:43:55 | 354,225,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | import scrapy
from scrapy import Spider
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
from scrapy.http import TextResponse as response
from InternTracker.items import InternshipPosting
from Logger.logger import career_site_logger
import json
import requests
import csv
class GoldmanSachs(scrapy.Spider):
    """Scrape internship postings from the Goldman Sachs students programs JSON feed."""

    name = "goldmansachs_spy"
    # allowed_domains = []
    start_urls = ['https://www.goldmansachs.com/careers/students/programs/programs-list.json']
    close_spider = False

    def parse(self, response):
        """Yield an InternshipPosting item for every intern program in the feed."""
        # Stop the crawl when this record was already scraped before.
        if self.close_spider:
            raise CloseSpider(reason="ALREADY SCRAPED")

        try:
            # Parse the JSON body Scrapy already downloaded; the original issued
            # a second, redundant HTTP request via requests.get(response.url).
            posts = json.loads(response.text)['programs']

            for post in posts:
                title = post['title']
                location = post['region']['name']
                link = post['url']

                # "Internship" contains "Intern", so one substring test suffices.
                if "Intern" in title:
                    posting = InternshipPosting()
                    posting['role'] = title
                    posting['company_name'] = "Goldman Sachs"
                    posting['location'] = location
                    posting['start_date'] = ""
                    posting['stipendmin'] = 0
                    posting['stipendmax'] = 0
                    posting['deadline'] = ""
                    posting['link'] = f"https://www.goldmansachs.com{link}"
                    posting['number_of_applicants'] = 0
                    posting['posting_date'] = ""
                    posting['category_id'] = 0
                    yield posting
        except Exception as e:
            # Best-effort scraping: log and continue rather than crash the crawl.
            career_site_logger.error(e)
| [
"anantvijay3@gmail.com"
] | anantvijay3@gmail.com |
52a6548dfa37833edb95c3a18de38be1ae4aac3c | 7fb7d050ce1b8d8b680c1a19ab49df63c6918a68 | /monai/transforms/utility/array.py | 24d2feb781c8090125e356388c646daaf945bc03 | [
"Apache-2.0"
] | permissive | zuoguoqing/MONAI | 52a2de4e4305e1ae5154824e54f301dca34ad429 | f5ccdc6bcd9696da9c4763910143f12835dca954 | refs/heads/master | 2023-03-11T21:38:01.162356 | 2021-03-01T18:40:59 | 2021-03-01T18:40:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,476 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of "vanilla" transforms for utility functions
https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design
"""
import logging
import time
from typing import TYPE_CHECKING, Callable, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import DtypeLike, NdarrayTensor
from monai.transforms.transform import RandomizableTransform, Transform
from monai.transforms.utils import extreme_points_to_image, get_extreme_points, map_binary_to_indices
from monai.utils import ensure_tuple, min_version, optional_import
if TYPE_CHECKING:
from PIL.Image import Image as PILImageImage
from PIL.Image import fromarray as pil_image_fromarray
has_pil = True
else:
PILImageImage, has_pil = optional_import("PIL.Image", name="Image")
pil_image_fromarray, _ = optional_import("PIL.Image", name="fromarray")
__all__ = [
"Identity",
"AsChannelFirst",
"AsChannelLast",
"AddChannel",
"RepeatChannel",
"RemoveRepeatedChannel",
"SplitChannel",
"CastToType",
"ToTensor",
"ToNumpy",
"Transpose",
"SqueezeDim",
"DataStats",
"SimulateDelay",
"Lambda",
"LabelToMask",
"FgBgToIndices",
"ConvertToMultiChannelBasedOnBratsClasses",
"AddExtremePointsChannel",
"TorchVision",
]
class Identity(Transform):
    """
    No-op transform: returns the input converted to ``np.ndarray``
    (``np.ndarray`` inputs and subclasses pass through unchanged).

    Handy as a placeholder when verifying a transform chain, ``Compose``,
    or a transform adaptor without altering the data.
    """

    def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
        """Convert `img` to an ``np.ndarray`` and return it."""
        return np.asanyarray(img)
class AsChannelFirst(Transform):
    """
    Move the channel dimension of an image array to position 0.

    MONAI transforms generally expect channel-first data of shape
    ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``; this transform
    converts e.g. a channel-last array of shape
    ``(spatial_dim_1[, spatial_dim_2, ...], num_channels)`` into that layout
    so the array is interpreted correctly by the other transforms.

    Args:
        channel_dim: index of the channel dimension in the input
            (defaults to the last dimension).
    """

    def __init__(self, channel_dim: int = -1) -> None:
        # reject anything that is not an int >= -1
        if not isinstance(channel_dim, int) or channel_dim < -1:
            raise AssertionError("invalid channel dimension.")
        self.channel_dim = channel_dim

    def __call__(self, img: np.ndarray) -> np.ndarray:
        """Return `img` with its channel axis moved to the front."""
        return np.moveaxis(img, self.channel_dim, 0)
class AsChannelLast(Transform):
    """
    Move the channel dimension of an image array to the last position.

    Some third-party transforms expect channel-last data of shape
    ``(spatial_dim_1[, spatial_dim_2, ...], num_channels)``; this transform
    converts a channel-first array of shape
    ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])`` into that layout
    so MONAI transforms can be chained with such libraries.

    Args:
        channel_dim: index of the channel dimension in the input
            (defaults to the first dimension).
    """

    def __init__(self, channel_dim: int = 0) -> None:
        # reject anything that is not an int >= -1
        if not isinstance(channel_dim, int) or channel_dim < -1:
            raise AssertionError("invalid channel dimension.")
        self.channel_dim = channel_dim

    def __call__(self, img: np.ndarray) -> np.ndarray:
        """Return `img` with its channel axis moved to the end."""
        return np.moveaxis(img, self.channel_dim, -1)
class AddChannel(Transform):
    """
    Prepend a length-1 channel dimension to the input.

    Converts a plain spatial array of shape
    ``(spatial_dim_1[, spatial_dim_2, ...])`` into the channel-first layout
    ``(1, spatial_dim_1[, spatial_dim_2, ...])`` expected by most
    ``monai.transforms`` image transforms.
    """

    def __call__(self, img: NdarrayTensor):
        """Return `img` with a new leading axis of size 1."""
        return img[None]
class RepeatChannel(Transform):
    """
    Repeat each channel of the input `repeats` times; the count includes the
    original channel, e.g. ``RepeatChannel(repeats=2)([[1, 2], [3, 4]])``
    yields ``[[1, 2], [1, 2], [3, 4], [3, 4]]``.

    Args:
        repeats: how many copies of each channel to produce.
    """

    def __init__(self, repeats: int) -> None:
        if repeats <= 0:
            raise AssertionError("repeats count must be greater than 0.")
        self.repeats = repeats

    def __call__(self, img: np.ndarray) -> np.ndarray:
        """Repeat the channels of a channel-first array `img`."""
        return np.repeat(img, self.repeats, 0)
class RemoveRepeatedChannel(Transform):
    """
    Undo :py:class:`RepeatChannel` by keeping every `repeats`-th channel,
    e.g. ``RemoveRepeatedChannel(repeats=2)([[1, 2], [1, 2], [3, 4], [3, 4]])``
    yields ``[[1, 2], [3, 4]]``.

    Args:
        repeats: the repetition factor to collapse.
    """

    def __init__(self, repeats: int) -> None:
        if repeats <= 0:
            raise AssertionError("repeats count must be greater than 0.")
        self.repeats = repeats

    def __call__(self, img: np.ndarray) -> np.ndarray:
        """Drop repeated channels from a channel-first array `img`."""
        if np.shape(img)[0] < 2:
            raise AssertionError("Image must have more than one channel")
        return np.array(img[:: self.repeats, :])
class SplitChannel(Transform):
    """
    Split a numpy array or PyTorch tensor into single-channel items along the
    channel dimension, so different downstream transforms can be applied per
    channel. The input must have more than one channel.

    Args:
        channel_dim: index of the channel dimension. When ``None`` it is
            chosen automatically: 0 for numpy arrays (numpy is typical in the
            pre-transforms) and 1 for tensors (typical in the post-transforms).
    """

    def __init__(self, channel_dim: Optional[int] = None) -> None:
        self.channel_dim = channel_dim

    def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> List[Union[np.ndarray, torch.Tensor]]:
        if self.channel_dim is not None:
            channel_dim = self.channel_dim
        else:
            # default channel position depends on the data type (see docstring)
            channel_dim = 1 if isinstance(img, torch.Tensor) else 0

        n_classes = img.shape[channel_dim]
        if n_classes <= 1:
            raise RuntimeError("input image does not contain multiple channels.")

        slices = [slice(None)] * len(img.shape)
        outputs = []
        for i in range(n_classes):
            slices[channel_dim] = slice(i, i + 1)
            outputs.append(img[tuple(slices)])
        return outputs
class CastToType(Transform):
    """
    Cast numpy data to a given numpy dtype, or a PyTorch tensor to a given
    torch dtype.
    """

    def __init__(self, dtype=np.float32) -> None:
        """
        Args:
            dtype: target data type, default is `np.float32`.
        """
        self.dtype = dtype

    def __call__(
        self, img: Union[np.ndarray, torch.Tensor], dtype: Optional[Union[DtypeLike, torch.dtype]] = None
    ) -> Union[np.ndarray, torch.Tensor]:
        """
        Cast `img` to `dtype` (falls back to ``self.dtype`` when ``None``).

        Raises:
            TypeError: When ``img`` type is not in ``Union[numpy.ndarray, torch.Tensor]``.
        """
        target = self.dtype if dtype is None else dtype
        if isinstance(img, np.ndarray):
            return img.astype(target)  # type: ignore
        if isinstance(img, torch.Tensor):
            return torch.as_tensor(img, dtype=target)
        raise TypeError(f"img must be one of (numpy.ndarray, torch.Tensor) but is {type(img).__name__}.")
class ToTensor(Transform):
    """
    Convert the input image into a contiguous PyTorch tensor, applying no
    other transformation.
    """

    def __call__(self, img: Union[np.ndarray, torch.Tensor, PILImageImage]) -> torch.Tensor:
        """Return `img` as a contiguous ``torch.Tensor``."""
        if isinstance(img, torch.Tensor):
            return img.contiguous()
        return torch.as_tensor(np.ascontiguousarray(img))
class ToNumpy(Transform):
    """
    Convert the input data (list/tuple of numbers, numpy array or PyTorch
    tensor) into a contiguous numpy array.
    """

    def __call__(self, img: Union[List, Tuple, np.ndarray, torch.Tensor, PILImageImage]) -> np.ndarray:
        """Return `img` as a contiguous ``np.ndarray``."""
        if isinstance(img, torch.Tensor):
            # detach from the autograd graph and move to host memory first
            img = img.detach().cpu().numpy()  # type: ignore
        return np.ascontiguousarray(img)
class ToPIL(Transform):
    """
    Convert a numpy array or PyTorch tensor into a PIL image; PIL inputs pass
    through unchanged.
    """

    def __call__(self, img: Union[np.ndarray, torch.Tensor, PILImageImage]) -> PILImageImage:
        """Return `img` as a ``PIL.Image``."""
        if isinstance(img, PILImageImage):
            return img
        if isinstance(img, torch.Tensor):
            img = img.detach().cpu().numpy()
        return pil_image_fromarray(img)
class Transpose(Transform):
    """
    Transpose the input array axes into the ordering given by `indices`.
    """

    def __init__(self, indices: Optional[Sequence[int]]) -> None:
        self.indices = None if indices is None else tuple(indices)

    def __call__(self, img: np.ndarray) -> np.ndarray:
        """Return `img` transposed to the ``self.indices`` axis order."""
        return img.transpose(self.indices)  # type: ignore
class SqueezeDim(Transform):
    """
    Remove a single unitary dimension from the input.
    """

    def __init__(self, dim: Optional[int] = 0) -> None:
        """
        Args:
            dim: dimension to be squeezed. Default = 0
                "None" works when the input is numpy array.

        Raises:
            TypeError: When ``dim`` is not an ``Optional[int]``.
        """
        if dim is not None and not isinstance(dim, int):
            raise TypeError(f"dim must be None or a int but is {type(dim).__name__}.")
        self.dim = dim

    def __call__(self, img: NdarrayTensor) -> NdarrayTensor:
        """
        Args:
            img: array/tensor whose dimension ``self.dim`` is removed.
        """
        return img.squeeze(self.dim)  # type: ignore
class DataStats(Transform):
    """
    Utility transform to show the statistics of data for debug or analysis.
    It can be inserted into any place of a transform chain and check results of previous transforms.
    It support both `numpy.ndarray` and `torch.tensor` as input data,
    so it can be used in pre-processing and post-processing.
    """

    def __init__(
        self,
        prefix: str = "Data",
        data_shape: bool = True,
        value_range: bool = True,
        data_value: bool = False,
        additional_info: Optional[Callable] = None,
        logger_handler: Optional[logging.Handler] = None,
    ) -> None:
        """
        Args:
            prefix: will be printed in format: "{prefix} statistics".
            data_shape: whether to show the shape of input data.
            value_range: whether to show the value range of input data.
            data_value: whether to show the raw value of input data.
                a typical example is to print some properties of Nifti image: affine, pixdim, etc.
            additional_info: user can define callable function to extract additional info from input data.
            logger_handler: add additional handler to output data: save to file, etc.
                add existing python logging handlers: https://docs.python.org/3/library/logging.handlers.html

        Raises:
            TypeError: When ``additional_info`` is not an ``Optional[Callable]``.

        """
        if not isinstance(prefix, str):
            raise AssertionError("prefix must be a string.")
        self.prefix = prefix
        self.data_shape = data_shape
        self.value_range = value_range
        self.data_value = data_value
        if additional_info is not None and not callable(additional_info):
            raise TypeError(f"additional_info must be None or callable but is {type(additional_info).__name__}.")
        self.additional_info = additional_info
        # holds the most recent report built by __call__ (handy for tests)
        self.output: Optional[str] = None
        # NOTE(review): basicConfig configures the *root* logger globally as a
        # side effect of constructing this transform -- confirm this is intended.
        logging.basicConfig(level=logging.NOTSET)
        self._logger = logging.getLogger("DataStats")
        if logger_handler is not None:
            self._logger.addHandler(logger_handler)

    def __call__(
        self,
        img: NdarrayTensor,
        prefix: Optional[str] = None,
        data_shape: Optional[bool] = None,
        value_range: Optional[bool] = None,
        data_value: Optional[bool] = None,
        additional_info: Optional[Callable] = None,
    ) -> NdarrayTensor:
        """
        Apply the transform to `img`, optionally take arguments similar to the class constructor.
        Per-call arguments override the constructor settings when not None;
        the report is logged at DEBUG level and `img` is returned unchanged.
        """
        lines = [f"{prefix or self.prefix} statistics:"]

        if self.data_shape if data_shape is None else data_shape:
            lines.append(f"Shape: {img.shape}")
        if self.value_range if value_range is None else value_range:
            if isinstance(img, np.ndarray):
                lines.append(f"Value range: ({np.min(img)}, {np.max(img)})")
            elif isinstance(img, torch.Tensor):
                lines.append(f"Value range: ({torch.min(img)}, {torch.max(img)})")
            else:
                lines.append(f"Value range: (not a PyTorch or Numpy array, type: {type(img)})")
        if self.data_value if data_value is None else data_value:
            lines.append(f"Value: {img}")
        additional_info = self.additional_info if additional_info is None else additional_info
        if additional_info is not None:
            lines.append(f"Additional info: {additional_info(img)}")
        separator = "\n"
        self.output = f"{separator.join(lines)}"
        self._logger.debug(self.output)

        return img
class SimulateDelay(Transform):
    """
    Testing-only pass-through transform that sleeps for a configurable amount
    of time before returning its input unchanged. Useful for simulating slow
    storage or network transfers (e.g. NFS) on small test data, so timing
    behavior of large datasets can be explored without the data itself.
    """

    def __init__(self, delay_time: float = 0.0) -> None:
        """
        Args:
            delay_time: minimum delay, in fractions of seconds.
        """
        super().__init__()
        self.delay_time: float = delay_time

    def __call__(self, img: NdarrayTensor, delay_time: Optional[float] = None) -> NdarrayTensor:
        """
        Sleep for `delay_time` seconds (default ``self.delay_time``), then
        return `img` untouched.
        """
        wait = self.delay_time if delay_time is None else delay_time
        time.sleep(wait)
        return img
class Lambda(Transform):
    """
    Apply a user-supplied callable as a transform, e.g.::

        image = np.ones((10, 2, 2))
        lambd = Lambda(func=lambda x: x[:4, :, :])
        print(lambd(image).shape)  # (4, 2, 2)

    Args:
        func: Lambda/function to be applied.

    Raises:
        TypeError: When ``func`` is not an ``Optional[Callable]``.

    """

    def __init__(self, func: Optional[Callable] = None) -> None:
        if func is not None and not callable(func):
            raise TypeError(f"func must be None or callable but is {type(func).__name__}.")
        self.func = func

    def __call__(self, img: Union[np.ndarray, torch.Tensor], func: Optional[Callable] = None):
        """
        Apply `func` (default ``self.func``) to `img`.

        Raises:
            TypeError: When ``func`` is not an ``Optional[Callable]``.
            ValueError: When ``func=None`` and ``self.func=None``. Incompatible values.

        """
        if func is None:
            if self.func is None:
                raise ValueError("Incompatible values: func=None and self.func=None.")
            return self.func(img)
        if not callable(func):
            raise TypeError(f"func must be None or callable but is {type(func).__name__}.")
        return func(img)
class LabelToMask(Transform):
    """
    Convert labels to mask for other tasks. A typical usage is to convert segmentation labels
    to mask data to pre-process images and then feed the images into classification network.
    It can support single channel labels or One-Hot labels with specified `select_labels`.
    For example, users can select `label value = [2, 3]` to construct mask data, or select the
    second and the third channels of labels to construct mask data.
    The output mask data can be a multiple channels binary data or a single channel binary
    data that merges all the channels.

    Args:
        select_labels: labels to generate mask from. for 1 channel label, the `select_labels`
            is the expected label values, like: [1, 2, 3]. for One-Hot format label, the
            `select_labels` is the expected channel indices.
        merge_channels: whether to use `np.any()` to merge the result on channel dim. if yes,
            will return a single channel mask with binary data.

    """

    def __init__(  # pytype: disable=annotation-type-mismatch
        self,
        select_labels: Union[Sequence[int], int],
        merge_channels: bool = False,
    ) -> None:  # pytype: disable=annotation-type-mismatch
        # normalized to a tuple so both a single int and a sequence are accepted
        self.select_labels = ensure_tuple(select_labels)
        self.merge_channels = merge_channels

    def __call__(
        self, img: np.ndarray, select_labels: Optional[Union[Sequence[int], int]] = None, merge_channels: bool = False
    ):
        """
        Args:
            select_labels: labels to generate mask from. for 1 channel label, the `select_labels`
                is the expected label values, like: [1, 2, 3]. for One-Hot format label, the
                `select_labels` is the expected channel indices.
            merge_channels: whether to use `np.any()` to merge the result on channel dim. if yes,
                will return a single channel mask with binary data.
        """
        if select_labels is None:
            select_labels = self.select_labels
        else:
            select_labels = ensure_tuple(select_labels)

        if img.shape[0] > 1:
            # multi-channel (One-Hot) input: pick the requested channels
            data = img[[*select_labels]]
        else:
            # single-channel input: boolean mask of the selected label values
            data = np.where(np.in1d(img, select_labels), True, False).reshape(img.shape)

        # NOTE(review): merging applies if EITHER the constructor flag or the
        # per-call flag is True -- a call cannot switch merging off again.
        return np.any(data, axis=0, keepdims=True) if (merge_channels or self.merge_channels) else data
class FgBgToIndices(Transform):
    def __init__(self, image_threshold: float = 0.0, output_shape: Optional[Sequence[int]] = None) -> None:
        """
        Pre-compute the foreground and background indices of a label volume so
        that other transforms (e.g. random fg/bg cropping) can sample positions
        cheaply. The core logic lives in
        :py:class:`monai.transforms.utils.map_binary_to_indices`.

        Args:
            image_threshold: when an `image` is supplied at call time, only
                the area where ``image > image_threshold`` counts as valid
                background.
            output_shape: if given, unravel the flat indices into this shape;
                otherwise 1-dim flattened indices are returned.

        """
        self.image_threshold = image_threshold
        self.output_shape = output_shape

    def __call__(
        self,
        label: np.ndarray,
        image: Optional[np.ndarray] = None,
        output_shape: Optional[Sequence[int]] = None,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Args:
            label: data whose foreground and background indices are computed.
            image: optional intensity image; background is then restricted to
                ``label = 0 & image > image_threshold``, so the output does not
                map to all voxels of the label.
            output_shape: overrides ``self.output_shape`` when not None.

        """
        shape = self.output_shape if output_shape is None else output_shape
        fg, bg = map_binary_to_indices(label, image, self.image_threshold)
        if shape is not None:
            fg = np.stack([np.unravel_index(idx, shape) for idx in fg])
            bg = np.stack([np.unravel_index(idx, shape) for idx in bg])
        return fg, bg
class ConvertToMultiChannelBasedOnBratsClasses(Transform):
    """
    Map brats18-style labels onto three binary channels:
        * TC (Tumor core)      = labels 1 and 4
        * WT (Whole tumor)     = labels 1, 2 and 4
        * ET (Enhancing tumor) = label 4
    where label 1 is the necrotic and non-enhancing tumor core, label 2 the
    peritumoral edema and label 4 the GD-enhancing tumor.
    """

    def __call__(self, img: np.ndarray) -> np.ndarray:
        # TC: labels 1 (tumor non-enh) and 4 (tumor enh)
        tumor_core = np.logical_or(img == 1, img == 4)
        # WT: TC plus label 2 (large edema)
        whole_tumor = np.logical_or(tumor_core, img == 2)
        # ET: label 4 only
        enhancing = img == 4
        return np.stack([tumor_core, whole_tumor, enhancing], axis=0).astype(np.float32)
class AddExtremePointsChannel(RandomizableTransform):
    """
    Append a gaussian-filtered "extreme points" map of the label to the image
    as an extra channel, rescaled to [rescale_min, rescale_max]. This is the
    point-channel scheme described in Roth et al., Going to Extremes: Weakly
    Supervised Medical Image Segmentation,
    https://arxiv.org/abs/2009.11988.

    Only single channel labels (1, spatial_dim1, [spatial_dim2, ...]) are
    supported; the background ``index`` is ignored when locating the extreme
    points.

    Args:
        background: Class index of background label, defaults to 0.
        pert: Random perturbation amount to add to the points, defaults to 0.0.

    Raises:
        ValueError: When no label image provided.
        ValueError: When label image is not single channel.
    """

    def __init__(self, background: int = 0, pert: float = 0.0) -> None:
        self._background = background
        self._pert = pert
        self._points: List[Tuple[int, ...]] = []

    def randomize(self, label: np.ndarray) -> None:
        # Point placement is randomized through self.R and the perturbation.
        self._points = get_extreme_points(label, rand_state=self.R, background=self._background, pert=self._pert)

    def __call__(
        self,
        img: np.ndarray,
        label: Optional[np.ndarray] = None,
        sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 3.0,
        rescale_min: float = -1.0,
        rescale_max: float = 1.0,
    ):
        """
        Args:
            img: the image that receives the new channel.
            label: label image to get extreme points from. Shape must be
                (1, spatial_dim1, [, spatial_dim2, ...]). One-Hot labels are
                not supported.
            sigma: gaussian sigma; either one value per spatial dimension of
                the input data, or a single value applied to all of them.
            rescale_min: minimum value of output data.
            rescale_max: maximum value of output data.
        """
        if label is None:
            raise ValueError("This transform requires a label array!")
        if label.shape[0] != 1:
            raise ValueError("Only supports single channel labels!")
        # Generate the extreme points from the (single) label channel.
        self.randomize(label[0, :])
        points_channel = extreme_points_to_image(
            points=self._points, label=label, sigma=sigma, rescale_min=rescale_min, rescale_max=rescale_max
        )
        return np.concatenate([img, points_channel], axis=0)
class TorchVision:
    """
    Wrap an arbitrary PyTorch TorchVision transform, selected by name, behind
    a uniform callable interface. Most TorchVision transforms only accept PIL
    images or PyTorch Tensors, so convert Numpy input with `ToTensor` first.
    """

    def __init__(self, name: str, *args, **kwargs) -> None:
        """
        Args:
            name: The transform name in TorchVision package.
            args: positional parameters forwarded to the TorchVision transform.
            kwargs: keyword parameters forwarded to the TorchVision transform.
        """
        super().__init__()
        transform_cls, _ = optional_import("torchvision.transforms", "0.8.0", min_version, name=name)
        self.trans = transform_cls(*args, **kwargs)

    def __call__(self, img: torch.Tensor):
        """Run the wrapped TorchVision transform on `img` (a PyTorch Tensor)."""
        return self.trans(img)
| [
"noreply@github.com"
] | zuoguoqing.noreply@github.com |
aaafab9349a37a8754b7eebd163d25ce94c5af20 | e501ce040d909a6c9a6fba8fdb918f5403f22aeb | /piece.py | abba8a44a7be1804218dcf8e5f5385190e727e13 | [] | no_license | evandevizio/my-chess | 173ae87f7ec9692f8fc99717e8bf47942f3a5cd2 | 03296473f547590497692249d1ff225881149651 | refs/heads/master | 2021-06-28T07:51:22.143886 | 2020-12-26T13:25:40 | 2020-12-26T13:25:40 | 201,874,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,728 | py | board_map_alpha_to_index = {"a": 0, "b": 1, "c": 2, "d": 3,
"e": 4, "f": 5, "g": 6, "h": 7}
# Inverse of board_map_alpha_to_index: board column index -> file letter.
board_map_index_to_alpha = {index: letter for index, letter in enumerate("abcdefgh")}
# Piece key (lower- or upper-case letter) -> human-readable piece name.
piece_key_to_name = {
    key: name
    for lower, name in (("x", "King"), ("q", "Queen"), ("r", "Rook"),
                        ("b", "Bishop"), ("k", "Knight"), ("p", "Pawn"))
    for key in (lower, lower.upper())
}
class Piece:
    """Common base class for all chess pieces.

    A piece knows its display key (e.g. "q"/"Q") and its color; the movement
    rules are provided by the concrete subclasses.
    """

    def __init__(self, key, color):
        self.key = key      # display/lookup character for this piece
        self.color = color  # piece color, e.g. "white" or "black"

    def get_key(self):
        """Return the piece's display key."""
        return self.key

    def get_color(self):
        """Return the piece's color."""
        return self.color

    def get_moves(self, position, board):
        """Overridden by subclasses: squares reachable from `position`."""
        pass

    def no_jumping(self, from_square, to_square, color, board):
        """Overridden by subclasses: squares made unreachable by blockers."""
        pass
class King(Piece):
    """The king: moves a single square in any of the eight directions."""

    # Maintained by external check-detection logic; the King never sets it here.
    is_in_check = False

    def get_moves(self, position, board):
        """Return the sorted algebraic squares one step away from `position`.

        Args:
            position: square such as "e4" (file letter + rank digit 0-7).
            board: 8x8 board; only its fixed size matters for this piece.

        Returns:
            Sorted list of algebraic square names, e.g. ["d3", "d4", ...].
        """
        file, rank = list(position.strip().lower())
        rank = int(rank)
        file = board_map_alpha_to_index[file]
        available_moves = []
        # Enumerate the 8 neighbouring squares and keep those on the board.
        # (The previous version probed `board` inside try/except, abusing
        # exceptions for bounds checking and relying on a later filter to
        # discard the negative indices that Python silently wraps around.)
        for d_rank in (-1, 0, 1):
            for d_file in (-1, 0, 1):
                if d_rank == 0 and d_file == 0:
                    continue
                i, j = rank + d_rank, file + d_file
                if 0 <= i <= 7 and 0 <= j <= 7:
                    available_moves.append((i, j))
        all_available_moves = ["".join([board_map_index_to_alpha[j], str(i)])
                               for i, j in available_moves]
        all_available_moves.sort()
        return all_available_moves

    def no_jumping(self, from_square, to_square, color, board):
        """A king only ever moves one square, so its path is never blocked."""
        return []
class Queen(Piece):
    """The queen: slides any distance along ranks, files and diagonals."""

    def get_moves(self, position, board):
        """Return the sorted algebraic names of every square that shares a
        rank, file or diagonal with `position`. Occupancy is ignored here;
        blocked squares are resolved separately by `no_jumping`.
        """
        file, rank = list(position.strip().lower())
        rank = int(rank)
        file = board_map_alpha_to_index[file]
        available_moves = []
        # Vertical and horizontal
        # NOTE(review): list.append cannot raise, so the try/except guards
        # below are inert; they mirror the style of the other piece classes.
        for j in range(8):
            if j != file:
                try:
                    available_moves.append((rank, j))
                except:
                    pass
        for i in range(8):
            if i != rank:
                try:
                    available_moves.append((i, file))
                except:
                    pass
        # Diagonal: walk outward in each of the four diagonal directions.
        for i, j in zip(range(rank + 1, 8, 1), range(file + 1, 8, 1)):
            try:
                available_moves.append((i, j))
            except:
                pass
        for i, j in zip(range(rank + 1, 8, 1), range(file - 1, 0 + -1, -1)):
            try:
                available_moves.append((i, j))
            except:
                pass
        for i, j in zip(range(rank - 1, 0 + -1, -1), range(file + 1, 8, 1)):
            try:
                available_moves.append((i, j))
            except:
                pass
        for i, j in zip(range(rank - 1, 0 + -1, -1), range(file - 1, 0 + -1, -1)):
            try:
                available_moves.append((i, j))
            except:
                pass
        # filter negative values
        temp = [i for i in available_moves if i[0] >= 0 and i[1] >= 0]
        all_available_moves = ["".join([board_map_index_to_alpha[i[1]],
                                        str(i[0])]) for i in temp]
        all_available_moves.sort()
        return all_available_moves

    def no_jumping(self, from_square, to_square, color, board):
        """Return the squares between `from_square` and `to_square` that this
        piece cannot reach: a friendly blocker blocks its own square and all
        squares behind it; an enemy blocker's own square stays capturable but
        everything behind it is blocked.
        """
        blocked = False
        blocked_spaces = []
        # get rank and file of current and target positions
        this_file, this_rank = list(from_square.strip().lower())
        this_rank = int(this_rank)
        this_color = color  # color of current piece
        target_file, target_rank = list(to_square.strip().lower())
        target_rank = int(target_rank)
        # Horizontals:
        # ==============================================================
        if this_rank == target_rank:
            if target_file > this_file:  # scan toward the h-file ================================================
                for i in range(board_map_alpha_to_index[this_file] + 1, board_map_alpha_to_index[target_file] + 1):
                    if blocked is True:
                        blocked_spaces.append(board_map_index_to_alpha[i] + str(this_rank))
                    elif (board[i][this_rank] != 0) and (board[i][this_rank].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = (board_map_index_to_alpha[i]) + str(this_rank)
                        blocked_spaces.append(position)
                    elif (board[i][this_rank] != 0) and (board[i][this_rank].color != this_color):
                        # positions behind are blocked, but target can be captured
                        blocked = True
            else:  # scan toward the a-file ===============================================
                for i in reversed(range(board_map_alpha_to_index[target_file],
                                        board_map_alpha_to_index[this_file])):
                    if blocked is True:
                        blocked_spaces.append(board_map_index_to_alpha[i] + str(this_rank))
                    elif (board[i][this_rank] != 0) and (board[i][this_rank].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = (board_map_index_to_alpha[i]) + str(this_rank)
                        blocked_spaces.append(position)
                    elif (board[i][this_rank] != 0) and (board[i][this_rank].color != this_color):
                        # positions behind are blocked, but target can be captured
                        blocked = True
        # Verticals:
        # ==============================================================
        if this_file == target_file:
            if target_rank > this_rank:  # scan toward higher ranks ===============================================
                for i in range(this_rank + 1, target_rank + 1):
                    if blocked is True:
                        blocked_spaces.append(this_file + str(i))
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = this_file + str(i)
                        blocked_spaces.append(position)
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color != this_color):
                        # positions behind are blocked, but target can be captured
                        blocked = True
            else:  # scan toward lower ranks ==================================================
                for i in reversed(range(target_rank, this_rank)):
                    if blocked is True:
                        blocked_spaces.append(this_file + str(i))
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = this_file + str(i)
                        blocked_spaces.append(position)
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color != this_color):
                        blocked = True  # positions behind are blocked, but target can be captured
        # Diagonals (NOTE(review): these elif branches chain off the
        # Verticals `if this_file == target_file` above):
        # ==============================================================
        elif (target_rank < this_rank) and (
                board_map_alpha_to_index[target_file] > board_map_alpha_to_index[this_file]):
            # toward lower ranks / h-file ==================================================================
            j = board_map_alpha_to_index[this_file] + 1
            for i in range(this_rank - 1, target_rank - 1, -1):
                if blocked is True:
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color == this_color):
                    blocked = True  # position is blocked by a friendly piece
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color != this_color):
                    blocked = True  # positions behind are blocked, but target can be captured
                j += 1
        elif (target_rank < this_rank) and \
                (board_map_alpha_to_index[target_file] < board_map_alpha_to_index[this_file]):
            # toward lower ranks / a-file ==================================================================
            j = board_map_alpha_to_index[this_file] - 1
            for i in range(this_rank - 1, target_rank - 1, -1):
                if blocked is True:
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color == this_color):
                    blocked = True  # position is blocked by a friendly piece
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color != this_color):
                    blocked = True  # positions behind are blocked, but target can be captured
                j -= 1
        # toward higher ranks / h-file =====================================================================
        elif (target_rank > this_rank) and board_map_alpha_to_index[target_file] > board_map_alpha_to_index[this_file]:
            j = board_map_alpha_to_index[this_file] + 1
            for i in range(this_rank + 1, target_rank + 1):
                if blocked is True:
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color == this_color):
                    blocked = True  # position is blocked by a friendly piece
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color != this_color):
                    blocked = True  # positions behind are blocked, but target can be captured
                j += 1
        # toward higher ranks / a-file =====================================================================
        elif (target_rank > this_rank) and board_map_alpha_to_index[target_file] < board_map_alpha_to_index[this_file]:
            j = board_map_alpha_to_index[this_file] - 1
            for i in range(this_rank + 1, target_rank + 1):
                if blocked is True:
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color == this_color):
                    blocked = True  # position is blocked by a friendly piece
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color != this_color):
                    blocked = True  # positions behind are blocked, but target can be captured
                j -= 1
        return blocked_spaces
class Rook(Piece):
    """The rook: slides any distance along ranks and files."""

    def get_moves(self, position, board):
        """Return the sorted algebraic names of every square on the same rank
        or file as `position`. Occupancy is ignored here; blocked squares are
        resolved separately by `no_jumping`.
        """
        file, rank = list(position.strip().lower())
        rank = int(rank)
        file = board_map_alpha_to_index[file]
        available_moves = []
        # rank moves: every other square on this rank
        # NOTE(review): list.append cannot raise, so the try/except guards
        # below are inert; they mirror the style of the other piece classes.
        for j in range(8):
            if j != file:
                try:
                    available_moves.append((rank, j))
                except:
                    pass
        # file moves: every other square on this file
        for i in range(8):
            if i != rank:
                try:
                    available_moves.append((i, file))
                except:
                    pass
        available_moves = ["".join([board_map_index_to_alpha[i[1]],
                                    str(i[0])]) for i in available_moves]
        available_moves.sort()
        return available_moves

    def no_jumping(self, from_square, to_square, color, board):
        """Return the squares between `from_square` and `to_square` that this
        piece cannot reach: a friendly blocker blocks its own square and all
        squares behind it; an enemy blocker's own square stays capturable but
        everything behind it is blocked.
        """
        blocked = False
        blocked_spaces = []
        # get rank and file of current and target positions
        this_file, this_rank = list(from_square.strip().lower())
        this_rank = int(this_rank)
        this_color = color  # color of current piece
        target_file, target_rank = list(to_square.strip().lower())
        target_rank = int(target_rank)
        # Horizontals:
        # ==============================================================
        if this_rank == target_rank:
            if target_file > this_file:  # scan toward the h-file ================================================
                for i in range(board_map_alpha_to_index[this_file] + 1, board_map_alpha_to_index[target_file] + 1):
                    if blocked is True:
                        blocked_spaces.append(board_map_index_to_alpha[i] + str(this_rank))
                    elif (board[i][this_rank] != 0) and (board[i][this_rank].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = (board_map_index_to_alpha[i]) + str(this_rank)
                        blocked_spaces.append(position)
                    elif (board[i][this_rank] != 0) and (board[i][this_rank].color != this_color):
                        # positions behind are blocked, but target can be captured
                        blocked = True
            else:  # scan toward the a-file ===============================================
                for i in reversed(range(board_map_alpha_to_index[target_file],
                                        board_map_alpha_to_index[this_file])):
                    if blocked is True:
                        blocked_spaces.append(board_map_index_to_alpha[i] + str(this_rank))
                    elif (board[i][this_rank] != 0) and (board[i][this_rank].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = (board_map_index_to_alpha[i]) + str(this_rank)
                        blocked_spaces.append(position)
                    elif (board[i][this_rank] != 0) and (board[i][this_rank].color != this_color):
                        # positions behind are blocked, but target can be captured
                        blocked = True
        # Verticals:
        # ==============================================================
        if this_file == target_file:
            if target_rank > this_rank:  # scan toward higher ranks ===============================================
                for i in range(this_rank + 1, target_rank + 1):
                    if blocked is True:
                        blocked_spaces.append(this_file + str(i))
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = this_file + str(i)
                        blocked_spaces.append(position)
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color != this_color):
                        # positions behind are blocked, but target can be captured
                        blocked = True
            else:  # scan toward lower ranks ==================================================
                for i in reversed(range(target_rank, this_rank)):
                    if blocked is True:
                        blocked_spaces.append(this_file + str(i))
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = this_file + str(i)
                        blocked_spaces.append(position)
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color != this_color):
                        blocked = True  # positions behind are blocked, but target can be captured
        return blocked_spaces
class Bishop(Piece):
    """The bishop: slides any distance along diagonals."""

    def get_moves(self, position, board):
        """Return the sorted algebraic names of every square on a diagonal
        through `position`. Occupancy is ignored here; blocked squares are
        resolved separately by `no_jumping`.
        """
        file, rank = list(position.strip().lower())
        rank = int(rank)
        file = board_map_alpha_to_index[file]
        available_moves = []
        # NOTE(review): list.append cannot raise, so the try/except guards
        # below are inert; they mirror the style of the other piece classes.
        for i, j in zip(range(rank + 1, 8, 1), range(file + 1, 8, 1)):
            try:  # (+rank, +file)
                available_moves.append((i, j))
            except:
                pass
        for i, j in zip(range(rank + 1, 8, 1), range(file - 1, 0 + -1, -1)):
            try:  # (+rank, -file)
                available_moves.append((i, j))
            except:
                pass
        for i, j in zip(range(rank - 1, 0 + -1, -1), range(file + 1, 8, 1)):
            try:  # (-rank, +file)
                available_moves.append((i, j))
            except:
                pass
        for i, j in zip(range(rank - 1, 0 + -1, -1), range(file - 1, 0 + -1, -1)):
            try:  # (-rank, -file)
                available_moves.append((i, j))
            except:
                pass
        available_moves = ["".join([board_map_index_to_alpha[i[1]],
                                    str(i[0])]) for i in available_moves]
        available_moves.sort()
        return available_moves

    def no_jumping(self, from_square, to_square, color, board):
        """Return the squares between `from_square` and `to_square` that this
        piece cannot reach: a friendly blocker blocks its own square and all
        squares behind it; an enemy blocker's own square stays capturable but
        everything behind it is blocked.
        """
        blocked = False
        blocked_spaces = []
        # get rank and file of current and target positions
        this_file, this_rank = list(from_square.strip().lower())
        this_rank = int(this_rank)
        this_color = color  # color of current piece
        target_file, target_rank = list(to_square.strip().lower())
        target_rank = int(target_rank)
        # Diagonals:
        # ==============================================================
        if (target_rank < this_rank) and (board_map_alpha_to_index[target_file] > board_map_alpha_to_index[this_file]):
            # toward lower ranks / h-file ==================================================================
            j = board_map_alpha_to_index[this_file] + 1
            for i in range(this_rank - 1, target_rank - 1, -1):
                if blocked is True:
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color == this_color):
                    blocked = True  # position is blocked by a friendly piece
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color != this_color):
                    blocked = True  # positions behind are blocked, but target can be captured
                j += 1
        elif (target_rank < this_rank) and \
                (board_map_alpha_to_index[target_file] < board_map_alpha_to_index[this_file]):
            # toward lower ranks / a-file ==================================================================
            j = board_map_alpha_to_index[this_file] - 1
            for i in range(this_rank - 1, target_rank - 1, -1):
                if blocked is True:
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color == this_color):
                    blocked = True  # position is blocked by a friendly piece
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color != this_color):
                    blocked = True  # positions behind are blocked, but target can be captured
                j -= 1
        # toward higher ranks / h-file =====================================================================
        elif (target_rank > this_rank) and board_map_alpha_to_index[target_file] > board_map_alpha_to_index[this_file]:
            j = board_map_alpha_to_index[this_file] + 1
            for i in range(this_rank + 1, target_rank + 1):
                if blocked is True:
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color == this_color):
                    blocked = True  # position is blocked by a friendly piece
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color != this_color):
                    blocked = True  # positions behind are blocked, but target can be captured
                j += 1
        elif (target_rank > this_rank) and board_map_alpha_to_index[target_file] < board_map_alpha_to_index[this_file]:
            # toward higher ranks / a-file =================================================================
            j = board_map_alpha_to_index[this_file] - 1
            for i in range(this_rank + 1, target_rank + 1):
                if blocked is True:
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color == this_color):
                    blocked = True  # position is blocked by a friendly piece
                    blocked_spaces.append(board_map_index_to_alpha[j] + str(i))
                elif (board[j][i] != 0) and (board[j][i].color != this_color):
                    blocked = True  # positions behind are blocked, but target can be captured
                j -= 1
        return blocked_spaces
class Knight(Piece):
    """The knight: moves in an "L" shape and is the only piece that may jump."""

    def get_moves(self, position, board):
        """Return the sorted algebraic squares a knight can reach from `position`.

        Args:
            position: square such as "e4" (file letter + rank digit 0-7).
            board: 8x8 board; only its fixed size matters for this piece.

        Returns:
            Sorted list of algebraic square names.
        """
        file, rank = list(position.strip().lower())
        rank = int(rank)
        file = board_map_alpha_to_index[file]
        available_moves = []
        # The eight (rank, file) offsets of a knight move. Checking the bounds
        # directly replaces the previous try/except probing of `board`, which
        # silently accepted negative indices via Python's wrap-around indexing
        # and relied on a later filter to discard them.
        for d_rank, d_file in ((1, -2), (2, -1), (2, 1), (1, 2),
                               (-1, 2), (-2, 1), (-2, -1), (-1, -2)):
            i, j = rank + d_rank, file + d_file
            if 0 <= i <= 7 and 0 <= j <= 7:
                available_moves.append((i, j))
        all_available_moves = ["".join([board_map_index_to_alpha[j], str(i)])
                               for i, j in available_moves]
        all_available_moves.sort()
        return all_available_moves

    def no_jumping(self, from_square, to_square, color, board):
        """Knights jump over other pieces, so no squares are ever blocked."""
        return []
class Pawn(Piece):
    """A pawn: single/double forward advances plus diagonal captures.

    In this module's coordinate scheme black pawns start on rank 1 and move
    toward rank 7; white pawns start on rank 6 and move toward rank 0.
    """

    def get_moves(self, position, board):
        """Return the sorted algebraic squares this pawn may move to.

        Fixes over the previous implementation:
          * both diagonal captures are reported (the old ``elif`` dropped the
            second capture whenever both were available, and could duplicate a
            capture for pawns on the a-file);
          * the double advance is only offered from this color's starting
            rank, so e.g. a black pawn on rank 6 no longer indexes off the
            end of the board;
          * every candidate square is bounds-checked, so pawns on the edge
            files or last ranks never wrap around via negative indices.

        Args:
            position: square such as "e4" (file letter + rank digit 0-7).
            board: 8x8 board of 0 (empty) or Piece instances.
        """
        x, y = list(position.strip().lower())
        y = int(y)
        x = board_map_alpha_to_index[x]
        available_moves = []
        step = 1 if self.color == 'black' else -1       # forward direction
        start_rank = 1 if self.color == 'black' else 6  # double-move rank
        # Double advance from the starting rank. Only the landing square is
        # checked here; a blocker on the intermediate square is handled by
        # no_jumping, exactly as before.
        if y == start_rank and board[x][y + 2 * step] == 0:
            available_moves.append((x, y + 2 * step))
        # Standard single-square advance onto an empty square.
        if 0 <= y + step <= 7 and board[x][y + step] == 0:
            available_moves.append((x, y + step))
        # Diagonal captures on both sides.
        for dx in (-1, 1):
            nx, ny = x + dx, y + step
            if 0 <= nx <= 7 and 0 <= ny <= 7:
                target = board[nx][ny]
                if target != 0 and target.get_color() != self.color:
                    available_moves.append((nx, ny))
        all_available_moves = ["".join([board_map_index_to_alpha[i[0]],
                                        str(i[1])]) for i in available_moves]
        all_available_moves.sort()
        return all_available_moves

    def no_jumping(self, from_square, to_square, color, board):
        """Return the squares on the pawn's file that are unreachable because
        another piece stands in the way (relevant for the double advance).

        NOTE(review): this mirrors the rook-style vertical scan, including the
        "enemy target can be captured" branch; forward captures are not legal
        for pawns, but get_moves never offers an occupied forward square, so
        that branch is harmless in practice.
        """
        blocked = False
        blocked_spaces = []
        this_file, this_rank = list(from_square.strip().lower())
        this_rank = int(this_rank)
        this_color = color  # color of the moving pawn
        target_file, target_rank = list(to_square.strip().lower())
        target_rank = int(target_rank)
        if this_file == target_file:
            if target_rank > this_rank:  # moving toward higher ranks (black)
                for i in range(this_rank + 1, target_rank + 1):
                    if blocked is True:
                        blocked_spaces.append(this_file + str(i))
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = this_file + str(i)
                        blocked_spaces.append(position)
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color != this_color):
                        # positions behind are blocked, but target can be captured
                        blocked = True
            else:  # moving toward lower ranks (white)
                for i in reversed(range(target_rank, this_rank)):
                    if blocked is True:
                        blocked_spaces.append(this_file + str(i))
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color == this_color):
                        blocked = True  # position is blocked by a friendly piece
                        position = this_file + str(i)
                        blocked_spaces.append(position)
                    elif (board[board_map_alpha_to_index[this_file]][i] != 0) and \
                            (board[board_map_alpha_to_index[this_file]][i].color != this_color):
                        blocked = True  # positions behind are blocked, but target can be captured
        return blocked_spaces
| [
"31998161+evanthecannibal@users.noreply.github.com"
] | 31998161+evanthecannibal@users.noreply.github.com |
e6f6729fd58389dd869ab4888b0916e5456bb7a1 | f9d549547f316ca38332eb822d6829f285a6dda6 | /Tasks/Community/ts_scriptExamples/vbot.example.py | a79ebc92dbc5110ad9c888e54826124998b03df5 | [
"MIT"
] | permissive | clarkwhitty/Velocity-assets | ce12f590230527d0cf62a77a1608db228b39464d | 67a886c5d3d46d3c7e74405b7fc28b4c9de9f7eb | refs/heads/master | 2021-06-05T14:47:13.327371 | 2019-12-16T16:04:49 | 2019-12-16T16:04:49 | 148,567,363 | 0 | 0 | MIT | 2018-09-13T02:01:45 | 2018-09-13T02:01:44 | null | UTF-8 | Python | false | false | 1,447 | py | import os
import argparse

# Parse the resource arguments Velocity passes on the command line.
parser = argparse.ArgumentParser()
parser.add_argument('--resourceId', action='store', dest='resourceId', help='resource ID')
parser.add_argument('--ipAddress', action='store', dest='ipAddress', help='IP address')
results, unknown = parser.parse_known_args()
print('The inventory ID of the selected resource is ' + results.resourceId)
print('The IP address of the selected resource is ' + results.ipAddress)
# Report optional Velocity environment parameters when they are set.
# 'key in os.environ' replaces dict.has_key(), which was removed in Python 3;
# the membership test behaves identically on Python 2.
if 'VELOCITY_PARAM_TMBL_FILE' in os.environ:
    print("[INFO] TOPOLOGY FILE: " + os.environ['VELOCITY_PARAM_TMBL_FILE'])
if 'VELOCITY_PARAM_RESERVATION_ID' in os.environ:
    print("[INFO] RESERVATION ID: " + os.environ['VELOCITY_PARAM_RESERVATION_ID'])
if 'VELOCITY_PARAM_REPORT_ID' in os.environ:
    print("[INFO] REPORT_ID: " + os.environ['VELOCITY_PARAM_REPORT_ID'])
# These parameters are read unconditionally (a KeyError signals a broken run).
print("[INFO] VELOCITY API ROOT: " + os.environ['VELOCITY_PARAM_VELOCITY_API_ROOT'])
print("[INFO] VELOCITY TOKEN: " + os.environ['VELOCITY_PARAM_VELOCITY_TOKEN'])
print("[INFO] BUILD PARAMETER: " + os.environ['VELOCITY_PARAM_build'])
if 'VELOCITY_PARAM_topologyResourceId' in os.environ:
    print("[INFO] The selected resource has this ID in the topology: " + os.environ['VELOCITY_PARAM_topologyResourceId'])
if 'VELOCITY_PARAM_resourceId' in os.environ:
    print("[INFO] The selected resource has this ID in the inventory: " + os.environ['VELOCITY_PARAM_resourceId'])
| [
"clark.whitty@spirent.com"
] | clark.whitty@spirent.com |
c6830214cc1648ab00b58ba514d0713d9bcdfd55 | e3877c43b26b4c6791172a9187380bc414f4319f | /Practice/largest_xor.py | c636e3173b499442fa87f5b64a1edacc04c59944 | [] | no_license | saqibns/CodeVita | e9fbd9a8e0f4d9730a820b3ddeaac244762ad48c | d2c0fc41a11a1af40867ce8e1bc7c94cf2db9ff2 | refs/heads/master | 2021-01-01T19:20:05.152621 | 2017-09-08T05:53:55 | 2017-09-08T05:53:55 | 30,918,098 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | l = int(input())
r = int(input())
# Maximum pairwise XOR over i, j in [l, r]: if l == r every pair XORs to 0;
# otherwise the classic highest-differing-bit argument gives 2**k - 1, where
# k = (l ^ r).bit_length().  (Take the common prefix of l and r, append a 1
# followed by zeros, and that number minus 1: both lie in [l, r] and XOR to
# all ones below bit k.)  This replaces the original O((r - l)**2) double
# loop with an O(1) computation while producing the same answer.
xor = 0
if l != r:
    xor = (1 << (l ^ r).bit_length()) - 1
print(xor) | [
"shamsi.saqib@gmail.com"
] | shamsi.saqib@gmail.com |
40996d998fba3f367e6a414a12ce392bce6a18ba | 86d67bcc5b81d2f429e0670b5938f4d40e20449e | /ex28/ex28_chapter9_ex3.py | d700e4ee45c3ef0db55216b71b96ae793c1f6e3e | [
"MIT"
] | permissive | BjornChrisnach/Scripting_with_python_SNHU | 7e37c5aeddf87e64ae4b4ab799d043ba5abeb421 | 791d4726ab59ee7414ed8a7059edfb62a88d91dd | refs/heads/main | 2023-05-06T21:11:52.338871 | 2021-05-30T18:33:42 | 2021-05-30T18:33:42 | 369,430,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | inp = input('Enter a file name: ')
# Tally how many 'From <address> ...' header lines each sender appears on,
# then (on the line below this block) report the most frequent sender.
fhandle = open(inp)
email_lst = list()
i = 0
j = 0
dict_email_from_date = dict()
for line in fhandle:
    words = line.split()
    # print('Debug:', words)
    # Only 'From <address> <date>' lines are of interest.
    if len(words) == 0 or words[0] != 'From': continue
    # if words[0] != 'From' :
    # fill up the email lst
    email_lst.append(words[1])
    # set the range max to a variable
    length = len(email_lst) - 1
    # if minimum length = 2, compare them if True set the value j as the count
    # (i always indexes the most recently appended address, since one append
    # happens per processed 'From' line)
    if len(email_lst) >= 2:
        for k in range(0, length):
            if email_lst[k] == email_lst[length]:
                # retrieve the current count, add one
                j = dict_email_from_date[email_lst[i]]
                j += 1
                dict_email_from_date[email_lst[i]] = j
                break
            # add as a new dictionary item
            elif k == length or k == length - 1:
                j = 1
                dict_email_from_date[email_lst[i]] = j
                break
            else:
                continue
    # if length != 2, then it's 1, so add as a new dictionary item
    else:
        j = 1
        dict_email_from_date[email_lst[i]] = j
    i += 1
# Done, print the result
# print(dict_email_from_date)
maximum = (max(dict_email_from_date.values()))
# NOTE(review): maximum_key starts as the keys() view and is rebound (not
# mutated) inside the loop to the last key whose count equals the maximum;
# rebinding the name does not disturb the iteration already in progress.
maximum_key = dict_email_from_date.keys()
for key in maximum_key:
    if dict_email_from_date[key] == maximum:
        maximum_key = key
print(maximum_key, maximum) | [
"41198499+BjornChrisnach@users.noreply.github.com"
] | 41198499+BjornChrisnach@users.noreply.github.com |
d355ec057204546f35eee6349d7c84c3ee495a04 | d0b406d99837e189fca621979d7f09f176d20b5f | /apps/organization/admin.py | 876f8868addb77f2c9f5ad4ba32bfedced1a6433 | [] | no_license | Timm-Lee/MxStable | 5afe81ee6fdd309ccbc26678c569881abd5b114c | 23da9a3f2e5a73452d1d29f22766e78dbb826269 | refs/heads/master | 2020-12-30T14:57:09.297787 | 2017-05-14T01:08:40 | 2017-05-14T01:08:40 | 91,095,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from django.contrib import admin
# Register your models here.
from .models import CityDict, CourseOrg, Teacher
class CityDictAdmin(admin.ModelAdmin):
    """Admin options for CityDict: list columns, searchable fields, filters."""
    list_display = ['name', 'desc', 'add_time']
    search_fields = ['name', 'desc']
    list_filter = ['name', 'desc', 'add_time']
class CourseOrgAdmin(admin.ModelAdmin):
    """Admin options for CourseOrg: list columns, searchable fields, filters."""
    list_display = ['name', 'click_nums', 'fav_nums', 'image', 'address', 'city', 'add_time']
    search_fields = ['name', 'desc', 'click_nums', 'fav_nums', 'image', 'address', 'city']
    list_filter = ['name', 'click_nums', 'fav_nums', 'address', 'city__name', 'add_time']
    # NOTE(review): presumably an xadmin option selecting an ajax widget for
    # the related-city field — confirm against the admin framework in use.
    relfield_style = 'fk-ajax'
class TeacherAdmin(admin.ModelAdmin):
    """Admin options for Teacher: list columns, searchable fields, filters."""
    list_display = ['org', 'name', 'work_years', 'work_company', 'work_position', 'points', 'click_nums', 'fav_nums', 'add_time']
    search_fields = ['org', 'name', 'work_years', 'work_company', 'work_position', 'points', 'click_nums', 'fav_nums']
    list_filter = ['org__name', 'name', 'work_years', 'work_company', 'work_position', 'points', 'click_nums', 'fav_nums', 'add_time']
list_filter = ['org__name', 'name', 'work_years', 'work_company', 'work_position', 'points', 'click_nums', 'fav_nums', 'add_time']
# Attach each ModelAdmin to its model on the default admin site.
admin.site.register(CityDict, CityDictAdmin)
admin.site.register(CourseOrg, CourseOrgAdmin)
admin.site.register(Teacher, TeacherAdmin) | [
"Timm_Lee@qq.com"
] | Timm_Lee@qq.com |
6ee3ad7ed2666cd3c2c2e7bb9947e9d2975cadf8 | ef243d91a1826b490e935fa3f3e6c29c3cc547d0 | /PyQt5/QtSensors/QAltimeterFilter.py | 7f6d16487b33ff0384829272d015bff8aad4003c | [] | no_license | VentiFang/Python_local_module | 6b3d0b22399e817057dfd15d647a14bb1e41980e | c44f55379eca2818b29732c2815480ee755ae3fb | refs/heads/master | 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # encoding: utf-8
# module PyQt5.QtSensors
# from F:\Python\Python36\lib\site-packages\PyQt5\QtSensors.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
from .QSensorFilter import QSensorFilter
class QAltimeterFilter(QSensorFilter):
    """
    QAltimeterFilter()
    QAltimeterFilter(QAltimeterFilter)

    Auto-generated IDE stub (see the module header): method bodies are
    placeholders; the real implementation lives in the compiled
    PyQt5.QtSensors extension module.
    """
    def filter(self, QAltimeterReading): # real signature unknown; restored from __doc__
        """ filter(self, QAltimeterReading) -> bool """
        return False
    def __init__(self, QAltimeterFilter=None): # real signature unknown; restored from __doc__ with multiple overloads
        pass
| [
"5149528+ventifang@user.noreply.gitee.com"
] | 5149528+ventifang@user.noreply.gitee.com |
fed9f1c3fb053b3e634b0860e07876d76f1a063c | f94d04062c69731463755d8bde076057c4ee169f | /Talleres/taller_cicloswhile.py | 4b35e3e151bf89ece2d7fc77f761a49d06a0c36b | [] | no_license | ValeriaAraque/Pogramacion | 5596856459f9a14ea4cb72c0f9e76d1612a274b9 | a88fb4446e80bcdc2ce0e24bef55d9243a72638c | refs/heads/master | 2023-01-21T04:34:37.120205 | 2020-11-25T17:50:45 | 2020-11-25T17:50:45 | 283,563,770 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | #------Ciclo while------
# Exercise 1: read integers from the keyboard until the user enters 0, then
# print the sum of all the numbers entered.  (Prompts and output stay in
# Spanish — they are user-facing runtime strings.)
preguntaNumero = "Ingrese un numero entero o 0 para terminar: "
numeroUsuario = int(input(preguntaNumero))
suma = 0
while (numeroUsuario != 0):
    suma += numeroUsuario
    numeroUsuario = int(input(preguntaNumero))
print (f"La sumatoria de los numeros que ingresaste antes de ingresar el cero fue de {suma}")
# Exercise 2: keep asking for a second integer until it is larger than the first.
print ("-----SEGUNDO EJERCICIO-----")
preguntaEntero1 = "Ingrese un numero entero: "
preguntaEntero2 = "Ingrese otro numero entero: "
numeroEntero1 = int(input(preguntaEntero1))
numeroEntero2 = int(input(preguntaEntero2))
while (numeroEntero2 <= numeroEntero1):
    numeroEntero2 = int(input(preguntaEntero2))
print (f"El primer numero que ingresaste fue {numeroEntero1} y el segundo numero que ingresaste fue el {numeroEntero2} ")
# Exercise 3: keep reading while each new number is >= the previous one.
print ("----TERCER EJERCICIO----")
numeroEntero1 = int(input(preguntaEntero1))
numeroEntero2 = int(input(preguntaEntero2))
while (numeroEntero2 >= numeroEntero1):
    numeroEntero1 = numeroEntero2
    numeroEntero2 = int(input(preguntaEntero2))
    # NOTE(review): this message ("you entered a number smaller than the
    # previous one") prints on every pass, i.e. while the numbers are NOT
    # decreasing — the condition looks inverted; confirm the intent.
    print ("Has ingresado un numero menor al anterior")
| [
"valeriaraquec@gmail.com"
] | valeriaraquec@gmail.com |
c8771294e96bf4e6068819db598f3f1ee27023a0 | b5a2b6418088a4f6acf7d7595ae8f7d0c974e6eb | /lpr/label_image.py | 2450796cfb3052038a9d04e91e5bd898a4353b55 | [] | no_license | TruckIdentification/lpr | e5dad4bcc1a8d9ce122457dcd2232ae9b2aea3c6 | 81cc30fe40e4b3b35e25acf442aa78c2631f1e36 | refs/heads/master | 2021-04-12T07:50:42.783242 | 2018-03-20T07:36:43 | 2018-03-20T07:36:43 | 125,849,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,434 | py | import tensorflow as tf
import sys
import os
import cv2
import chepai as cp
# Maps the classifier's ASCII/pinyin label names back to the characters they
# represent: Chinese province abbreviations (e.g. 'jing' -> 京) plus plain
# letters and digits.  NOTE(review): this shadows the builtin ``dict``.
dict = {'jing': '京', 'hu': '沪', 'yue': '粤', 'n': 'N','b': 'B','v': 'V', 'c': 'C', 'x': 'X'
        , 'z': 'Z', 'l': 'L', 'k': 'K', 'j': 'J', 'h': 'H', 'g': 'G','m': 'M'
        , 'f': 'F', 'd': 'D', 's': 'S', 'a': 'A', 'p': 'P', 'u': 'U','min': '闽'
        , 'y': 'Y', 't': 'T', 'r': 'R', 'e': 'E', 'w': 'W', 'q': 'Q','su': '苏'
        , '9': '9', '8': '8', '7': '7', '6': '6', '5': '5', '4': '4','zhe': '浙'
        , '3': '3', '2': '2', '1': '1', '0': '0'}
path = 'partition'
dir_path = 'partition/'
# Load the plate image and segment it into per-character images.
image_datas = cv2.imread('111.png')
an = cp.partition(image_datas)
# Accumulates the recognised plate characters, in order.
pai_out = ""
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
                   in tf.gfile.GFile("retrained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    _ = tf.import_graph_def(graph_def, name='')
# for infile in os.listdir(path):
#     image_data = tf.gfile.FastGFile(dir_path+infile, 'rb').read()
#     image_dataa = tf.gfile.FastGFile('pr.png', 'rb').read()
#     print(image_dataa)
with tf.Session() as sess:
    for image_data in an:
        # Round-trip each character through a PNG file so the graph's
        # DecodeJpeg/contents input receives encoded image bytes.
        cv2.imwrite('1.png', image_data)
        image_dataa = tf.gfile.FastGFile('1.png', 'rb').read()
        # Feed the image_data as input to the graph and get first prediction
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        predictions = sess.run(softmax_tensor, \
                 {'DecodeJpeg/contents:0': image_dataa})
        # Sort to show labels of first prediction in order of confidence
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
        # for node_id in top_k:
        pai = label_lines[top_k[0]]
        score = predictions[0][top_k[0]]
        # print('%s (score = %.5f)' % (pai, score))
        # print('------------------------------------------------------------')
        # Append the display character for the best-scoring label.
        pai_out+=dict[str(pai)]
# The banner string below reads "the license plate is output below".
print("---------------------------------------------")
print("---------------------------------------------")
print("----------------下面输出车牌-------------------")
print("----------------"+pai_out+"-------------------")
print("---------------------------------------------")
print("---------------------------------------------")
| [
"1549134149@qq.com"
] | 1549134149@qq.com |
4069518080c129a85d574bf923676519845cf3f1 | b7b9d710033ddaa744619fcf2798ee62046bd6c8 | /Many_Results.py | ce3e46e4922b28b819b78403ca3b1b237e0ddcda | [] | no_license | ArthurWalker/Matching_Address | feb6f345a49aac40fc3ceb4568ced87420703fdb | 42329d37da05730d99f9b528418313ad42b69c93 | refs/heads/master | 2020-04-03T15:50:50.810162 | 2018-11-27T10:29:49 | 2018-11-27T10:29:49 | 155,380,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,761 | py | def match(row,df):
    # Copy the first candidate row of df into the dwelling row's New_* fields.
    row['New_Geo_Address'] = df.iloc[0]['Full_Address']
    row['New_ADDRESS_REFENENCE'] = df.iloc[0]['ADDRESS_REFERENCE']
    row['New_EIRCODE'] = df.iloc[0]['EIRCODE']
    row['New_SMALL_AREA_REF'] = df.iloc[0]['SMALL_AREA_REF']
    return row
def fix(row,dict,geo_df):
    """
    Resolve one dwelling row against its candidate geo addresses.

    :row: pandas Series for a single dwelling record
    :dict: maps DwellingData_id -> candidate ADDRESS_REFERENCE values
           (NOTE: the parameter name shadows the builtin ``dict``)
    :geo_df: geo-directory DataFrame the candidates are looked up in
    :return: the row, with 'New_Change' set and, when a different match is
             chosen, the New_* fields filled in by match()
    """
    if (row['DwellingData_id'] in dict):
        # All geo rows whose address reference is a candidate for this dwelling.
        df_geo_add_ref = geo_df[geo_df['ADDRESS_REFERENCE'].isin(dict[float(row['DwellingData_id'])])]
        # Narrow to candidates in the same post town as the MPRN record.
        search_city = df_geo_add_ref[df_geo_add_ref.loc[:,'PRINCIPAL_POST_TOWN']==row['MPRN city']]
        if (search_city.shape[0]>0):
            if (search_city.shape[0]==1):
                # Single candidate: flag a change only when it differs from
                # the reference already on the row.
                if search_city.iloc[0]['ADDRESS_REFERENCE']==row['ADDRESS_REFERENCE']:
                    row['New_Change']=False
                else:
                    row['New_Change']=True
                    row=match(row,search_city)
            else:
                # Several candidates: narrow further by street name, matching
                # the thoroughfare against either MPRN street field.
                search_thoroughfare = search_city[search_city.loc[:,'THOROUGHFARE'].str.contains(row['MPRN street']) | search_city.loc[:,'THOROUGHFARE'].str.contains(row['MPRN address4'])]
                if (search_thoroughfare.shape[0]>0):
                    if (search_thoroughfare.shape[0]==1):
                        if search_thoroughfare.iloc[0]['ADDRESS_REFERENCE'] == row['ADDRESS_REFERENCE']:
                            row['New_Change'] = False
                        else:
                            row['New_Change'] = True
                            row = match(row, search_thoroughfare)
                    else:
                        # Still ambiguous: match() takes the first candidate.
                        row['New_Change'] = True
                        row = match(row, search_thoroughfare)
                        #row['List_results']=list(search_thoroughfare.ADDRESS_REFERENCE)
                else:
                    row['New_Change'] = False
                    #row['List_results'] = list(search_city.ADDRESS_REFERENCE)
        else:
            row['New_Change'] = False
    else:
        row['New_Change']=False
    return row
def dealing_with_MANY_RESULTS(dwelling_df, geo_df, dict):
    """
    Prepare both frames and resolve every dwelling row via fix().

    :dwelling_df: dwelling records; gains empty New_* output columns
    :geo_df: geo directory; gains a concatenated 'Full_Address' column
    :dict: maps DwellingData_id -> candidate ADDRESS_REFERENCE values
           (NOTE: the parameter name shadows the builtin ``dict``)
    :return: dwelling_df with the New_* columns filled in row by row
    """
    # Output columns, initialised empty, in the same order as before.
    for column in ('New_Change', 'New_EIRCODE', 'New_SMALL_AREA_REF',
                   'New_Geo_Address', 'List_results', 'New_ADDRESS_REFENENCE'):
        dwelling_df[column] = ""
    # Join the ten address-line columns into one space-separated string.
    full_address = geo_df['ADDR_LINE_1']
    for line_number in range(2, 11):
        full_address = full_address + " " + geo_df['ADDR_LINE_' + str(line_number)]
    geo_df['Full_Address'] = full_address
    # Apply the row-level matcher to every dwelling record.
    dwelling_df = dwelling_df.apply(fix, args=(dict, geo_df), axis=1)
    return dwelling_df
| [
"phuc.phamhuu2@dcu.ie"
] | phuc.phamhuu2@dcu.ie |
749e588eae6941ca5de72d0d9293bd321e5594bd | 105025a1cc839ea27a08d05c9bd873a4e7319b91 | /DCGAN_WGAN/utils.py | 7bddad7d3c839dbf03af9ec0178f438bb69948ca | [] | no_license | AlexaYuqinD/Automatic-Image-Enhancement | bbe0c634184a4cbeadfc7ff1fdfcf74d96bc1340 | 6c3304d43aa2f75e17b4b8d65d0919d55f82f93a | refs/heads/master | 2020-05-21T02:17:28.824316 | 2019-08-14T23:16:49 | 2019-08-14T23:16:49 | 185,874,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,292 | py | from config import config
import math
import numpy as np
import scipy.stats as st
import torch
import torch.nn as nn
from scipy import signal
from scipy.ndimage.filters import convolve
def filter_forward(image, filters):
    """
    Run an image through a filter module.

    :image: input image tensor
    :filters: callable filter (e.g. an nn.Conv2d) applied to the image
    """
    filtered = filters(image)
    return filtered
def gaussian_kernel(kernel_size, sigma, channels):
    """
    Build a depthwise Gaussian kernel tensor of shape
    (channels, 1, kernel_size, kernel_size), one identical copy per channel.

    :kernel_size: filter width and height
    :sigma: spread of the Gaussian
    :channels: number of image channels
    """
    step = (2 * sigma + 1) / kernel_size
    # Sample the normal CDF on a symmetric grid and difference it to get
    # per-cell 1-D weights.
    grid = np.linspace(start=-sigma - step / 2, stop=sigma + step / 2, num=kernel_size + 1)
    cdf = st.norm.cdf(grid)
    weights_1d = np.diff(cdf)
    # 2-D kernel, normalised to unit sum per channel.
    raw = np.sqrt(np.outer(weights_1d, weights_1d))
    normalized = (raw / np.sum(raw)).astype(np.float32)
    kernel4d = normalized.reshape(1, 1, kernel_size, kernel_size)
    kernel4d = np.repeat(kernel4d, channels, axis=0)
    return torch.from_numpy(kernel4d)
def kernel_to_conv2d(kernel_size, sigma, channels):
    """
    Wrap the Gaussian kernel in a fixed (non-trainable) depthwise Conv2d.

    :kernel_size: filter width and height
    :sigma: spread of the Gaussian
    :channels: number of image channels (groups=channels makes it depthwise)
    :return: nn.Conv2d with frozen Gaussian weights

    The padding is kernel_size // 2 so odd kernel sizes preserve the spatial
    dimensions ("same" convolution).  The previous hard-coded padding=10 was
    only correct for kernel_size == 21; other sizes silently changed the
    output resolution.
    """
    out_filter = gaussian_kernel(kernel_size, sigma, channels)
    gaussian_filter = nn.Conv2d(channels, channels, kernel_size, groups=channels,
                                bias=False, padding=kernel_size // 2)
    gaussian_filter.weight.data = out_filter
    # Freeze the kernel: it is a fixed blur, not a learnable layer.
    gaussian_filter.weight.requires_grad = False
    return gaussian_filter
def gaussian_blur(image, kernel_size, sigma, channels, device):
    """
    Blur an image with a fixed depthwise Gaussian convolution.

    :image: input image tensor
    :kernel_size: filter width and height
    :sigma: spread of the Gaussian
    :channels: number of image channels
    :device: torch device (cuda or cpu) the filter is moved to
    """
    blur = kernel_to_conv2d(kernel_size, sigma, channels).to(device)
    return filter_forward(image, blur)
def gray_scale(image):
    """
    Collapse the RGB channels (axis 1) into a single luma channel using the
    classic 0.299 / 0.587 / 0.114 weights; the channel axis is kept as a
    singleton in the output.
    """
    red, green, blue = image[:, 0], image[:, 1], image[:, 2]
    luma = 0.299 * red + 0.587 * green + 0.114 * blue
    return luma.unsqueeze(1)
def psnr(image1, image2):
    """
    Peak signal-to-noise ratio between two image batches whose dimensions
    are taken from the global config (pixel range assumed [0, 1]).

    :image1: (batch_size, channels, height, width) style image
    :image2: (batch_size, channels, height, width) enhanced image
    :return: PSNR score in dB
    """
    n_pixels = config.channels * config.height * config.width
    flat1 = image1.view(config.batch_size, n_pixels)
    flat2 = image2.view(config.batch_size, n_pixels)
    # Mean squared error over the whole batch.
    mse = torch.sum((flat1 - flat2) ** 2) / (config.batch_size * n_pixels)
    # 20*log10(1) is the max-signal term for images scaled to [0, 1].
    return 20 * math.log10(1) - 10 * math.log10(mse)
def psnr_full(image1, image2):
    """
    Peak signal-to-noise ratio between two single (unbatched) images,
    sized from the tensors themselves rather than the global config.

    :image1: (channels, height, width) style image
    :image2: (channels, height, width) enhanced image
    :return: PSNR score in dB
    """
    n_elems = image1.shape[0] * image1.shape[1] * image1.shape[2]
    flat1 = image1.view(-1, n_elems)
    flat2 = image2.view(-1, n_elems)
    # Mean squared error over every element of the image.
    mse = torch.sum((flat1 - flat2) ** 2) / n_elems
    # 20*log10(1) is the max-signal term for images scaled to [0, 1].
    return 20 * math.log10(1) - 10 * math.log10(mse)
def fspecial_gauss(window_size, window_sigma):
    """
    Mimic MATLAB's 'fspecial': a normalized 2-D Gaussian kernel centered at
    zero (mu_x = mu_y = 0), returned as a float torch tensor.
    """
    half = window_size // 2
    lo, hi = -half, half + 1
    shift = 0
    if window_size % 2 == 0:
        # Even sizes: sample at half-integer offsets to keep the grid symmetric.
        shift = 0.5
        hi = half
    x, y = np.mgrid[shift + lo:hi, shift + lo:hi]
    raw = np.exp(-((x ** 2 + y ** 2) / (2.0 * window_sigma ** 2)))
    return torch.from_numpy(raw / raw.sum()).float()
def ssim(image1, image2, kernel_size=11, kernel_sigma=1.5):
    """
    :ssim: consider image degradation as perceived change in structural information,
           while incorporating contrast masking and luminance masking
    :image1: (batch_size, channels, height, width) style image
    :image2: (batch_size, channels, height, width) enhanced image
    :kernel_size: gaussian kernel size (window size of image)
    :kernel_sigma: standard deviation of gaussian kernel
    :return: (ssim_score, cs_map) — mean SSIM and mean contrast-structure term
    """
    # Accept torch tensors as well as numpy arrays; everything below runs in
    # numpy.  NOTE(review): image1 is detach()ed but image2 is not — image2
    # presumably never carries gradients; confirm at the call sites.
    if type(image1) is not np.ndarray:
        image1 = image1.detach().cpu().numpy()
    if type(image2) is not np.ndarray:
        image2 = image2.cpu().numpy()
    # filter size should not be larger than height or width of images.
    filter_size = min(kernel_size, config.height, config.width)
    if filter_size:
        # Scale sigma down proportionally if the window had to shrink.
        filter_sigma = filter_size * kernel_sigma / kernel_size
    else:
        filter_sigma = 0
    if kernel_size:
        # Build a Gaussian window broadcastable over the image rank
        # (4-D batched or 3-D single image); np.reshape also converts the
        # torch kernel returned by fspecial_gauss into a numpy array.
        if len(image1.shape) == 4:
            window = np.reshape(fspecial_gauss(filter_size, filter_sigma), newshape=(1, 1, filter_size, filter_size))
        elif len(image1.shape) == 3:
            window = np.reshape(fspecial_gauss(filter_size, filter_sigma), newshape=(1, filter_size, filter_size))
        # Local means and raw second moments via FFT convolution.
        mu1 = signal.fftconvolve(image1, window, mode='same')
        mu2 = signal.fftconvolve(image2, window, mode='same')
        sigma11 = signal.fftconvolve(image1*image1, window, mode='same')
        sigma22 = signal.fftconvolve(image2*image2, window, mode='same')
        sigma12 = signal.fftconvolve(image1*image2, window, mode='same')
    else:
        # empty gaussian blur kernel, no need to convolve
        mu1 = image1
        mu2 = image2
        sigma11 = image1 * image1
        sigma22 = image2 * image2
        sigma12 = image1 * image2
    mu_11 = mu1 * mu1
    mu_22 = mu2 * mu2
    mu_12 = mu1 * mu2
    # Convert raw second moments into (co)variances.
    sigma11 -= mu_11
    sigma22 -= mu_22
    sigma12 -= mu_12
    # Standard SSIM stabilisation constants.
    k_1, k_2 = 0.01, 0.03
    L = 255
    # bitdepth of image, 2 ^ (bits per pixel) - 1
    c_1 = (k_1 * L) ** 2
    c_2 = (k_2 * L) ** 2
    v_1 = 2.0 * sigma12 + c_2
    v_2 = sigma11 + sigma22 + c_2
    ssim_score = np.mean(((2.0 * mu_12 + c_1) * v_1) / ((mu_11 + mu_22 + c_1) * v_2))
    cs_map = np.mean(v_1 / v_2)
    return ssim_score, cs_map
def multi_scale_ssim(image1, image2, kernel_size=11, kernel_sigma=1.5, weights=None):
    """
    Multi-scale SSIM: single-scale SSIM evaluated on a dyadic pyramid and
    combined with per-scale weights.

    :image1: (batch_size, channels, height, width) style image
    :image2: (batch_size, channels, height, width) enhanced image
    :kernel_size: gaussian kernel size (window size of image)
    :kernel_sigma: standard deviation of gaussian kernel
    :weights: weights of the different scales (defaults to 5 scales)
    :return: scalar MS-SSIM score
    """
    if type(image1) is not np.ndarray:
        image1 = image1.detach().cpu().numpy()
    if type(image2) is not np.ndarray:
        image2 = image2.cpu().numpy()
    ms_ssim = np.array([])
    cs_map = np.array([])
    if weights:
        weights = np.array(weights)
    else:
        weights = np.array([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
    levels = len(weights)
    # 2x2 box filter applied before each downsampling step.
    # NOTE(review): the 3-D kernel divides by 3.0 although it has four taps;
    # confirm whether that normalisation is intentional.
    if len(image1.shape) == 4:
        downsample = np.ones((1, 1, 2, 2)) / 4.0
    elif len(image1.shape) == 3:
        downsample = np.ones((1, 2, 2)) / 3.0
    for i in range(levels):
        # SSIM and contrast-structure term at the current scale.
        ssim_score, cs = ssim(image1, image2, kernel_size, kernel_sigma)
        ms_ssim = np.append(ms_ssim, ssim_score)
        cs_map = np.append(cs_map, cs)
        # Low-pass filter, then keep every second pixel (next coarser scale).
        downsample_filtered = [convolve(image, downsample, mode='reflect') for image in [image1, image2]]
        if len(image1.shape) == 4:
            image1, image2 = [image[:, :, ::2, ::2] for image in downsample_filtered]
        elif len(image1.shape) == 3:
            image1, image2 = [image[:, ::2, ::2] for image in downsample_filtered]
    # Product of the weighted cs terms at the coarser scales, times the
    # weighted SSIM at the final (coarsest) scale.
    return np.prod(cs_map[0:levels-1] ** weights[0:levels-1]) * (ms_ssim[levels-1] ** weights[levels-1])
| [
"dyq4430@126.com"
] | dyq4430@126.com |
0f57e7973dc5f0875edca862cb0d535aa7d86cbd | 725caa41dd51620e8deeae5ff322e965b9cc9bd1 | /pythonStudy/com/pallasli/study/MyClass.py | 45d4b8a40d12d35bb05000e40c7a41539adb7e6f | [] | no_license | PallasLi/python | 474d28864b608643cde2a27712deac591dff151e | 791ac097b708152333da6930c56c42684b3b91ce | refs/heads/master | 2019-07-14T23:00:54.981551 | 2016-06-24T13:52:07 | 2016-06-24T13:52:07 | 54,302,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | '''
Created on 2016年3月17日
@author: lyt
'''
# ''' and """ can hold formatted (multi-line) strings
# ' and " hold plain, unformatted strings
class MyClass(object):
    "Description"
    '''
    classdocs
    '''
    # NOTE: only the first string literal above becomes MyClass.__doc__;
    # the triple-quoted string after it is just an expression statement.
    def __init__(self, params):
        '''
        Constructor
        '''
    def minus(self):
        # Prints the instance itself; no arithmetic is performed.
        print(self)
    @staticmethod
    def add( ):
        "Method description"
        # Demonstrates catching several specific exception types in turn.
        try:
            c=1/1
            pass
        except RuntimeError as e:
            print(e)
        except IOError as e:
            print(e)
# Show the docstrings attached to the class and its members.
print(MyClass.__doc__)
print(MyClass.add.__doc__)
print(MyClass.__init__.__doc__) | [
"ytli1987@163.com"
] | ytli1987@163.com |
604d4f45540e9f060e211611c875869690951851 | cf540df0f31fdfa06eed34572d65261641988d63 | /general_plot.py | 2f2bc544a09cdc1d00b12593c73fa7dbee7f9051 | [] | no_license | cmilke/HH4b_vbf_analysis | bcc0e45e511b538ce5a6c262b1cd88b1d52ff97e | 660219fa95f88351b14eb4c1624ca5f2c9036b42 | refs/heads/master | 2022-11-19T01:57:05.561780 | 2020-07-22T17:34:03 | 2020-07-22T17:34:03 | 279,408,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,116 | py | #!/usr/bin/python3
import sys
import argparse
import pickle
import numpy
import itertools
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from uproot_methods import TLorentzVector as LV
from uproot_wrapper import event_iterator
from plotting_utils import plot_wrapper
import analysis_utils as autils
from tagger_methods import Tagger_options as Tag
# C2V coupling values to compare; -1 is used throughout as the placeholder
# for the ggF background sample (see _cvv_labelmaker below), 1 for SM VBF.
#_cvv_vals = [0, 0.5, 1, 1.5, 2, 4]
_cvv_vals = [-1,1]
# VBF HH->bbbb MC samples keyed by C2V value; only the SM sample is enabled.
_VBF_samples = {
#    0  : 'MC16d_VBF-HH-bbbb_cvv0',
#    0.5: 'MC16d_VBF-HH-bbbb_cvv0p5',
    1  : 'MC16d_VBF-HH-bbbb_cvv1',
#    1.5: 'MC16d_VBF-HH-bbbb_cvv1p5',
#    2  : 'MC16d_VBF-HH-bbbb_cvv2',
#    4  : 'MC16d_VBF-HH-bbbb_cvv4'
}
# Plot names handed to plot_wrapper as a blacklist — presumably suppressed
# when drawing; confirm the exact matching rule in plotting_utils.
_blacklist = [
    'Deta_of_VBF_mjjmax_mass',
    #'roc',
    #'mjjmax_',
    'roc_example',
    'rocs_2jet', 'rocs_3jet',
    'fox-wolfram',
    'centrality'
]
_plots = plot_wrapper(_blacklist)
# Legend label for a sample: 'ggF' for the cvv == -1 placeholder, otherwise
# the C2V coupling value itself.
_cvv_labelmaker = lambda cvv: 'ggF' if cvv==-1 else '$C_{2V}$='f'{cvv}'
_plots.add_hist1('num_VBF_candidates', 'Number of Available VBF Candidates',
[-1,1], 8, (0,8), xlabel='Number of Jets', normalize=False,
labelmaker=_cvv_labelmaker)
_plots.add_hist1('num_non_btagged', 'Number of non-B-Tagged Jets',
[-1,1], 8, (0,8), xlabel='Number of Jets', normalize=False,
labelmaker=_cvv_labelmaker)
_plots.add_hist1('pt', '$p_T$ Distribution of VBF Candidate Jets',
[''], 100, (0,200), xlabel='$p_T$ (GeV)', normalize=False)
_plots.add_hist1('eta', '$\eta$ Distribution of VBF Candidate Jets',
[''], 100, (-6,6), xlabel='$\eta$', normalize=False)
_plots.add_hist1('mjjmax', 'Leading $M_{jj}$ Distribution of VBF Candidate Jets',
[-1,1], 100, (0,3000), xlabel='Leading $M_{jj}$', normalize=False,
labelmaker=_cvv_labelmaker)
_plots.add_hist1('mjjmax_cumulative', 'Leading $M_{jj}$ Distribution of VBF Candidate Jets,\nCumulatively Summed',
[-1,1], 100, (0,3000), xlabel='Leading $M_{jj}$', normalize=False, cumulative=-1,
labelmaker=_cvv_labelmaker)
_plots.add_hist1('mjjmax_cumulative_norm', 'Leading $M_{jj}$ Distribution of VBF Candidate Jets,\nCumulatively Summed and Normalized',
[-1,1], 100, (0,3000), xlabel='Leading $M_{jj}$', cumulative={-1:1,1:-1},
labelmaker=_cvv_labelmaker)
_plots.add_hist1('mjjmax_Deta3_cumulative_norm', 'Leading $M_{jj}$ Distribution of VBF Candidate Jets (Post $\Delta \eta > 3$ Cut) ,\nCumulatively Summed and Normalized',
[-1,1], 1000, (-2,3000), xlabel='Leading $M_{jj}$', cumulative={-1:1,1:-1},
labelmaker=_cvv_labelmaker)
_plots.add_hist1('BDT1_cumulative_norm', 'Cumulative Distribution of BDT 1 Response',
[-1,1], 10000, (-1,1), xlabel='BDT Response', cumulative={-1:1,1:-1},
zooms=[((-.1,.2),(0,1)), ((-0.03,-0.02),(0.8,0.82))], labelmaker=_cvv_labelmaker)
_plots.add_hist1('centrality3jet', 'Centrality of 3-Jet Events',
[-1,1], 20, (-1,1), xlabel=r'$2 \times (\frac{\eta_C - \eta_L}{\eta_R - \eta_L}) - 1$',
labelmaker=_cvv_labelmaker)
_plots.add_hist1('centralityGT3jet', 'Centrality of Events with $\geq$ Four Jets',
[-1,1], 20, (-1,1), xlabel=r'$2 \times (\frac{\eta_C - \eta_L}{\eta_R - \eta_L}) - 1$',
labelmaker=_cvv_labelmaker)
_plots.add_hist1('centralityPtGT3jet', 'Centrality of Events with $\geq$ Four Jets,\nJets Chosen by Highest $p_T$',
[-1,1], 20, (-1,1), xlabel=r'$2 \times (\frac{\eta_C - \eta_L}{\eta_R - \eta_L}) - 1$',
labelmaker=_cvv_labelmaker)
_plots.add_hist1('centrality', 'Centrality of Events with $\geq$ Three Jets',
[-1,1], 20, (-1,1), xlabel=r'$2 \times (\frac{\eta_C - \eta_L}{\eta_R - \eta_L}) - 1$',
labelmaker=_cvv_labelmaker)
_fw_moments = [ fwi for fwi in range(11) ]
for fwi in _fw_moments:
_plots.add_hist1(f'fox-wolfram_{fwi}', f'Fox-Wolfram Moment {fwi} of All Non-B-Tagged Jets',
_cvv_vals, 100, (0,3), labelmaker=_cvv_labelmaker)
for mass in [0, 1000]:
_plots.add_hist1(f'Deta_of_VBF_mjjmax_mass{mass}', '$\Delta \eta$ Distribution of VBF Jets w/ $M_{jj}>$'f'{mass} GeV',
_cvv_vals, 40, (0,10), xlabel='$\Delta \eta$', normalize=False, labelmaker=_cvv_labelmaker)
_simple_taggers = ['mjjmax_Deta3', 'mjjmax']
#_BDT_taggers = ['BDT: mjj-Deta', 'BDT: mjj-Deta-FW', 'BDT: mjj-Deta-FW-Cent']#, 'BDT: mjjLSL_Deta_Cent_FW']
_BDT_taggers = ['BDT: mjj-Deta-FW']
_taggers = _simple_taggers + _BDT_taggers
_plots.add_roc('roc_example', 'Efficiency/Rejection Performance\nof Various VBF/ggF Discriminators', ['mjjmax'] )
#_plots.add_roc('rocs_base', 'Efficiency/Rejection Performance\nof Various VBF/ggF Discriminators', _taggers )
_plots.add_roc('rocs_weighted', 'Weighted Efficiency/Rejection Performance\nof Various VBF/ggF Discriminators', _taggers, zooms=[((0.2,0.6),(0.6,1))] )
_plots.add_roc('rocs_2jet', 'Efficiency/Rejection Performance of Various VBF/ggF Discriminators\nFor Events with 2 VBF Candidate Jets', _taggers)
_plots.add_roc('rocs_3jet', 'Efficiency/Rejection Performance of Various VBF/ggF Discriminators\nFor Events with 3 or More VBF Candidate Jets', _taggers)
#_plots['rocs'].add_marker('mjjmax_Deta3', 1000, annotation='1000 GeV', marker='.', color='red')
_plots['rocs_weighted'].add_marker('mjjmax_Deta3', 1000, annotation='1000 GeV', marker='.', color='blue')
_plots['rocs_weighted'].add_marker('BDT: mjj-Deta-FW', 0.3, annotation='', marker='.', color='green')
#_plots['rocs'].add_marker('mjjmax', 1000, annotation='1000 GeV', marker='.', color='blue')
#_plots['rocs_2jet'].add_marker('mjjSL', 735, annotation='735 GeV', marker='*', color='red')
#_plots['rocs_3jet'].add_marker('mjjSL', 735, annotation='735 GeV', marker='*', color='red')
# Branches read for each event; the 'jets' entry groups the per-jet
# VBF-candidate four-vector components.
_output_branches = [
    'run_number', 'event_number', 'mc_sf', 'ntag', 'njets',
    'n_vbf_candidates',
    ('jets', ['vbf_candidates_E', 'vbf_candidates_pT', 'vbf_candidates_eta', 'vbf_candidates_phi'])
]
# Also read every Fox-Wolfram moment branch used by the histograms above.
_output_branches+=[f'FoxWolfram{i}' for i in _fw_moments]
# Named functions instead of lambda assignments (PEP 8, E731), with docs.
def make_reco_vector(jet):
    """Four-vector (TLorentzVector) from a resolved reco-level jet's pt/eta/phi/E."""
    return LV.from_ptetaphie(jet['resolvedJets_pt'], jet['resolvedJets_eta'], jet['resolvedJets_phi'], jet['resolvedJets_E'])

def make_nano_vector(jet):
    """Four-vector (TLorentzVector) from a VBF-candidate jet's pt/eta/phi/E."""
    return LV.from_ptetaphie(jet['vbf_candidates_pT'], jet['vbf_candidates_eta'], jet['vbf_candidates_phi'], jet['vbf_candidates_E'])
def process_events(events, skip_num=0, bgd=False, cvv_value=-1):
    """Loop over ntuple events and fill the module-level `_plots` collection.

    Args:
        events: iterable of dict-like events (branch name -> value).
        skip_num: number of leading events to skip.
        bgd: True when processing the background sample (flips ROC labelling).
        cvv_value: signal coupling hypothesis used to key plots (-1 otherwise).

    NOTE(review): relies on module globals `_plots`, `_fw_moments`,
    `_simple_taggers`, `_BDT_taggers`, `Tag`, `itertools`, `numpy` — confirm
    they are defined earlier in the file.
    """
    basic_efficiency_count = [0,0,0]
    num_jets = [0]*20
    num_shared = 0
    num_not_shared = 0
    num_pt_matched = 0
    num_pt_not_matched= 0
    num_negative_weighted=0
    for event_index, event in enumerate(events):
        if event_index < skip_num: continue
        weight = event['mc_sf'][0]
        # Negative-weight events are skipped outright (counter left disabled).
        if weight < 0: continue #num_negative_weighted+=1
        vecs = [ make_nano_vector(jet) for jet in event['jets'] ]
        num_jets[len(vecs)] += 1
        _plots['num_non_btagged'].fill( len(vecs), cvv_value )
        _plots['num_VBF_candidates'].fill( event['n_vbf_candidates'], cvv_value )
        for fwi in _fw_moments: _plots[f'fox-wolfram_{fwi}'].fill(event[f'FoxWolfram{fwi}'], cvv_value)
        basic_efficiency_count[0] += weight
        # Handle Roc Curves
        # ROC fills only use the nominal signal (cvv == 1) or the background.
        if len(vecs) > 1 and (cvv_value == 1 or bgd):
            basic_efficiency_count[1] += weight
            # Deal with Simple Taggers
            for tagger in _simple_taggers:
                tag_value = Tag[tagger](vecs)
                #_plots['rocs'].fill( tag_value, bgd, tagger)
                _plots['rocs_weighted'].fill( tag_value, bgd, tagger, weight=weight)
                if len(vecs) == 2: _plots['rocs_2jet'].fill( tag_value, bgd, tagger)
                else: _plots['rocs_3jet'].fill( tag_value, bgd, tagger)
                if tagger == 'mjjmax_Deta3':
                    _plots['mjjmax_Deta3_cumulative_norm'].fill(tag_value, cvv_value, weight)
                    # 1000 (GeV, presumably) mjj cut for the efficiency counter.
                    if tag_value > 1000: basic_efficiency_count[2] += weight
                if tagger == 'mjjmax':
                    _plots['mjjmax'].fill(tag_value, cvv_value)
                    _plots['mjjmax_cumulative'].fill(tag_value, cvv_value)
                    _plots['mjjmax_cumulative_norm'].fill(tag_value, cvv_value)
                    _plots['roc_example'].fill(tag_value, bgd)
            # Deal with the not simple taggers
            #_plots['rocs'].fill( Tag['BDT1'](cvv_value, event_index), bgd, 'BDT1')
            #_plots['rocs_weighted'].fill( Tag['BDT1'](cvv_value, event_index), bgd, 'BDT1', weight=weight)
            for bdt in _BDT_taggers:
                bdt_value = Tag[bdt](event=event, vectors=vecs)
                #print(bdt_value, event[f'FoxWolfram1'])
                _plots['rocs_weighted'].fill( bdt_value, bgd, bdt, weight=weight)
                _plots['BDT1_cumulative_norm'].fill(bdt_value, cvv_value, weight)
        # Look into Centrality
        if len(vecs) == 3 and (cvv_value == 1 or bgd):
            etas = sorted([ v.eta for v in vecs ])
            # Centrality in [-1, 1]: position of the middle jet between the outer two.
            centrality = 2*(etas[1] - etas[0]) / (etas[2] - etas[0]) - 1
            _plots['centrality3jet'].fill(centrality, cvv_value)
            _plots['centrality'].fill(centrality, cvv_value)
        if len(vecs) > 3 and (cvv_value == 1 or bgd):
            # Pick the 3 jets from the two leading-mjj pairs (one jet may be shared).
            mjj_pairs = [ ( (vecs[i]+vecs[j]).mass, (i,j) ) for i,j in itertools.combinations(range(len(vecs)), 2) ]
            mjj_pairs.sort(reverse=True)
            chosen_jets = { i:vecs[i] for i in mjj_pairs[0][1] }
            possible_additions = [ (i,vecs[i]) for i in mjj_pairs[1][1] if i not in chosen_jets ]
            possible_additions.sort(key=lambda t: t[1].pt, reverse=True)
            if len(possible_additions) > 1: num_not_shared += 1
            else: num_shared += 1
            chosen_jets[ possible_additions[0][0] ] = possible_additions[0][1]
            etas = sorted([ jet.eta for jet in chosen_jets.values() ])
            centrality = 2*(etas[1] - etas[0]) / (etas[2] - etas[0]) - 1
            _plots['centralityGT3jet'].fill(centrality, cvv_value)
            _plots['centrality'].fill(centrality, cvv_value)
            # Alternative selection: simply the 3 highest-pt jets, for comparison.
            pt_chosen_jets = { i:vec for i,vec in enumerate( sorted(vecs, key=lambda v: v.pt, reverse=True)[:3] ) }
            etas = sorted([ jet.eta for jet in pt_chosen_jets.values() ])
            centrality = 2*(etas[1] - etas[0]) / (etas[2] - etas[0]) - 1
            _plots['centralityPtGT3jet'].fill(centrality, cvv_value)
            mjj_keys = sorted( chosen_jets.keys() )
            pt_keys = sorted( pt_chosen_jets.keys() )
            if mjj_keys == pt_keys: num_pt_matched += 1
            else: num_pt_not_matched += 1
        # Create Delta-eta of leading mjj pair distribution
        if not bgd and len(vecs) > 1:
            deta_mjj_list = [ ( (i+j).mass, abs(i.eta - j.eta) ) for i,j in itertools.combinations(vecs, 2) ]
            deta_mjj_list.sort() # Sort by mjj
            Deta_filtered = [ (mass,Deta) for mass, Deta in deta_mjj_list ] #if Deta > 3 ]
            for mass in [0,1000]:
                mass_filtered = [ pair for pair in Deta_filtered if pair[0] > mass ]
                if len(mass_filtered) > 0:
                    vbf_pair = mass_filtered[0]
                    _plots[f'Deta_of_VBF_mjjmax_mass{mass}'].fill(vbf_pair[1], cvv_value)
        if cvv_value == 1:
            for v in vecs:
                _plots['pt'].fill(v.pt)
                _plots['eta'].fill(v.eta)
    jet_counts = numpy.array(num_jets[0:10])
    #print(jet_counts)
    #for count,frac in enumerate(jet_counts/jet_counts.sum()): print(f'{count}: {frac*100:4.1f}')
    print(num_shared, num_not_shared)
    print(num_pt_matched, num_pt_not_matched)
    print(f'Negative={num_negative_weighted}')
    #print(basic_efficiency_count)
def extract_data(num_events, events_to_skip):
    """Run the event loop over each signal sample, then over the background."""
    # Signal: one pass per cvv coupling hypothesis.
    for coupling, sample_name in _VBF_samples.items():
        signal_stream = event_iterator(autils.NanoNtuples[sample_name], 'VBF_tree', _output_branches, num_events)
        process_events(signal_stream, skip_num=events_to_skip, cvv_value=coupling)
    # Background: a single ggF HH->bbbb pass, flagged with bgd=True.
    background_stream = event_iterator(autils.NanoNtuples['MC16d_ggF-HH-bbbb'], 'VBF_tree', _output_branches, num_events)
    process_events(background_stream, skip_num=events_to_skip, bgd=True)
def draw_distributions():
    """Parse CLI flags, optionally re-extract the data, and render all plots.

    Flags:
        -r  refresh: re-run extraction instead of loading the pickle cache
        -p  print only (skip plotting)
        -n  number of events to run over (<= 0 means all)
        -s  number of events to skip
    """
    parser = argparse.ArgumentParser()
    parser.add_argument( "-r", required = False, default = False, action = 'store_true', help = "Refresh cache",)
    parser.add_argument( "-p", required = False, default = False, action = 'store_true', help = "Print only, do not plot",)
    parser.add_argument( "-n", required = False, default = 1e4, type=float, help = "How many events to run over",)
    parser.add_argument( "-s", required = False, default = 0, type=float, help = "How many events to skip",)
    args = parser.parse_args()
    refresh = args.r
    num_events = int(args.n) if args.n > 0 else None
    events_to_skip = int(args.s)
    cache = {}
    cache_file = '.cache/general_plots.p'
    # NOTE(review): extract_data fills the global _plots rather than `cache`;
    # presumably plot_all(refresh, cache) repopulates `cache` before it is
    # dumped below — confirm against the plotter implementation.
    if refresh: extract_data(num_events, events_to_skip)
    else: cache = pickle.load( open(cache_file, 'rb') )
    if not args.p:
        print('Data extracted, plotting...')
        _plots.plot_all(refresh, cache)
        if refresh: pickle.dump( cache, open(cache_file, 'wb') )
draw_distributions()
| [
"chrisdmilke@gmail.com"
] | chrisdmilke@gmail.com |
5f18c8ae40409e78a1614a7c2b854124d6ce62d5 | 43366a771f049270a59377c695674e58500ce8f9 | /main_from_user.py | 9b1ef9fa625bb9b1251fa2276ca5c9b1ae18d0bb | [] | no_license | harrisonluft/Point-in-Polygon | 02b47405e6483967356cb78ad93f4b1b51907abb | 2114f2c2a87d4f422cb8da78eff5ed5c74404607 | refs/heads/master | 2023-05-30T09:22:00.813389 | 2021-06-19T20:01:20 | 2021-06-19T20:01:20 | 309,147,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,420 | py | from plotter import Plotter
def user_input():
    """Prompt the user for one "x, y" coordinate pair.

    Re-prompts until the input parses as two comma-separated floats.
    Returns a list containing a single [x, y] point.
    """
    data_list = []
    while True:
        try:
            user_point = input('Please input coordinate as x, y: ')
            user_point_float = [float(user_point.split(',')[0]), float(user_point.split(',')[1])]
            # BUG FIX: append before leaving the loop. The original code broke
            # out of the loop first, so the parsed point was never stored and
            # an empty list was returned.
            data_list.append(user_point_float)
            break
        except (ValueError, IndexError):
            # ValueError: non-numeric field; IndexError: missing comma.
            print('Invalid input, please try again')
    return data_list
def import_data(path):
    """Read a CSV of points (header row, then id,x,y rows).

    Returns a list of [x, y] float pairs, one per data row; the first
    (header) line is skipped and the leading id column ignored.
    """
    data_list = []
    with open(path, "r") as f:
        next(f, None)  # skip the header row instead of slicing a stored list
        for line in f:
            fields = line.split(',')
            # Columns: [0]=id (ignored), [1]=x, [2]=y; float() tolerates the
            # trailing newline on the last field.
            data_list.append([float(fields[1]), float(fields[2])])
    return data_list
def export_data(path, exports):
    """Write classification results to `path` as CSV.

    Output format (kept identical to the original): a header line
    "ID,classifications,\n" followed by "<1-based id>,<label>,\n" per entry.
    """
    with open(path, "w") as f:
        f.write(','.join(['ID', 'classifications', '\n']))
        # enumerate(start=1) replaces the original manually built id list.
        for row_id, label in enumerate(exports, start=1):
            f.write(','.join([str(row_id), label, '\n']))
def minimum(values):
    """Return the smallest element of a non-empty sequence."""
    result = values[0]
    for candidate in values:
        if candidate < result:
            result = candidate
    return result
def maximum(values):
    """Return the largest element of a non-empty sequence."""
    result = values[0]
    for candidate in values:
        if candidate > result:
            result = candidate
    return result
# adapted from https://www.kite.com/python/answers/how-to-determine-if-a-point-is-on-a-line-segment-in-python
# point/slope test plus a bounding-box check to confirm the point lies between the endpoints
def on_line_seg(x1, y1, x2, y2, input_x, input_y):
    """Return True if (input_x, input_y) lies on segment (x1,y1)-(x2,y2)."""
    if x1 == x2:
        # Vertical segment: x must match exactly and y must sit between endpoints.
        return x2 == input_x and min(y1, y2) <= input_y <= max(y1, y2)
    gradient = (y2 - y1) / (x2 - x1)
    if input_y - y1 != gradient * (input_x - x1):
        return False  # not on the infinite line at all
    # On the line; now restrict to the segment's bounding box.
    return (min(x1, x2) <= input_x <= max(x1, x2)) and (min(y1, y2) <= input_y <= max(y1, y2))
def overlap_check(x1, y1, x2, y2, x3, y3, x4, y4):
    """Return True when two horizontal segments are collinear and overlap in x."""
    all_same_y = y1 == y2 == y3 == y4        # both segments on one horizontal line
    x_ranges_touch = x1 <= x4 and x2 >= x3   # x-intervals intersect
    return all_same_y and x_ranges_touch
# Adapted from https://rosettacode.org/wiki/Find_the_intersection_of_two_lines
# and Torben Jansen from https://observablehq.com/@toja/line-box-intersection published 1 Oct 2018
def get_intersect(x1, y1, x2, y2, x3, y3, x4, y4):
    """Intersection (x, y) of segments (x1,y1)-(x2,y2) and (x3,y3)-(x4,y4).

    Returns None for parallel/coincident lines or when the crossing point
    falls outside either segment.
    """
    denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
    if not denom:
        return None  # parallel or coincident lines
    t = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / denom
    u = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / denom
    # Both parameters must lie within [0, 1] for an on-segment crossing.
    if not (0 <= t <= 1 and 0 <= u <= 1):
        return None
    return x1 + t * (x2 - x1), y1 + t * (y2 - y1)
# classifies how the query point's horizontal ray relates to one polygon edge
def intersect_check(x1, y1, x2, y2, x3, y3, x4, y4):
    """Return 'coincident', an (x, y) intersection tuple, or None.

    Coincident (overlapping horizontal) segments are flagged first so the
    caller can apply its special orientation-based counting rules.
    """
    if overlap_check(x1, y1, x2, y2, x3, y3, x4, y4):
        return 'coincident'
    return get_intersect(x1, y1, x2, y2, x3, y3, x4, y4)
# function to translate list of intersection types into count of ray crossings.
def counter(line, line_plus_one, line_plus_two, point, point_plus_one, point_plus_two, point_minus_one, n_count):
    """Update the ray-crossing count `n_count` for one edge's classification.

    `point*` are the intersection results (None, (x, y) tuple, 'coincident',
    'boundary' or 'outside') for the current edge and its neighbours; `line*`
    are the corresponding polygon edges, used to compare edge orientations so
    that vertex hits and coincident runs are counted at most once.
    Returns the updated count.
    """
    if point is None: # if no intersection do not add to count
        pass
    elif point_plus_one == 'coincident': # if intersects coincident then test orientations
        max_y1 = max(line[1][1], line[0][1]) # orientation for line before coincidence
        max_y2 = max(line_plus_two[1][1], line_plus_two[0][1]) # orientation for line after coincidence
        if (max_y1 > point[1] and max_y2 > point_plus_two[1]) or \
                (max_y1 == point[1] and max_y2 == point_plus_two[1]): # if same orientation count 0
            pass
        else: # if not, count +1
            n_count += 1
    elif point == point_plus_one and point != 'boundary': # vertex identification
        max_y1 = max(line[1][1], line[0][1]) # orientation for line 1
        max_y2 = max(line_plus_one[1][1], line_plus_one[0][1]) # orientation for line i + 1
        if (max_y1 > point[1] and max_y2 > point_plus_one[1]) or \
                (max_y1 == point[1] and max_y2 == point_plus_one[1]): # if same orientation count 0
            pass
        else: # count +1 if opposing lines
            n_count += 1
    # ignore cases that would cause double counting
    elif (point == point_minus_one) or point == 'coincident' or point_minus_one == 'coincident':
        pass
    else:
        n_count += 1 # if ordinary intersection +1 to count
    return n_count
class Poly:
    """Polygon under test: vertex lists, edges, MBR and ray-casting results."""
    def __init__(self, poly_points):
        # poly_points: ordered list of [x, y] vertex pairs.
        self.poly_points = poly_points
        self.values_list()
        self.lines_list()
    def values_list(self):
        """Split the vertex pairs into parallel x and y coordinate lists."""
        self.x_values = []
        self.y_values = []
        for i in range(len(self.poly_points)):
            self.x_values.append(self.poly_points[i][0])
            self.y_values.append(self.poly_points[i][1])
    def lines_list(self):
        """Build the closed edge list as ((x1, y1), (x2, y2)) tuples."""
        self.lines = []
        p1 = self.x_values[0], self.y_values[0]
        for i in range(len(self.poly_points)):
            if i == 0:
                continue
            else:
                self.lines.append(tuple([p1, (self.x_values[i], self.y_values[i])]))
                p1 = self.x_values[i], self.y_values[i]
        # Closing edge back to the first vertex.
        self.lines.append(tuple([p1, (self.x_values[0], self.y_values[0])]))
    # generate minimum bounding rectangle for first-pass inclusion/exclusion of input points
    def mbr(self):
        self.min_x = minimum(self.x_values)
        self.min_y = minimum(self.y_values)
        self.max_x = maximum(self.x_values)
        self.max_y = maximum(self.y_values)
    def classify_mbr(self, x, y):
        """Return 'inside'/'outside' relative to the bounding rectangle only."""
        if (x <= self.max_x) and (y <= self.max_y) and (x >= self.min_x) and (y >= self.min_y):
            return 'inside'
        else:
            return 'outside'
    # generate list of mbr results, ray-line intersections, and coincidence for each point
    def rca_ray(self, ray_lines):
        """For each query ray, classify it against every polygon edge.

        Stores per-point lists in self.results; entries are 'outside',
        'boundary', 'coincident', an (x, y) tuple, or None.
        """
        res = []
        for item in ray_lines:
            temp = []
            if self.classify_mbr(item[0][0], item[0][1]) == 'outside': # determine inside/outside for MBR
                temp.append('outside')
            elif item in self.poly_points: # assign boundary points if in polygon boundary points
                temp.append('boundary')
            else:
                for line in self.lines: # identify points residing on polygon borders
                    if on_line_seg(line[0][0], line[0][1], line[1][0], line[1][1],
                                   item[0][0], item[0][1]):
                        temp.append('boundary')
                    else: # identify intersecting points and coincident lines
                        temp.append(intersect_check(line[0][0], line[0][1], line[1][0], line[1][1],
                                                    item[0][0], item[0][1], item[1][0], item[1][1]))
            res.append(temp)
        self.results = res
    # test orientation of intersections and coincidence and count line crossings
    def rca_count(self):
        """Reduce each per-edge result list to a ray-crossing count.

        -1 marks a boundary point; 0 an MBR-excluded point; otherwise the
        parity of the count decides inside/outside in define_label().
        """
        count_list = []
        for item in self.results:
            n = 0
            for i in range(len(item)):
                # specify conditions for end of list since counter function references i + 1 and i + 2 points/lines
                if i == (len(item)-2):
                    n = counter(self.lines[i], self.lines[i + 1], self.lines[0],
                                item[i], item[i + 1], item[0], item[i - 1], n)
                elif i == (len(item)-1) and len(item) != 1:
                    n = counter(self.lines[i], self.lines[0], self.lines[1],
                                item[i], item[0], item[1], item[i - 1], n)
                elif i == 0: # specify condition for first item in list since counter references i - 1
                    if item[i] == 'outside':
                        n = 0
                    else:
                        n = counter(self.lines[i], self.lines[i + 1], self.lines[i + 2],
                                    item[i], item[i + 1], item[i + 2], item[-1], n)
                else: # general case for points not at end of list
                    n = counter(self.lines[i], self.lines[i + 1], self.lines[i + 2],
                                item[i], item[i + 1], item[i + 2], item[i - 1], n)
            # Any boundary hit overrides the crossing count with the -1 sentinel.
            for i in range(len(item)):
                if item[i] == 'boundary':
                    n = -1
                else:
                    pass
            count_list.append(n)
        self.count = count_list
    def define_label(self):
        """Translate counts into labels: odd=inside, even=outside, -1=boundary."""
        label = []
        for n in self.count:
            if (n % 2) == 0:
                label.append('outside')
            elif n < 0:
                label.append('boundary')
            elif (n % 2) != 0:
                label.append('inside')
        self.point_label = label
# Creating input point class
class Points:
    """Container for the query points to be classified."""
    def __init__(self, points):
        # points: list of [x, y] float pairs.
        self.points = points
    def get_point(self, i):
        """Return point i as an (x, y) tuple."""
        return self.points[i][0], self.points[i][1]
    def ray_lines(self, mbr_max_x):
        """Build a horizontal ray for each point, ending right of the polygon MBR.

        NOTE(review): this assigns self.ray_lines, shadowing the method itself
        after the first call; main() relies on reading the attribute afterwards,
        so do not rename either without updating the caller.
        """
        self.rca_x = mbr_max_x + 1
        self.ray_lines = []
        for i in range(len(self.points)):
            self.ray_lines.append(tuple([(self.points[i][0], self.points[i][1]), (self.rca_x, self.points[i][1])]))
        return self.ray_lines
def main(polygon_path, output_path):
    """End-to-end run: load polygon CSV, prompt for a point, classify, export, plot."""
    plot = Plotter()
    # import data
    poly_points = import_data(polygon_path)
    points = user_input()
    # init Polygon class
    polygon = Poly(poly_points)
    # create bounding box for polygon
    polygon.mbr()
    # init Point class
    input_points = Points(points)
    # create ray's for each point based on max x value of polygon bounding box
    input_points.ray_lines(polygon.max_x)
    # generate list with intersections, collinear instances, boundaries and MBR tests
    # (after the call above, input_points.ray_lines is the list attribute)
    polygon.rca_ray(input_points.ray_lines)
    # count based on elements in list: +1 for plain intersection,
    # +0 for same side vertex/coincident instance,
    # and +1 for dual side vertex/coincident instance
    # borders are given as -1,
    # outside mbr is given 0
    polygon.rca_count()
    # apply labels to counts
    polygon.define_label()
    # export point result
    export_data(output_path, polygon.point_label)
    # plot
    plot.add_polygon(polygon.x_values, polygon.y_values)
    for i in range(len(input_points.points)):
        plot.add_line(input_points.ray_lines[i][0][0], input_points.ray_lines[i][1][0],
                      input_points.ray_lines[i][0][1], input_points.ray_lines[i][1][1])
        plot.add_point(input_points.points[i][0], input_points.points[i][1], kind=polygon.point_label[i])
    plot.show()
if __name__ == '__main__':
    # BUG FIX: the original called main(POLYGON CSV HERE, OUTPUT CSV HERE)
    # with bare placeholder tokens, which is a SyntaxError and prevented the
    # module from even importing. Quote the placeholders; the user must
    # replace them with real file paths before running.
    main('POLYGON CSV HERE',
         'OUTPUT CSV HERE')
| [
"harrisonluft@gmail.com"
] | harrisonluft@gmail.com |
cf4eed4c6955b31983a5e1ef9550ccef9c7abb99 | 0af30c2e3ddcc80a19ea9cfaad9d7e1fedf8b876 | /210127/다이나믹 프로그래밍/효율적인 화폐 구성/김채린.py | 314b431313f5a678f4a8022697364c2068be95a8 | [] | no_license | winterash2/algorithm_study_2021_1 | d1cd6077f71f68e7fc3eb6dfae7b2cc220885e4c | c1fee62c7e5e560c3bf7ae5e6166866d0147f23f | refs/heads/master | 2023-04-02T20:11:04.169856 | 2021-04-05T11:18:22 | 2021-04-05T11:18:22 | 327,563,535 | 1 | 2 | null | 2021-01-24T14:17:40 | 2021-01-07T09:28:08 | Python | UTF-8 | Python | false | false | 121 | py | import sys
input = sys.stdin.readline
n, m=map(int,input().split())
d=[]
for i in range(n):
d.append(int(input()))
| [
"61822411+zzerii@users.noreply.github.com"
] | 61822411+zzerii@users.noreply.github.com |
f165385bdee69eba491eb899ed659d42f21e8653 | 58d690aa25417bd71ea3c9deab273bffc72be6ba | /src/m3u8_parser.py | 931adacda1ee23fefc2fe3e691fffa7836bc4f3f | [
"MIT"
] | permissive | cu1455/uddd_hls_download | 7db994005ca692cd1a326bea0c0a6eb29e91fe38 | 8e5a4290ed9e30ec8d4bbc4fca0cd5bd4eb5513b | refs/heads/main | 2023-06-16T05:04:21.889763 | 2021-07-12T20:58:03 | 2021-07-12T20:58:03 | 382,706,315 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,012 | py | # -*- coding: utf-8 -*-
import sys
import requests
from urllib.parse import urlparse
# Fallback desktop Chrome user-agent, applied when the caller supplies none.
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
class M3U8():
    """Downloader/parser for HLS playlists, both master and media m3u8 files."""
    # Constructor
    def __init__(self, inputURL, header, cookies, proxies, parsedHeader=None, parsedCookies=None, parsedProxies=None):
        """Store request options and initialise parse-result attributes.

        `header`/`cookies` are "k=v; k2=v2" strings; the `parsed*` arguments
        are pre-built dicts that, when given, override the string forms.
        """
        # Common attributes
        self.fullURL = inputURL # Eg. https://example.com/path1/path2/playlist.m3u8?Key-Pair-Id=ABCDE12345
        self.generalURL = '' # Eg. https://example.com/path1/path2/
        self.tokens = [] # Tokens of downloaded file
        self.type = '' # master or media
        # For master m3u8 file
        self.masterINFO = [] # List of dicts with subUri, resolution and bandwidth
        # For media m3u8 file
        self.playlistType = '' # Can be directly read or manually determined (if not specified), VOD or EVENT
        self.targetDuration = '' # Maximum length for each ts segment
        self.mediaSequence = '' # Current media sequence
        self.ts = [] # List of dicts with each segments' uri and length
        # Optinoal attribute for sub m3u8
        self.keys = [] # List of dicts with encryptMethod, keyURI, key, iv
        # User option for getting file
        self.headers = {}
        self.cookies = {}
        self.proxies = {}
        # Parsing user option
        if proxies:
            self.proxies = {'https':proxies}
        if header:
            try:
                headersTokens = header.split('; ')
                for headersToken in headersTokens:
                    self.headers[headersToken.split('=')[0]] = headersToken.split('=')[1]
            except:
                print('[ERROR] Headers in invalid format')
                sys.exit()
        if cookies:
            try:
                cookiesTokens = cookies.split('; ')
                for cookieToken in cookiesTokens:
                    self.cookies[cookieToken.split('=')[0]] = cookieToken.split('=')[1]
            except:
                print('[ERROR] Cookies in invalid format')
                sys.exit()
        if parsedHeader:
            self.headers = parsedHeader
        if parsedCookies:
            self.cookies = parsedCookies
        if parsedProxies:
            self.proxies = parsedProxies
        # NOTE(review): this replaces ALL headers with just the default UA when
        # no user-agent was given; likely intended to only set the one key — confirm.
        if 'user-agent' not in self.headers:
            self.headers = {'user-agent': DEFAULT_USER_AGENT}
    # Download file and parse into tokens
    def get_tokens(self):
        """Fetch the playlist and return its lines; raises on HTTP 403/404."""
        rawTask = requests.get(self.fullURL, headers=self.headers, cookies=self.cookies, proxies=self.proxies,timeout=10.00)
        task = rawTask.content.decode('utf-8')
        result = task.split('\n') # Seperate by line
        if rawTask.status_code == 403:
            raise Exception('Forbidden')
        elif rawTask.status_code == 404:
            raise Exception('Not Found')
        return result
    # Determine type of m3u8 file
    def get_type(self):
        """Return 'media' if any #EXTINF tag is present, else 'master'."""
        for token in self.tokens:
            if token.startswith('#EXTINF:'):
                return 'media'
        return 'master'
    # General funtion for getting from given url
    def parse_m3u8(self, operation=None):
        """Download and parse the playlist; dispatch to the master/media parser.

        With operation='update' a failed download returns False instead of
        exiting, so a live-download loop can keep polling.
        """
        # If just listening for changes while downloading, the program should not exit
        # Return false when url is not available while downloading, true otherwise
        try:
            self.tokens = self.get_tokens()
        except:
            if operation == 'update':
                return False
            else:
                print('[ERROR] Provided URL is invalid or expired.')
                sys.exit()
        if (self.tokens[0] != '#EXTM3U'):
            print('[ERROR] The file is not a valid m3u8 file.')
            sys.exit()
        # Base URL = everything up to (and including) the last '/' before '.m3u8'.
        firstM3U8 = self.fullURL.find('.m3u8')
        lastSlash = self.fullURL.rfind('/',0,firstM3U8)
        self.generalURL = self.fullURL[0:lastSlash+1]
        self.type = self.get_type()
        if self.type == 'master':
            self.parse_master()
        else:
            self.parse_media()
        return True
    # Parse master m3u8 file
    def parse_master(self):
        """Collect each variant stream's sub-URI, resolution and bandwidth."""
        tokenLength = len(self.tokens)
        i = 1
        result = []
        while i < tokenLength:
            token = self.tokens[i]
            splitedToken = token.split(':')
            if splitedToken[0] != '#EXT-X-STREAM-INF':
                i += 1
                continue
            # URI
            # The variant URI is the line immediately after the STREAM-INF tag.
            subURI = self.tokens[i+1]
            streamInfs = splitedToken[1].split(',')
            bandWidth = '-'
            resolution = '-'
            for streamInf in streamInfs:
                if '=' not in streamInf:
                    continue
                infToken = streamInf.split('=')
                attribute = infToken[0]
                attributeValue = infToken[1]
                if attribute == 'BANDWIDTH':
                    bandWidth = attributeValue
                elif attribute == 'RESOLUTION':
                    resolution = attributeValue
            result.append({'subURI':subURI,'resolution':resolution,'bandWidth':bandWidth})
            i += 2
        self.masterINFO = result
    # Parse media m3u8 file
    def parse_media(self):
        """Collect segment URIs/lengths, playlist metadata and AES-128 keys."""
        tokenLength = len(self.tokens)
        i = 1
        self.playlistType = 'EVENT'
        while i < tokenLength:
            token = self.tokens[i]
            splitedToken = token.split(':')
            attribute = splitedToken[0]
            if attribute == '#EXT-X-ENDLIST':
                self.playlistType = 'VOD'
            elif attribute != '':
                try:
                    attributeValue = splitedToken[1]
                except IndexError:
                    pass
                if attribute == '#EXT-X-PLAYLIST-TYPE':
                    self.playlistType = attributeValue
                elif attribute == '#EXT-X-TARGETDURATION':
                    self.targetDuration = attributeValue
                elif attribute == '#EXT-X-MEDIA-SEQUENCE':
                    self.mediaSequence = attributeValue
                # NOTE(review): duplicate of the first '#EXT-X-PLAYLIST-TYPE'
                # branch above — this elif is unreachable; confirm intent.
                elif attribute == '#EXT-X-PLAYLIST-TYPE':
                    self.type = attributeValue
                elif attribute == '#EXT-X-KEY':
                    keyTokens = token.split(',')
                    for keytoken in keyTokens:
                        keyAttribute = keytoken.split('=')
                        keyAttribute1 = keyAttribute[0]
                        keyAttribute2 = keyAttribute[1]
                        if 'METHOD' in keyAttribute1:
                            encryptMethod = keyAttribute2
                        elif 'URI' in keyAttribute1:
                            keyURI = keyAttribute2[1:-1]
                        elif 'IV' in keyAttribute1:
                            if len(keyAttribute2) < 34:
                                # NOTE(review): 'd' format applied to a str slice
                                # raises ValueError; likely int(..., 16) intended — confirm.
                                iv = '{:0>32d}'.format(keyAttribute2[2:])
                            else:
                                iv = keyAttribute2[-32:]
                    if encryptMethod == 'AES-128':
                        try:
                            # Relative key URIs are resolved against the playlist's base URL.
                            if urlparse(keyURI).netloc == '':
                                keyFullURL = self.generalURL + keyURI
                            else:
                                keyFullURL = keyURI
                            keyFile = requests.get(keyFullURL, headers=self.headers, cookies=self.cookies, proxies=self.proxies, timeout=10.00)
                            key = keyFile.content
                        except:
                            print ('[ERROR] Unable to download the key file.')
                            sys.exit()
                        # NOTE(review): `iv` is unbound here when no IV attribute was
                        # present (NameError rather than None) — confirm intent.
                        if iv == None:
                            iv = '{:0>32d}'.format(self.mediaSequence)
                        self.keys.append({'encryptMethod':encryptMethod,'keyURI':keyURI,'key':key,'iv':iv})
                elif attribute == '#EXTINF':
                    # Segment duration is the tag value minus its trailing comma;
                    # the segment URI is the following line.
                    segmentLength = attributeValue[:-1]
                    segmentURI = self.tokens[i+1]
                    self.ts.append({'segmentURI':segmentURI,'segmentLength':segmentLength})
                    i += 2
                    continue
            i += 1
    # Print parsed info
    def print_info(self):
        """Dump all parsed attributes to stdout (debug helper)."""
        print('fullURL: ')
        print(self.fullURL)
        print('generalURL: ')
        print(self.generalURL)
        print('type: ')
        print(self.type)
        if self.type == 'master':
            print('masterINFO: ')
            print(self.masterINFO)
        else:
            print('playlistType: ')
            print(self.playlistType)
            print('targetDuration: ')
            print(self.targetDuration)
            print('mediaSequence: ')
            print(self.mediaSequence)
            print('keys: ')
            print(self.keys)
        print('headers: ')
        print(self.headers)
        print('cookies: ')
        print(self.cookies)
        print('proxies: ')
        print(self.proxies)
"""
print('ts: ')
print(self.ts)
""" | [
"86881109+cu1455@users.noreply.github.com"
] | 86881109+cu1455@users.noreply.github.com |
42c8fd5d1f01c04351c2a1798dd8cce0574b378a | 677ddde8c47e8f4f8d3e9957d0ff669e68f9f6f2 | /EDD_Proy2/EDD_Proy2/settings.py | 2aa9cd4b484f4258141c133fbe04ccd14cbcda86 | [] | no_license | JorgeEspina/Proyecto2s12017_201403632_201503393 | 205d35dd0ed54b3b9dbe19ee3b5b53be27cc151b | 5fed57e839173a9ae93f8e9e0c55157fc6b68815 | refs/heads/master | 2021-01-19T10:36:33.408262 | 2017-05-08T05:52:11 | 2017-05-08T05:52:11 | 87,880,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | """
Django settings for EDD_Proy2 project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%&1u^9&n*nbueg#x5=ngiq$@2%o7f%zo36curpx*-^gt9yhe7#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames before deploying with DEBUG = False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'EDD_Proy2.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'EDD_Proy2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Default dev database: SQLite file alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"noreply@github.com"
] | JorgeEspina.noreply@github.com |
e76670bfa19fa21996106d93183214b2fb2f3640 | e79db276d389b09e5685c3dafbced4be07c181d3 | /src/PersonalExtractors/BankOfMaharashtraExtractor/bank_of_maharashtra_extractor.py | 27ed8e37e43b23e024f6143a7849221eee433356 | [] | no_license | roshan6111/ofiiceproject | 9c44e815fb609f01d4804462db00997a73d70d7f | 7078b5facbafe92c5a2875fedb992cf9a744c5d0 | refs/heads/master | 2022-12-11T17:48:07.438237 | 2019-06-30T07:18:24 | 2019-06-30T07:18:24 | 194,487,027 | 0 | 0 | null | 2022-12-08T01:47:28 | 2019-06-30T07:16:22 | Python | UTF-8 | Python | false | false | 4,010 | py | import re
from src.Utils import bsr_utils
from src.Utils import constants
def process_desc_custum(json_formatted_data, desc_pattern, line, existing_desc):
    """Append a continuation-line description to the most recent transaction."""
    match = desc_pattern.match(line)
    combined = existing_desc + match.group(constants.DESCRIPTION_STR)
    transactions = json_formatted_data[constants.TRANSACTIONS_STR]
    # Only attach when at least one transaction has already been recorded.
    if transactions:
        transactions[-1][constants.DESCRIPTION_STR] += ' ' + bsr_utils.pretty_format(combined)
def process_transaction(json_formatted_data, i, existing_desc, transaction_regex, file_content, desc_regex,
                        ignorable_regexes):
    """Classify line i of the statement and update json_formatted_data in place.

    A transaction line appends a new transaction dict; a description
    continuation line either starts or flushes the pending description.
    Returns the (possibly extended) pending description for the next line.
    """
    line = file_content[i]
    return_statement = False
    Description_value = ''
    # Patterns are recompiled per line; re's internal cache keeps this cheap.
    transaction_pattern = re.compile(transaction_regex)
    desc_pattern = re.compile(desc_regex)
    ignorable_patterns = [re.compile(ignorable_regex) for ignorable_regex in ignorable_regexes]
    m = transaction_pattern.match(line)
    if transaction_pattern.match(line):
        # Description is kept only when both date and description are non-empty.
        if (len(bsr_utils.pretty_format(m.group(constants.DATE_STR))) > 0) and len(
                bsr_utils.pretty_format(m.group(constants.DESCRIPTION_STR))) > 0:
            Description_value = bsr_utils.pretty_format(m.group(constants.DESCRIPTION_STR))
        else:
            Description_value = ''
        return_statement = True
        # Debit/credit is inferred by comparing balances.
        opening_balance = bsr_utils.get_opening_balance(json_formatted_data)
        transaction_type = bsr_utils.get_transaction_type(opening_balance, bsr_utils.pretty_format(
            m.group(constants.CLOSING_BALANCE_STR)))
        json_formatted_data[constants.TRANSACTIONS_STR].append({
            constants.DATE_STR: bsr_utils.pretty_format(m.group(constants.DATE_STR)),
            # constants.DESCRIPTION_STR: bsr_utils.pretty_format(m.group(constants.DESCRIPTION_STR)),
            constants.DESCRIPTION_STR: Description_value,
            constants.TYPE_STR: transaction_type,
            constants.AMOUNT_STR: bsr_utils.pretty_format(m.group(constants.AMOUNT_STR)),
            constants.CLOSING_BALANCE_STR: bsr_utils.pretty_format(m.group(constants.CLOSING_BALANCE_STR))
        })
    if return_statement:
        return existing_desc
    elif bsr_utils.is_ignorable(ignorable_patterns, line):
        pass
    elif desc_pattern.match(line):
        # First continuation line starts the buffer; a second one flushes both
        # into the last transaction's description.
        if existing_desc is None or existing_desc == '':
            existing_desc = existing_desc + line
        else:
            process_desc_custum(json_formatted_data, desc_pattern, line, existing_desc)
            existing_desc = ''
    return existing_desc
def extract(_file, password):
    """Parse a Bank of Maharashtra PDF statement into a transactions dict.

    Returns the sentinel strings 'wrongpassword' or 'pdfnotreadable' when the
    PDF cannot be opened; otherwise a dict with account details and the
    extracted transaction list.
    """
    header_pattern = re.compile(constants.MAHARASHTRA_BANK_HEADER_REGEX)
    file_end_pattern = re.compile(constants.MAHARASHTRA_BANK_STATEMENT_END_REGEX)
    file_content = bsr_utils.get_file_content(_file, password)
    json_formatted_data = {
        constants.TRANSACTIONS_STR: []
    }
    is_transaction_started = False
    acc_details = ''
    existing_desc = ''
    i = 0
    if file_content == 'wrongpassword':
        return 'wrongpassword'
    elif file_content == 'pdfnotreadable':
        return 'pdfnotreadable'
    while i < len(file_content):
        line = file_content[i]
        if file_end_pattern.match(line):
            break
        if is_transaction_started:
            existing_desc = process_transaction(json_formatted_data, i, existing_desc,
                                                constants.MAHARASHTRA_BANK_TRANSACTION_REGEX, file_content,
                                                constants.MAHARASHTRA_BANK_DESC_REGEX,
                                                constants.MAHARASHTRA_BANK_IGNORABLE_REGEXS)
        elif header_pattern.search(line):
            # Table header found: everything accumulated so far is account info.
            is_transaction_started = True
            bsr_utils.put_acc_details(json_formatted_data, acc_details,
                                      constants.MAHARASHTRA_BANK_ACCOUNT_DETAILS_REGEX)
        else:
            acc_details += line + '\n'
        i += 1
    return json_formatted_data
| [
"roshan@zup.today"
] | roshan@zup.today |
76a9acaf06ed647f5329818ed4650ab73952cbb8 | 7246faf9a222269ce2612613f58dc5ff19091f10 | /leetcode/1662.py | d793883d41ed3cb54e390d971c41a4c5ca4f7ffd | [] | no_license | gusdn3477/Algorithm_Study | 87a2eb72a8488d9263a86db70dadc7944434d41d | 3fefe1dcb40122157845ffc542f41cb097711cc8 | refs/heads/main | 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | class Solution:
def arrayStringsAreEqual(self, word1: List[str], word2: List[str]) -> bool:
a = ''
b = ''
for i in word1:
a += i
for i in word2:
b += i
return a == b | [
"gusdn3477@naver.com"
] | gusdn3477@naver.com |
2b8e0f7cc47c50698ff14eb2bb688b25f20ccf77 | 76d4430567b68151df1855f45ea4408f9bebe025 | /test/functional/wallet_importmulti.py | 9b417874ed72b862538be622fc4351394626492d | [
"MIT"
] | permissive | MicroBitcoinOrg/MicroBitcoin | f761b2ff04bdcb650d7c0ddbef431ef95cd69541 | db7911968445606bf8899903322d5d818d393d88 | refs/heads/master | 2022-12-27T10:04:21.040945 | 2022-12-18T05:05:17 | 2022-12-18T05:05:17 | 132,959,214 | 21 | 33 | MIT | 2020-06-12T04:38:45 | 2018-05-10T22:07:51 | C++ | UTF-8 | Python | false | false | 44,483 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The MicroBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.script import (
CScript,
OP_NOP,
)
from test_framework.test_framework import MicroBitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import (
get_key,
get_multisig,
test_address,
)
class ImportMultiTest(MicroBitcoinTestFramework):
    def set_test_params(self):
        """Two legacy-address nodes on a fresh (clean) chain."""
        self.num_nodes = 2
        self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]]
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        # importmulti is a wallet RPC; skip when built without wallet support.
        self.skip_if_no_wallet()
    def setup_network(self):
        # Start the nodes without connecting them; block relay is not needed.
        self.setup_nodes()
    def test_importmulti(self, req, success, error_code=None, error_message=None, warnings=None):
        """Run importmulti and assert success"""
        if warnings is None:
            warnings = []
        result = self.nodes[1].importmulti([req])
        observed_warnings = []
        if 'warnings' in result[0]:
            observed_warnings = result[0]['warnings']
        # Compare expected vs observed warnings independent of ordering.
        assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
        assert_equal(result[0]['success'], success)
        # Error details are checked only when an error is expected.
        if error_code is not None:
            assert_equal(result[0]['error']['code'], error_code)
            assert_equal(result[0]['error']['message'], error_message)
    def run_test(self):
        """Exercise importmulti on node1 for every supported request shape.

        Keys/scripts are generated on node0 and imported on node1; each scenario
        checks success/error reporting via test_importmulti() and the resulting
        wallet state via test_address(). The scenarios are order-dependent
        (timestamps, watch-only state and wallets carry over between sections).
        """
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.nodes[1].syncwithvalidationinterfacequeue()  # Sync the timestamp to the wallet, so that importmulti works
        node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
        # Check only one address
        assert_equal(node0_address1['ismine'], True)
        # Node 1 sync test
        assert_equal(self.nodes[1].getblockcount(), 1)
        # Address Test - before import
        address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)
        # RPC importmulti -----------------------------------------------
        # MicroBitcoin Address (implicit non-internal)
        self.log.info("Should import an address")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp,
                     ischange=False)
        watchonly_address = key.p2pkh_addr
        watchonly_timestamp = timestamp
        self.log.info("Should not import an invalid address")
        self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
                               "timestamp": "now"},
                              success=False,
                              error_code=-5,
                              error_message='Invalid address \"not valid address\"')
        # ScriptPubKey + internal
        self.log.info("Should import a scriptPubKey with internal flag")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "internal": True},
                              success=True)
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp,
                     ischange=True)
        # ScriptPubKey + internal + label
        self.log.info("Should not allow a label to be specified when internal is true")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "internal": True,
                               "label": "Unsuccessful labelling for internal addresses"},
                              success=False,
                              error_code=-8,
                              error_message='Internal addresses should not have a label')
        # Nonstandard scriptPubKey + !internal
        self.log.info("Should not import a nonstandard scriptPubKey without internal flag")
        nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
                               "timestamp": "now"},
                              success=False,
                              error_code=-8,
                              error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=False,
                     timestamp=None)
        # Address + Public key + !Internal(explicit)
        self.log.info("Should import an address with public key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "pubkeys": [key.pubkey],
                               "internal": False},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp)
        # ScriptPubKey + Public key + internal
        self.log.info("Should import a scriptPubKey with internal and with public key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "pubkeys": [key.pubkey],
                               "internal": True},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp)
        # Nonstandard scriptPubKey + Public key + !internal
        self.log.info("Should not import a nonstandard scriptPubKey without internal and with public key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
                               "timestamp": "now",
                               "pubkeys": [key.pubkey]},
                              success=False,
                              error_code=-8,
                              error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=False,
                     timestamp=None)
        # Address + Private key + !watchonly
        self.log.info("Should import an address with private key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "keys": [key.privkey]},
                              success=True)
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=True,
                     timestamp=timestamp)
        self.log.info("Should not import an address with private key if is already imported")
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "keys": [key.privkey]},
                              success=False,
                              error_code=-4,
                              error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
        # Address + Private key + watchonly
        self.log.info("Should import an address with private key and with watchonly")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "keys": [key.privkey],
                               "watchonly": True},
                              success=True,
                              warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=True,
                     timestamp=timestamp)
        # ScriptPubKey + Private key + internal
        self.log.info("Should import a scriptPubKey with internal and with private key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "keys": [key.privkey],
                               "internal": True},
                              success=True)
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=True,
                     timestamp=timestamp)
        # Nonstandard scriptPubKey + Private key + !internal
        self.log.info("Should not import a nonstandard scriptPubKey without internal and with private key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
                               "timestamp": "now",
                               "keys": [key.privkey]},
                              success=False,
                              error_code=-8,
                              error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=False,
                     ismine=False,
                     timestamp=None)
        # P2SH address
        multisig = get_multisig(self.nodes[0])
        self.nodes[1].generate(COINBASE_MATURITY)
        self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.nodes[1].syncwithvalidationinterfacequeue()
        self.log.info("Should import a p2sh")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     isscript=True,
                     iswatchonly=True,
                     timestamp=timestamp)
        p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
        assert_equal(p2shunspent['spendable'], False)
        assert_equal(p2shunspent['solvable'], False)
        # P2SH + Redeem script
        multisig = get_multisig(self.nodes[0])
        self.nodes[1].generate(COINBASE_MATURITY)
        self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.nodes[1].syncwithvalidationinterfacequeue()
        self.log.info("Should import a p2sh with respective redeem script")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
                               "timestamp": "now",
                               "redeemscript": multisig.redeem_script},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     multisig.p2sh_addr, timestamp=timestamp, iswatchonly=True, ismine=False, solvable=True)
        p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
        assert_equal(p2shunspent['spendable'], False)
        assert_equal(p2shunspent['solvable'], True)
        # P2SH + Redeem script + Private Keys + !Watchonly
        multisig = get_multisig(self.nodes[0])
        self.nodes[1].generate(COINBASE_MATURITY)
        self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.nodes[1].syncwithvalidationinterfacequeue()
        self.log.info("Should import a p2sh with respective redeem script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
                               "timestamp": "now",
                               "redeemscript": multisig.redeem_script,
                               "keys": multisig.privkeys[0:2]},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     timestamp=timestamp,
                     ismine=False,
                     iswatchonly=True,
                     solvable=True)
        p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
        assert_equal(p2shunspent['spendable'], False)
        assert_equal(p2shunspent['solvable'], True)
        # P2SH + Redeem script + Private Keys + Watchonly
        multisig = get_multisig(self.nodes[0])
        self.nodes[1].generate(COINBASE_MATURITY)
        self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
        self.nodes[1].syncwithvalidationinterfacequeue()
        self.log.info("Should import a p2sh with respective redeem script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
                               "timestamp": "now",
                               "redeemscript": multisig.redeem_script,
                               "keys": multisig.privkeys[0:2],
                               "watchonly": True},
                              success=True)
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=True,
                     timestamp=timestamp)
        # Address + Public key + !Internal + Wrong pubkey
        self.log.info("Should not import an address with the wrong public key as non-solvable")
        key = get_key(self.nodes[0])
        wrong_key = get_key(self.nodes[0]).pubkey
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "pubkeys": [wrong_key]},
                              success=True,
                              warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=False,
                     timestamp=timestamp)
        # ScriptPubKey + Public key + internal + Wrong pubkey
        self.log.info("Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
        key = get_key(self.nodes[0])
        wrong_key = get_key(self.nodes[0]).pubkey
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "pubkeys": [wrong_key],
                               "internal": True},
                              success=True,
                              warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=False,
                     timestamp=timestamp)
        # Address + Private key + !watchonly + Wrong private key
        self.log.info("Should import an address with a wrong private key as non-solvable")
        key = get_key(self.nodes[0])
        wrong_privkey = get_key(self.nodes[0]).privkey
        self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now",
                               "keys": [wrong_privkey]},
                              success=True,
                              warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=False,
                     timestamp=timestamp)
        # ScriptPubKey + Private key + internal + Wrong private key
        self.log.info("Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
        key = get_key(self.nodes[0])
        wrong_privkey = get_key(self.nodes[0]).privkey
        self.test_importmulti({"scriptPubKey": key.p2pkh_script,
                               "timestamp": "now",
                               "keys": [wrong_privkey],
                               "internal": True},
                              success=True,
                              warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     iswatchonly=True,
                     ismine=False,
                     solvable=False,
                     timestamp=timestamp)
        # Importing existing watch only address with new timestamp should replace saved timestamp.
        assert_greater_than(timestamp, watchonly_timestamp)
        self.log.info("Should replace previously saved watch only timestamp.")
        self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     watchonly_address,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=timestamp)
        watchonly_timestamp = timestamp
        # restart nodes to check for proper serialization/deserialization of watch only address
        self.stop_nodes()
        self.start_nodes()
        test_address(self.nodes[1],
                     watchonly_address,
                     iswatchonly=True,
                     ismine=False,
                     timestamp=watchonly_timestamp)
        # Bad or missing timestamps
        self.log.info("Should throw on invalid or missing timestamp values")
        assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
                                self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
        assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
                                self.nodes[1].importmulti, [{
                                    "scriptPubKey": key.p2pkh_script,
                                    "timestamp": ""
                                }])
        # Import P2WPKH address as watch only
        self.log.info("Should import a P2WPKH address as watch only")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     key.p2wpkh_addr,
                     iswatchonly=True,
                     solvable=False)
        # Import P2WPKH address with public key but no private key
        self.log.info("Should import a P2WPKH address and public key as solvable but not spendable")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
                               "timestamp": "now",
                               "pubkeys": [key.pubkey]},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2wpkh_addr,
                     ismine=False,
                     solvable=True)
        # Import P2WPKH address with key and check it is spendable
        self.log.info("Should import a P2WPKH address with key")
        key = get_key(self.nodes[0])
        self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
                               "timestamp": "now",
                               "keys": [key.privkey]},
                              success=True)
        test_address(self.nodes[1],
                     key.p2wpkh_addr,
                     iswatchonly=False,
                     ismine=True)
        # P2WSH multisig address without scripts or keys
        multisig = get_multisig(self.nodes[0])
        self.log.info("Should import a p2wsh multisig as watch only without respective redeem script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     solvable=False)
        # Same P2WSH multisig address as above, but now with witnessscript + private keys
        self.log.info("Should import a p2wsh with respective witness script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
                               "timestamp": "now",
                               "witnessscript": multisig.redeem_script,
                               "keys": multisig.privkeys},
                              success=True)
        test_address(self.nodes[1],
                     multisig.p2sh_addr,
                     solvable=True,
                     ismine=True,
                     sigsrequired=2)
        # P2SH-P2WPKH address with no redeemscript or public or private key
        key = get_key(self.nodes[0])
        self.log.info("Should import a p2sh-p2wpkh without redeem script or keys")
        self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     key.p2sh_p2wpkh_addr,
                     solvable=False,
                     ismine=False)
        # P2SH-P2WPKH address + redeemscript + public key with no private key
        self.log.info("Should import a p2sh-p2wpkh with respective redeem script and pubkey as solvable")
        self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
                               "timestamp": "now",
                               "redeemscript": key.p2sh_p2wpkh_redeem_script,
                               "pubkeys": [key.pubkey]},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2sh_p2wpkh_addr,
                     solvable=True,
                     ismine=False)
        # P2SH-P2WPKH address + redeemscript + private key
        key = get_key(self.nodes[0])
        self.log.info("Should import a p2sh-p2wpkh with respective redeem script and private keys")
        self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
                               "timestamp": "now",
                               "redeemscript": key.p2sh_p2wpkh_redeem_script,
                               "keys": [key.privkey]},
                              success=True)
        test_address(self.nodes[1],
                     key.p2sh_p2wpkh_addr,
                     solvable=True,
                     ismine=True)
        # P2SH-P2WSH multisig + redeemscript with no private key
        multisig = get_multisig(self.nodes[0])
        self.log.info("Should import a p2sh-p2wsh with respective redeem script but no private key")
        self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_p2wsh_addr},
                               "timestamp": "now",
                               "redeemscript": multisig.p2wsh_script,
                               "witnessscript": multisig.redeem_script},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     multisig.p2sh_p2wsh_addr,
                     solvable=True,
                     ismine=False)
        # Test importing of a P2SH-P2WPKH address via descriptor + private key
        key = get_key(self.nodes[0])
        self.log.info("Should not import a p2sh-p2wpkh address from descriptor without checksum and private key")
        self.test_importmulti({"desc": "sh(wpkh(" + key.pubkey + "))",
                               "timestamp": "now",
                               "label": "Unsuccessful P2SH-P2WPKH descriptor import",
                               "keys": [key.privkey]},
                              success=False,
                              error_code=-5,
                              error_message="Missing checksum")
        # Test importing of a P2SH-P2WPKH address via descriptor + private key
        key = get_key(self.nodes[0])
        p2sh_p2wpkh_label = "Successful P2SH-P2WPKH descriptor import"
        self.log.info("Should import a p2sh-p2wpkh address from descriptor and private key")
        self.test_importmulti({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
                               "timestamp": "now",
                               "label": p2sh_p2wpkh_label,
                               "keys": [key.privkey]},
                              success=True)
        test_address(self.nodes[1],
                     key.p2sh_p2wpkh_addr,
                     solvable=True,
                     ismine=True,
                     labels=[p2sh_p2wpkh_label])
        # Test ranged descriptor fails if range is not specified
        xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
        addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"]  # hdkeypath=m/0'/0'/0' and 1'
        addresses += ["bcrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju7scl8gn", "bcrt1qfqeppuvj0ww98r6qghmdkj70tv8qpchehegrg8"]  # wpkh subscripts corresponding to the above addresses
        desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
        self.log.info("Ranged descriptor import should fail without a specified range")
        self.test_importmulti({"desc": descsum_create(desc),
                               "timestamp": "now"},
                              success=False,
                              error_code=-8,
                              error_message='Descriptor is ranged, please specify the range')
        # Test importing of a ranged descriptor with xpriv
        self.log.info("Should import the ranged descriptor with specified range as solvable")
        self.test_importmulti({"desc": descsum_create(desc),
                               "timestamp": "now",
                               "range": 1},
                              success=True)
        for address in addresses:
            test_address(self.nodes[1],
                         address,
                         solvable=True,
                         ismine=True)
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
                              success=False, error_code=-8, error_message='End of range is too high')
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
                              success=False, error_code=-8, error_message='Range should be greater or equal than 0')
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
                              success=False, error_code=-8, error_message='End of range is too high')
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
                              success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
        self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
                              success=False, error_code=-8, error_message='Range is too large')
        # Test importing a descriptor containing a WIF private key
        wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
        address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
        desc = "sh(wpkh(" + wif_priv + "))"
        self.log.info("Should import a descriptor with a WIF private key as spendable")
        self.test_importmulti({"desc": descsum_create(desc),
                               "timestamp": "now"},
                              success=True)
        test_address(self.nodes[1],
                     address,
                     solvable=True,
                     ismine=True)
        # dump the private key to ensure it matches what was imported
        privkey = self.nodes[1].dumpprivkey(address)
        assert_equal(privkey, wif_priv)
        # Test importing of a P2PKH address via descriptor
        key = get_key(self.nodes[0])
        p2pkh_label = "P2PKH descriptor import"
        self.log.info("Should import a p2pkh address from descriptor")
        self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
                               "timestamp": "now",
                               "label": p2pkh_label},
                              True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        test_address(self.nodes[1],
                     key.p2pkh_addr,
                     solvable=True,
                     ismine=False,
                     labels=[p2pkh_label])
        # Test import fails if both desc and scriptPubKey are provided
        key = get_key(self.nodes[0])
        self.log.info("Import should fail if both scriptPubKey and desc are provided")
        self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
                               "scriptPubKey": {"address": key.p2pkh_addr},
                               "timestamp": "now"},
                              success=False,
                              error_code=-8,
                              error_message='Both a descriptor and a scriptPubKey should not be provided.')
        # Test import fails if neither desc nor scriptPubKey are present
        key = get_key(self.nodes[0])
        self.log.info("Import should fail if neither a descriptor nor a scriptPubKey are provided")
        self.test_importmulti({"timestamp": "now"},
                              success=False,
                              error_code=-8,
                              error_message='Either a descriptor or scriptPubKey must be provided.')
        # Test importing of a multisig via descriptor
        key1 = get_key(self.nodes[0])
        key2 = get_key(self.nodes[0])
        self.log.info("Should import a 1-of-2 bare multisig from descriptor")
        self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
                               "timestamp": "now"},
                              success=True,
                              warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
        self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
        test_address(self.nodes[1],
                     key1.p2pkh_addr,
                     ismine=False,
                     iswatchonly=False)
        # Import pubkeys with key origin info
        self.log.info("Addresses should have hd keypath and master key id after import with key origin")
        pub_addr = self.nodes[1].getnewaddress()
        pub_addr = self.nodes[1].getnewaddress(address_type="bech32")
        info = self.nodes[1].getaddressinfo(pub_addr)
        pub = info['pubkey']
        pub_keypath = info['hdkeypath']
        pub_fpr = info['hdmasterfingerprint']
        result = self.nodes[0].importmulti(
            [{
                'desc' : descsum_create("wpkh([" + pub_fpr + pub_keypath[1:] +"]" + pub + ")"),
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
        assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
        assert_equal(pub_import_info['pubkey'], pub)
        assert_equal(pub_import_info['hdkeypath'], pub_keypath)
        # Import privkeys with key origin info
        priv_addr = self.nodes[1].getnewaddress(address_type="bech32")
        info = self.nodes[1].getaddressinfo(priv_addr)
        priv = self.nodes[1].dumpprivkey(priv_addr)
        priv_keypath = info['hdkeypath']
        priv_fpr = info['hdmasterfingerprint']
        result = self.nodes[0].importmulti(
            [{
                'desc' : descsum_create("wpkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
        assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
        assert_equal(priv_import_info['hdkeypath'], priv_keypath)
        # Make sure the key origin info are still there after a restart
        self.stop_nodes()
        self.start_nodes()
        import_info = self.nodes[0].getaddressinfo(pub_addr)
        assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
        assert_equal(import_info['hdkeypath'], pub_keypath)
        import_info = self.nodes[0].getaddressinfo(priv_addr)
        assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
        assert_equal(import_info['hdkeypath'], priv_keypath)
        # Check legacy import does not import key origin info
        self.log.info("Legacy imports don't have key origin info")
        pub_addr = self.nodes[1].getnewaddress()
        info = self.nodes[1].getaddressinfo(pub_addr)
        pub = info['pubkey']
        result = self.nodes[0].importmulti(
            [{
                'scriptPubKey': {'address': pub_addr},
                'pubkeys': [pub],
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
        assert_equal(pub_import_info['pubkey'], pub)
        assert 'hdmasterfingerprint' not in pub_import_info
        assert 'hdkeypath' not in pub_import_info
        # Bech32m addresses and descriptors cannot be imported
        self.log.info("Bech32m addresses and descriptors cannot be imported")
        self.test_importmulti(
            {
                "scriptPubKey": {"address": "bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6"},
                "timestamp": "now",
            },
            success=False,
            error_code=-5,
            error_message="Bech32m addresses cannot be imported into legacy wallets",
        )
        self.test_importmulti(
            {
                "desc": descsum_create("tr({})".format(pub)),
                "timestamp": "now",
            },
            success=False,
            error_code=-5,
            error_message="Bech32m descriptors cannot be imported into legacy wallets",
        )
        # Import some public keys to the keypool of a no privkey wallet
        self.log.info("Adding pubkey to keypool of disableprivkey wallet")
        self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True)
        wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
        addr1 = self.nodes[0].getnewaddress(address_type="bech32")
        addr2 = self.nodes[0].getnewaddress(address_type="bech32")
        pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
        pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wpkh(' + pub1 + ')'),
                'keypool': True,
                "timestamp": "now",
            },
            {
                'desc': descsum_create('wpkh(' + pub2 + ')'),
                'keypool': True,
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        assert result[1]['success']
        assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
        newaddr1 = wrpc.getnewaddress(address_type="bech32")
        assert_equal(addr1, newaddr1)
        newaddr2 = wrpc.getnewaddress(address_type="bech32")
        assert_equal(addr2, newaddr2)
        # Import some public keys to the internal keypool of a no privkey wallet
        self.log.info("Adding pubkey to internal keypool of disableprivkey wallet")
        addr1 = self.nodes[0].getnewaddress(address_type="bech32")
        addr2 = self.nodes[0].getnewaddress(address_type="bech32")
        pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
        pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wpkh(' + pub1 + ')'),
                'keypool': True,
                'internal': True,
                "timestamp": "now",
            },
            {
                'desc': descsum_create('wpkh(' + pub2 + ')'),
                'keypool': True,
                'internal': True,
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        assert result[1]['success']
        assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
        newaddr1 = wrpc.getrawchangeaddress(address_type="bech32")
        assert_equal(addr1, newaddr1)
        newaddr2 = wrpc.getrawchangeaddress(address_type="bech32")
        assert_equal(addr2, newaddr2)
        # Import a multisig and make sure the keys don't go into the keypool
        self.log.info('Imported scripts with pubkeys should not have their pubkeys go into the keypool')
        addr1 = self.nodes[0].getnewaddress(address_type="bech32")
        addr2 = self.nodes[0].getnewaddress(address_type="bech32")
        pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
        pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wsh(multi(2,' + pub1 + ',' + pub2 + '))'),
                'keypool': True,
                "timestamp": "now",
            }]
        )
        assert result[0]['success']
        assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
        # Cannot import those pubkeys to keypool of wallet with privkeys
        self.log.info("Pubkeys cannot be added to the keypool of a wallet with private keys")
        wrpc = self.nodes[1].get_wallet_rpc(self.default_wallet_name)
        assert wrpc.getwalletinfo()['private_keys_enabled']
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wpkh(' + pub1 + ')'),
                'keypool': True,
                "timestamp": "now",
            }]
        )
        assert_equal(result[0]['error']['code'], -8)
        assert_equal(result[0]['error']['message'], "Keys can only be imported to the keypool when private keys are disabled")
        # Make sure ranged imports import keys in order
        self.log.info('Key ranges should be imported in order')
        wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
        assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
        assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
        xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
        addresses = [
            'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv',  # m/0'/0'/0
            'bcrt1q8vprchan07gzagd5e6v9wd7azyucksq2xc76k8',  # m/0'/0'/1
            'bcrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjp9lulu',  # m/0'/0'/2
            'bcrt1qau64272ymawq26t90md6an0ps99qkrse58m640',  # m/0'/0'/3
            'bcrt1qsg97266hrh6cpmutqen8s4s962aryy77jp0fg0',  # m/0'/0'/4
        ]
        result = wrpc.importmulti(
            [{
                'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
                'keypool': True,
                'timestamp': 'now',
                'range' : [0, 4],
            }]
        )
        for i in range(0, 5):
            addr = wrpc.getnewaddress('', 'bech32')
            assert_equal(addr, addresses[i])
# Entry point when the file is executed directly by the functional test runner.
if __name__ == '__main__':
    ImportMultiTest().main()
| [
"iamstenman@protonmail.com"
] | iamstenman@protonmail.com |
b242db63f303ccd809a3b05d0134b6e4d1e318f3 | 99cf0bf1d00328297c6b8c4df0de32f4e3a6c23f | /store/admin.py | ff370586f4cddc5067b44763c43f5f12228b2cb9 | [] | no_license | akashmrc98/Gstore | 063048d4fbf26c0fc8233d524f035fd5548d51dd | 04b454a5b6426d6bfb6bd69028ae325a9dbd3f31 | refs/heads/master | 2020-04-27T12:56:38.626832 | 2019-03-07T13:42:50 | 2019-03-07T13:42:50 | 174,350,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | from django.contrib import admin
from .models import GUser, Items, Temp, Cart, Order, Order_Final, SUser
class Guser_Admin(admin.ModelAdmin):
    """Change-list column layout for GUser records."""
    list_display = ("user_id", "name", "dob", "sex", "phone", "address", "email")
class Suser_Admin(admin.ModelAdmin):
    """Change-list column layout for SUser records (same fields as GUser)."""
    list_display = ("user_id", "name", "dob", "sex", "phone", "address", "email")
class Item_Admin(admin.ModelAdmin):
    """Change-list column layout for store Items."""
    list_display = ("item_id", "item_name", "stock", "price")
class Item_Order(admin.ModelAdmin):
    """Change-list column layout for individual Order rows."""
    list_display = ("order_id", "time_stamp", "item_name", "quantity", "price")
class Item_Order_Final(admin.ModelAdmin):
    """Change-list column layout for finalised orders (shows the uid only)."""
    list_display = ("uid",)
# Branding shown in the Django admin header and browser title bar.
admin.site.site_header="GSTORE"
admin.site.site_title="GSTORE"
# Register each model with its customised admin class; Cart and Temp use the default admin.
admin.site.register(SUser, Suser_Admin)
admin.site.register(GUser, Guser_Admin)
admin.site.register(Items, Item_Admin)
admin.site.register(Order_Final,Item_Order_Final)
admin.site.register(Order, Item_Order)
admin.site.register(Cart)
admin.site.register(Temp)
| [
"akashmadduru@gmail.com"
] | akashmadduru@gmail.com |
27cef3273fbea6594127d3f349650e9e27d66252 | 68a38a56e36c67cdcdfaddc3a7e24dec46dc838b | /app/src/application/handler/user.py | bd296cea869957a190e7a578c5c4c8a314292f6f | [
"MIT"
] | permissive | hagifoo/gae-pomodoro | 56adbf505bf8a8052bc2818bbbbce567eb74b1ee | 6babdfc8d4ac8483b59b4da1d2c9b13fddcc4383 | refs/heads/master | 2021-01-12T02:10:53.517008 | 2017-02-20T01:59:15 | 2017-02-20T01:59:15 | 78,483,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,203 | py | """
This module provides user and user timer handling
"""
from application.handler import JsonHandler, TaskHandler, signin_user_only
from domain.repository import UserRepository
import error
class Handler(JsonHandler):
    """JSON endpoint returning the signed-in user's own profile."""

    @signin_user_only
    def get(self):
        # `self.user` is guaranteed to be set by the @signin_user_only guard.
        return self.user.to_json()
class SlackChannelsHandler(JsonHandler):
    """JSON endpoint listing the Slack channels available to the user."""

    @signin_user_only
    def get(self):
        # Re-fetch the user through the repository so the Slack integration
        # object is fully hydrated, then delegate to it.
        current = UserRepository().get(self.user.id)
        return current.slack.get_channels()
class TimerStartHandler(JsonHandler):
    """Starts the signed-in user's pomodoro timer, notifying Slack if enabled."""

    @signin_user_only
    def get(self):
        current = UserRepository().get(self.user.id)
        # Notify before starting, matching the stop handler's ordering.
        if current.slack.is_notify():
            current.slack.notify_start()
        return current.timer.start()
class TimerStopHandler(JsonHandler):
    """Stops the signed-in user's pomodoro timer, notifying Slack if enabled."""

    @signin_user_only
    def get(self):
        current = UserRepository().get(self.user.id)
        # Notify before stopping, matching the start handler's ordering.
        if current.slack.is_notify():
            current.slack.notify_stop()
        return current.timer.stop()
class TimerEndTaskHandler(TaskHandler):
    """Task-queue endpoint fired when a pomodoro work interval ends.

    Records the completed pomodoro, schedules the timer to stop after the
    break, and optionally announces the end on Slack.
    """

    def post(self):
        uid = self.request.get('id')
        if not uid:
            raise error.TaskUnrecoverableException(
                error.BadRequestException('`id` parameter is not specified'))

        repo = UserRepository()
        user = repo.get(uid)
        if user is None:
            raise error.TaskUnrecoverableException(
                error.NotFoundException('No such user: {}'.format(uid)))

        repo.add_pomodoro(user)
        user.timer.stop_after_break()
        if user.slack.is_notify():
            user.slack.notify_end()
class TimerStopTaskHandler(TaskHandler):
    """Task-queue endpoint that force-stops a user's pomodoro timer."""

    def post(self):
        uid = self.request.get('id')
        if not uid:
            raise error.TaskUnrecoverableException(
                error.BadRequestException('`id` parameter is not specified'))

        user = UserRepository().get(uid)
        if user is None:
            raise error.TaskUnrecoverableException(
                error.NotFoundException('No such user: {}'.format(uid)))

        user.timer.stop()
| [
"hagiharatoshishige@gmail.com"
] | hagiharatoshishige@gmail.com |
3843f381b3d6a2e009ea019d635947124fc99156 | fc05249c73f910a4d36f471eb91e05256a64cdfe | /roms/make_bry_phd16.py | 991513bd9be6ce35aac02cc3ffdedfe61ec96daf | [] | no_license | rsoutelino/sandbox | f51b37619cd7a61a0446d83e2e1c2af58f14802a | 814d215582d8e14514ba93daf1b41f6d118b906c | refs/heads/master | 2023-03-02T12:05:18.703732 | 2023-03-02T01:58:15 | 2023-03-02T01:58:15 | 28,204,889 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 29,082 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Creates initial conditions netCDF file for ROMS
#
# Rafael Soutelino - rsoutelino@gmail.com
#
# Using some material from Matlab scripts by
# "Copyright (c) 2003 UCLA - Patrick Marchesiello"
#
# Last modification: Aug, 2010
#####################################################################
print ' \n' + '==> ' + ' IMPORTING MODULES ...\n' + ' '
# IMPORTING MODULES #################################################
# NOTE(review): Python 2 script. matplotlib.mlab.griddata and
# matplotlib.delaunay were removed from modern matplotlib, so this
# script needs a legacy matplotlib/basemap stack to run.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import delaunay
from matplotlib.mlab import griddata
from mpl_toolkits.basemap import Basemap
import datetime as dt
import netCDF4
import scipy.io as sp
# classes and functions to the computings
from roms_setup import run_setup, zlev, ztosigma
#####################################################################
# SCRIPT START ######################################################
# Basic Settings:
filenamestr = '_bry.nc'                        # suffix of the output file
filetypestr = 'ROMS Boundary Conditions file'  # netCDF global "type" attribute
# READING PREVIOUSLY BUILT RELEVANT FILES: ###########################
# metadata ascii file
# OA-created netcdf initial T, S file
# grid netcdf file
print ' \n' + '==> ' + ' READING ASCII METADATA FILE ...\n' + ' '
run = run_setup('../phd16_run.setup')
print ' \n' + '==> ' + ' READING FEATURE MODEL FIELD ...\n' + ' '
datafile = sp.loadmat(run.datadir + run.ini_filename)
# assigning some variables from data file
# Depths are forced to the negative-down convention regardless of the
# sign used inside the .mat file.
Zlev = datafile['z'][:].ravel(); Zlev = np.abs(Zlev); Zlev = -Zlev
N1 = Zlev.size  # number of z-levels in the source climatology
lon = datafile['lon'][:]
lat = datafile['lat'][:]
temp = datafile['temp'][:]
salt = datafile['salt'][:]
u = datafile['u'][:]
v = datafile['v'][:]
ubar = datafile['ubar'][:]
vbar = datafile['vbar'][:]
zeta = datafile['ssh'][:]
print ' \n' + '==> ' + ' READING GRID NETCDF FILE ...\n' + ' '
grdfile = netCDF4.Dataset(run.rundir + run.run_name + '_grd.nc')
# assigning some variables from grid file
# ROMS Arakawa C-grid: rho points are cell centres, u/v points sit on
# cell faces.
rlon2 = grdfile.variables['lon_rho'][:]
rlat2 = grdfile.variables['lat_rho'][:]
vlon2 = grdfile.variables['lon_v'][:]
vlat2 = grdfile.variables['lat_v'][:]
ulon2 = grdfile.variables['lon_u'][:]
ulat2 = grdfile.variables['lat_u'][:]
angle = grdfile.variables['angle'][:]
h2 = grdfile.variables['h'][:]
rmask2 = grdfile.variables['mask_rho'][:]
# DOING COMPUTATIONS TO INTERPOLATE THE FIELDS TO ROMS GRID #########
# Modify the bathymetry
# Clip all depths at 5000 m.
f = np.where(h2 >= 5000)
h2[f] = 5000; del f
N = int(run.klevels)  # number of sigma (s) levels
Jrho, Irho = rlon2.shape
Mr2, Lr2 = rlon2.shape
# Staggered-grid sizes: u-points lose one column, v-points lose one row.
Lu2 = Lr2-1; Mu2 = Mr2
Lv2 = Lr2; Mv2 = Mr2-1
# NOTE(review): cosa/sina are computed here but never used in this script.
cosa = np.cos(angle); sina = np.sin(angle); del angle
rmask2 = np.ma.masked_where(rmask2 == 0, rmask2)
# Bathymetry interpolated from rho-points onto the u and v staggered points.
hu = griddata(rlon2.ravel(), rlat2.ravel(), h2.ravel(), ulon2, ulat2)
hv = griddata(rlon2.ravel(), rlat2.ravel(), h2.ravel(), vlon2, vlat2)
# Depths (and thicknesses) of the sigma levels at rho, u and v points.
[Zsig,dZsig] = zlev(h2,run.theta_s,run.theta_b,run.tcline,run.klevels)
[ZsigU,dZsigU] = zlev(hu,run.theta_s,run.theta_b,run.tcline,run.klevels)
[ZsigV,dZsigV] = zlev(hv,run.theta_s,run.theta_b,run.tcline,run.klevels)
### Interpolating T, S to ROMS 3D S-COORD grid ###############################
lN = run.klevels
lt = np.size(run.time)
ZETA = np.zeros([lt, Jrho, Irho])
UBAR = np.zeros([lt, Mu2, Lu2])
VBAR = np.zeros([lt, Mv2, Lv2])
TEMP = np.zeros([lt, N, Mv2, Lv2])
SALT = np.zeros([lt, N, Mv2, Lv2])
U = np.zeros([lt, N, Mu2, Lu2])
V = np.zeros([lt, N, Mv2, Lv2])
z2 = np.zeros([N1, Jrho, Irho])
Zlev2 = np.zeros([N1, 1])
print ' \n' + '==> ' + ' INTERPOLATING TEMPERATURE ...\n' + ' '
for k in np.arange(0, N1, 1):
print 'TEMP: Z Level = ' + str(-1*Zlev[k]) + ' m'
z1 = np.squeeze(temp[k,:,:])
z2[N1-k-1,:,:] = griddata(lon.ravel(),lat.ravel(),z1.ravel(),rlon2,rlat2)
Zlev2[N1-k-1] = Zlev[k]
print ' \n' + '==> ' + ' INTERPOLATING TEMP FROM Z --> S COORD ...\n' + ' '
TEMP = ztosigma(z2,Zsig,Zlev2); del z1, z2
###
z2 = np.zeros([N1, Jrho, Irho])
print ' \n' + '==> ' + ' INTERPOLATING SALINITY ...\n' + ' '
for k in np.arange(0, N1, 1):
print 'SALT: Z Level = ' + str(-1*Zlev[k]) + ' m'
z1 = np.squeeze(salt[k,:,:])
z2[N1-k-1,:,:] = griddata(lon.ravel(),lat.ravel(),z1.ravel(),rlon2,rlat2)
Zlev2[N1-k-1] = Zlev[k]
print ' \n' + '==> ' + ' INTERPOLATING SALT FROM Z --> S COORD ...\n' + ' '
SALT = ztosigma(z2,Zsig,Zlev2);
###
z2 = np.zeros([N1, Mu2, Lu2])
print ' \n' + '==> ' + ' INTERPOLATING U-velocity ...\n' + ' '
for k in np.arange(0, N1, 1):
print 'U-Vel: Z Level = ' + str(-1*Zlev[k]) + ' m'
z1 = np.squeeze(u[k,:,:])
z2[N1-k-1,:,:] = griddata(lon.ravel(),lat.ravel(),z1.ravel(),ulon2,ulat2)
Zlev2[N1-k-1] = Zlev[k]
print ' \n' + '==> ' + ' INTERPOLATING V-Vel FROM Z --> S COORD ...\n' + ' '
U = ztosigma(z2,ZsigU,Zlev2);
###
z2 = np.zeros([N1, Mv2, Lv2])
print ' \n' + '==> ' + ' INTERPOLATING V-velocity ...\n' + ' '
for k in np.arange(0, N1, 1):
print 'V-Vel: Z Level = ' + str(-1*Zlev[k]) + ' m'
z1 = np.squeeze(v[k,:,:])
z2[N1-k-1,:,:] = griddata(lon.ravel(),lat.ravel(),z1.ravel(),vlon2,vlat2)
Zlev2[N1-k-1] = Zlev[k]
print ' \n' + '==> ' + ' INTERPOLATING V-Vel FROM Z --> S COORD ...\n' + ' '
V = ztosigma(z2,ZsigV,Zlev2);
###
print ' \n' + '==> ' + ' INTERPOLATING UBAR-velocity ...\n' + ' '
UBAR = griddata(lon.ravel(),lat.ravel(),ubar.ravel(),ulon2,ulat2)
print ' \n' + '==> ' + ' INTERPOLATING VBAR-velocity ...\n' + ' '
VBAR = griddata(lon.ravel(),lat.ravel(),vbar.ravel(),vlon2,vlat2)
print ' \n' + '==> ' + ' INTERPOLATING FREE-SURFACE ...\n' + ' '
ZETA = griddata(lon.ravel(),lat.ravel(),zeta.ravel(),rlon2,rlat2)
# WRITING THE NETCDF FILE ####################################################
# Based on "bry_limit.cdl" NETCDF sample structure
# some computings regarding netcdf variables:
t = np.arange(0, run.time);  # one record per day of simulation
N = int(run.klevels)
theta_s = run.theta_s
theta_b = run.theta_b
Mp, Lp = h2.shape
L = Lp - 1   # number of u-point columns
M = Mp - 1   # number of v-point rows
Np = N + 1   # number of w-levels (layer interfaces)
if run.spherical == 1:
    spherical = 'T'
else:
    spherical = 'F'
# S-coordinate stretching curves at rho (sc/Cs) and w (scw/Csw) points.
# NOTE(review): this looks like the original Song & Haidvogel style
# stretching -- presumably consistent with the run.vstretching value
# written into the file below; confirm.
ds = 1.0 / N
lev = np.arange(1, N+1, 1)
sc = -1 + (lev-0.5)*ds
Ptheta = np.sinh(theta_s*sc) / np.sinh(theta_s)
Rtheta = np.tanh( theta_s*(sc+0.5) ) / ( 2* np.tanh(0.5*theta_s) ) - 0.5
Cs = (1-theta_b)*Ptheta + theta_b * Rtheta
# NOTE(review): float-step arange; the number of points (should be N+1)
# can be fragile to rounding of 0+ds -- worth verifying scw.size == Np.
scw = np.arange(-1, 0+ds, ds)
Pthetaw = np.sinh( theta_s*scw ) / np.sinh(theta_s)
Rthetaw = np.tanh( theta_s*(scw+0.5) ) / (2*np.tanh(0.5*theta_s)) - 0.5
Csw = (1-theta_b)*Pthetaw + theta_b*Rthetaw
### GETTING SLICES FOR PROVIDE EXTERNAL BOUNDARY CONDITIONS ####################
# Each open boundary is a single row/column of the interpolated fields,
# reshaped to carry a leading time axis and then repeated for every output
# record (the boundary forcing is held constant in time).
# NOTE(review): the reshapes use run.klevels directly; elsewhere it is
# wrapped as int(run.klevels) -- confirm the setup parser returns an int.
# NORTH #####
# getting the northern slice to use as boundary condition
temp_north = TEMP[:,-1,:]; temp_north.shape = (1, run.klevels, Lp)
salt_north = SALT[:,-1,:]; salt_north.shape = (1, run.klevels, Lp)
#zeta_north = ZETA[:,-1,:]; zeta_north.shape = (1, Lp)
u_north = U[:,-1,:]; u_north.shape = (1, run.klevels, L)
v_north = V[:,-1,:]; v_north.shape = (1, run.klevels, Lp)
ubar_north = UBAR[-1,:]; ubar_north.shape = (1, L)
vbar_north = VBAR[-1,:]; vbar_north.shape = (1, Lp)
zeta_north = ZETA[-1,:]; zeta_north.shape = (1, Lp)
# repeating as many times as the model will run
temp_north = temp_north.repeat(t.size, axis=0)
salt_north = salt_north.repeat(t.size, axis=0)
#zeta_north = zeta_north.repeat(t.size, axis=0)
u_north = u_north.repeat(t.size, axis=0)
v_north = v_north.repeat(t.size, axis=0)
ubar_north = ubar_north.repeat(t.size, axis=0)
vbar_north = vbar_north.repeat(t.size, axis=0)
zeta_north = zeta_north.repeat(t.size, axis=0)
# EAST #######
# getting the eastern slice to use as boundary condition
temp_east = TEMP[:,:,-1]; temp_east.shape = (1, run.klevels, Mp)
salt_east = SALT[:,:,-1]; salt_east.shape = (1, run.klevels, Mp)
u_east = U[:,:,-1]; u_east.shape = (1, run.klevels, Mp)
v_east = V[:,:,-1]; v_east.shape = (1, run.klevels, M)
ubar_east = UBAR[:,-1]; ubar_east.shape = (1, Mp)
vbar_east = VBAR[:,-1]; vbar_east.shape = (1, M)
zeta_east = ZETA[:,-1]; zeta_east.shape = (1, Mp)
# repeating as many times as the model will run
temp_east = temp_east.repeat(t.size, axis=0)
salt_east = salt_east.repeat(t.size, axis=0)
u_east = u_east.repeat(t.size, axis=0)
v_east = v_east.repeat(t.size, axis=0)
ubar_east = ubar_east.repeat(t.size, axis=0)
vbar_east = vbar_east.repeat(t.size, axis=0)
zeta_east = zeta_east.repeat(t.size, axis=0)
# SOUTH #####
# getting the southern slice to use as boundary condition
# NOTE(review): the southern slices take index 1 (the SECOND row) while
# the northern slices take -1 (the last row).  This looks like a leftover
# from 1-based MATLAB indexing -- confirm whether index 0 was intended.
temp_south = TEMP[:,1,:]; temp_south.shape = (1, run.klevels, Lp)
salt_south = SALT[:,1,:]; salt_south.shape = (1, run.klevels, Lp)
u_south = U[:,1,:]; u_south.shape = (1, run.klevels, L)
v_south = V[:,1,:]; v_south.shape = (1, run.klevels, Lp)
ubar_south = UBAR[1,:]; ubar_south.shape = (1, L)
vbar_south = VBAR[1,:]; vbar_south.shape = (1, Lp)
zeta_south = ZETA[1,:]; zeta_south.shape = (1, Lp)
# repeating as many times as the model will run
temp_south = temp_south.repeat(t.size, axis=0)
salt_south = salt_south.repeat(t.size, axis=0)
u_south = u_south.repeat(t.size, axis=0)
v_south = v_south.repeat(t.size, axis=0)
ubar_south = ubar_south.repeat(t.size, axis=0)
vbar_south = vbar_south.repeat(t.size, axis=0)
zeta_south = zeta_south.repeat(t.size, axis=0)
#################################################################################
print ' \n' + '==> ' + ' WRITING NETCDF BOUNDARY CONDITIONS FILE ...\n' + ' '
# NETCDF3_CLASSIC output, overwriting any previous file of the same name.
ncfile = netCDF4.Dataset(run.rundir + run.run_name + filenamestr, mode='w',
        clobber='true', format='NETCDF3_CLASSIC')
# creating DIMENSIONS
ncfile.createDimension('xi_rho', size=Lp)
ncfile.createDimension('xi_u', size=L)
ncfile.createDimension('xi_v', size=Lp)
ncfile.createDimension('eta_rho', size=Mp)
ncfile.createDimension('eta_u', size=Mp)
ncfile.createDimension('eta_v', size=M)
ncfile.createDimension('s_rho', size=N)
ncfile.createDimension('s_w', size=Np)
# One record dimension per field family, all sized run.time.
# NOTE(review): zeta_time and v2d_time are declared but no matching
# coordinate variables are written below; the zeta_* boundary variables
# use temp_time instead -- confirm this is intended.
ncfile.createDimension('zeta_time', size=run.time)
ncfile.createDimension('v2d_time', size=run.time)
ncfile.createDimension('v3d_time', size=run.time)
ncfile.createDimension('temp_time', size=run.time)
ncfile.createDimension('salt_time', size=run.time)
ncfile.createDimension('one', size=1)
# creating GLOBAL ATTRIBUTES
setattr(ncfile, 'type', filetypestr)
setattr(ncfile, 'title', run.ini_info)
setattr(ncfile, 'out_file', run.run_name + filenamestr)
setattr(ncfile, 'grd_file', run.run_name + '_grd.nc')
now = dt.datetime.now()
setattr(ncfile,'history',np.str(now))
# creating VARIABLES, ATTRIBUTES and ASSIGNING VALUES
# Scalar grid / vertical-coordinate parameters follow.
# ---------------------------------------------------------------------------
ncfile.createVariable('spherical', 'c')
setattr(ncfile.variables['spherical'], 'long_name', 'grid type logical switch')
setattr(ncfile.variables['spherical'], 'flag_values', 'T, F')
setattr(ncfile.variables['spherical'], 'flag_meanings', 'spherical, cartesian')
ncfile.variables['spherical'][:] = spherical
# ---------------------------------------------------------------------------
ncfile.createVariable('Vtransform', 'd', dimensions=('one'))
setattr(ncfile.variables['Vtransform'], 'long_name',
    'vertical terrain-following transformation equation')
ncfile.variables['Vtransform'][:] = run.vtransform
# ---------------------------------------------------------------------------
ncfile.createVariable('Vstretching', 'd', dimensions=('one'))
setattr(ncfile.variables['Vstretching'], 'long_name',
    'vertical terrain-following stretching function')
ncfile.variables['Vstretching'][:] = run.vstretching
# ---------------------------------------------------------------------------
ncfile.createVariable('theta_s', 'd', dimensions=('one'))
setattr(ncfile.variables['theta_s'], 'long_name',
    'S-coordinate surface control parameter')
ncfile.variables['theta_s'][:] = run.theta_s
# ---------------------------------------------------------------------------
ncfile.createVariable('theta_b', 'd', dimensions=('one'))
setattr(ncfile.variables['theta_b'], 'long_name',
    'S-coordinate bottom control parameter')
ncfile.variables['theta_b'][:] = run.theta_b
# ---------------------------------------------------------------------------
ncfile.createVariable('Tcline', 'd', dimensions=('one'))
setattr(ncfile.variables['Tcline'], 'long_name',
    'S-coordinate surface/bottom layer width')
setattr(ncfile.variables['Tcline'], 'units', 'meter')
ncfile.variables['Tcline'][:] = run.tcline
# ---------------------------------------------------------------------------
ncfile.createVariable('hc', 'd', dimensions=('one'))
setattr(ncfile.variables['hc'],'long_name',
    'S-coordinate parameter, critical depth')
setattr(ncfile.variables['hc'], 'units', 'meter')
ncfile.variables['hc'][:] = run.hc
# ---------------------------------------------------------------------------
# Vertical coordinate variables (sigma levels and stretching curves).
ncfile.createVariable('s_rho', 'd', dimensions=('s_rho'))
setattr(ncfile.variables['s_rho'], 'long_name', 'S-coordinate at RHO-points')
setattr(ncfile.variables['s_rho'], 'valid_min', -1.0)
setattr(ncfile.variables['s_rho'], 'valid_max', 0.0)
setattr(ncfile.variables['s_rho'], 'positive', 'up')
setattr(ncfile.variables['s_rho'], 'standard_name', 'ocean_s_coordinate_g1')
setattr(ncfile.variables['s_rho'], 'formula_terms',
    's: s_rho C: Cs_r eta: zeta depth: h depth_c: hc')
ncfile.variables['s_rho'][:] = sc
# ---------------------------------------------------------------------------
ncfile.createVariable('s_w', 'd', dimensions=('s_w'))
setattr(ncfile.variables['s_w'], 'long_name', 'S-coordinate at W-points')
setattr(ncfile.variables['s_w'], 'valid_min', -1.0)
setattr(ncfile.variables['s_w'], 'valid_max', 0.0)
setattr(ncfile.variables['s_w'], 'positive', 'up')
setattr(ncfile.variables['s_w'], 'standard_name', 'ocean_s_coordinate_g1')
setattr(ncfile.variables['s_w'], 'formula_terms',
    's: s_rho C: Cs_w eta: zeta depth: h depth_c: hc')
ncfile.variables['s_w'][:] = scw
# ---------------------------------------------------------------------------
ncfile.createVariable('Cs_r', 'd', dimensions=('s_rho'))
setattr(ncfile.variables['Cs_r'], 'long_name',
    'S-coordinate stretching curves at RHO-points')
setattr(ncfile.variables['Cs_r'], 'valid_min', -1.0)
setattr(ncfile.variables['Cs_r'], 'valid_max', 0.0)
ncfile.variables['Cs_r'][:] = Cs
# ---------------------------------------------------------------------------
ncfile.createVariable('Cs_w', 'd', dimensions=('s_w'))
setattr(ncfile.variables['Cs_w'], 'long_name',
    'S-coordinate stretching curves at W-points')
setattr(ncfile.variables['Cs_w'], 'valid_min', -1.0)
setattr(ncfile.variables['Cs_w'], 'valid_max', 0.0)
ncfile.variables['Cs_w'][:] = Csw
# ---------------------------------------------------------------------------
# Bathymetry and horizontal coordinates, copied from the grid file.
ncfile.createVariable('h', 'd', dimensions=('eta_rho', 'xi_rho'))
setattr(ncfile.variables['h'], 'long_name', 'bathymetry at RHO-points')
setattr(ncfile.variables['h'], 'units', 'meter')
setattr(ncfile.variables['h'], 'coordinates', 'lon_rho lat_rho')
ncfile.variables['h'][:] = h2
# ---------------------------------------------------------------------------
ncfile.createVariable('lon_rho', 'd', dimensions=('eta_rho', 'xi_rho'))
setattr(ncfile.variables['lon_rho'], 'long_name', 'longitude of RHO-points')
setattr(ncfile.variables['lon_rho'], 'units', 'degree_east')
setattr(ncfile.variables['lon_rho'], 'standard_name', 'longitude')
ncfile.variables['lon_rho'][:] = grdfile.variables['lon_rho'][:]
# ---------------------------------------------------------------------------
ncfile.createVariable('lat_rho', 'd', dimensions=('eta_rho', 'xi_rho'))
setattr(ncfile.variables['lat_rho'], 'long_name', 'latitude of RHO-points')
setattr(ncfile.variables['lat_rho'], 'units', 'degree_north')
setattr(ncfile.variables['lat_rho'], 'standard_name', 'latitude')
ncfile.variables['lat_rho'][:] = grdfile.variables['lat_rho'][:]
# ---------------------------------------------------------------------------
ncfile.createVariable('lon_u', 'd', dimensions=('eta_u', 'xi_u'))
setattr(ncfile.variables['lon_u'], 'long_name', 'longitude of U-points')
setattr(ncfile.variables['lon_u'], 'units', 'degree_east')
setattr(ncfile.variables['lon_u'], 'standard_name', 'longitude')
ncfile.variables['lon_u'][:] = grdfile.variables['lon_u'][:]
# ---------------------------------------------------------------------------
ncfile.createVariable('lat_u', 'd', dimensions=('eta_u', 'xi_u'))
setattr(ncfile.variables['lat_u'], 'long_name', 'latitude of U-points')
setattr(ncfile.variables['lat_u'], 'units', 'degree_north')
setattr(ncfile.variables['lat_u'], 'standard_name', 'latitude')
ncfile.variables['lat_u'][:] = grdfile.variables['lat_u'][:]
# ---------------------------------------------------------------------------
ncfile.createVariable('lon_v', 'd', dimensions=('eta_v', 'xi_v'))
setattr(ncfile.variables['lon_v'], 'long_name', 'longitude of V-points')
setattr(ncfile.variables['lon_v'], 'units', 'degree_east')
# NOTE(review): 'lonitude' below is a typo for 'longitude' in the
# standard_name attribute (kept as-is here; CF tools may not recognise it).
setattr(ncfile.variables['lon_v'], 'standard_name', 'lonitude')
ncfile.variables['lon_v'][:] = grdfile.variables['lon_v'][:]
# ---------------------------------------------------------------------------
ncfile.createVariable('lat_v', 'd', dimensions=('eta_v', 'xi_v'))
setattr(ncfile.variables['lat_v'], 'long_name', 'latitude of V-points')
setattr(ncfile.variables['lat_v'], 'units', 'degree_north')
setattr(ncfile.variables['lat_v'], 'standard_name', 'latitude')
ncfile.variables['lat_v'][:] = grdfile.variables['lat_v'][:]
# ---------------------------------------------------------------------------
# Time coordinates: day offsets 0..run.time-1, shared by all boundary fields.
# NOTE(review): the "days since 0000-01-01" epoch is presumably a dummy
# calendar, since the boundary forcing is time-invariant -- confirm if real
# dates matter downstream.
ncfile.createVariable('v3d_time', 'd', dimensions=('v3d_time'))
setattr(ncfile.variables['v3d_time'], 'long_name', '3D momentum time')
setattr(ncfile.variables['v3d_time'], 'units', 'days since 0000-01-01 00:00:00')
ncfile.variables['v3d_time'][:] = t
# ---------------------------------------------------------------------------
ncfile.createVariable('temp_time', 'd', dimensions=('temp_time'))
setattr(ncfile.variables['temp_time'], 'long_name', 'potential temperature time')
setattr(ncfile.variables['temp_time'], 'units', 'days since 0000-01-01 00:00:00')
ncfile.variables['temp_time'][:] = t
# ---------------------------------------------------------------------------
ncfile.createVariable('salt_time', 'd', dimensions=('salt_time'))
setattr(ncfile.variables['salt_time'], 'long_name', 'salinity time')
setattr(ncfile.variables['salt_time'], 'units', 'days since 0000-01-01 00:00:00')
ncfile.variables['salt_time'][:] = t
# ---------------------------------------------------------------------------
#ncfile.createVariable('u_west', 'd', dimensions=('v3d_time', 's_rho', 'eta_u'))
#setattr(ncfile.variables['u_west'], 'long_name', '3D u-momentum western boundary condition')
#setattr(ncfile.variables['u_west'], 'units', 'meter second-1')
#setattr(ncfile.variables['u_west'], 'time', 'v3d_time')
#ncfile.variables['u_west'][:] = u_west
# ---------------------------------------------------------------------------
ncfile.createVariable('u_east', 'd', dimensions=('v3d_time', 's_rho', 'eta_u'))
setattr(ncfile.variables['u_east'], 'long_name', '3D u-momentum eastern boundary condition')
setattr(ncfile.variables['u_east'], 'units', 'meter second-1')
setattr(ncfile.variables['u_east'], 'time', 'v3d_time')
ncfile.variables['u_east'][:] = u_east
# ---------------------------------------------------------------------------
ncfile.createVariable('u_south', 'd', dimensions=('v3d_time', 's_rho', 'xi_u'))
setattr(ncfile.variables['u_south'], 'long_name', '3D u-momentum southern boundary condition')
setattr(ncfile.variables['u_south'], 'units', 'meter second-1')
setattr(ncfile.variables['u_south'], 'time', 'v3d_time')
ncfile.variables['u_south'][:] = u_south
## ---------------------------------------------------------------------------
ncfile.createVariable('u_north', 'd', dimensions=('v3d_time', 's_rho', 'xi_u'))
setattr(ncfile.variables['u_north'], 'long_name', '3D u-momentum northern boundary condition')
setattr(ncfile.variables['u_north'], 'units', 'meter second-1')
setattr(ncfile.variables['u_north'], 'time', 'v3d_time')
ncfile.variables['u_north'][:] = u_north
# ---------------------------------------------------------------------------
ncfile.createVariable('ubar_east', 'd', dimensions=('v3d_time', 'eta_u'))
setattr(ncfile.variables['ubar_east'], 'long_name', '2D u-momentum eastern boundary condition')
setattr(ncfile.variables['ubar_east'], 'units', 'meter second-1')
setattr(ncfile.variables['ubar_east'], 'time', 'v3d_time')
ncfile.variables['ubar_east'][:] = ubar_east
# ---------------------------------------------------------------------------
ncfile.createVariable('ubar_south', 'd', dimensions=('v3d_time', 'xi_u'))
setattr(ncfile.variables['ubar_south'], 'long_name', '2D u-momentum southern boundary condition')
setattr(ncfile.variables['ubar_south'], 'units', 'meter second-1')
setattr(ncfile.variables['ubar_south'], 'time', 'v3d_time')
ncfile.variables['ubar_south'][:] = ubar_south
## ---------------------------------------------------------------------------
ncfile.createVariable('ubar_north', 'd', dimensions=('v3d_time', 'xi_u'))
setattr(ncfile.variables['ubar_north'], 'long_name', '2D u-momentum northern boundary condition')
setattr(ncfile.variables['ubar_north'], 'units', 'meter second-1')
setattr(ncfile.variables['ubar_north'], 'time', 'v3d_time')
ncfile.variables['ubar_north'][:] = ubar_north
## ---------------------------------------------------------------------------
#ncfile.createVariable('v_west', 'd', dimensions=('v3d_time', 's_rho', 'eta_v'))
#setattr(ncfile.variables['v_west'], 'long_name', '3D v-momentum western boundary condition')
#setattr(ncfile.variables['v_west'], 'units', 'meter second-1')
#setattr(ncfile.variables['v_west'], 'time', 'v3d_time')
#ncfile.variables['v_west'][:] = v_west
# ---------------------------------------------------------------------------
ncfile.createVariable('v_east', 'd', dimensions=('v3d_time', 's_rho', 'eta_v'))
setattr(ncfile.variables['v_east'], 'long_name', '3D v-momentum eastern boundary condition')
setattr(ncfile.variables['v_east'], 'units', 'meter second-1')
setattr(ncfile.variables['v_east'], 'time', 'v3d_time')
ncfile.variables['v_east'][:] = v_east
# ---------------------------------------------------------------------------
ncfile.createVariable('v_south', 'd', dimensions=('v3d_time', 's_rho', 'xi_v'))
setattr(ncfile.variables['v_south'], 'long_name', '3D v-momentum sovthern boundary condition')
setattr(ncfile.variables['v_south'], 'units', 'meter second-1')
setattr(ncfile.variables['v_south'], 'time', 'v3d_time')
ncfile.variables['v_south'][:] = v_south
## ---------------------------------------------------------------------------
ncfile.createVariable('v_north', 'd', dimensions=('v3d_time', 's_rho', 'xi_v'))
setattr(ncfile.variables['v_north'], 'long_name', '3D v-momentum northern boundary condition')
setattr(ncfile.variables['v_north'], 'units', 'meter second-1')
setattr(ncfile.variables['v_north'], 'time', 'v3d_time')
ncfile.variables['v_north'][:] = v_north
# ---------------------------------------------------------------------------
ncfile.createVariable('vbar_east', 'd', dimensions=('v3d_time', 'eta_v'))
setattr(ncfile.variables['vbar_east'], 'long_name', '2D v-momentum eastern boundary condition')
setattr(ncfile.variables['vbar_east'], 'units', 'meter second-1')
setattr(ncfile.variables['vbar_east'], 'time', 'v3d_time')
ncfile.variables['vbar_east'][:] = vbar_east
# ---------------------------------------------------------------------------
ncfile.createVariable('vbar_south', 'd', dimensions=('v3d_time', 'xi_v'))
setattr(ncfile.variables['vbar_south'], 'long_name', '2D v-momentum southern boundary condition')
setattr(ncfile.variables['vbar_south'], 'units', 'meter second-1')
setattr(ncfile.variables['vbar_south'], 'time', 'v3d_time')
ncfile.variables['vbar_south'][:] = vbar_south
## ---------------------------------------------------------------------------
ncfile.createVariable('vbar_north', 'd', dimensions=('v3d_time', 'xi_v'))
setattr(ncfile.variables['vbar_north'], 'long_name', '2D v-momentum northern boundary condition')
setattr(ncfile.variables['vbar_north'], 'units', 'meter second-1')
setattr(ncfile.variables['vbar_north'], 'time', 'v3d_time')
ncfile.variables['vbar_north'][:] = vbar_north
# ---------------------------------------------------------------------------
ncfile.createVariable('zeta_east', 'd', dimensions=('temp_time', 'eta_rho'))
setattr(ncfile.variables['zeta_east'], 'long_name', 'free-surface eastern boundary condition')
setattr(ncfile.variables['zeta_east'], 'units', 'meter')
setattr(ncfile.variables['zeta_east'], 'time', 'temp_time')
ncfile.variables['zeta_east'][:] = zeta_east
# ---------------------------------------------------------------------------
ncfile.createVariable('zeta_south', 'd', dimensions=('temp_time', 'xi_rho'))
setattr(ncfile.variables['zeta_south'], 'long_name', 'free-surface southern boundary condition')
setattr(ncfile.variables['zeta_south'], 'units', 'meter')
setattr(ncfile.variables['zeta_south'], 'time', 'temp_time')
ncfile.variables['zeta_south'][:] = zeta_south
## ---------------------------------------------------------------------------
ncfile.createVariable('zeta_north', 'd', dimensions=('temp_time', 'xi_rho'))
setattr(ncfile.variables['zeta_north'], 'long_name', 'free-surface northern boundary condition')
setattr(ncfile.variables['zeta_north'], 'units', 'meter')
setattr(ncfile.variables['zeta_north'], 'time', 'temp_time')
ncfile.variables['zeta_north'][:] = zeta_north
## ---------------------------------------------------------------------------
#ncfile.createVariable('temp_west', 'd', dimensions=('temp_time', 's_rho', 'eta_rho'))
#setattr(ncfile.variables['temp_west'], 'long_name', 'potential temperature western boundary condition')
#setattr(ncfile.variables['temp_west'], 'units', 'celcius')
#setattr(ncfile.variables['temp_west'], 'time', 'temp_time')
#ncfile.variables['temp_west'][:] = temp_west
# ---------------------------------------------------------------------------
ncfile.createVariable('temp_east', 'd', dimensions=('temp_time', 's_rho', 'eta_rho'))
setattr(ncfile.variables['temp_east'], 'long_name', 'potential temperature eastern boundary condition')
setattr(ncfile.variables['temp_east'], 'units', 'celcius')
setattr(ncfile.variables['temp_east'], 'time', 'temp_time')
ncfile.variables['temp_east'][:] = temp_east
# ---------------------------------------------------------------------------
ncfile.createVariable('temp_south', 'd', dimensions=('temp_time', 's_rho', 'xi_rho'))
setattr(ncfile.variables['temp_south'], 'long_name', 'potential temperature southern boundary condition')
setattr(ncfile.variables['temp_south'], 'units', 'celcius')
setattr(ncfile.variables['temp_south'], 'time', 'temp_time')
ncfile.variables['temp_south'][:] = temp_south
## ---------------------------------------------------------------------------
ncfile.createVariable('temp_north', 'd', dimensions=('temp_time', 's_rho', 'xi_rho'))
setattr(ncfile.variables['temp_north'], 'long_name', 'potential temperature northern boundary condition')
setattr(ncfile.variables['temp_north'], 'units', 'celcius')
setattr(ncfile.variables['temp_north'], 'time', 'temp_time')
ncfile.variables['temp_north'][:] = temp_north
## ---------------------------------------------------------------------------
#ncfile.createVariable('salt_west', 'd', dimensions=('salt_time', 's_rho', 'eta_rho'))
#setattr(ncfile.variables['salt_west'], 'long_name', 'salinity western boundary condition')
#setattr(ncfile.variables['salt_west'], 'time', 'salt_time')
#ncfile.variables['salt_west'][:] = salt_west
# ---------------------------------------------------------------------------
ncfile.createVariable('salt_east', 'd', dimensions=('salt_time', 's_rho', 'eta_rho'))
setattr(ncfile.variables['salt_east'], 'long_name', 'salinity eastern boundary condition')
setattr(ncfile.variables['salt_east'], 'time', 'salt_time')
ncfile.variables['salt_east'][:] = salt_east
# ---------------------------------------------------------------------------
ncfile.createVariable('salt_south', 'd', dimensions=('salt_time', 's_rho', 'xi_rho'))
setattr(ncfile.variables['salt_south'], 'long_name', 'salinity southern boundary condition')
setattr(ncfile.variables['salt_south'], 'time', 'salt_time')
ncfile.variables['salt_south'][:] = salt_south
## ---------------------------------------------------------------------------
ncfile.createVariable('salt_north', 'd', dimensions=('salt_time', 's_rho', 'xi_rho'))
setattr(ncfile.variables['salt_north'], 'long_name', 'salinity northern boundary condition')
setattr(ncfile.variables['salt_north'], 'time', 'salt_time')
ncfile.variables['salt_north'][:] = salt_north
ncfile.sync()
print ' \n' + '==> ' + ' ############################################# ...\n' + ' '
print ' \n' + '==> ' + ' BOUNDARY CONDITIONS FILE SUCCESSFULLY CREATED ...\n' + ' '
print ' \n' + '==> ' + ' ############################################# ...\n' + ' '
| [
"rsoutelino@gmail.com"
] | rsoutelino@gmail.com |
de3892b248bf72a4a58df72192eb0d7948c9185f | 8f13a1afb871d8cc2db0549c21e6fc10b6ad3f54 | /29_1.py | 6c51ddae9ed20cf7819b6c6052b8dfb0167ee2a8 | [] | no_license | SebNik/Project-Euler | 159ddaec942081e85934c802e64efb29609fd613 | 53288a08c5ac302df550faaa90c8f6e62452d3ec | refs/heads/master | 2022-11-05T18:17:34.901908 | 2022-10-27T15:03:45 | 2022-10-27T15:03:45 | 239,459,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | import timeit
def v29(max=101):
    """Project Euler 29: count distinct terms a**b for 2 <= a, b < max.

    NOTE(review): the parameter shadows the builtin ``max``; the name is
    kept only for backward call compatibility.
    """
    # A set de-duplicates in O(1) per insertion; the original appended to a
    # list guarded by an O(n) membership test, which is quadratic overall.
    return len({a ** b for a in range(2, max) for b in range(2, max)})
#print(v29())
# Average wall-clock seconds per call over 10 runs of the default case.
print(timeit.timeit(v29, number=10)/10)
#0.6384788267
"noreply@github.com"
] | SebNik.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.