seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
41637205602 | import base64
import logging
import sys
from string import digits, ascii_uppercase
import traceback
import time
from ins import *
from disasm import *
import gpu
class Tape:
    '''
    A looped (circular) sequence of instructions backed by a plain list.
    '''
    @classmethod
    def from_inss(cls, inss):
        '''Build a tape that wraps an existing list of instructions.'''
        tape = cls()
        tape.data = inss
        return tape
    @classmethod
    def from_bytes(cls, rom):
        '''Decode a raw ROM blob; every 3 bytes form one 24-bit instruction.'''
        tape = cls()
        tape.data = [Ins.from_bytes(rom[off:off + 3]) for off in range(0, len(rom), 3)]
        return tape
    def to_bytes(self):
        '''Serialize all instructions back into a single bytes object.'''
        return b''.join(ins.to_bytes() for ins in self.data)
    def __getitem__(self, i):
        return self.data[i]
    def __len__(self):
        return len(self.data)
    def __repr__(self):
        return str(self.data)
class Regs:
    '''
    Register file: 64 registers holding 6-bit values each.
    Register 0 always reads as zero (writes to it are ignored).
    '''
    def __init__(self):
        self.data = [0] * 64
    def dump(self):
        '''Print the register file, eight values per row.'''
        per_row = 8
        for start in range(0, len(self.data), per_row):
            row = self.data[start:start + per_row]
            print(' '.join(str(v).rjust(2) for v in row))
    def __getitem__(self, i):
        value = self.data[i]
        assert 0 <= value < 64
        return value
    def __setitem__(self, i, v):
        # Writes are masked to 6 bits; register 0 is never written.
        if i != 0:
            self.data[i] = v & 0o77
    def __len__(self):
        return len(self.data)
class Mem:
    '''
    Main memory: 2**18 words addressed through an 18-bit cursor.
    Stored values are masked to 6 bits on write.
    NOTE(review): an earlier header described the words as 16-bit, which
    disagrees with the 0o77 mask below -- confirm the intended word width.
    '''
    def __init__(self):
        self.data = [0] * (2 ** 18)
        # 18-bit address cursor into `self.data`
        self.addr = 0
    def dump(self):
        '''Print the whole memory, eight words per row (very large output).'''
        per_row = 8
        for start in range(0, len(self.data), per_row):
            row = self.data[start:start + per_row]
            print(' '.join(str(v).rjust(2) for v in row))
    def __getitem__(self, i):
        value = self.data[i]
        assert 0 <= value < 64
        return value
    def __setitem__(self, i, v):
        self.data[i] = v & 0o77
    def __len__(self):
        return len(self.data)
def twos_comp(n, bits=6):
    '''Return the two's-complement negation of `n` modulo 2**bits.'''
    modulus = 1 << bits
    return (modulus - n) % modulus
def to_int(n, bits=6):
    '''Interpret `n` as a `bits`-wide two's complement value; return the signed int.'''
    sign_bit = 1 << (bits - 1)
    return n - (1 << bits) if n & sign_bit else n
def bit_string(n, bits=6):
    '''Return `n` modulo 2**bits as a zero-padded binary string of length `bits`.'''
    return format(n % (1 << bits), 'b').zfill(bits)
def from_int(n, bits=6):
    '''Turn a Python int into its `bits`-wide two's complement encoding.

    Fixes: the `bits` argument was previously ignored (the old body called
    `bit_string(n)` with the default width), so any width other than 6
    produced wrong results. Computing the residue arithmetically also
    avoids the string round-trip.
    '''
    return n % (1 << bits)
def sar(n, d, bits=6):
    '''Shift `n` right by `d` while replicating the sign bit (arithmetic shift).'''
    shifted = n >> d
    if n & (1 << (bits - 1)):
        # Negative value: fill the vacated high positions with ones.
        keep = max(bits - d, 0)
        shifted |= (1 << bits) - (1 << keep)
    return shifted
def rol(n, d, bits=6):
    '''Rotate the low `bits` bits of `n` by `d` positions.

    Fixes: the width is now honored; the old body called `bit_string(n)`
    with the default width, so widths other than 6 rotated a 6-bit string.

    NOTE(review): the slicing moves the last `d` bits to the front, i.e.
    toward the most-significant end, which is conventionally a *right*
    rotation even though the original docstring said "left" -- confirm
    the intended direction against the ISA spec before renaming.
    '''
    d = d % bits
    s = format(n % (1 << bits), 'b').zfill(bits)
    s = s[-d:] + s[:-d]
    return int(s, 2)
# `x` is a placeholder for an invalid character
serial_dict = digits + ascii_uppercase + ' +-*/<=>()[]{}#$_?|^&!~,.:\nx'
def chr_from_serial(n):
    '''Map a 6-bit serial code to its character.'''
    return serial_dict[n]
def str_from_serial(ns):
    '''Decode a sequence of serial codes into a string.'''
    return ''.join(map(chr_from_serial, ns))
def serial_from_chr(c):
    '''Map a character to its serial code (ValueError if unsupported).'''
    return serial_dict.index(c)
def serial_from_str(s):
    '''Encode a string as a bytearray of serial codes.'''
    return bytearray(map(serial_from_chr, s))
class Emu:
    '''
    The emulator core: 64 six-bit registers, 2**18 words of memory, a
    looped instruction tape, one condition flag and a saturating
    centisecond clock. Serial I/O goes through stdin/stdout; video goes
    through an optional GPU front-end.

    Fixes relative to the original:
    * reset_clock() now assigns `self.clock = 0` (it previously used
      `==`, a no-op comparison, so the debugger's fake clock was never
      reset).
    * the "label not found" message reads "Couldn't" instead of the
      accidental adjacent-string concatenation "Couldnt".
    * from_filename() closes the ROM file.
    '''
    def __init__(self, use_gpu=False):
        self.regs = Regs()
        self.mem = Mem()
        self.use_gpu = use_gpu
        if use_gpu:
            self.gpu = gpu.Gpu()
            self.gpu.start()
        self.pc = 0
        self.halted = False
        self.cf = False  # condition flag: written by CMP, gates Cond.TR/FA
        self.clock = 0  # centiseconds since reset, saturating at 0o7777
        self.buffer = ''  # pending serial input
        self.out = sys.stdout
    @classmethod
    def from_filename(cls, filename, use_gpu=False):
        '''Build an emulator whose tape is the base64-encoded ROM in `filename`.'''
        ans = cls(use_gpu)
        with open(filename) as f:
            s = f.read().strip()
        s = base64.b64decode(s)
        ans.tape = Tape.from_bytes(s)
        return ans
    # Ops
    def op_hlt(self, ins):
        self.halted = True
    # Arithmetic and logic. Register writes are masked to 6 bits by Regs.
    def op_add(self, ins):
        rd, ra, rb = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] + self.regs[rb]
    def op_addi(self, ins):
        rd, ra, ib = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] + ib
    def op_sub(self, ins):
        # a - b implemented as a + twos_comp(b)
        rd, ra, rb = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] + twos_comp(self.regs[rb])
    def op_or(self, ins):
        rd, ra, rb = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] | self.regs[rb]
    def op_ori(self, ins):
        rd, ra, ib = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] | ib
    def op_xor(self, ins):
        # NOTE(review): rb is negated via twos_comp() before the xor,
        # unlike op_or/op_xori -- confirm this matches the ISA spec.
        rd, ra, rb = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] ^ twos_comp(self.regs[rb])
    def op_xori(self, ins):
        rd, ra, ib = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] ^ ib
    def op_and(self, ins):
        # NOTE(review): same twos_comp() asymmetry as op_xor -- confirm.
        rd, ra, rb = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] & twos_comp(self.regs[rb])
    def op_andi(self, ins):
        rd, ra, ib = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] & ib
    def op_shl(self, ins):
        rd, ra, rb = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] << self.regs[rb]
    def op_shr(self, ins):
        rd, ra, rb = ins.a, ins.b, ins.c
        self.regs[rd] = self.regs[ra] >> self.regs[rb]
    # Comparison
    def op_cmp(self, ins):
        '''Evaluate a comparison and store the result in the condition flag.'''
        (cmp_type, cm, a, b) = ins.as_cmp()
        # Determine left and right sides of comparison
        if cmp_type == CmpType.RA_RB:
            left = self.regs[a]
            right = self.regs[b]
        elif cmp_type == CmpType.RB_RA:
            left = self.regs[b]
            right = self.regs[a]
        elif cmp_type == CmpType.RA_IB:
            left = self.regs[a]
            right = b
        elif cmp_type == CmpType.IA_RB:
            left = a
            right = self.regs[b]
        # Do comparison (S* orderings are signed, U* are unsigned)
        if cm == Cm.TR:
            self.cf = True
        elif cm == Cm.FA:
            self.cf = False
        elif cm == Cm.EQ:
            self.cf = left == right
        elif cm == Cm.NE:
            self.cf = left != right
        elif cm == Cm.SL:
            self.cf = to_int(left) < to_int(right)
        elif cm == Cm.SG:
            self.cf = to_int(left) > to_int(right)
        elif cm == Cm.UL:
            self.cf = left < right
        elif cm == Cm.UG:
            self.cf = left > right
        else:
            raise ValueError('Unhandled cm: {}'.format(cm))
    def op_shi(self, ins):
        '''Shift/rotate by an immediate amount.'''
        (shi_type, rd, ra, ib) = ins.as_shi()
        if shi_type == ShiType.SHLI:
            self.regs[rd] = self.regs[ra] << ib
        elif shi_type == ShiType.SHRI:
            self.regs[rd] = self.regs[ra] >> ib
        elif shi_type == ShiType.SARI:
            self.regs[rd] = sar(self.regs[ra], ib)
        elif shi_type == ShiType.ROLI:
            self.regs[rd] = rol(self.regs[ra], ib)
        else:
            raise ValueError('Unhandled shi_type: {}'.format(shi_type))
    def op_ld(self, ins):
        # Register-indirect load: rd <- regs[(regs[ra] + ib) mod 64]
        rd, ra, ib = ins.a, ins.b, ins.c
        rs = (self.regs[ra] + ib) & 0o77
        self.regs[rd] = self.regs[rs]
    def op_st(self, ins):
        # Register-indirect store: regs[(regs[ra] + ib) mod 64] <- rs
        rs, ra, ib = ins.a, ins.b, ins.c
        rd = (self.regs[ra] + ib) & 0o77
        self.regs[rd] = self.regs[rs]
    def op_fm(self, ins):
        '''Fixed-point multiply through a 12-bit buffer, shifted by pr.'''
        (fm_type, pr, rd, ra) = ins.as_fm()
        # 12 bit buffer
        ans = (self.regs[rd] * self.regs[ra]) & 0o7777
        if fm_type == FmType.U:
            ans = ans >> pr
        else:
            ans = sar(ans, pr, bits=12)
        # `ans` will be reduced back to 6 bits when stored
        self.regs[rd] = ans
    def op_lbl(self, ins):
        # Labels are jump targets only; executing one does nothing.
        pass
    def op_jup(self, ins, reverse=False):
        '''Jump to the nearest matching label, scanning backwards over the
        looped tape (forwards when `reverse` is set, i.e. for JDN).'''
        key = (ins.partial_jump_key(), self.regs[ins.c])
        # Search for the label matching key
        start = self.pc
        i = start
        while True:
            if reverse:
                i = (i + 1) % len(self.tape)
            else:
                i = (i - 1) % len(self.tape)
            if i == start:
                raise ValueError("Couldn't find label: {}".format(key))
            ch_ins = self.tape[i] # Check instruction
            if ch_ins.op == Op.LBL and \
               self.should_execute(ch_ins) and \
               ch_ins.label_key() == key:
                # We always increment the PC after executing an instruction.
                # To offset that, we subtract 1 here.
                self.pc = i - 1
                return
    def op_jdn(self, ins):
        self.op_jup(ins, reverse=True)
    def get_input(self):
        '''Prompt for one line of input and append it to the serial buffer.'''
        s = input('> ')
        self.buffer += s
    def op_io_serial_incoming(self, ins):
        (rd, ix_, rs_) = ins.as_io()
        # Optionally read more input if we don't have any in the buffer
        if len(self.buffer) == 0:
            self.get_input()
        # Send the length of buffer
        self.regs[rd] = from_int(len(self.buffer))
    def op_io_serial_read(self, ins):
        (rd, ix_, rs_) = ins.as_io()
        # Optionally read more input if we don't have any in the buffer
        if len(self.buffer) == 0:
            self.get_input()
        # Pop the first char and send it
        c = self.buffer[0]
        self.buffer = self.buffer[1:]
        self.regs[rd] = from_int(serial_from_chr(c))
    def op_io_serial_write(self, ins):
        (rd_, ix_, rs) = ins.as_io()
        c = chr_from_serial(self.regs[rs])
        self.out.write(c)
        self.out.flush()
    def reset_clock(self):
        '''Restart the wall-clock reference and zero the clock register.'''
        self.start = time.time()
        # Bug fix: the original wrote `self.clock == 0`, a comparison with
        # no effect, so the clock was never actually reset here.
        self.clock = 0
        if self.use_gpu:
            should_halt = self.gpu.update()
            if should_halt:
                self.halted = True
    def op_io_clock_lo_cs(self, ins):
        (rd, ix_, rs) = ins.as_io()
        if rs == 0:
            # Get lower 6 bits of clock
            self.regs[rd] = self.clock & 0o77
        else:
            self.reset_clock()
    def op_io_clock_hi_cs(self, ins):
        (rd, ix_, rs) = ins.as_io()
        if rs == 0:
            # Get upper 6 bits of clock
            self.regs[rd] = (self.clock & 0o7700) >> 6
        else:
            self.reset_clock()
    def op_io_mem_addr_lo(self, ins):
        (rd_, ix_, rs) = ins.as_io()
        # Clear lower bits without clearing upper bits
        mask = (0o77 << 6) + (0o77 << (6 * 2))
        self.mem.addr &= mask
        self.mem.addr |= self.regs[rs]
    def op_io_mem_addr_mid(self, ins):
        (rd_, ix_, rs) = ins.as_io()
        mask = 0o77 + (0o77 << (6 * 2))
        self.mem.addr &= mask
        self.mem.addr |= (self.regs[rs] << 6)
    def op_io_mem_addr_hi(self, ins):
        (rd_, ix_, rs) = ins.as_io()
        mask = 0o77 + (0o77 << 6)
        self.mem.addr &= mask
        self.mem.addr |= (self.regs[rs] << (6 * 2))
    def op_io_mem_read(self, ins):
        # Reads post-increment the address cursor (wrapping).
        (rd, ix_, rs_) = ins.as_io()
        self.regs[rd] = self.mem[self.mem.addr]
        self.mem.addr = (self.mem.addr + 1) % len(self.mem)
    def op_io_mem_write(self, ins):
        # Writes post-increment the address cursor (wrapping).
        (rd_, ix_, rs) = ins.as_io()
        self.mem[self.mem.addr] = self.regs[rs]
        self.mem.addr = (self.mem.addr + 1) % len(self.mem)
    def op_gpu_x(self, ins):
        (rd_, ix_, rs) = ins.as_io()
        self.gpu.set_x(self.regs[rs])
    def op_gpu_y(self, ins):
        (rd_, ix_, rs) = ins.as_io()
        self.gpu.set_y(self.regs[rs])
    def op_gpu_draw(self, ins):
        (rd_, ix, rs) = ins.as_io()
        self.gpu.draw(self.regs[rs])
    # Dispatch table: IO device id -> handler
    op_io_switch = {
        IoDevice.SERIAL_INCOMING: op_io_serial_incoming,
        IoDevice.SERIAL_READ: op_io_serial_read,
        IoDevice.SERIAL_WRITE: op_io_serial_write,
        IoDevice.CLOCK_LO_CS: op_io_clock_lo_cs,
        IoDevice.CLOCK_HI_CS: op_io_clock_hi_cs,
        IoDevice.MEM_ADDR_HI: op_io_mem_addr_hi,
        IoDevice.MEM_ADDR_MID: op_io_mem_addr_mid,
        IoDevice.MEM_ADDR_LO: op_io_mem_addr_lo,
        IoDevice.MEM_READ: op_io_mem_read,
        IoDevice.MEM_WRITE: op_io_mem_write,
        IoDevice.GPU_X: op_gpu_x,
        IoDevice.GPU_Y: op_gpu_y,
        IoDevice.GPU_DRAW: op_gpu_draw,
    }
    def op_io(self, ins):
        (rd_, ix, rs_) = ins.as_io()
        if ix in Emu.op_io_switch:
            op_io_func = Emu.op_io_switch[ix]
            op_io_func(self, ins)
        else:
            logging.warning('Unknown IO device')
            self.halted = True
    # Dispatch table: opcode -> handler
    op_switch = {
        Op.HLT: op_hlt,
        Op.ADD: op_add,
        Op.ADDI: op_addi,
        Op.SUB: op_sub,
        Op.OR: op_or,
        Op.ORI: op_ori,
        Op.XOR: op_xor,
        Op.XORI: op_xori,
        Op.AND: op_and,
        Op.ANDI: op_andi,
        Op.SHL: op_shl,
        Op.SHR: op_shr,
        Op.CMP: op_cmp,
        Op.SHI: op_shi,
        Op.LD: op_ld,
        Op.ST: op_st,
        Op.FM: op_fm,
        Op.LBL: op_lbl,
        Op.JUP: op_jup,
        Op.JDN: op_jdn,
        Op.IO: op_io,
    }
    def should_execute(self, ins):
        '''True when the instruction's condition matches the condition flag.'''
        return \
            (ins.cond == Cond.UN) or \
            (ins.cond == Cond.TR and self.cf) or \
            (ins.cond == Cond.FA and not self.cf)
    def execute(self, ins):
        '''Dispatch one instruction (no-op when its condition fails).'''
        if self.should_execute(ins):
            if ins.op in Emu.op_switch:
                op_func = Emu.op_switch[ins.op]
                op_func(self, ins)
            else:
                raise ValueError('Unhandled op: {}'.format(ins.op))
    def run(self, log_inss=False):
        '''Run until HLT; with `log_inss`, return the list of PCs executed.'''
        if log_inss:
            # Keep a log of instructions executed
            inss_log = []
        self.start = time.time()
        self.clock = 0
        while not self.halted:
            ins = self.tape[self.pc]
            if log_inss:
                inss_log.append(self.pc)
            self.execute(ins)
            self.pc = (self.pc + 1) % len(self.tape) # Tape is looped
            now = time.time()
            elapsed = now - self.start
            elapsed = round(elapsed, 2)
            elapsed = int(elapsed * 100)
            # Clock saturates at 0o7777 rather than wrapping
            self.clock = min(elapsed, 0o7777)
        if self.use_gpu:
            self.gpu.quit()
        if log_inss:
            return inss_log
    def execute_dbg_cmd(self, cmd):
        '''Return value: whether to go to the next instruction or not'''
        cmd = cmd.split()
        if len(cmd) == 0:
            print('No cmd')
            return False
        try:
            if cmd[0] == 'p':
                i = int(cmd[1])
                print('regs[{}] = {}'.format(i, self.regs[i]))
            elif cmd[0] == 'x':
                i = int(cmd[1])
                print('mem[{}] = {}'.format(i, self.mem[i]))
            elif cmd[0] == 'xi':
                print('mem.addr = {}'.format(self.mem.addr))
            elif cmd[0] == 'reg':
                self.regs.dump()
            elif cmd[0] == 'l':
                # Disassemble the next few instructions
                inss = self.tape[self.pc: self.pc + 5]
                disasm = Disasm.disasm_tape(inss)
                print(disasm)
            elif cmd[0] in {'n', 's'}:
                return True
            elif cmd[0] == 'q':
                self.halted = True
                return True
            elif cmd[0] == 'c':
                self.stepping = False
                return True
            else:
                print('Unknown cmd')
                return False
        except ValueError:
            print('Error processing cmd')
            return False
    def run_dbg(self):
        '''Interactive debugger loop: disassemble, prompt, step.'''
        self.clock = 0
        self.stepping = True
        prev_cmd = None
        while not self.halted:
            ins = self.tape[self.pc]
            print('{}: {}'.format(self.pc, Disasm.disasm(ins)))
            if self.stepping:
                while True:
                    cmd = input('edb> ')
                    # An empty command repeats the previous one
                    if cmd == '' and prev_cmd:
                        cmd = prev_cmd
                    go_next = self.execute_dbg_cmd(cmd)
                    prev_cmd = cmd
                    if go_next:
                        break
            self.execute(ins)
            self.pc = (self.pc + 1) % len(self.tape) # Tape is looped
            # Use fake clock where each instruction takes one centisecond
            self.clock = min(self.clock + 1, 0o7777)
    def save_tape(self, filename):
        '''Write the tape back out as base64 text.'''
        with open(filename, 'w') as f:
            b = self.tape.to_bytes()
            s = base64.b64encode(b).decode()
            f.write(s)
if __name__ == '__main__':
    # Usage: python emu.py <rom-file>
    assert len(sys.argv) >= 2
    filename = sys.argv[1]
    emu = Emu.from_filename(filename, use_gpu=True)
    try:
        emu.run()
    except KeyboardInterrupt:
        # Ctrl-C: show where we were interrupted, then exit non-zero.
        traceback.print_exc(file=sys.stdout)
        # Print the PC before we quit
        print('EMU PC:', emu.pc)
        sys.exit(1)
| qxxxb/emu | emu.py | emu.py | py | 16,844 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ins.to_bytes",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "string.digits",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "string.ascii_uppercase",
"line_number": 147,
"usage_type": "name"
},
{
"api_name": "gpu.Gpu",
"li... |
18773450898 | import os, math, sys
from collections import Counter
def get_vocabulary(item_class):
    '''Collect every purely-alphabetic token from all files in train/<item_class>.

    Files are decoded as latin-1. Duplicates are preserved on purpose --
    the training step counts term frequencies with Counter.

    Fixes: file handles are now closed via a context manager (the
    original opened each file and never closed it).
    '''
    folder = 'train/' + item_class
    class_vocabulary = []
    for filename in os.listdir(folder):
        with open(os.path.join(folder, filename), encoding='latin-1') as file:
            for line in file:
                class_vocabulary.extend(word for word in line.split() if word.isalpha())
    return class_vocabulary
def test(item_class):
    '''Classify every document in test/<item_class> with naive Bayes.

    Relies on module-level globals populated by the training section:
    `all_vocab`, `ham_prior`, `spam_prior`, `ham_bayes_dict` and
    `spam_bayes_dict`. Returns a list of 'ham'/'spam' labels, one per
    document, in os.listdir order.
    '''
    classifications = []
    for filename in os.listdir('test/' + item_class):
        file = open(os.path.join('test/' + item_class, filename), encoding='latin-1')
        # W ← EXTRACTTOKENSFROMDOC(V, d)
        # NOTE(review): `all_vocab` is a list, so this membership test is
        # O(len(all_vocab)) per token -- converting it to a set once would
        # be dramatically faster.
        tokens = [word for line in file for word in line.split() if word in all_vocab]
        # do score[c] ← log prior[c]
        score_ham = math.log(ham_prior)
        score_spam = math.log(spam_prior)
        for token in tokens:
            # do score[c] += log condprob[t][c]
            score_ham += math.log(ham_bayes_dict[token])
            score_spam += math.log(spam_bayes_dict[token])
        # Ties go to spam: ham wins only on a strictly greater score.
        if score_ham > score_spam:
            classifications.append('ham')
        else:
            classifications.append('spam')
    return classifications
# ---- Training: multinomial naive Bayes with Laplace smoothing ----
# Nc ← COUNTDOCSINCLASS(D, c)
spam_count = len(os.listdir('train/spam'))
ham_count = len(os.listdir('train/ham'))
# N ← COUNTDOCS(D)
total_doc_count = spam_count + ham_count
# prior[c] ← Nc/N
spam_prior = spam_count / total_doc_count
ham_prior = ham_count / total_doc_count
# textc ← CONCATENATETEXTOFALLDOCSINCLASS(D, c)
spam_vocab = get_vocabulary("spam")
ham_vocab = get_vocabulary("ham")
# CLI flag of the form "<anything>=true" enables stopword removal.
exclude_stopwords = sys.argv[1].split('=')[1]
# if the user wants to exclude stopwords
if exclude_stopwords == 'true':
    stopwords = [line.rstrip() for line in open('stopwords.txt')]
    spam_vocab = [word for word in spam_vocab if word not in stopwords]
    ham_vocab = [word for word in ham_vocab if word not in stopwords]
# V ← EXTRACTVOCABULARY(D)
all_vocab = spam_vocab + ham_vocab
# Tct ← COUNTTOKENSOFTERM(textc, t)
spam_vocab_count_dict = Counter(spam_vocab)
ham_vocab_count_dict = Counter(ham_vocab)
# do condprob[t][c] ← Tct+1/∑t′(Tct′+1)
spam_bayes_dict = { k: (spam_vocab_count_dict[k] + 1) / (len(spam_vocab) + len(spam_vocab_count_dict)) for k in all_vocab}
ham_bayes_dict = { k: (ham_vocab_count_dict[k] + 1) / (len(ham_vocab) + len(ham_vocab_count_dict)) for k in all_vocab}
# ---- Evaluation on the held-out test folders ----
spam_classifications = test('spam')
ham_classifications = test('ham')
spam_accuracy = spam_classifications.count('spam') / len(spam_classifications)
ham_accuracy = ham_classifications.count('ham') / len(ham_classifications)
spam_test_files_len = len(os.listdir('test/spam'))
ham_test_files_len = len(os.listdir('test/ham'))
spam_test_ratio = spam_test_files_len / (spam_test_files_len + ham_test_files_len)
ham_test_ratio = ham_test_files_len / (spam_test_files_len + ham_test_files_len)
# Overall accuracy weighted by each class's share of the test set.
total_accuracy = spam_accuracy * spam_test_ratio + ham_accuracy * ham_test_ratio
print("spam percent classified accurately: " + str(spam_accuracy))
print("ham percent classified accurately: " + str(ham_accuracy))
print("total accuracy: " + str(total_accuracy) )
| myociss/probabilistic-classifiers | bayes.py | bayes.py | py | 3,170 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 15,... |
26815440104 | from keras.models import Sequential, load_model
from keras.layers import Embedding, Conv1D, MaxPooling1D, LSTM, GRU, Dense
from keras.layers.wrappers import Bidirectional
# Number of output classes (name kept from the original author).
y_idx2word=8
def run_classifier(x_train_seq,x_test_seq,y_train_one_hot,y_test_one_hot):
    '''Train and evaluate a bidirectional-GRU text classifier.

    Expects padded integer sequences of length 20 over a 7000-token
    vocabulary, and one-hot label matrices with `y_idx2word` classes
    (NOTE(review): assumed from the Embedding/Dense shapes -- confirm
    against the preprocessing code). The trained model's
    [loss, accuracy] pair from `model.evaluate` flows to the `return`
    on the next line.
    '''
    model = Sequential()
    print("-------------------------------------------building model----------------------------------------")
    # embedding layer
    model.add(Embedding(7000,
                        300,
                        input_length=20))
    # RNN layers
    for d in range(1):
        model.add(Bidirectional(GRU(units=100,
                                    dropout=0.2,
                                    recurrent_dropout=0.2,
                                    return_sequences=True)))
    model.add(Bidirectional(GRU(units=100,
                                dropout=0.2,
                                recurrent_dropout=0.2,
                                return_sequences=False)))
    # fully-connected output layer
    model.add(Dense(y_idx2word, activation='softmax'))
    # ---- COMPILE THE MODEL ----
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    print("-------------------------------------------training model----------------------------------------")
    history = model.fit(x_train_seq,
                        y_train_one_hot,
                        batch_size=64,
                        epochs=15, validation_data=(x_test_seq, y_test_one_hot))
    print("-------------------------------------------model trained------------------------------------------")
    acc = model.evaluate(x_test_seq,y_test_one_hot)
    print("accuracy is:",acc[1]*100)
return acc | NehalAB/Text_Classifier | multi_class_neural_network.py | multi_class_neural_network.py | py | 1,777 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.models.Sequential",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "keras.layers.Embedding",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "keras.layers.wrappers.Bidirectional",
"line_number": 16,
"usage_type": "call"
},
{
... |
40762266793 |
def getadd():
    '''Geolocate this machine's public IP via ipstack; return "City, CC".

    Side effects: performs a network request and prints "lat, lon".
    NOTE(review): the ipstack access key is hard-coded in source -- it
    should be moved to configuration/environment and rotated.
    '''
    import requests
    import json
    import getip
    ip = getip.get()
    send_url = f'http://api.ipstack.com/{ip}?access_key=7cf3582503675544e752924eb3142e79&format=1'
    r = requests.get(send_url)
    j = json.loads(r.text)
    lat = str(j['latitude'])
    lon = str(j['longitude'])
    print(lat + ", " + lon)
    return j['city'] + ", " + j["country_code"]
def fame__():
    '''Recompute a user's fame rating from "viewed" notifications.

    NOTE(review): the username is hard-coded to "secret" (the session
    lookup is commented out) -- looks like a debugging leftover.
    '''
    from config import db
    from flask import session
    views = 0
    user = "secret" #session.get('logged_in')
    print(user)
    rating = 0
    stmt = db.query("SELECT * FROM views WHERE friend_id = %s", (user,))
    for row in stmt:
        if row['to_user'] == user:
            # Count notifications whose text starts with "viewed".
            if row['notification'].find("viewed") == 0:
                views += 1
    if views <= 5:
        rating += 1
    elif 5 < views < 50:
        # NOTE(review): this adds 5+10+...+45 (= 225) regardless of the
        # exact view count -- a banding table was presumably intended.
        for i in range(5, 50, 5):
            rating += i
    # NOTE(review): this final if/else overwrites everything accumulated
    # above, so the stored rating is always 10 (views >= 50) or 0 --
    # confirm whether the else branch should exist at all.
    if views >= 50:
        rating = 10
    else:
        rating = 0
    print("up to here")
    db.query("UPDATE fame_rating SET rating = %s WHERE username = %s", (rating, user,))
| usthandwa/WeThinkCode_Work | Matcha/views/functions.py | functions.py | py | 1,063 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "getip.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "config.db.query",
"line_number":... |
33008352550 | import keyboard
import time
import random
from ctypes import windll, wintypes, byref
from functools import reduce
from cafe_coding_download import enable
# Typewriter effect: reveal the downloaded source one character per keypress.
enable()
f = open('cafe_coding_download.py', 'r', encoding='UTF-8')
data = f.read()
f.close()
i = 0
s = ''
while True:
    if keyboard.read_key():
        s += data[i]
        print('\r' + s, end='')
        if data[i] == '\n':
            s = ''
            # Print any whitespace following the newline in a single burst
            # NOTE(review): data[i+1] raises IndexError if the file ends
            # right after a newline -- confirm the input always has
            # trailing content.
            while data[i+1] == ' ':
                s += data[i+1]
                i+=1
                print('\r' + s, end='')
        i+=1
        # Reset once every character has been shown
        if i == len(data):
            i = 0
            s = ''
            print()
            print()
| Yotty0404/Cafe_Coding | cafe_coding_keyboard.py | cafe_coding_keyboard.py | py | 741 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "cafe_coding_download.enable",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keyboard.read_key",
"line_number": 19,
"usage_type": "call"
}
] |
2226540156 | from dataclasses import dataclass, field
# dataclass already generates __init__, __repr__ and __eq__ for us
@dataclass(init=True)
class Pessoa:
    # Leading underscores mark the fields as non-public; `nome` is
    # exposed read-only through the property below.
    _nome: str
    _idade: float
    enderecos: list[str] = field(default_factory=list)
    def __post_init__(self):
        # Runs right after the generated __init__ finishes.
        print("depois do init")
    @property
    def nome(self):
        return self._nome
if __name__ == "__main__":
p1 = Pessoa('micael', 26)
print(p1) | michaelmedina10/estudos-python | oop/dataclass.py | dataclass.py | py | 411 | python | pt | code | 1 | github-code | 1 | [
{
"api_name": "dataclasses.field",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 5,
"usage_type": "call"
}
] |
31666142254 | # author: sunshine
# datetime:2021/8/5 下午3:17
import torch
import torch.nn as nn
from transformers import BertModel
class SMPNet(nn.Module):
    '''BERT-based classifier with two independent output heads.

    Both heads share the BERT encoder; each consumes the concatenation of
    the position-0 ([CLS]) vectors from the three deepest hidden layers
    plus the pooler output (768 * 4 features) and emits `num_class`
    logits.
    '''
    def __init__(self, args, num_class):
        super(SMPNet, self).__init__()
        self.bert = BertModel.from_pretrained(args.bert_path)
        self.fc1 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(768 * 4, 128),
            nn.ReLU(),
            nn.Linear(128, num_class)
        )
        self.fc2 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(768 * 4, 128),
            nn.ReLU(),
            nn.Linear(128, num_class)
        )
    def forward(self, x1=None, x2=None):
        '''Run either or both inputs; returns a tuple with one logits
        tensor per provided input (x1 through fc1, x2 through fc2).

        x1/x2 are tuples of BERT inputs unpacked positionally -- assumes
        (input_ids, attention_mask, ...) order; confirm against callers.
        '''
        out = ()
        if x1 is not None:
            x1 = self.bert(*x1, output_hidden_states=True)
            x1_hidden = x1.hidden_states
            # [CLS] vectors from the three deepest layers + pooled output.
            concat1 = torch.cat([x1_hidden[-1][:, 0], x1_hidden[-2][:, 0], x1_hidden[-3][:, 0], x1.pooler_output],
                                dim=-1)
            out1 = self.fc1(concat1)
            out = out + (out1,)
        if x2 is not None:
            x2 = self.bert(*x2, output_hidden_states=True)
            x2_hidden = x2.hidden_states
            concat2 = torch.cat([x2_hidden[-1][:, 0], x2_hidden[-2][:, 0], x2_hidden[-3][:, 0], x2.pooler_output],
                                dim=-1)
            out2 = self.fc2(concat2)
            out = out + (out2,)
        return out
| fushengwuyu/smp2020_ewect | src/model.py | model.py | py | 1,411 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel.from_pretrained",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tr... |
19845668758 | from django.shortcuts import render
from .forms import UploadTransactionFileForm
from .models import TransactionFIles, Transactions
from .serializers import TransactionnsSerializer
from .utils.mixins import TransactionMixin
from rest_framework.generics import ListCreateAPIView
from rest_framework.views import APIView, Response, status
from django.http import HttpResponseRedirect
import os
import ipdb
def home_view(request):
    '''Render the upload form; on POST, parse a CNAB fixed-width file.

    Each uploaded line is sliced into transaction fields, persisted via
    TransactionnsSerializer, and echoed back in the table template.
    '''
    context = {}
    if request.POST:
        form = UploadTransactionFileForm(request.POST, request.FILES)
        if form.is_valid():
            file_upload = request.FILES["transaction_file"].read()
            file_text = file_upload.decode("UTF-8")
            file_arr = file_text.split("\n")
            # ipdb.set_trace()
            # for line in file_arr:
            # ipdb.set_trace()
            data = []
            for line in file_arr:
                # Fixed-width layout: 1 type char, 8-digit date, 10-digit
                # value, 11-digit CPF, 12-digit card, 6-digit time,
                # 14-char owner, 18-char shop name.
                # NOTE(review): a trailing newline in the upload yields an
                # empty last line, which makes line[0] raise IndexError --
                # confirm the input format.
                obj = {
                    "type": line[0],
                    "date": f"{line[1:5]}-{line[5:7]}-{line[7:9]}",
                    "value": line[9:19],
                    "cpf": line[19:30],
                    "credit_card": line[30:42],
                    "attendance_hour": f"{line[42:44]}:{line[44:46]}:{line[46:48]}",
                    "shop_owner": line[48:62],
                    "shop_name": line[62:80],
                }
                data.append(obj)
            # ipdb.set_trace()
            serializer = TransactionnsSerializer(data=data, many=True)
            serializer.is_valid(raise_exception=True)
            serializer.save()
            context["data"] = data
            return render(request, "table.html", context)
        else:
            # Invalid form: redisplay it with validation errors.
            form = UploadTransactionFileForm()
            context["form"] = form
            return render(request, "home.html", context)
    # GET: show an empty upload form.
    context["form"] = UploadTransactionFileForm()
    return render(request, "home.html", context)
# def home_view(request):
# context = {}
# if request.POST:
# form = UploadTransactionFileForm(request.POST, request.FILES)
# if form.is_valid():
# upload = request.FILES["transaction_file"]
# teste = TransactionFIles(upload=upload)
# teste.save()
# ipdb.set_trace()
# else:
# form = UploadTransactionFileForm()
# context["form"] = form
# return render(request, "home.html", context)
# context["form"] = UploadTransactionFileForm()
# return render(request, "home.html", context)
class TransactionsView(APIView):
    '''API endpoint that parses an uploaded CNAB file into transactions.'''
    def post(self, request):
        '''Decode `transaction_file` and persist one transaction per line.

        Fixes relative to the original:
        * `int(i[9:17].i[17:19])` raised AttributeError at runtime -- the
          value is now the raw 10-char slice, exactly as `home_view` does.
        * the date and hour f-strings were malformed (nested format spec,
          stray spaces); they now match `home_view`'s formatting.
        * the field names ("hour" vs "attendance_hour") now match the
          keys `home_view` feeds the same serializer.
        * `Response(status.HTTP_201_CREATED)` put the status code in the
          response *body* (with a default 200 status); it is now passed
          as the `status` keyword.
        '''
        upload_file = request.FILES["transaction_file"].read()
        upload_txt = upload_file.decode("UTF-8")
        upload_arr = upload_txt.split("\n")
        for line in upload_arr:
            # Fixed-width CNAB layout, mirroring home_view's slices.
            obj = {
                "type": line[0],
                "date": f"{line[1:5]}-{line[5:7]}-{line[7:9]}",
                "value": line[9:19],
                "cpf": line[19:30],
                "credit_card": line[30:42],
                "attendance_hour": f"{line[42:44]}:{line[44:46]}:{line[46:48]}",
                "shop_owner": line[48:62],
                "shop_name": line[62:80],
            }
            serializer = TransactionnsSerializer(data=obj)
            serializer.is_valid(raise_exception=True)
            serializer.save()
        return Response(status=status.HTTP_201_CREATED)
| reisquaza/CNAB | transactions/views.py | views.py | py | 3,344 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "forms.UploadTransactionFileForm",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "serializers.TransactionnsSerializer",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 50,
"usage_type": "call"
}... |
17315387689 | """
Code adapted from https://github.com/uvavision/Double-Hard-Debias/blob/master/eval.py
"""
import glob
import os
import numpy as np
from sklearn.utils import Bunch
from sklearn.cluster import AgglomerativeClustering, KMeans
from six import iteritems
def evaluate_categorization(word_vectors, X, y, method='kmeans', seed=None):
    """
    Evaluate embeddings on categorization task.

    Benchmark words missing from `word_vectors` are dropped from X/y;
    any later lookup miss falls back to the mean embedding. Returns the
    best cluster purity over the tried clustering configurations.

    Parameters
    ----------
    w: dict
        Embeddings to test.
    X: vector, shape: (n_samples, )
        Vector of words.
    y: vector, shape: (n_samples, )
        Vector of cluster assignments.
    """
    # Get all word embeddings into a matrix
    vectors = []
    for word in word_vectors:
        vectors.append(word_vectors[word])
    # Mean of all embeddings
    mean_vector = np.mean(vectors, axis=0, keepdims=True)
    w = word_vectors
    new_x = []
    new_y = []
    exist_cnt = 0
    # Keep only benchmark words that exist in the embedding vocabulary.
    for idx, word in enumerate(X.flatten()):
        if word in w:
            new_x.append(X[idx])
            new_y.append(y[idx])
            exist_cnt += 1
    # Number of words in BLESS that also exists in our vocabulary
    print('exist {} in {}'.format(exist_cnt, len(X)))
    X = np.array(new_x)
    y = np.array(new_y)
    # Put all the words that were in both BLESS and our vocab into a matrix
    words = np.vstack([w.get(word, mean_vector) for word in X.flatten()])
    # Deterministic (for a fixed seed) permutation of the samples.
    ids = np.random.RandomState(seed).choice(range(len(X)), len(X), replace=False)
    # Evaluate clustering on several hyperparameters of AgglomerativeClustering and
    # KMeans
    best_purity = 0
    if method == "all" or method == "agglomerative":
        best_purity = calculate_purity(y[ids], AgglomerativeClustering(n_clusters=len(set(y)),
                                                                       affinity="euclidean",
                                                                       linkage="ward").fit_predict(words[ids]))
        for affinity in ["cosine", "euclidean"]:
            for linkage in ["average", "complete"]:
                purity = calculate_purity(y[ids], AgglomerativeClustering(n_clusters=len(set(y)),
                                                                          affinity=affinity,
                                                                          linkage=linkage).fit_predict(words[ids]))
                best_purity = max(best_purity, purity)
    if method == "all" or method == "kmeans":
        purity = calculate_purity(y[ids], KMeans(random_state=seed, n_init=10, n_clusters=len(set(y))).
                                  fit_predict(words[ids]))
        best_purity = max(purity, best_purity)
    return best_purity
def calculate_purity(y_true, y_pred, verbose=False):
    """
    Compute cluster purity: each predicted cluster is credited with the
    size of its best-overlapping true cluster; the credits are summed and
    normalized by the sample count.

    Parameters
    ----------
    y_true: array, shape: (n_samples, 1)
        True cluster labels
    y_pred: array, shape: (n_samples, 1)
        Cluster assingment.

    Returns
    -------
    purity: float
        Calculated purity.
    """
    assert len(y_true) == len(y_pred)
    n_samples = len(y_true)
    # One indicator row per distinct label: 1 where a sample carries it.
    true_rows = np.zeros(shape=(len(set(y_true)), n_samples))
    pred_rows = np.zeros_like(true_rows)
    for row, label in enumerate(set(y_true)):
        if verbose:
            print("true:", row)
        true_rows[row] = (y_true == label).astype("int")
    for row, label in enumerate(set(y_pred)):
        if verbose:
            print("pred:", row)
        pred_rows[row] = (y_pred == label).astype("int")
    # overlap[i, j] = |pred cluster i ∩ true cluster j|
    overlap = pred_rows.dot(true_rows.T)
    return np.sum(np.max(overlap, axis=1)) / n_samples
def evaluate_cate(wv_dict, benchmarks, method='all', seed=None):
    '''Run the categorization (cluster purity) benchmark on each dataset.

    Fixes: `seed` is now forwarded to `evaluate_categorization` (it was
    previously hard-coded to None, making the parameter a no-op), and the
    per-benchmark purities are returned instead of being discarded
    (backward-compatible: the function previously returned None).

    Parameters
    ----------
    wv_dict: dict
        word -> embedding vector.
    benchmarks: dict
        name -> Bunch with `X` (words) and `y` (category labels).

    Returns
    -------
    dict mapping benchmark name to its best cluster purity.
    '''
    categorization_results = {}
    for name, data in iteritems(benchmarks):
        print("Sample data from {}, num of samples: {} : \"{}\" is assigned class {}".format(
            name, len(data.X), data.X[0], data.y[0]))
        categorization_results[name] = evaluate_categorization(wv_dict, data.X, data.y, method=method, seed=seed)
        print("Cluster purity on {} {}".format(name, categorization_results[name]))
    return categorization_results
def create_bunches(bench_paths):
    '''Load categorization benchmarks from disk.

    Each path is a directory of .txt files, one file per category; the
    file name (sans extension) is used as the category label. EN-BATTIG
    lines are comma-separated with extra columns; other benchmarks are
    space-separated. Returns {benchmark_name: Bunch(X=words, y=labels)}.
    '''
    output = {}
    for path in bench_paths:
        files = glob.glob(os.path.join(path, "*.txt"))
        name = os.path.basename(path)
        if name == 'EN-BATTIG':
            sep = ","
        else:
            sep = " "
        X = []
        y = []
        names = []
        for cluster_id, file_name in enumerate(files):
            with open(file_name) as f:
                lines = f.read().splitlines()[:]
            X += [l.split(sep) for l in lines]
            # One label per line, taken from the file's base name.
            y += [os.path.basename(file_name).split(".")[0]] * len(lines)
        output[name] = Bunch(X=np.array(X, dtype="object"), y=np.array(y).astype("object"))
        if sep == ",":
            # EN-BATTIG carries extra per-word columns: word, freq,
            # frequency, rank, rfreq -- split them into separate fields.
            data = output[name]
            output[name] = Bunch(X=data.X[:, 0], y=data.y, freq=data.X[:, 1], frequency=data.X[:, 2], rank=data.X[:, 3],
                                 rfreq=data.X[:, 4])
    return output
| YolandaMDavis/DoubleHardMulticlass | common/concept.py | concept.py | py | 5,620 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.mean",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": ... |
37404377541 | #!/usr/bin/env python3
# coding: utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import multiprocessing as mp
import math
import plotly
from plotly.graph_objs import Scatter, Line
import numpy as np
from numpy.lib.stride_tricks import as_strided as ast
from scipy.ndimage import gaussian_filter
from scipy import misc
def get_imgs_psnr(imgs1, imgs2):
    '''Mean per-image PSNR (in dB) over a batch of RGB images.

    Inputs are float arrays in the 0..255 range, shaped
    (batch, height, width, channel); identical images score 100.
    '''
    def psnr_single(a, b):
        # PSNR for one image pair; 100 acts as the "identical" sentinel.
        mse = np.mean((a - b) ** 2)
        if mse == 0:
            return 100
        pixel_max = 255.0
        return 20 * math.log10(pixel_max / math.sqrt(mse))
    assert imgs1.shape[0] == imgs2.shape[0], "Batch size is not match."
    assert np.mean(imgs1) >= 1.0, "Check 1st input images range"
    assert np.mean(imgs2) >= 1.0, "Check 2nd input images range"
    scores = [psnr_single(imgs1[k], imgs2[k]) for k in range(imgs1.shape[0])]
    return np.mean(np.array(scores))
# imgs1/imgs2: 0 ~ 255, float32, numpy, rgb
# [batch_size, height, width, channel]
# return: list
def get_imgs_psnr_ssim(imgs1, imgs2):
    """Mean PSNR and SSIM over a batch of image pairs.

    imgs1/imgs2: 0 ~ 255, float32, numpy arrays of shape
    [batch_size, height, width, channel] (rgb).
    Returns (mean_psnr, mean_ssim) scalars.
    """
    # per image
    def get_psnr(img1, img2):
        mse = np.mean((img1 - img2) ** 2)
        if mse == 0:
            # Identical images: cap instead of log(0).
            return 100
        pixel_max = 255.0
        # return 10 * math.log10((pixel_max ** 2) / mse)
        return 20 * math.log10(pixel_max / math.sqrt(mse))
    def block_view(A, block=(3, 3)):
        """Provide a 2D block view to 2D array. No error checking made.
        Therefore meaningful (as implemented) only for blocks strictly
        compatible with the shape of A."""
        # simple shape and strides computations may seem at first strange
        # unless one is able to recognize the 'tuple additions' involved ;-)
        # Bug fix: as_strided requires an integer shape. Under Python 3 the
        # original true division ("/") produced floats and raised here; use
        # floor division instead (same result as the Python 2 behavior).
        shape = (A.shape[0] // block[0], A.shape[1] // block[1]) + block
        strides = (block[0] * A.strides[0], block[1] * A.strides[1]) + A.strides
        return ast(A, shape=shape, strides=strides)
    def ssim(img1, img2, C1=0.01**2, C2=0.03**2):
        # Block-wise (4x4) SSIM approximation.
        bimg1 = block_view(img1, (4, 4))
        bimg2 = block_view(img2, (4, 4))
        s1 = np.sum(bimg1, (-1, -2))
        s2 = np.sum(bimg2, (-1, -2))
        ss = np.sum(bimg1*bimg1, (-1, -2)) + np.sum(bimg2*bimg2, (-1, -2))
        s12 = np.sum(bimg1*bimg2, (-1, -2))
        vari = ss - s1*s1 - s2*s2
        covar = s12 - s1*s2
        ssim_map = (2 * s1 * s2 + C1) * (2 * covar + C2) / ((s1 * s1 + s2 * s2 + C1) * (vari + C2))
        return np.mean(ssim_map)
    # FIXME there seems to be a problem with this code
    def ssim_exact(img1, img2, sd=1.5, C1=0.01**2, C2=0.03**2):
        # Gaussian-weighted SSIM (the variant actually used below).
        mu1 = gaussian_filter(img1, sd)
        mu2 = gaussian_filter(img2, sd)
        mu1_sq = mu1 * mu1
        mu2_sq = mu2 * mu2
        mu1_mu2 = mu1 * mu2
        sigma1_sq = gaussian_filter(img1 * img1, sd) - mu1_sq
        sigma2_sq = gaussian_filter(img2 * img2, sd) - mu2_sq
        sigma12 = gaussian_filter(img1 * img2, sd) - mu1_mu2
        ssim_num = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2))
        ssim_den = ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
        ssim_map = ssim_num / ssim_den
        return np.mean(ssim_map)
    assert imgs1.shape[0] == imgs2.shape[0], "Batch size is not match."
    assert np.mean(imgs1) >= 1.0, "Check 1st input images range"
    assert np.mean(imgs2) >= 1.0, "Check 2nd input images range"
    PSNR_RGB, SSIM = list(), list()
    for num_of_batch in range(imgs1.shape[0]):
        # RGB
        PSNR_RGB.append(get_psnr(imgs1[num_of_batch], imgs2[num_of_batch]))
        SSIM.append(ssim_exact(imgs1[num_of_batch], imgs2[num_of_batch]))
    PSNR_RGB = np.mean(np.array(PSNR_RGB))
    SSIM = np.mean(np.array(SSIM))
    return PSNR_RGB, SSIM
def save_imgs(img, global_img, origin_img, downsample_img, file_name):
    '''Save image

    Tiles four image sets into one grid and writes it to disk. Each group of
    four columns shows: [upsampled low-res | original | img | global_img].
    NOTE(review): assumes N_samples is divisible by 4 — TODO confirm callers.
    NOTE(review): downsample_img appears to be 8x smaller per side than the
    other inputs (it is nearest-upsampled by 8 below) — verify.
    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this call
    needs an old SciPy + Pillow, or a port to imageio.imwrite.
    '''
    N_samples, height, width, channel = img.shape
    N_row = int(N_samples/4)
    N_col = 4*4
    repeat_ds_img = []
    for index in range(N_samples):
        #print downsample_img.shape
        # Nearest-neighbour upsample by 8x in both spatial dims so the
        # low-res input matches the full-resolution tile size.
        repeat_ds_img.append(downsample_img[index].repeat(axis=0, repeats=8).repeat(axis=1, repeats=8).astype(np.uint8))
    repeat_ds_img = np.array(repeat_ds_img)
    combined_imgs = np.ones((N_row*height, N_col*width, channel))
    for i in range(N_row):
        for j in range(int(N_col/4)):
            # n..k are the four consecutive column slots for sample (i, j).
            n = 4*j
            m = 4*j+1
            l = 4*j+2
            k = 4*j+3
            combined_imgs[int(i*height):int((i+1)*height), int(n*width):int((n+1)*width), :] = repeat_ds_img[int(i*(N_col/4)+j)]
            combined_imgs[int(i*height):int((i+1)*height), int(m*width):int((m+1)*width), :] = origin_img[int(i*(N_col/4)+j)]
            combined_imgs[int(i*height):int((i+1)*height), int(l*width):int((l+1)*width), :] = img[int(i*(N_col/4)+j)]
            combined_imgs[int(i*height):int((i+1)*height), int(k*width):int((k+1)*width), :] = global_img[int(i*(N_col/4)+j)]
    print('Saving the images.', file_name)
    misc.imsave(file_name, combined_imgs)
# Converts a state from the OpenAI Gym (a numpy array) to a batch tensor
def state_to_tensor(state):
    """Convert a Gym state (numpy array) into a float tensor with a batch dim."""
    batched = torch.from_numpy(state).float()
    return batched.unsqueeze(0)
class ParamInit(object):
    '''parameter initializer

    Callable intended for ``net.apply(ParamInit(method, **kwargs))``:
    conv weights get the selected init scheme, batch-norm layers get the
    DCGAN-style N(1, 0.02) weight / zero bias init.
    '''
    def __init__(self, method, **kargs):
        super(ParamInit, self).__init__()
        # Use the in-place ``*_`` initializers: the un-suffixed variants are
        # deprecated aliases in torch.nn.init with the same behavior.
        # The keys (including the historical 'kaming' spelling) are kept
        # unchanged because callers select schemes by these strings.
        self.inits = {
            'xavier_normal': torch.nn.init.xavier_normal_,
            'xavier_uniform': torch.nn.init.xavier_uniform_,
            'kaming_normal': torch.nn.init.kaiming_normal_,
            'kaming_uniform': torch.nn.init.kaiming_uniform_,
        }
        if method not in self.inits:
            raise RuntimeError('unknown initialization %s' % method)
        self.method = self.inits[method]
        self.kargs = kargs
    def __call__(self, module):
        """Initialize one module; dispatches on the class name like DCGAN."""
        classname = module.__class__.__name__
        if 'Conv' in classname and hasattr(module, 'weight'):
            self.method(module.weight, **self.kargs)
        elif 'BatchNorm' in classname:
            module.weight.data.normal_(1.0, 0.02)
            module.bias.data.fill_(0)
# Global counter
class Counter():
    """Process-safe global step counter backed by a shared integer."""
    def __init__(self):
        self.val = mp.Value('i', 0)
        self.lock = mp.Lock()
    def increment(self):
        # Guard the read-modify-write so concurrent workers don't race.
        with self.lock:
            self.val.value = self.val.value + 1
    def value(self):
        with self.lock:
            current = self.val.value
        return current
# Plots min, max and mean + standard deviation bars of a population over time
def plot_line(xs, ys_population, title, filename):
    """Plot min/max/mean +- one std. dev. of a population over time to HTML."""
    colour_minmax = 'rgb(0, 132, 180)'
    colour_mean = 'rgb(0, 172, 237)'
    colour_band = 'rgba(29, 202, 255, 0.2)'
    ys = torch.Tensor(ys_population)
    y_min = ys.min(1)[0].squeeze()
    y_max = ys.max(1)[0].squeeze()
    y_mean = ys.mean(1).squeeze()
    y_std = ys.std(1).squeeze()
    upper = y_mean + y_std
    lower = y_mean - y_std
    # Trace order matters: 'tonexty' fills against the previous trace.
    traces = [
        Scatter(x=xs, y=upper.numpy(), line=Line(color='transparent'), name='+1 Std. Dev.', showlegend=False),
        Scatter(x=xs, y=y_mean.numpy(), fill='tonexty', fillcolor=colour_band, line=Line(color=colour_mean), name='Mean'),
        Scatter(x=xs, y=lower.numpy(), fill='tonexty', fillcolor=colour_band, line=Line(color='transparent'), name='-1 Std. Dev.', showlegend=False),
        Scatter(x=xs, y=y_min.numpy(), line=Line(color=colour_minmax, dash='dash'), name='Min'),
        Scatter(x=xs, y=y_max.numpy(), line=Line(color=colour_minmax, dash='dash'), name='Max'),
    ]
    plotly.offline.plot({
        'data': traces,
        'layout': dict(title=title,
                       xaxis={'title': 'Step'},
                       yaxis={'title': 'Average'})
    }, filename=filename, auto_open=False)
def plot_loss(xs, p_loss, v_loss, title, filename):
    """Plot policy-loss and value-loss curves against steps into an HTML file."""
    series = [
        ('Policy Loss', p_loss, 'rgb(0, 255, 0)'),
        ('Value Loss', v_loss, 'rgb(255, 0, 0)'),
    ]
    traces = [Scatter(x=xs, y=values, line=Line(color=colour), mode='lines', name=label)
              for label, values, colour in series]
    plotly.offline.plot({
        'data': traces,
        'layout': dict(title=title,
                       xaxis={'title': 'Step'},
                       yaxis={'title': 'Average'})
    }, filename=filename, auto_open=False)
| chenaddsix/pytorch_a3c | utils.py | utils.py | py | 7,553 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.mean",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 28,
... |
36221665148 | import numpy as np
import os
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
app = Flask(__name__)
@app.route("/")
def welcome():
    """List all available api routes."""
    routes = [
        "/api/v1.0/precipitation",
        "/api/v1.0/stations",
        "/api/v1.0/tobs",
        "/api/v1.0/start",
        "/api/v1.0/start/end",
    ]
    listing = "".join(route + "<br/>" for route in routes)
    return (
        "Available Routes:<br/>"
        "<br/>"
        + listing +
        "<br/>"
        "NOTE: please format dates <em>start</em> and <em>end</em> as YYYY-MM-DD"
    )
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return amount of precipitation over the past year"""
    # initialize session
    session = Session(bind=engine)
    # Latest measurement date in the data set, then back up exactly one year.
    latest = session.query(func.max(Measurement.date)).scalar()
    year, month, day = (int(part) for part in latest.split("-"))
    one_year_ago = dt.date(year, month, day) - dt.timedelta(days=365)
    # Date/precipitation pairs newer than the cutoff, keyed by date.
    rows = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date > one_year_ago)
    precipitation = {date: prcp for date, prcp in rows}
    # close the session to end the communication with the database
    session.close()
    return jsonify(precipitation)
@app.route("/api/v1.0/stations")
def stations():
    """Return a list of all stations"""
    session = Session(bind=engine)
    # Query all stations (eagerly, so the session can be closed right after).
    records = session.query(Station).all()
    # close the session to end the communication with the database
    session.close()
    # Serialize each station row into a plain dict for JSON output.
    fields = ("id", "station", "name", "latitude", "longitude", "elevation")
    all_stations = [
        {field: getattr(record, field) for field in fields}
        for record in records
    ]
    return jsonify(all_stations)
@app.route("/api/v1.0/tobs")
def tobs():
    """Return a list of all temperature observatons for last year of measurements"""
    session = Session(bind=engine)
    latest = session.query(func.max(Measurement.date)).scalar()
    year, month, day = (int(part) for part in latest.split("-"))
    one_year_ago = dt.date(year, month, day) - dt.timedelta(days=365)
    # Join measurements with their station names for the trailing year.
    rows = session.query(Measurement.station, Measurement.tobs, Station.name).\
        filter(Measurement.station == Station.station, Measurement.date > one_year_ago)
    # close the session to end the communication with the database
    session.close()
    all_temps = [
        {"station": station, "temp": temp, "name": name}
        for station, temp, name in rows
    ]
    return jsonify(all_temps)
@app.route("/api/v1.0/<start>")
def tempstart(start):
    """Return temperature statistics for dates greater than and equal to the start date"""
    session = Session(bind=engine)
    # `start` arrives as YYYY-MM-DD; turn it into a date object for the filter.
    year, month, day = (int(part) for part in start.split("-"))
    date_from = dt.date(year, month, day)
    after_start = Measurement.date >= date_from
    tmax = session.query(func.max(Measurement.tobs)).filter(after_start).scalar()
    tmin = session.query(func.min(Measurement.tobs)).filter(after_start).scalar()
    tavg = round(session.query(func.avg(Measurement.tobs)).filter(after_start).scalar(), 1)
    # close the session to end the communication with the database
    session.close()
    return (
        f"Temperature statistics starting from {start}:</br>"
        "</br>"
        f"Maximum Temperature: {tmax} °F</br>"
        f"Minimum Temperature: {tmin} °F</br>"
        f"Average Temperature: {tavg} °F"
    )
@app.route("/api/v1.0/<start>/<end>")
def tempstartend(start, end):
    """Return temperature statistics for dates between the start and end date inclusive"""
    session = Session(bind=engine)
    # Both bounds arrive as YYYY-MM-DD strings.
    sy, sm, sd = (int(part) for part in start.split("-"))
    ey, em, ed = (int(part) for part in end.split("-"))
    date_from = dt.date(sy, sm, sd)
    date_to = dt.date(ey, em, ed)
    in_range = (Measurement.date >= date_from, Measurement.date <= date_to)
    tmax = session.query(func.max(Measurement.tobs)).filter(*in_range).scalar()
    tmin = session.query(func.min(Measurement.tobs)).filter(*in_range).scalar()
    tavg = round(session.query(func.avg(Measurement.tobs)).filter(*in_range).scalar(), 1)
    # close the session to end the communication with the database
    session.close()
    return (
        f"Temperature statistics starting from {start} and ending {end}:</br>"
        "</br>"
        f"Maximum Temperature: {tmax} °F</br>"
        f"Minimum Temperature: {tmin} °F</br>"
        f"Average Temperature: {tavg} °F"
    )
if __name__ == "__main__":
app.run(debug=True) | epayne323/sqlalchemy-challenge | app.py | app.py | py | 6,008 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.automap.automap_base",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 21,
"usage_type": "call"
},
{
"api_name... |
1063359949 | from django.db import models
class BaseModel(models.Model):
    # Abstract base that stamps every row with creation/modification times.
    created_at = models.DateTimeField(
        "Data de Criação", auto_now=False, auto_now_add=True
    )
    modified_at = models.DateTimeField(
        "Data de Modificação", auto_now=True, auto_now_add=False
    )
    class Meta:
        # Abstract: no table is created for BaseModel itself.
        abstract = True
class Manga(BaseModel):
    # A manga title scraped/imported from the CDM source.
    # Publication lifecycle states (labels in Portuguese).
    STATUS_CHOICES = (
        (1, "Cancelado"),
        (2, "Completo"),
        (3, "Completo (Em Tradução)"),
        (4, "Em publicação"),
        (5, "Pausado"),
    )
    # External identifier from the CDM source; unique per manga.
    cdm_id = models.CharField(
        "CDM ID", max_length=255, blank=False, null=False, unique=True
    )
    title = models.CharField("Título", max_length=255, blank=False, null=False)
    link = models.URLField("Link", max_length=255, blank=False, null=False, unique=True)
    img_url = models.URLField("Imagem", max_length=255, blank=False, null=True)
    publish_year = models.IntegerField("Ano", blank=False, null=True)
    status = models.IntegerField(
        "Status", choices=STATUS_CHOICES, blank=False, null=True
    )
    # SET_NULL keeps the manga row when its author/designer is deleted,
    # leaving the FK empty.
    author = models.ForeignKey(
        "mangas.Author",
        verbose_name="Autor",
        on_delete=models.SET_NULL,
        blank=False,
        null=True,
    )
    designer = models.ForeignKey(
        "mangas.Designer",
        verbose_name="Artista",
        on_delete=models.SET_NULL,
        blank=False,
        null=True,
    )
    genres = models.ManyToManyField("mangas.Genre", verbose_name="Gênero")
    types = models.ManyToManyField("mangas.Type", verbose_name="Tipos")
    summary = models.TextField("Resumo", blank=False, null=True)
    class Meta:
        verbose_name = "mangá"
        verbose_name_plural = "mangás"
        # Oldest records first.
        ordering = ("created_at",)
    def __str__(self):
        return self.title
class Author(BaseModel):
    # A manga author; names are unique.
    name = models.CharField("Nome", max_length=125, unique=True)
    class Meta:
        verbose_name = "autor"
        verbose_name_plural = "autores"
        ordering = ("created_at",)
    def __str__(self):
        return self.name
class Designer(BaseModel):
    # A manga artist/illustrator; names are unique.
    name = models.CharField("Nome", max_length=125, unique=True)
    class Meta:
        verbose_name = "artista"
        verbose_name_plural = "artistas"
        ordering = ("created_at",)
    def __str__(self):
        return self.name
class Genre(BaseModel):
    # A manga genre tag (many-to-many with Manga); names are unique.
    name = models.CharField("Nome", max_length=125, unique=True)
    class Meta:
        verbose_name = "gênero"
        verbose_name_plural = "gêneros"
        ordering = ("created_at",)
    def __str__(self):
        return self.name
class Type(BaseModel):
    # A publication type tag (many-to-many with Manga); names are unique.
    name = models.CharField("Nome", max_length=125, unique=True)
    class Meta:
        verbose_name = "tipo"
        verbose_name_plural = "tipos"
        ordering = ("created_at",)
    def __str__(self):
        return self.name
| CleysonPH/cdm-unofficial-api | mangas/models.py | models.py | py | 2,848 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name... |
19890027037 | import torch
import numpy as np
import torch.nn as nn
import math
from PIL import Image
import os
from core.constants import palette, NUM_CLASSES, IGNORE_LABEL
def denorm(x):
    """Map a tensor from [-1, 1] back to [0, 1], clamping any overshoot."""
    rescaled = (x + 1) / 2
    return rescaled.clamp(0, 1)
def norm(x):
    """Map a tensor from [0, 1] to [-1, 1], clamping any overshoot."""
    rescaled = 2 * (x - 0.5)
    return rescaled.clamp(-1, 1)
def reset_grads(model, require_grad):
    """Enable/disable gradient tracking on all parameters; returns the model."""
    for parameter in model.parameters():
        parameter.requires_grad_(require_grad)
    return model
def imresize_torch(image_batch, scale, mode):
    """Resize an NCHW batch by `scale` with the given interpolation mode.

    `align_corners` is only valid for (and only set on) bicubic interpolation.
    """
    # np.int was removed in NumPy 1.24; plain int has the same semantics here.
    new_size = np.ceil(scale * np.array([image_batch.shape[2], image_batch.shape[3]])).astype(int)
    # Convert to plain Python ints — interpolate expects an int size tuple.
    target_h, target_w = int(new_size[0]), int(new_size[1])
    if mode == 'bicubic':
        return nn.functional.interpolate(image_batch, size=(target_h, target_w), mode=mode, align_corners=True)
    else:
        return nn.functional.interpolate(image_batch, size=(target_h, target_w), mode=mode)
def calc_gradient_penalty(netD, real_data, fake_data, LAMBDA, device):
    """WGAN-GP gradient penalty on random interpolates of real/fake batches."""
    # Random mixing coefficient, broadcast to the full batch shape.
    mix = torch.rand(1, 1).expand(real_data.size()).to(device)
    interpolates = torch.autograd.Variable(
        mix * real_data + ((1 - mix) * fake_data), requires_grad=True)
    disc_out = netD(interpolates)
    grads = torch.autograd.grad(outputs=disc_out, inputs=interpolates,
                                grad_outputs=torch.ones(disc_out.size()).to(device),
                                create_graph=True, retain_graph=True, only_inputs=True)[0]
    # Penalize deviation of each sample's gradient norm from 1.
    return ((grads.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
def save_networks(path, netDst, netGst, netDts, netGts, Gst, Gts, Dst, Dts, opt, semseg_cs=None):
    # Persist both GAN directions (source->target and target->source) plus the
    # accumulated per-scale lists; skipped entirely for debug runs.
    # NOTE(review): Gst/Gts/Dst/Dts appear to be lists of previously trained
    # scale networks that the current net is appended to — TODO confirm.
    if not opt.debug_run:
        try:
            os.makedirs(path)
        except OSError:
            # Directory already exists (or cannot be created) — proceed.
            pass
        if len(opt.gpus) > 1:
            # DataParallel wraps the real network; save the unwrapped .module.
            torch.save(Dst + [netDst.module], '%s/Dst.pth' % (path))
            torch.save(Gst + [netGst.module], '%s/Gst.pth' % (path))
            torch.save(Dts + [netDts.module], '%s/Dts.pth' % (path))
            torch.save(Gts + [netGts.module], '%s/Gts.pth' % (path))
            if semseg_cs != None:
                torch.save(semseg_cs.module, '%s/semseg_cs.pth' % (path))
        else:
            torch.save(Dst + [netDst], '%s/Dst.pth' % (path))
            torch.save(Gst + [netGst], '%s/Gst.pth' % (path))
            torch.save(Dts + [netDts], '%s/Dts.pth' % (path))
            torch.save(Gts + [netGts], '%s/Gts.pth' % (path))
            if semseg_cs != None:
                torch.save(semseg_cs, '%s/semseg_cs.pth' % (path))
def colorize_mask(mask):
    """Colorize a label-mask tensor; returns a CHW uint8 numpy RGB array."""
    # mask: tensor of the mask
    indexed = Image.fromarray(mask.cpu().numpy().astype(np.uint8)).convert('P')
    indexed.putpalette(palette)
    # HWC -> CHW to match the layout expected by TensorBoard's add_image.
    return np.array(indexed.convert('RGB')).transpose((2, 0, 1))
def nanmean_torch(x):
    """Mean of `x` ignoring NaN entries (they count toward neither sum nor N)."""
    is_valid = ~torch.isnan(x)
    count = torch.where(is_valid, torch.full_like(x, 1), torch.full_like(x, 0)).sum()
    total = torch.where(is_valid, x, torch.full_like(x, 0)).sum()
    return total / count
def confusion_matrix_torch(y_pred, y_true, num_classes):
    """Confusion matrix (rows = true class, cols = predicted) via bincount."""
    N = num_classes
    # Encode each (true, pred) pair as one index into an N*N histogram.
    y = (N * y_true + y_pred).type(torch.long)
    y = torch.bincount(y)
    if len(y) < N * N:
        # Pad on the same device as the inputs; the original hard-coded
        # .cuda() here, which crashed for CPU tensors / CUDA-less hosts.
        y = torch.cat((y, torch.zeros(N * N - len(y), dtype=torch.long, device=y.device)))
    y = y.reshape(N, N)
    return y
def compute_cm_batch_torch(y_pred, y_true, ignore_label, classes):
    """Accumulate a confusion matrix over a batch, skipping ignore_label pixels."""
    batch_size = y_pred.shape[0]
    # Accumulate on the same device as the predictions; the original
    # hard-coded .cuda(), which broke CPU-only evaluation.
    confusion_matrix = torch.zeros((classes, classes), device=y_pred.device)
    for i in range(batch_size):
        y_pred_curr = y_pred[i, :, :]
        y_true_curr = y_true[i, :, :]
        # Drop pixels carrying the ignore value before counting.
        inds_to_calc = y_true_curr != ignore_label
        y_pred_curr = y_pred_curr[inds_to_calc]
        y_true_curr = y_true_curr[inds_to_calc]
        assert y_pred_curr.shape == y_true_curr.shape
        confusion_matrix += confusion_matrix_torch(y_pred_curr, y_true_curr, classes)
    return confusion_matrix
def compute_iou_torch(confusion_matrix):
    """Per-class IoU and mean IoU (NaN classes excluded) from a confusion matrix."""
    tp = torch.diag(confusion_matrix)
    gt_per_class = confusion_matrix.sum(dim=1)
    pred_per_class = confusion_matrix.sum(dim=0)
    # Union = GT + predictions - overlap, per class.
    denom = gt_per_class + pred_per_class - tp
    iou = tp / denom.type(torch.float32)
    return iou, nanmean_torch(iou)
def GeneratePyramid(image, num_scales, curr_scale, scale_factor, is_label=False):
    """Build a coarse-to-fine pyramid of `image` up to `curr_scale`.

    Accepts a PIL image (converted to numpy, normalized for non-labels) or a
    torch tensor batch. Labels always use nearest-neighbour resampling so
    class ids are never interpolated.
    """
    scales_pyramid = []
    if isinstance(image, Image.Image):
        for i in range(0, curr_scale + 1, 1):
            scale = math.pow(scale_factor, num_scales - i)
            # np.int was removed in NumPy 1.24; plain int is equivalent here.
            curr_size = (np.ceil(scale * np.array(image.size))).astype(int)
            curr_scale_image = image.resize(curr_size, Image.BICUBIC if not is_label else Image.NEAREST)
            curr_scale_image = RGBImageToNumpy(curr_scale_image) if not is_label else ImageToNumpy(curr_scale_image)
            scales_pyramid.append(curr_scale_image)
    elif isinstance(image, torch.Tensor):
        for i in range(0, curr_scale + 1, 1):
            if is_label:
                # Labels scale relative to the full pyramid depth.
                scale = math.pow(scale_factor, num_scales - i)
            else:
                scale = math.pow(scale_factor, curr_scale - i)
            curr_scale_image = imresize_torch(image, scale, mode='nearest' if is_label else 'bicubic')
            curr_scale_image = curr_scale_image.squeeze(0) if is_label else curr_scale_image
            scales_pyramid.append(curr_scale_image)
    return scales_pyramid
def RGBImageToNumpy(im):
    """PIL RGB image -> CHW float32 numpy array rescaled to [-1, 1]."""
    arr = ImageToNumpy(im)
    return (arr - 128.) / 128  # change from 0..255 to -1..1
def ImageToNumpy(im):
    """Convert an image to float32 numpy; 3-D HWC inputs become CHW."""
    arr = np.asarray(im, np.float32)
    return np.transpose(arr, (2, 0, 1)) if len(arr.shape) == 3 else arr
class runningScore(object):
    """Accumulates a confusion matrix over batches and derives segmentation metrics."""
    def __init__(self, n_classes):
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))
    def _fast_hist(self, label_true, label_pred, n_class):
        # Only count pixels whose ground-truth class id is valid.
        valid = (label_true >= 0) & (label_true < n_class)
        combined = n_class * label_true[valid].astype(int) + label_pred[valid]
        return np.bincount(combined, minlength=n_class ** 2).reshape(n_class, n_class)
    def update(self, label_trues, label_preds):
        """Fold a batch of (true, pred) label maps into the running matrix."""
        for true_map, pred_map in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(
                true_map.flatten(), pred_map.flatten(), self.n_classes
            )
    def get_scores(self):
        """Returns accuracy score evaluation result.
        - overall accuracy
        - mean accuracy
        - mean IU
        - fwavacc
        """
        hist = self.confusion_matrix
        tp = np.diag(hist)
        acc = tp.sum() / hist.sum()
        acc_cls = np.nanmean(tp / hist.sum(axis=1))
        iu = tp / (hist.sum(axis=1) + hist.sum(axis=0) - tp)
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        return (
            {
                "Overall Acc: \t": acc,
                "Mean Acc : \t": acc_cls,
                "FreqW Acc : \t": fwavacc,
                "Mean IoU : \t": mean_iu,
            },
            cls_iu,
        )
    def reset(self):
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
def norm_image(im, norm_type='tanh_norm'):
    """Rescale an image tensor to [0, 1].

    'tanh_norm' assumes input in [-1, 1]; 'general_norm' min-max normalizes.
    Raises NotImplementedError for unknown norm types.
    """
    if norm_type == 'tanh_norm':
        out = (im + 1) / 2
    elif norm_type == 'general_norm':
        out = (im - im.min())
        out = out / out.max()
    else:
        # Bug fix: raising NotImplemented (a constant, not an exception)
        # produced a TypeError; use the proper exception class.
        raise NotImplementedError()
    # assert torch.max(out) <= 1 and torch.min(out) >= 0
    # Bug fix: torch.clamp is not in-place — the original discarded its
    # result, letting out-of-range values through. Return the clamped tensor.
    return torch.clamp(out, 0, 1)
def calculte_cs_validation_accuracy(semseg_net, target_val_loader, epoch_num, tb_path, device):
    # Evaluate a semantic-segmentation net on the Cityscapes validation loader:
    # accumulates a confusion matrix, logs one sample triplet to TensorBoard,
    # and prints both this file's IoU and the runningScore ("proda") metrics.
    # NOTE(review): the confusion matrix is allocated with .cuda(), so this
    # function requires a CUDA device regardless of `device` — confirm intent.
    from torch.utils.tensorboard import SummaryWriter
    semseg_net.eval()
    tb = SummaryWriter(tb_path)
    with torch.no_grad():
        running_metrics_val = runningScore(NUM_CLASSES)
        cm = torch.zeros((NUM_CLASSES, NUM_CLASSES)).cuda()
        for val_batch_num, (target_images, target_labels) in enumerate(target_val_loader):
            target_images = target_images.to(device)
            target_labels = target_labels.to(device)
            with torch.no_grad():
                pred_softs = semseg_net(target_images)
                # Hard labels from the per-class logits/probabilities.
                pred_labels = torch.argmax(pred_softs, dim=1)
                cm += compute_cm_batch_torch(pred_labels, target_labels, IGNORE_LABEL, NUM_CLASSES)
                running_metrics_val.update(target_labels.cpu().numpy(), pred_labels.cpu().numpy())
                if val_batch_num == 0:
                    # Log the first sample of the first batch for visual inspection.
                    # (Tag spelling 'Validtaion' kept as-is: dashboards key on it.)
                    t = norm_image(target_images[0])
                    t_lbl = colorize_mask(target_labels[0])
                    pred_lbl = colorize_mask(pred_labels[0])
                    tb.add_image('Semseg/Validtaion/target', t, epoch_num)
                    tb.add_image('Semseg/Validtaion/target_label', t_lbl, epoch_num)
                    tb.add_image('Semseg/Validtaion/prediction_label', pred_lbl, epoch_num)
        iou, miou = compute_iou_torch(cm)
        # proda's calc:
        score, class_iou = running_metrics_val.get_scores()
        for k, v in score.items():
            print(k, v)
        for k, v in class_iou.items():
            print(k, v)
        running_metrics_val.reset()
    return iou, miou, cm
{
"api_name": "numpy.ceil",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.interpolate... |
25410789355 | import optparse
import xml.etree.ElementTree
from util import build_utils
MANIFEST_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
package="%(package)s"
split="%(split)s">
<uses-sdk android:minSdkVersion="21" />
<application android:hasCode="%(has_code)s">
</application>
</manifest>
"""
def ParseArgs():
    """Parses command line options.

    Returns:
      An options object as from optparse.OptionsParser.parse_args()
    """
    parser = optparse.OptionParser()
    build_utils.AddDepfileOption(parser)
    for flag, help_text in (('--main-manifest', 'The main manifest of the app'),
                            ('--out-manifest', 'The output manifest'),
                            ('--split', 'The name of the split')):
        parser.add_option(flag, help=help_text)
    parser.add_option(
        '--has-code',
        action='store_true',
        default=False,
        help='Whether the split will contain a .dex file')
    options, args = parser.parse_args()
    if args:
        parser.error('No positional arguments should be given.')
    # Check that required options have been provided.
    build_utils.CheckOptions(options, parser,
                             required=('main_manifest', 'out_manifest', 'split'))
    return options
def Build(main_manifest, split, has_code):
    """Builds a split manifest based on the manifest of the main APK.

    Args:
      main_manifest: the XML manifest of the main APK as a string
      split: the name of the split as a string
      has_code: whether this split APK will contain .dex files

    Returns:
      The XML split manifest as a string
    """
    root = xml.etree.ElementTree.fromstring(main_manifest)
    substitutions = {
        'package': root.get('package'),
        # Split names may not contain dashes in the manifest.
        'split': split.replace('-', '_'),
        'has_code': str(has_code).lower(),
    }
    return MANIFEST_TEMPLATE % substitutions
def main():
    """Entry point: read the main manifest, write the split manifest (+ depfile)."""
    options = ParseArgs()
    # Bug fix: file() is a Python-2-only builtin (NameError under Python 3);
    # use open() with context managers so the handles are always closed.
    with open(options.main_manifest) as f:
        main_manifest = f.read()
    split_manifest = Build(
        main_manifest,
        options.split,
        options.has_code)
    with open(options.out_manifest, 'w') as f:
        f.write(split_manifest)
    if options.depfile:
        build_utils.WriteDepfile(
            options.depfile,
            [options.main_manifest] + build_utils.GetPythonDependencies())
if __name__ == '__main__':
main()
| hanpfei/chromium-net | build/android/gyp/generate_split_manifest.py | generate_split_manifest.py | py | 2,284 | python | en | code | 289 | github-code | 1 | [
{
"api_name": "optparse.OptionParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "util.build_utils.AddDepfileOption",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "util.build_utils",
"line_number": 24,
"usage_type": "name"
},
{
"api_name... |
12137137685 | import datetime
class Message:
    """A single message sent to a chat.

    Wraps the raw payload received from the API and exposes the fields a bot
    typically needs: the owning chat, message type/id/content, the author,
    the UTC creation time, and an optional parsed command (set externally).
    """

    def __init__(self, bot, data, chat, author):
        self._bot = bot
        self._data = data
        self.chat = chat
        self.author = author
        self.command = None
        self.type = data["type"]
        self.id: int = data["id"]
        self.content = data["content"]
        # createdAt is a nanosecond epoch string; dropping the last nine
        # digits leaves whole seconds.
        self.created_at = datetime.datetime.utcfromtimestamp(
            int(data["createdAt"][:-9]))

    def __str__(self):
        return self.content

    async def delete(self):
        """Deletes the message from the chat."""
        sender_chat = await self._bot.get_chat(self._data["sender"]["username"])
        return await self._bot.http.delete_chat_message(sender_chat, self.id)
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "attribute"
}
] |
71728283555 | from fastapi import FastAPI, Cookie, Response
from typing import Union
from pydantic import BaseModel
from typing_extensions import Annotated
app = FastAPI()
@app.get("/books")
async def books(
    ads_id: Annotated[Union[str, None], Cookie()]
):
    """Echo the `ads_id` cookie back to the caller inside a success envelope."""
    payload = {
        "code": 200,
        "message": "访问成功",
        "ads_id": ads_id,
    }
    return payload
| Mengxin-yi/fastApiProject | mainCookie.py | mainCookie.py | py | 355 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "typing_extensions.Annotated",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "fastapi.Cooki... |
32180876446 | import torch
from .base_model import BaseModel
from .BigGAN_networks import *
from util.util import toggle_grad, loss_hinge_dis, loss_hinge_gen, ortho, default_ortho, toggle_grad, prepare_z_y, \
make_one_hot, to_device, multiple_replace, random_word
import pandas as pd
from .OCR_network import *
from torch.nn import CTCLoss, MSELoss, L1Loss
from torch.nn.utils import clip_grad_norm_
import random
import unicodedata
import sys
activation_dict = {'inplace_relu': nn.ReLU(inplace=True),
'relu': nn.ReLU(inplace=False),
'ir': nn.ReLU(inplace=True),}
class ScrabbleGANBaseModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.set_defaults(G_shared=False)
parser.set_defaults(first_layer=True)
parser.set_defaults(one_hot=True)
return parser
    def __init__(self, opt):
        """Build generator/discriminator/OCR nets, losses, optimizers and the
        fixed-noise visualization batch from the parsed options `opt`."""
        BaseModel.__init__(self, opt) # call the initialization method of BaseModel
        opt.G_activation = activation_dict[opt.G_nl]
        opt.D_activation = activation_dict[opt.D_nl]
        # load saved model to finetune:
        if self.isTrain and opt.saved_model!='':
            opt.G_init = os.path.join(opt.checkpoints_dir, opt.saved_model)
            opt.D_init = os.path.join(opt.checkpoints_dir, opt.saved_model)
            opt.OCR_init = os.path.join(opt.checkpoints_dir, opt.saved_model)
        # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
        self.loss_names = ['G', 'D', 'Dreal', 'Dfake', 'OCR_real', 'OCR_fake', 'grad_fake_OCR', 'grad_fake_adv']
        # Initialize every logged loss to zero so logging works before the
        # first optimization step.
        self.loss_G = torch.zeros(1)
        self.loss_D =torch.zeros(1)
        self.loss_Dreal =torch.zeros(1)
        self.loss_Dfake =torch.zeros(1)
        self.loss_OCR_real =torch.zeros(1)
        self.loss_OCR_fake =torch.zeros(1)
        self.loss_grad_fake_OCR =torch.zeros(1)
        self.loss_grad_fake_adv =torch.zeros(1)
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
        # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
        self.model_names = ['G', 'D', 'OCR']
        # define networks; you can use opt.isTrain to specify different behaviors for training and test.
        # Next, build the model
        opt.n_classes = len(opt.alphabet)
        self.netG = Generator(**vars(opt))
        self.Gradloss = torch.nn.L1Loss()
        self.netconverter = strLabelConverter(opt.alphabet)
        self.netOCR = CRNN(opt).to(self.device)
        if len(opt.gpu_ids) > 0:
            assert (torch.cuda.is_available())
            self.netOCR.to(opt.gpu_ids[0])
            self.netG.to(opt.gpu_ids[0])
            # net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
            if len(opt.gpu_ids) > 1:
                self.netOCR = torch.nn.DataParallel(self.netOCR, device_ids=opt.gpu_ids, dim=1, output_device=opt.gpu_ids[0]).cuda()
                self.netG = torch.nn.DataParallel(self.netG, device_ids=opt.gpu_ids, output_device=opt.gpu_ids[0]).cuda()
        self.OCR_criterion = CTCLoss(zero_infinity=True, reduction='none')
        print(self.netG)
        if self.isTrain: # only defined during training time
            # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
            # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
            # define and initialize optimizers. You can define one optimizer for each network.
            # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.G_lr, betas=(opt.G_B1, opt.G_B2), weight_decay=0, eps=opt.adam_eps)
            self.optimizer_OCR = torch.optim.Adam(self.netOCR.parameters(),
                                                  lr=opt.OCR_lr, betas=(opt.OCR_B1, opt.OCR_B2), weight_decay=0,
                                                  eps=opt.adam_eps)
            self.optimizers = [self.optimizer_G, self.optimizer_OCR]
            self.optimizer_G.zero_grad()
            self.optimizer_OCR.zero_grad()
            # Load the lexicon used to sample fake words, skipping words with
            # characters outside the model alphabet.
            exception_chars = ['ï', 'ü', '.', '_', 'ö', ',', 'ã', 'ñ']
            if opt.lex.endswith('.tsv'):
                self.lex = pd.read_csv(opt.lex, sep='\t')['lemme']
                self.lex = [word.split()[-1] for word in self.lex if
                            (pd.notnull(word) and all(char not in word for char in exception_chars))]
            elif opt.lex.endswith('.txt'):
                with open(opt.lex, 'rb') as f:
                    self.lex = f.read().splitlines()
                lex=[]
                for word in self.lex:
                    try:
                        word=word.decode("utf-8")
                    except:
                        # Skip entries that are not valid UTF-8.
                        continue
                    if len(word)<20:
                        lex.append(word)
                self.lex = lex
            else:
                raise ValueError('could not load lexicon ')
            # Fixed noise/labels reused every epoch so visualizations are comparable.
            self.fixed_noise_size = 2
            self.fixed_noise, self.fixed_fake_labels = prepare_z_y(self.fixed_noise_size, opt.dim_z,
                                                                   len(self.lex), device=self.device,
                                                                   fp16=opt.G_fp16, seed=opt.seed)
            self.fixed_noise.sample_()
            self.fixed_fake_labels.sample_()
            # Characters replaced/stripped when words are used as file-safe labels.
            self.rep_dict = {"'":"", '"':'', ' ':'_', ';':'', '.':''}
            fixed_words_fake = [self.lex[int(i)].encode('utf-8') for i in self.fixed_fake_labels]
            self.fixed_text_encode_fake, self.fixed_text_len = self.netconverter.encode(fixed_words_fake)
            if self.opt.one_hot:
                self.one_hot_fixed = make_one_hot(self.fixed_text_encode_fake, self.fixed_text_len, self.opt.n_classes)
            # Todo change to display names of classes instead of numbers
            self.label_fix = [multiple_replace(word.decode("utf-8"), self.rep_dict) for word in fixed_words_fake]
            visual_names_fixed_noise = ['fake_fixed_' + 'label_' + label for label in self.label_fix]
            visual_names_grad_OCR = ['grad_OCR_fixed_' + 'label_' + label for label in self.label_fix]
            visual_names_grad_G = ['grad_G_fixed_' + 'label_' + label for label in self.label_fix]
            # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
            self.visual_names = ['real', 'fake']
            self.visual_names.extend(visual_names_fixed_noise)
            self.visual_names.extend(visual_names_grad_G)
            self.visual_names.extend(visual_names_grad_OCR)
            self.z, self.label_fake = prepare_z_y(opt.batch_size, opt.dim_z, len(self.lex),
                                                  device=self.device, fp16=opt.G_fp16, z_dist=opt.z_dist, seed=opt.seed)
            if opt.single_writer:
                # Single-writer mode: one shared, learnable latent for the
                # whole batch, optimized directly with SGD.
                self.fixed_noise = self.z[0].repeat((self.fixed_noise_size, 1))
                self.z = self.z[0].repeat((opt.batch_size, 1)).to(self.device)
                self.z.requires_grad=True
                self.optimizer_z = torch.optim.SGD([self.z], lr=opt.G_lr)
                self.optimizer_z.zero_grad()
        self.l1_loss = L1Loss()
        self.mse_loss = MSELoss()
        self.OCRconverter = OCRLabelConverter(opt.alphabet)
        # Small constant to avoid division by zero / log(0) in loss terms.
        self.epsilon = 1e-7
        self.real_z = None
        self.real_z_mean = None
def visualize_fixed_noise(self):
    """Generate images for the fixed noise/label batch and store, per sample,
    the fake image plus the normalised |gradient| of the OCR and adversarial
    losses w.r.t. that image, as attributes named after the word label
    (these attribute names are listed in self.visual_names)."""
    if self.opt.single_writer:
        # single-writer mode: reuse the first optimised z row for every sample
        self.fixed_noise = self.z[0].repeat((self.fixed_noise_size, 1))
    if self.opt.one_hot:
        images = self.netG(self.fixed_noise, self.one_hot_fixed.to(self.device))
    else:
        images = self.netG(self.fixed_noise, self.fixed_text_encode_fake.to(self.device))
    # adversarial (hinge) loss of the fixed fakes
    loss_G = loss_hinge_gen(self.netD(**{'x': images, 'z': self.fixed_noise}), self.fixed_text_len.detach(), self.opt.mask_loss)
    # self.loss_G = loss_hinge_gen(self.netD(self.fake, self.rep_label_fake))
    # OCR (CTC) loss on the fixed fakes
    pred_fake_OCR = self.netOCR(images)
    preds_size = torch.IntTensor([pred_fake_OCR.size(0)] * len(self.fixed_text_len)).detach()
    # loss_OCR_fake = self.OCR_criterion(pred_fake_OCR.log_softmax(2), self.fixed_text_encode_fake.detach().to(self.device),
    #                                    preds_size, self.fixed_text_len.detach())
    loss_OCR_fake = self.OCR_criterion(pred_fake_OCR, self.fixed_text_encode_fake.detach().to(self.device),
                                       preds_size, self.fixed_text_len.detach())
    # average over the non-NaN per-sample CTC losses
    loss_OCR_fake = torch.mean(loss_OCR_fake[~torch.isnan(loss_OCR_fake)])
    grad_fixed_OCR = torch.autograd.grad(loss_OCR_fake, images)
    grad_fixed_adv = torch.autograd.grad(loss_G, images)
    _, preds = pred_fake_OCR.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    sim_preds = self.OCRconverter.decode(preds.data, preds_size.data, raw=False)
    raw_preds = self.OCRconverter.decode(preds.data, preds_size.data, raw=True)
    print('######## fake images OCR prediction ########')
    for i in range(self.fixed_noise_size):
        print('%-20s => %-20s, gt: %-20s' % (raw_preds[i], sim_preds[i], self.lex[int(self.fixed_fake_labels[i])]))
        image = images[i].unsqueeze(0).detach()
        # rescale |grad| into [-1, 1] for visualisation
        grad_OCR = torch.abs(grad_fixed_OCR[0][i]).unsqueeze(0).detach()
        grad_OCR = (grad_OCR / torch.max(grad_OCR)) * 2 - 1
        grad_adv = torch.abs(grad_fixed_adv[0][i]).unsqueeze(0).detach()
        grad_adv = (grad_adv / torch.max(grad_adv)) * 2 - 1
        label = self.label_fix[i]
        setattr(self, 'grad_OCR_fixed_' + 'label_' + label, grad_OCR)
        setattr(self, 'grad_G_fixed_' + 'label_' + label, grad_adv)
        setattr(self, 'fake_fixed_' + 'label_' + label, image)
def get_current_visuals(self):
    """Refresh the fixed-noise visualisations, print OCR predictions on a few
    real images, pad `self.real`/`self.fake` to the fixed-sample width, and
    return the base class' visuals dictionary.

    Fix: attribute lookup used `eval('self.fake_fixed_...')`; replaced with
    `getattr`, which is the correct (and safe) dynamic-attribute access.
    """
    self.visualize_fixed_noise()
    with torch.no_grad():
        preds_size = torch.IntTensor([self.pred_real_OCR.size(0)] * len(self.label)).detach()
        _, preds = self.pred_real_OCR.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = self.OCRconverter.decode(preds.data, preds_size.data, raw=False)
        raw_preds = self.OCRconverter.decode(preds.data, preds_size.data, raw=True)
        print('######## real images OCR prediction ########')
        for i in range(min(3, len(self.label))):
            print('%-20s => %-20s, gt: %-20s' % (
                raw_preds[i], sim_preds[i], self.label[i].decode('utf-8', 'strict')))
        self.netOCR.train()
        # pad the first real/fake image into a white canvas shaped like the
        # fixed-noise fakes so all visuals share one width
        template = getattr(self, 'fake_fixed_' + 'label_' + self.label_fix[0])
        ones_img = torch.ones(template.shape, dtype=torch.float32)
        w = min(self.real.shape[3], ones_img.shape[3])
        ones_img[:, :, :, 0:w] = self.real[0, :, :, 0:w]
        self.real = ones_img
        ones_img = torch.ones(template.shape, dtype=torch.float32)
        w = min(self.fake.shape[3], ones_img.shape[3])
        ones_img[:, :, :, 0:w] = self.fake[0, :, :, 0:w]
        self.fake = ones_img
        self.netG.train()
    return super(ScrabbleGANBaseModel, self).get_current_visuals()
def set_input(self, input):
    """Unpack input data from the dataloader and perform necessary pre-processing steps.

    Parameters:
        input: a dictionary that contains the data itself and its metadata information.
               Expected keys: 'img', 'img_path', 'idx', and optionally 'label'.
    """
    # if hasattr(self, 'real'): del self.real, self.one_hot_real, self.text_encode, self.len_text
    self.real = input['img'].to(self.device)
    if 'label' in input:  # idiom: membership test directly on the dict, not .keys()
        self.label = input['label']
        self.text_encode, self.len_text = self.netconverter.encode(self.label)
        if self.opt.one_hot:
            self.one_hot_real = make_one_hot(self.text_encode, self.len_text, self.opt.n_classes).to(self.device).detach()
        self.text_encode = self.text_encode.to(self.device).detach()
        self.len_text = self.len_text.detach()
    self.img_path = input['img_path']  # get image paths
    self.idx_real = input['idx']       # dataset indices of the batch
def load_networks(self, epoch):
    """Load the networks via the base class and, in single-writer mode,
    also restore the optimised latent code z saved for `epoch`."""
    BaseModel.load_networks(self, epoch)
    if not self.opt.single_writer:
        return
    z_path = os.path.join(self.save_dir, '%s_z.pkl' % epoch)
    self.z = torch.load(z_path)
def forward(self, words=None, z=None):
"""Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
if hasattr(self, 'fake'): del self.fake, self.text_encode_fake, self.len_text_fake, self.one_hot_fake
self.label_fake.sample_()
if words is None:
words = [self.lex[int(i)] for i in self.label_fake]
if self.opt.capitalize:
for i, word in enumerate(words):
if random.random()<0.5:
word = list(word)
word[0] = unicodedata.normalize('NFKD',word[0].upper()).encode('ascii', 'ignore').decode("utf-8")
word = ''.join(word)
words[i] = word
words = [word.encode('utf-8') for word in words]
if z is None:
if not self.opt.single_writer:
self.z.sample_()
else:
if z.shape[0]==1:
self.z = z.repeat((len(words), 1))
self.z = z.repeat((len(words), 1))
else:
self.z = z
self.words = words
self.text_encode_fake, self.len_text_fake = self.netconverter.encode(self.words)
self.text_encode_fake = self.text_encode_fake.to(self.device)
if self.opt.one_hot:
self.one_hot_fake = make_one_hot(self.text_encode_fake, self.len_text_fake, self.opt.n_classes).to(self.device)
try:
self.fake = self.netG(self.z, self.one_hot_fake)
except:
print(words)
else:
self.fake = self.netG(self.z, self.text_encode_fake) # generate output image given the input data_A
def backward_D_OCR(self):
    """Joint backward pass for the discriminator (hinge loss on real/fake)
    and the OCR network (CTC loss on real images).

    Fixes: removed the bare `try/except: print('a')` around the fake
    discriminator call (it left `pred_fake` unbound on failure); collapsed
    the duplicated NaN-zeroing (`grad != grad` is the same test as isnan);
    guarded against parameters with no gradient.

    Returns the combined scalar loss (D + OCR).
    """
    # Real
    if self.real_z_mean is None:
        pred_real = self.netD(self.real.detach())
    else:
        pred_real = self.netD(**{'x': self.real.detach(), 'z': self.real_z_mean.detach()})
    # Fake
    pred_fake = self.netD(**{'x': self.fake.detach(), 'z': self.z.detach()})
    # Combined hinge loss
    self.loss_Dreal, self.loss_Dfake = loss_hinge_dis(pred_fake, pred_real, self.len_text_fake.detach(), self.len_text.detach(), self.opt.mask_loss)
    self.loss_D = self.loss_Dreal + self.loss_Dfake
    # OCR loss on real data
    self.pred_real_OCR = self.netOCR(self.real.detach())
    preds_size = torch.IntTensor([self.pred_real_OCR.size(0)] * self.opt.batch_size).detach()
    loss_OCR_real = self.OCR_criterion(self.pred_real_OCR, self.text_encode.detach(), preds_size, self.len_text.detach())
    # average over the non-NaN per-sample CTC losses
    self.loss_OCR_real = torch.mean(loss_OCR_real[~torch.isnan(loss_OCR_real)])
    # total loss and backward
    loss_total = self.loss_D + self.loss_OCR_real
    loss_total.backward()
    # CTC can produce non-finite gradients; zero them out for netOCR
    for param in self.netOCR.parameters():
        if param.grad is not None:
            param.grad[torch.isnan(param.grad)] = 0
            param.grad[torch.isinf(param.grad)] = 0
    if self.opt.clip_grad > 0:
        clip_grad_norm_(self.netD.parameters(), self.opt.clip_grad)
    return loss_total
def backward_OCR(self):
    """Backward pass for the OCR (CTC) loss on real images only.

    Fixes: collapsed the duplicated NaN-zeroing (`grad != grad` equals
    isnan); guarded against parameters with no gradient.

    Returns the mean OCR loss over non-NaN samples.
    """
    # OCR loss on real data
    self.pred_real_OCR = self.netOCR(self.real.detach())
    preds_size = torch.IntTensor([self.pred_real_OCR.size(0)] * self.opt.batch_size).detach()
    loss_OCR_real = self.OCR_criterion(self.pred_real_OCR, self.text_encode.detach(), preds_size, self.len_text.detach())
    self.loss_OCR_real = torch.mean(loss_OCR_real[~torch.isnan(loss_OCR_real)])
    # backward
    self.loss_OCR_real.backward()
    for param in self.netOCR.parameters():
        if param.grad is not None:
            param.grad[torch.isnan(param.grad)] = 0
            param.grad[torch.isinf(param.grad)] = 0
    if self.opt.clip_grad > 0:
        # NOTE(review): clips netD although only netOCR received gradients
        # here — looks like it should be self.netOCR.parameters(); behavior
        # kept as-is pending confirmation.
        clip_grad_norm_(self.netD.parameters(), self.opt.clip_grad)
    return self.loss_OCR_real
def backward_D(self):
    """Discriminator-only backward pass using the hinge loss on the current
    real batch and detached fakes. Returns the total discriminator loss."""
    # Real
    if self.real_z_mean is None:
        pred_real = self.netD(self.real.detach())
    else:
        pred_real = self.netD(x=self.real.detach(), z=self.real_z_mean.detach())
    # Fake
    pred_fake = self.netD(x=self.fake.detach(), z=self.z.detach())
    # Combined loss
    self.loss_Dreal, self.loss_Dfake = loss_hinge_dis(
        pred_fake, pred_real,
        self.len_text_fake.detach(), self.len_text.detach(),
        self.opt.mask_loss)
    self.loss_D = self.loss_Dreal + self.loss_Dfake
    self.loss_D.backward()
    if self.opt.clip_grad > 0:
        clip_grad_norm_(self.netD.parameters(), self.opt.clip_grad)
    return self.loss_D
def backward_G(self):
    """Generator backward pass with optional gradient balancing: the OCR
    (CTC) loss on the fakes is rescaled so that the std of its gradient on
    the fake image matches gb_alpha times the adversarial gradient's std."""
    self.loss_G = loss_hinge_gen(self.netD(**{'x': self.fake, 'z': self.z}), self.len_text_fake.detach(), self.opt.mask_loss)
    # OCR loss on fake data
    pred_fake_OCR = self.netOCR(self.fake)
    preds_size = torch.IntTensor([pred_fake_OCR.size(0)] * self.opt.batch_size).detach()
    loss_OCR_fake = self.OCR_criterion(pred_fake_OCR, self.text_encode_fake.detach(), preds_size, self.len_text_fake.detach())
    # average over the non-NaN per-sample CTC losses
    self.loss_OCR_fake = torch.mean(loss_OCR_fake[~torch.isnan(loss_OCR_fake)])
    # total loss
    self.loss_T = self.loss_G + self.opt.gb_alpha*self.loss_OCR_fake
    # 1e6-scaled mean squared image-gradients, kept for monitoring/logging
    grad_fake_OCR = torch.autograd.grad(self.loss_OCR_fake, self.fake, retain_graph=True)[0]
    self.loss_grad_fake_OCR = 10**6*torch.mean(grad_fake_OCR**2)
    grad_fake_adv = torch.autograd.grad(self.loss_G, self.fake, retain_graph=True)[0]
    self.loss_grad_fake_adv = 10**6*torch.mean(grad_fake_adv**2)
    if not self.opt.no_grad_balance:
        self.loss_T.backward(retain_graph=True)
        grad_fake_OCR = torch.autograd.grad(self.loss_OCR_fake, self.fake, create_graph=True, retain_graph=True)[0]
        grad_fake_adv = torch.autograd.grad(self.loss_G, self.fake, create_graph=True, retain_graph=True)[0]
        # a: factor that matches the OCR grad std to gb_alpha * adv grad std
        a = self.opt.gb_alpha * torch.div(torch.std(grad_fake_adv), self.epsilon+torch.std(grad_fake_OCR))
        if a is None:  # NOTE(review): a tensor is never None; likely dead debug code
            print(self.loss_OCR_fake, self.loss_G, torch.std(grad_fake_adv), torch.std(grad_fake_OCR))
        if a>1000 or a<0.0001:
            print(a)  # warn on extreme balancing factors
        # b is computed but unused (see the commented-out line below)
        b = self.opt.gb_alpha * (torch.mean(grad_fake_adv) -
                                 torch.div(torch.std(grad_fake_adv), self.epsilon+torch.std(grad_fake_OCR))*
                                 torch.mean(grad_fake_OCR))
        # self.loss_OCR_fake = a.detach() * self.loss_OCR_fake + b.detach() * torch.sum(self.fake)
        self.loss_OCR_fake = a.detach() * self.loss_OCR_fake
        self.loss_T = (1-1*self.opt.onlyOCR)*self.loss_G + self.loss_OCR_fake
        self.loss_T.backward(retain_graph=True)
        grad_fake_OCR = torch.autograd.grad(self.loss_OCR_fake, self.fake, create_graph=False, retain_graph=True)[0]
        grad_fake_adv = torch.autograd.grad(self.loss_G, self.fake, create_graph=False, retain_graph=True)[0]
        self.loss_grad_fake_OCR = 10 ** 6 * torch.mean(grad_fake_OCR ** 2)
        self.loss_grad_fake_adv = 10 ** 6 * torch.mean(grad_fake_adv ** 2)
        with torch.no_grad():
            self.loss_T.backward()
    else:
        self.loss_T.backward()
    if self.opt.clip_grad > 0:
        clip_grad_norm_(self.netG.parameters(), self.opt.clip_grad)
    # abort the run entirely on NaN losses
    if any(torch.isnan(loss_OCR_fake)) or torch.isnan(self.loss_G):
        print('loss OCR fake: ', loss_OCR_fake, ' loss_G: ', self.loss_G, ' words: ', self.words)
        sys.exit()
def optimize_D_OCR(self):
    """One forward pass followed by a joint D + OCR backward pass."""
    self.forward()
    for net in (self.netD, self.netOCR):
        self.set_requires_grad([net], True)
    self.optimizer_D.zero_grad()
    if self.opt.OCR_init in ('glorot', 'xavier', 'ortho', 'N02'):
        self.optimizer_OCR.zero_grad()
    self.backward_D_OCR()
def optimize_OCR(self):
    """Forward pass, then train the OCR network alone (discriminator frozen)."""
    self.forward()
    self.set_requires_grad([self.netD], False)
    self.set_requires_grad([self.netOCR], True)
    if self.opt.OCR_init in ('glorot', 'xavier', 'ortho', 'N02'):
        self.optimizer_OCR.zero_grad()
    self.backward_OCR()
def optimize_D(self):
    """Forward pass followed by a discriminator-only backward pass."""
    self.forward()
    self.set_requires_grad([self.netD], True)
    self.backward_D()
def optimize_D_OCR_step(self):
    """Apply the accumulated D (and, when used, OCR) optimiser steps, then
    clear both optimisers' gradients."""
    self.optimizer_D.step()
    if self.opt.OCR_init in ('glorot', 'xavier', 'ortho', 'N02'):
        self.optimizer_OCR.step()
    self.optimizer_D.zero_grad()
    self.optimizer_OCR.zero_grad()
def optimize_D_step(self):
    """Apply the discriminator optimiser step, aborting the run if the
    discriminator's first conv bias has gone NaN."""
    self.optimizer_D.step()
    bias = self.netD.infer_img.blocks[0][0].conv1.bias
    if torch.isnan(bias).any():
        print('D is nan')
        sys.exit()
    self.optimizer_D.zero_grad()
def optimize_G(self):
    """Forward pass, then generator backward with D and OCR frozen."""
    self.forward()
    for frozen in (self.netD, self.netOCR):
        self.set_requires_grad([frozen], False)
    self.backward_G()
def optimize_G_step(self):
    """Step the z optimiser (single-writer mode) and/or the G optimiser,
    clearing each one's gradients right after stepping it."""
    if self.opt.single_writer and self.opt.optimize_z:
        self.optimizer_z.step()
        self.optimizer_z.zero_grad()
    if not self.opt.not_optimize_G:
        self.optimizer_G.step()
        self.optimizer_G.zero_grad()
def optimize_ocr(self):
    """Train the OCR network on real images only (no GAN losses involved)."""
    self.set_requires_grad([self.netOCR], True)
    # CTC loss on the real batch
    real_logits = self.netOCR(self.real)
    timesteps = torch.IntTensor([real_logits.size(0)] * self.opt.batch_size).detach()
    self.loss_OCR_real = self.OCR_criterion(
        real_logits, self.text_encode.detach(), timesteps, self.len_text.detach())
    self.loss_OCR_real.backward()
    self.optimizer_OCR.step()
def optimize_z(self):
    """Enable gradient flow through the latent code z."""
    self.set_requires_grad([self.z], True)
def optimize_parameters(self):
    """One generator update followed by one discriminator update."""
    self.forward()
    # --- generator step (D frozen) ---
    self.set_requires_grad([self.netD], False)
    self.optimizer_G.zero_grad()
    self.backward_G()
    self.optimizer_G.step()
    # --- discriminator step ---
    self.set_requires_grad([self.netD], True)
    self.optimizer_D.zero_grad()
    self.backward_D()
    self.optimizer_D.step()
def test(self):
    """Generate fakes only: restrict visuals to 'fake', put G in eval mode
    and run a gradient-free forward pass."""
    self.visual_names = ['fake']
    self.netG.eval()
    with torch.no_grad():
        self.forward()
def train_GD(self):
    """BigGAN-style training step: several critic (D) updates over chunks of
    the real batch, followed by one generator update."""
    self.netG.train()
    self.netD.train()
    self.optimizer_G.zero_grad()
    self.optimizer_D.zero_grad()
    # How many chunks to split x and y into?
    x = torch.split(self.real, self.opt.batch_size)
    y = torch.split(self.label, self.opt.batch_size)
    counter = 0
    # Optionally toggle D and G's "require_grad"
    if self.opt.toggle_grads:
        toggle_grad(self.netD, True)
        toggle_grad(self.netG, False)
    for step_index in range(self.opt.num_critic_train):
        self.optimizer_D.zero_grad()
        with torch.set_grad_enabled(False):
            self.forward()  # fakes generated without tracking G's graph
        D_input = torch.cat([self.fake, x[counter]], 0) if x is not None else self.fake
        D_class = torch.cat([self.label_fake, y[counter]], 0) if y[counter] is not None else y[counter]
        # Get Discriminator output
        D_out = self.netD(D_input, D_class)
        if x is not None:
            pred_fake, pred_real = torch.split(D_out, [self.fake.shape[0], x[counter].shape[0]])  # D_fake, D_real
        else:
            # NOTE(review): torch.split never returns None, so this branch is
            # dead; if it ever ran, pred_real below would be unbound.
            pred_fake = D_out
        # Combined loss
        self.loss_Dreal, self.loss_Dfake = loss_hinge_dis(pred_fake, pred_real, self.len_text_fake.detach(), self.len_text.detach(), self.opt.mask_loss)
        self.loss_D = self.loss_Dreal + self.loss_Dfake
        self.loss_D.backward()
        counter += 1
        self.optimizer_D.step()
    # Optionally toggle D and G's "require_grad"
    if self.opt.toggle_grads:
        toggle_grad(self.netD, False)
        toggle_grad(self.netG, True)
    # Zero G's gradients by default before training G, for safety
    self.optimizer_G.zero_grad()
    self.forward()
    self.loss_G = loss_hinge_gen(self.netD(self.fake, self.label_fake), self.len_text_fake.detach(), self.opt.mask_loss)
    self.loss_G.backward()
    self.optimizer_G.step()
| amzn/convolutional-handwriting-gan | models/ScrabbleGAN_baseModel.py | ScrabbleGAN_baseModel.py | py | 25,408 | python | en | code | 235 | github-code | 1 | [
{
"api_name": "base_model.BaseModel",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "base_model.BaseModel.__init__",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "base_model.BaseModel",
"line_number": 28,
"usage_type": "name"
},
{
"api_name"... |
28413956290 | import json, os, requests, subprocess
# Download every album cover thumbnail from a Discogs collection folder.
#
# Fixes: restored the `{filename}` placeholders (the file path and log
# messages contained literal "(unknown)" although `filename` is computed
# right above and clearly intended); removed a dead initial request whose
# response was immediately overwritten by the paginated loop.

# Your Discogs username and API key
username = ''
api_key = ''
# The ID of the folder containing your collection
folder_id = 0
# Base URL for Discogs API
base_url = 'https://api.discogs.com'
# Endpoint for retrieving collection releases
endpoint = f'/users/{username}/collection/folders/{folder_id}/releases'
# Set up authentication headers
headers = {'User-Agent': 'MyDiscogsClient/1.0', 'Authorization': f'Discogs token={api_key}'}
directory = 'thumbs/'
# Make request to API and retrieve all pages of results
page = 1
thumbnails = []
while True:
    response = requests.get(base_url + endpoint, headers=headers, params={'page': page})
    response_data = response.json()
    # Extract album thumbnails from each release
    for release in response_data['releases']:
        # Check if release has an image and add it to the list of thumbnails
        if release['basic_information']['cover_image']:
            thumbnails.append(release['basic_information']['cover_image'])
    # Check if there are more pages of results
    if 'next' in response.links:
        page += 1
    else:
        break
for thumbnail in thumbnails:
    filename = thumbnail.split("/")[-1]
    file_path = f"{directory}{filename}"
    if os.path.isfile(file_path):
        print(f"{filename} already exists in {directory}")
    else:
        # Download the file using wget
        print(f"Downloading {filename} from {thumbnail} to {directory}")
        subprocess.run(['wget', '-P', directory, thumbnail])
| notaSWE/wallofrecords | local_option/get_collection.py | get_collection.py | py | 1,616 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
23657966447 | #!Python-2.7.11/bin/python
import os,sys
import argparse
import numpy as np
import copy
sys.path.append('Python-2.7.11/lib/python2.7/site-packages')
from ete2 import Tree,TreeStyle,TextFace,NodeStyle
# Command-line interface: input MAF, optional germline table, filtering
# options, and the column indices used to parse the germline file.
parser = argparse.ArgumentParser(description='Phylogenetic Tree analysis for Cancer Evolution.')
parser.add_argument('-i', '--maf', help='Maf file', required=True)
parser.add_argument('-g', '--germline', help='Germline mutation file', default=None)
parser.add_argument('-n', '--root-number', help='Root number for construction tree, default None', type=int, default=None)
parser.add_argument('-s', '--samplelist', help='Samplelist', default=None)
parser.add_argument('-o', '--outdir', help='Output dir', required=True)
parser.add_argument('-p', '--patient', help='Patient ID', default="patient")
parser.add_argument('-f', '--func', help='Function filter, default: coding',default='coding',choices=['coding','non-synonymous','exon','gene','all'])
parser.add_argument('-c', '--chr', help='Chr column index for Germline, default=0', default=0,type=int)
parser.add_argument('-t', '--position', help='Position column index for Germline, default=1', default=1,type=int)
parser.add_argument('-r', '--ref', help='Ref-base column index for Germline, default=3', default=3,type=int)
parser.add_argument('-a', '--alt', help='Alt-base column index for Germline, default=4', default=4,type=int)
parser.add_argument('-x', '--gene', help='Gene column index for Germline, default=6', default=6,type=int)
parser.add_argument('-l', '--indel', help='Wether INDEL used for tree construction', action='store_true')
parser.add_argument('-d', '--drivergene', help='Driver Genes for marke, default None', default=None)
args = parser.parse_args()
def filter_funcs(func, type='coding'):
    """Return True if MAF Variant_Classification `func` passes filter `type`.

    type is one of 'all', 'coding', 'non-synonymous', 'exon', 'gene';
    unknown types pass everything (matching the original fall-through).

    Fixes: membership lists are now frozensets (O(1) lookup, no per-call
    list construction) and the exon set's 'Splicing_Site' typo is corrected
    to the standard MAF term 'Splice_Site' used by the other sets.
    """
    coding_funcs = frozenset(['Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Silent', 'Splice_Site',
                              'Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins'])
    nonsyn_funcs = frozenset(['Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Splice_Site',
                              'Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins'])
    exon_funcs = frozenset(["3'UTR", "5'UTR", 'Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Silent',
                            'Splice_Site',  # was 'Splicing_Site' (typo vs the MAF spec term)
                            'Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins'])
    notgene_funcs = frozenset(["3'Flank", "5'Flank", 'IGR'])
    if type == 'all':
        return True
    if type == 'coding':
        return func in coding_funcs
    if type == 'non-synonymous':
        return func in nonsyn_funcs
    if type == 'exon':
        return func in exon_funcs
    if type == 'gene':
        return func not in notgene_funcs
    return True
# ---- Load somatic mutations from the MAF file ----------------------------
# matrix[id][sample]  -> allele frequency of variant `id` in `sample`
# mutation[id]        -> {'ref': ref_base, 'gene': gene, <sample>: alt_base}
if not os.path.exists(args.outdir):
    assert not os.system('mkdir %s'%args.outdir)
## user defined samples and genes
samples_use = []
if args.samplelist:
    if ',' in args.samplelist:
        samples_use = args.samplelist.split(',')
    else:
        samples_use = [each.split()[0] for each in open(args.samplelist).read().strip().split('\n')]
matrix = {}
mutation = {}
ids_maf = []
samples_maf = []
i = 1
for line in open(args.maf):
    if i == 1:  # skip the MAF header line
        i += 1
        continue
    array = line.strip().split('\t')
    # keep only user-listed samples (column 16 = Tumor_Sample_Barcode)
    if array[15] not in samples_use and args.samplelist:
        continue
    ## INDEL filter
    if (not args.indel) and (array[9] == 'INS' or array[9] == 'DEL' or array[9] == 'INDEL'):
        continue
    ## func filter
    if not filter_funcs(array[8], args.func):
        continue
    # variant key: chrom:pos:ref>alt  (NOTE: `id` shadows the builtin)
    id = '%s:%s:%s>%s' % (array[4], array[5], array[10], array[12])
    if id not in ids_maf:
        ids_maf.append(id)
        matrix[id] = {}
        mutation[id] = {'ref':array[10], 'gene':array[0]}
    mutation[id][array[15]] = array[12]
    matrix[id][array[15]] = float(array[33])/(int(array[33])+int(array[32])) ## AF
    if array[15] not in samples_maf:
        samples_maf.append(array[15])
samples = samples_use or samples_maf
samples.insert(0,args.patient)  # patient pseudo-sample acts as the tree root
# ---- Optionally add germline variants (shared by all tumor samples) ------
ids_germline = []
if args.germline:
    i = 1
    for line in open(args.germline):
        if line.startswith('##'):  # VCF-style meta lines
            continue
        if i == 1:  # column-header line
            i += 1
            continue
        array = line.strip().split('\t')
        id = '%s:%s:%s>%s' % (array[args.chr], array[args.position], array[args.ref], array[args.alt])
        if id in mutation:  # already recorded as a somatic call
            continue
        ids_germline.append(id)
        mutation[id] = {'ref':array[args.ref], 'gene':array[args.gene]}
        mutation[id][args.patient] = array[args.alt]
        matrix[id] = {args.patient:1}
        # mutation[id][args.patient] = array[args.alt]
        # matrix[id] = {args.patient:1}
        # every tumor sample carries the germline alt base in the sequence,
        # but with presence 0 in the binary matrix
        for each in samples[1:]:
            mutation[id][each] = array[args.alt]
            matrix[id][each] = 0
# ---- Build the presence matrix and write <patient>.<func>.clean.matrix ----
prefix = os.path.join(args.outdir,args.patient+'.'+args.func)
matrix_cln = open(prefix+'.clean.matrix.txt','w')
matrix_cln.write('ID\t'+'\t'.join(samples)+'\n')
data = []
N = 0  # remaining synthetic/germline root markers to emit
if args.root_number:
    N = args.root_number
id_select = []
for id in ids_maf:
    line = [matrix[id].get(each,0) for each in samples]
    if len([each for each in line if each>0]) == 0:  # variant absent everywhere
        continue
    if not (args.germline and args.root_number):
        # patient column = 1 only when the variant is shared by all samples
        line[0] = int(len([each for each in line[1:] if each>0]) == len(line[1:]))
    data.append(line)
    matrix_cln.write('\t'.join([id]+[str(each) for each in line])+'\n')
    id_select.append(id)
for id in ids_germline:
    line = [matrix[id].get(each,0) for each in samples]
    data.append(line)
    matrix_cln.write('\t'.join([id]+[str(each) for each in line])+'\n')
    id_select.append(id)
    if args.root_number and N <= 0:  # cap the number of germline markers
        break
    N = N -1
if args.root_number and N > 0:
    # not enough germline variants: fabricate 'G:<n>' root-only markers
    for n in range(N):
        id = 'G:%d'%n
        ref = ['A','T','C','G'][n%4]
        alt = ['A','T','C','G'][(n+1)%4]
        mutation[id] = {'ref':ref, 'gene':'NA'}
        mutation[id][args.patient] = alt
        matrix[id] = {args.patient:1}
        for each in samples[1:]:
            mutation[id][each] = ref
            matrix[id][each] = 0
        data.append([1]+[0]*(len(samples)-1))
        id_select.append(id)
        matrix_cln.write(id+'\t1\t'+'\t'.join(['0']*(len(samples)-1))+'\n')
matrix_cln.close()
## root
# ---- Binary matrix, mutual-exclusion sort, and MEGA/phylip inputs --------
order_ids = np.array(id_select)
order_samples = np.array(samples)
d = np.array(data)
# presence/absence matrix (np.int is the old alias; Python-2-era numpy)
f = np.array((d>0).tolist(),dtype=np.int)
## add root
#root_stat = np.array([each.sum()==e.shape[1] for each in e], dtype=np.int)
#f = e.transpose().tolist()
#f.insert(0,root_stat)
#f = np.array(f).transpose()
cols = range(f.shape[1])
rows = range(f.shape[0])
## mutual exclude sort
# stable per-column re-sort, right to left, groups rows sharing samples
for x in cols[::-1]:
    index = copy.deepcopy(rows)  # Python 2: range() returns a list
    index.sort(key = lambda i : -f[i,x])
    f = f[index, ::]
    order_ids = order_ids[index]
stat_file = open(prefix+'.stat','w')
meg_file = open(prefix+'.nucl.meg','w')
meg_file.write('#mega\n!Title evolution;\n!Format DataType=DNA indel=-\n\n')
stat_file.write('\t%d\t%d\n' % (f.shape[1], f.shape[0]))
sample_mapping = {}  # phylip alias 'sample<i>' -> real sample name
for i in np.arange(f.shape[1]):
    column = [str(each) for each in f[::,i].tolist()]
    sample_mapping['sample%d'%i] = order_samples[i]
    sample = 'sample%d '%i
    stat_file.write('%s%s\n' %(sample[0:10], ''.join(column)))  # phylip wants 10-char names
    meg_file.write('#'+order_samples[i]+'\n')
    # pseudo-sequence: alt base where mutated, else ref; wrapped at 60 cols
    seq = ''.join([mutation[each].get(order_samples[i],mutation[each]['ref']) for each in order_ids])
    meg_file.write('\n'.join([seq[each*60:min((each+1)*60,len(seq))] for each in range(len(seq)/60+1)])+'\n')
    # meg_file.write(''.join([mutation[each].get(order_samples[i],mutation[each]['ref']) for each in order_ids])+'\n')
stat_file.close()
meg_file.close()
## phylip
# ---- Run phylip pars, name internal nodes, map patterns to branches ------
open(os.path.join(args.outdir,'pars.params.'+args.func),'w').write('%s\nY\n'%(prefix+'.stat'))
phylip = '''
log=%s/log
cd %s && \\
cp Evolution/phylip-3.695/src/font1 fontfile && \\
if [ -f outtree ];then rm -f outtree; fi && \\
if [ -f outfile ];then rm -f outfile; fi && \\
if [ -f plotfile ];then rm -f plotfile; fi && \\
echo %s ... pars infer tree ... > $log
echo ........................................... >> $log
Evolution/phylip-3.695/exe/pars < pars.params.%s >> $log && \\
mv -f outfile %s.pars_out && \\
mv -f outtree %s.tree
#echo %s ... drawtree ... > $log
#echo ........................................... >> $log
#Evolution/phylip-3.695/exe/drawtree < drawtree.params.%s >> $log && \\
#mv -f plotfile %s.tree.pdf && \\
#convert %s.tree.pdf %s.tree.png
''' % (args.outdir, args.outdir, args.patient, args.func, prefix, prefix, args.patient, args.func, prefix, prefix, prefix)
open(os.path.join(args.outdir,args.func+'.createTree.sh'),'w').write(phylip)
assert not os.system('sh %s' % (os.path.join(args.outdir,args.func+'.createTree.sh')))
t = Tree(prefix+'.tree')
# rename phylip's 'sample<i>' leaves back to real sample names
for node in t:
    node.name = sample_mapping.get(node.name,node.name)
t.write(format=1,outfile=prefix+'.tree.nk')
open(os.path.join(args.outdir,'drawtree.params.'+args.func),'w').write('%s\nY\n'%(prefix+'.tree.nk'))
drawtree = '''
cd %s && \
drawtree < drawtree.params.%s && \
mv -f plotfile %s.phylip.tree.pdf && \
convert %s.phylip.tree.pdf %s.phylip.tree.png
''' % (args.outdir, args.func, prefix, prefix, prefix)
open(os.path.join(args.outdir,args.func+'.drawTree.sh'),'w').write(drawtree)
assert not os.system('sh %s' % (os.path.join(args.outdir,args.func+'.drawTree.sh')))
# root the tree at the patient pseudo-sample and name internal nodes
t.name = 'node0'
t.set_outgroup(args.patient)
i = 1
for node in t.traverse("levelorder"):
    if node.name == "":
        node.name = "node%d"%i
        i += 1
node2labels = t.get_cached_content(store_attr="name")
drivergenes = []
if args.drivergene:
    drivergenes = [each.split()[0] for each in open(args.drivergene).read().strip().split('\n')]
# branch names and the sample-presence pattern that maps onto each branch
branches = ['Germline','Germline-node1']
pat2branch = {'1'+'0'*(len(order_samples)-1):'Germline', '1'*len(order_samples):'Germline-node1'}
edge_muts = {'Germline':{'id':[],'driver':[]}, 'Germline-node1':{'id':[],'driver':[]}}
for node in t.iter_descendants("preorder"):
    if node.name == 'node0' or node.up.name == 'node0':
        continue
    # pattern bit i = 1 iff sample i is a leaf below this node
    pattern = ''.join([str(int(each in node2labels[t&node.name])) for each in order_samples])
    branches.append('%s-%s'%(node.up.name,node.name))
    pat2branch[pattern] = '%s-%s'%(node.up.name,node.name)
    edge_muts['%s-%s'%(node.up.name,node.name)] = {'id':[],'driver':[]}
branches.append('Out-of-tree')
pat2branch['0'*len(order_samples)] = 'Out-of-tree'
edge_muts['Out-of-tree'] = {'id':[],'driver':[]}
# assign every mutation to the branch whose pattern it matches
for i,id in enumerate(order_ids):
    pattern = ''.join(np.array(f[i],dtype=np.str))
    if pattern in pat2branch:
        edge_muts[pat2branch[pattern]]['id'].append(id)
        if mutation[id]['gene'] in drivergenes:
            edge_muts[pat2branch[pattern]]['driver'].append(id)
    else:
        edge_muts['Out-of-tree']['id'].append(id)
        if mutation[id]['gene'] in drivergenes:
            edge_muts['Out-of-tree']['driver'].append(id)
def _write_branch_table(out, branch_names, muts_by_branch, mut_info, key):
    """Write one row per mutation rank: for every branch, the mutation ID and
    its gene (empty cells once a branch's list is exhausted). `key` selects
    the 'id' (all mutations) or 'driver' (driver-gene mutations) list.
    Replaces two copy-pasted while-loops with one shared implementation."""
    row = 0
    while True:
        cells = ['Mutations' if row == 0 else '']
        any_left = False
        for branch in branch_names:
            muts = muts_by_branch[branch][key]
            if len(muts) > row:
                cells.append(muts[row])
                cells.append(mut_info[muts[row]]['gene'])
                any_left = True
            else:
                cells.append('')
                cells.append('')
        row += 1
        if not any_left:  # final, all-empty row is never written
            break
        out.write('\t'.join(cells) + '\n')

# Per-branch mutation tables: header row, per-branch counts, then the
# mutation/gene columns.
mutation_in_tree = open(prefix+'.tree_mutations.xls','w')
mutation_in_tree.write('Branches\t'+'\t'.join(['%s.Mutation\t%s.Gene'%(each,each) for each in branches])+'\n')
mutation_in_tree.write('Count\t'+'\t'.join(['%d\t%d' %(len(edge_muts[each]['id']),\
len(set([mutation[x]['gene'] for x in edge_muts[each]['id']]))) for each in branches])+'\n')
driver_in_tree = open(prefix+'.tree_mutations.driver.xls','w')
driver_in_tree.write('Branches\t'+'\t'.join(['%s.Mutation\t%s.Gene'%(each,each) for each in branches])+'\n')
driver_in_tree.write('Count\t'+'\t'.join(['%d\t%d' %(len(edge_muts[each]['driver']),\
len(set([mutation[x]['gene'] for x in edge_muts[each]['driver']]))) for each in branches])+'\n')
_write_branch_table(mutation_in_tree, branches, edge_muts, mutation, 'id')
mutation_in_tree.close()
_write_branch_table(driver_in_tree, branches, edge_muts, mutation, 'driver')
driver_in_tree.close()
# ---- Render the tree with ete2 -------------------------------------------
ts = TreeStyle()
ts.show_leaf_name = True
ts.margin_bottom = 40
ts.margin_top = 40
ts.margin_left = 10
ts.margin_right = 10
ts.show_scale = False
ts.scale=0.3
# style1: internal nodes / labelled branches; style2: leaves
style1 = NodeStyle()
style1['size'] = 4
style1['fgcolor'] = '#5500ff'
style1['hz_line_color'] = '#55aa00'
style1['vt_line_color'] = '#55aa00'
style1['hz_line_width'] = 2
style1['vt_line_width'] = 2
style2 = NodeStyle()
style2['size'] = 6
style2['fgcolor'] = '#0055ff'
style2['hz_line_color'] = '#55aa00'
style2['vt_line_color'] = '#55aa00'
style2['hz_line_width'] = 2
style2['vt_line_width'] = 2
# render the subtree below node1 (everything except the germline root)
t0 = t&'node1'
t0.add_face(TextFace('1'),column=0,position='branch-right')
t0.img_style = style1
for node in t0.iter_descendants("preorder"):
    if node.name.startswith('node'):
        # label internal nodes with their numeric id
        (t0&node.name).add_face(TextFace(node.name.replace('node','')),column=0,position='branch-right')
        node.img_style = style1
    else:
        node.img_style = style2
t0.render(prefix+'.ete.tree.png',dpi=300,tree_style=ts,w=2000)
#t0.render(prefix+'.ete.tree.pdf',tree_style=ts,w=2000)
'''
t0 = t&'node1'
t0.render(prefix+'.ete.tree.png',dpi=300,w=1000,h=600)
t0.render(prefix+'.ete.tree.pdf',w=1000,h=600)
'''
# ---- Alternative: hierarchical clustering of the binary matrix in R ------
# (generation of hclust.R is currently disabled; kept for reference)
Rscript='''
setwd('%s')
dat<-read.table("%s",head=T,row.names=1)
dat[dat>0] <- 1
dat$%s<-as.numeric(sapply(1:nrow(dat),function(x) all(dat[x,]>0)))
d<-data.matrix(dat)
h<-hclust(dist(t(d)))
png("%s",type="cairo-png")
plot(h,hang=-1,xlab="",ylab="",axes=FALSE,sub="")
dev.off()
'''%(args.outdir, prefix+'.clean.matrix.txt', args.patient, args.patient+'.tree.png')
#open(os.path.join(args.outdir,'hclust.R'),'w').write(Rscript)
#assert not os.system('Rscript %s' % (os.path.join(args.outdir,'hclust.R')))
| gda7090/cancer | phylogenetic_tree_phylip.py | phylogenetic_tree_phylip.py | py | 13,457 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
... |
28663203794 | import os
from dotenv import load_dotenv
import sqlalchemy
from sqlalchemy import join
from sqlalchemy.orm import sessionmaker, query
from models import create_tables, Publisher, Book, Shop, Stock, Sale
# Build the DSN from .env credentials, (re)create the schema, and seed demo
# data: publishers, books, shops, stock rows, and sales.
load_dotenv()
user = os.environ.get('USER')
password = os.environ.get('PASSWORD')
db = os.environ.get('DB')
DSN = f"postgresql://{user}:{password}@localhost:5432/{db}"
engine = sqlalchemy.create_engine(DSN)
create_tables(engine)
Session = sessionmaker(bind=engine)
session = Session()
# publishers
publisher1 = Publisher(name='АСТ')
publisher2 = Publisher(name='Эксмо')
session.add_all([publisher1, publisher2])
session.commit()
# books linked to publishers
book1 = Book(title='Капитанская дочка', publisher=publisher1)
book2 = Book(title='Руслан и Людмила', publisher=publisher2)
book3 = Book(title='Евгений Онегин', publisher=publisher1)
session.add_all([book1, book2, book3])
session.commit()
# shops
shop1 = Shop(name='Буквоед')
shop2 = Shop(name='Лабиринт')
shop3 = Shop(name='Книжный дом')
session.add_all([shop1, shop2, shop3])
session.commit()
# stock: which book is available in which shop, and how many
stock1 = Stock(book=book1, shop=shop1, count=9)
stock2 = Stock(book=book2, shop=shop2, count=8)
stock3 = Stock(book=book3, shop=shop3, count=7)
session.add_all([stock1, stock2, stock3])
session.commit()
# sales referencing stock rows
sale1 = Sale(price=111, date_sale='2023-02-08', stock=stock1, count=1)
sale2 = Sale(price=222, date_sale='2023-02-09', stock=stock2, count=1)
sale3 = Sale(price=333, date_sale='2023-02-10', stock=stock3, count=1)
sale4 = Sale(price=444, date_sale='2023-02-11', stock=stock1, count=1)
sale5 = Sale(price=555, date_sale='2023-02-12', stock=stock2, count=1)
session.add_all([sale1, sale2, sale3, sale4, sale5])
session.commit()
# interactive lookup parameters
pub_name = input('Название издательства: ')
pub_id = input('Идентификатор издательства: ')
def get_sales(publisher_name='', publisher_id=''):
    """Print every sale of the given publisher's books.

    Accepts either a publisher name or a publisher id; when a non-empty
    name is given it is resolved to an id first (raising, as before, if
    the name is unknown or ambiguous via Query.one()).
    Output: one "title|shop|price|date" line per sale.
    """
    if publisher_name != '':
        publisher_id = session.query(Publisher).filter(
            Publisher.name == publisher_name).one().id
    # One filter() with all join predicates; the original chained six
    # filter() calls and repeated `Publisher.id == publisher_id` twice.
    records = session.query(
        Publisher, Book, Stock, Shop, Sale
    ).filter(
        Publisher.id == publisher_id,
        Publisher.id == Book.id_publisher,
        Stock.id_book == Book.id,
        Shop.id == Stock.id_shop,
        Sale.id_stock == Stock.id,
    ).all()
    for _publisher, book, _stock, shop, sale in records:
        print(book.title, shop.name, sale.price, sale.date_sale, sep='|')
# Script entry point: query by whichever identifier the user typed above.
if __name__ == '__main__':
    get_sales(pub_name, pub_id)
    session.close() | juicebiz/13-orm | main.py | main.py | py | 2,648 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"... |
26184567771 | # %%
import torch
import torch.nn as nn
import torch.optim as optim
import torchtext
from torchtext.data import Field, BucketIterator, TabularDataset
from torchtext.data.functional import sentencepiece_tokenizer, load_sp_model
from pathlib import Path
import dill
import numpy as np
import os
import random
import re
# %%
# path where the model and training data (for vocabulary) is saved
# name of the model: "model.pt"
path = Path("model/sentencepiece/20200728-152255/english_pretraining_with_sp/")
# Default input sentence, translated when run as a script (see __main__ guard).
INPUT = "Das Wetter ist sehr schön."
# %%
# Trained SentencePiece subword models: German (source) / Low German (target).
sp_deu = load_sp_model(str(path / "spm_deu.model"))
sp_nds = load_sp_model(str(path / "spm_nds.model"))
#%%
# Fields operate on raw SentencePiece ids (use_vocab=False): `tokenize` maps a
# string to id lists and the special tokens are the sp models' numeric ids.
SRC = Field(use_vocab = False, tokenize = sp_deu.encode,
            init_token = sp_deu.bos_id(),
            eos_token = sp_deu.eos_id(),
            pad_token = sp_deu.pad_id(),
            batch_first = True
            )
TRG = Field(use_vocab = False, tokenize = sp_nds.encode,
            init_token = sp_nds.bos_id(),
            eos_token = sp_nds.eos_id(),
            pad_token = sp_nds.pad_id(),
            batch_first = True
            )
print(SRC)  # debug leftover: prints the Field object's repr
# %%
class Encoder(nn.Module):
    """Transformer encoder: token + learned positional embeddings followed by
    a stack of self-attention layers."""

    def __init__(self,
                 input_dim,
                 hid_dim,
                 n_layers,
                 n_heads,
                 pf_dim,
                 dropout,
                 device,
                 max_length = 100):
        """input_dim: source vocabulary size; max_length: longest supported
        sequence (size of the positional embedding table)."""
        super().__init__()
        self.device = device
        self.tok_embedding = nn.Embedding(input_dim, hid_dim)
        self.pos_embedding = nn.Embedding(max_length, hid_dim)
        self.layers = nn.ModuleList([EncoderLayer(hid_dim,
                                                  n_heads,
                                                  pf_dim,
                                                  dropout,
                                                  device)
                                     for _ in range(n_layers)])
        self.dropout = nn.Dropout(dropout)
        # sqrt(hid_dim) factor applied to token embeddings before the
        # positional embedding is added.
        self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)

    def forward(self, src, src_mask):
        #src = [batch size, src len]
        #src_mask = [batch size, src len]
        batch_size = src.shape[0]
        src_len = src.shape[1]
        # positions 0..src_len-1, one row per batch element
        pos = torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
        #pos = [batch size, src len]
        src = self.dropout((self.tok_embedding(src) * self.scale) + self.pos_embedding(pos))
        #src = [batch size, src len, hid dim]
        for layer in self.layers:
            src = layer(src, src_mask)
        #src = [batch size, src len, hid dim]
        return src
#
# %%
class EncoderLayer(nn.Module):
    """One encoder block: self-attention then position-wise feed-forward,
    each followed by dropout, a residual connection and LayerNorm
    (post-norm arrangement)."""

    def __init__(self,
                 hid_dim,
                 n_heads,
                 pf_dim,
                 dropout,
                 device):
        super().__init__()
        self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.ff_layer_norm = nn.LayerNorm(hid_dim)
        self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
        self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim,
                                                                     pf_dim,
                                                                     dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src, src_mask):
        #src = [batch size, src len, hid dim]
        #src_mask = [batch size, src len]
        #self attention
        _src, _ = self.self_attention(src, src, src, src_mask)
        #dropout, residual connection and layer norm
        src = self.self_attn_layer_norm(src + self.dropout(_src))
        #src = [batch size, src len, hid dim]
        #positionwise feedforward
        _src = self.positionwise_feedforward(src)
        #dropout, residual and layer norm
        src = self.ff_layer_norm(src + self.dropout(_src))
        #src = [batch size, src len, hid dim]
        return src
# %%
class MultiHeadAttentionLayer(nn.Module):
    """Multi-head scaled dot-product attention.

    Projects query/key/value into `n_heads` sub-spaces of `head_dim`
    dimensions each, attends per head, concatenates the heads again and
    applies the output projection.  Returns (output, attention weights).
    """

    def __init__(self, hid_dim, n_heads, dropout, device):
        super().__init__()
        assert hid_dim % n_heads == 0
        self.hid_dim = hid_dim
        self.n_heads = n_heads
        self.head_dim = hid_dim // n_heads
        # Q/K/V/output projections; created in this exact order so the
        # state-dict keys and parameter initialisation match the checkpoint.
        for proj_name in ("fc_q", "fc_k", "fc_v", "fc_o"):
            setattr(self, proj_name, nn.Linear(hid_dim, hid_dim))
        self.dropout = nn.Dropout(dropout)
        # sqrt(head_dim) normaliser for the dot-product scores.
        self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)

    def forward(self, query, key, value, mask = None):
        # query/key/value = [batch size, * len, hid dim]
        n_batch = query.shape[0]

        def split_heads(t):
            # [batch, len, hid] -> [batch, heads, len, head dim]
            return t.view(n_batch, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)

        q = split_heads(self.fc_q(query))
        k = split_heads(self.fc_k(key))
        v = split_heads(self.fc_v(value))
        # scores = [batch size, n heads, query len, key len]
        scores = torch.matmul(q, k.permute(0, 1, 3, 2)) / self.scale
        if mask is not None:
            # masked positions get a large negative score -> ~0 after softmax
            scores = scores.masked_fill(mask == 0, -1e10)
        attention = torch.softmax(scores, dim = -1)
        # weighted sum of values, heads merged back into hid_dim
        context = torch.matmul(self.dropout(attention), v)
        context = context.permute(0, 2, 1, 3).contiguous().view(n_batch, -1, self.hid_dim)
        # output = [batch size, query len, hid dim]
        return self.fc_o(context), attention
# %%
class PositionwiseFeedforwardLayer(nn.Module):
    """Two-layer position-wise MLP: hid_dim -> pf_dim -> hid_dim with a ReLU
    and dropout after the first projection."""

    def __init__(self, hid_dim, pf_dim, dropout):
        super().__init__()
        self.fc_1 = nn.Linear(hid_dim, pf_dim)
        self.fc_2 = nn.Linear(pf_dim, hid_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x = [batch size, seq len, hid dim]
        hidden = torch.relu(self.fc_1(x))
        # hidden = [batch size, seq len, pf dim]
        return self.fc_2(self.dropout(hidden))
# %%
class Decoder(nn.Module):
    """Transformer decoder: token + learned positional embeddings, a stack of
    decoder layers attending to the encoder output, and a final projection to
    vocabulary logits."""

    def __init__(self,
                 output_dim,
                 hid_dim,
                 n_layers,
                 n_heads,
                 pf_dim,
                 dropout,
                 device,
                 max_length = 100):
        """output_dim: target vocabulary size; max_length: longest supported
        sequence (positional embedding table size)."""
        super().__init__()
        self.device = device
        self.tok_embedding = nn.Embedding(output_dim, hid_dim)
        self.pos_embedding = nn.Embedding(max_length, hid_dim)
        self.layers = nn.ModuleList([DecoderLayer(hid_dim,
                                                  n_heads,
                                                  pf_dim,
                                                  dropout,
                                                  device)
                                     for _ in range(n_layers)])
        self.fc_out = nn.Linear(hid_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
        # sqrt(hid_dim) factor applied to token embeddings before positions.
        self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)

    def forward(self, trg, enc_src, trg_mask, src_mask):
        #trg = [batch size, trg len]
        #enc_src = [batch size, src len, hid dim]
        #trg_mask = [batch size, trg len]
        #src_mask = [batch size, src len]
        batch_size = trg.shape[0]
        trg_len = trg.shape[1]
        # positions 0..trg_len-1, one row per batch element
        pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
        #pos = [batch size, trg len]
        trg = self.dropout((self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos))
        #trg = [batch size, trg len, hid dim]
        for layer in self.layers:
            trg, attention = layer(trg, enc_src, trg_mask, src_mask)
        #trg = [batch size, trg len, hid dim]
        #attention = [batch size, n heads, trg len, src len]
        output = self.fc_out(trg)
        #output = [batch size, trg len, output dim]
        # attention is the encoder-attention of the LAST decoder layer only.
        return output, attention
# %%
class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, encoder attention and
    position-wise feed-forward; each sub-layer is followed by dropout, a
    residual connection and LayerNorm (post-norm arrangement)."""

    def __init__(self,
                 hid_dim,
                 n_heads,
                 pf_dim,
                 dropout,
                 device):
        super().__init__()
        self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.enc_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.ff_layer_norm = nn.LayerNorm(hid_dim)
        self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
        self.encoder_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
        self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim,
                                                                     pf_dim,
                                                                     dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, trg, enc_src, trg_mask, src_mask):
        #trg = [batch size, trg len, hid dim]
        #enc_src = [batch size, src len, hid dim]
        #trg_mask = [batch size, trg len]
        #src_mask = [batch size, src len]
        #self attention
        _trg, _ = self.self_attention(trg, trg, trg, trg_mask)
        #dropout, residual connection and layer norm
        trg = self.self_attn_layer_norm(trg + self.dropout(_trg))
        #trg = [batch size, trg len, hid dim]
        #encoder attention (queries from decoder, keys/values from encoder)
        _trg, attention = self.encoder_attention(trg, enc_src, enc_src, src_mask)
        #dropout, residual connection and layer norm
        trg = self.enc_attn_layer_norm(trg + self.dropout(_trg))
        #trg = [batch size, trg len, hid dim]
        #positionwise feedforward
        _trg = self.positionwise_feedforward(trg)
        #dropout, residual and layer norm
        trg = self.ff_layer_norm(trg + self.dropout(_trg))
        #trg = [batch size, trg len, hid dim]
        #attention = [batch size, n heads, trg len, src len]
        return trg, attention
# %% [markdown]
# ### Seq2Seq
#
# %%
class Seq2Seq(nn.Module):
    """Wires an Encoder and a Decoder together and builds both attention
    masks from the pad indices."""

    def __init__(self,
                 encoder,
                 decoder,
                 src_pad_idx,
                 trg_pad_idx,
                 device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.device = device

    def make_src_mask(self, src):
        """Boolean mask hiding source padding positions."""
        #src = [batch size, src len]
        src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)
        #src_mask = [batch size, 1, 1, src len]
        return src_mask

    def make_trg_mask(self, trg):
        """Pad mask AND-ed with a lower-triangular causal mask so each target
        position can only attend to itself and earlier positions."""
        #trg = [batch size, trg len]
        trg_pad_mask = (trg != self.trg_pad_idx).unsqueeze(1).unsqueeze(2)
        #trg_pad_mask = [batch size, 1, 1, trg len]
        trg_len = trg.shape[1]
        trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len), device = self.device)).bool()
        #trg_sub_mask = [trg len, trg len]
        trg_mask = trg_pad_mask & trg_sub_mask
        #trg_mask = [batch size, 1, trg len, trg len]
        return trg_mask

    def forward(self, src, trg):
        #src = [batch size, src len]
        #trg = [batch size, trg len]
        src_mask = self.make_src_mask(src)
        trg_mask = self.make_trg_mask(trg)
        #src_mask = [batch size, 1, 1, src len]
        #trg_mask = [batch size, 1, trg len, trg len]
        enc_src = self.encoder(src, src_mask)
        #enc_src = [batch size, src len, hid dim]
        output, attention = self.decoder(trg, enc_src, trg_mask, src_mask)
        #output = [batch size, trg len, output dim]
        #attention = [batch size, n heads, trg len, src len]
        return output, attention
def evaluate(model, iterator, criterion):
    """Return the mean per-batch loss over *iterator* without updating
    gradients.

    Teacher forcing: the decoder input is trg[:, :-1] and the target is
    trg[:, 1:], i.e. every token is predicted from its predecessors.
    """
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src = batch.src
            trg = batch.trg
            output, _ = model(src, trg[:,:-1])
            #output = [batch size, trg len - 1, output dim]
            #trg = [batch size, trg len]
            output_dim = output.shape[-1]
            # flatten batch and time dims for the (cross-entropy) criterion
            output = output.contiguous().view(-1, output_dim)
            trg = trg[:,1:].contiguous().view(-1)
            #output = [batch size * trg len - 1, output dim]
            #trg = [batch size * trg len - 1]
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# %%
def instantiate_objects(SRC, TRG, vocab_size=None):
    """Create the transformer Encoder/Decoder pair used by this script.

    Bug fix: the original signature was ``vocab_size=vocab_size``, which
    evaluates the (undefined) global ``vocab_size`` when the ``def``
    statement executes and therefore raised NameError at import time.
    The default is now ``None`` and is resolved lazily from the loaded
    SentencePiece model.

    SRC/TRG are accepted for call-compatibility but are not used.
    Returns (encoder, decoder); both still need wrapping in Seq2Seq.
    """
    if vocab_size is None:
        # NOTE(review): assumes source and target spm models were trained
        # with the same vocabulary size -- confirm against training config.
        vocab_size = sp_deu.GetPieceSize()
    INPUT_DIM = vocab_size
    OUTPUT_DIM = vocab_size
    # hidden_dim was 256 before
    HID_DIM = 256
    ENC_LAYERS = 3
    DEC_LAYERS = 3
    ENC_HEADS = 8
    DEC_HEADS = 8
    ENC_PF_DIM = 512
    DEC_PF_DIM = 512
    ENC_DROPOUT = 0.1
    DEC_DROPOUT = 0.1
    enc = Encoder(INPUT_DIM,
                  HID_DIM,
                  ENC_LAYERS,
                  ENC_HEADS,
                  ENC_PF_DIM,
                  ENC_DROPOUT,
                  device)
    dec = Decoder(OUTPUT_DIM,
                  HID_DIM,
                  DEC_LAYERS,
                  DEC_HEADS,
                  DEC_PF_DIM,
                  DEC_DROPOUT,
                  device)
    return enc, dec
# %%
# Inference runs on CPU (model.pt is loaded with map_location below).
device = torch.device("cpu")
# %%
def translate_sentence(sentence, src_field, trg_field, model, device, max_len = 50):
    """Greedy-decode *sentence* (a German string, or a pre-encoded list of
    SentencePiece ids) into Low German with *model*.

    Returns the sp_nds-decoded string of the generated id sequence
    (including the BOS/EOS ids, as before).
    """
    model.eval()
    if isinstance(sentence, str):
        tokens = sp_deu.encode(sentence)
    else:
        tokens = list(sentence)
    # Bug fix: the original used `and`, so BOS/EOS were only added when BOTH
    # were missing (a half-delimited input stayed half-delimited) and an
    # empty input raised IndexError.  Each end is now handled independently.
    if not tokens or tokens[0] != src_field.init_token:
        tokens = [src_field.init_token] + tokens
    if tokens[-1] != src_field.eos_token:
        tokens = tokens + [src_field.eos_token]
    src_tensor = torch.LongTensor(tokens).unsqueeze(0).to(device)
    src_mask = model.make_src_mask(src_tensor)
    with torch.no_grad():
        enc_src = model.encoder(src_tensor, src_mask)
    # Greedy decoding: feed the growing target prefix back in until EOS is
    # produced or max_len tokens have been generated.
    trg_indexes = [trg_field.init_token]
    for _ in range(max_len):
        trg_tensor = torch.LongTensor(trg_indexes).unsqueeze(0).to(device)
        trg_mask = model.make_trg_mask(trg_tensor)
        with torch.no_grad():
            output, attention = model.decoder(trg_tensor, enc_src, trg_mask, src_mask)
        pred_token = output.argmax(2)[:, -1].item()
        trg_indexes.append(pred_token)
        if pred_token == trg_field.eos_token:
            break
    return sp_nds.decode(trg_indexes)
# %%
# Build the model, restore the trained weights on CPU and create the loss
# (padding positions are ignored by the criterion).
enc , dec = instantiate_objects(SRC,TRG)
SRC_PAD_IDX = SRC.pad_token
TRG_PAD_IDX = TRG.pad_token
model = Seq2Seq(enc, dec, SRC_PAD_IDX, TRG_PAD_IDX, device).to(device)
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)
model.load_state_dict(torch.load(path / 'model.pt', map_location=torch.device(device)))
def translate_to_platt(input_string):
    """Translate a German sentence (or id list) to Low German using the
    module-level model and fields."""
    return translate_sentence(input_string, SRC, TRG, model, device)
# %%
import pandas as pd
# Sentences without a reference translation yet; keep only the German column.
untranslated = pd.read_csv("preprocessed_data/tatoeba/untranslated_sentences.csv", index_col = 0)
untranslated = untranslated[["deu"]]
def get_length(df):
    """Return a copy of *df* where each 'deu' string is replaced by its
    token count (whitespace/punctuation-separated)."""
    out = df.copy()
    # split by a simple tokenizer which uses regex and return the length of tokens
    out["deu"] = out["deu"].str.split(r"[\s.,;:?!-\"\']+")
    return out.applymap(len)
def get_range(df, start, end):
    """Keep only the rows of *df* whose German token count lies in the
    inclusive range [start, end]."""
    lengths = get_length(df)
    in_range = lengths[lengths["deu"].between(start, end)]
    return df.loc[in_range.index, :]
# Restrict to sentences of 1..30 tokens.
untranslated = get_range(untranslated,1,30)
#%%
# Smoke test: translate one random sample and print both sides.
sentence = untranslated["deu"].sample().item()
print(sentence)
print(translate_to_platt(sentence))
# %%
# Optionally dump 50 fixed-seed machine translations for manual comparison.
ai_comparison = False
if ai_comparison:
    ai_translation = untranslated[["deu"]].sample(50, random_state=42)
    ai_translation["ai_nds"] = untranslated["deu"].sample(50, random_state=42).apply(translate_to_platt)
    ai_translation.to_csv("preprocessed_data/tatoeba/ai_translations_model_pre_training_en_sentencepiece.csv")
# %%
# %%
#sentence = "Ich habe während der Mittagspause ein wenig geschlafen, weil ich so müde war."
sentence = "Nachdem wir die Erdbeeren gepflückt haben, backen wir einen leckeren Kuchen."
#sentence = "Das Wetter wird übermorgen sehr schön."
# NOTE(review): this second assignment shadows the first, so only the last
# sentence is translated here; it is passed pre-encoded as sp ids.
sentence = "Mein Handy muss aufgeladen werden."
translate_to_platt(sp_deu.encode(sentence))
# %%
if __name__ == "__main__":
    print(translate_to_platt(INPUT))
| mmcux/de-nds-translation | translate_input.py | translate_input.py | py | 18,238 | python | en | code | 32 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torchtext.data.functional.load_sp_model",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torchtext.data.functional.load_sp_model",
"line_number": 33,
"usage_type": "call"
... |
27438269251 | from __future__ import absolute_import
from __future__ import division
import re
from functools import reduce
import wx
import wx.stc
from six.moves import xrange
from graphics.GraphicCommons import ERROR_HIGHLIGHT, SEARCH_RESULT_HIGHLIGHT, REFRESH_HIGHLIGHT_PERIOD
from plcopen.structures import ST_BLOCK_START_KEYWORDS, IEC_BLOCK_START_KEYWORDS, LOCATIONDATATYPES
from editors.EditorPanel import EditorPanel
from controls.CustomStyledTextCtrl import CustomStyledTextCtrl, faces, GetCursorPos
# -------------------------------------------------------------------------------
# Textual programs Viewer class
# -------------------------------------------------------------------------------
# Character classes used by the hand-written lexer below.
NEWLINE = "\n"
NUMBERS = [str(i) for i in xrange(10)]
LETTERS = ['_']
for i in xrange(26):
    LETTERS.append(chr(ord('a') + i))
    LETTERS.append(chr(ord('A') + i))

# Scintilla style ids used for PLC-language syntax highlighting.
[STC_PLC_WORD, STC_PLC_COMMENT, STC_PLC_NUMBER, STC_PLC_STRING,
 STC_PLC_VARIABLE, STC_PLC_PARAMETER, STC_PLC_FUNCTION, STC_PLC_JUMP,
 STC_PLC_ERROR, STC_PLC_SEARCH_RESULT,
 STC_PLC_EMPTY] = range(11)
# Lexer states.
[SPACE, WORD, NUMBER, STRING, WSTRING, COMMENT, PRAGMA, DPRAGMA] = range(8)

# Regexes for IEC identifiers and IL jump labels, built from named fragments.
re_texts = {}
re_texts["letter"] = "[A-Za-z]"
re_texts["digit"] = "[0-9]"
re_texts["identifier"] = "((?:%(letter)s|(?:_(?:%(letter)s|%(digit)s)))(?:_?(?:%(letter)s|%(digit)s))*)" % re_texts
IDENTIFIER_MODEL = re.compile(re_texts["identifier"])
LABEL_MODEL = re.compile("[ \t\n]%(identifier)s:[ \t\n]" % re_texts)
# Matches the numbered inputs (IN1, IN2, ...) of extensible blocks.
EXTENSIBLE_PARAMETER = re.compile("IN[1-9][0-9]*$")

# Maps graphical highlight kinds onto the Scintilla styles above.
HIGHLIGHT_TYPES = {
    ERROR_HIGHLIGHT: STC_PLC_ERROR,
    SEARCH_RESULT_HIGHLIGHT: STC_PLC_SEARCH_RESULT,
}
def LineStartswith(line, symbols):
    """Return True if *line* starts with any of the strings in *symbols*.

    Idiom fix: str.startswith accepts a tuple of prefixes, so the original
    reduce/map or-fold collapses to a single C-level call (still False for
    an empty *symbols*).
    """
    return line.startswith(tuple(symbols))
class TextViewer(EditorPanel):
    def _init_Editor(self, prnt):
        """Create and configure the Scintilla text control: styles, margins,
        folding markers and event bindings."""
        self.Editor = CustomStyledTextCtrl(parent=prnt, name="TextViewer", size=wx.Size(0, 0), style=0)
        self.Editor.ParentWindow = self
        # Ctrl+'+' / Ctrl+'-' zoom the text.
        self.Editor.CmdKeyAssign(ord('+'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_ZOOMIN)
        self.Editor.CmdKeyAssign(ord('-'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_ZOOMOUT)
        self.Editor.SetViewWhiteSpace(False)
        # Container lexer: styling is done by this class (see OnStyleNeeded).
        self.Editor.SetLexer(wx.stc.STC_LEX_CONTAINER)
        # Global default styles for all languages
        self.Editor.StyleSetSpec(wx.stc.STC_STYLE_DEFAULT, "face:%(mono)s,size:%(size)d" % faces)
        self.Editor.StyleClearAll()  # Reset all to be like the default
        self.Editor.StyleSetSpec(wx.stc.STC_STYLE_LINENUMBER, "back:#C0C0C0,size:%(size)d" % faces)
        self.Editor.SetSelBackground(1, "#E0E0E0")
        # Highlighting styles
        self.Editor.StyleSetSpec(STC_PLC_WORD, "fore:#00007F,bold,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_VARIABLE, "fore:#7F0000,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_PARAMETER, "fore:#7F007F,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_FUNCTION, "fore:#7F7F00,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_COMMENT, "fore:#7F7F7F,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_NUMBER, "fore:#007F7F,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_STRING, "fore:#007F00,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_JUMP, "fore:#FF7FFF,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_ERROR, "fore:#FF0000,back:#FFFF00,size:%(size)d" % faces)
        self.Editor.StyleSetSpec(STC_PLC_SEARCH_RESULT, "fore:#FFFFFF,back:#FFA500,size:%(size)d" % faces)
        # Indicators styles
        self.Editor.IndicatorSetStyle(0, wx.stc.STC_INDIC_SQUIGGLE)
        if self.ParentWindow is not None and self.Controler is not None:
            self.Editor.IndicatorSetForeground(0, wx.RED)
        else:
            self.Editor.IndicatorSetForeground(0, wx.WHITE)
        # Line numbers in the margin
        self.Editor.SetMarginType(1, wx.stc.STC_MARGIN_NUMBER)
        self.Editor.SetMarginWidth(1, 50)
        # Folding
        self.Editor.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEROPEN, wx.stc.STC_MARK_BOXMINUS, "white", "#808080")
        self.Editor.MarkerDefine(wx.stc.STC_MARKNUM_FOLDER, wx.stc.STC_MARK_BOXPLUS, "white", "#808080")
        self.Editor.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERSUB, wx.stc.STC_MARK_VLINE, "white", "#808080")
        self.Editor.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERTAIL, wx.stc.STC_MARK_LCORNER, "white", "#808080")
        self.Editor.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEREND, wx.stc.STC_MARK_BOXPLUSCONNECTED, "white", "#808080")
        self.Editor.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEROPENMID, wx.stc.STC_MARK_BOXMINUSCONNECTED, "white", "#808080")
        self.Editor.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERMIDTAIL, wx.stc.STC_MARK_TCORNER, "white", "#808080")
        # Indentation size
        self.Editor.SetTabWidth(2)
        self.Editor.SetUseTabs(0)
        # Only be notified about user insert/delete, before they happen.
        self.Editor.SetModEventMask(wx.stc.STC_MOD_BEFOREINSERT |
                                    wx.stc.STC_MOD_BEFOREDELETE |
                                    wx.stc.STC_PERFORMED_USER)
        self.Bind(wx.stc.EVT_STC_STYLENEEDED, self.OnStyleNeeded, self.Editor)
        self.Editor.Bind(wx.stc.EVT_STC_MARGINCLICK, self.OnMarginClick)
        self.Editor.Bind(wx.stc.EVT_STC_UPDATEUI, self.OnUpdateUI)
        self.Editor.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        # Editing-related bindings only exist when a project controller is
        # attached (read-only/debug viewers have none).
        if self.Controler is not None:
            self.Editor.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
            self.Bind(wx.stc.EVT_STC_DO_DROP, self.OnDoDrop, self.Editor)
            self.Bind(wx.stc.EVT_STC_MODIFIED, self.OnModification, self.Editor)
    def __init__(self, parent, tagname, window, controler, debug=False, instancepath=""):
        """tagname: "<type>::<name>" of the edited element; controler: project
        controller (None for detached viewers); instancepath: path of the
        debugged instance shown as the tab title in debug mode."""
        if tagname != "" and controler is not None:
            self.VARIABLE_PANEL_TYPE = controler.GetPouType(tagname.split("::")[1])
        EditorPanel.__init__(self, parent, tagname, window, controler, debug)
        self.Keywords = []           # upper-cased language keywords
        self.Variables = {}          # nested dict of visible variable names
        self.Functions = {}          # callable function interfaces, by name
        self.TypeNames = []          # upper-cased data type names
        self.Jumps = []              # IL jump labels found in the text
        self.EnumeratedValues = []
        self.DisableEvents = True    # suppress modification events during refresh
        self.TextSyntax = None       # set later by SetTextSyntax
        self.CurrentAction = None    # pending ("Add"|"Delete", pos) for undo grouping
        self.InstancePath = instancepath
        self.ContextStack = []
        self.CallStack = []
        self.ResetSearchResults()
        self.RefreshHighlightsTimer = wx.Timer(self, -1)
        self.Bind(wx.EVT_TIMER, self.OnRefreshHighlightsTimer, self.RefreshHighlightsTimer)
    def __del__(self):
        """Stop the highlight refresh timer so it cannot fire on a dead window."""
        self.RefreshHighlightsTimer.Stop()
def GetTitle(self):
if self.Debug or self.TagName == "":
if len(self.InstancePath) > 15:
return "..." + self.InstancePath[-12:]
return self.InstancePath
return EditorPanel.GetTitle(self)
    def GetInstancePath(self):
        """Return the path of the debugged instance shown in this viewer."""
        return self.InstancePath
def IsViewing(self, tagname):
if self.Debug or self.TagName == "":
return self.InstancePath == tagname
else:
return self.TagName == tagname
    # --- Thin wrappers delegating to the underlying styled text control ---

    def GetText(self):
        """Return the whole buffer content."""
        return self.Editor.GetText()

    def SetText(self, text):
        """Replace the whole buffer content."""
        self.Editor.SetText(text)

    def SelectAll(self):
        """Select the whole buffer."""
        self.Editor.SelectAll()

    def Colourise(self, start, end):
        """Request re-styling of the given position range (-1 end = to EOF)."""
        self.Editor.Colourise(start, end)

    def StartStyling(self, pos, mask):
        """Begin a styling run at *pos* with the given style-bit mask."""
        self.Editor.StartStyling(pos, mask)

    def SetStyling(self, length, style):
        """Style the next *length* characters of the current styling run."""
        self.Editor.SetStyling(length, style)

    def GetCurrentPos(self):
        """Return the caret position."""
        return self.Editor.GetCurrentPos()
    def ResetSearchResults(self):
        """Clear all highlights and any pending find/replace state."""
        self.Highlights = []
        self.SearchParams = None
        self.SearchResults = None
        self.CurrentFindHighlight = None
    def OnModification(self, event):
        """Group consecutive single-position inserts/deletes into one undo
        buffer entry; any other edit closes the current group and opens a
        new one.  The model refresh is deferred with CallAfter because this
        fires BEFORE the text change is applied."""
        if not self.DisableEvents:
            mod_type = event.GetModificationType()
            if mod_type & wx.stc.STC_MOD_BEFOREINSERT:
                if self.CurrentAction is None:
                    self.StartBuffering()
                elif self.CurrentAction[0] != "Add" or self.CurrentAction[1] != event.GetPosition() - 1:
                    # Not a continuation of the previous typing run.
                    self.Controler.EndBuffering()
                    self.StartBuffering()
                self.CurrentAction = ("Add", event.GetPosition())
                wx.CallAfter(self.RefreshModel)
            elif mod_type & wx.stc.STC_MOD_BEFOREDELETE:
                if self.CurrentAction is None:
                    self.StartBuffering()
                elif self.CurrentAction[0] != "Delete" or self.CurrentAction[1] != event.GetPosition() + 1:
                    # Not a continuation of the previous backspace run.
                    self.Controler.EndBuffering()
                    self.StartBuffering()
                self.CurrentAction = ("Delete", event.GetPosition())
                wx.CallAfter(self.RefreshModel)
        event.Skip()
    def OnDoDrop(self, event):
        """Handle a drag-and-drop onto the text: depending on the dragged
        tuple's kind tag (values[1]) it declares a new block/variable in the
        POU interface and rewrites the dropped text accordingly, or rejects
        the drop with an error dialog.

        NOTE(review): eval() of the drag text assumes drops originate from
        this application's own library/variable panels; it would execute
        arbitrary expressions from other drag sources.
        """
        try:
            values = eval(event.GetDragText())
        except Exception:
            # Plain text drop (no tuple literal): keep it as-is.
            values = event.GetDragText()
        if isinstance(values, tuple):
            message = None
            if values[1] in ["program", "debug"]:
                # Programs / debug entries cannot be dropped into a body.
                event.SetDragText("")
            elif values[1] in ["functionBlock", "function"]:
                blocktype = values[0]
                blockname = values[2]
                if len(values) > 3:
                    blockinputs = values[3]
                else:
                    blockinputs = None
                if values[1] != "function":
                    # Function blocks need an instance variable in the POU.
                    if blockname == "":
                        dialog = wx.TextEntryDialog(self.ParentWindow, _("Block name"), _("Please enter a block name"), "", wx.OK | wx.CANCEL | wx.CENTRE)
                        if dialog.ShowModal() == wx.ID_OK:
                            blockname = dialog.GetValue()
                        else:
                            event.SetDragText("")
                            return
                        dialog.Destroy()
                    if blockname.upper() in [name.upper() for name in self.Controler.GetProjectPouNames(self.Debug)]:
                        message = _("\"%s\" pou already exists!") % blockname
                    elif blockname.upper() in [name.upper() for name in self.Controler.GetEditedElementVariables(self.TagName, self.Debug)]:
                        message = _("\"%s\" element for this pou already exists!") % blockname
                    else:
                        self.Controler.AddEditedElementPouVar(self.TagName, values[0], blockname)
                        self.RefreshVariablePanel()
                        self.RefreshVariableTree()
                # Build a call template listing all formal parameters.
                blockinfo = self.Controler.GetBlockType(blocktype, blockinputs, self.Debug)
                hint = ',\n '.join(
                    [" " + fctdecl[0]+" := (*"+fctdecl[1]+"*)" for fctdecl in blockinfo["inputs"]] +
                    [" " + fctdecl[0]+" => (*"+fctdecl[1]+"*)" for fctdecl in blockinfo["outputs"]])
                if values[1] == "function":
                    event.SetDragText(blocktype+"(\n "+hint+")")
                else:
                    event.SetDragText(blockname+"(\n "+hint+")")
            elif values[1] == "location":
                # Dropping an I/O location declares a located variable.
                _pou_name, pou_type = self.Controler.GetEditedElementType(self.TagName, self.Debug)
                if len(values) > 2 and pou_type == "program":
                    var_name = values[3]
                    dlg = wx.TextEntryDialog(
                        self.ParentWindow,
                        _("Confirm or change variable name"),
                        _('Variable Drop'), var_name)
                    dlg.SetValue(var_name)
                    var_name = dlg.GetValue() if dlg.ShowModal() == wx.ID_OK else None
                    dlg.Destroy()
                    if var_name is None:
                        return
                    elif var_name.upper() in [name.upper() for name in self.Controler.GetProjectPouNames(self.Debug)]:
                        message = _("\"%s\" pou already exists!") % var_name
                    elif var_name.upper() in [name.upper() for name in self.Controler.GetEditedElementVariables(self.TagName, self.Debug)]:
                        message = _("\"%s\" element for this pou already exists!") % var_name
                    else:
                        location = values[0]
                        if not location.startswith("%"):
                            # Incomplete location: ask for the variable class
                            # and complete the "%" direction prefix.
                            dialog = wx.SingleChoiceDialog(
                                self.ParentWindow,
                                _("Select a variable class:"),
                                _("Variable class"),
                                [_("Input"), _("Output"), _("Memory")],
                                wx.DEFAULT_DIALOG_STYLE | wx.OK | wx.CANCEL)
                            if dialog.ShowModal() == wx.ID_OK:
                                selected = dialog.GetSelection()
                            else:
                                selected = None
                            dialog.Destroy()
                            if selected is None:
                                event.SetDragText("")
                                return
                            if selected == 0:
                                location = "%I" + location
                            elif selected == 1:
                                location = "%Q" + location
                            else:
                                location = "%M" + location
                        if values[2] is not None:
                            var_type = values[2]
                        else:
                            # Derive the type from the location size letter.
                            var_type = LOCATIONDATATYPES.get(location[2], ["BOOL"])[0]
                        self.Controler.AddEditedElementPouVar(
                            self.TagName,
                            var_type, var_name,
                            location=location, description=values[4])
                        self.RefreshVariablePanel()
                        self.RefreshVariableTree()
                        event.SetDragText(var_name)
                else:
                    event.SetDragText("")
            elif values[1] == "NamedConstant":
                # Named constants become initialized program variables.
                _pou_name, pou_type = self.Controler.GetEditedElementType(self.TagName, self.Debug)
                if pou_type == "program":
                    initval = values[0]
                    var_name = values[3]
                    dlg = wx.TextEntryDialog(
                        self.ParentWindow,
                        _("Confirm or change variable name"),
                        _('Variable Drop'), var_name)
                    dlg.SetValue(var_name)
                    var_name = dlg.GetValue() if dlg.ShowModal() == wx.ID_OK else None
                    dlg.Destroy()
                    if var_name is None:
                        return
                    elif var_name.upper() in [name.upper() for name in self.Controler.GetProjectPouNames(self.Debug)]:
                        message = _("\"%s\" pou already exists!") % var_name
                    else:
                        var_type = values[2]
                        if not var_name.upper() in [name.upper() for name in self.Controler.GetEditedElementVariables(self.TagName, self.Debug)]:
                            self.Controler.AddEditedElementPouVar(self.TagName,
                                                                  var_type,
                                                                  var_name,
                                                                  description=values[4], initval=initval)
                            self.RefreshVariablePanel()
                            self.RefreshVariableTree()
                        event.SetDragText(var_name)
            elif values[1] == "Global":
                # Globals are referenced through an external variable.
                var_name = values[0]
                dlg = wx.TextEntryDialog(
                    self.ParentWindow,
                    _("Confirm or change variable name"),
                    _('Variable Drop'), var_name)
                dlg.SetValue(var_name)
                var_name = dlg.GetValue() if dlg.ShowModal() == wx.ID_OK else None
                dlg.Destroy()
                if var_name is None:
                    return
                elif var_name.upper() in [name.upper() for name in self.Controler.GetProjectPouNames(self.Debug)]:
                    message = _("\"%s\" pou already exists!") % var_name
                else:
                    if not var_name.upper() in [name.upper() for name in self.Controler.GetEditedElementVariables(self.TagName, self.Debug)]:
                        kwargs = dict(description=values[4]) if len(values) > 4 else {}
                        self.Controler.AddEditedElementPouExternalVar(self.TagName, values[2], var_name, **kwargs)
                        self.RefreshVariablePanel()
                        self.RefreshVariableTree()
                    event.SetDragText(var_name)
            elif values[1] == "Constant":
                event.SetDragText(values[0])
            elif values[3] == self.TagName:
                # Variable dragged from this very POU: just insert its name.
                self.ResetBuffer()
                event.SetDragText(values[0])
                wx.CallAfter(self.RefreshModel)
            else:
                message = _("Variable don't belong to this POU!")
            if message is not None:
                dialog = wx.MessageDialog(self, message, _("Error"), wx.OK | wx.ICON_ERROR)
                dialog.ShowModal()
                dialog.Destroy()
                event.SetDragText("")
        event.Skip()
    def SetTextSyntax(self, syntax):
        """Configure the viewer for a text syntax ("ST", "IL" or "ALL"):
        ST/ALL enable the folding margin and block keywords."""
        self.TextSyntax = syntax
        if syntax in ["ST", "ALL"]:
            self.Editor.SetMarginType(2, wx.stc.STC_MARGIN_SYMBOL)
            self.Editor.SetMarginMask(2, wx.stc.STC_MASK_FOLDERS)
            self.Editor.SetMarginSensitive(2, 1)
            self.Editor.SetMarginWidth(2, 12)
            if syntax == "ST":
                # NOTE(review): BlockEndKeywords is assigned the *START*
                # keyword list in both branches -- confirm whether a matching
                # END keyword list exists in plcopen.structures.
                self.BlockStartKeywords = ST_BLOCK_START_KEYWORDS
                self.BlockEndKeywords = ST_BLOCK_START_KEYWORDS
            else:
                self.BlockStartKeywords = IEC_BLOCK_START_KEYWORDS
                self.BlockEndKeywords = IEC_BLOCK_START_KEYWORDS
        else:
            self.BlockStartKeywords = []
            self.BlockEndKeywords = []
    def SetKeywords(self, keywords):
        """Install the (upper-cased) keyword list and re-style the buffer."""
        self.Keywords = [keyword.upper() for keyword in keywords]
        self.Colourise(0, -1)
    def RefreshJumpList(self):
        """Rebuild the list of IL jump labels found in the current text."""
        if self.TextSyntax == "IL":
            self.Jumps = [jump.upper() for jump in LABEL_MODEL.findall(self.GetText())]
    # Buffer the last model state
    def RefreshBuffer(self):
        """Snapshot the project into the undo buffer and refresh the
        title/menus that reflect the modified state."""
        self.Controler.BufferProject()
        if self.ParentWindow:
            self.ParentWindow.RefreshTitle()
            self.ParentWindow.RefreshFileMenu()
            self.ParentWindow.RefreshEditMenu()
    def StartBuffering(self):
        """Open an undo-buffer group on the controller and refresh the
        title/menus that reflect the modified state."""
        self.Controler.StartBuffering()
        if self.ParentWindow:
            self.ParentWindow.RefreshTitle()
            self.ParentWindow.RefreshFileMenu()
            self.ParentWindow.RefreshEditMenu()
    def ResetBuffer(self):
        """Close any undo-buffer group opened by OnModification."""
        if self.CurrentAction is not None:
            self.Controler.EndBuffering()
            self.CurrentAction = None
    def GetBufferState(self):
        """Return the controller's (undo_available, redo_available) pair;
        debug viewers and "ALL" syntax viewers never allow undo/redo."""
        if not self.Debug and self.TextSyntax != "ALL":
            return self.Controler.GetBufferState()
        return False, False
    def Undo(self):
        """Load the previous buffered project state (no-op in debug/ALL)."""
        if not self.Debug and self.TextSyntax != "ALL":
            self.Controler.LoadPrevious()
            self.ParentWindow.CloseTabsWithoutModel()

    def Redo(self):
        """Load the next buffered project state (no-op in debug/ALL)."""
        if not self.Debug and self.TextSyntax != "ALL":
            self.Controler.LoadNext()
            self.ParentWindow.CloseTabsWithoutModel()
    def HasNoModel(self):
        """True when the edited element no longer exists in the project
        (e.g. removed by an undo); always False in debug/ALL mode."""
        if not self.Debug and self.TextSyntax != "ALL":
            return self.Controler.GetEditedElement(self.TagName) is None
        return False
    def RefreshView(self, variablepanel=True):
        """Reload the text from the model (preserving scroll position and an
        approximated caret position), then rebuild the variable/type/function
        tables used by the lexer and re-style everything."""
        EditorPanel.RefreshView(self, variablepanel)
        if self.Controler is not None:
            self.ResetBuffer()
            self.DisableEvents = True
            old_cursor_pos = self.GetCurrentPos()
            line = self.Editor.GetFirstVisibleLine()
            column = self.Editor.GetXOffset()
            old_text = self.GetText()
            new_text = self.Controler.GetEditedElementText(self.TagName, self.Debug)
            if old_text != new_text:
                self.SetText(new_text)
                # Map the old caret position onto the new text.
                new_cursor_pos = GetCursorPos(old_text, new_text)
                self.Editor.LineScroll(column, line)
                if new_cursor_pos is not None:
                    self.Editor.GotoPos(new_cursor_pos)
                else:
                    self.Editor.GotoPos(old_cursor_pos)
                self.RefreshJumpList()
                self.Editor.EmptyUndoBuffer()
            self.DisableEvents = False
            self.RefreshVariableTree()
            self.TypeNames = [typename.upper() for typename in self.Controler.GetDataTypes(self.TagName, True, self.Debug)]
            self.EnumeratedValues = [value.upper() for value in self.Controler.GetEnumeratedDataValues()]
            # Collect callable function interfaces (inputs/outputs + EN/ENO),
            # merging overloads of the same name.
            self.Functions = {}
            for category in self.Controler.GetBlockTypes(self.TagName, self.Debug):
                for blocktype in category["list"]:
                    blockname = blocktype["name"].upper()
                    if blocktype["type"] == "function" and blockname not in self.Keywords and blockname not in self.Variables.keys():
                        interface = dict([(name, {}) for name, _type, _modifier in blocktype["inputs"] + blocktype["outputs"] if name != ''])
                        for param in ["EN", "ENO"]:
                            if param not in interface:
                                interface[param] = {}
                        if blockname in self.Functions:
                            self.Functions[blockname]["interface"].update(interface)
                            self.Functions[blockname]["extensible"] |= blocktype["extensible"]
                        else:
                            self.Functions[blockname] = {"interface": interface,
                                                         "extensible": blocktype["extensible"]}
            self.Colourise(0, -1)
    def RefreshVariableTree(self):
        """Rebuild self.Variables, the nested upper-cased name tree of all
        variables visible in the edited element; for functions (and IL
        transitions) the element's own name maps to its return-value tree."""
        words = self.TagName.split("::")
        self.Variables = self.GenerateVariableTree([
            (variable.Name, variable.Type, variable.Tree)
            for variable in
            self.Controler.GetEditedElementInterfaceVars(
                self.TagName, True, self.Debug)
        ])
        # NOTE(review): `or ... and` -- `and` binds tighter, so this reads
        # "is a function, OR (is a 'T' tag AND syntax is IL)"; confirm that
        # grouping is intended.
        if self.Controler.GetEditedElementType(self.TagName, self.Debug)[1] == "function" or words[0] == "T" and self.TextSyntax == "IL":
            return_type, (var_tree, _var_dimension) = self.Controler.GetEditedElementInterfaceReturnType(self.TagName, True, self.Debug)
            if return_type is not None:
                self.Variables[words[-1].upper()] = self.GenerateVariableTree(var_tree)
            else:
                self.Variables[words[-1].upper()] = {}
def GenerateVariableTree(self, list):
tree = {}
for var_name, _var_type, (var_tree, _var_dimension) in list:
tree[var_name.upper()] = self.GenerateVariableTree(var_tree)
return tree
def IsValidVariable(self, name, context):
return context is not None and context.get(name, None) is not None
def IsCallParameter(self, name, call):
    """Return a truthy value when `name` is a parameter of the call being
    highlighted: either declared in its interface, or matching the
    extensible-parameter pattern when the block is extensible."""
    if call is None:
        return False
    upper_name = name.upper()
    if call["interface"].get(upper_name) is not None:
        return True
    # Short-circuits: the regex is only consulted for extensible blocks.
    return call["extensible"] and EXTENSIBLE_PARAMETER.match(upper_name) is not None
def RefreshLineFolding(self, line_number):
    """Recompute the Scintilla fold level for one line (ST/ALL syntaxes only).

    Fold level = STC_FOLDLEVELBASE + line indentation; block-start keywords
    mark fold headers, block-end keywords inherit the enclosing level.
    """
    if self.TextSyntax in ["ST", "ALL"]:
        level = wx.stc.STC_FOLDLEVELBASE + self.Editor.GetLineIndentation(line_number)
        line = self.Editor.GetLine(line_number).strip()
        if line == "":
            # Blank line: inherit the previous line's level (or the fold
            # parent's level if the previous line closed a block).
            if line_number > 0:
                if LineStartswith(self.Editor.GetLine(line_number - 1).strip(), self.BlockEndKeywords):
                    level = self.Editor.GetFoldLevel(self.Editor.GetFoldParent(line_number - 1)) & wx.stc.STC_FOLDLEVELNUMBERMASK
                else:
                    level = self.Editor.GetFoldLevel(line_number - 1) & wx.stc.STC_FOLDLEVELNUMBERMASK
            if level != wx.stc.STC_FOLDLEVELBASE:
                level |= wx.stc.STC_FOLDLEVELWHITEFLAG
        elif LineStartswith(line, self.BlockStartKeywords):
            # Line opens a block: mark it as a fold header.
            level |= wx.stc.STC_FOLDLEVELHEADERFLAG
        elif LineStartswith(line, self.BlockEndKeywords):
            if LineStartswith(self.Editor.GetLine(line_number - 1).strip(), self.BlockEndKeywords):
                level = self.Editor.GetFoldLevel(self.Editor.GetFoldParent(line_number - 1)) & wx.stc.STC_FOLDLEVELNUMBERMASK
            else:
                level = self.Editor.GetFoldLevel(line_number - 1) & wx.stc.STC_FOLDLEVELNUMBERMASK
        self.Editor.SetFoldLevel(line_number, level)
def OnStyleNeeded(self, event):
    """Scintilla styling callback: lex the text between the last styled
    position and event.GetPosition() with a hand-written scanner for IEC
    61131-3 textual languages, applying STC_PLC_* styles for keywords,
    variables, call parameters, functions, jumps, numbers, strings,
    comments and pragmas, and refreshing fold levels line by line."""
    self.TextChanged = True
    line_number = self.Editor.LineFromPosition(self.Editor.GetEndStyled())
    if line_number == 0:
        start_pos = last_styled_pos = 0
    else:
        start_pos = last_styled_pos = self.Editor.GetLineEndPosition(line_number - 1) + 1
    self.RefreshLineFolding(line_number)
    end_pos = event.GetPosition()
    self.StartStyling(start_pos, 0xff)
    # Scanner state: `current_context` is the variable tree used for member
    # resolution, `current_call` the function whose parameters are being
    # highlighted, `line` the text of the current line so far, `word` the
    # token being accumulated.
    current_context = self.Variables
    current_call = None
    current_pos = last_styled_pos
    state = SPACE
    line = ""
    word = ""
    while current_pos < end_pos:
        char = chr(self.Editor.GetCharAt(current_pos)).upper()
        line += char
        if char == NEWLINE:
            # End of line: flush the pending token and reset the member
            # context; COMMENT and DPRAGMA states continue across lines.
            self.ContextStack = []
            current_context = self.Variables
            if state == COMMENT:
                self.SetStyling(current_pos - last_styled_pos, STC_PLC_COMMENT)
            elif state == NUMBER:
                self.SetStyling(current_pos - last_styled_pos, STC_PLC_NUMBER)
            elif state == WORD:
                if word in self.Keywords or word in self.TypeNames:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_WORD)
                elif self.IsValidVariable(word, current_context):
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_VARIABLE)
                elif self.IsCallParameter(word, current_call):
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_PARAMETER)
                elif word in self.Functions:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_FUNCTION)
                elif self.TextSyntax == "IL" and word in self.Jumps:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_JUMP)
                elif word in self.EnumeratedValues:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_NUMBER)
                else:
                    # Unknown identifier: default style plus an error
                    # indicator, unless the caret sits inside the word.
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
                    if word not in ["]", ")"] and (self.GetCurrentPos() < last_styled_pos or self.GetCurrentPos() > current_pos):
                        self.StartStyling(last_styled_pos, wx.stc.STC_INDICS_MASK)
                        self.SetStyling(current_pos - last_styled_pos, wx.stc.STC_INDIC0_MASK)
                        self.StartStyling(current_pos, 0xff)
            else:
                self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
            last_styled_pos = current_pos
            if (state != DPRAGMA) and (state != COMMENT):
                state = SPACE
            line = ""
            line_number += 1
            self.RefreshLineFolding(line_number)
        elif line.endswith("(*") and state != COMMENT:
            # "(*" opens an IEC comment.
            self.SetStyling(current_pos - last_styled_pos - 1, STC_PLC_EMPTY)
            last_styled_pos = current_pos
            if state == WORD:
                current_context = self.Variables
            state = COMMENT
        elif line.endswith("{") and state not in [PRAGMA, DPRAGMA]:
            # "{" opens a pragma section.
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
            last_styled_pos = current_pos
            if state == WORD:
                current_context = self.Variables
            state = PRAGMA
        elif line.endswith("{{") and state == PRAGMA:
            # "{{" upgrades a pragma to a double pragma (may span lines).
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
            last_styled_pos = current_pos
            state = DPRAGMA
        elif state == COMMENT:
            if line.endswith("*)"):
                # Comment closed: style it and restore any pending call.
                self.SetStyling(current_pos - last_styled_pos + 2, STC_PLC_COMMENT)
                last_styled_pos = current_pos + 1
                state = SPACE
                if len(self.CallStack) > 0:
                    current_call = self.CallStack.pop()
                else:
                    current_call = None
        elif state == PRAGMA:
            if line.endswith("}"):
                self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
                last_styled_pos = current_pos
                state = SPACE
        elif state == DPRAGMA:
            if line.endswith("}}"):
                self.SetStyling(current_pos - last_styled_pos + 1, STC_PLC_EMPTY)
                last_styled_pos = current_pos + 1
                state = SPACE
        elif (line.endswith("'") or line.endswith('"')) and state not in [STRING, WSTRING]:
            # Opening quote: ' starts STRING, " starts WSTRING.
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
            last_styled_pos = current_pos
            if state == WORD:
                current_context = self.Variables
            if line.endswith("'"):
                state = STRING
            else:
                state = WSTRING
        elif state == STRING:
            # "$'" is the IEC escape for a quote inside a string literal.
            if line.endswith("'") and not line.endswith("$'"):
                self.SetStyling(current_pos - last_styled_pos + 1, STC_PLC_STRING)
                last_styled_pos = current_pos + 1
                state = SPACE
        elif state == WSTRING:
            if line.endswith('"') and not line.endswith('$"'):
                self.SetStyling(current_pos - last_styled_pos + 1, STC_PLC_STRING)
                last_styled_pos = current_pos + 1
                state = SPACE
        elif char in LETTERS:
            if state == NUMBER:
                # A letter after digits means a typed literal (e.g. 16#FF):
                # restart token accumulation as a word.
                word = "#"
                state = WORD
            elif state == SPACE:
                self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
                word = char
                last_styled_pos = current_pos
                state = WORD
            else:
                word += char
        elif char in NUMBERS or char == '.' and state != WORD:
            if state == SPACE:
                self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
                last_styled_pos = current_pos
                state = NUMBER
            elif state == WORD and char != '.':
                word += char
        elif char == '(' and state == SPACE:
            # Parenthesis with no preceding word: push the current call,
            # nothing new is being called.
            self.CallStack.append(current_call)
            current_call = None
        else:
            # Any other delimiter terminates the pending token.
            if state == WORD:
                if word in self.Keywords or word in self.TypeNames:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_WORD)
                elif self.IsValidVariable(word, current_context):
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_VARIABLE)
                elif self.IsCallParameter(word, current_call):
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_PARAMETER)
                elif word in self.Functions:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_FUNCTION)
                elif self.TextSyntax == "IL" and word in self.Jumps:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_JUMP)
                elif word in self.EnumeratedValues:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_NUMBER)
                else:
                    self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
                    if word not in ["]", ")"] and (self.GetCurrentPos() < last_styled_pos or self.GetCurrentPos() > current_pos):
                        self.StartStyling(last_styled_pos, wx.stc.STC_INDICS_MASK)
                        self.SetStyling(current_pos - last_styled_pos, wx.stc.STC_INDIC0_MASK)
                        self.StartStyling(current_pos, 0xff)
                if char == '.':
                    # Member access: descend into the variable subtree.
                    if word != "]":
                        if current_context is not None:
                            current_context = current_context.get(word, None)
                        else:
                            current_context = None
                elif char == '(':
                    # Word followed by '(': a function or FB instance call.
                    self.CallStack.append(current_call)
                    current_call = self.Functions.get(word, None)
                    if current_call is None and self.IsValidVariable(word, current_context):
                        current_call = {"interface": current_context.get(word, {}),
                                        "extensible": False}
                    current_context = self.Variables
                else:
                    if char == '[' and current_context is not None:
                        # Array index: save the subtree to restore at ']'.
                        self.ContextStack.append(current_context.get(word, None))
                    current_context = self.Variables
                word = ""
                last_styled_pos = current_pos
                state = SPACE
            elif state == NUMBER:
                self.SetStyling(current_pos - last_styled_pos, STC_PLC_NUMBER)
                last_styled_pos = current_pos
                state = SPACE
            if char == ']':
                if len(self.ContextStack) > 0:
                    current_context = self.ContextStack.pop()
                else:
                    current_context = self.Variables
                word = char
                state = WORD
            elif char == ')':
                current_context = self.Variables
                if len(self.CallStack) > 0:
                    current_call = self.CallStack.pop()
                else:
                    current_call = None
                word = char
                state = WORD
        current_pos += 1
    # Flush whatever token was pending when the styled range ended.
    if state == COMMENT:
        self.SetStyling(current_pos - last_styled_pos + 2, STC_PLC_COMMENT)
    elif state == NUMBER:
        self.SetStyling(current_pos - last_styled_pos, STC_PLC_NUMBER)
    elif state == WORD:
        if word in self.Keywords or word in self.TypeNames:
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_WORD)
        elif self.IsValidVariable(word, current_context):
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_VARIABLE)
        elif self.IsCallParameter(word, current_call):
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_PARAMETER)
        # NOTE(review): unlike the two chains above, this final chain gates
        # Functions (not Jumps) on IL syntax — confirm this asymmetry is
        # intentional.
        elif self.TextSyntax == "IL" and word in self.Functions:
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_FUNCTION)
        elif word in self.Jumps:
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_JUMP)
        elif word in self.EnumeratedValues:
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_NUMBER)
        else:
            self.SetStyling(current_pos - last_styled_pos, STC_PLC_EMPTY)
    else:
        self.SetStyling(current_pos - start_pos, STC_PLC_EMPTY)
    self.ShowHighlights(start_pos, end_pos)
    event.Skip()
def OnMarginClick(self, event):
    """Toggle code folding when the fold margin (margin #2) is clicked."""
    if event.GetMargin() == 2:
        clicked_line = self.Editor.LineFromPosition(event.GetPosition())
        is_fold_header = self.Editor.GetFoldLevel(clicked_line) & wx.stc.STC_FOLDLEVELHEADERFLAG
        if is_fold_header:
            self.Editor.ToggleFold(clicked_line)
    event.Skip()
def OnUpdateUI(self, event):
    """Mirror the editor's current selection into the parent window's copy buffer."""
    selection = self.Editor.GetSelectedText()
    if self.ParentWindow and selection != "":
        self.ParentWindow.SetCopyBuffer(selection, True)
    event.Skip()
def Cut(self):
    """Cut the selection, then push the new text to the model and buffer."""
    self.ResetBuffer()
    # Events are muted around the STC cut command so the change
    # notification does not re-enter while the text is being modified.
    self.DisableEvents = True
    self.Editor.CmdKeyExecute(wx.stc.STC_CMD_CUT)
    self.DisableEvents = False
    self.RefreshModel()
    self.RefreshBuffer()
def Copy(self):
    """Copy the selection and let the parent window refresh its Edit menu."""
    self.Editor.CmdKeyExecute(wx.stc.STC_CMD_COPY)
    if self.ParentWindow:
        self.ParentWindow.RefreshEditMenu()
def Paste(self):
    """Paste the clipboard, then push the new text to the model and buffer."""
    self.ResetBuffer()
    # Events muted around the STC paste command (same pattern as Cut).
    self.DisableEvents = True
    self.Editor.CmdKeyExecute(wx.stc.STC_CMD_PASTE)
    self.DisableEvents = False
    self.RefreshModel()
    self.RefreshBuffer()
def Search(self, criteria):
    # Delegate the search to the controller for the POU identified by TagName.
    return self.Controler.SearchInPou(self.TagName, criteria, self.Debug)
def Find(self, direction, search_params):
    """Move the "current find" highlight by `direction` (+1/-1) through the
    search results, recomputing the result list when the parameters changed."""
    if self.SearchParams != search_params:
        # New search: drop previous result highlights and rebuild the list.
        self.ClearHighlights(SEARCH_RESULT_HIGHLIGHT)
        self.SearchParams = search_params
        self.SearchResults = [
            (infos[1:], start, end, SEARCH_RESULT_HIGHLIGHT)
            for infos, start, end, _text in
            self.Search(search_params)]
        self.CurrentFindHighlight = None
    if len(self.SearchResults) > 0:
        if self.CurrentFindHighlight is not None:
            old_idx = self.SearchResults.index(self.CurrentFindHighlight)
            if self.SearchParams["wrap"]:
                # Wrap around past either end of the result list.
                idx = (old_idx + direction) % len(self.SearchResults)
            else:
                idx = max(0, min(old_idx + direction, len(self.SearchResults) - 1))
            if idx != old_idx:
                self.RemoveHighlight(*self.CurrentFindHighlight)
                self.CurrentFindHighlight = self.SearchResults[idx]
                self.AddHighlight(*self.CurrentFindHighlight)
        else:
            # First Find after a (re)search: highlight the first match.
            self.CurrentFindHighlight = self.SearchResults[0]
            self.AddHighlight(*self.CurrentFindHighlight)
    else:
        if self.CurrentFindHighlight is not None:
            self.RemoveHighlight(*self.CurrentFindHighlight)
        self.CurrentFindHighlight = None
def RefreshModel(self):
    """Push the full editor text back into the controller, re-colourise,
    and invalidate cached jump/search state."""
    self.RefreshJumpList()
    self.Colourise(0, -1)
    self.Controler.SetEditedElementText(self.TagName, self.GetText())
    self.ResetSearchResults()
def OnKeyDown(self, event):
    """Keyboard handler: Ctrl+Space code completion, auto-indent on Enter,
    and smart unindent on Backspace (the latter two for ST/ALL syntaxes).

    Fixes vs. the previous version:
    - On Python 3, ``self.Keywords + self.Variables.keys() + ...`` raised
      TypeError (dict views do not support ``+``); keys are now listed.
    - ``kw = self.Functions`` assigned a dict, so ``kw.sort()`` raised
      AttributeError when the last word was empty; now a list copy is used.
    - ``kw.sort()`` mutated the shared ``self.Keywords``/``self.Jumps``
      lists in place; copies are sorted instead.
    """
    key = event.GetKeyCode()
    if self.Controler is not None:
        if self.Editor.CallTipActive():
            self.Editor.CallTipCancel()
        key_handled = False
        # Text of the current line up to the caret, tabs normalized to spaces.
        line = self.Editor.GetCurrentLine()
        if line == 0:
            start_pos = 0
        else:
            start_pos = self.Editor.GetLineEndPosition(line - 1) + 1
        end_pos = self.GetCurrentPos()
        lineText = self.Editor.GetTextRange(start_pos, end_pos).replace("\t", " ")
        # Code completion
        if key == wx.WXK_SPACE and event.ControlDown():
            words = lineText.split(" ")
            # Drop empty tokens except a trailing one (caret after a space).
            words = [word for i, word in enumerate(words) if word != '' or i == len(words) - 1]
            kw = []
            if self.TextSyntax == "IL":
                if len(words) == 1:
                    # Copy so that kw.sort() below cannot mutate self.Keywords.
                    kw = list(self.Keywords)
                elif len(words) == 2:
                    if words[0].upper() in ["CAL", "CALC", "CALNC"]:
                        # list() materializes the function-name keys.
                        kw = list(self.Functions)
                    elif words[0].upper() in ["JMP", "JMPC", "JMPNC"]:
                        kw = list(self.Jumps)
                    else:
                        kw = list(self.Variables.keys())
            else:
                # dict.keys() views cannot be '+'-concatenated on Python 3.
                kw = list(self.Keywords) + list(self.Variables.keys()) + list(self.Functions.keys())
            if len(kw) > 0:
                if len(words[-1]) > 0:
                    kw = [keyword for keyword in kw if keyword.startswith(words[-1])]
                kw.sort()
                self.Editor.AutoCompSetIgnoreCase(True)
                self.Editor.AutoCompShow(len(words[-1]), " ".join(kw))
            key_handled = True
        elif key == wx.WXK_RETURN or key == wx.WXK_NUMPAD_ENTER:
            if self.TextSyntax in ["ST", "ALL"]:
                # Keep the current indentation; indent one extra 2-space
                # level after a block-opening keyword.
                indent = self.Editor.GetLineIndentation(line)
                if LineStartswith(lineText.strip(), self.BlockStartKeywords):
                    indent = (indent // 2 + 1) * 2
                self.Editor.AddText("\n" + " " * indent)
                key_handled = True
        elif key == wx.WXK_BACK:
            if self.TextSyntax in ["ST", "ALL"]:
                if not self.Editor.GetSelectedText():
                    # On a whitespace-only line, snap back to the previous
                    # 2-space indentation stop instead of deleting one space.
                    indent = self.Editor.GetColumn(self.Editor.GetCurrentPos())
                    if lineText.strip() == "" and len(lineText) > 0 and indent > 0:
                        self.Editor.DelLineLeft()
                        self.Editor.AddText(" " * ((max(0, indent - 1) // 2) * 2))
                        key_handled = True
        if not key_handled:
            event.Skip()
    else:
        event.Skip()
def OnKillFocus(self, event):
    # Close any open autocompletion popup when the editor loses focus.
    self.Editor.AutoCompCancel()
    event.Skip()
# -------------------------------------------------------------------------------
# Highlights showing functions
# -------------------------------------------------------------------------------
def OnRefreshHighlightsTimer(self, event):
    # Debounced highlight refresh: triggered by RefreshHighlightsTimer.
    self.RefreshView()
    event.Skip()
def ClearHighlights(self, highlight_type=None):
    """Drop stored highlights (all of them, or only one mapped type) and
    refresh the view."""
    EditorPanel.ClearHighlights(self, highlight_type)
    if highlight_type is None:
        self.Highlights = []
    else:
        internal_type = HIGHLIGHT_TYPES.get(highlight_type, None)
        if internal_type is not None:
            remaining = []
            for infos, start, end, highlight in self.Highlights:
                if highlight != internal_type:
                    remaining.append((infos, start, end, highlight))
            self.Highlights = remaining
    self.RefreshView()
def AddHighlight(self, infos, start, end, highlight_type):
    """Register a highlight; body highlights are stored locally, the caret
    is moved to the highlight start, and a debounced refresh is scheduled."""
    EditorPanel.AddHighlight(self, infos, start, end, highlight_type)
    # Map the public highlight type to the internal style id (or None).
    highlight_type = HIGHLIGHT_TYPES.get(highlight_type, None)
    if infos[0] == "body" and highlight_type is not None:
        self.Highlights.append((infos[1], start, end, highlight_type))
        self.Editor.GotoPos(self.Editor.PositionFromLine(start[0]) + start[1])
        self.RefreshHighlightsTimer.Start(int(REFRESH_HIGHLIGHT_PERIOD * 1000), oneShot=True)
def RemoveHighlight(self, infos, start, end, highlight_type):
    """Remove a previously registered body highlight and schedule a refresh."""
    EditorPanel.RemoveHighlight(self, infos, start, end, highlight_type)
    highlight_type = HIGHLIGHT_TYPES.get(highlight_type, None)
    if infos[0] == "body" and highlight_type is not None and \
       (infos[1], start, end, highlight_type) in self.Highlights:
        self.Highlights.remove((infos[1], start, end, highlight_type))
        self.RefreshHighlightsTimer.Start(int(REFRESH_HIGHLIGHT_PERIOD * 1000), oneShot=True)
def ShowHighlights(self, start_pos, end_pos):
    """Apply stored highlight styles to any highlight overlapping the
    [start_pos, end_pos) range just styled.

    `start`/`end` are (line, column) pairs; `indent` is subtracted to map
    model coordinates to editor positions.
    """
    for indent, start, end, highlight_type in self.Highlights:
        # Convert (line, column) to an absolute editor position.
        if start[0] == 0:
            highlight_start_pos = start[1] - indent
        else:
            highlight_start_pos = self.Editor.GetLineEndPosition(start[0] - 1) + start[1] - indent + 1
        if end[0] == 0:
            highlight_end_pos = end[1] - indent + 1
        else:
            highlight_end_pos = self.Editor.GetLineEndPosition(end[0] - 1) + end[1] - indent + 2
        if highlight_start_pos < end_pos and highlight_end_pos > start_pos:
            # Style the highlight span, then restyle the remainder of the
            # document with the default style.
            self.StartStyling(highlight_start_pos, 0xff)
            self.SetStyling(highlight_end_pos - highlight_start_pos, highlight_type)
            self.StartStyling(highlight_start_pos, 0x00)
            until_end = max(0, len(self.Editor.GetText()) - highlight_end_pos)
            self.SetStyling(until_end, wx.stc.STC_STYLE_DEFAULT)
| thiagoralves/OpenPLC_Editor | editor/editors/TextViewer.py | TextViewer.py | py | 45,065 | python | en | code | 307 | github-code | 1 | [
{
"api_name": "six.moves.xrange",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "six.moves.xrange",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_n... |
33843097301 | from transformers import ColorTransformer
from ..blend.blend import *
from ..palettes.core_palette import *
from ..scheme.scheme import *
from .image_utils import *
from .string_utils import *
# /**
# * Generate custom color group from source and target color
# *
# * @param source Source color
# * @param color Custom color
# * @return Custom color group
# *
# * @link https://m3.material.io/styles/color/the-color-system/color-roles
# */
# NOTE: Changes made to output format to be Dictionary
def customColor(source, color):
    """Build a custom color group from a source color and a custom color.

    `color` is a dict with keys "value" and "blend"; when "blend" is truthy
    the value is first harmonized toward `source`. Returns a dict with the
    light and dark tone groups derived from the a1 tonal palette.
    """
    value = color["value"]
    if color["blend"]:
        value = Blend.harmonize(value, source)
    tones = CorePalette.of(value).a1
    light_group = {
        "color": tones.tone(40),
        "onColor": tones.tone(100),
        "colorContainer": tones.tone(90),
        "onColorContainer": tones.tone(10),
    }
    dark_group = {
        "color": tones.tone(80),
        "onColor": tones.tone(20),
        "colorContainer": tones.tone(30),
        "onColorContainer": tones.tone(90),
    }
    return {
        "color": color,
        "value": value,
        "light": light_group,
        "dark": dark_group,
    }
# /**
# * Generate a theme from a source color
# *
# * @param source Source color
# * @param customColors Array of custom colors
# * @return Theme object
# */
# NOTE: Changes made to output format to be Dictionary
def themeFromSourceColor(source: int, customColors=None):
    """Generate a theme dictionary (schemes, palettes, custom colors) from
    a single source color.

    Args:
        source: Source color as an ARGB integer.
        customColors: Optional iterable of custom-color dicts (see
            ``customColor``). Defaults to no custom colors.

    Returns:
        Dict with "source", "schemes" (light/dark), "palettes" and
        "customColors" keys.
    """
    # Use a None sentinel instead of a mutable [] default, which would be
    # shared across every call to this function.
    if customColors is None:
        customColors = []
    palette = CorePalette.of(source)
    return {
        "source": source,
        "schemes": {
            "light": Scheme.light(source),
            "dark": Scheme.dark(source),
        },
        "palettes": {
            "primary": palette.a1,
            "secondary": palette.a2,
            "tertiary": palette.a3,
            "neutral": palette.n1,
            "neutralVariant": palette.n2,
            "error": palette.error,
        },
        "customColors": [customColor(source, c) for c in customColors],
    }
# /**
# * Generate a theme from an image source
# *
# * @param image Image element
# * @param customColors Array of custom colors
# * @return Theme object
# */
def themeFromImage(image, customColors=None):
    """Generate a theme from an image.

    Returns a (theme, hex_colors) tuple: the theme built from the most
    prominent extracted color, plus all extracted colors as hex strings.
    """
    # None sentinel instead of a shared mutable [] default.
    if customColors is None:
        customColors = []
    colors = topColorsFromImage(image)
    return themeFromSourceColor(colors[0], customColors), [
        ColorTransformer.argb_to_hex(color) for color in colors
    ]
# Not really applicable to python CLI
# # /**
# # * Apply a theme to an element
# # *
# # * @param theme Theme object
# # * @param options Options
# # */
# export function applyTheme(theme, options) {
# var _a;
# const target = (options === null || options === void 0 ? void 0 : options.target) || document.body;
# const isDark = (_a = options === null || options === void 0 ? void 0 : options.dark) !== null && _a !== void 0 ? _a : false;
# const scheme = isDark ? theme.schemes.dark : theme.schemes.light;
# for (const [key, value] of Object.entries(scheme.toJSON())) {
# const token = key.replace(/([a-z])([A-Z])/g, "$1-$2").toLowerCase();
# const color = hexFromArgb(value);
# target.style.setProperty(`--md-sys-color-${token}`, color);
# }
# }
# //# sourceMappingURL=theme_utils.js.map
| DimitrisMilonopoulos/mitsugen | src/material_color_utilities_python/utils/theme_utils.py | theme_utils.py | py | 3,222 | python | en | code | 88 | github-code | 1 | [
{
"api_name": "transformers.ColorTransformer.argb_to_hex",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "transformers.ColorTransformer",
"line_number": 84,
"usage_type": "name"
}
] |
74416921314 | from collections import defaultdict
# Utility function to create dictionary
def multi_dict(K, type):
    """Build a K-level nested defaultdict whose leaves default to `type`.

    NOTE(review): the second parameter shadows the builtin ``type``; kept
    for interface compatibility.
    """
    if K == 1:
        return defaultdict(type)
    # Each missing key lazily creates a (K-1)-level nested dictionary.
    return defaultdict(lambda: multi_dict(K - 1, type))
# Read the puzzle input: one CPU instruction per line.
with open('input-10.txt') as f:
    lines = [row.strip() for row in f]
print(lines)

X=1           # the single CPU register
cycles = []   # value of X recorded for each elapsed cycle
def cycle():
    # Record the current value of register X for this cycle.
    # (`global` is not strictly needed for the .append() mutation.)
    global cycles
    cycles.append(X)
def execute_instruction(instr):
    """Execute one tokenized instruction, ticking the clock as it runs."""
    global X
    opcode = instr[0]
    if opcode == 'addx':
        # addx takes two cycles, then adds its operand to X.
        print(f"addx {instr[1]}")
        cycle()
        cycle()
        X += int(instr[1])
    if opcode == "noop":
        # noop consumes a single cycle.
        cycle()
# Run the whole program, collecting X for every cycle.
for line in lines:
    instr = line.split(' ')
    execute_instruction(instr)
print(cycles)

# Sum of signal strengths (cycle_number * X) at cycles 20, 60, 100, ...
# — index 19 is cycle 20 because `cycles` is 0-based.
ss=0
for ix in range(19,len(cycles),40):
    print(cycles[ix], cycles[ix] * (ix+1))
    ss += cycles[ix] * (ix+1)
print(ss)
| fshsweden/AdventOfCode2022 | 10a.py | 10a.py | py | 805 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
}
] |
13382414004 | import unittest
import subprocess
import os
import numpy as np
from openfermion import (
QubitOperator, InteractionOperator, FermionOperator, IsingOperator,
get_interaction_operator, hermitian_conjugated
)
from zquantum.core.circuit import build_uniform_param_grid
from zquantum.core.utils import create_object
from ._utils import evaluate_operator_for_parameter_grid
from ._io import (
load_qubit_operator, save_qubit_operator, load_interaction_operator,
save_interaction_operator, convert_qubitop_to_dict, convert_dict_to_qubitop,
convert_interaction_op_to_dict, convert_dict_to_interaction_op,
convert_isingop_to_dict, convert_dict_to_isingop,
save_ising_operator, load_ising_operator, save_parameter_grid_evaluation
)
class TestQubitOperator(unittest.TestCase):
    """Round-trip tests for the qubit/interaction/Ising operator (de)serializers.

    Improvements over the previous version:
    - tearDown no longer shells out to `rm` (which failed noisily after every
      test that had not created the file, and is not portable to Windows);
      it now removes the artifact with os.remove only when it exists.
    - File-based tests register cleanup with addCleanup so the temporary
      JSON files are removed even when an assertion fails.
    """

    def test_qubitop_to_dict_io(self):
        """A QubitOperator survives dict conversion and back unchanged."""
        # Given
        qubit_op = QubitOperator(((0, 'Y'), (1, 'X'), (2, 'Z'), (4, 'X')), 3.j)
        qubit_op += hermitian_conjugated(qubit_op)
        # When
        qubitop_dict = convert_qubitop_to_dict(qubit_op)
        recreated_qubit_op = convert_dict_to_qubitop(qubitop_dict)
        # Then
        self.assertEqual(recreated_qubit_op, qubit_op)

    def test_qubit_operator_io(self):
        """A QubitOperator survives a JSON-file round trip."""
        # Given
        qubit_op = QubitOperator(((0, 'Y'), (3, 'X'), (8, 'Z'), (11, 'X')), 3.j)
        # When
        save_qubit_operator(qubit_op, 'qubit_op.json')
        self.addCleanup(os.remove, 'qubit_op.json')
        loaded_op = load_qubit_operator('qubit_op.json')
        # Then
        self.assertEqual(qubit_op, loaded_op)

    def test_interaction_op_to_dict_io(self):
        """An InteractionOperator survives dict conversion and back."""
        # Given
        test_op = FermionOperator('1^ 2^ 3 4')
        test_op += hermitian_conjugated(test_op)
        interaction_op = get_interaction_operator(test_op)
        interaction_op.constant = 0.0
        # When
        interaction_op_dict = convert_interaction_op_to_dict(interaction_op)
        recreated_interaction_op = convert_dict_to_interaction_op(interaction_op_dict)
        # Then
        self.assertEqual(recreated_interaction_op, interaction_op)

    def test_interaction_operator_io(self):
        """An InteractionOperator survives a JSON-file round trip."""
        # Given
        test_op = FermionOperator('1^ 2^ 3 4')
        test_op += hermitian_conjugated(test_op)
        interaction_op = get_interaction_operator(test_op)
        interaction_op.constant = 0.0
        # When
        save_interaction_operator(interaction_op, 'interaction_op.json')
        self.addCleanup(os.remove, 'interaction_op.json')
        loaded_op = load_interaction_operator('interaction_op.json')
        # Then
        self.assertEqual(interaction_op, loaded_op)

    def test_qubitop_io(self):
        """JSON-file round trip for a multi-qubit QubitOperator."""
        # Given
        qubit_op = QubitOperator(((0, 'Y'), (1, 'X'), (2, 'Z'), (4, 'X')), 3.j)
        # When
        save_qubit_operator(qubit_op, 'qubit_op.json')
        self.addCleanup(os.remove, 'qubit_op.json')
        loaded_op = load_qubit_operator('qubit_op.json')
        # Then
        self.assertEqual(qubit_op, loaded_op)

    def test_isingop_to_dict_io(self):
        """An IsingOperator survives dict conversion and back."""
        # Given
        ising_op = IsingOperator('[] + 3[Z0 Z1] + [Z1 Z2]')
        # When
        isingop_dict = convert_isingop_to_dict(ising_op)
        recreated_isingop = convert_dict_to_isingop(isingop_dict)
        # Then
        self.assertEqual(recreated_isingop, ising_op)

    def test_isingop_io(self):
        """An IsingOperator survives a JSON-file round trip."""
        # Given
        ising_op = IsingOperator('[] + 3[Z0 Z1] + [Z1 Z2]')
        # When
        save_ising_operator(ising_op, 'ising_op.json')
        self.addCleanup(os.remove, 'ising_op.json')
        loaded_op = load_ising_operator('ising_op.json')
        # Then
        self.assertEqual(ising_op, loaded_op)

    def test_save_parameter_grid_evaluation(self):
        """Smoke test: evaluating an operator over a parameter grid can be saved."""
        # Given
        ansatz = {'ansatz_type': 'singlet UCCSD', 'ansatz_module': 'zquantum.qaoa.ansatz', 'ansatz_func': 'build_qaoa_circuit', 'ansatz_grad_func': 'build_qaoa_circuit_grads', 'supports_simple_shift_rule': False, 'ansatz_kwargs': {'hamiltonians': [{'schema': 'zapata-v1-qubit_op', 'terms': [{'pauli_ops': [], 'coefficient': {'real': 0.5}}, {'pauli_ops': [{'qubit': 1, 'op': 'Z'}], 'coefficient': {'real': 0.5}}]}, {'schema': 'zapata-v1-qubit_op', 'terms': [{'pauli_ops': [{'qubit': 0, 'op': 'X'}], 'coefficient': {'real': 1.0}}, {'pauli_ops': [{'qubit': 1, 'op': 'X'}], 'coefficient': {'real': 1.0}}]}]}, 'n_params': [2]}
        grid = build_uniform_param_grid(ansatz, 1, 0, np.pi, np.pi/10)
        backend = create_object({'module_name': 'zquantum.core.interfaces.mock_objects', 'function_name': 'MockQuantumSimulator'})
        op = QubitOperator('0.5 [] + 0.5 [Z1]')
        parameter_grid_evaluation = evaluate_operator_for_parameter_grid(ansatz, grid, backend, op)
        # When
        save_parameter_grid_evaluation(parameter_grid_evaluation, "parameter-grid-evaluation.json")
        # Then
        # TODO: load the file back and verify its contents.

    def tearDown(self):
        # Portable cleanup for the artifact left by
        # test_save_parameter_grid_evaluation; only remove it when present.
        if os.path.exists("parameter-grid-evaluation.json"):
            os.remove("parameter-grid-evaluation.json")
| wugaxp/qe-openfermion | src/python/qeopenfermion/_io_test.py | _io_test.py | py | 4,933 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "openfermion.QubitOperator",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "openfermion.hermitian_conjugated",
"line_number": 26,
"usage_type": "call"
},
{
... |
3513686248 | import streamlit as st
from streamlit_player import st_player
# Page layout: two columns (currently unused by the widgets below, which
# render full-width in order).
column1, column2 = st.columns(2)

st.subheader("Send Email Receipts using automation")
st_player("https://youtu.be/G3fTz6VnnTc")
st.divider()

st.subheader("Recording Videos using Flonnect")
st_player("https://youtu.be/id_Oj7cG0Hs")
st.divider()

st.subheader("Fund Raising Knowledge Transfer")
st_player("https://youtu.be/gWe0NEzDwGo")
st.divider()

st.subheader("Fund Raising Tracker For Filling/Tracking Tasks")
st_player("https://youtu.be/6NNCgoUWXo0")
st.divider()

# Hide Streamlit's default chrome. Fix: the hamburger-menu element's id is
# "MainMenu" — CSS id selectors are case-sensitive, so the previous
# "#Mainmenu" selector never matched and the menu stayed visible.
hide_st_style = """
            <style>
            #MainMenu {Visibility : hidden;}
            footer {Visibility : hidden;}
            header {Visibility : hidden;}
            </style>
            """
st.markdown(hide_st_style, unsafe_allow_html=True)
| madhuammulu8/FR-Analysis | pages/_👨🏽💻_Knowledge Transfer.py | _👨🏽💻_Knowledge Transfer.py | py | 725 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.columns",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "streamlit.subheader",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit_player.st_player",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streaml... |
20454510000 | ### This is the LSTM training module ###
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers import Bidirectional
import numpy as np
import pandas as pd
from matplotlib import pyplot
# Load the labelled request/offer tweet data set.
data = pd.read_csv('C:/Users/Swars/Desktop/FYP/FYP/processed_req_offer.csv')

# Encode the class labels as integers in one vectorized pass. The previous
# per-row loop wrote through chained indexing (data.label[i] = ...), which
# triggers pandas' SettingWithCopyWarning and can silently fail to update
# the underlying frame. `replace` leaves any unmapped value unchanged,
# matching the original loop's behavior.
data['label'] = data['label'].replace({'request': 0, 'offer': 1, 'RO': 2})
# Fit the tokenizer on the tweet text and convert tweets to index sequences.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(data['tweettext'])
# +1 because Keras reserves index 0 for padding.
vocabulary_size = len(tokenizer.word_index) + 1
sequences = tokenizer.texts_to_sequences(data['tweettext'])

# TRAINING USING GLOVE PRETRAINED
# Load the pre-trained 100-d GloVe vectors into a word -> vector lookup.
embeddings_index = dict()
# `with` guarantees the file is closed even if a line fails to parse
# (the previous open()/close() pair leaked the handle on error).
with open('C:/Users/Swars/Desktop/FYP/glove.6B.100d.txt', encoding='utf8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs

# Build the embedding matrix; rows for out-of-GloVe words stay all-zero.
embedding_matrix = np.zeros((vocabulary_size, 100))
for word, i in tokenizer.word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
# Pad/truncate every sequence to 30 tokens so batches are rectangular.
df = pad_sequences(sequences, maxlen=30)

# Bidirectional-LSTM classifier on top of frozen pre-trained GloVe embeddings.
model = Sequential()
model.add(Embedding(vocabulary_size, 100,weights=[embedding_matrix], input_length=30,trainable=False))
model.add(Bidirectional(LSTM(100,return_sequences=True, dropout=0.2, recurrent_dropout=0.2)))
model.add(Bidirectional(LSTM(50, dropout=0.2, recurrent_dropout=0.2)))
# NOTE(review): a single sigmoid output with binary_crossentropy models a
# 2-class problem, but the labels above include a third class (RO=2) —
# confirm whether RO rows are intended to be trained on.
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# 80/20 train/validation split over 20 epochs.
history=model.fit(df, np.array(data.label), validation_split=0.2, epochs=20)
model.summary()
# Sanity-check predictions on two hand-written example tweets.
X=['hi hike is donating rs 10 lacs towards kashmir flood relief go to menu gt rewards gt donate for kashmir and make it count kashmirflood','india offers help to pakistan on floods']
# NOTE(review): fit_on_texts() here EXTENDS the tokenizer's vocabulary with
# the test sentences; texts_to_sequences() alone is probably what was
# intended — confirm.
tokenizer.fit_on_texts(X)
sequence = tokenizer.texts_to_sequences(X)
#print sequences[0]
seq = pad_sequences(sequence, maxlen=30)
# NOTE(review): predict_classes() is removed in newer Keras releases; this
# relies on the version pinned for this project.
Y=model.predict_classes(seq)
print (Y)
print ("Training completed 100%")
#score=model.evaluate(xtest,ytest,batchsize)
# Plot training vs. validation accuracy per epoch.
pyplot.plot(history.history['acc'], label='train')
pyplot.plot(history.history['val_acc'], label='test')
pyplot.legend()
pyplot.show()
#plot_model(model, to_file='model_plot1.png', show_shapes=True, show_layer_names=True)
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.text.Tokenizer",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "nump... |
16241233028 | from rest_framework.permissions import (
DjangoModelPermissions, BasePermission, IsAdminUser, SAFE_METHODS)
class IsProfileOwnerOrReadOnly(BasePermission):
    """Object-level permission: anyone may read; only the profile's owning
    user may write. (Removed a leftover `print(obj)` debug statement.)"""

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) HTTP methods are always permitted.
        if request.method in SAFE_METHODS:
            return True
        return obj.user == request.user
class IsOwnerOrReadOnly(BasePermission):
    """Object-level permission: anyone may read; write access only when the
    object itself IS the requesting user. (Removed a leftover debug print.)"""

    def has_object_permission(self, request, view, obj):
        if request.method in SAFE_METHODS:
            return True
        # The object is compared directly against the requesting user.
        return obj == request.user
class IsAuthorOrReadOnly(BasePermission):
    """Object-level permission: anyone may read; only the object's author
    may write. (Removed a debug print that evaluated the comparison twice.)"""

    def has_object_permission(self, request, view, obj):
        if request.method in SAFE_METHODS:
            return True
        return obj.author.user == request.user
| Axubyy/ATS | Backend/week_8/MiniBlog/blog/api/permissions.py | permissions.py | py | 833 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.permissions.BasePermission",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.SAFE_METHODS",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.BasePermission",
"line_number": 15... |
39818017608 | #!/usr/bin/env python3
import json
import requests
import subprocess
# Prune branches whose distribution package is no longer downloadable.
session = requests.Session()

# List remote branch names matching a fixed-length all-'?' wildcard
# pattern (one '?' per character of the tracked identifier names).
proc = subprocess.run(['git', 'for-each-ref', '--format=%(refname:lstrip=3)', 'refs/remotes/origin/??????????????????????????????????'],
                      stdout=subprocess.PIPE,
                      check=True,
                      )

to_delete = []
for branch in proc.stdout.decode().strip().split('\n'):
    # product.json at the tip of each branch holds the download URL.
    product = json.loads(subprocess.run(['git', 'show', f'refs/remotes/origin/{branch}:product.json'],
                                        stdout=subprocess.PIPE,
                                        check=True,
                                        ).stdout)
    # HEAD request without following redirects: 200 means still available.
    rsp = session.head(product['DistributionURL'], allow_redirects=False)
    if rsp.status_code == 200:
        continue
    if rsp.status_code != 404:
        # NOTE(review): statuses other than 200/404 (e.g. transient 5xx)
        # are only warned about, yet the branch is STILL queued for
        # deletion below — confirm this is intended.
        print(f'[WARNING] {branch} {rsp.status_code} {rsp.reason}')
    to_delete.append('refs/heads/'+branch)

if to_delete:
    # Delete all dead branches on the remote in a single push.
    subprocess.run(['git', 'push', '--delete', 'origin'] + to_delete, check=True)
| HsiangHo/macOS | .github/clean.py | clean.py | py | 882 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.Session",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"l... |
14477324527 | from django import forms
from .models import Image
from urllib import request
from django.core.files.base import ContentFile
from django.utils.text import slugify
class ImageCreateForm(forms.ModelForm):
class Meta:
model = Image
fields = ('title','url','description')
#我们的用户不会在表单中直接为图片添加 URL
#他们将会使用一个 JavaScropt 工具来从其他网站中选择一张图片然后我们的
#表单将会以参数的形式接收这张图片的 URL
#我们覆写 url 字段的默认控件(widget)为一个 HiddenInput 控件,这个控件将
#会被渲染为属性是 type="hidden" 的 HTML 元素
#用这个控件是因为我们不想让用户看见这个字段
widgets={
'url':forms.HiddenInput,
}
#通过使用以 clean_<fieldname> 形式
#命名的方法来实现。这个方法会在你为一个表单实例执行 is_valid() 时执行
def clean_url(self):
url = self.cleaned_data['url']
valid_extensions = ['jpg','jpeg']
extension = url.rsplit('.',1)[1].lower()
if extension not in valid_extensions:
raise forms.ValidationError('The given url does not match valid image extensions')
return url
#ModelForm 提供了一个 save() 方法来保存目前的模型实例到数据库中,并且返回一个对象
#如果 commit 是 False , save() 方法将会返回一个模型实例但是并不会把这个对象保存到数据库中
def save(self, force_insert=False,force_update=False,commit=True):
image = super(ImageCreateForm, self).save(commit=False)
image_url = self.cleaned_data['url']
image_name = '{}.{}'.format(slugify(image.title),image_url.rsplit('.', 1)[1].lower())
# 从给定的 URL 中下载图片
response = request.urlopen(image_url)
#使用 Python 的 urllib 模块来下载图片,然后调用 save() 方法把图片传递给一个
#ContentFiel 对象,这个对象被下载的文件所实例化
image.image.save(image_name,ContentFile(response.read()),save=False)
if commit:
image.save()
return image
| aangang/bookmarks | bookmarks/images/forms.py | forms.py | py | 2,247 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.Image",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.forms.Hidd... |
21703164964 | # Author: Abdulaminkhon Khaydarov
# Date: 06/11/22
# Problem URL: https://leetcode.com/problems/running-sum-of-1d-array/
from typing import List
class Solution:
    def runningSum(self, nums: List[int]) -> List[int]:
        """Replace each element of nums with the prefix sum up to it (in place) and return nums."""
        total = 0
        for idx, value in enumerate(nums):
            total += value
            nums[idx] = total
        return nums
if __name__ == '__main__':
    # Smoke-test: each line prints True when runningSum matches the
    # expected prefix sums from the problem statement.
    solution = Solution()
    # Example 1
    print(solution.runningSum([1, 2, 3, 4]) == [1, 3, 6, 10])
    # Example 2
    print(solution.runningSum([1, 1, 1, 1, 1]) == [1, 2, 3, 4, 5])
    # Example 3
    print(solution.runningSum([3, 1, 2, 10, 1]) == [3, 4, 6, 16, 17])
| webdastur/leetcode | array/easy/leetcode1480_1.py | leetcode1480_1.py | py | 628 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
}
] |
24856926246 | # -*-coding:utf-8 -*-
import numpy as np
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
"""
Author:
Jack Cui
Blog:
http://blog.csdn.net/c406495762
Zhihu:
https://www.zhihu.com/people/Jack--Cui/
Modify:
2017-10-11
"""
def loadDataSet(fileName):
    """Load a tab-separated data file.

    Each row holds float feature columns followed by one trailing label column.

    Args:
        fileName: path to the TSV file.

    Returns:
        (dataMat, labelMat): list of float feature rows and list of float labels.
    """
    dataMat = []; labelMat = []
    # Use a context manager so the file handle is always closed; the original
    # opened the file twice (once just to count columns) and closed neither.
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            # All but the last column are features; the last is the label.
            dataMat.append([float(x) for x in curLine[:-1]])
            labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
if __name__ == '__main__':
    # Train an AdaBoost ensemble of depth-2 decision trees (SAMME) on the
    # horse-colic data set and report the train/test error rates.
    dataArr, classLabels = loadDataSet('horseColicTraining2.txt')
    testArr, testLabelArr = loadDataSet('horseColicTest2.txt')
    bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth = 2), algorithm = "SAMME", n_estimators = 10)
    bdt.fit(dataArr, classLabels)
    predictions = bdt.predict(dataArr)
    # Boolean-mask the ones matrix with the misclassified samples to count errors.
    errArr = np.mat(np.ones((len(dataArr), 1)))
    print('训练集的错误率:%.3f%%' % float(errArr[predictions != classLabels].sum() / len(dataArr) * 100))
    predictions = bdt.predict(testArr)
    errArr = np.mat(np.ones((len(testArr), 1)))
    print('测试集的错误率:%.3f%%' % float(errArr[predictions != testLabelArr].sum() / len(testArr) * 100))
{
"api_name": "sklearn.ensemble.AdaBoostClassifier",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.mat",
"line_number": 37,
"usage_type": "call"
},
{
... |
39099255085 | import queue
from threading import Thread
import numpy as np
from transformers import *
from openie import StanfordOpenIE
from utility.utility import *
#from bert_serving.client import BertClient
from rouge import Rouge
from stanfordcorenlp import StanfordCoreNLP
import pickle
from data.raw_data_loader import *
import argparse
'''
nlp = StanfordCoreNLP('/home/ziqiang/stanfordnlp_resources/stanford-corenlp-full-2018-10-05')
bc = BertClient(ip='localhost')
client = StanfordOpenIE()
rougex = Rouge()
'''
g_b=0
import threading
class threadsafe_generator:
    """Serialize access to an iterator so multiple threads can pull items safely."""
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()
    def __iter__(self):
        return self
    def __next__(self):
        # Only one thread at a time may advance the wrapped iterator.
        with self.lock:
            return next(self.it)
class Example(object):
    """Class representing a train/val/test example for text summarization."""
    def __init__(self, article, abstract, tokenizer, rougex, nlp):
        """
        Initializes the Example: splits article/abstract sentences into
        "facts", tokenizes them, and allocates the graph matrices that are
        filled in later by get_grap()/get_dec_label_*().
        Args:
            article: source text; list of sentence strings (only the first 60 are kept).
            abstract: reference summary; list of sentence strings.
            tokenizer: tokenizer exposing encode() (e.g. a transformers BertTokenizer).
            rougex: Rouge scorer exposing get_scores().
            nlp: StanfordCoreNLP handle used by sent_split().
        """
        self.rougex=rougex
        self.nlp=nlp
        self.tokenizer=tokenizer
        # Truncate long articles to their first 60 sentences.
        article=article[:60]
        self.article=article
        self.abstract=abstract
        # Process the article
        self.article_fact=[]
        self.article_sent=[]
        self.article_fact_tag=[]
        for count,sent in enumerate(article):
            self.article_sent.append(self.tokenizer.encode(sent))
            sent=sent.strip(',')
            sent=sent.strip(':')
            # sent_split/word_len are helpers defined elsewhere in this module.
            sentfact=sent_split(sent,self.nlp)
            sentfact_file=[]
            for i in sentfact:
                # Facts longer than 50 words are clipped to 50; facts shorter
                # than 20 characters are dropped.
                if word_len(i) >50:
                    ii=i.split(' ')
                    ii=ii[0:50]
                    sentfact_file.append(' '.join(ii))
                    continue
                if len(i) >= 20:
                    sentfact_file.append(i)
            # article_fact_tag records how many facts each sentence produced.
            self.article_fact_tag.append(len(sentfact_file))
            self.article_fact+=sentfact_file
        self.article_id=[]
        for fact in self.article_fact:
            self.article_id.append(self.tokenizer.encode(fact,add_special_tokens=False))
        self.article_len = len(self.article_id) # store the number of sentences of the article
        # Process the abstract
        self.original_abstract=[]
        self.abstract_fact=[]
        self.abstract_fact_all=[]
        for sent in abstract:
            self.original_abstract.append(self.tokenizer.encode(sent))
            if word_len(sent) > 20:
                sent=sent.strip(',')
                sent=sent.strip(':')
                sentfact=sent_split(sent,self.nlp)
            else:
                sentfact=[sent]
            self.abstract_fact_all+=sentfact
        for i in self.abstract_fact_all:
            # Same clipping/filtering as for article facts (min length 15 here).
            if word_len(i) >50:
                ii=i.split(' ')
                ii=ii[0:50]
                self.abstract_fact.append(' '.join(ii))
            elif len(i) < 15:
                continue
            else:
                self.abstract_fact.append(i)
        self.abstract_id=[]
        for fact in self.abstract_fact:
            self.abstract_id.append(self.tokenizer.encode(fact,add_special_tokens=False))
        self.abstract_len = len(self.abstract_id) # store the number of sentences of the article
        # Buffers filled later by get_enc_fact()/get_dec_fact()/get_dec_label_*().
        self.enc_fact=[]
        self.enc_sent=[]
        self.dec_fact=[]
        self.dec_label_bert=[]
        self.dec_label_rouge=[]
        self.dec_label_sent=[]
        # Fact-level and sentence-level adjacency matrices, populated by
        # get_grap(); grap_sim_bert and grap_entity are allocated but are
        # never written there (they stay all-zero).
        self.grap_sim_bert=np.zeros((self.article_len, self.article_len), dtype=np.float16)
        self.grap_sim_rouge=np.zeros((self.article_len, self.article_len), dtype=np.float16)
        self.grap_entity=np.zeros((self.article_len, self.article_len), dtype=np.float16)
        self.grap_cosent=np.zeros((self.article_len, self.article_len), dtype=np.float16)
        self.grap_sent=np.zeros((len(self.article), len(self.article)), dtype=np.float16)
    def get_enc_fact(self, max_len):
        """Truncate each tokenized fact to max_len ids (sentences to 2*max_len)."""
        for i in self.article_id:
            if len(i) > max_len:
                self.enc_fact.append(i[0:max_len])
            else:
                self.enc_fact.append(i)
        for i in self.article_sent:
            if len(i) > max_len*2:
                self.enc_sent.append(i[0:max_len*2])
            else:
                self.enc_sent.append(i)
    def get_dec_fact(self, max_len):
        """Truncate each tokenized abstract fact to max_len ids."""
        for i in self.abstract_id:
            if len(i) > max_len:
                self.dec_fact.append(i[0:max_len])
            else:
                self.dec_fact.append(i)
    def get_grap(self):
        """Get the sim bert graph """
        """Get the sim rouge graph """
        # Pairwise ROUGE similarity between article facts.
        for i,facti in enumerate(self.article_fact):
            for j,factj in enumerate(self.article_fact):
                scores = self.rougex.get_scores(facti, factj)
                self.grap_sim_rouge[i][j]=(scores[0]['rouge-1']['f']+scores[0]['rouge-2']['f'])/2
        """Get the sim sent graph """
        # Pairwise ROUGE similarity between article sentences.
        for i,facti in enumerate(self.article):
            for j,factj in enumerate(self.article):
                scores = self.rougex.get_scores(facti, factj)
                self.grap_sent[i][j]=(scores[0]['rouge-1']['f']+scores[0]['rouge-2']['f'])/2
        """Get the entity graph"""
        """Get the co-sent graph"""
        # Facts that came from the same sentence are fully connected in
        # the co-sentence graph (article_fact_tag holds per-sentence counts).
        now=0
        for i in self.article_fact_tag:
            for x in range(now+i)[now:now+i]:
                for y in range(now+i)[now:now+i]:
                    self.grap_cosent[x][y]=1
            now=now+i
    def get_dec_label_bert(self):
        # Stub: BERT-based labelling is disabled; leaves empty labels and a zero score.
        self.dec_label_bert=[]
        self.oral_score_bert=0
    def get_dec_label_rouge(self):
        """Label, for each abstract fact, the best-matching article fact by ROUGE."""
        rouge=[]
        score_rouge=[]
        index_rouge=[]
        for j in self.abstract_fact:
            score=[]
            for k in self.article_fact:
                scores = self.rougex.get_scores(j, k)
                score.append((scores[0]['rouge-1']['f']+scores[0]['rouge-2']['f'])/2)
            choose=score.index(max(score))
            index_rouge.append(choose)
            rouge.append(self.article_fact[choose])
            score_rouge.append(max(score))
        # Binary label per article fact: 1 iff it is the best match of some abstract fact.
        for i in range(len(self.article_fact)):
            if i in index_rouge:
                self.dec_label_rouge.append(1)
            else:
                self.dec_label_rouge.append(0)
        self.oral_score_rouge = self.rougex.get_scores(' . '.join(rouge), ' . '.join(self.abstract))
    def get_dec_label_rouge_sent(self):
        """Binary sentence labels from greedy ROUGE selection (up to 3 sentences)."""
        get_dec_label_sent=self.greedy_selection(self.article, self.abstract, 3, self.rougex)
        for i in range(len(self.article)):
            if i in get_dec_label_sent:
                self.dec_label_sent.append(1)
            else:
                self.dec_label_sent.append(0)
    def greedy_selection(self, doc_sent_list, abstract_sent_list, summary_size, rougex):
        """Greedily pick up to summary_size sentences maximizing ROUGE vs the abstract.

        Returns the sorted indices of the selected sentences; stops early when
        no remaining sentence improves the running score.
        """
        selected = []
        max_rouge = 0.0
        reference=''
        for i in abstract_sent_list:
            reference+=i
            reference+=' . '
        for s in range(summary_size):
            cur_max_rouge = max_rouge
            cur_id = -1
            for i in range(len(doc_sent_list)):
                if (i in selected):
                    continue
                c = selected + [i]
                candidates = ''
                for j in c:
                    candidates+=doc_sent_list[j]
                    candidates+=' . '
                scores = rougex.get_scores(candidates, reference)
                rouge_score = (scores[0]['rouge-1']['f']+scores[0]['rouge-2']['f'])/2
                if rouge_score > cur_max_rouge:
                    cur_max_rouge = rouge_score
                    cur_id = i
            if (cur_id == -1):
                return selected
            selected.append(cur_id)
            max_rouge = cur_max_rouge
        return sorted(selected)
class Batch(object):
    """Class representing a minibatch of train/val/test examples for text summarization."""
    def __init__(self, example_list, max_len):
        """
        Turns the example_list into a Batch object.
        Args:
            example_list: List of Example objects
            max_len: maximum number of token ids kept per fact
        """
        self.init_encoder(example_list, max_len) # initialize the input to the encoder
        self.init_decoder(example_list, max_len) # initialize the input and targets for the decoder
        self.init_result(example_list)
    def init_result(self, example_list):
        # Keep the raw (untokenized) text around for evaluation/output.
        self.original_article=[]
        self.original_abstract=[]
        self.original_sent=[]
        self.sent_to_fact=[]
        for ex in example_list:
            self.original_sent.append(ex.article)
            self.original_article.append(ex.article_fact)
            self.original_abstract.append(ex.abstract)
            self.sent_to_fact.append(ex.article_fact_tag)
    def init_encoder(self, example_list, max_len):
        # Collect encoder inputs and the per-example graphs.
        self.enc_fact=[]
        self.enc_sent=[]
        self.grap_sim_bert=[]
        self.grap_sim_rouge=[]
        self.grap_entity=[]
        self.grap_cosent=[]
        self.grap_sent=[]
        # First pass: let every Example build its truncated inputs and graphs.
        for ex in example_list:
            ex.get_enc_fact(max_len)
            ex.get_grap()
        # Fill in the numpy arrays
        for ex in example_list:
            self.enc_fact.append(ex.enc_fact)
            self.enc_sent.append(ex.enc_sent)
            self.grap_sim_bert.append(ex.grap_sim_bert)
            self.grap_sim_rouge.append(ex.grap_sim_rouge)
            self.grap_entity.append(ex.grap_entity)
            self.grap_cosent.append(ex.grap_cosent)
            self.grap_sent.append(ex.grap_sent)
    def init_decoder(self, example_list, max_len):
        # Collect decoder inputs, extraction labels and oracle scores.
        self.dec_fact=[]
        self.dec_label_sent=[]
        self.dec_label_bert=[]
        self.dec_label_rouge=[]
        self.dec_score_bert=[]
        self.dec_score_rouge=[]
        # Pad the inputs and targets
        for ex in example_list:
            ex.get_dec_fact(max_len)
            ex.get_dec_label_bert()
            ex.get_dec_label_rouge()
            ex.get_dec_label_rouge_sent()
        # Fill in the numpy arrays
        for ex in example_list:
            self.dec_fact.append(ex.dec_fact)
            self.dec_label_sent.append(ex.dec_label_sent)
            self.dec_label_bert.append(ex.dec_label_bert)
            self.dec_label_rouge.append(ex.dec_label_rouge)
            self.dec_score_bert.append(ex.oral_score_bert)
            self.dec_score_rouge.append(ex.oral_score_rouge)
class Batcher(object):
    """A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""
    BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
    def __init__(self, data_path, nlp_path):
        """Initialize the batcher. Start threads that process the data into batches.
        Args:
            data_path: file pattern passed to example_generator_DMCNN.
            nlp_path: local path of the Stanford CoreNLP resources.
        """
        self._data_path = data_path
        self._max_len=50
        self._batch_size=4
        # Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
        self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)
        self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self._batch_size)
        # Initialize the tool
        self.tokenizer=BertTokenizer.from_pretrained('bert-base-uncased')
        self.rougex=Rouge()
        self.nlp=StanfordCoreNLP(nlp_path)
        # Different settings depending on whether we're in single_pass mode or not
        self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once
        self._num_batch_q_threads = 1 # just one thread to batch examples
        self._bucketing_cache_size = 50 # only load one batch's worth of examples before bucketing; this essentially means no bucketing
        self._finished_reading = False # this will tell us when we're finished reading the dataset
        #prepear dataloader
        self.input_gen = threadsafe_generator(example_generator_DMCNN(self._data_path))
        print('finish prepearing')
        # Start the threads that load the queues
        self._example_q_threads = []
        for _ in range(self._num_example_q_threads):
            self._example_q_threads.append(Thread(target=self.fill_example_queue))
            self._example_q_threads[-1].daemon = True
            self._example_q_threads[-1].start()
        self._batch_q_threads = []
        for _ in range(self._num_batch_q_threads):
            self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
            # self._batch_q_threads[-1].daemon = True
            self._batch_q_threads[-1].start()
        print('threads started')
    def next_batch(self):
        """
        Return a Batch from the batch queue.
        batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
        """
        # If the batch queue is empty, print a warning
        if self._batch_queue.qsize() == 0:
            # tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
            pass
        if self._finished_reading and self._example_queue.qsize() == 0:
            print("Finished reading dataset in single_pass mode.")
            return None
        batch = self._batch_queue.get() # get the next Batch
        return batch
    def fill_example_queue(self):
        """Reads data from file and processes into Examples which are then placed into the example queue."""
        global g_b
        while True:
            # g_b is a module-level counter used only for progress logging.
            g_b+=1
            if g_b%100==0:
                print('--------'+str(g_b)+'--------')
                print(self._example_queue.qsize())
                print(self._batch_queue.qsize())
            try:
                article, abstract = self.input_gen.__next__() # read the next example from file. article and abstract are both strings.
            except StopIteration: # if there are no more examples:
                print("The example generator for this example queue filling thread has exhausted data.")
                self._finished_reading = True
                break
            example = Example(article, abstract, self.tokenizer, self.rougex, self.nlp) # Process into an Example.
            self._example_queue.put(example) # place the Example in the example queue.
    def fill_batch_queue(self):
        """
        Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.
        In decode mode, makes batches that each contain a single example repeated.
        """
        while True:
            # Get bucketing_cache_size-many batches of Examples into a list, then sort
            inputs = []
            for _ in range(self._batch_size * self._bucketing_cache_size):
                if self._finished_reading and self._example_queue.qsize() == 0:
                    break
                inputs.append(self._example_queue.get())
            # Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
            inputs.sort(key=self.get_sort)
            '''
            splits = []
            len_pre=-1
            for indexi,i in enumerate(inputs):
                len_now = i.article_len
                if len_pre != len_now:
                    splits.append(indexi)
                    len_pre=len_now
            batches=[]
            for indexi,i in enumerate(splits):
                if indexi+1 == len(splits):
                    batches.append(inputs[i:])
                else:
                    batches.append(inputs[i:splits[indexi+1]])
            batches_max=[]
            for i in batches:
                if len(i) <= self._batch_size:
                    batches_max.append(i)
                else:
                    batches_max+=[i[j:j+self._batch_size] for j in range(0, len(i), self._batch_size)]
            '''
            # Simple fixed-size chunking of the sorted inputs.
            batches_max=[]
            for indexi,i in enumerate(inputs):
                if indexi % self._batch_size ==0:
                    batches_max.append(inputs[indexi:indexi+self._batch_size])
            for b in batches_max: # each b is a list of Example objects
                self._batch_queue.put(Batch(b, self._max_len))
    def get_sort(self, x):
        # Sort key used to bucket examples by number of facts.
        return x.article_len
'''
train_data_loader=Batcher('data/DMCNN/train_*', 'DMCNN')
count=0
countx=0
while True:
batch = train_data_loader.next_batch()
each_batch_size=len(batch.enc_fact)
if train_data_loader._finished_reading == True:
break
f=open('data_file/DMCNN/train_file/'+str(count)+'_train_batch_of '+str(each_batch_size)+' examples.pkl','wb')
pickle.dump(batch,f)
f.close()
count+=1
countx+=each_batch_size
print('Total train data:')
print(countx)
train_data_loader=Batcher('data/DMCNN/val_*', 'DMCNN')
count=0
countx=0
while True:
batch = train_data_loader.next_batch()
each_batch_size=len(batch.enc_fact)
if train_data_loader._finished_reading == True:
break
f=open('data_file/DMCNN/val_file/'+str(count)+'_val_batch_of '+str(each_batch_size)+' examples.pkl','wb')
pickle.dump(batch,f)
f.close()
count+=1
countx+=each_batch_size
print('Total val data:')
print(countx)
'''
def argLoader():
    """Parse command-line options and return them as an argparse.Namespace."""
    arg_parser = argparse.ArgumentParser()
    # device: location of the Stanford CoreNLP resources.
    arg_parser.add_argument('--nlp_path', type=str,
                            default='/home/ziqiang/stanfordnlp_resources/stanford-corenlp-full-2018-10-05')
    # Data Setting: input glob and output directory for the preprocessed batches.
    arg_parser.add_argument('--data_path', type=str, default='data/DMCNN/train*')
    arg_parser.add_argument('--output_path', type=str, default='data_file/DMCNN/train_file/')
    return arg_parser.parse_args()
# Entry point: parse CLI options, then stream preprocessed batches out of the
# Batcher and pickle each one to its own file under output_path.
args = argLoader()
train_data_loader=Batcher(args.data_path, args.nlp_path)
count=0
countx=0
while True:
    batch = train_data_loader.next_batch()
    each_batch_size=len(batch.enc_fact)
    # Stop once the reader thread is done and both queues have drained.
    if train_data_loader._finished_reading == True and train_data_loader._batch_queue.qsize() == 0 and train_data_loader._example_queue.qsize() == 0:
        break
    f=open(args.output_path+str(count)+'_batch_of '+str(each_batch_size)+' examples.pkl','wb')
    pickle.dump(batch,f)
    f.close()
    count+=1
    # countx accumulates the total number of examples written.
    countx+=each_batch_size
print('finish all')
| RuifengYuan/FactExsum-coling2020 | make_data.py | make_data.py | py | 18,770 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "threading.Lock",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.float16",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"li... |
36629116758 | """Возьмите любую из задач с прошлых семинаров (например сериализация данных),
которые вы уже решали. Превратите функции в методы класса, а параметры в свойства.
Задачи должны решаться через вызов методов экземпляра."""
import csv
import json
class SaveToCsv:
    """Convert a two-level JSON mapping ({level: {id: name}}) into a flat CSV file."""

    def __init__(self, input_file_name, output_file_name):
        self.input_file_name = input_file_name
        self.output_file_name = output_file_name

    def save_to_csv(self):
        """Read the JSON input and write (level, id, name) rows with a header."""
        with open(self.input_file_name, "r") as source:
            payload = json.load(source)
        rows = [["level", "id", "name"]]
        rows.extend(
            [level, id_, name]
            for level, mapping in payload.items()
            for id_, name in mapping.items()
        )
        with open(self.output_file_name, "w", newline="") as target:
            csv.writer(target).writerows(rows)
if __name__ == '__main__':
    # Demo run: convert the exercise JSON file to CSV.
    # NOTE(review): the absolute Windows input path is machine-specific —
    # consider making it a CLI argument.
    converter = SaveToCsv(
        "D:\\dev\\study\\Immersion _in_Python\\sem_8\\task_8_2.json",
        "hw_10_2.csv",
    )
    converter.save_to_csv()
| Pisarev82/Immersion_in_Python | sem_10/hw_10_2.py | hw_10_2.py | py | 1,252 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 21,
"usage_type": "call"
}
] |
6953564000 | from django.http import HttpResponseRedirect, HttpResponse
from django.core.mail import send_mail
from django.shortcuts import render
from contacto.forms import FormularioContactos
from django.template import loader
# Create your views here.
def contactos(request):
    """Contact-form view.

    On a valid POST, e-mail the message and redirect to the thank-you page;
    on GET, show a fresh form; an invalid POST re-renders the bound form.
    """
    #form=FormularioContactos()
    if request.method == 'POST':
        form=FormularioContactos(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            # Sender falls back to a default address when 'email' is absent.
            send_mail(
                cd['asunto'],
                cd['mensaje'],
                cd.get('email', 'noreply@example.com'),['siteowner@example.com'],
            )
            return HttpResponseRedirect('/contactos/gracias/')
    else:
        # GET: unbound form with a default subject ('asunto').
        form = FormularioContactos(initial={'asunto':'adoro!'})
    template = loader.get_template('contactos/formulario_contactos.html')
    #return render(request, 'contactos/formulario_contactos.html', {'form': form})
    # An invalid POST falls through to here and re-renders with errors.
    return HttpResponse(template.render({'form': form}, request))
| JokerBerlin/python | contacto/views.py | views.py | py | 983 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "contacto.forms.FormularioContactos",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.core.mail.send_mail",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 20,
"usage_type": "call"
... |
43758862321 | # -*- coding: utf-8 -*-
# file: __init__.py
# date: 2021-07-20
import os
import _io
import json
import logging
import time
import datetime
import pyspark
import pyspark.sql
from typing import Any, Union, Dict, List, Tuple
from ... import pysparkit
LOGGER: logging.Logger = pysparkit.get_logger(__name__, level=logging.INFO)
def json_file2dict(json_path: str) -> Dict:
    """Load a JSON file and return its content as a python `Dict` instance.

    Args:
        json_path: Path of the JSON file to read.

    Returns:
        The parsed JSON document.
    """
    # A context manager guarantees the handle is closed even when json.load
    # raises, unlike the previous open()/close() pair.
    with open(json_path) as json_file:
        output: Dict = json.load(json_file)
    return output
def get_fs_proto(path: str) -> str:
    """Get file-system's type/proto from path string.

    Args:
        path: The path(directory or file path) waiting to judge.

    Returns:
        The file-system proto type, "file" represents local file-system.
    """
    proto, sep, _ = path.partition(":")
    # No scheme separator, or an explicit "file" scheme, means local fs.
    if not sep or proto == "file":
        return "file"
    return proto
def if_fs_client_ok(fs_client: str) -> bool:
    """If file-system executable file ok for using with command line.

    Args:
        fs_client: Name of the file-system client executable (e.g. "hadoop").

    Returns:
        True when the executable can be found on PATH.
    """
    # shutil.which avoids spawning a shell and is more robust than parsing
    # `which` output, whose string-length heuristic the old code relied on.
    import shutil
    return shutil.which(fs_client) is not None
def hadoop_reset_directory(
        directory: str, backup_postfix: str="_bak") -> None:
    """Resets an (existing) directory which using hadoop as client.
    Mainly includes following process:
    1. Remove historical backup directory.
    2. Backup `directory` if it already exist.
    3. Remove `directory`, but it should actually not exists after
    backup process.
    4. Build `directory`'s parent directory in case it not actually
    exists.
    Args:
        directory: The directory waiting to reset with hadoop client.
        backup_postfix: Ref to `py_dict_rdd2disk`.
    """
    backup_dir: str = directory + backup_postfix
    clean_bak_cmd: str = "hadoop fs -rm -r %s" % backup_dir
    backup_cmd: str = "hadoop fs -mv %s %s" % (directory, backup_dir)
    clean_cmd: str = "hadoop fs -rm -r %s" % directory
    build_cmd: str = "hadoop fs -mkdir -p %s" % os.path.dirname(directory)
    # Each command is executed via the hadoop CLI; its stdout is logged.
    # Failures are not checked — a missing directory just logs empty output.
    LOGGER.info(os.popen(clean_bak_cmd).read())
    LOGGER.info(os.popen(backup_cmd).read())
    LOGGER.info(os.popen(clean_cmd).read())
    LOGGER.info(os.popen(build_cmd).read())
def before_data2disk(target_directory: str,
        fs_client: str="default", backup_postfix: str="_bak") -> None:
    """Preprocessing before `RDD` data persistence.
    Mainly include get the file-system type/proto, do some directory
    backup/building job.
    Args:
        target_directory: Ref to `py_dict_rdd2disk`.
        fs_client: Ref to `py_dict_rdd2disk`.
        backup_postfix: Ref to `py_dict_rdd2disk`.
    Raises:
        Exception: when the client executable is missing, or when a
            non-local file-system other than hadoop is requested.
    """
    # Remote paths default to the hadoop client.
    if get_fs_proto(target_directory) != "file" and fs_client == "default":
        fs_client = "hadoop"
    if not if_fs_client_ok(fs_client):
        raise Exception("Your %s client is not OK, check it." % fs_client)
    if fs_client == "hadoop":
        hadoop_reset_directory(target_directory, backup_postfix)
    else:
        # TODO@202107211350
        raise Exception("For now only support hadoop fs client.")
def py_dict_rdd2disk(py_dict_rdd: pyspark.RDD,
        target_directory: str, fs_client: str="default",
        backup_postfix: str="_bak") -> None:
    """python `Dict` rdd persistence function.
    Persistence a python `Dict` spark rdd to file-system, which could
    be local file-system or hdfs style distributed fs.
    Args:
        py_dict_rdd:
            Spark `RDD` waiting to persistence, each record should be
            a python `Dict` instance.
        target_directory:
            The directory path to which we hope persistence `py_dict_rdd`.
        fs_client:
            The file-system interaction client, not necessary for local-
            file-system, but necessary for remote or distributed file-
            system, for example, hadoop for hdfs. And `fs_client` is
            the name of certain file-system executable file name.
        backup_postfix:
            If the `target_directory` already exists, we will backup it
            first under the same parent directory, with appending
            `backup_postfix` as the postfix of backup directory.
    """
    before_data2disk(target_directory, fs_client, backup_postfix)
    # saveAsTextFile is an action returning None, so nothing is bound here
    # (the old code mistakenly annotated its result as a pyspark.RDD).
    py_dict_rdd\
        .map(lambda x: json.dumps(x, separators=(',', ':')))\
        .saveAsTextFile(target_directory)
def hadoop_if_path_exists(path: str) -> bool:
    """Return True when `path` exists on the hadoop file-system.

    Implemented by running `hadoop fs -ls` and scanning the captured output
    for the "No such file or directory" message.
    NOTE(review): os.popen captures stdout only — confirm the hadoop client
    prints this message there rather than on stderr.
    """
    if not if_fs_client_ok("hadoop"):
        raise Exception("hadoop client is not OK")
    hadoop_cmd: str = "hadoop fs -ls %s" % path
    cmd_result: str = os.popen(hadoop_cmd).read()
    if "No such file or directory" in cmd_result:
        return False
    return True
def hadoop_waiting_util_path_ready(
        path: str, waiting_seconds: int=1,
        logging_period: int=30
        ) -> None:
    """Block until `path` exists on HDFS or `waiting_seconds` elapses.

    Args:
        path: HDFS path to poll.
        waiting_seconds: Maximum number of seconds to keep waiting.
        logging_period: Emit a progress log roughly every this many seconds.
    """
    start_time: datetime.datetime = datetime.datetime.now()
    while True:
        if hadoop_if_path_exists(path):
            LOGGER.info("path '%s' has been ready." % path)
            break
        # .seconds of a timedelta is an int number of elapsed seconds
        # (the old annotation wrongly declared it a timedelta).
        elapsed: int = (datetime.datetime.now() - start_time).seconds
        if elapsed > waiting_seconds:
            LOGGER.info("Out of waiting time for path '%s'..." % path)
            break
        if elapsed % logging_period == 0:
            LOGGER.info("Waiting for path '%s' ready..." % path)
        # Sleep between polls: the original loop busy-waited, pegging a CPU
        # core, hammering the hadoop CLI, and spamming the progress log.
        time.sleep(1)
def json_line_files2py_dict_rdd(
        sc: pyspark.sql.SparkSession, path: str
        ) -> pyspark.RDD:
    """Load JSON-lines text files under `path` into an RDD of python dicts.

    Args:
        sc: Active SparkSession whose sparkContext is used to read the files.
        path: Text-file path/pattern accepted by SparkContext.textFile.

    Returns:
        An RDD where each record is the parsed dict of one JSON line.
    """
    return sc.sparkContext.textFile(path)\
        .map(lambda x: json.loads(x))
| innerNULL/pysparkit | pysparkit/io/__init__.py | __init__.py | py | 5,878 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.Logger",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "_io.TextIOWrapper",... |
36038909243 | import collections
def calcEquation(equations, values, queries):
    """
    :type equations: List[List[str]]
    :type values: List[float]
    :type queries: List[List[str]]
    :rtype: List[float]

    Treat each equation a/b = v as a weighted edge a->b (weight v) and
    b->a (weight 1/v), then answer each query by DFS, multiplying edge
    weights along the path.  Unknown or disconnected variables yield -1.0.

    Fixes two defects of the Floyd-Warshall-style original: the truthy -1
    "no path" marker corrupted later relaxations, and queries over unknown
    variables silently returned 0 instead of -1.0.
    """
    graph = collections.defaultdict(dict)
    for (a, b), v in zip(equations, values):
        graph[a][b] = v
        graph[b][a] = 1.0 / v

    def solve(src, dst):
        # Unknown variable -> no answer.
        if src not in graph or dst not in graph:
            return -1.0
        stack = [(src, 1.0)]
        seen = {src}
        while stack:
            node, acc = stack.pop()
            if node == dst:
                return acc
            for nxt, w in graph[node].items():
                if nxt not in seen:
                    seen.add(nxt)
                    stack.append((nxt, acc * w))
        return -1.0

    return [solve(a, b) for a, b in queries]
print(calcEquation([["a","b"],["e","f"],["b","e"]], [3.4,1.4,2.3], [["b","a"],["a","f"],["f","f"],["e","e"],["c","c"],["a","c"],["f","e"]]))
print(calcEquation([["a","b"],["b","c"],["bc","cd"]], [1.5,2.5,5.0], [["a","c"],["c","b"],["bc","cd"],["cd","bc"]])) | zhaoxy92/leetcode | 399_evaluate_division.py | 399_evaluate_division.py | py | 1,024 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 9,
"usage_type": "call"
}
] |
11468083578 | from io import StringIO
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from geostore.models import Layer
from geostore.tests.factories import LayerFactory
from geostore.tests.utils import get_files_tests
class ImportGeojsonTest(TestCase):
def test_default_group(self):
output = StringIO()
call_command(
'import_geojson',
get_files_tests('empty.json'),
'--verbosity=1',
stdout=output
)
# Retrieve the layer
layer = Layer.objects.first()
self.assertIn(f'The created layer pk is {layer.pk}', output.getvalue())
self.assertEqual(layer.layer_groups.count(), 1)
self.assertEqual(layer.layer_groups.first().name, 'default')
def test_default_group_nogroup_rollback(self):
output = StringIO()
call_command(
'import_geojson',
get_files_tests('empty.json'),
'--dry-run',
'--verbosity=1', stdout=output)
self.assertIn("The created layer pk is", output.getvalue())
# Retrieve the layer
self.assertEqual(Layer.objects.count(), 0)
def test_schema_generated(self):
call_command(
'import_geojson',
get_files_tests('bati.geojson'),
'--generate-schema',
verbosity=0)
# Retrieve the layer
layer = Layer.objects.get()
# Assert schema properties are presents
self.assertNotEqual(
layer.schema.get('properties').keys() -
['ALTITUDE', 'ETIQUETTE', 'HAUTEUR', 'ID', 'ID_PG', 'NATURE', 'NOM',
'ORIGIN_BAT', 'PUB_XDECAL', 'PUB_YDECAL', 'ROTATION', 'ROTATION_S',
'XDECAL', 'XDECAL_SYM', 'YDECAL', 'YDECAL_SYM', 'Z_MAX', 'Z_MIN', ], True)
def test_import_geojson_layer_with_bad_settings(self):
bad_json = get_files_tests('bad.json')
with self.assertRaises(CommandError) as error:
call_command(
'import_geojson',
get_files_tests('empty.json'),
f'--layer-settings={bad_json}',
verbosity=0)
self.assertEqual("Please provide a valid layer settings file", str(error.exception))
def test_import_geojson_layer_with_pk_layer(self):
layer = LayerFactory()
self.assertEqual(len(layer.features.all()), 0)
call_command(
'import_geojson',
get_files_tests('toulouse.geojson'),
layer_pk=layer.pk,
verbosity=0
)
self.assertEqual(len(layer.features.all()), 838)
def test_import_geojson_layer_with_wrong_pk_layer(self):
geojson_sample = get_files_tests('toulouse.geojson')
with self.assertRaises(CommandError) as error:
call_command(
'import_geojson',
geojson_sample,
'--layer-pk=999',
'--generate-schema',
'--verbosity=0'
)
self.assertIn("Layer with pk 999 doesn't exist", str(error.exception))
| Terralego/django-geostore | geostore/tests/test_commands/test_import_geojson.py | test_import_geojson.py | py | 3,126 | python | en | code | 21 | github-code | 1 | [
{
"api_name": "django.test.TestCase",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "io.StringIO",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.core.management.call_command",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "... |
36427243193 | """
318. Maximum Product of Word Lengths
Medium
514
47
Favorite
Share
Given a string array words, find the maximum value of length(word[i]) * length(word[j]) where the two words do not share common letters. You may assume that each word will contain only lower case letters. If no such two words exist, return 0.
Example 1:
Input: ["abcw","baz","foo","bar","xtfn","abcdef"]
Output: 16
Explanation: The two words can be "abcw", "xtfn".
Example 2:
Input: ["a","ab","abc","d","cd","bcd","abcd"]
Output: 4
Explanation: The two words can be "ab", "cd".
Example 3:
Input: ["a","aa","aaa","aaaa"]
Output: 0
Explanation: No such pair of words.
"""
"""
Runtime: 1924 ms, faster than 7.93% of Python3 online submissions for Maximum Product of Word Lengths.
Memory Usage: 14 MB, less than 28.14% of Python3 online submissions for Maximum Product of Word Lengths.
"""
from collections import defaultdict
class Solution:
    def maxProduct(self, words: List[str]) -> int:
        """Return the largest len(words[i]) * len(words[j]) over pairs of
        words sharing no letter, or 0 when no such pair exists."""
        letter_sets = [set(w) for w in words]
        best = 0
        for i, first in enumerate(letter_sets):
            for j in range(i + 1, len(words)):
                # Disjoint character sets mean no shared letters.
                if first.isdisjoint(letter_sets[j]):
                    best = max(best, len(words[i]) * len(words[j]))
        return best
# 位运算存储状态
"""
Runtime: 396 ms, faster than 66.52% of Python3 online submissions for Maximum Product of Word Lengths.
Memory Usage: 13.4 MB, less than 62.39% of Python3 online submissions for Maximum Product of Word Lengths
"""
class Solution:
    def maxProduct(self, words: List[str]) -> int:
        """Bitmask variant: each word becomes a 26-bit letter mask; two words
        share no letter iff their masks AND to zero."""
        masks = []
        for word in words:
            mask = 0
            for ch in set(word):
                mask |= 1 << (ord(ch) - ord('a'))
            masks.append(mask)
        best = 0
        for i in range(len(words) - 1):
            for j in range(i + 1, len(words)):
                if not masks[i] & masks[j]:
                    best = max(best, len(words[i]) * len(words[j]))
        return best
| fengyang95/OJ | LeetCode/python3/318_MaximumProductOfWordLengths.py | 318_MaximumProductOfWordLengths.py | py | 2,324 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 37,
"usage_type": "call"
}
] |
26524777453 | #Dependencies
from relu import relu
from convolutional_mlp import LeNetConvPoolLayer
from logistic_sgd import LogisticRegression
from mlp import HiddenLayer
from dropout import dropout_neurons_from_layer
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
import theano
class RetinopathyNet(object):
    """Convolutional network for diabetic-retinopathy grading.

    Architecture: two conv+max-pool layers, one fully-connected ReLU layer,
    and a 5-way logistic-regression (softmax) output. Dropout can be applied
    between every pair of layers via symbolic `use_dropout`.
    """
    def __init__(self,
                 rng,                # numpy RandomState for weight init / dropout masks
                 x,                  # symbolic input batch — assumes flattened 512x512 grayscale images; TODO confirm with caller
                 y,                  # symbolic label vector (5 classes)
                 use_dropout,        # symbolic flag/rate toggling dropout at run time
                 dropout_rates,      # 4 rates: input, after layer0, after layer1, after layer2
                 train_batch_size,
                 nkerns,             # kernel counts for the two conv layers
                 parameters=[]):     # optional 8 pretrained arrays (W, b per layer)
        #Setup layer0
        # Dropout on raw input, then reshape to (batch, channels=1, 512, 512).
        layer0_input = dropout_neurons_from_layer(rng, x, dropout_rates[0], use_dropout)
        layer0_input = layer0_input.reshape((train_batch_size, 1, 512, 512))
        layer0 = LeNetConvPoolLayer(
            rng,
            input=layer0_input,
            image_shape=(train_batch_size, 1, 512, 512),
            filter_shape=(nkerns[0], 1, 5, 5),
            poolsize=(2, 2)
        )
        layer0.output = dropout_neurons_from_layer(rng, layer0.output, dropout_rates[1], use_dropout)
        #Setup layer 1
        # 254 = (512 - 5 + 1) / 2, consistent with a valid 5x5 conv + 2x2 pool.
        layer1 = LeNetConvPoolLayer(
            rng,
            input=layer0.output,
            image_shape=(train_batch_size, nkerns[0], 254, 254),
            filter_shape=(nkerns[1], nkerns[0], 5, 5),
            poolsize=(2, 2)
        )
        layer1.output = dropout_neurons_from_layer(rng, layer1.output, dropout_rates[2], use_dropout)
        #Setup layer 2
        # Flatten feature maps to (batch, nkerns[1] * 125 * 125); 125 = (254 - 5 + 1) / 2.
        layer2_input = layer1.output.flatten(2)
        layer2 = HiddenLayer(
            rng,
            input=layer2_input,
            n_in=nkerns[1] * 125 * 125,
            n_out=500,
            activation=relu
        )
        layer2.output = dropout_neurons_from_layer(rng, layer2.output, dropout_rates[3], use_dropout)
        #Setup layer 3
        layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=5)
        #Load parameters if they are passed in
        # Expected order: [W0, b0, W1, b1, W2, b2, W3, b3].
        if len(parameters) > 0:
            layer0.params[0].set_value(parameters[0])
            layer0.params[1].set_value(parameters[1])
            layer1.params[0].set_value(parameters[2])
            layer1.params[1].set_value(parameters[3])
            layer2.params[0].set_value(parameters[4])
            layer2.params[1].set_value(parameters[5])
            layer3.params[0].set_value(parameters[6])
            layer3.params[1].set_value(parameters[7])
        #Relevant functions and attributes for this class
        self.layer3 = layer3
        self.cost = layer3.negative_log_likelihood(y)
        self.errors = layer3.errors(y)
        self.params = layer0.params + layer1.params + layer2.params + layer3.params
        # Compiled prediction function; takes (data batch, dropout flag).
        self.classify_function = theano.function([x, use_dropout], self.layer3.y_pred)
    #Pass in a data matrix to get the models predicted labels for it
    def classify_images(self, x):
        # Dropout is disabled (0.0) at inference time.
        results = self.classify_function(x, 0.0)
        return results
| rocket-raccoon/DiabeticRetinopathyDetection | retinopathy_net.py | retinopathy_net.py | py | 2,958 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dropout.dropout_neurons_from_layer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "convolutional_mlp.LeNetConvPoolLayer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "dropout.dropout_neurons_from_layer",
"line_number": 33,
"usage_ty... |
11040136855 | from common.FrontendTexts import FrontendTexts
# Load the localized UI texts for the "materials" view once at import time.
view_texts = FrontendTexts('materials')
labels = view_texts.getComponent()['selector']['choices']

# (id, label) pairs for the action selector; labels come from the localized
# frontend texts loaded above.
ACTION_CHOICES = (
    (1, labels['edit']),
    (2, labels['weight'])
)

# Measurement-unit choices — presumably meter / square meter / cubic meter /
# "each"; confirm against how the field is used.
UNIT_CHOICES = (
    (1, "M"),
    (2, "M2"),
    (3, "M3"),
    (4, "EA")
)
| Conpancol/PyHeroku | CPFrontend/materials/choices.py | choices.py | py | 297 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "common.FrontendTexts.FrontendTexts",
"line_number": 3,
"usage_type": "call"
}
] |
24709845293 | from typing import List
DISTANCE_END = 5


def determine_place(n: int, distance_list: List[int]) -> int:
    """Determine the highest place a qualifying participant can take.

    A throw (never the first or the last participant) qualifies when all of
    the following hold:
      * some earlier participant already threw the maximum distance,
      * the throw's distance ends in ``DISTANCE_END`` (5),
      * the throw is strictly longer than the next participant's throw.

    :param n: number of entries in ``distance_list``
    :param distance_list: throw distances, in participant order
    :return: the place of the longest qualifying throw, or 0 when no throw
        qualifies
    """
    best_throw = max(distance_list)
    target = 0
    seen_winner = False
    for idx in range(1, n - 1):
        current = distance_list[idx]
        if distance_list[idx - 1] == best_throw:
            seen_winner = True
        qualifies = (
            current % 10 == DISTANCE_END
            and seen_winner
            and current > distance_list[idx + 1]
        )
        if qualifies:
            target = max(target, current)
    if not target:
        return 0
    # Place = 1 + number of strictly longer throws.
    return 1 + sum(1 for distance in distance_list if distance > target)
def main():
    """Read the input data from stdin and print the resulting place."""
    n = int(input())
    throws = [int(token) for token in input().split()]
    print(determine_place(n, throws))


if __name__ == '__main__':
    main()
| OkhotnikovFN/Yandex-Algorithms | trainings_1.0/hw_2/task_e/e.py | e.py | py | 1,780 | python | ru | code | 1 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
}
] |
73582152353 | import scrapy
from ..items import TutorialItem
class QuoteSpider(scrapy.Spider):
    """Scrape quote text, author and tags from quotes.toscrape.com,
    following pages 1 through 10."""

    name = 'quote'
    pageNumber = 2  # next page to request; page 1 is in start_urls
    start_urls = [
        'http://quotes.toscrape.com/page/1/'
    ]

    def parse(self, response):
        """Yield one TutorialItem per quote on the page, then schedule the
        next page until page 10."""
        all_quotes = response.css('div.quote')
        for quote in all_quotes:
            # Bug fix: create a fresh item for every quote. The original
            # reused a single TutorialItem instance and mutated it each
            # iteration, so every yielded reference pointed at the same
            # object (last quote's values could overwrite earlier ones in
            # any pipeline that holds on to items).
            items = TutorialItem()
            items['title'] = quote.css('.text::text').extract()
            items['author'] = quote.css('.author::text').extract()
            items['tag'] = quote.css('div.tags .tag::text').extract()
            yield items

        next_page = 'http://quotes.toscrape.com/page/' + str(self.pageNumber) + '/'
        print('Next Page is: ', next_page)
        if self.pageNumber <= 10:
            self.pageNumber += 1
            yield response.follow(next_page, callback=self.parse)
| Ankitdeveloper15/python | Boring Stuff/Scrapy/tutorial/tutorial/spiders/quotes_spider.py | quotes_spider.py | py | 1,393 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "items.TutorialItem",
"line_number": 19,
"usage_type": "call"
}
] |
30997752571 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask import request
from flask_migrate import Migrate
# Application and database setup.
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): database credentials are hard-coded here; move them to
# configuration/environment variables before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://postgres:password@localhost:5432/FlaskDB"
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class User(db.Model):
    """ORM model for one user row (table "user")."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(70))
    email = db.Column(db.String(120))

    def __init__(self, username, email):
        self.username = username
        self.email = email

    def __repr__(self):
        return '<User %r>' % self.username
@app.route('/')
def home():
    """Plain-text landing page."""
    return "This is home page"
@app.route('/users', methods=['POST', 'GET'])
def handle_users():
    """Collection endpoint: POST creates a user from a JSON body
    ({"username", "email"}); GET lists all users with a count."""
    if request.method == 'POST':
        if request.is_json:
            data = request.get_json()
            new_user = User(username=data['username'], email=data['email'])
            db.session.add(new_user)
            db.session.commit()
            return {"message": f"new user {new_user.username} added"}
        else:
            # Non-JSON bodies are rejected (still returned with HTTP 200).
            return {"error": "the request is not json format"}
    elif request.method == 'GET':
        users = User.query.all()
        results = [
            {
                "username": user.username,
                "email": user.email
            } for user in users]
        return {"count": len(results), "users": results}
@app.route('/users/<user_id>', methods=['GET', 'PUT', 'DELETE'])
def handle_user(user_id):
    """Single-user endpoint: GET returns, PUT updates, DELETE removes.

    Responds 404 when no user with *user_id* exists.
    """
    user_by_id = User.query.get_or_404(user_id)
    if request.method == "GET":
        response = {
            "username": user_by_id.username,
            "email": user_by_id.email
        }
        return {"message": "success", "user": response}
    elif request.method == "PUT":
        # NOTE(review): unlike the POST handler, this does not check
        # request.is_json and assumes both keys are present in the payload.
        data = request.get_json()
        user_by_id.username = data['username']
        user_by_id.email = data['email']
        db.session.add(user_by_id)
        db.session.commit()
        return {"message": f"User {user_by_id.username} successfully updated."}
    elif request.method == "DELETE":
        db.session.delete(user_by_id)
        db.session.commit()
        return {"message": f"User {user_by_id.username} successfully deleted."}
if __name__ == "__main__":
    # Development server on port 4996 (debug not enabled).
    app.run(port=4996)
| Agasiland/FlaskService | app.py | app.py | py | 2,360 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_migrate.Migrate",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.req... |
1370447652 | import datetime
from django.core import serializers
from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from weather.models import *
# Create your views here.
def weather(request, location):
    """Dispatch a weather query for *location* based on which GET flag is
    present: index / now / living_index / forecast_7d / surrounding_weather.

    NOTE(review): if none of the flags is set the function falls through
    and returns None, which Django reports as an error — confirm callers
    always pass exactly one flag.
    """
    # Keep only the last component of a comma-separated location string.
    location = location.split(',')[-1]
    index = request.GET.get('index')
    now = request.GET.get('now')
    living_index = request.GET.get('living_index')
    temp24 = request.GET.get('temp24')
    wind_scale24 = request.GET.get('wind_scale24')
    forecast_7d = request.GET.get('forecast_7d')
    surrounding_weather = request.GET.get('surrounding_weather')
    if index:
        # Current weather + the next 5 hourly temperatures.
        time = []
        temp = []
        now_weather = None
        temp_list = RealtimeWeather.objects.filter(city=location).order_by('-id')
        now_time = (datetime.datetime.now()).strftime("%H")
        for i in range(len(temp_list)):
            if now_time == temp_list[i].time.strftime("%H"):
                now_weather = temp_list[i].weather
                for j in range(i, i+5):
                    time.append(now_time)
                    temp.append(temp_list[j].temperature)
                now_time = (datetime.datetime.now() + datetime.timedelta(hours=1)).strftime("%H")
                break
        # NOTE(review): the `time` list built above is discarded; the
        # payload's 'time' is the single next-hour string — confirm intent.
        temperature = {
            'time': now_time,
            'temp': temp
        }
        data = {
            'weather': now_weather,
            'temperature': temperature,
        }
        param = {
            'code': 200,
            'data': data
        }
        return JsonResponse(param)
    if now:
        # Latest realtime record plus today's wind info.
        now_weather = RealtimeWeather.objects.filter(city=location).order_by('-id')[0]
        today_weather = TodayWeather.objects.filter(city=location).order_by('-id')[0]
        # NOTE(review): 'scale' is filled from wind_direct and 'direction'
        # from wind_strength — the two look swapped; confirm field meanings.
        wind = {
            'scale': today_weather.wind_direct,
            'direction': today_weather.wind_strength
        }
        data = {
            'weather':now_weather.weather,
            'temperature':now_weather.temperature,
            'wind':wind
        }
        return JsonResponse(
            {
                'code':200,
                'data':data
            }
        )
    if living_index:
        # Lifestyle indices (dressing, UV, car washing, pollution diffusion).
        weather_life = WeatherLifeindex.objects.filter(city=location)[0]
        clothing = weather_life.dress_index
        uv = weather_life.uv_index
        car_washing = weather_life.carwash_index
        air_pollution_diffusion = weather_life.pm_index
        data = {
            'clothing':clothing,
            'uv':uv,
            'car_washing':car_washing,
            'air_pollution_diffusion':air_pollution_diffusion
        }
        return JsonResponse({
            'code':200,
            'data':data
        })
    # (Disabled temp24 / wind_scale24 handlers previously lived here as
    # commented-out code: 24-hour temperature and wind-scale series.)
    if forecast_7d:
        # 7-day forecast starting from today's WCity7dForecast row.
        date_list = []
        weather_list = []
        temperature_list = []
        wind_direction_list = []
        wind_scale_list = []
        target_day = None
        target_day_date = None
        # location_list = WCity7dForecast.objects.filter(parent_city=location).order_by('-id')
        location_list = WCity7dForecast.objects.filter(location=location).order_by('-id')
        # NOTE(review): today is formatted '%Y-%m-%d' here but '%Y/%m/%d'
        # in the raw-SQL lookup below — confirm the stored date format.
        today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
        for item in location_list:
            if today == str(item.date):
                # Debug prints left in by the original author.
                print('*' * 45)
                print(today)
                print(str(item.date))
                print('*' * 80)
                print('test is ok')
                print(item.date)
                target_day = item
                print(type(target_day.date))
                print(target_day.date)
                target_day_date = target_day.date
                break
        # NOTE(review): if no row matched, target_day is None and the loop
        # below raises AttributeError — confirm data always has today's row.
        print(type(target_day))
        for i in range(7):
            date_list.append(target_day.date.strftime('%m/%d'))
            # date_list.append(target_day_date.strftime('%m/%d'))
            weather_list.append(target_day.cond_txt_d)
            max_temp = target_day.tmp_max
            min_temp = target_day.tmp_min
            temp = str(max_temp) + '/' + str(min_temp)
            temperature_list.append(temp)
            wind_direction_list.append(target_day.wind_dir)
            wind_scale_list.append(target_day.wind_sc)
            # NOTE(review): `today` is always now+1 day — it does not
            # advance with i, so each iteration refetches the same date.
            today = datetime.datetime.strftime(datetime.datetime.now()+datetime.timedelta(days=1), '%Y/%m/%d')
            try:
                target_day = WCity7dForecast.objects.raw(
                    'select * from w_city_7d_forecast where parent_city=%s and date = %s order by id desc limit 1',[location, today])[0]
            except Exception as e:
                return JsonResponse({
                    'code': 400,
                    'data': 'error'
                })
        # NOTE(review): 'location' is the raw queryset (not date_list, which
        # is built but unused) and is not JSON-serializable — confirm.
        data = {
            'location': location_list,
            'weather': weather_list,
            'temperature': temperature_list,
            'wind_direction': wind_direction_list,
            'wind_scale': wind_scale_list
        }
        # data = serializers.serialize("json",data)
        return JsonResponse({
            'code':200,
            'data':data
        })
    if surrounding_weather:
        # Latest weather for districts around (but not inside) the location.
        # district_list = []
        district_list = set()
        temperature_list = []
        weather_list = []
        city_list = TodayWeather.objects.filter(city=location).order_by('-id')
        for city in city_list:
            if city.district not in location:
                # district_list.append(city.district)
                district_list.add(city.district)
        district_list = list(district_list)
        for dis in district_list:
            # Latest record for this district.
            loc = TodayWeather.objects.filter(district=dis).order_by('-id')[0]
            max_temp = loc.temperature_max
            min_temp = loc.temperature_min
            temp = str(max_temp) + '/' + str(min_temp)
            temperature_list.append(temp)
            weather_list.append(loc.weather)
        data = {
            'location': district_list,
            'weather': weather_list,
            'temperature': temperature_list
        }
        return JsonResponse(
            {
                'code': 200,
                'data': data
            }
        )
| BruceDGit/HexuWeather | Server/weather_server/weather/views.py | views.py | py | 8,222 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "da... |
32466005602 | import random
from colorama import init, Fore, Back, Style
from checker import check_wordle
def text_edit(guess, result, alphabet):
    """Build the colorized feedback line for *guess* plus an alphabet board.

    ``result`` is a list of 'G'/'Y'/'W' codes, one per guessed letter.
    Side effect: updates the module-level ``alphabet_dict`` status map
    (values "Green"/"Yellow"/"Grey"/"Not Used"); a letter's status is only
    ever upgraded, never downgraded.
    """
    editted_text = []

    def _letter_paint(letter, color):
        # Map a status color to a colorama-highlighted uppercase letter.
        if color == 'Green':
            letter_painted = Back.GREEN + letter.upper()
        elif color == 'Yellow':
            letter_painted = Back.YELLOW + letter.upper()
        elif color == 'Grey':
            letter_painted = Back.WHITE + Fore.BLACK + letter.upper()
        else:
            letter_painted = letter.upper()
        return letter_painted + Style.RESET_ALL

    def _alphabet_edit():
        # Repaint the whole alphabet according to the current statuses.
        for i in range(0, len(alphabet)):
            letter = alphabet[i]
            alphabet_print[i] = _letter_paint(letter, alphabet_dict[letter])

    for _ in range(0, len(result)):
        letter_key = guess[_].upper()
        if result[_] == 'G':
            color = 'Green'
            alphabet_dict[letter_key] = color
            editted_text.append(_letter_paint(guess[_], color))
        if result[_] == 'Y':
            color = 'Yellow'
            if alphabet_dict[letter_key] != "Green":
                alphabet_dict[letter_key] = color
            editted_text.append(_letter_paint(guess[_], color))
        if result[_] == 'W':
            color = 'Grey'
            # Bug fix: the original condition used `or`, which is always
            # true, so a grey result could downgrade a letter already
            # marked green or yellow on the board.
            if alphabet_dict[letter_key] not in ("Green", "Yellow"):
                alphabet_dict[letter_key] = color
            editted_text.append(_letter_paint(guess[_], color))

    alphabet_print = alphabet.copy()
    _alphabet_edit()
    if result == ['G'] * 5:
        # Fully correct guess: no need to show the alphabet board.
        joined_text = ''.join(editted_text) + '\n'
    else:
        joined_text = ''.join(editted_text) + '\n' + '-' * 20 + '\n' + ' '.join(alphabet_print) + '\n'
    return joined_text
def main(alphabet):
    """Run the interactive Wordle loop: up to 6 valid guesses against a
    random word from words.txt; 'q' quits."""
    words = open('words.txt').read().splitlines()
    word_to_guess = random.choice(words)
    attempt = 0
    # Initialize the module-level alphabet_dict status board.
    for letter in range(0, len(alphabet)):
        alphabet_dict[alphabet[letter].upper()] = "Not Used"
    while True:
        if attempt==6:
            print(f'The word was "{word_to_guess}".')
            break
        guess = input('Enter five-letter word ("q" to exit): ')
        if guess=='q':
            break
        else:
            # Only dictionary words of exactly five letters count as attempts.
            if (guess not in words) or len(guess)!=5:
                print("Can't use this word")
            else:
                attempt+=1
                # check_wordle apparently returns a list of possible
                # colorings and one is picked at random — TODO confirm
                # against checker.py.
                result = random.choice(check_wordle(word_to_guess, guess))
                print(text_edit(guess, result, alphabet))
                if result==['G']*5:
                    print('Congratulations!')
                    break
if __name__ == '__main__':
    # colorama.init() enables ANSI colors on Windows consoles.
    init()
    import string
    alphabet = list(string.ascii_uppercase)
    # Module-level letter-status map shared with text_edit()/main().
    alphabet_dict={}
    main(alphabet)
| ivlmag/simple_wordle_clone | console_wordle.py | console_wordle.py | py | 2,396 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "colorama.Back.GREEN",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "colorama.Back",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "colorama.Back.YELLOW",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "colo... |
10669863739 | import gspread
import copy
# Google Sheets handles: read scenario data from the SCENARIO tab, write
# computed cover values back to the COVER tab.
service_account = gspread.service_account('data/~service-account.json')
sheet = service_account.open("Mapout3.0")
scenario_sheet = sheet.worksheet("SCENARIO")
cover_sheet = sheet.worksheet("COVER")
sheet_rows = scenario_sheet.get_all_values()
# Pull each labelled row out of the sheet by its first-column name.
# NOTE(review): this chained assignment makes all eight names alias ONE
# shared empty list until each is rebound in the loop below; if any
# labelled row is missing from the sheet, the unbound names still alias
# each other — confirm every row is always present.
times = snow_accu = sleet_accu = frzg_accu = total_frzn_accu = rain_accu = total_melt = slr = []
for row in sheet_rows:
    if row[0] == 'validTimeUTC':
        # times = [row[0]] + [datetime.strptime(v, "%H") for v in row[1:]]
        times = row
    elif row[0] == 'snow accu':
        snow_accu = row
    elif row[0] == 'sleet accu':
        sleet_accu = row
    elif row[0] == 'frzg accu':
        frzg_accu = row
    elif row[0] == 'total frzn accu':
        total_frzn_accu = row
    elif row[0] == 'rain accu':
        rain_accu = row
    elif row[0] == 'total melt (in/hr)':
        total_melt = row
    elif row[0] == 'slr':
        slr = row
# Zip the row lists into one dict per sheet column. Index 0 is the label
# column, so forecast[0] holds row names rather than data.
forecast = []
for i in range(0, len(times)):
    period = {
        "validTimeUTC": times[i],
        "hourSnowAccu": snow_accu[i],
        "hourSleetAccu": sleet_accu[i],
        "hourFrzgAccu": frzg_accu[i],
        "hourFrznAccu": total_frzn_accu[i],
        "hourRainAccu": rain_accu[i],
        "hourMelt": total_melt[i],
        "hourSlr": slr[i]
    }
    forecast.append(period)

# Seed the per-hour "stacks" (layers of accumulated precipitation); the
# main loop below starts at forecast index 3, matching these placeholders.
all_stacks = [[], [], [{
    "layerSnowAccu": 0,
    "layerSnowSlrAccu": 0,
    "layerSleetAccu": 0,
    "layerFrzgAccu": 0,
    "layerFrznAccu": 0}]]
# For each hour: push a new accumulation layer on top of the previous
# stack, then melt frozen content from the top layer downward.
for i, hour in enumerate(forecast[3:], start=3):
    hour_snow = float(hour['hourSnowAccu'])
    hour_rain = float(hour['hourRainAccu'])
    hour_sleet = float(hour['hourSleetAccu'])
    hour_frzg = float(hour['hourFrzgAccu'])
    hour_frzn = float(hour['hourFrznAccu'])
    hour_slr = float(hour['hourSlr'])
    # NOTE(review): hour_melt is negated here, so the subtraction below
    # (layer - hour_melt / 10) INCREASES the layer when hourMelt > 0 —
    # confirm whether the sheet stores melt as negative values.
    hour_melt = -float(hour['hourMelt'])
    if hour_rain or hour_sleet or hour_frzg: #if mix, cap slr at 10
        hour_slr = 10 if hour_slr > 10 else hour_slr
    hour_accumulations = {
        "layerSnowAccu": hour_snow,
        "layerSnowSlrAccu": hour_snow * (hour_slr / 10),
        "layerSleetAccu": hour_sleet,
        "layerFrzgAccu": hour_frzg,
        "layerFrznAccu": hour_frzn,
        "layerRainAccu": hour_rain,
        "layerSlr": hour_slr,
        "hourMelt": float(hour['hourMelt'])
    }
    # Deep-copy so melting this hour never mutates the previous hour's stack.
    previous_stack = copy.deepcopy(all_stacks[-1])
    current_stack = [hour_accumulations] + previous_stack
    for layer in current_stack:
        layer_frzn_accu = layer['layerFrznAccu']
        layer_frzn_accu_remaining = layer_frzn_accu - hour_melt / 10
        if layer_frzn_accu_remaining < 0:
            # Layer fully melted; carry the leftover melt to the next layer.
            hour_melt = -layer_frzn_accu_remaining * 10
            layer_frzn_accu_remaining = 0
        else:
            hour_melt = 0
        # Shrink every component of the layer proportionally.
        try:
            prop_remaining = layer_frzn_accu_remaining / layer_frzn_accu
        except ZeroDivisionError:
            prop_remaining = 0
        layer['layerSnowAccu'] *= prop_remaining
        layer["layerSnowSlrAccu"] *= prop_remaining
        layer['layerSleetAccu'] *= prop_remaining
        layer['layerFrzgAccu'] *= prop_remaining
        layer['layerFrznAccu'] = layer_frzn_accu_remaining
        if hour_melt == 0:
            break
    all_stacks.append(current_stack)
# Sum each stack's layers into per-hour cover totals (first element is the
# row label expected by the COVER sheet).
snow_cover = ['snow cover']
snow_cover_slr = ['slr snow cover']
sleet_cover = ['sleet cover']
frzg_cover = ['frzg cover']
frzn_cover = ['total frzn']
for stack in all_stacks:
    stack_snow = stack_slr_snow = stack_sleet = stack_frzg = stack_frzn = 0
    for layer in stack:
        # Each component is rounded per layer before summing, so totals can
        # differ slightly from rounding the sum once.
        stack_snow += round(layer['layerSnowAccu'], 1)
        stack_slr_snow += round(layer['layerSnowSlrAccu'], 1)
        stack_sleet += round(layer['layerSleetAccu'], 2)
        stack_frzg += round(layer['layerFrzgAccu'], 2)
        stack_frzn += round(layer['layerFrznAccu'], 2)
    snow_cover.append(stack_snow)
    snow_cover_slr.append(stack_slr_snow)
    sleet_cover.append(stack_sleet)
    frzg_cover.append(stack_frzg)
    frzn_cover.append(stack_frzn)

# Write the six result rows to the COVER sheet (ranges A1:AM6).
cover_sheet.update('A1:AM1', [times])
cover_sheet.update('A2:AM2', [snow_cover])
cover_sheet.update('A3:AM3', [snow_cover_slr])
cover_sheet.update('A4:AM4', [sleet_cover])
cover_sheet.update('A5:AM5', [frzg_cover])
cover_sheet.update('A6:AM6', [frzn_cover])
# Stacking graphic code below
# max_length = len(all_stacks[-1])
# stacks_horizontal = []
# for stack in all_stacks:
# formatted_stack = [0 for i in range(max_length - len(stack))]
# for layer in stack:
# formatted_stack.append(round(layer['layerFrznAccu'], 3))
# stacks_horizontal.append(formatted_stack)
#
# sums = []
# for s in stacks_horizontal:
# sums.append(sum(s))
#
# stacks_vertical = []
# for i in range(max_length):
# row = []
# for stack in stacks_horizontal:
# row.append(stack[i])
# stacks_vertical.append(row)
#
# for stack in stacks_vertical.copy():
# if all(layer == 0 for layer in stack):
# stacks_vertical.remove(stack)
#
# with open('../forecasts/stacks.csv', 'w') as file:
# writer = csv.writer(file)
# writer.writerow(times[1:])
# writer.writerows(stacks_vertical)
# writer.writerow([''])
# writer.writerow(sums)
#
# with open('../forecasts/stacks.csv', 'r') as file:
# reader = csv.reader(file)
# data = [next(reader)]
# for row in reader:
# r = [(float(v) if v != '' else v) for v in row]
# data.append(r)
#
# cover_sheet.update(data)
| weathermandgtl/display_grid | data/melt_slr.py | melt_slr.py | py | 5,403 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gspread.service_account",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 71,
"usage_type": "call"
}
] |
31588107873 | import dataclasses
import json
import logging
import re
import sys
import urllib.parse
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Set, List, Union
from para_tranz.utils.config import PROJECT_DIRECTORY, ORIGINAL_PATH, TRANSLATION_PATH, PARA_TRANZ_PATH, LOG_LEVEL, \
LOG_DEBUG_OVERWRITE, OVERRIDE_STRING_STATUS
def relative_path(path: Path) -> Path:
    """Return *path* relative to PROJECT_DIRECTORY for display purposes;
    if *path* is not under the project directory, return it unchanged.
    """
    try:
        return path.relative_to(PROJECT_DIRECTORY)
    except Exception:  # idiom fix: the original bound the exception to an unused `_`
        return path
class CustomFormatter(logging.Formatter):
    """Log formatter that, when LOG_DEBUG_OVERWRITE is set, prefixes DEBUG
    records with a carriage return (so successive debug lines overwrite
    each other on the console) instead of a trailing newline."""
    def format(self, record):
        # NOTE(review): this mutates the private logging.Formatter
        # attribute self._style._fmt — it works with the stdlib %-style
        # formatter, but relies on an implementation detail.
        if LOG_DEBUG_OVERWRITE and record.levelno == logging.DEBUG:
            self._style._fmt = "\r[%(name)s][%(levelname)s] %(message)s"
        else:
            self._style._fmt = "[%(name)s][%(levelname)s] %(message)s \n"
        return super().format(record)
def make_logger(name: str) -> logging.Logger:
    """Create a logger writing to stdout at LOG_LEVEL with CustomFormatter.

    NOTE(review): calling this twice with the same name attaches a second
    handler to the same logger, duplicating output — confirm each name is
    only requested once.
    """
    # Configure log output.
    logging.root.setLevel(logging.NOTSET)
    logger = logging.getLogger(name)
    handle_out = logging.StreamHandler(sys.stdout)
    handle_out.setLevel(LOG_LEVEL)
    # Terminator is '' because CustomFormatter supplies its own line endings.
    handle_out.terminator = ''
    formatter = CustomFormatter()
    handle_out.setFormatter(formatter)
    logger.addHandler(handle_out)
    return logger
@dataclass
class String:
    """One translatable entry (a ParaTranz "string")."""
    key: str
    original: str
    translation: str
    stage: int = 0  # translation status: 0 untranslated, 1 translated, 2 questioned, 3 proofread, 5 reviewed (second pass), 9 locked, -1 hidden
    context: str = ''  # remark/notes attached to the entry

    def __post_init__(self):
        # JSON exported from ParaTranz escapes newlines as \\n, so convert
        # them back here. JSON produced by this program should not contain
        # \\n — a literal \\n in the original text is represented as ^n.
        self.original = self.original.replace('\\n', '\n')
        self.translation = self.translation.replace('\\n', '\n')

    def as_dict(self) -> Dict:
        """Return this entry as a plain dict (via dataclasses.asdict)."""
        return dataclasses.asdict(self)
class DataFile:
    """Base class for a translatable data file: ties together a game file,
    its translated copy, and its ParaTranz JSON export. Subclasses
    implement the parsing/serialization hooks that raise
    NotImplementedError here."""
    logger = make_logger('util.py - DataFile')

    def __init__(self, path: Union[str, Path], type: str, original_path: Path = None, translation_path: Path = None):
        self.path = Path(path)  # path relative to the "original" or "localization" folder
        self.original_path = ORIGINAL_PATH / Path(original_path if original_path else path)
        self.translation_path = TRANSLATION_PATH / Path(
            translation_path if translation_path else path)
        self.para_tranz_path = PARA_TRANZ_PATH / self.path.with_suffix('.json')

    def get_strings(self) -> List[String]:
        # Subclass hook: extract all translatable entries from the file.
        raise NotImplementedError

    def update_strings(self, strings: List[String], version_migration: bool = False) -> None:
        # Subclass hook: merge translated entries back into the file data.
        raise NotImplementedError

    def save_json(self, ensure_ascii=False, indent=4) -> None:
        """Export this file's entries to the ParaTranz JSON file, keeping
        previously recorded translation stages when configured to."""
        strings = [s for s in self.get_strings() if s.original]  # only export entries with non-empty original text
        # If the ParaTranz json already exists, carry over the stage of any
        # entry that has a meaningful (non-zero) status there.
        if not OVERRIDE_STRING_STATUS and self.para_tranz_path.exists():
            self.logger.info(
                f"Paratranz 平台数据文件 {relative_path(self.para_tranz_path)} 已存在,从中读取已翻译词条的词条stage状态")
            special_stages = (1, 2, 3, 5, 9, -1)
            para_strings = self.read_json_strings(self.para_tranz_path)
            para_key_strings = {s.key: s for s in para_strings if
                                s.stage in special_stages}  # type:Dict[str, String]
            for s in strings:
                if s.key in para_key_strings:
                    para_s = para_key_strings[s.key]
                    if s.stage != para_s.stage:
                        self.logger.debug(f"更新词条 {s.key} 的stage:{s.stage}->{para_s.stage}")
                        s.stage = para_s.stage
        self.write_json_strings(self.para_tranz_path, strings, ensure_ascii, indent)
        self.logger.info(
            f'从 {relative_path(self.path)} 中导出了 {len(strings)} 个词条到 {relative_path(self.para_tranz_path)}')

    def update_from_json(self, version_migration: bool = False) -> None:
        """
        Read entry data from the ParaTranz json file and merge its
        translations into the current data.
        :return:
        """
        if self.para_tranz_path.exists():
            strings = self.read_json_strings(self.para_tranz_path)
            self.update_strings(strings, version_migration)
            self.logger.info(
                f'从 {relative_path(self.para_tranz_path)} 加载了 {len(strings)} 个词条到 {relative_path(self.translation_path)}')
        else:
            self.logger.warning(f'未找到 {self.path} 所对应的 ParaTranz 数据 ({self.para_tranz_path}),未更新词条')

    def save_file(self) -> None:
        # Subclass hook: write the translated file back to disk.
        raise NotImplementedError

    def load_from_file(self) -> None:
        # Subclass hook: (re)load the file contents from disk.
        raise NotImplementedError

    @classmethod
    def load_files_from_config(cls) -> List['DataFile']:
        # Subclass hook: build DataFile instances from the project config.
        raise NotImplementedError

    @staticmethod
    def read_json_strings(path: Path) -> List[String]:
        """Load String entries from a ParaTranz-format JSON file."""
        strings = []
        with open(path, 'r', encoding='utf-8') as f:
            data = json.load(f)  # type:List[Dict]
            for d in data:
                strings.append(
                    String(d['key'], d['original'], d.get('translation', ''), d['stage'], d.get('context', '')))
        return strings

    @staticmethod
    def write_json_strings(path: Path, strings: List[String], ensure_ascii=False, indent=4) -> None:
        """Write String entries to *path* as ParaTranz-format JSON,
        creating parent directories when needed."""
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, 'w', encoding='utf-8') as f:
            data = []
            for string in strings:
                data.append(string.as_dict())
            json.dump(data, f, ensure_ascii=ensure_ascii, indent=indent)
# https://segmentfault.com/a/1190000017940752
# Membership tests for CJK / ASCII-letter characters.
def contains_chinese(s: str) -> bool:
    """Return True if *s* contains at least one character in the CJK
    Unified Ideographs range (U+4E00..U+9FA5)."""
    return any('\u4e00' <= ch <= '\u9fa5' for ch in s)
def contains_english(s: str) -> bool:
    """Return True if *s* contains at least one ASCII letter (a-z, A-Z)."""
    return any(ch.isascii() and ch.isalpha() for ch in s)
# From processWithWiredChars.py
# The original game files may contain Windows-1252-encoded characters
# (curly quotes, dashes, ellipsis) that surface as lone surrogates after
# decoding; map them to plain ASCII equivalents.
def replace_weird_chars(s: str) -> str:
    """Replace surrogate leftovers of Windows-1252 punctuation in *s*."""
    table = str.maketrans({
        '\udc94': '""',   # right double quote
        '\udc93': '""',   # left double quote
        '\udc92': "'",    # right single quote
        '\udc91': "'",    # left single quote
        '\udc96': '-',    # en dash
        '\udc85': '...',  # horizontal ellipsis
    })
    return s.translate(table)
def normalize_class_path(class_path: str) -> str:
    """
    Collapse the obfuscation-prone pieces of a class path into a canonical
    form so paths can be fuzzy-matched across game versions.
    """
    def canon(seg: str) -> str:
        # Runs of O/o/0 (typical obfuscator output) collapse to 'O'.
        if re.fullmatch(r'[Oo0]+', seg):
            return 'O'
        # Any single alphanumeric becomes the placeholder 'X'.
        if re.fullmatch(r'[a-zA-Z0-9]', seg):
            return 'X'
        # CamelCase names keep only their initials (uppercase + digits).
        if re.fullmatch(r'([A-Z][a-z0-9]*)+', seg):
            return ''.join(ch for ch in seg if ch.isupper() or ch.isdigit()).lower()
        return seg

    parts = class_path.split('/')
    pieces = parts[-1].removesuffix('.class').split('$')
    leaf = canon(pieces[0])
    if len(pieces) > 1:
        # Only the first nested-class segment participates, as before.
        leaf = leaf + '$' + canon(pieces[1])
    leaf += '.class'
    return '/'.join([canon(p) for p in parts[:-1]] + [leaf])
def url_encode(s: str) -> str:
    """Percent-encode *s* for safe inclusion in a URL (urllib quoting,
    '/' left unescaped by default)."""
    return urllib.parse.quote(s)
class SetEncoder(json.JSONEncoder):
    """JSON encoder that serializes sets as sorted lists.

    From: https://stackoverflow.com/questions/8230315/how-to-json-serialize-sets
    """

    def default(self, obj):
        # Sets are not JSON-serializable; emit them sorted so output is
        # deterministic and diff-friendly.
        if isinstance(obj, set):
            return sorted(obj)
        return super().default(obj)
if __name__ == '__main__':
    # Ad-hoc smoke test for url_encode. (Earlier experiments calling
    # normalize_class_path on obfuscated and plain paths were left here,
    # commented out.)
    print(url_encode('submarkets.csv#storage$name'))
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "para_tranz.utils.config.PROJECT_DIRECTORY",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "logging.Formatter",
"line_number": 22,
"usage_type": "attribute"
},
{
... |
21104357591 | # -*- coding: utf-8 -*-
# This program ensures that the last lines in a set of OBS markdown files are italicized.
import re # regular expression module
import io
import os
import string
import sys
import shutil
# Globals
source_dir = r'C:\DCS\Russian\ru_obs.STR\content'
# Inserts underscores at beginning and end of line.
# If line already has asterisk in place of underscores, change them.
def fixline(line):
    """Return *line* stripped, wrapped in underscores, with a newline.

    Leading/trailing asterisk emphasis is converted to underscores; an
    already-underscored line passes through unchanged (plus the newline).
    """
    line = line.strip()
    if not line:
        # Robustness fix: the original indexed line[0] and raised
        # IndexError on an empty/whitespace-only line.
        return '\n'
    if line[0] == '*':
        line = '_' + line[1:]
    if line[-1] == '*':
        line = line[:-1] + '_'
    if line[0] != '_':
        line = '_' + line
    if line[-1] != '_':
        line = line + '_'
    return line + '\n'
# Ensure the last line is properly italicized
def convertFile(path, folder):
    """Italicize the last non-empty line of the markdown file at *path*.

    If that line is already wrapped in underscores the file is left
    untouched. Otherwise the original file is kept once as "<path>.orig"
    and the file is rewritten with the last line passed through fixline().
    (*folder* is unused but kept for the caller's signature.)
    """
    # Idiom fix: files are now closed via context managers instead of
    # relying on explicit close() calls.
    with io.open(path, "tr", encoding="utf-8-sig") as mdfile:
        lines = mdfile.readlines()

    # Scan backwards for the last non-empty line; iLast ends up as its
    # negative index.
    italicized = False
    count = len(lines)
    iLast = 0
    while -iLast < count:
        iLast -= 1
        line = lines[iLast].strip()
        if len(line) > 0:
            italicized = (line[0] == '_' and line[-1] == '_')
            break

    if -iLast <= count and not italicized:
        bakpath = path + ".orig"
        if not os.path.isfile(bakpath):
            os.rename(path, bakpath)
        with io.open(path, "tw", encoding='utf-8', newline='\n') as output:
            # Copy every line verbatim except the one found above, which is
            # rewritten through fixline().
            for i, line in enumerate(lines):
                if i - iLast != count:
                    output.write(line)
                else:
                    output.write(fixline(line))
filename_re = re.compile(r'[\d][\d]\.md$')
# Creates content folder if needed.
# Calls convertStory to merge and convert one folder (one story) at a time.
def convertFolder(folder):
    """Recursively walk *folder*, italicizing the last line of each NN.md file."""
    for name in os.listdir(folder):
        # Skip hidden entries such as .git.
        if name.startswith('.'):
            continue
        full = os.path.join(folder, name)
        if os.path.isdir(full):
            convertFolder(full)
        elif filename_re.match(name):
            convertFile(full, folder)
# Processes all markdown story files in the specified directory, one at a time
if __name__ == "__main__":
    # An optional CLI argument overrides the hard-coded source_dir default.
    if len(sys.argv) > 1 and sys.argv[1] != 'hard-coded-path':
        source_dir = sys.argv[1]
    if source_dir and os.path.isdir(source_dir):
        convertFolder(source_dir)
        print("Done.")
    # A single NN.md file may be given instead of a folder.
    elif os.path.isfile(source_dir) and filename_re.search(source_dir):
        path = source_dir
        source_dir = os.path.dirname(path)
        current_dir = source_dir
        convertFile(path, source_dir)
        print("Done.")
    else:
        sys.stderr.write("Usage: python obs_italicize_last_line.py <folder>\n Use . for current folder or hard code the path.\n")
| unfoldingWord-dev/tools | md/obs_italicize_last_line.py | obs_italicize_last_line.py | py | 2,684 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "io.open",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.rename",
"line_number": 47... |
6034178664 | from typing import List, MutableMapping
"""
Summary: The trick is - the problem is the duplicate of the original except for
if you steal from the first house, you can't steal from the last. So, 2 cases:
stole from the first or not. Then, the problem is identical to the original one
_______________________________________________________________________________
https://leetcode.com/problems/house-robber-ii/
You are a professional robber planning to rob houses along a street. Each
house has a certain amount of money stashed. All houses at this place are
arranged in a circle. That means the first house is the neighbor of the last
one. Meanwhile, adjacent houses have a security system connected, and it will
automatically contact the police if two adjacent houses were broken into on
the same night.
Given an integer array nums representing the amount of money of each house,
return the maximum amount of money you can rob tonight without alerting the
police.
Example 1:
Input: nums = [2,3,2]
Output: 3
Explanation: You cannot rob house 1 (money = 2) and then rob house 3
(money = 2), because they are adjacent houses.
Example 2:
Input: nums = [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4.
Example 3:
Input: nums = [1,2,3]
Output: 3
"""
class Solution:
    """Solutions for LeetCode 213 (House Robber II).

    Houses form a circle, so the first and last house are adjacent and cannot
    both be robbed.  The trick: either house 0 is robbed (making the last
    house off limits) or it is not -- each case reduces to the linear
    House Robber problem.

    NOTE: the original file defined ``rob`` three times, so only the last
    (bottom-up) version was reachable; the alternative strategies are kept
    here under distinct names while ``rob`` remains the public entry point
    with unchanged behavior.
    """

    def rob_divide_conquer(self, nums: List[int]) -> int:
        """Plain recursion (exponential time); kept for reference."""
        def _rob_houses(
            house_index: int, houses: List[int], first_stolen: bool
        ) -> int:
            # If house 0 was robbed, the last house is unavailable.
            if first_stolen:
                if house_index >= length - 1:
                    return 0
            else:
                if house_index >= length:
                    return 0
            steal_current = (
                houses[house_index]
                + _rob_houses(house_index + 2, houses, first_stolen)
            )
            steal_next = _rob_houses(house_index + 1, houses, first_stolen)
            return max(steal_current, steal_next)

        length = len(nums)
        if length == 1:
            return nums[0]
        return max(_rob_houses(0, nums, True), _rob_houses(1, nums, False))

    def rob_memoized(self, nums: List[int]) -> int:
        """Top-down DP: recursion memoized on (house_index, first_stolen)."""
        Cache = MutableMapping[tuple, int]

        def _rob_houses(
            house_index: int,
            houses: List[int],
            first_stolen: bool,
            cache: Cache
        ) -> int:
            if first_stolen:
                if house_index >= length - 1:
                    return 0
            else:
                if house_index >= length:
                    return 0
            if (house_index, first_stolen) in cache:
                return cache[(house_index, first_stolen)]
            steal_current = (
                houses[house_index]
                + _rob_houses(house_index + 2, houses, first_stolen, cache)
            )
            steal_next = (
                _rob_houses(house_index + 1, houses, first_stolen, cache)
            )
            max_steal = max(steal_current, steal_next)
            cache[(house_index, first_stolen)] = max_steal
            return max_steal

        length = len(nums)
        if length == 1:
            return nums[0]
        cache: dict = {}
        return max(
            _rob_houses(0, nums, True, cache),
            _rob_houses(1, nums, False, cache)
        )

    def rob(self, nums: List[int]) -> int:
        """Bottom-up DP, O(n) time / O(1) space.

        Answer is the better of robbing houses [0..n-2] or [1..n-1].
        """
        # `not nums` also guards against None, which the original checked
        # only *after* calling len() and would therefore have crashed on.
        if not nums:
            return 0
        if len(nums) == 1:
            return nums[0]
        return max(self.rob_simple(nums[:-1]), self.rob_simple(nums[1:]))

    def rob_simple(self, nums: List[int]) -> int:
        """Linear House Robber: maximum sum of non-adjacent elements."""
        t1 = 0  # best total considering houses up to the current one
        t2 = 0  # best total up to two houses back
        for current in nums:
            temp = t1
            t1 = max(current + t2, t1)
            t2 = temp
        return t1
def main():
    """Placeholder entry point; the Solution class above is meant for LeetCode."""
    pass
if __name__ == '__main__':
    main()
| EvgeniiTitov/coding-practice | coding_practice/sample_problems/leet_code/medium/greedy-dynamic-backtracking/213_house_robber_2.py | 213_house_robber_2.py | py | 3,911 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "typing.MutableMapping",
"line... |
71006897315 | import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
from cartopy.feature import ShapelyFeature
import cartopy.feature as cfeature
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from plot_prec_comparison import plot_map
# Add lat lon to map figure
def set_lat_lon(ax, xtickrange, ytickrange, label=False,pad=0.05, fontsize=8,pr=ccrs.PlateCarree()):
    """Draw longitude/latitude ticks (and optionally labels) on a cartopy axes.

    When label is True, x labels are placed along the *top* edge of the axes.
    NOTE(review): the default `pr=ccrs.PlateCarree()` is evaluated once at
    import time and shared across calls -- confirm this is intended.
    """
    # Degree symbols suppressed so tick labels are plain numbers.
    lon_formatter = LongitudeFormatter(zero_direction_label=True, degree_symbol='')
    lat_formatter = LatitudeFormatter(degree_symbol='')
    ax.set_yticks(ytickrange, crs=pr)
    ax.set_xticks(xtickrange, crs=pr)
    if label:
        ax.set_xticklabels(xtickrange,fontsize=fontsize)
        ax.set_yticklabels(ytickrange,fontsize=fontsize)
        # Labeled ticks go on the top edge; bottom ticks are hidden.
        ax.tick_params(axis='x', which='both', direction='out', bottom=False, top=True,labeltop=True,labelbottom=False, pad=pad)
        ax.tick_params(axis='y', which='both', direction='out', pad=pad)
    else:
        # Tick marks only, no labels.
        ax.tick_params(axis='x', which='both', direction='out', bottom=True, top=False, labeltop=False, labelleft=False, labelbottom=False)
        ax.tick_params(axis='y', which='both', direction='out', left=True, labelleft=False)
    ax.xaxis.set_major_formatter(lon_formatter)
    ax.yaxis.set_major_formatter(lat_formatter)
    ax.set_ylabel('')
    ax.set_xlabel('')
def make_plot():
    """Plot the upwind precipitation-source map for China's drylands and save it as PNG."""
    # Load data (annual cycle of evaporation-to-precipitation contributions).
    s = xr.open_dataset('../data/results/china_dryland_prec_source.nc')
    levels=[0.001,0.01,0.1,0.5,1,2]
    # define map projection
    pr=ccrs.PlateCarree()
    fig = plt.figure(figsize=[6, 4])
    ax1 = fig.add_axes([0, 0, 1, 1], projection=pr)
    # Annual sum, masked to cells contributing more than 0.001 over the year.
    im1=plot_map(s.e_to_prec.where(s.e_to_prec.sum(dim='month')>0.001).sum(dim='month'),
                 ax=ax1, levels=levels, cmap='Blues',extent=[0, 150, 10, 80])
    ax1.spines['top'].set_visible(False)
    ax1.spines['right'].set_visible(False)
    ax1.set_title('Upwind source region of precipitation in China\'s drylands', fontsize=12)
    set_lat_lon(ax1, range(0,151,30), range(10,81,20), label=True, pad=0.05, fontsize=10)
    # Add colorbar to big plot, positioned just below the map axes.
    cbarbig1_pos = [ax1.get_position().x0, ax1.get_position().y0-0.03, ax1.get_position().width, 0.02]
    caxbig1 = fig.add_axes(cbarbig1_pos)
    cb = plt.colorbar(im1, orientation="horizontal", pad=0.15,cax=caxbig1,extend='neither',
                      ticks=levels)
    cb.set_label(label='Precipitation contribution (mm/yr)')
    plt.savefig('../figure/fig_china_dryland_prec_source.png',dpi=300,bbox_inches='tight')
    print('figure saved')
# Run as a script: render and save the figure.
if __name__=="__main__":
    make_plot()
| pkmn99/dryland_moisture | code/plot_china_dryland_source.py | plot_china_dryland_source.py | py | 2,696 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cartopy.crs.PlateCarree",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cartopy.crs",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "cartopy.mpl.ticker.LongitudeFormatter",
"line_number": 15,
"usage_type": "call"
},
{
"api_nam... |
41060910588 | import json
import os
import re
import sys
import gzip
import math
import hashlib
import logging
import portalocker
from collections import defaultdict
from typing import List, Optional, Sequence, Dict
from argparse import Namespace
from tabulate import tabulate
import colorama
# Where to store downloaded test sets.
# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu.
#
# Querying for a HOME environment variable can result in None (e.g., on Windows)
# in which case the os.path.join() throws a TypeError. Using expanduser() is
# a safe way to get the user's home folder.
USERHOME = os.path.expanduser("~")
# Download cache root: $SACREBLEU if set, otherwise ~/.sacrebleu.
SACREBLEU_DIR = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))
# Module-level logger shared by the helpers below.
sacrelogger = logging.getLogger('sacrebleu')
class Color:
    """ANSI terminal-color helper; colorization honors the global switch below."""
    ENABLE_COLORS = True

    @staticmethod
    def format(msg: str, color: str) -> str:
        """Return a colored version of the given message string.

        :param msg: The string to Color.format.
        :param color: The color specifier i.e. 'red', 'blue', 'green', etc.
        :return: A colored version of the string if the output is a terminal.
        """
        if not Color.ENABLE_COLORS:
            return msg
        # Unknown color names fall back to the uncolored message.
        ansi_prefix = getattr(colorama.Fore, color.upper(), None)
        return f'{ansi_prefix}{msg}{colorama.Style.RESET_ALL}' if ansi_prefix else msg
def _format_score_lines(scores: dict,
                        width: int = 2,
                        multiline: bool = True) -> Dict[str, List[str]]:
    """Formats the scores prior to tabulating them.

    NOTE: ``scores.pop('System')`` mutates the caller's dict.
    Non-string values are assumed to be result objects exposing
    .score/.mean/.ci/.p_value -- verify against the metrics module.
    """
    new_scores = {'System': scores.pop('System')}
    # p-values go on their own line in fancy_grid tables, inline otherwise.
    p_val_break_char = '\n' if multiline else ' '
    is_bootstrap = False
    def _color_p_value(p: float):
        msg = f'(p = {p:.4f})'
        # Insignificant p-values are highlighted in red; significant get '*'.
        if p > 0.05:
            return Color.format(msg, 'red')
        return msg + '*'
    for metric, vals in scores.items():
        new_vals = []
        for result in vals:
            if not isinstance(result, str):
                # Format result instances
                _str = f'{result.score:.{width}f}'
                if result.mean is not None:
                    is_bootstrap = True
                    _str += f' ({result.mean:.{width}f} ± {result.ci:.{width}f})'
                if result.p_value is not None:
                    _str += p_val_break_char + _color_p_value(result.p_value)
            else:
                # Already formatted in non paired-test mode
                _str = result
            new_vals.append(_str)
        if is_bootstrap:
            # Change titles
            metric += ' (μ ± 95% CI)'
        new_scores[metric] = new_vals
    return new_scores
def print_results_table(results: dict, signatures: dict, args: Namespace):
    """Prints out a nicely formatted table for multi-system evaluation mode.

    :param results: Column-oriented dict: 'System' plus one key per metric.
    :param signatures: Metric name -> signature string, printed as a footer.
    :param args: Parsed CLI namespace (format, width, paired_* flags).
    """
    if args.format == 'json':
        proper_json = []
        dict_keys = list(results.keys())
        for i in range(len(results['System'])):
            value = {}
            value['system'] = results['System'][i]
            # parse metrics
            for j in range(1, len(dict_keys)):
                if isinstance(results[dict_keys[j]][i], str):
                    value[dict_keys[j]] = results[dict_keys[j]][i]
                else:
                    # Values inside object as dict
                    value[dict_keys[j]] = results[dict_keys[j]][i].__dict__
            proper_json.append(value)
        print(json.dumps(proper_json, indent=4))
        return
    tablefmt = args.format
    # BUG FIX: the original tested `tablefmt in ('text')`, which is a
    # *substring* check against the string 'text' (missing trailing comma),
    # so e.g. 'ex' would also have matched.
    if tablefmt == 'text':
        tablefmt = 'fancy_grid'
    elif tablefmt == 'latex':
        # Use booktabs
        tablefmt = 'latex_booktabs'
    # If paired testing has been given, this'll format the score lines
    results = _format_score_lines(
        results, args.width, multiline=tablefmt == 'fancy_grid')
    new_dict = {}
    # Color the column names and the baseline system name and scores
    has_baseline = False
    baseline_name = ''
    for name in results.keys():
        val = results[name]
        if val[0].startswith('Baseline:') or has_baseline:
            if val[0].startswith('Baseline:'):
                baseline_name = val[0]
            has_baseline = True
            val[0] = Color.format(val[0], 'yellow')
        new_dict[Color.format(name, 'cyan')] = results[name]
    # Finally tabulate
    table = tabulate(
        new_dict, headers='keys', tablefmt=tablefmt,
        colalign=('right', ),
        stralign='center',
        numalign='center',
        floatfmt=f'.{args.width}f')
    print(table)
    print()
    is_paired = args.paired_bs or args.paired_ar
    if is_paired:
        test_type = 'bootstrap resampling' if args.paired_bs else 'approximate randomization'
        n_samples_or_trials = args.paired_bs_n if args.paired_bs else args.paired_ar_n
        test_sample_type = 'resampling trials' if args.paired_bs else 'trials'
        msg = f'Paired {test_type} test with {n_samples_or_trials} {test_sample_type}'
        bline = Color.format('baseline', 'yellow')
        bline_name = Color.format(baseline_name, 'yellow')
        null_hyp = Color.format('Null hypothesis', 'green')
        pval_color = Color.format('highlighted in red', 'red')
        # Print fancy header
        print('-' * len(msg) + '\n' + msg + '\n' + '-' * len(msg))
        print(f' - Each system is pairwise compared to {bline_name}.')
        if args.paired_bs:
            print(' Actual system score / bootstrap estimated true mean / 95% CI are provided for each metric.')
        else:
            print(' Actual system score is provided for each metric.')
        print()
        print(f' - {null_hyp}: the system and the {bline} translations are essentially')
        print(f' generated by the same underlying process. For a given system and the {bline},')
        print(' the p-value is roughly the probability of the absolute score difference (delta)')
        print(f' or higher occurring due to chance, under the assumption that the {null_hyp.lower()} is correct.')
        print()
        print(f' - Assuming a significance threshold of 0.05, the {null_hyp.lower()} can be rejected')
        print(' for p-values < 0.05 (marked with "*"). This means that the delta is unlikely to be attributed')
        print(f' to chance, hence the system is significantly "different" than the {bline}.')
        print(f' Otherwise, the p-values are {pval_color}.')
        print()
        print(f' - NOTE: Significance does not tell whether a system is "better" than the {bline} but rather')
        print(' emphasizes the "difference" of the systems in terms of the replicability of the delta.')
        print()
    print('-----------------')
    print('Metric signatures')
    print('-----------------')
    for name, sig in signatures.items():
        print(f' - {name:<10} {sig}')
def print_single_results(results: List[str], args: Namespace):
    """Re-process metric strings to align them nicely.

    :param results: One pre-formatted score line (or JSON string) per metric.
    :param args: Parsed CLI namespace; only `format` is consulted here.
    """
    if args.format == 'json':
        if len(results) > 1:
            # Wrap the individual JSON objects into a JSON array.
            proper_json = '[\n' + ',\n'.join(results) + '\n]'
            print(proper_json)
        else:
            print(results[0])
        return
    # Color confidence strings for emphasis
    if 'μ' in results[0]:
        color_re = re.compile(r'(\(μ = [0-9\.]+ ± [0-9\.]+\))')
        for idx in range(len(results)):
            results[idx] = color_re.sub(
                lambda m: Color.format(m.group(), 'cyan'), results[idx])
    if len(results) == 1:
        # Just one system, nothing to align.
        print(results[0])
        return
    # Align by '=' character
    lens = []
    for line in results:
        # If not score_only, split lines from '=' for re-alignment
        try:
            lens.append(line.index('=') - 1)
        except ValueError:
            # Lines without '=' (score-only output) are printed as-is.
            print(line)
    if len(lens) > 0:
        w = max(lens)
        for (_len, line) in zip(lens, results):
            left, right = line[:_len], line[_len:]
            print(f'{left:>{w}}{right}')
def sanity_check_lengths(system: Sequence[str],
                         refs: Sequence[Sequence[str]],
                         test_set: Optional[str] = None):
    """Exit the process if any reference stream's length differs from the system's."""
    expected = len(system)
    if all(len(ref_stream) == expected for ref_stream in refs):
        return
    sacrelogger.error("System and reference streams have different lengths.")
    if test_set:
        sacrelogger.error("This could be an issue with your system output "
                          "or with sacreBLEU's reference database if -t is given.")
        sacrelogger.error("For the latter, try cleaning out the cache by typing:\n")
        sacrelogger.error(f"  rm -r {SACREBLEU_DIR}/{test_set}\n")
        sacrelogger.error("The test sets will be re-downloaded the next time you run sacreBLEU.")
    sys.exit(1)
def smart_open(file, mode='rt', encoding='utf-8'):
    """Convenience function for reading compressed or plain text files.

    :param file: The file to read.
    :param mode: The file mode (read, write).
    :param encoding: The file encoding.
    """
    # Pick the opener by extension; both accept the same keyword arguments.
    opener = gzip.open if file.endswith('.gz') else open
    return opener(file, mode=mode, encoding=encoding, newline="\n")
def my_log(num: float) -> float:
    """
    Floors the log function

    :param num: the number
    :return: log(num) floored to a very low number
    """
    # Avoid math.log(0) blowing up; return a sentinel "negative infinity".
    return -9999999999 if num == 0.0 else math.log(num)
def sum_of_lists(lists):
    """Aggregates list of numeric lists by summing (element-wise)."""
    # Single input: hand back the list itself, exactly as before.
    if len(lists) == 1:
        return lists[0]
    # zip(*lists) yields one tuple per position; summing each column
    # reproduces the element-wise accumulation of the original loop.
    return [sum(column) for column in zip(*lists)]
def args_to_dict(args, prefix: str, strip_prefix: bool = False):
    """Filters argparse's `Namespace` into dictionary with arguments
    beginning with the given prefix.

    :param args: An argparse.Namespace (any object with ``__dict__``).
    :param prefix: Keep only attributes named ``<prefix>_...``.
    :param strip_prefix: If True, drop the leading ``<prefix>_`` from keys.
    :return: dict of the selected attributes.
    """
    prefix += '_'
    d = {}
    for k, v in args.__dict__.items():
        if k.startswith(prefix):
            # BUG FIX: slice instead of str.replace() -- replace() also
            # deleted any *later* occurrence of the prefix inside the key
            # (e.g. 'foo_foo_b' became 'b' instead of 'foo_b').
            k = k[len(prefix):] if strip_prefix else k
            d[k] = v
    return d
def print_test_set(test_set, langpair, requested_fields, origlang=None, subset=None):
    """Prints to STDOUT the specified side of the specified test set.

    :param test_set: the test set to print
    :param langpair: the language pair
    :param requested_fields: the fields to print
    :param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation
    :param subset: print only sentences whose document annotation matches a given regex
    """
    if test_set not in DATASETS:
        raise Exception(f"No such test set {test_set}")
    fieldnames = DATASETS[test_set].fieldnames(langpair)
    all_files = DATASETS[test_set].get_files(langpair)
    # "all" expands to every available field, but may not be combined
    # with explicit field names.
    if "all" in requested_fields and len(requested_fields) != 1:
        sacrelogger.error("Cannot use --echo all with other fields")
        sys.exit(1)
    elif "all" in requested_fields:
        requested_fields = fieldnames
    # backwards compatibility: allow "ref" even if not present (choose first)
    if "ref" in requested_fields and "ref" not in fieldnames:
        replacement_ref = min([f for f in fieldnames if f.startswith("ref")])
        requested_fields = [f if f != "ref" else replacement_ref for f in requested_fields]
    files = []
    for field in requested_fields:
        if field not in fieldnames:
            sacrelogger.error(f"No such field {field} in test set {test_set} for language pair {langpair}.")
            sacrelogger.error(f"available fields for {test_set}/{langpair}: {', '.join(fieldnames)}")
            if "ref" not in fieldnames:
                subref = min([f for f in fieldnames if f.startswith("ref")])
                sacrelogger.error(f"'ref' also allowed for backwards compatibility (will return {subref})")
            sys.exit(1)
        index = fieldnames.index(field)
        files.append(all_files[index])
    # Open all requested streams, apply origlang/subset filtering, and
    # emit one tab-separated line per sentence.
    streams = [smart_open(file) for file in files]
    streams = filter_subset(streams, test_set, langpair, origlang, subset)
    for lines in zip(*streams):
        print('\t'.join(map(lambda x: x.rstrip(), lines)))
def get_source_file(test_set: str, langpair: str) -> str:
    """
    Returns the source file for a given testset/langpair.
    Downloads it first if it is not already local.

    :param test_set: The test set (e.g., "wmt19")
    :param langpair: The language pair (e.g., "de-en")
    :return: the path to the requested source file
    """
    if test_set in DATASETS:
        return DATASETS[test_set].get_source_file(langpair)
    raise Exception(f"No such test set {test_set}")
def get_reference_files(test_set: str, langpair: str) -> List[str]:
    """
    Returns a list of one or more reference file paths for the given testset/langpair.
    Downloads the references first if they are not already local.

    :param test_set: The test set (e.g., "wmt19")
    :param langpair: The language pair (e.g., "de-en")
    :return: a list of one or more reference file paths
    """
    if test_set in DATASETS:
        return DATASETS[test_set].get_reference_files(langpair)
    raise Exception(f"No such test set {test_set}")
def get_files(test_set, langpair) -> List[str]:
    """
    Returns the path of the source file and all reference files for
    the provided test set / language pair.
    Downloads the references first if they are not already local.

    :param test_set: The test set (e.g., "wmt19")
    :param langpair: The language pair (e.g., "de-en")
    :return: a list of the source file and all reference files
    """
    if test_set in DATASETS:
        return DATASETS[test_set].get_files(langpair)
    raise Exception(f"No such test set {test_set}")
def extract_tarball(filepath, destdir):
    """Extract a .tar.gz/.tgz or .zip archive at *filepath* into *destdir*.

    Archives with other extensions are silently ignored.
    """
    sacrelogger.info(f'Extracting {filepath} to {destdir}')
    if filepath.endswith('.tar.gz') or filepath.endswith('.tgz'):
        import tarfile
        # SECURITY NOTE: extractall trusts member names; only use on
        # archives from trusted sacreBLEU mirrors.
        with tarfile.open(filepath) as tar:
            tar.extractall(path=destdir)
    elif filepath.endswith('.zip'):
        import zipfile
        # BUG FIX: the original did `as zipfile`, shadowing the module it
        # had just imported; bind the archive to a fresh name instead.
        with zipfile.ZipFile(filepath, 'r') as archive:
            archive.extractall(path=destdir)
def get_md5sum(dest_path):
    """Return the hexadecimal MD5 digest of the file at *dest_path*."""
    digest = hashlib.md5()
    with open(dest_path, 'rb') as fh:
        # Feed the file through in fixed-size binary chunks.
        for block in iter(lambda: fh.read(65536), b''):
            digest.update(block)
    return digest.hexdigest()
def download_file(source_path, dest_path, extract_to=None, expected_md5=None):
    """Downloading utility.

    Downloads the specified test to the system location specified by the SACREBLEU environment variable.

    :param source_path: the remote uri to download
    :param dest_path: where to save the file
    :param extract_to: for tarballs, where to extract to
    :param expected_md5: the MD5 sum
    :return: the set of processed file names
    """
    import urllib.request
    import ssl
    outdir = os.path.dirname(dest_path)
    os.makedirs(outdir, exist_ok=True)
    # Make sure to open in mode "a"
    # A lock file serializes concurrent sacreBLEU processes downloading
    # the same resource.
    lockfile = f"{dest_path}.lock"
    with portalocker.Lock(lockfile, timeout=60):
        # Skip the download if a non-empty copy already exists.
        if not os.path.exists(dest_path) or os.path.getsize(dest_path) == 0:
            sacrelogger.info(f"Downloading {source_path} to {dest_path}")
            # NOTE(review): this md5 object is never used; the checksum
            # below is recomputed from disk by get_md5sum().
            md5 = hashlib.md5()
            try:
                with urllib.request.urlopen(source_path) as f, open(dest_path, 'wb') as out:
                    out.write(f.read())
            except ssl.SSLError:
                sacrelogger.error('An SSL error was encountered in downloading the files. If you\'re on a Mac, '
                                  'you may need to run the "Install Certificates.command" file located in the '
                                  '"Python 3" folder, often found under /Applications')
                sys.exit(1)
            if expected_md5 is not None:
                cur_md5 = get_md5sum(dest_path)
                if cur_md5 != expected_md5:
                    sacrelogger.error(f'Fatal: MD5 sum of downloaded file was incorrect (got {cur_md5}, expected {expected_md5}).')
                    sacrelogger.error(f'Please manually delete {dest_path!r} and rerun the command.')
                    sacrelogger.error(f'If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')
                    sys.exit(1)
            # Extract the tarball
            if extract_to is not None:
                extract_tarball(dest_path, extract_to)
def download_test_set(test_set, langpair=None):
    """Downloads the specified test to the system location specified by the SACREBLEU environment variable.

    :param test_set: the test set to download
    :param langpair: the language pair (needed for some datasets)
    :return: the set of processed file names
    """
    if test_set in DATASETS:
        return DATASETS[test_set].get_files(langpair)
    raise Exception(f"No such test set {test_set}")
def get_langpairs_for_testset(testset: str) -> List[str]:
    """Return a list of language pairs for a given test set."""
    # Iterating the langpairs mapping yields its keys.
    return list(DATASETS[testset].langpairs) if testset in DATASETS else []
def get_available_testsets() -> List[str]:
    """Return a list of available test sets."""
    # Iterating the mapping yields its keys; newest names sort first.
    return sorted(DATASETS, reverse=True)
def get_available_testsets_for_langpair(langpair: str) -> List[str]:
    """Return a list of available test sets for a given language pair"""
    pieces = langpair.split('-')
    # Accept the pair in either direction.
    forward = f'{pieces[0]}-{pieces[1]}'
    backward = f'{pieces[1]}-{pieces[0]}'
    return [dataset.name for dataset in DATASETS.values()
            if forward in dataset.langpairs or backward in dataset.langpairs]
def get_available_origlangs(test_sets, langpair) -> List[str]:
    """Return a list of origlang values in according to the raw SGM files.

    :param test_sets: comma-separated test set names (or None).
    :param langpair: the language pair whose raw files are scanned.
    """
    if test_sets is None:
        return []
    origlangs = set()
    for test_set in test_sets.split(','):
        dataset = DATASETS[test_set]
        rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', dataset.langpairs[langpair][0])
        # Only raw SGM files carry origlang document annotations.
        if rawfile.endswith('.sgm'):
            with smart_open(rawfile) as fin:
                for line in fin:
                    if line.startswith('<doc '):
                        doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
                        origlangs.add(doc_origlang)
    return sorted(list(origlangs))
def filter_subset(systems, test_sets, langpair, origlang, subset=None):
    """Filter sentences with a given origlang (or subset) according to the raw SGM files.

    :param systems: a list of sentence streams (system + references).
    :param test_sets: comma-separated test set names.
    :param langpair: language pair whose raw SGM files are consulted.
    :param origlang: original language to keep; "non-" prefix negates.
    :param subset: regex matched against per-document subset annotations.
    :return: the streams with non-matching sentences removed.
    """
    # No filter requested: pass the streams through untouched.
    if origlang is None and subset is None:
        return systems
    if test_sets is None or langpair is None:
        raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')
    re_origlang = re.compile(r'.* origlang="([^"]+)".*\n')
    re_id = re.compile(r'.* docid="([^"]+)".*\n')
    # One boolean per <seg> across all test sets, in stream order.
    indices_to_keep = []
    for test_set in test_sets.split(','):
        dataset = DATASETS[test_set]
        rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', dataset.langpairs[langpair][0])
        if not rawfile.endswith('.sgm'):
            raise Exception(f'--origlang and --subset supports only *.sgm files, not {rawfile!r}')
        if subset is not None:
            if test_set not in SUBSETS:
                raise Exception('No subset annotation available for test set ' + test_set)
            doc_to_tags = SUBSETS[test_set]
        number_sentences_included = 0
        with smart_open(rawfile) as fin:
            include_doc = False
            for line in fin:
                # Each <doc> header decides the fate of all its <seg> lines.
                if line.startswith('<doc '):
                    if origlang is None:
                        include_doc = True
                    else:
                        doc_origlang = re_origlang.sub(r'\1', line)
                        if origlang.startswith('non-'):
                            include_doc = doc_origlang != origlang[4:]
                        else:
                            include_doc = doc_origlang == origlang
                    if subset is not None:
                        doc_id = re_id.sub(r'\1', line)
                        if not re.search(subset, doc_to_tags.get(doc_id, '')):
                            include_doc = False
                if line.startswith('<seg '):
                    indices_to_keep.append(include_doc)
                    number_sentences_included += 1 if include_doc else 0
    return [[sentence for sentence, keep in zip(sys, indices_to_keep) if keep] for sys in systems]
def print_subset_results(metrics, full_system, full_refs, args):
    """Score and print per-origlang (and per-country/domain subset) breakdowns.

    :param metrics: mapping of metric name -> metric object with corpus_score().
    :param full_system: the unfiltered system output stream.
    :param full_refs: the unfiltered reference streams.
    :param args: parsed CLI namespace (width, origlang, subset, test_set, langpair).
    """
    w = args.width
    origlangs = args.origlang if args.origlang else \
        get_available_origlangs(args.test_set, args.langpair)
    if len(origlangs) == 0:
        print('No subset information found. Consider using --origlang argument.')
        return
    results = defaultdict(list)
    for origlang in origlangs:
        # Always evaluate the plain origlang split (subset=None) first.
        subsets = [None]
        if args.subset is not None:
            subsets += [args.subset]
        elif all(t in SUBSETS for t in args.test_set.split(',')):
            subsets += COUNTRIES + DOMAINS
        for subset in subsets:
            system, *refs = filter_subset(
                [full_system, *full_refs], args.test_set, args.langpair, origlang, subset)
            if len(system) == 0:
                continue
            key = f'origlang={origlang}'
            if subset in COUNTRIES:
                key += f' country={subset}'
            elif subset in DOMAINS:
                key += f' domain={subset}'
            for metric in metrics.values():
                score = metric.corpus_score(system, refs)
                results[key].append((len(system), score))
    # Column widths for aligned output.
    max_left_width = max([len(k) for k in results.keys()]) + 1
    max_metric_width = max([len(val[1].name) for val in list(results.values())[0]])
    for key, scores in results.items():
        key = Color.format(f'{key:<{max_left_width}}', 'yellow')
        for n_system, score in scores:
            print(f'{key}: sentences={n_system:<6} {score.name:<{max_metric_width}} = {score.score:.{w}f}')
# import at the end to avoid circular import
from .dataset import DATASETS, SUBSETS, DOMAINS, COUNTRIES
| mjpost/sacrebleu | sacrebleu/utils.py | utils.py | py | 22,550 | python | en | code | 896 | github-code | 1 | [
{
"api_name": "os.path.expanduser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line... |
1986352570 | from ctypes import alignment
from tkinter import *
from tkinter import messagebox as mb
import json
#Class for GUI components
class Assessment(object):
    """Tkinter questionnaire that walks through Likert-scale statements and scores grit."""

    def __init__(self, database_filename, gui):
        """Load the questionnaire data and build the UI into *gui*.

        :param database_filename: path to a JSON file with 'statements',
            'framing' ('P' or 'N' per statement) and 'responses' keys.
        :param gui: the Tk root window to place the widgets into.
        """
        with open(database_filename) as f:
            data = json.load(f)
        self.gui = gui
        self.statements = data['statements']
        self.framing = data['framing']
        self.responses = data['responses']
        # Index of the statement currently on screen.
        self.statement_num = 0
        self.display_title()
        self.display_statements()
        # Holds the 1-5 value of the selected radio button (0 = nothing chosen).
        self.resp_selected = IntVar()
        self.resps = self.radio_buttons()
        self.display_responses()
        self.buttons()
        self.data_size = len(self.statements)
        # Running total of points over all answered statements.
        self.score = 0

    def generate_score(self, statement_num):
        """Add the points for the current selection to self.score.

        Positively framed ('P') statements score 6 - choice (choice 1 is
        worth 5 points); negatively framed statements score the choice value
        directly -- identical to the original if/elif ladder.
        """
        choice = self.resp_selected.get()  # 1..5
        if self.framing[statement_num] == "P":
            self.score += 6 - choice
        else:
            self.score += choice

    def display_result(self):
        """Show the average grit score in a message box.

        BUG FIX: the original built comma-separated *tuples* instead of
        strings, so showinfo displayed a raw Python tuple.
        """
        result = round(float(self.score / self.data_size), 2)
        grades = (
            (1, "extremely low"),
            (2, "low"),
            (3, "medium"),
            (4, "high"),
            (5, "extremely high"),
        )
        for threshold, grade in grades:
            if result <= threshold:
                mb.showinfo(
                    "Result",
                    f"You scored a {result} which means you have {grade} grit.")
                break

    def next_btn(self):
        """Validate the selection, score it, then advance or finish."""
        if self.resp_selected.get() == 0:
            mb.showerror("Error", "Please select a value before hitting next.")
            return
        # Score current statement, then move on.
        self.generate_score(self.statement_num)
        self.statement_num += 1
        if self.statement_num == self.data_size:
            self.display_result()
            # Use the instance's window rather than the module-level global.
            self.gui.destroy()
        else:
            self.display_statements()
            self.display_responses()

    def buttons(self):
        """Create the Next and Quit buttons."""
        next_button = Button(self.gui, text="Next", command=self.next_btn, width=10,
                             bg="green", fg="white", font=("Arial", 16, "bold"))
        next_button.place(x=350, y=380)
        quit_button = Button(self.gui, text="Quit", command=self.gui.destroy, width=10,
                             bg="red", fg="white", font=("Arial", 16, "bold"))
        quit_button.place(x=40, y=380)

    def display_responses(self):
        """Refresh the radio-button labels for the current statement and clear any selection."""
        self.resp_selected.set(0)
        for btn, response in zip(self.resps, self.responses[self.statement_num]):
            btn['text'] = response

    def display_statements(self):
        """Render the text of the current statement."""
        statement = Label(self.gui, text=self.statements[self.statement_num],
                          width=100, font=("Arial", 16, "bold"), anchor='w')
        statement.place(x=70, y=100)

    def radio_buttons(self):
        """Create the five answer radio buttons and return them as a list."""
        r_list = []
        y_pos = 150
        for value in range(1, 6):
            radio_btn = Radiobutton(self.gui, text=" ", variable=self.resp_selected,
                                    value=value, font=("Arial", 14))
            r_list.append(radio_btn)
            radio_btn.place(x=100, y=y_pos)
            y_pos += 40  # stack the options vertically
        return r_list

    def display_title(self):
        """Render the questionnaire title and the instructions line."""
        title = Label(self.gui, text="Grit Scale Questionnaire", fg="black",
                      font=("Arial", 20, "bold"))
        title.place(x=0, y=2)
        instructions = Label(self.gui,
                             text="Please respond to the following statement. Be honest - there are no right or wrong answers.",
                             fg="black", font=("Arial", 12,))
        instructions.place(x=0, y=40)
#GUI window
gui = Tk()
#GUI window size
gui.geometry("1200x600")
#GUI window title
gui.title("Grit Scale")
#Object of class
# NOTE: Assessment reads "data.json" from the current working directory.
assessment = Assessment("data.json", gui)
#Launch GUI
gui.mainloop()
#Intro landing page
#WHAT IS GRIT?
#• Grit is defined as perseverance and passion for long-term goals
#• It entails working strenuously toward challenges, maintaining effort and interest over years despite failure, adversity, and plateaus in progress
#• Grit is unrelated to talent and can be built through a growth mindset
| ChHarding/grit-scale-HCI584 | grit-scale_CH.py | grit-scale_CH.py | py | 7,484 | python | en | code | null | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "tkinter.me... |
32195531566 | import sys
import os.path
from cStringIO import StringIO
from mapnik import *
from django.conf import settings
import PIL.Image
from ebgeo.maps import bins
from ebgeo.maps.constants import TILE_SIZE
def xml_path(maptype):
    """Return the path of the Mapnik XML stylesheet for the given map type."""
    filename = '%s.xml' % maptype
    return os.path.join(sys.prefix, 'mapnik', filename)
def get_mapserver(maptype):
    """Map a maptype keyword to its MapServer subclass (KeyError if unknown)."""
    servers = {
        'main': MainMap,
        'locator': LocatorMap,
        'thematic': ThematicMap,
        'homepage': HomepageMap,
    }
    return servers[maptype]
class MapServer(Map):
    """
    A simple wrapper class around Mapnik's Map that provides a little
    friendlier interface to setting up a basic map and for common
    tasks.
    """
    def __init__(self, proj4, width=None, height=None):
        # NOTE(review): `proj4` is accepted but unused -- the projection is
        # hard-coded to spherical mercator (EPSG:900913) below; confirm
        # whether callers expect it to take effect.
        width = width or TILE_SIZE
        height = height or TILE_SIZE
        super(MapServer, self).__init__(width, height, '+init=epsg:900913')
        # Load the subclass's XML stylesheet (selected via self.maptype).
        load_map(self, xml_path(self.maptype))
    def zoom_to_bbox(self, minx, miny, maxx, maxy):
        """
        Zooms map to bounding box - convenience method
        """
        return self.zoom_to_box(Envelope(minx, miny, maxx, maxy))
    def render_image(self, mimetype='image/png'):
        """
        Renders the map as an Mapnik image
        """
        # NOTE(review): `mimetype` is unused here; encoding is applied later
        # in get_graphic().
        img = Image(self.width, self.height)
        render(self, img)
        return img
    def get_graphic(self, mapnik_img, mimetype='image/png'):
        """
        Returns the raw bytes of graphic in the target format (PNG, JPG, GIF,
        etc.)
        """
        img = PIL.Image.fromstring('RGBA', (self.width, self.height), mapnik_img.tostring())
        buf = StringIO()
        # Accept either a full mimetype ("image/png") or a bare format ("png").
        if mimetype.find('/') != -1:
            format = mimetype.split('/')[1]
        else:
            format = mimetype
        img.save(buf, format)
        try:
            return buf.getvalue()
        finally:
            buf.close()
    def export_pdf(self, filename):
        """
        Renders map as a PDF, exporting to file given.
        """
        # cairo imported lazily so PDF support is optional.
        import cairo
        surface = cairo.PDFSurface(filename, self.width, self.height)
        render(self, surface)
    def create_layer(self, layer_name, style_name, postgis_table):
        """
        Convenience shortcut method for setting up a new layer with
        a defined style and PostGIS table name.
        """
        layer = Layer(layer_name)
        layer.datasource = PostGIS(host=settings.MAPS_POSTGIS_HOST, user=settings.MAPS_POSTGIS_USER, password=settings.MAPS_POSTGIS_PASS, dbname=settings.MAPS_POSTGIS_DB, table=postgis_table)
        layer.styles.append(style_name)
        return layer
    def add_layer(self, layer_name, style_name, postgis_table, skip_if_missing=True):
        # NOTE(review): `skip_if_missing` is currently ignored.
        layer = self.create_layer(layer_name, style_name, postgis_table)
        self.layers.append(layer)
    def draw_map(self):
        # Subclasses define which layers/styles make up the map.
        raise NotImplementedError('subclasses must implement draw_map() method')
    def __call__(self, mimetype='image/png'):
        # Render pipeline: draw layers, rasterize, then encode to bytes.
        self.draw_map()
        img = self.render_image()
        return self.get_graphic(img, mimetype)
class MainMap(MapServer):
    """Full street-level map: base geography, street network and overlays."""
    maptype = 'main'
    def draw_map(self):
        # Base layers, added bottom-up (order determines stacking).
        self.add_layer('coastline', 'coastline', 'coastlines')
        self.add_layer('city', 'city-fill', 'cities')
        self.add_layer('major-water', 'water', 'water')
        self.add_layer('landmarks', 'landmarks', 'landmarks')
        self.add_layer('airports', 'airports', 'airports')
        self.add_layer('parks', 'parks', 'parks')
        # Streets
        streets = Layer('streets')
        streets.datasource = PostGIS(host=settings.MAPS_POSTGIS_HOST, user=settings.MAPS_POSTGIS_USER, password=settings.MAPS_POSTGIS_PASS, dbname=settings.MAPS_POSTGIS_DB, table='streets')
        # Add street styles -- order matters
        for style in [
            'road-fill',
            'arterial-fill',
            'highway-fill',
            'ramp-border',
            'ramp-fill',
            'interstate-border',
            'interstate-fill',
            'road-label',
            'arterial-label',
            'highway-label',
            'interstate-label'
        ]:
            streets.styles.append(style)
        self.layers.append(streets)
        # Overlays drawn on top of the street network.
        self.add_layer('neighborhoods', 'neighborhoods', 'neighborhoods')
        self.add_layer('city-border', 'city-border', 'city')
class LocatorMap(MapServer):
    """Small overview map: only the city footprint layer is drawn."""
    maptype = 'locator'
    def draw_map(self):
        self.add_layer('city', 'city-fill', 'cities')
class HomepageMap(LocatorMap):
    """Same layers as LocatorMap, styled by the 'homepage' XML stylesheet."""
    maptype = 'homepage'
# TODO: Move this somewhere else.
# Strategy used by ThematicMap to bucket data values into color bins.
BINNING_METHOD = bins.EqualSize
# TODO: Move this to a config file, maybe subclass from a generic ColorTheme class.
class GreenTheme:
    # Fill color for features that have no data value.
    no_value = '#D9FCC3'
    # Bin colors from lightest (lowest bin) to darkest (highest bin).
    range = ['#D9FCC3', '#A0E673', '#5ACC2D', '#22944E', '#13552D']
    # Outline color for feature borders.
    border = '#C0CCC4'
class ThematicMap(MapServer):
    """
    Generates a choropleth or "thematic" map for a LocationType.
    Data values are given as a dict, and keys are ids of the Location objects
    that comprise the LocationType.
    """
    maptype = 'thematic'
    def __init__(self, location_type, theme_data, key_field, colors=None, num_bins=5, **kwargs):
        super(ThematicMap, self).__init__(**kwargs)
        self.location_type = location_type
        # theme_data maps Location id -> numeric value; key_field is the
        # feature attribute matched against those ids in Mapnik filters.
        self.theme_data = theme_data
        self.key_field = key_field
        self.colors = colors or GreenTheme
        # NOTE(review): num_bins defaults to 5, so the fallback to
        # len(colors.range) only fires when num_bins is passed as 0 or None.
        num_bins = num_bins or len(self.colors.range)
        self.bins = BINNING_METHOD(theme_data.values(), num_bins)
    def draw_map(self):
        style = Style()
        # Add a default Rule for features that aren't in the values list
        default_rule = Rule()
        default_rule.symbols.append(PolygonSymbolizer(Color(self.colors.no_value)))
        default_rule.symbols.append(LineSymbolizer(Color(self.colors.border), 1.0))
        style.rules.append(default_rule)
        # TODO: Instead of one rule per object, compose a filter
        # expression for the objects with the same value; also, contend
        # with string v. numeric in the DBF
        for key, value in self.theme_data.iteritems():  # Python 2 dict API
            rule = Rule()
            # The Mapnik C++ signature requires strings, not Unicode
            filter_exp = "[%s] = '%s'" % (self.key_field, str(key))
            rule.filter = Filter(filter_exp)
            # Color is chosen by which bin the feature's value falls in.
            color = self.colors.range[self.bins.which_bin(value)]
            rule.symbols.append(PolygonSymbolizer(Color(color)))
            rule.symbols.append(LineSymbolizer(Color(self.colors.border), 1.0))
            style.rules.append(rule)
        self.append_style('theme', style)
        layer = Layer('theme')
        layer.datasource = LocationDatasource(self.location_type)
        layer.styles.append('theme')
        self.layers.append(layer)
def LocationDatasource(location_type):
    """
    Use ebpub.db.Location objects as a datasource for Mapnik layers.
    """
    # Subquery restricting the table to this LocationType's rows.
    # location_type.id is an ORM integer primary key, so this %s
    # interpolation is not exposed to user-controlled input.
    table_sql = """\
(SELECT * FROM db_location WHERE location_type_id = %s) AS db_location
""".strip() % (location_type.id,)
    # Prefer the Django DB settings; fall back to the maps-specific ones.
    host = settings.DATABASE_HOST and settings.DATABASE_HOST or settings.MAPS_POSTGIS_HOST
    port = settings.DATABASE_PORT and settings.DATABASE_PORT or 5432
    return PostGIS(host=host,
                   port=port,
                   dbname=settings.DATABASE_NAME,
                   user=settings.DATABASE_USER,
                   password=settings.DATABASE_PASSWORD,
                   table=str(table_sql), # Mapnik can't handle any Unicode
                   estimate_extent=True)
| brosner/everyblock_code | ebgeo/ebgeo/maps/mapserver.py | mapserver.py | py | 7,489 | python | en | code | 130 | github-code | 1 | [
{
"api_name": "os.path.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sys.prefix",
"line_nu... |
23051885140 | import tensorflow as tf
import numpy as np
import os
import argparse
import math
from model import GridCell
from custom_ops import block_diagonal
from data_io import Data_Generator
from matplotlib import pyplot as plt
from utils import draw_heatmap_2D, draw_path_to_target, draw_path_to_target_gif
import itertools
class Path_planning():
    """Builds a TF1 graph that greedily steers a grid-cell code toward a target.

    At each step the planner expands a pool of candidate motions (num_dir
    directions at several magnitudes), scores each candidate's grid code
    against the target's grid code, and moves to the best-scoring one until
    the decoded position is within max_err of the target or max_step is hit.
    """
    def __init__(self, grid_cell_model, max_step=40, max_err=2):
        self.model = grid_cell_model
        # build model
        # Placeholders for the start and target (x, y) positions.
        self.start = tf.placeholder(shape=[2], dtype=tf.float32)
        self.target = tf.placeholder(shape=[2], dtype=tf.float32)
        self.max_step, self.max_err = max_step, max_err
        # self.path_planning(max_step, max_err)
    def path_planning(self, num_step):
        step = tf.constant(0)
        # Encode start and target positions as grid-cell codes.
        grid_start = self.model.get_grid_code(self.start)
        grid_target = self.model.get_grid_code(self.target)
        place_seq, _ = self.model.localization_model(self.model.weights_A, grid_start, self.model.grid_cell_dim)
        place_seq = tf.expand_dims(place_seq, axis=0)
        place_seq_point = tf.expand_dims(self.start, axis=0)
        # velocity = self.model.velocity2
        # Candidate motions: num_dir evenly spaced headings at radius 2.
        num_dir = 100
        theta = np.linspace(-np.pi, np.pi, num_dir + 1)[:num_dir]
        r = 2.0
        velocity = np.zeros(shape=(num_dir, 2), dtype=np.float32)
        velocity[:, 0] = r * np.cos(theta)
        velocity[:, 1] = r * np.sin(theta)
        num_vel = len(velocity)
        vel_list = []
        interval_length = 1.0 / (self.model.num_interval - 1)
        # M_list = []
        # for t in range(num_step):
        #     vel_list.append(velocity * (t + 1))
        #     M_list.append(self.model.construct_motion_matrix(vel * (t + 1), reuse=tf.AUTO_REUSE))
        # M_list = tf.concat(M_list, axis=0)
        # Multi-step lookahead magnitudes: r*1, r*2, ..., r*num_step,
        # plus a finer radius-1 set for the final approach.
        for t in range(num_step):
            vel_list.append(velocity * (t + 1))
        r = 1.0
        velocity2 = np.zeros(shape=(num_dir, 2), dtype=np.float32)
        velocity2[:, 0] = r * np.cos(theta)
        velocity2[:, 1] = r * np.sin(theta)
        vel_list.append(velocity2)
        vel_list = np.concatenate(vel_list, axis=0)
        # Motion matrices for the coarse (M) and fine (M2) velocity sets.
        M = self.model.construct_motion_matrix(tf.cast(velocity * interval_length, tf.float32), reuse=tf.AUTO_REUSE)
        M2 = self.model.construct_motion_matrix(tf.cast(velocity2 * interval_length, tf.float32), reuse=tf.AUTO_REUSE)
        place_max = tf.zeros(shape=(1, len(vel_list)))
        # Initial candidate pool expanded from the start position.
        grid_code = tf.tile(tf.expand_dims(grid_start, axis=0), [num_vel, 1])
        grid_next_pool = []
        for t in range(num_step):
            grid_code = self.model.motion_model(M, grid_code)
            grid_next_pool.append(grid_code)
        grid_code = tf.tile(tf.expand_dims(grid_start, axis=0), [num_vel, 1])
        grid_code = self.model.motion_model(M2, grid_code)
        grid_next_pool.append(grid_code)
        self.grid_next_pool = tf.concat(grid_next_pool, axis=0)
        grid_code_list = tf.expand_dims(self.grid_next_pool, axis=0)
        def cond(step, grid_current, place_seq, place_seq_point, place_max, grid_code_list):
            # Continue while under the step budget and still farther than
            # max_err from the target.
            return tf.logical_and(step < self.max_step,
                                  tf.sqrt(tf.reduce_sum((tf.to_float(place_seq_point[-1] - self.target)) ** 2)) > self.max_err)
        def body(step, grid_current, place_seq, place_seq_point, place_max, grid_code_list):
            # grid_current = self.model.get_grid_code(place_seq_point[-1])
            # Expand all candidate motions from the current grid code.
            grid_code = tf.tile(tf.expand_dims(grid_current, axis=0), [num_vel, 1])
            grid_next_pool = []
            for t in range(num_step):
                grid_code = self.model.motion_model(M, grid_code)
                grid_next_pool.append(grid_code)
            grid_code = tf.tile(tf.expand_dims(grid_current, axis=0), [num_vel, 1])
            grid_code = self.model.motion_model(M2, grid_code)
            grid_next_pool.append(grid_code)
            grid_next_pool = tf.concat(grid_next_pool, axis=0)
            grid_code_list = tf.concat((grid_code_list, tf.expand_dims(grid_next_pool, axis=0)), axis=0)
            # Score: inner product between each candidate and the target code.
            direction_pool = tf.reduce_sum(grid_target * grid_next_pool, axis=1)
            place_next_pool, _ = self.model.localization_model(self.model.weights_A, grid_next_pool, self.model.grid_cell_dim)
            p_max = tf.reduce_max(tf.reshape(place_next_pool, [-1, self.model.place_dim]), axis=1)
            g_max = tf.reduce_max(grid_next_pool, axis=1)
            # NOTE(review): mask is the raw float p_max used with
            # tf.boolean_mask -- presumably relying on float->bool coercion
            # to drop zero-confidence candidates; confirm intent.
            mask = p_max
            place_max = tf.concat([place_max, tf.expand_dims(p_max, axis=0)], axis=0)
            grid_next_pool, direction_pool = tf.boolean_mask(grid_next_pool, mask), tf.boolean_mask(direction_pool, mask)
            vel_pool = tf.boolean_mask(vel_list, mask)
            # Greedy choice: candidate best aligned with the target code.
            pick_idx = tf.argmax(direction_pool)
            grid_current = grid_next_pool[pick_idx]
            place_predict, _ = self.model.localization_model(self.model.weights_A, grid_current, self.model.grid_cell_dim)
            # place_point_predict = tf.cast(place_point_predict, tf.float32)
            place_pt = place_seq_point[-1] + tf.cast(vel_pool[pick_idx], tf.float32)
            place_seq = tf.concat([place_seq, tf.expand_dims(place_predict, axis=0)], axis=0)
            place_seq_point = tf.concat([place_seq_point, tf.expand_dims(place_pt, axis=0)], axis=0)
            return tf.add(step, 1), grid_current, place_seq, place_seq_point, place_max, grid_code_list
        _, self.grid_current, place_seq, place_seq_point, self.place_max, self.grid_code_list = tf.while_loop(cond, body, [step, grid_start, place_seq, place_seq_point, place_max, grid_code_list],
                      shape_invariants=[step.get_shape(), grid_start.get_shape(),
                                        tf.TensorShape([None, self.model.num_interval, self.model.num_interval]),
                                        tf.TensorShape([None, 2]),
                                        tf.TensorShape([None, num_vel * (num_step + 1)]),
                                        tf.TensorShape([None, num_vel * (num_step + 1), self.model.grid_cell_dim])])
        self.place_seq, self.place_seq_point = place_seq, place_seq_point
def perform_path_planning(planning_model, sess, start, target, max_step=40,
                          output_dir=None, test_dir_name='test_path_planning>20', plot=True):
    """Run the planner for each (start, target) pair and report success stats.

    A run is counted successful when the while_loop stopped before max_step
    (i.e. the decoded position reached the target tolerance).
    """
    output_dir = os.path.join(output_dir, test_dir_name)
    # Start from a clean output directory.
    if tf.gfile.Exists(output_dir):
        tf.gfile.DeleteRecursively(output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    success = 0
    success_step = 0
    # Step-length statistics over nbin path segments plus the final step.
    nbin = 4
    nvel = np.zeros(shape=nbin+1)
    count = np.zeros(shape=nbin+1)
    num_test = len(start)
    place_seq_list = []
    for tt in range(num_test):
        start_value, target_value = start[tt], target[tt]
        # Do path planning
        feed_dict = {planning_model.start: start_value, planning_model.target: target_value}
        place_seq_value, place_seq_point_value, grid_next_pool, grid_list = sess.run([planning_model.place_seq, planning_model.place_seq_point, planning_model.grid_next_pool, planning_model.grid_code_list], feed_dict=feed_dict)
        if len(place_seq_value) < max_step:
            success = success + 1
            success_step = success_step + len(place_seq_value)
            # if success < 100:
            #     if not os.path.exists(os.path.join(output_dir, 'gif')):
            #         os.mkdir(os.path.join(output_dir, 'gif'))
            #     file_name = os.path.join(output_dir, 'gif', 'success%02d.gif' % success)
            #     draw_path_to_target_gif(file_name, planning_model.model.num_interval, place_seq_point_value, target_value)
            # Per-step displacement magnitudes along the successful path.
            vel_seq = np.diff(place_seq_point_value, axis=0)
            vel_seq = np.sqrt(np.sum(np.square(vel_seq), axis=1))
            nseq = len(vel_seq)
            bin_sz = int(np.floor(nseq / nbin))
            for i in range(nbin):
                # NOTE(review): max((i+1)*bin_sz, nseq) equals nseq whenever
                # nseq >= (i+1)*bin_sz, so every bin's slice runs to the end
                # of the path -- `min` looks intended here; confirm.
                nvel[i] = nvel[i] + np.sum(vel_seq[i * bin_sz: max((i+1) * bin_sz, nseq)])
                count[i] = count[i] + max((i+1) * bin_sz, nseq) - i * bin_sz
            # Last slot tracks the final step length separately.
            nvel[-1] = nvel[-1] + vel_seq[nseq-1]
            count[-1] = count[-1] + 1
            if tt < 100:
                if plot:
                    draw_path_to_target(planning_model.model.num_interval, place_seq_point_value, target=target_value,
                                        save_file=os.path.join(output_dir, 'test%02d.png' % tt))
                place_seq_list.append(place_seq_point_value)
    nvel = nvel / count
    success_pro = success / float(num_test)
    success_step = success_step / float(success)
    print(nvel)
    print('Proportion of success %02f, average success step %02f' % (success_pro, success_step))
    return place_seq_list
def main(_):
    """Entry point: parse flags, restore the grid-cell model, run planning.

    planning_type 'normal' evaluates planning success over random pairs;
    'plot' draws multi-step planning trajectories for fixed geometry.
    """
    parser = argparse.ArgumentParser()
    # training parameters
    parser.add_argument('--lr', type=float, default=0.05, help='Initial learning rate for descriptor')
    parser.add_argument('--beta1', type=float, default=0.9, help='Beta1 in Adam optimizer')
    # simulated data parameters
    parser.add_argument('--place_size', type=float, default=1.0, help='Size of the square place')
    parser.add_argument('--max_vel1', type=float, default=39, help='maximum of velocity in loss1')
    parser.add_argument('--min_vel1', type=float, default=1, help='minimum of velocity in loss1')
    parser.add_argument('--max_vel2', type=float, default=3, help='maximum of velocity in loss2')
    parser.add_argument('--min_vel2', type=float, default=1, help='minimum of velocity in loss2')
    parser.add_argument('--sigma', metavar='N', type=float, nargs='+', default=[0.3], help='sd of gaussian kernel')
    parser.add_argument('--num_data', type=int, default=30000, help='Number of simulated data points')
    # model parameters
    parser.add_argument('--place_dim', type=int, default=1600, help='Dimensions of place, should be N^2')
    parser.add_argument('--num_group', type=int, default=16, help='Number of groups of grid cells')
    parser.add_argument('--block_size', type=int, default=6, help='Size of each block')
    parser.add_argument('--lamda', type=float, default=0.1, help='Hyper parameter to balance two loss terms')
    parser.add_argument('--lamda2', type=float, default=1, help='Hyper parameter to balance two loss terms')
    parser.add_argument('--motion_type', type=str, default='continuous', help='True if in testing mode')
    parser.add_argument('--num_step', type=int, default=1, help='Number of steps in path integral')
    parser.add_argument('--GandE', type=float, default=1.0, help='Hyper parameter to balance two loss terms')
    parser.add_argument('--save_memory', type=bool, default=False, help='True if in testing mode')
    # planning parameters
    parser.add_argument('--num_test', type=int, default=1000, help='Maximum number of steps')
    parser.add_argument('--max_step', type=int, default=60, help='Maximum number of steps')
    parser.add_argument('--max_err', type=float, default=None, help='')
    parser.add_argument('--planning_step', metavar='N', type=int, nargs='+', default=[1], help='planning step')
    parser.add_argument('--planning_type', type=str, default='normal', help='True if in testing mode')
    # utils
    parser.add_argument('--output_dir', type=str, default='con_E_s0.3_max40,3_t1',
                        help='Checkpoint path to load')
    parser.add_argument('--ckpt', type=str, default='model.ckpt-5999', help='Checkpoint path to load')
    parser.add_argument('--M_file', type=str, default='M.npy', help='Estimated M DILE')
    parser.add_argument('--test_dir_name', type=str, default='test_planning', help='Estimated M file')
    parser.add_argument('--gpu', type=str, default='0', help='Which gpu to use')
    FLAGS = parser.parse_args()
    # Pin the process to the requested GPU.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
    model = GridCell(FLAGS)
    planning_model = Path_planning(model, FLAGS.max_step)
    with tf.Session() as sess:
        ckpt_file = os.path.join(FLAGS.output_dir, 'model', FLAGS.ckpt)
        # Load checkpoint
        assert FLAGS.ckpt is not None, 'no checkpoint provided.'
        num_step = len(FLAGS.planning_step)
        if FLAGS.planning_type == 'normal':
            # Random start/target pairs at least 20 grid units apart,
            # kept 2 units away from the arena border.
            target_value = np.random.choice(planning_model.model.num_interval - 4, [FLAGS.num_test * 10, 2]) + 2
            start_value = np.random.choice(planning_model.model.num_interval - 4, [FLAGS.num_test * 10, 2]) + 2
            select_idx = np.where(np.sqrt(np.sum((target_value - start_value) ** 2, axis=1)) > 20)[0]
            target_value, start_value = target_value[select_idx[:FLAGS.num_test]], start_value[select_idx[:FLAGS.num_test]]
            for planning_step in FLAGS.planning_step:
                # Rebuild the planning graph per lookahead depth, then
                # restore weights and evaluate.
                planning_model.path_planning(planning_step)
                saver = tf.train.Saver()
                sess.run(tf.global_variables_initializer())
                print('Loading checkpoint {}.'.format(ckpt_file))
                saver.restore(sess, ckpt_file)
                perform_path_planning(planning_model, sess, start_value, target_value, max_step=FLAGS.max_step,
                                      output_dir=FLAGS.output_dir,
                                      test_dir_name='%s_t%d' % (FLAGS.test_dir_name, planning_step))
        elif FLAGS.planning_type == 'plot':
            output_dir = os.path.join(FLAGS.output_dir, FLAGS.test_dir_name)
            if tf.gfile.Exists(output_dir):
                tf.gfile.DeleteRecursively(output_dir)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            # Targets near the far corner; starts placed at radius r and
            # one heading per planning step so trajectories can be compared.
            target_value = np.random.uniform(low=planning_model.model.num_interval - 4,
                                             high=planning_model.model.num_interval - 2, size=(FLAGS.num_test * 10, 2))
            r = np.random.uniform(low=25, high=40, size=FLAGS.num_test * 10)
            theta = np.tile(np.expand_dims(np.linspace(start=np.pi * 0.1, stop=np.pi * 0.35, num=num_step), axis=1),
                            (1, FLAGS.num_test * 10))
            # theta = np.random.uniform(low=np.pi * 0.1, high=np.pi * 0.5, size=(num_step, FLAGS.num_test * 10))
            # theta = np.sort(theta, axis=0)
            start_value = np.zeros(shape=(num_step, FLAGS.num_test * 10, 2))
            start_value[:, :, 0] = target_value[:, 0] - r * np.cos(theta)
            start_value[:, :, 1] = target_value[:, 1] - r * np.sin(theta)
            # Keep only starts that stay inside the arena.
            select_idx = np.where(np.sum(np.sum(start_value < 0, axis=-1), axis=0) == 0)[0]
            start_value = start_value[:, select_idx[:FLAGS.num_test]]
            place_seq_list_multistep = []
            for i in range(num_step):
                planning_step = FLAGS.planning_step[i]
                planning_model.path_planning(planning_step)
                saver = tf.train.Saver()
                sess.run(tf.global_variables_initializer())
                print('Loading checkpoint {}.'.format(ckpt_file))
                print('Testing planning step %d...' % planning_step)
                saver.restore(sess, ckpt_file)
                place_seq_list = perform_path_planning(planning_model, sess, start_value[i], target_value, max_step=FLAGS.max_step,
                                                       output_dir=FLAGS.output_dir, plot=False)
                place_seq_list_multistep.append(place_seq_list)
            # Overlay the per-step trajectories for each test case.
            for i in range(len(place_seq_list)):
                place_seq = []
                for j in range(len(place_seq_list_multistep)):
                    place_seq.append(place_seq_list_multistep[j][i])
                draw_path_to_target(planning_model.model.num_interval, place_seq, target=target_value[i],
                                    save_file=os.path.join(output_dir, 'plot%02d.png' % i))
if __name__ == '__main__':
tf.app.run() | ruiqigao/GridCell | path_planning.py | path_planning.py | py | 16,048 | python | en | code | 18 | github-code | 1 | [
{
"api_name": "tensorflow.placeholder",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.placeholder",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": ... |
39633978744 | import pygame # 1. pygame 선언
import random
pygame.init()  # 2. Initialize pygame
# 3. Global variables used by pygame
BLACK = (0, 0, 0)  # background fill color (RGB)
size = [600, 800]  # window size: width x height in pixels
screen = pygame.display.set_mode(size)
done = False  # main-loop exit flag
clock = pygame.time.Clock()  # used to cap the frame rate
# 4. pygame main (infinite) loop
def runGame():
    # Runs until the window is closed; redraws the screen at 10 FPS.
    global done
    while not done:
        clock.tick(10)  # cap at 10 frames per second
        screen.fill(BLACK)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done=True  # window close requested -> leave the loop
        pygame.display.update()
runGame()
# NOTE(review): a second runGame() defined below rebinds the name after
# this call, but it is never invoked.
pygame.quit()
def runGame():
    # NOTE(review): this redefinition is never called (pygame.quit() above
    # runs first); the body also looks truncated -- it sets up the bombs
    # and the player sprite but contains no game loop.
    bomb_image = pygame.image.load('bomb.png')
    bomb_image = pygame.transform.scale(bomb_image, (50, 50))
    bombs = []
    for i in range(5):
        # Each bomb starts above the screen at a random x position.
        rect = pygame.Rect(bomb_image.get_rect())
        rect.left = random.randint(0, size[0])
        rect.top = -100
        dy = random.randint(3, 9)  ## falling speed
        bombs.append({'rect': rect, 'dy': dy})
    person_image = pygame.image.load('person.png')
    person_image = pygame.transform.scale(person_image, (100, 100))
    # Player starts centered horizontally at the bottom of the screen.
    person = pygame.Rect(person_image.get_rect())
    person.left = size[0] // 2 - person.width // 2
    person.top = size[1] - person.height
    person_dx = 0
    person_dy = 0
| kyungkkk/week9 | game2.py | game2.py | py | 1,222 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Cl... |
1633661385 | from collections import deque
from fuzzywuzzy import fuzz
from datasets.fuman_base import load_fuman_rant
# Load the rant texts to be de-duplicated.
dataset = load_fuman_rant('data/20151023/bad-rants-4189.csv')
duplicates = set()  # indexes identified as near-duplicates
deduped = list()    # indexes kept (first occurrence wins)
n_elements = len(dataset.data)
# Queue of candidate indexes; near-duplicates are removed as they are found.
rant_indexes = deque([i for i in range(n_elements)])
while rant_indexes:
    i = rant_indexes.popleft()
    # Compare only against the next 3 rants (a small sliding window).
    end = min(i + 4, n_elements)
    window = [j for j in range(i + 1, end) if j not in duplicates and j in rant_indexes]
    r1 = dataset.data[i]
    # Similarity ratio > 90 -> treat j as a near-duplicate of rant i.
    dups = [j for j in window if fuzz.ratio(r1, dataset.data[j]) > 90]
    for j in dups:
        rant_indexes.remove(j)
    duplicates.update(dups)
    deduped.append(i)
rants = dataset.data
print('Found', len(duplicates), 'duplicates')
print('Deduped list has', len(deduped), 'elements')
assert len(rants) == len(deduped) + len(duplicates), "Missing rants!"
# Keep only reasonably long rants, with newlines flattened to spaces.
long_deduped = [rants[i].replace('\n', ' ') for i in deduped if len(rants[i]) > 50]
with open('data/output/bad-rants-deduped.csv', "wt") as fp:
    for rant in long_deduped:
        fp.write(rant + '\n')
| dumoulma/py-evalfilter | src/deduplicate_rants.py | deduplicate_rants.py | py | 1,055 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datasets.fuman_base.load_fuman_rant",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "fuzzywuzzy.fuzz.ratio",
"line_number": 17,
"usage_type": "call"
},
{
"api_na... |
14749681743 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 22:57:54 2019
@author: weichi
"""
import datetime as dt
from datetime import datetime
import pytz
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model, metrics, model_selection
from sklearn.model_selection import train_test_split
# Reconstruct timezone-aware time information from `month`, `day`, and `hour`.
def get_time(x):
    """Build an Asia/Taipei-aware datetime from (month, day, hour) for 2019.

    Uses pytz's localize(): the original `replace(tzinfo=pytz.timezone(...))`
    attaches the zone's raw LMT offset (+08:06 for Asia/Taipei) instead of
    the correct CST offset, shifting every timestamp by several minutes.
    """
    time_str = '2019 %d %d %d' % (x[0], x[1], x[2])
    taipei_tz = pytz.timezone('Asia/Taipei')
    time = taipei_tz.localize(dt.datetime.strptime(time_str, '%Y %m %d %H'))
    return time
# Check whether the next row is exactly one hour after the current one.
# If it is not, the shifted pm2.5 column gets NaN for this row.
def check_next_hour(x):
    """Return x[0] when x[2] follows x[1] by exactly one hour, else NaN."""
    is_consecutive = (x[2] - x[1]) == dt.timedelta(hours=1)
    return x[0] if is_consecutive else np.nan
#%% read data from saved file
df5 = pd.read_csv('complete_data_5.csv', index_col=0)
#%% combine month/day/hour into a single timezone-aware date column
df5['date'] = df5[['month', 'day', 'hour']].apply(get_time, axis=1)
#%% plot raw pm2.5 over time
# Register pandas datetime converters explicitly for matplotlib.
pd.plotting.register_matplotlib_converters()
# Plt
plt.figure(figsize=(12, 7))
plt.scatter(df5['date'], df5['pm2.5'])
#%% append the previous 1~7 hours of each feature as lag columns
titles = ['pm2.5', 'temp', 'humidity', 'ws', 'wd', 'precp']
for i in range(1, 8):
    for item in titles:
        title = item + '_' + str(i)
        df5[title] = df5[item].shift(periods=i)
#df5['pm2.5_shift_1'] = df5['pm2.5'].shift(periods=-1)
#%% drop rows made incomplete by lagging, drop unused columns, reset index
df5 = df5.dropna(axis=0)
df5 = df5.reset_index(drop=True)
date = df5['date']
df5 = df5.drop(['date', 'pm1.0', 'pm10.0', 'month', 'day', 'hour'], axis=1)
#%% Normalization (z-score; std/mean kept for later de-normalization)
std = df5.std()
mean = df5.mean()
df5 = (df5 - mean) / std
df5['date'] = date
#%% shift pm2.5 to obtain the value `shift_amount` hours ahead (the target)
shift_amount = 1
df5['pm2.5_shift_1'] = df5['pm2.5'].shift(-shift_amount)
df5['time_shift_1'] = df5['date'].shift(-shift_amount)
#%% drop the tail rows that have no future target value
df5 = df5.dropna(axis=0)
#date = date.drop(date.index[date.index.size-shift_amount:date.index.size])
df5 = df5.reset_index(drop=True)
#%% separate the target and auxiliary columns from the feature frame
pm_shift_1 = df5['pm2.5_shift_1']
time_shift_1 = df5['time_shift_1']
date = df5['date']
df5 = df5.drop(['date', 'pm2.5_shift_1', 'time_shift_1'], axis=1)
#%% y is the future pm2.5 (target); X is everything that remains
y = pm_shift_1.copy()
original_pm25 = pm_shift_1.copy()
X = df5.copy()
#%%
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=0)
#print(X_train.shape, y_train.shape)
#print(X_test.shape, y_test.shape)
#%% Divide training set and test set chronologically (85/15, no shuffling)
bound = int(len(df5)*0.85)
X_train = X[:bound]
X_test = X[bound:]
y_train = y[:bound]
y_test = y[bound:]
date = date[bound:]
time_shift_1 = time_shift_1[bound:]
original_pm25 = original_pm25[bound:]
#%% Fit the model
# NOTE(review): LinearRegression(normalize=True) was removed in
# scikit-learn 1.2; the data is already z-scored above, so the flag is
# redundant on modern versions.
model = linear_model.LinearRegression(normalize=True)
model.fit(X_train, y_train)
#%% See the coefficients of our model
a = model.coef_
b = model.intercept_
print(a)
print(b)
for i in range(len(X_train.columns)):
    print('Coefficient for %10s:\t%s' % (X_train.columns[i], model.coef_[i]))
#%% Calculate mean squared error for training set & test set
predict_train_y = model.predict(X_train)
predict_y = model.predict(X_test)
train_mse = metrics.mean_squared_error(y_train, predict_train_y)
test_mse = metrics.mean_squared_error(y_test, predict_y)
print('Train MSE:\t %s' % train_mse)
print('Test MSE:\t %s' % test_mse)
#%% de-normalize predictions back to the original pm2.5 scale
predict_y_plot = predict_y * std['pm2.5'] + mean['pm2.5']
y_test_plot = y_test * std['pm2.5'] + mean['pm2.5']
original_pm25_plot = original_pm25 * std['pm2.5'] + mean['pm2.5']
#%% plot actual vs predicted pm2.5 for the test period
# Register pandas datetime converters explicitly for matplotlib.
pd.plotting.register_matplotlib_converters()
# Plt
plt.figure(figsize=(12, 7))
plt.plot(time_shift_1, original_pm25_plot, label='actual values')
plt.plot(time_shift_1, predict_y_plot, label='predict values')
plt.legend()
fig = plt.gcf()
plt.show()
fig.savefig('output.png')
| WeichiChen1210/PM2.5-Prediction | pm2.5_prediction.py | pm2.5_prediction.py | py | 3,991 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytz.timezone",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "datet... |
9338192510 | import dash
import dash_core_components as dcc
import dash_html_components as html
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objs as go
from urllib.request import urlopen
import plotly.io as pio
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
from datetime import date, timedelta
# Render figures with the Colab renderer.
pio.renderers.default = 'colab'
# Live per-state case/death counts from the NYT dataset.
nytcases = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/live/us-states.csv")
# Hard-coded state population estimates, [abbreviation, population],
# ordered to line up row-for-row with the NYT file.
data = [['AL',4887871],
        ['AK',737438],
        ['AZ',7171646],
        ['AR',3013825],
        ['CA',39557045],
        ['CO',5695564],
        ['CT',3572665],
        ['DE',967171],
        ['DC',702455],
        ['FL',21299325],
        ['GA',10519475],
        ['Guam', 167294 ],
        ['HI',1420491],
        ['ID',1754208],
        ['IL',12741080],
        ['IN',6691878],
        ['IA',3156145],
        ['KS',2911505],
        ['KY',4468402],
        ['LA',4659978],
        ['ME',1338404],
        ['MD',6042718],
        ['MA',6902149],
        ['MI',9995915],
        ['MN',5611179],
        ['MS',2986530],
        ['MO',6126452],
        ['MT',1062305],
        ['NE',1929268],
        ['NV',3034392],
        ['NH',1356458],
        ['NJ',8908520],
        ['NM',2095428],
        ['NY',19542209],
        ['NC',10488084],
        ['ND',760077],
        ['Northern Mariana Islands', 57216],
        ['OH',11689442],
        ['OK',3943079],
        ['OR',4190713],
        ['PA',12807060],
        ['PR',3195153],
        ['RI',1057315],
        ['SC',5084127],
        ['SD',882235],
        ['TN',6770010],
        ['TX',28701845],
        ['UT',3161105],
        ['VT',626299],
        ['VA',8517685],
        ['Virgin Islands', 167294],
        ['WA',7535591],
        ['WV',1805832],
        ['WI',5813568],
        ['WY',577737]]
pop = pd.DataFrame(data, columns = ['statey', 'Population'])
# NOTE(review): axis=1 concat aligns rows purely by positional index -- if
# the NYT file's row order ever changes, populations attach to the wrong
# state. Confirm the ordering assumption still holds.
df = pd.concat([nytcases, pop], axis=1)
cases_capita_list = []
death_capita_list = []
# Rates per 100,000 residents.
cases_per_cap = (df['cases'] / df['Population']) * 100000
death_per_cap = (df['deaths'] / df['Population']) * 100000
cases_capita_list.extend(cases_per_cap)
death_capita_list.extend(death_per_cap)
df['Cases per 100,000'] = cases_capita_list
df['Deaths per 100,000'] = death_capita_list
#df['fips'] = df['fips'] * 1000
# Zero-pad FIPS codes to two digits so they match the geojson feature ids.
df['fips'] = df["fips"].apply(lambda x: f"{x:02d}") #Fixes FIPS code of certain states and counties
def cases_per_100k():
    """Build a choropleth mapbox figure of total COVID-19 cases per 100,000 people."""
    geojson_url = 'https://raw.githubusercontent.com/PublicaMundi/MappingAPI/master/data/geojson/us-states.json'
    with urlopen(geojson_url) as response:
        states = json.load(response)
    # z is the second-to-last column of df: 'Cases per 100,000'.
    choropleth = go.Choroplethmapbox(
        geojson=states,
        locations=df["fips"],
        z=df.iloc[:, -2],
        colorscale='tempo',
        zmin=0,
        marker_opacity=0.5,
        marker_line_width=0,
        hovertext=df['state'],
    )
    fig = go.Figure(choropleth)
    fig.update_layout(
        mapbox_style="carto-positron",
        title="Total Cases per 100,000 People",
        mapbox_zoom=4,
        mapbox_center={"lat": 40.785794, "lon": -89.209738},
    )
    return fig
def deaths_per_100k():
    # Same choropleth as cases_per_100k(), colored by the last column
    # ('Deaths per 100,000') instead.
    with urlopen('https://raw.githubusercontent.com/PublicaMundi/MappingAPI/master/data/geojson/us-states.json') as response:
        states = json.load(response)
    hovertext = df['state']
    fig2 = go.Figure(go.Choroplethmapbox(geojson = states, locations = df["fips"], z = df.iloc[:,-1],
                                         colorscale = 'tempo', zmin = 0, marker_opacity = 0.5, marker_line_width = 0,
                                         hovertext = hovertext))
    fig2.update_layout(mapbox_style="carto-positron", title = "Total Deaths per 100,000 People",
                       mapbox_zoom=4, mapbox_center = {"lat": 40.785794, "lon": -89.209738})
return fig2 | johnli25/uscoronavirusinfo | us_states_per_capita.py | us_states_per_capita.py | py | 3,428 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "plotly.io.renderers",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "plotly.io",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",... |
4038397722 | # TASK - 1
# A To-Do List application is a useful project that helps users manage and organize their tasks efficiently.
# This project aims to create a command-line or GUI-based application using Python, allowing users to create, update,
# and track their to-do lists.
from tkinter import *
import tkinter.messagebox as msg
import os
from termcolor import colored
class TodoApp(Tk):
    def __init__(self):
        """Build the main window: size/center it, create all widgets, then load tasks."""
        super().__init__()
        # Centering the window when opened.
        screen_width = self.winfo_screenwidth()
        screen_height = self.winfo_screenheight()
        print(colored(f"SCREEN WIDTH : {screen_width} x SCREEN HEIGHT : {screen_height}", "blue"))
        app_width = 1000
        app_height = 800
        print(colored(f"APP WIDTH : {app_width} x APP HEIGHT : {app_height}", "blue"))
        # Top-left corner that centers an app_width x app_height window.
        set_x = int((screen_width/2) - (app_width/2))
        set_y = int((screen_height/2) - (app_height/2))
        self.geometry(f'{app_width}x{app_height}+{set_x}+{set_y}')
        self.title("TodoList Manager")
        self.resizable(False, False)
        # Design: order matters — frames must exist before the widgets parented to them.
        self.create_app_heading()
        self.give_separation_line()
        self.item_input_frame = self.create_input_frame()
        self.item_entry_box = self.create_new_item_entry_box()
        self.create_add_button()
        self.list_display_frame = self.create_display_frame()
        self.todo_display_listbox = self.create_todo_list_box()
        self.listbox_scrollbar = self.add_listbox_scrollbar()
        self.show_scrollbar()
        self.operation_button_frame = self.create_operation_frame()
        # PhotoImage objects are kept on self so they are not garbage-collected
        # (tkinter does not hold its own reference to button images).
        self.edit_img = PhotoImage(file='Images/edit.png')
        self.create_edit_button()
        self.clear_img = PhotoImage(file='Images/clear.png')
        self.create_clear_button()
        self.remove_img = PhotoImage(file='Images/remove.png')
        self.create_delete_button()
        self.uncross_img = PhotoImage(file='Images/save.png')
        self.create_uncross_button()
        self.cross_img = PhotoImage(file='Images/cross.png')
        self.create_cross_button()
        # Functions: populate the listbox from todo.txt (creates it if missing).
        self.listbox_load()
# Designing The application's interface.
def create_app_heading(self):
"""
Creates the top heading for the application.
"""
app_heading = Label(self, text = "TODO-List Manager", font = ('Pristina', 27, 'bold'), pady = 20, bg = "#e9ecf4")
app_heading.pack(fill = X)
def give_separation_line(self):
"""
Adds a separation line after a desired frame or widget.
"""
frame = Frame(self, bg = "#0b227a", height = 5)
frame.pack(fill = X)
def create_input_frame(self):
"""
Creates the frame on the root window for placing the
desired widgets related to performing input operations.
"""
frame = Frame(self, bg = "#38393f", height = 100, padx = 20, pady = 20)
frame.pack(fill = X)
return frame
def create_new_item_entry_box(self):
"""
Adds an entry box to the input related frame
for entering the new task item to be added.
"""
entry = Entry(self.item_input_frame, width = 50, borderwidth = 0, font = ('Helvetica', 22))
entry.pack(side = LEFT)
return entry
def create_add_button(self):
"""
Creates a button to the input related frame
for adding the entered task to the list box.
"""
button = Button(self.item_input_frame, text = "Add to list", width = 18, borderwidth = 0, font = ('Helvetica', 14, 'bold'), bg = "#2e5d72", fg = "#ffffff", command = self.add_item)
button.pack(side = LEFT)
def create_display_frame(self):
"""
Creates the frame on the root window for placing the
desired widgets related to performing display operations.
"""
frame = Frame(self, width = 200, height = 700, bg = "#274a5a", padx = 20, pady = 20)
frame.pack(fill = BOTH)
return frame
def create_todo_list_box(self):
"""
Creates a listbox to display all the items added to the task list.
"""
listbox = Listbox(self.list_display_frame, width = 78, height = 20, font = ('Helvetica', 16), bg = "#dde1e3", fg = "#0e0c49", selectbackground = "#6016d9", activestyle = NONE, cursor = "hand2")
listbox.pack(side = LEFT, fill = BOTH)
return listbox
def add_listbox_scrollbar(self):
"""
Adds a scroll bar to the listbox for scrolling vertically
through the listbox when the list of tasks is too long.
"""
scrollbar = Scrollbar(self.list_display_frame)
scrollbar.pack(side = LEFT, fill = BOTH)
return scrollbar
def show_scrollbar(self):
"""
Bind the scroll bar to the display listbox and activate
its operation.
"""
self.todo_display_listbox.config(yscrollcommand = self.listbox_scrollbar.set)
self.listbox_scrollbar.config(command = self.todo_display_listbox.yview)
def create_operation_frame(self):
"""
Creates the frame on the root window for placing the
desired widgets related to performing various operations like:
disable a task, delete a disabled task, edit a specific task, etc.
"""
frame = Frame(self, bg = "#38393f")
frame.pack(fill = BOTH)
return frame
def create_edit_button(self):
"""
Create an edit button with an edit icon to prompt
the user to edit the selected task upon clicking the button.
"""
button = Button(self.operation_button_frame, image = self.edit_img, bd = 0, bg = "#38393f", cursor = "hand2", command = self.edit_task)
button.pack(side = LEFT, pady = 10, padx = 10)
def create_delete_button(self):
"""
Create a delete button with a trash icon to delete a
disabled task.
"""
button = Button(self.operation_button_frame, image = self.remove_img, bd = 0, bg = "#38393f", cursor = "hand2", command = self.delete_crossed_item)
button.pack(side = RIGHT, pady = 10, padx = (10, 330))
def create_uncross_button(self):
"""
Create an uncross button with a tick icon to enable a
disabled task.
"""
button = Button(self.operation_button_frame, image = self.uncross_img, bd = 0, bg = "#38393f", cursor = "hand2", command = self.uncross_item)
button.pack(side = RIGHT, pady = 10, padx = 10)
def create_cross_button(self):
"""
Create a cross button with a times icon to disable
a desired task.
"""
button = Button(self.operation_button_frame, image = self.cross_img, bd = 0, bg = "#38393f", cursor = "hand2", command = self.cross_item)
button.pack(side = RIGHT, pady = 10, padx = 10)
def create_clear_button(self):
"""
Create a clear button with a sweep icon to clear all the
task items present in the list.
"""
button = Button(self.operation_button_frame, image = self.clear_img, bd = 0, bg = "#38393f", cursor = "hand2", command = self.clear_list)
button.pack(side = RIGHT, pady = 10, padx = 10)
# Adding the functionalities and events to the application.
def blank_line_handler(self):
"""
Handle the text file if there is a blank line,
remove the blank unnecessary blank line and save
the changes to the assigned text file.
"""
with open("todo.txt", "r") as tdf:
lines = tdf.readlines()
for line in lines:
if line.strip("\n"):
os.rename('todo.txt', 'old_todo.txt')
break
else:
pass
try:
with open("old_todo.txt", "r") as old_tdf:
new_tdf = open('todo.txt', "w")
lines = old_tdf.readlines()
for line in lines:
if line.strip("\n"):
new_tdf.write(line)
os.remove("old_todo.txt")
except FileNotFoundError:
message = colored("No blank lines in the file.")
print(message)
def add_item(self):
"""
Function for the add item button. Adds the entered
new task and add to the list items in the file and save
the changes to the text file.
"""
new_item = self.item_entry_box.get()
with open('todo.txt', "r+") as tdf:
number_of_lines = len(tdf.readlines())
print(colored(f"The total number of tasks in the list: {number_of_lines+1}", "magenta"))
if number_of_lines >= 1:
tdf.write(f"\n{new_item}")
else:
tdf.write(new_item)
self.listbox_load()
self.item_entry_box.delete(0, END)
    def listbox_load(self):
        """Reload the listbox contents from todo.txt.

        Strips blank lines first, clears the listbox, then inserts one
        row per line of the file. If the file cannot be read (e.g. it
        does not exist yet), an empty todo.txt is created instead.
        """
        self.blank_line_handler()
        self.todo_display_listbox.delete(0, END)
        try:
            with open('todo.txt', "r") as tdf:
                lines = tdf.readlines()
                for line in lines:
                    self.todo_display_listbox.insert(END, line)
        except Exception as e:
            warning = colored(f"SORRY! EXCEPTION OCCURRED: {e}", "red")
            print(warning)
            # Create an empty todo.txt so subsequent reads succeed.
            tdf = open('todo.txt', "w")
            tdf.close()
def cross_item(self):
"""
Function for the cross button. Greys the desired selected
text to indicate the task is disabled by the user
"""
try:
self.todo_display_listbox.itemconfig(self.todo_display_listbox.curselection(), fg = "#b7b3bd")
self.todo_display_listbox.selection_clear(0, END)
except Exception:
print(colored(f"CROSS EVENT ERROR : empty task list ", "light_yellow"))
msg.showwarning(title = "WARNING", message = "Task list is empty.")
def uncross_item(self):
"""
Function for the uncross item button. Converts a selected
disabled task to its default color to indicate the task is
an active task.
"""
try:
self.todo_display_listbox.itemconfig(self.todo_display_listbox.curselection(), fg = "#0e0c49")
self.todo_display_listbox.selection_clear(0, END)
except Exception:
print(colored(f"UNCROSS EVENT ERROR : empty task list ", "light_yellow"))
msg.showwarning(title = "WARNING", message = "Task list is empty.")
    def save_edit_task(self, line_number, task):
        """
        Save the edited task text back to the given line of todo.txt,
        refresh the listbox, and close the pop-up edit wizard.
        :param line_number: zero-based index of the task being edited
        :param task: the new task text
        """
        new_task = f"LINE NUMBER : {line_number}\tTASK : {task}"
        print(new_task)
        with open("todo.txt", "r") as tdf:
            lines = tdf.readlines()
            lines[line_number] = task+"\n"
        message = colored(f"The edited list of tasks: {lines}", "green")
        print(message)
        # Rewrite the whole file with the edited line in place.
        with open("todo.txt", "w") as tdf:
            for line in lines:
                tdf.write(line)
        self.listbox_load()
        # edit_task_popup_window is a module-level global set by popup_edit_task.
        edit_task_popup_window.destroy()
        msg.showinfo(title = "Edited", message = f"Successfully edited Task {line_number+1}!")
    def popup_edit_task(self, item_number, editable_task):
        """
        Open a pop-up wizard for editing the selected task.
        :param item_number: zero-based index of the task
        :param editable_task: current text of the task being edited
        """
        # Stored as a module-level global so save_edit_task can destroy it.
        global edit_task_popup_window
        edit_task_popup_window = Toplevel(self)
        edit_task_popup_window.geometry("700x215")
        edit_task_popup_window.resizable(False, False)
        edit_task_popup_window.title(f"Edit task {item_number+1}")
        label = Label(edit_task_popup_window, text = f"EDIT TASK:\n{editable_task}", fg = "#1d3b64", font = ("Helvetica", 16))
        label.pack()
        # Entry pre-filled with the existing task text.
        task = Entry(edit_task_popup_window, width = 60, font = ("Helvetica", 14), fg = "#1d3b64")
        task.insert(0, editable_task)
        task.pack(pady = 20)
        btn_frame = Frame(edit_task_popup_window, padx = 20, pady = 20)
        btn_frame.pack()
        btn_save = Button(btn_frame, text = "SAVE", width = 13, bd = 0, bg = "#3c8bdf", fg = "#ffffff", font = ("Helvetica", 13, "bold"), command = lambda : self.save_edit_task(item_number, task.get()))
        btn_save.pack(side = LEFT, padx = (370, 10))
        btn_cancel = Button(btn_frame, text = "CANCEL", width = 13, bd = 0, bg = "#3c8bdf", fg = "#ffffff", font = ("Helvetica", 13, "bold"), command = edit_task_popup_window.destroy)
        btn_cancel.pack(side = LEFT)
        # NOTE(review): mainloop() on a Toplevel starts a nested event loop while
        # the root loop is already running — presumably intentional; confirm.
        edit_task_popup_window.mainloop()
    def edit_task(self):
        """
        Get the selected task and its line number and hand them to the
        pop-up edit wizard. Warns when nothing is selected; any read
        failure is treated as an empty task list.
        """
        try:
            selected_item = self.todo_display_listbox.curselection()
            if selected_item:
                print(f"You have selected item number: {selected_item}")
                with open("todo.txt", "r+") as tdf:
                    lines = tdf.readlines()
                    # curselection() returns a tuple; the first index is the row.
                    selected_task = lines[selected_item[0]]
                    print(f"You have selected task: {selected_task}")
                win_title = selected_item[0]
                self.popup_edit_task(item_number = win_title, editable_task = selected_task)
            else:
                msg.showwarning(title = "WARNING", message = "Please select a task to edit.")
        except Exception:
            print(colored("EDIT EVENT ERROR : empty task list", "light_yellow"))
            msg.showwarning(title = "WARNING", message = "Task list is empty.")
    def delete_crossed_item(self):
        """
        Delete every task whose listbox row was greyed out (crossed) by
        the user, confirming each deletion with a yes/no dialog, then
        refresh the listbox from the rewritten todo.txt.
        """
        index = 0
        # Number of lines already removed from todo.txt; used to map a
        # listbox index back onto the shrunken file when deletion raises.
        length_diff = 0
        while index < self.todo_display_listbox.size():
            # Grey foreground (#b7b3bd) marks a crossed/disabled task.
            if self.todo_display_listbox.itemcget(index, 'fg') == "#b7b3bd":
                line_no = self.todo_display_listbox.index(index)
                item = self.todo_display_listbox.get(index)
                info = colored(f"DELETE LINE NO.: {line_no}, TASK: {item}", "red")
                print(info)
                operation = msg.askyesno(title = "Confirm", message = f"Do you want to delete task - {item}")
                print(f"DELETE : {operation}")
                if operation is True:
                    try:
                        # File is re-read each pass, so earlier deletions have
                        # shifted line numbers; the except branch compensates.
                        with open("todo.txt", "r") as tdf:
                            lines = tdf.readlines()
                            del lines[line_no]
                            length_diff += 1
                            print(f"length difference = {length_diff}")
                    except IndexError:
                        print(colored("HANDLING GENERATED INDEX ERROR", "cyan"))
                        error_index = index
                        print(f"ERROR INDEX = {error_index}")
                        print(f"LENGTH DIFFERENCE = {length_diff}")
                        del_index = (error_index - length_diff)
                        print(f"DELETE INDEX: {del_index}")
                        del lines[del_index]
                    finally:
                        # NOTE(review): if open() itself failed, 'lines' would be
                        # unbound here and the finally block would raise NameError —
                        # confirm todo.txt is guaranteed to exist at this point.
                        with open("todo.txt", "w") as new_tdf:
                            for line in lines:
                                new_tdf.write(line)
                else:
                    message = colored(f"Operation Delete - {item}\tCancelled by user!", "red")
                    print(message)
            index += 1
        self.blank_line_handler()
        self.listbox_load()
def clear_list(self):
"""
This event is triggered when user clicks the clear icon.
It clears all the tasks in the task list irrespective of
their status(disabled or active).
"""
operation = msg.askyesno(title = "CLEAR LIST", message = "Do you want to clear the complete list?")
if operation is True:
try:
with open("todo.txt", "w+") as tdf:
lines = tdf.readlines()
lines.clear()
except Exception as e:
print(f"Exception : {e}")
finally:
self.listbox_load()
else:
message = colored("Operation CLEAR THE COMPLETE LIST is cancelled by the user!", "red")
print(message)
    def run(self):
        """
        Start the Tk event loop; blocks until the window is closed.
        """
        self.mainloop()
if __name__ == '__main__':
app = TodoApp()
app.run() | KaustabRoy/CODSOFT | Task1-ToDoListManager/main.py | main.py | py | 17,100 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "termcolor.colored",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "termcolor.colored",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.rename",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_... |
42703895429 | # Import Required Libraries
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
import cv2
import numpy as np
import os
import sys
import tensorflow as tf
from matplotlib import pyplot as plt
from PIL import Image,ImageTk
num_classes = 3
pb_fname = '/home/ubuntu/content/models/research/fine_tuned_model/frozen_inference_graph.pb'
PATH_TO_CKPT = pb_fname
PATH_TO_LABELS = '/home/ubuntu/Facescrub/annotations/facescrub.pbtxt'
PATH_TO_TEST_IMAGES_DIR = '/home/ubuntu/content/result'
# Create a Window.
MyWindow = Tk() # Create a window
MyWindow.title("First GUI") # Change the Title of the GUI
MyWindow.geometry('700x500') # Set the size of the Windows
global file
# Create the GUI Component but dont display or add them to the window yet.
MyLabel = Label(text = "Click to Open an Image", font=("Arial Bold", 10))
ClassficationResultLabel = Label(text = "Classification Result: ", font=("Arial Bold", 10))
# Open Image Function using OpenCV
def openImg(filename):
    """Load the image at *filename* and display it in the window via a Label."""
    loaded = Image.open(filename)
    photo = ImageTk.PhotoImage(loaded)
    display = Label(text='test', image=photo)
    # Keep a reference on the widget so the PhotoImage is not garbage-collected.
    display.image = photo
    display.place(x=50, y=50)
    print(filename)
# Create Event Methods attached to the button etc.
def BttnOpen_Clicked():
    """Handler for the "Open Image" button: pick a file and display it."""
    global file
    # Use the File Dialog component to select the image to open.
    image_types = (("Images files", "*.jpeg"), ("all files", "*.*"))
    file = filedialog.askopenfilename(filetypes=image_types)
    openImg(file)
def BttnProcess_Clicked():
    """Run the frozen TF1 object-detection graph on the opened image.

    Loads the frozen inference graph and label map, runs detection on the
    file chosen by BttnOpen_Clicked (module-level global `file`), draws
    the boxes, saves the result under /home/ubuntu/content/result/, and
    displays it in the window.
    """
    global file
    messagebox.showinfo("Info", "Do you wanna see a miracle?")
    # img_detection = cv2.imread(file)
    TEST_IMAGE_PATHS = []
    assert os.path.isfile(pb_fname)
    assert os.path.isfile(PATH_TO_LABELS)
    TEST_IMAGE_PATHS.append(file)
    assert len(TEST_IMAGE_PATHS) > 0, 'No image found'
    print(TEST_IMAGE_PATHS)
    # This is needed since the notebook is stored in the object_detection folder.
    sys.path.append("..")
    from object_detection.utils import ops as utils_ops
    # This is needed to display the images.
    # %matplotlib inline
    from object_detection.utils import label_map_util
    from object_detection.utils import visualization_utils as vis_util
    # Load the frozen graph (TensorFlow 1.x graph-mode API).
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=num_classes, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    def load_image_into_numpy_array(image):
        # Flatten the PIL image into an (H, W, 3) uint8 array.
        # NOTE(review): assumes a 3-channel RGB image — confirm inputs are not RGBA/greyscale.
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)
    # Size, in inches, of the output images.
    IMAGE_SIZE = (12, 8)
    def run_inference_for_single_image(image, graph):
        # Run one forward pass and return the detection outputs as numpy values.
        with graph.as_default():
            with tf.Session() as sess:
                # Get handles to input and output tensors
                ops = tf.get_default_graph().get_operations()
                all_tensor_names = {
                    output.name for op in ops for output in op.outputs}
                tensor_dict = {}
                for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
                ]:
                    tensor_name = key + ':0'
                    if tensor_name in all_tensor_names:
                        tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                            tensor_name)
                if 'detection_masks' in tensor_dict:
                    # The following processing is only for single image
                    detection_boxes = tf.squeeze(
                        tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(
                        tensor_dict['detection_masks'], [0])
                    # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                    real_num_detection = tf.cast(
                        tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0], [
                        real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0], [
                        real_num_detection, -1, -1])
                    detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, image.shape[0], image.shape[1])
                    detection_masks_reframed = tf.cast(
                        tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    # Follow the convention by adding back the batch dimension
                    tensor_dict['detection_masks'] = tf.expand_dims(
                        detection_masks_reframed, 0)
                image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
                # Run inference
                output_dict = sess.run(tensor_dict,
                                       feed_dict={image_tensor: np.expand_dims(image, 0)})
                # all outputs are float32 numpy arrays, so convert types as appropriate
                output_dict['num_detections'] = int(
                    output_dict['num_detections'][0])
                output_dict['detection_classes'] = output_dict[
                    'detection_classes'][0].astype(np.uint8)
                output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                output_dict['detection_scores'] = output_dict['detection_scores'][0]
                if 'detection_masks' in output_dict:
                    output_dict['detection_masks'] = output_dict['detection_masks'][0]
        return output_dict
    for image_path in TEST_IMAGE_PATHS:
        image = Image.open(image_path)
        # the array based representation of the image will be used later in order to prepare the
        # result image with boxes and labels on it.
        image_np = load_image_into_numpy_array(image)
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        # Actual detection.
        output_dict = run_inference_for_single_image(image_np, detection_graph)
        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=8)
        plt.figure(figsize=IMAGE_SIZE)
        # cv2.imshow('image', image_np)
        fname = "/home/ubuntu/content/result/result_" + image_path.split('/')[-1]
        plt.imsave(fname, image_np)
        print('saved', fname)
        # Show the annotated result image in the window.
        img_open = Image.open(fname)
        render=ImageTk.PhotoImage(img_open)
        img = Label(text='test',image=render)
        img.image = render
        img.place(x=50,y=50)
        # label_Img = tk.Label(window, image=file)
        # label_Img.pack()
    # Read and process images/frame using your DL model here <--
    # Testing
    #messagebox.showwarning("Invalid Input","Image is having an invalid format") # Showing Warning not very Critcal
    #messagebox.showerror("Invalid Input","Image is having an invalid format") # Showing Error, very Critcal
    #classifcationResult = "CAT"
    #messagebox.showinfo("Classfication Result", classifcationResult)
    # result = "DOG" # model.predict(file) for example
    # resultText = "Classification Result:" + result # Concatenate the result class to the Label on the Window
    # ClassficationResultLabel.configure(text = resultText) # Update the Label text on the Window
# Add the Components create previsously to the window
MyLabel.grid(column=0, row=1) # Adding the Label
openBttn = Button(text="Open Image", command=BttnOpen_Clicked)
openBttn.grid(column=1, row=1) # Adding the Open Button
openProcess = Button(text="Process Image", command=BttnProcess_Clicked)
openProcess.grid(column=2, row=1) # Adding the Process Button
#userEntry.grid(column=1, row=3) # Adding the Text entry Widget
# Calling the maninloop()
MyWindow.mainloop()
| 13020363/Deep-Learning | Assignments/A3/GUI_v2.py | GUI_v2.py | py | 8,872 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"li... |
31416846331 | import zipfile
dest_dir = "Bonusfiles/Bonusfiles/files"
def extract_archive(archive_path, dest_dir):
    """Extract every member of the ZIP archive at *archive_path* into *dest_dir*."""
    archive = zipfile.ZipFile(archive_path, 'r')
    try:
        archive.extractall(dest_dir)
    finally:
        archive.close()
if __name__ == "__main__":
extract_archive("Bonusfiles/compressed.zip", dest_dir)
| manzitlo/Zip_CreateAndExtract | zip_extractor.py | zip_extractor.py | py | 285 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 7,
"usage_type": "call"
}
] |
38928738675 | from django import forms
from ckeditor.widgets import CKEditorWidget
from .models import UserProfile
class UserProfileForm(forms.ModelForm):
    """ModelForm for editing a UserProfile, with flexible date input formats
    and a minimum-length check on the bio.
    """

    # It is valid to explicitly instantiate a form field that has a
    # corresponding model field, but such a field will not take any of the
    # defaults from the model.
    date_of_birth = forms.DateField(
        label="Date of birth",
        input_formats=['%Y-%m-%d',  # '2006-10-25'
                       '%m/%d/%Y',  # '10/25/2006'
                       '%m/%d/%y']  # '10/25/06'
    )

    # Note that method declarations for cleaning must come before the
    # Meta class declaration.
    def clean_bio(self):
        """Validate that the bio is present and at least 10 characters long."""
        bio = self.cleaned_data.get('bio')
        # Guard against a missing value: cleaned_data.get() may return None,
        # and len(None) would raise TypeError instead of a validation error.
        if not bio or len(bio) < 10:
            raise forms.ValidationError("Bio must be at least 10 characters")
        return bio

    class Meta:
        model = UserProfile
        fields = ("date_of_birth",
                  "given_name",
                  "family_name",
                  "bio",
                  "city",
                  "state",
                  "country",
                  "favourite_animal",
                  "hobby",
                  "favourite_fountain_pen",
                  )
        labels = {
            "given_name": "First name",
            "family_name": "Last name",
        }
| Crossroadsman/treehouse-techdegree-python-project7 | accounts/forms.py | forms.py | py | 1,336 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.forms.DateField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.... |
14503858692 | from django.forms import Form, IntegerField, TextInput, CharField, ModelForm, Textarea
from .models import Feedback
class Calculator(Form):
    """Form for parcel dimensions and colour selection.

    Placeholder strings are user-facing and in Russian
    ("enter the parcel width/height", "choose a colour").
    """
    # Parcel width in whole units; HTML number input with min=1.
    width = IntegerField(min_value=1, widget=TextInput(
        attrs={
            'class': 'form-control',
            'type': 'number',
            'placeholder': 'Введите ширину посылки',
            'min': '1'
        }
    ))
    # Parcel height in whole units; HTML number input with min=1.
    height = IntegerField(min_value=1, widget=TextInput(
        attrs={
            'class': 'form-control',
            'type': 'number',
            'placeholder': 'Введите высоту посылки',
            'min': '1'
        }
    ))
    # Colour picked via the browser's native colour input.
    color = CharField(widget=TextInput(
        attrs={
            'class': 'form-control',
            'type': 'color',
            'placeholder': 'выберите цвет'
        }
    ))
class FeedbackForm(ModelForm):
    """ModelForm for the Feedback model, with Bootstrap-styled widgets
    carrying the client-side validation attributes (data-sb-validations).
    """
    class Meta:
        model = Feedback
        fields = ('name', 'email', 'phone_number', 'message')
        widgets = {
            'name': TextInput(
                attrs={
                    'class': 'form-control',
                    'id': 'name',
                    'type': 'text',
                    'placeholder': 'Enter your name...',
                    'data-sb-validations': 'required'
                }
            ),
            'email': TextInput(
                attrs={
                    'class': 'form-control',
                    'id': 'email',
                    'type': 'email',
                    'placeholder': 'kaliada-biz@yandex.ru',
                    'data-sb-validations': 'required,email'
                }
            ),
            'phone_number': TextInput(
                attrs={
                    'class': 'form-control',
                    'id': 'phone',
                    'type': 'tel',
                    'placeholder': '+375332020327',
                    'data-sb-validations': 'required'
                }
            ),
            'message': Textarea(
                attrs={
                    'class': 'form-control',
                    'id': 'message',
                    'type': 'text',
                    'placeholder': 'Enter your feedback here...',
                    'data-sb-validations': 'required'
                }
            ),
        } | Skywalker-69/python_django_belhard | catalogue/forms.py | forms.py | py | 2,291 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.Form",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.IntegerField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.forms.TextInput",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "djang... |
10180003978 | import os
import requests
import shutil
#################
# PREPARE FILES #
#################
source_dir = os.path.dirname(os.path.realpath(__file__))
md_file = os.path.join(source_dir, 'project.md')
##################
# PARSE MARKDOWN #
##################
with open(md_file, 'r') as f:
md = f.read()
#####################
# TRANSFORM TO HTML #
#####################
# documentation https://developer.github.com/v3/markdown/
r = requests.post(
"https://api.github.com/markdown",
json={
"text": md,
"mode": "markdown",
"context": "github/gollum"
}
)
#################
# OUTPUT RESULT #
#################
output = """
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="github-markdown.css">
<style>
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
@media (max-width: 767px) {
.markdown-body {
padding: 15px;
}
}
</style>
""" + f"""
<article class="markdown-body">
{r.text}
</article>
"""
output_dir = os.path.join(source_dir, 'output')
html_file = os.path.join(output_dir, 'index.html')
with open(html_file, "w") as f:
f.write(output)
resources_dir = os.path.join(source_dir, 'apron-doc')
resources_target = os.path.join(output_dir, 'apron-doc')
shutil.rmtree(resources_target, ignore_errors=True)
shutil.copytree(resources_dir, resources_target) | Acrop146/Test | project-description/render-site.py | render-site.py | py | 1,393 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_n... |
73591823713 | #! /usr/bin/env python
# Import ROS.
import rospy
# Import the API.
from iq_gnc.py_gnc_functions import *
# To print colours (optional).
from iq_gnc.PrintColours import *
# Import 3D Plotting Library
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Import Kalman Filter Library
from pykalman import KalmanFilter
import numpy as np
import math
def apply_kalman_filter(x_values, y_values, z_values):
    """Smooth a 3-D trajectory with a constant-velocity Kalman filter.

    Returns three arrays: the filtered x, y and z position estimates.
    """
    # State vector is [x, y, z, vx, vy, vz]; start at the first sample
    # with zero initial velocity and identity covariance.
    state0 = [x_values[0], y_values[0], z_values[0], 0, 0, 0]
    cov0 = np.eye(6)
    # Constant-velocity model: position_{t+1} = position_t + velocity_t.
    transition = np.eye(6)
    transition[0, 3] = transition[1, 4] = transition[2, 5] = 1
    # We observe position only (the first three state components).
    observation = np.zeros((3, 6))
    observation[0, 0] = observation[1, 1] = observation[2, 2] = 1
    kf = KalmanFilter(
        initial_state_mean=state0,
        initial_state_covariance=cov0,
        transition_matrices=transition,
        observation_matrices=observation,
    )
    measurements = np.column_stack((x_values, y_values, z_values))
    means, _covariances = kf.filter(measurements)
    return means[:, 0], means[:, 1], means[:, 2]
def plot_trajectory(x_values, y_values, z_values, x_est, y_est, z_est):
    """Render the actual vs. Kalman-estimated drone trajectory in 3-D."""
    figure = plt.figure()
    axes = figure.add_subplot(111, projection="3d")
    axes.plot(x_values, y_values, z_values,
              label="Drone Trajectory (Actual)", color="blue", marker="o")
    axes.plot(x_est, y_est, z_est,
              label="Drone Trajectory (Estimated)", color="green", linestyle="--", marker="x")
    axes.set_xlabel("X Position")
    axes.set_ylabel("Y Position")
    axes.set_zlabel("Z Position")
    axes.legend()
    plt.show()
def sliding_mode_control(current_state, desired_state, params):
    """Compute a sliding-mode control input from 6-D state vectors.

    Both states are [x, y, z, dx, dy, dz]; params is (gain, epsilon).
    Returns a length-3 numpy array of control inputs.
    """
    gain, epsilon = params
    pos = np.array(current_state[:3], dtype=float)
    vel = np.array(current_state[3:], dtype=float)
    pos_d = np.array(desired_state[:3], dtype=float)
    vel_d = np.array(desired_state[3:], dtype=float)
    # Position and velocity errors.
    e_pos = pos_d - pos
    e_vel = vel_d - vel
    # Sliding surface and resulting control law.
    surface = e_vel + gain * e_pos
    return -gain * e_vel - (gain ** 2) * e_pos - epsilon * np.sign(surface)
def main():
    """Fly a square of waypoints with sliding-mode corrections, then land.

    Initialises the ROS node and the GNC API, takes off to 3 m, visits
    each waypoint, applies a sliding-mode control correction when the
    drone reports position/velocity, and finally lands and plots the
    actual vs. Kalman-filtered trajectory.
    """
    # Initializing ROS node.
    rospy.init_node("drone_controller", anonymous=True)
    # Create an object for the API.
    drone = gnc_api()
    # Wait for FCU connection.
    drone.wait4connect()
    # Wait for the mode to be switched.
    drone.wait4start()
    # Create local reference frame.
    drone.initialize_local_frame()
    # Request takeoff with an altitude of 3m.
    drone.takeoff(3)
    # Specify control loop rate. We recommend a low frequency to not over load the FCU with messages. Too many messages will cause the drone to be sluggish.
    rate = rospy.Rate(3)
    # Waypoints as [x, y, z, psi] — a 5x5 m square at 3 m altitude.
    goals = [
        [0, 0, 3, 0],
        [5, 0, 3, -90],
        [5, 5, 3, 0],
        [0, 5, 3, 90],
        [0, 0, 3, 180],
        [0, 0, 3, 0]
    ]
    i = 0
    x_values = []
    y_values = []
    z_values = []
    while i < len(goals):
        drone.set_destination(
            x=goals[i][0], y=goals[i][1], z=goals[i][2], psi=goals[i][3])
        rate.sleep()
        if drone.check_waypoint_reached():
            # Get the current state of the drone; hasattr guards against
            # gnc_api versions without these accessors.
            current_position = drone.get_current_position() if hasattr(drone, 'get_current_position') else None
            current_velocity = drone.get_current_velocity() if hasattr(drone, 'get_current_velocity') else None
            if current_position is not None and current_velocity is not None:
                x, y, z = current_position.x, current_position.y, current_position.z
                dx, dy, dz = current_velocity.x, current_velocity.y, current_velocity.z
                # Calculate the control input using Sliding Mode Control
                current_state = [x, y, z, dx, dy, dz]
                desired_state = [goals[i][0], goals[i][1], goals[i][2], 0, 0, 0] # Assuming desired velocities are zero
                control_params = (1, 0.5) # Define your control parameters here (k, epsilon)
                control_input = sliding_mode_control(current_state, desired_state, control_params)
                # Use the control input to update the destination.
                # NOTE(review): set_destination is called with vx/vy/vz keyword
                # arguments here but without them above — confirm the gnc_api
                # version in use accepts velocity arguments.
                drone.set_destination(
                    x=goals[i][0], y=goals[i][1], z=goals[i][2], psi=goals[i][3], vx=control_input[0], vy=control_input[1], vz=control_input[2]
                )
                rate.sleep()
            # Re-check before advancing to the next waypoint.
            if drone.check_waypoint_reached():
                x_values.append(goals[i][0])
                y_values.append(goals[i][1])
                z_values.append(goals[i][2])
                i += 1
    # Land after all waypoints is reached.
    x_est, y_est, z_est = apply_kalman_filter(x_values, y_values, z_values)
    drone.land()
    rospy.loginfo(CGREEN2 + "All waypoints reached landing now." + CEND)
    plot_trajectory(x_values, y_values, z_values, x_est, y_est, z_est)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit()
| khulqu15/smc_drone | ros_smc_kf/scripts/square.py | square.py | py | 5,474 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.eye",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pykalman.KalmanFilter",
"line_num... |
42296024184 | import random
from itertools import starmap, cycle
class PacketState:
    """Tracks one outbound packet: sequence number, send status and wire form."""

    def __init__(self, seq_no, status, is_final, data):
        self.seq_no = seq_no
        self.status = status
        self.data = data
        # Serialize the packet once, up front.
        self.packet = PacketState.make_pkt(data, seq_no, is_final)

    @staticmethod
    def make_pkt(data, seq_no, is_final):
        """Serialize a packet as 'checksum&seq&final&ack&$payload'."""
        # The checksum is computed with a '255' placeholder in the checksum
        # slot; calc_checksum() skips those first three characters.
        placeholder = '{}&{}&{}&{}&$'.format(255, str(seq_no).zfill(2), is_final, 0)
        checksum = calc_checksum(placeholder + data)
        header = '{}&{}&{}&{}&$'.format(checksum, str(seq_no).zfill(2), is_final, 0)
        return header + data
def calc_checksum(data):
    """Return a 3-character, zero-padded 8-bit complement checksum of ``data``.

    The first three characters (the checksum placeholder in the header) are
    excluded from the sum.  Carry bytes above the low byte are folded back
    in, and the folded byte is bit-inverted, Internet-checksum style.
    """
    total = 0
    for ch in data[3:]:
        total += ord(ch)
    # Fold any carry bytes back into the low byte until one byte remains.
    while total > 0xFF:
        total = (total & 0xFF) + (total >> 8)
    # One's complement of the folded byte, rendered as a 3-char string.
    return str(total ^ 0xFF).zfill(3)
def make_ack_packet(seq_no):
    """Build a header-only ACK packet for ``seq_no``.

    Returns the header string "CCC&SS&0&1&$" where CCC is the checksum,
    SS the zero-padded sequence number, is_final=0 and is_ack=1.
    """
    is_final = 0
    is_ack = 1
    seq_field = str(seq_no).zfill(2)
    # Bug fix: compute the checksum over the same zero-padded sequence
    # number that is placed in the transmitted header (as
    # PacketState.make_pkt does).  Previously an int seq_no < 10 yielded a
    # checksum over "5" while the header carried "05", so the receiver's
    # checksum verification could not match.
    checksum = calc_checksum('{}&{}&{}&{}&$'.format(255, seq_field, is_final, is_ack))
    print('ack checksum {}'.format(checksum))
    headers = '{}&{}&{}&{}&$'.format(checksum, seq_field, is_final, is_ack)
    return headers
def lose_the_packet(PLP):
    """Randomly decide to drop a packet with loss probability ``PLP``."""
    roll = random.random()
    return roll < PLP
def encrypt(message, key):
    """Vigenere-encrypt ``message`` with ``key`` and return the ciphertext.

    Non-alphabetic characters are stripped and the message is upper-cased
    before encryption.  The key is also upper-cased (bug fix): previously a
    lower-case key shifted every letter by an extra ``ord('a') - ord('A')``
    and produced a non-standard cipher; upper-case keys are unaffected.
    """
    # convert to uppercase and strip out non-alpha characters.
    message = filter(str.isalpha, message.upper())
    key = key.upper()

    # single letter encryption: Caesar shift of c by key letter k.
    def enc(c, k):
        return chr(((ord(k) + ord(c) - 2 * ord('A')) % 26) + ord('A'))

    return "".join(starmap(enc, zip(message, cycle(key))))
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Str)
        fill        - Optional  : bar fill character (Str)
    """
    percent = '{0:.{1}f}'.format(100 * (iteration / float(total)), decimals)
    done = int(length * iteration // total)
    bar = fill * done + '-' * (length - done)
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end='\r')
    # Print New Line on Complete
    if iteration == total:
        print()
| TarekAlQaddy/reliable-data-transfer-server | Helpers.py | Helpers.py | py | 2,829 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.random",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "itertools.starmap",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 61,
"usage_type": "call"
}
] |
23131275206 | """ Databricks - Terminate Cluster
User Inputs:
- Authentication
- Cluster ID
- terminates a single cluster.
"""
import argparse
import sys
import shipyard_utils as shipyard
try:
import errors
from helpers import DatabricksClient
except BaseException:
from . import errors
def get_args():
    """Parse the three required command-line arguments for this blueprint."""
    parser = argparse.ArgumentParser()
    for flag, dest in (('--access-token', 'access_token'),
                       ('--instance-id', 'instance_id'),
                       ('--cluster-id', 'cluster_id')):
        parser.add_argument(flag, dest=dest, required=True)
    return parser.parse_args()
def terminate_cluster(client, cluster_id):
    """
    Terminates a Databricks cluster given its ID.
    see: https://docs.databricks.com/dev-tools/api/latest/clusters.html#delete-terminate
    """
    terminate_cluster_endpoint = "/clusters/delete"
    payload = {"cluster_id": cluster_id}
    try:
        # Fire the termination request, then persist the raw JSON response
        # under the Shipyard artifacts folder for later inspection.
        termination_response = client.post(terminate_cluster_endpoint,
                                           data=payload)
        base_folder_name = shipyard.logs.determine_base_artifact_folder(
            'databricks')
        artifact_subfolder_paths = shipyard.logs.determine_artifact_subfolders(
            base_folder_name)
        response_file_name = shipyard.files.combine_folder_and_file_name(
            artifact_subfolder_paths['responses'],
            f'cluster_termination_{cluster_id}_response.json')
        shipyard.files.write_json_to_file(
            termination_response.json(), response_file_name)
    except BaseException as e:
        # NOTE(review): a failure while writing the artifact file is also
        # reported as an invalid-instance error here — confirm intended.
        print(
            f"Ran into an error while trying to terminate cluster {cluster_id}. Please check your instance ID and try again.")
        print(e)
        sys.exit(errors.EXIT_CODE_INVALID_INSTANCE)
    determine_status(termination_response, cluster_id)
def determine_status(termination_response, cluster_id):
    """Inspect the termination response and exit with the matching status code."""
    status = termination_response.status_code
    if status == 200:
        print(f"Cluster termination for id: {cluster_id} has started...")
        sys.exit(errors.EXIT_CODE_TERMINATION_SUCCESSFULLY_STARTED)
    if status == 400:  # Cluster in RESTARTING state
        # A 400 whose message mentions a missing cluster means a bad id.
        if "does not exist" in termination_response.json()['message']:
            print(
                f"Cluster: {cluster_id} does not exist. Check for typos or that you have access to this cluster.")
            sys.exit(errors.EXIT_CODE_INVALID_CLUSTER)
        throw_generic_error(termination_response, cluster_id)
    elif status == 403:
        if "Invalid access token" in termination_response.json()['message']:
            print(
                f"The access key provided is not valid. Check for typos.")
            sys.exit(errors.EXIT_CODE_INVALID_CREDENTIALS)
        throw_generic_error(termination_response, cluster_id)
    else:
        throw_generic_error(termination_response, cluster_id)
def throw_generic_error(termination_response, cluster_id):
    """Report an unexpected API response and exit with the unknown-error code."""
    parts = (
        f"Failed to start Cluster: {cluster_id}",
        f"HTTP Status code: {termination_response.status_code} ",
        f"and Response: {termination_response.text}",
    )
    print(*parts)
    sys.exit(errors.EXIT_CODE_UNKNOWN_ERROR)
def main():
    """Entry point: parse args, build the Databricks client, terminate the cluster."""
    args = get_args()
    # initialize databricks client
    client = DatabricksClient(args.access_token, args.instance_id)
    # run terminate cluster
    terminate_cluster(client, args.cluster_id)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| shipyardapp/databricks-blueprints | databricks_blueprints/terminate_cluster.py | terminate_cluster.py | py | 3,593 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "shipyard_utils.logs.determine_base_artifact_folder",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "shipyard_utils.logs",
"line_number": 38,
"usage_type": "attrib... |
12712332139 | import os
try:
import busio
import armachat_lora
import aesio
except ImportError:
pass
from collections import namedtuple
import time
import struct
import binascii
import minipb
# minipb wire schema for Meshtastic's `Data` message: port number, raw
# payload bytes, and optional routing/metadata fields.
MeshtasticData = minipb.Wire([
    ("portnum", "t"),
    ("payload", "a"),
    ("want_response", "b"),
    ("dest", "I"),
    ("source", "I"),
    ("request_id", "I"),
    ("reply_id", "I"),
    ("emoji", "I"),
])
# minipb wire schema for Meshtastic's `NodeInfo` message: node number,
# nested user identity, and a few telemetry fields.
MeshtasticNodeInfo = minipb.Wire([
    ("num", "T"),
    ("user", [
        ("id", "U"),
        ("long_name", "U"),
        ("short_name", "U"),
        ("macaddr", "a"),
        ("hw_model", "x"),
        ("is_licensed", "b"),
    ]),
    ("position", "x"),
    ("snr", "f"),
    ("last_heard", "I"),
    ("device_metrics", "x"),
])
class Communication:
    """LoRa messaging layer speaking the Meshtastic wire format.

    Handles packet framing (dest|src|id|flags header), optional AES-CTR
    payload encryption, NodeInfo announcements and text messages.
    """
    # Broadcast destination address (all nodes).
    broadcast = b"\xff\xff\xff\xff"
    def __init__(self, lora_config=None, my_address=None, remote_address=None, encryption_key=None, encryption_iv=None, nick=None, beep=None, led=None):
        self.lora_config = lora_config
        self.lora = None
        self.my_address = my_address
        self.messages = []
        self.encryption_key = encryption_key
        # NOTE(review): the line below clobbers the key assigned just above
        # and effectively disables encryption — looks like a debug leftover;
        # confirm intent before relying on encrypted traffic.
        self.encryption_key = None
        self.encryption_iv = encryption_iv
        self.idx = 0
        self.nick = nick
        self.beep = beep
        self.led = led
    # Decoded packet plus link metadata (SNR, RSSI, local timestamp).
    Message = namedtuple(
        "Message", ["dst", "src", "id", "flags", "s", "rssi", "tstamp", "packet"])
    def initialize(self):
        # Bring the radio up from `lora_config` and announce this node.
        if "m" not in self.lora_config:
            self.lora_config = {"m": "e5"}
            return
        if self.lora_config["m"] != "e5":
            spi = busio.SPI(
                self.lora_config["sck"], MOSI=self.lora_config["mosi"], MISO=self.lora_config["miso"])
            self.lora = armachat_lora.RFM9x(
                spi, self.lora_config["cs"], self.lora_config["hz"])
            self.lora.signal_bandwidth = self.lora_config["bw"]
            self.lora.coding_rate = self.lora_config["cr"]
            self.lora.spreading_factor = self.lora_config["sf"]
            self.lora.preamble_length = self.lora_config["pl"]
            self.lora.tx_power = self.lora_config["tx"]
            self.lora.low_datarate_optimize = self.lora_config["ld"]
            self.lora.listen()
        self.announce_myself()
    def get_messages(self):
        # Messages accumulated by loop()/send_message().
        return self.messages
    def clear_messages(self):
        self.messages = []
    def format_address(self, address):
        # Render a raw address as lowercase hex for display.
        return str(binascii.hexlify(address), "utf-8")
    def loop(self):
        # Poll the radio once; returns True when the UI should refresh.
        if not self.lora:
            return
        self.led.value = True
        if self.lora.rx_done():
            message = self.receive()
            if message:
                refresh = False
                if message.packet['portnum'] == 1:  # Text message
                    self.messages.append(message)
                    self.beep()
                    refresh = True
                if message.packet['portnum'] == 4:  # Nodeinfo message
                    node_info = MeshtasticNodeInfo.decode(message.packet['payload'])
                    if node_info:
                        # nick[3] registers the peer; True means "new peer".
                        refresh = self.nick[3](node_info['user']['macaddr'], node_info['user']['id'])
                        if refresh:
                            self.messages.append("-!- %s [%s@%s] has joined." % (node_info['user']['id'], node_info['user']['short_name'], binascii.hexlify(node_info['user']['macaddr']).decode("utf-8")))
                            self.announce_myself()
                self.led.value = False
                return refresh
        self.led.value = False
        return False
    def announce_myself(self):
        # Broadcast our NodeInfo (id, names, MAC address) so peers learn us.
        nodeinfo_packet = {
            "num": int.from_bytes(self.my_address, 'little'),
            "user": {
                "id": self.nick[0](),
                "long_name": self.nick[0](),
                "short_name": self.nick[0]()[0:2].upper(),
                "macaddr": self.my_address,
                "hw_model": None,
                "is_licensed": False,
            },
            "position": None,
            "snr": self.lora.last_snr,
            "last_heard": None,
            "device_metrics": None,
        }
        packet = {
            "portnum": 4,
            "payload": MeshtasticNodeInfo.encode(nodeinfo_packet),
            "want_response": None,
            "dest": None,
            "source": None,
            "request_id": None,
            "reply_id": None,
            "emoji": None,
        }
        msg_id = os.urandom(4)
        self.send(self.my_address, self.broadcast, packet, id=msg_id, want_ack=False)
    def send_message(self, remote_address=b"\xff\xff\xff\xff", text=""):
        # Send a text packet (portnum 1); returns True when transmitted.
        msg_id = os.urandom(4)
        packet = {
            "portnum": 1,
            "payload": text.encode("utf-8"),
            "want_response": None,
            "dest": None,
            "source": None,
            "request_id": None,
            "reply_id": None,
            "emoji": None,
        }
        msg = self.send(self.my_address, remote_address,
                        packet, id=msg_id, want_ack=True)
        if msg:
            self.messages.append(msg)
            return True
        return False
    def receive(self):
        # Read one raw frame; validate addressing, decrypt and decode it.
        header = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        packet = self.lora.receive()
        if packet is None:
            print("Receiver error")
            return None
        packetSize = len(packet)
        if packetSize < 16:
            print("Short packet <16")
            return None
        header = packet[0:16]
        # Drop frames not addressed to us or to broadcast.
        if bytearray(self.my_address) != header[0:4] and header[0:4] != self.broadcast:
            return None
        payload = bytes(packet[16:])
        if self.encryption_key:
            cipher = aesio.AES(self.encryption_key,
                               aesio.MODE_CTR, self.encryption_iv)
            decrypted_out = bytearray(len(payload))
            cipher.decrypt_into(payload, decrypted_out)
            payload = decrypted_out
        try:
            decoded_packet = MeshtasticData.decode(payload)
        except Exception as e:
            print("Failed to decode packet", str(e))
            return
        msgID = int.from_bytes(packet[8:12], 'big')
        msg = self.Message(dst=self.my_address, src=packet[4:8], id=msgID, flags=packet[15],
                           s=self.lora.last_snr, rssi=self.lora.last_rssi, tstamp=time.localtime(), packet=decoded_packet)
        return msg
    def send(self, sender, destination, packet, id, hops=3, want_ack=True):
        # Frame (dest|src|id|flags), optionally encrypt, and transmit.
        self.led.value = True
        dest = bytearray(destination)
        src = bytearray(sender)
        # msg_id = struct.pack("!I", id)
        msg_id = bytearray(id)
        # Bit 3 of the flags value carries the want-ack request.
        flags = bytearray(struct.pack(
            "!I", hops | 0b1000 if want_ack else hops & 0b0111))
        packet_bytes = MeshtasticData.encode(packet)
        payload = bytearray(len(packet_bytes))
        if self.encryption_key:
            # CTR nonce is source address + message id.
            nonce = src + msg_id
            cipher = aesio.AES(self.encryption_key,
                               aesio.MODE_CTR, nonce)
            encrypted_out = bytearray(len(payload))
            cipher.encrypt_into(packet_bytes, encrypted_out)
            payload = encrypted_out
        else:
            payload = packet_bytes
        header = bytearray(dest + src + msg_id + flags)
        if self.lora_config["m"] != "e5":
            body = bytearray(header) + bytearray(payload)
            self.lora.send(body)
            self.led.value = False
            return self.Message(dst=header[4:8], src=self.my_address, id=id, flags=header[15],
                                s=self.lora.last_snr, rssi=self.lora.last_rssi, tstamp=time.localtime(), packet=packet)
        self.led.value = False
        return None
| rosmo/armassi | armassi/lib/comms.py | comms.py | py | 7,772 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "minipb.Wire",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "minipb.Wire",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "busio.SPI",
"line_... |
31799853905 | import cv2
import mercantile
from shapely.geometry import Polygon, MultiPolygon, mapping
from toolz import curry, pipe
from toolz.curried import *
import numpy as np
from abfs.api.prediction.image import ImagePrediction
BASE_URL = 'https://api.mapbox.com/v4/mapbox.satellite'
class LatLongPrediction():
    """Predict building footprints for the map tile containing a lat/long.

    Fetches the Mapbox satellite tile, runs the Keras model on it, and
    vectorizes the prediction mask into GeoJSON polygons.
    """
    def __init__(self, keras_model, latitude, longitude, zoom=17,
                 tolerance=0.3, api_key=None, image_path_only=False):
        self.keras_model = keras_model
        # Margin used when binarizing the prediction mask (threshold is
        # 1 - tolerance).
        self.tolerance = tolerance
        self.tile = mercantile.tile(longitude, latitude, zoom)
        self.image_path_only = image_path_only
        t = self.tile
        self.url = f'{BASE_URL}/{t.z}/{t.x}/{t.y}@2x.png?access_token={api_key}'
    def run(self):
        # Either predict straight from a temp file, or predict and then
        # vectorize the mask into (GeoJSON mapping, mask image).
        if self.image_path_only:
            return ImagePrediction(self.url,
                                   self.keras_model,
                                   temp_file=True).run()
        prediction_image = ImagePrediction(self.url, self.keras_model).run()
        multi_polygon = self._multi_polygon(prediction_image, self.tile)
        return mapping(multi_polygon), prediction_image
    def _multi_polygon(self, prediction_image, tile):
        # Pipeline: contours -> lat/long polygons -> drop invalid -> MultiPolygon.
        return pipe(
            self._find_contours(prediction_image),
            map(self._contour_to_lat_long(prediction_image.shape[0:2], tile)),
            filter(lambda p: p is not None),
            list,
            MultiPolygon
        )
    def _find_contours(self, prediction_image):
        # Binarize at (1 - tolerance), then extract external contours only.
        _, threshold = cv2.threshold(prediction_image, 1 - self.tolerance, 1,
                                     cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(np.array(threshold, np.uint8),
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        return contours
    @curry
    def _contour_to_lat_long(self, image_shape, tile, contour):
        # Scale pixel coordinates into the tile's web-mercator bounds, then
        # convert each vertex to (lng, lat).
        def xy_to_lng_lat(xy):
            lat_long = mercantile.lnglat(xy[0], xy[1])
            return (lat_long.lng, lat_long.lat)
        xy_bounds = mercantile.xy_bounds(tile)
        height, width = image_shape
        width_scale = (xy_bounds.right - xy_bounds.left) / width
        height_scale = (xy_bounds.bottom - xy_bounds.top) / height
        xy_points = (contour[:, :] *
                     (width_scale, height_scale) +
                     (xy_bounds.left, xy_bounds.top))
        # A valid polygon needs at least three vertices.
        if xy_points.shape[0] < 3:
            return None
        return Polygon(list(map(xy_to_lng_lat, xy_points[:, 0])))
| rcdilorenzo/abfs | abfs/api/prediction/lat_long.py | lat_long.py | py | 2,544 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "mercantile.tile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "abfs.api.prediction.image.ImagePrediction",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "abfs.api.prediction.image.ImagePrediction",
"line_number": 29,
"usage_type": "c... |
12090767387 | import requests
from bs4 import BeautifulSoup as bs
import json
import os
import time
#This URL will be the URL that your login form points to with the "action" tag.
POSTLOGINURL = 'https://www.hackerrank.com/auth/login'
LOGINREST = "https://www.hackerrank.com/rest/auth/login"
#This URL is the page you actually want to pull down with requests.
REQUESTURL = 'https://www.hackerrank.com/rest/contests/master/submissions/?offset=0&limit=500'
username= 'usr'
password= 'pass'
# Log in to HackerRank, list submissions, then download each accepted
# solution's source into hackerrank_sol/<language>/.
with requests.Session() as s:
    headers = requests.utils.default_headers()
    headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',})
    site = s.get(POSTLOGINURL,headers=headers)
    bs_content = bs(site.content, "html.parser")
    token = str()
    # Scrape the CSRF token from the login page's <meta> tags.
    for link in bs_content.find_all('meta'):
        if str(link.get('name')) == 'csrf-token':
            token=(str(link.get('content')))
    login_data = {"fallback":"false","login":username,"password":password, "remember_me":"false"}
    # NOTE(review): the hard-coded cookie/session headers below were
    # copied from a browser session and may be stale.
    headers=({"authority": "www.hackerrank.com",
              "method": "POST",
              "path": "/rest/auth/login",
              "scheme": "https",
              "accept": "application/json",
              "accept-encoding": "gzip, deflate, br",
              "accept-language": "en-US,en;q=0.9,ar;q=0.8",
              "content-length": "89",
              "content-type": "application/json",
              "cookie": "user_type=hacker; hackerrank_mixpanel_token=7d9b95fc-9617-4f31-bd20-94f072556788; show_cookie_banner=false; h_r=home; h_l=body_middle_left_button; h_v=1; hrc_l_i=F; _hrank_session=fc937d8fa51bc4fbd5578d011ee9fe67fd339cbbb4a28213bf4a7cfda39fc102a9afad2a19a9da496d47af7e3469d718888e30732bd84745f1d774e300bdd0ea",
              "origin": "https://www.hackerrank.com",
              "referer": "https://www.hackerrank.com/dashboard",
              "sec-fetch-dest": "empty",
              "sec-fetch-mode": "cors",
              "sec-fetch-site": "same-origin",
              "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36",
              'x-csrf-token': token,})
    rh=s.post(LOGINREST,data=login_data,headers=headers)
    # print(rh.response)
    home_page = s.get(REQUESTURL,headers=headers)
    # Dump the submissions listing to disk, then re-read it as JSON below.
    f=open("masa2el.json","w")
    f.write(home_page.content.decode("utf-8") )
    f.close()
    #print(home_page.content)
    # print(json.dumps(home_page.content))
with open('masa2el.json') as json_file:
    data = json.load(json_file)
if not os.path.exists('hackerrank_sol'):
    os.mkdir('hackerrank_sol')
i=0
for p in data['models']:
    #print('Name: ' + str(p['challenge_id']))
    #print('Website: ' + p['language'])
    #print('From: ' + p['challenge']['slug'])
    if p['status'] == 'Accepted':
        i=i+1
        challenge_slug=p['challenge']['slug']
        sol_id=p['id']
        if not os.path.exists('hackerrank_sol/'+p['language']):
            os.mkdir('hackerrank_sol/'+p['language'])
        # Use the language name as the file extension, except Python
        # variants (e.g. python3/pypy) which all get ".py".
        extension="py"
        if p['language'].find('py') == -1:
            extension=p['language']
        file_name='hackerrank_sol/'+p['language']+'/'+challenge_slug+"."+extension
        link='https://www.hackerrank.com/rest/contests/master/challenges/'+challenge_slug+'/submissions/'+str(sol_id)
        home_page = s.get(link,headers=headers)
        # home_page.config['keep_alive'] = False
        print(home_page)
        f=open(file_name,"w")
        f.write(home_page.content.decode("utf-8"))
        f.close()
        # Re-read the raw response and keep only the solution source code.
        with open(file_name) as json_file:
            data = json.load(json_file)
        f=open(file_name,"w")
        print(file_name)
        f.write(data['model']['code'])
        f.close()
        #print('Name: ' + str(data['model']['code']))
        #print('')
        #time.sleep(5)
        # Crude rate limiting: pause after every 10 downloads.
        if i==10:
            print("SLEEPING FOR 60 SECONDS!")
            time.sleep(60)
            i=0
{
"api_name": "requests.Session",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.utils.default_headers",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.utils",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "b... |
38989140015 | import random
import torch
import os
import pandas as pd
from pathlib import Path
import torch.utils.data as data
from torch.utils.data import dataloader
class CamelData(data.Dataset):
    """Bag-of-features WSI dataset.

    Serves one (features, label) pair per slide, with the train/val/test
    split read from a per-fold CSV (columns: train/val/test + *_label).
    """
    def __init__(self, dataset_cfg=None, state=None):
        # Set all input args as attributes
        self.__dict__.update(locals())
        self.dataset_cfg = dataset_cfg
        # ---->data and label
        self.nfolds = self.dataset_cfg.nfold
        self.fold = self.dataset_cfg.fold
        self.feature_dir = self.dataset_cfg.data_dir
        self.csv_dir = self.dataset_cfg.label_dir + f"fold{self.fold}.csv"
        self.slide_data = pd.read_csv(self.csv_dir, index_col=0)
        # ---->order (shuffle instances within each feature bag)
        self.shuffle = self.dataset_cfg.data_shuffle
        # ---->split dataset
        if state == "train":
            self.data = self.slide_data.loc[:, "train"].dropna()
            self.label = self.slide_data.loc[:, "train_label"].dropna()
        if state == "val":
            self.data = self.slide_data.loc[:, "val"].dropna()
            self.label = self.slide_data.loc[:, "val_label"].dropna()
        if state == "test":
            self.data = self.slide_data.loc[:, "test"].dropna()
            self.label = self.slide_data.loc[:, "test_label"].dropna()
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        # Load the pre-extracted feature tensor for the idx-th slide.
        # print(idx, "is the current idx")
        slide_id = self.data[idx]
        slide_id = str(int(slide_id))
        label = int(self.label[idx])
        # print('\nnow print the slide id',str(slide_id))
        # full_path = Path(self.feature_dir) / f"slide{slide_id}.pt"
        full_path = Path(self.feature_dir) / f"{slide_id}.pt"
        # print('now print the full path: ',str(full_path))
        features = torch.load(full_path)
        # ----> shuffle: permute instances within the bag (MIL augmentation)
        if self.shuffle == True:
            index = [x for x in range(features.shape[0])]
            random.shuffle(index)
            features = features[index]
        return features, label
from random import shuffle
from torchvision import transforms
from PIL import Image
# Preprocessing for chest X-ray images: grayscale, small random rotation
# (augmentation), resize to 256x256, tensor conversion and normalization.
transform = transforms.Compose(
    [
        transforms.Grayscale(1),
        transforms.RandomRotation(30,fill=(0,)),
        transforms.Resize((256,256)),
        transforms.ToTensor(),
        transforms.Normalize(0.5,0.25)
        # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
)
class ChestXray(torch.utils.data.Dataset):
    """Folder-per-class chest X-ray dataset; yields (label_index, image)."""
    def __init__(self, dataset_cfg=None, state=None) -> None:
        super().__init__()
        self.dataset_cfg = dataset_cfg
        self.data_path = []
        self.labels = []  # e.g. ['COVID19', 'NORMAL', 'PNEUMONIA']
        dataset_path = os.path.join(self.dataset_cfg.data_dir,state)
        # One subfolder per class; record (class_name, file_path) pairs.
        for folder in sorted(Path(dataset_path).iterdir()):
            self.labels.append(folder.stem)
            for file in folder.iterdir():
                file = str(file.resolve())
                self.data_path.append([folder.stem, file])
        if self.dataset_cfg.data_shuffle:
            shuffle(self.data_path)
    def __len__(self):
        return len(self.data_path)
    def __getitem__(self, index):
        assert index < self.__len__()
        label, path = self.data_path[index]
        # Apply the module-level `transform` pipeline to the loaded image.
        img = transform(Image.open(path))
        label = self.labels.index(label)
        return label, img
| ptoyip/6211H_Final | code/baseline_model/datasets/camel_data.py | camel_data.py | py | 3,369 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathli... |
73938024675 | '''
Created on 25 abr. 2020
@author: jesus.fernandez
'''
from common.SQLUtil import SQLUtil
class EmpleoINEProcessor(object):
    '''
    Reads INE employment data from the MongoDB collection EMPLEO_INE via
    Spark, extracts the 'Dato base' series, and writes the result to the
    datos_empleo_ine table.
    '''
    def __init__(self, spark, sc, sql):
        '''
        Constructor: keep references to the Spark session, context and
        SQL context used by process().
        '''
        self.spark = spark
        self.sc = sc
        self.sql = sql
    def process(self):
        # Load the raw collection and expose it as a temp table for SQL.
        df_datos_empleo_ine = self.spark.read.format("mongo").option("uri", "mongodb://localhost/TFM_Jfernandez.EMPLEO_INE").load()
        df_datos_empleo_ine.registerTempTable("EMPLEO_INE")
        # Explode the Data array into one row per observation, keeping
        # only the 'Dato base' indicator series.
        df_datos_empleo_ine_base = self.sql.sql('''
                    SELECT Nombre as nombre_variable,
                            Unidad.Nombre as unidad,
                            Escala.Nombre as escala,
                            datos.TipoDato.Nombre as tipoDato,
                            datos.NombrePeriodo as Periodo,
                            datos.Valor as valor
                    FROM EMPLEO_INE
                    LATERAL VIEW explode(Data) as datos
                    WHERE Nombre like '%Dato base%'
                    ''')
        df_datos_empleo_ine_base.show(20, False)
        # Persist (overwrite) the filtered rows.
        SQLUtil.writeSparkDf(df_datos_empleo_ine_base, "datos_empleo_ine", True)
| jfernandezrodriguez01234/TFM_jfernandezrodriguez01234 | src/processors/social/EmpleoINEProcessor.py | EmpleoINEProcessor.py | py | 1,261 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "common.SQLUtil.SQLUtil.writeSparkDf",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "common.SQLUtil.SQLUtil",
"line_number": 43,
"usage_type": "name"
}
] |
25058904996 | import pygame
class Player (object):
    """Platformer player using pixel-mask collision against the level
    (module globals `image` vs `imageMask`: a pixel collides where the
    two images differ)."""
    # Vertical physics constants (pixels per frame).
    inititalJumpSpeed=30
    jumpSpeed=30
    jumping=False
    gravity=2
    initialFallSpeed=0
    fallSpeed=0
    falling=True
    spriteName="./Run1.png"
    spriteNum=0
    sprite=pygame.image.load("./Run1.png");
    sprite=pygame.transform.scale(sprite,(50,50))
    #print(sprite.get_rect().size)
    xPos=20;
    yPos=400;
    xMoveSpeed=10
    airDrift=0;
    width=(sprite.get_rect().size)[0];
    height=sprite.get_rect().size[1];
    # NOTE(review): these lists are class attributes shared by all
    # instances — fine with a single player, but not per-instance state.
    collisionInFront=[];
    collisionInMiddle=[];
    wallCollision=False;
    frontCollisionFrac=0;
    hitBox=pygame.Rect(xPos,yPos,width,height);
    def __init__ (self, xP, yP):
        self.xPos=xP
        self.yPos=yP
        self.wallCollision=False
    def update(self, dir):
        # Sample collision columns along the player's height (middle and
        # leading edge for the facing direction `dir`: 1=right, -1=left),
        # resolve wall hits, push the player up out of terrain, apply
        # gravity, and refresh the sprite and hit box.
        self.collisionInFront.clear()
        self.collisionInMiddle.clear();
        for i in range(int(self.yPos),int(self.height+self.yPos),1):
            self.collisionInMiddle.append(bool(((imageMask.get_at((int(self.xPos+self.width/2),i))) != (image.get_at((int(self.xPos+self.width/2),i))))))
            if(dir==1):
                self.collisionInFront.append(bool((imageMask.get_at((int(self.xPos+self.width),i))) != (image.get_at((int(self.xPos+self.width),i)))))
            elif(dir==-1):
                self.collisionInFront.append(bool((imageMask.get_at((int(self.xPos),i))) != (image.get_at((int(self.xPos),i)))))
        # A wall is hit when at least half of the front column collides.
        frontCollisionFrac=(self.collisionInFront.count(True)/self.height)
        if(frontCollisionFrac>=0.5):
            #print("wall hit")
            self.wallCollision=True
        else:
            self.wallCollision=False
        #print(self.collisionInMiddle.count(True))
        #print();
        # Count colliding pixels from the feet upward and step up out of
        # the terrain, then re-run the collision pass recursively.
        adjustment=0;
        for i in range(self.height-1,0,-1):
            if self.collisionInMiddle[i]:
                adjustment+=1
        #print(adjustment)
        if (adjustment != 0):
            self.yPos-=adjustment
            self.update(dir);
        # Start falling when the pixel under the feet is clear.
        if(imageMask.get_at((int(self.xPos+self.width/2),int(self.yPos)+self.height)) == image.get_at((int(self.xPos+self.width/2),int(self.yPos)+self.height)) and not self.jumping):
            self.falling=True
        if(self.falling):
            self.fall();
        self.spriteName
        self.sprite=pygame.image.load(self.spriteName);
        self.sprite=pygame.transform.scale(self.sprite,(50,50))
        if(dir==-1):
            self.sprite=pygame.transform.flip(self.sprite,True,False)
        self.spriteNum+=self.spriteNum;
        self.hitBox=pygame.Rect(self.xPos,self.yPos,self.width,self.height)
    def fall(self):
        # Accelerating descent; stops when the mid-bottom pixel collides.
        self.yPos+=self.fallSpeed
        self.fallSpeed=self.fallSpeed+self.gravity
        if(imageMask.get_at((int(self.xPos+self.width/2),int(self.yPos)+self.height)) != image.get_at((int(self.xPos+self.width/2),int(self.yPos)+self.height))):
            self.falling=False;
            self.fallSpeed=self.initialFallSpeed
            self.airDrift=0
    def jump(self):
        # Decelerating ascent; hands control back to falling at the apex.
        self.yPos-=self.jumpSpeed
        self.jumpSpeed=self.jumpSpeed-self.gravity
        if(self.yPos<0):
            self.yPos=0
        if(self.jumpSpeed==0):
            self.jumping=False
            self.falling=True
            self.jumpSpeed=self.inititalJumpSpeed
class Goal(object):
    """Animated star sprite marking the level goal."""

    # Class-level defaults; __init__ shadows the position per instance.
    xPos = 0
    yPos = 0
    sprite = pygame.image.load("./StarSprites/0.png")
    spriteNum = 0
    hitBox = pygame.Rect((xPos, yPos), (sprite.get_rect().size))

    def __init__(self, xP, yP):
        """Place the goal at (xP, yP)."""
        self.xPos = xP
        self.yPos = yP
        self.wallCollision = False

    def load(self):
        """Advance the 8-frame star animation and refresh the hit box."""
        self.spriteNum += 0.2
        if self.spriteNum >= 8:
            self.spriteNum = 0
        frame_path = "./StarSprites/" + str(int(self.spriteNum)) + ".png"
        self.sprite = pygame.image.load(frame_path)
        self.hitBox = pygame.Rect((self.xPos, self.yPos), (self.sprite.get_rect().size))
#GAME START
# Window/state setup: create the display, load the level image and its
# collision mask, then let the player click to place the goal star.
pygame.init()
screenWidth=1000;
screenHeight=700;
gameDisplay = pygame.display.set_mode((screenWidth,screenHeight))
pygame.display.set_caption('HTNE Game')
clock=pygame.time.Clock()
crashed = False
gameWin=False
xMoveSpeed=1;
leftHeld=False
rightHeld=False
upHeld=False
dir=1;
goalSet=False;
character=Player(40,40)
goal=Goal(0,0)
image=pygame.image.load("./purple2.jpg")
image=pygame.transform.scale(image,(screenWidth,screenHeight))
imageMask=pygame.image.load("./purple2_Mask.jpg")
imageMask=pygame.transform.scale(imageMask,(screenWidth,screenHeight))
# Goal-placement loop: the star follows the mouse until a click fixes it.
while not goalSet:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True
            goalSet=True
        if event.type == pygame.MOUSEBUTTONUP:
            goalSet=True
            break;
    goal.xPos=pygame.mouse.get_pos()[0];
    goal.yPos=pygame.mouse.get_pos()[1];
    goal.load()
    gameDisplay.blit(image,(0,0))
    gameDisplay.blit(goal.sprite,(goal.xPos, goal.yPos))
    pygame.display.update()
    clock.tick(60)
# Main game loop: handle input, apply horizontal movement and air drift,
# run physics/collision, test for the goal, and redraw at 60 FPS.
while not crashed and not gameWin:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_a:
                leftHeld = True
                # Only change facing while grounded.
                if not character.jumping and not character.falling:
                    dir = -1
            if event.key == pygame.K_d:
                rightHeld = True
                if not character.jumping and not character.falling:
                    dir = 1
            if event.key == pygame.K_w and not character.falling:
                character.falling = False
                character.jumping = True
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_a:
                leftHeld = False
            if event.key == pygame.K_d:
                rightHeld = False
    # Keep the facing direction in sync with held keys while grounded.
    if leftHeld and not character.jumping and not character.falling:
        dir = -1
    if rightHeld and not character.jumping and not character.falling:
        dir = 1
    if not character.wallCollision:
        #print(character.wallCollision)
        if leftHeld:
            if character.jumping or character.falling:
                # Air control: accumulate drift rather than full speed.
                character.airDrift -= character.xMoveSpeed / 12
                character.xPos += character.airDrift
            else:
                character.xPos = character.xPos - character.xMoveSpeed
            # Clamp to the screen bounds.
            if character.xPos <= 0:
                character.xPos = 0
            elif character.xPos + character.width >= screenWidth:
                character.xPos = screenWidth - character.width - 1
        elif rightHeld:
            if character.jumping or character.falling:
                character.airDrift += character.xMoveSpeed / 12
                character.xPos += character.airDrift
            else:
                character.xPos = character.xPos + character.xMoveSpeed
            if character.xPos <= 0:
                character.xPos = 0
            elif character.xPos + character.width >= screenWidth:
                character.xPos = screenWidth - character.width - 1
        elif character.jumping or character.falling:
            # No key held: keep coasting on the accumulated air drift.
            character.xPos += character.airDrift
            if character.xPos <= 0:
                character.xPos = 0
            elif character.xPos + character.width >= screenWidth:
                character.xPos = screenWidth - character.width - 1
    if character.jumping:
        character.jump()
    if character.hitBox.colliderect(goal.hitBox):
        print("WIN")
        # Bug fix: was `winGame=True`, a typo — the loop condition tests
        # `gameWin`, so the win flag was never actually set (the loop only
        # ended because of the `break` below).
        gameWin = True
        break
    character.update(dir)
    goal.load()
    gameDisplay.blit(image, (0, 0))
    gameDisplay.blit(character.sprite, (character.xPos, character.yPos))
    gameDisplay.blit(goal.sprite, (goal.xPos, goal.yPos))
    pygame.display.update()
    clock.tick(60)
{
"api_name": "pygame.image.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.tra... |
34166720448 | #!/usr/bin/python3
import yaml, sys
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
import math
from matplotlib.colors import LogNorm
from scipy.interpolate import griddata
from io import StringIO
# Load the Monte-Carlo output <filename>.yaml plus its S*.dat entropy
# snapshots, build the excess multiplicity g_exc(N, E), and map out
# grand-canonical averages (N, U, p) over a (T, mu) grid.
filename = sys.argv[1]
f = '%s.yaml' % (filename)
# Read YAML file
with open(f, 'r') as stream:
    # NOTE(review): yaml.load without an explicit Loader is deprecated;
    # yaml.safe_load is the usual replacement for trusted data files.
    yaml_data = yaml.load(stream)
data = yaml_data
# Cell volume from the box diagonal components.
V = data['system']['cell']['box_diagonal']['x']*data['system']['cell']['box_diagonal']['y']*data['system']['cell']['box_diagonal']['z']
for my_entropy in sorted(glob.iglob("%s.movie/S*.dat" % filename)):
    my_entropy = my_entropy.replace(' ',',')
    dataS = np.loadtxt(my_entropy, ndmin=2)
# NOTE(review): only the dataS from the *last* S*.dat file is used below.
entropy = dataS[1:,:]
Es = dataS[0,:]
nNs = len(entropy[:,0])
nEs = len(Es)
E_edges = np.zeros(len(Es)+1)
dE = 1
lnw = np.array(yaml_data['bins']['lnw'])
print(np.shape(lnw))
print(lnw , "test")
numE = data['bins']['num_E']
print(numE , "num e")
maxN = data['bins']['max_N']
print(maxN , "MAX N")
EE, NN = np.meshgrid(Es, np.arange(0, maxN+1))
g_exc = np.zeros((maxN+1,numE))
print(EE.shape)
assert(len(lnw) == (maxN+1)*numE)
#wrap the multiplicity from the yaml file into something 2d
for n in range(maxN+1):
    for i in range(0,numE):
        g_exc[n,i] = lnw[n + i*(maxN +1)]
print(g_exc , "mulitplicity")
g_exc -= g_exc[0,0] # set entropy to be zero when there are no atoms. There is only one such microstate.
print((maxN), 'len of maxN')
print((numE), 'len of numE')
print((nNs), 'len of nNs')
print((nEs), 'len of nEs')
# Grand-canonical averages over a 50x50 grid of temperature T and
# chemical potential mu.
TTT, mumumu = np.meshgrid(np.linspace(0, 10, 50), np.linspace(-19, 9, 50))
NNN = np.zeros_like(TTT)
UUU = np.zeros_like(TTT)
PPP = np.zeros_like(TTT)
TrotterP = np.zeros_like(TTT)
for i in range(TTT.shape[0]):
    for j in range(TTT.shape[1]):
        T = TTT[i,j]
        mu = mumumu[i,j]
        beta = 1/T
        # Ideal-gas free energy per (N, E) bin; zero when N == 0.
        Fid = NN*T*np.log(NN/V*T**1.5) - NN*T
        Fid[NN==0] = 0
        # Shift by the max exponent for numerical stability of exp().
        gibbs_exponent = -beta*(Fid + EE - mu*NN)
        Zgrand = (g_exc*np.exp(gibbs_exponent - gibbs_exponent.max())).sum()
        NNN[i,j] = (NN*g_exc*np.exp(gibbs_exponent - gibbs_exponent.max())).sum()/Zgrand
        UUU[i,j] = (EE*g_exc*np.exp(gibbs_exponent - gibbs_exponent.max())).sum()/Zgrand
        PPP[i,j] = T/V*(np.log(Zgrand) + np.log(gibbs_exponent.max()*len(gibbs_exponent)))
        TrotterP[i,j] = T*np.log(Zgrand) / V
        print('mu = {}, T = {}'.format(mu,T))
# Contour plots of the averaged quantities over the (T, mu) grid.
plt.contourf(TTT, mumumu, NNN, 100)
plt.colorbar()
plt.title('N')
plt.xlabel('T')
plt.ylabel(r'$\mu$')
plt.figure()
plt.contourf(TTT, mumumu, UUU, 100)
plt.colorbar()
plt.title('U')
plt.xlabel('T')
plt.ylabel(r'$\mu$')
plt.figure()
plt.contourf(TTT, mumumu, PPP, 100)
plt.colorbar()
plt.contour(TTT, mumumu, PPP, linewidth=2, color='white')
plt.title('p')
plt.xlabel('T')
plt.ylabel(r'$\mu$')
plt.figure()
plt.contourf(TTT, mumumu, TrotterP, 100)
plt.colorbar()
plt.contour(TTT, mumumu, TrotterP, linewidth=2, color='white')
plt.title('Trotter p')
plt.xlabel('T')
plt.ylabel(r'$\mu$')
# Second sweep over a (T, p) grid.
TTT, ppp = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 0.00001, 10))
NNN = np.zeros_like(TTT)
UUU = np.zeros_like(TTT)
for i in range(TTT.shape[0]):
for j in range(TTT.shape[1]):
T = TTT[i,j]
p = ppp[i,j]
beta = 1/T
Fid = NN*T*np.log(NN/V*T**1.5) - NN*T
Fid[NN==0] = 0
mulo = -10000
muhi = 10000
while muhi - mulo > 1e-3:
mu = 0.5*(muhi + mulo)
gibbs_exponent = -beta*(Fid + EE - mu*NN)
Zgrand = (g_exc*np.exp(gibbs_exponent - gibbs_exponent.max())).sum()
pguess_excess = T/V*(np.log(Zgrand) + np.log(gibbs_exponent.max()*len(gibbs_exponent)))
N_guess = (NN*g_exc*np.exp(gibbs_exponent - gibbs_exponent.max())).sum()/Zgrand
pguess_ideal = N_guess*T/V
if pguess_excess + pguess_ideal > p:
muhi = mu
else:
mulo = mu
print('pguess =', pguess_ideal + pguess_excess, pguess_ideal, pguess_excess)
NNN[i,j] = (NN*g_exc*np.exp(gibbs_exponent - gibbs_exponent.max())).sum()/Zgrand
UUU[i,j] = (EE*g_exc*np.exp(gibbs_exponent - gibbs_exponent.max())).sum()/Zgrand
print('p = {}, T = {}, mu = {}'.format(p, T, mu))
plt.figure()
plt.contourf(TTT, ppp, UUU, 100)
plt.colorbar()
plt.title('U')
plt.xlabel('T')
plt.ylabel(r'p')
plt.figure()
plt.contourf(TTT, ppp, NNN, 100)
plt.colorbar()
plt.title('N')
plt.xlabel('T')
plt.ylabel(r'p')
plt.show()
| droundy/sad-monte-carlo | plotting/grand2d.py | grand2d.py | py | 4,441 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "yaml.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "glob.iglob",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number":... |
89310945 | # -*- coding: utf-8 -*-
from collections import namedtuple
GENERIC_DICT = 'GenericDict'


def __convert(obj):
    """Recursively convert dicts (including dicts inside lists) into
    namedtuples so values can be read with attribute access."""
    if isinstance(obj, dict):
        # BUG FIX: dict.iteritems() is Python-2 only; .items() works on both
        # Python 2 and 3.
        for key, value in obj.items():
            obj[key] = __convert(value)
        return namedtuple(GENERIC_DICT, obj.keys())(**obj)
    elif isinstance(obj, list):
        return [__convert(item) for item in obj]
    else:
        return obj


def getParams(d):
    """Return *d* converted to nested namedtuples (non-dict values pass through)."""
    return __convert(d)
def findItem(l, key_name, key_value):
    """Return the first element of *l* whose *key_name* attribute equals
    *key_value*, or None when inputs are invalid or nothing matches."""
    # Guard clauses: only search real lists, keyed by string name and value.
    if not isinstance(l, list):
        return None
    if not isinstance(key_name, str):
        return None
    if not isinstance(key_value, str):
        return None
    for candidate in l:
        if getattr(candidate, key_name, None) == key_value:
            return candidate
    return None
| tech-sketch/fiware-ros-turtlesim | src/fiware_ros_turtlesim/params.py | params.py | py | 777 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 11,
"usage_type": "call"
}
] |
11212571235 | import subprocess
from num2words import num2words
def say(number: int) -> str:
    """
    Say the number in words

    :param
        number: int
    :return:
        sentence: str
    :raises ValueError: if number is negative or >= 10**12
    """
    if 0 <= number < 1000000000000:
        # BUG FIX: the original popped from `sentence` while iterating it with
        # enumerate(), which skips the element following every removal (e.g.
        # an "and" right before a comma-suffixed word). Build a clean list in
        # one pass instead: drop "and" connectives and strip commas.
        words = num2words(number).split()
        sentence = [word.replace(",", "") for word in words if word != "and"]
        subprocess.run(["say", ' '.join(sentence)])  # Extension for Mac
        # subprocess.call(  # Extension for Linux
        #     "espeak",
        #     "-ven+f3 -k5 -s150 --punct='<characters>'",
        #     ' '.join(sentence),
        #     "2>>/dev/null"
        # )
        return ' '.join(sentence)
    else:
        raise ValueError(".+")
| stimpie007/exercism | python/say/say.py | say.py | py | 831 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "num2words.num2words",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number": 25,
"usage_type": "call"
}
] |
5895415014 | import pandas as pd
import matplotlib.pyplot as plt
import Utils as utils
from operator import itemgetter
# reading data
#da = utils.combine_data()
da = pd.read_csv('rawdata/heathrowRawData.csv')
class knn:
    """k-nearest-neighbour style forecaster over seasonal weather data.

    Relies on `Utils` helpers to slice the raw dataframe by season/year; the
    `k` nearest samples (by year) are averaged to predict max temperature.
    """

    def __init__(self, *args, **kw):
        pd.set_option('display.max_rows', 2000)
        plt.rcParams['figure.figsize'] = (10.0, 5.0)
        #self.write_to_file(5, 2013)
        #self.plot()

    def calculate_data(self, data, season):
        """Return the mean of the k oldest of the last `set_size` seasonal maxima."""
        d = utils.construct_season_dataframe_no_fill(data, season)
        set_size = 5
        k = 3
        samples = []  # renamed from `set`, which shadowed the builtin
        for i in range(set_size):
            dic = {
                "ma": d[["maxC"]].values[d[["maxC"]].size - 1 - i][0],
                "yyyy": d[["yyyy"]].values[d[["yyyy"]].size - 1 - i][0]
            }
            samples.append(dic)
        ordered = sorted(samples, key=itemgetter('yyyy'))
        mean = 0
        for i in range(k):
            # BUG FIX: the original summed ordered[k]['ma'] on every iteration
            # (a constant index), so the "mean" was k copies of one sample.
            mean += ordered[i]['ma']
        mean = mean / k
        return mean

    def calculate_data_from_date(self, k, data, season, date):
        """Predict a seasonal max temperature for year `date` from k neighbouring years."""
        d = utils.construct_season_dataframe_no_fill(data, season)
        print(season)
        set_size = 5
        samples = []  # renamed from `set`, which shadowed the builtin
        for i in range(set_size):
            index = date - 1948
            new_k = round(k/2) - i
            if date >= 2016:
                # Near the end of the data, only look backwards.
                new_k = i + 1
            dic = {
                "ma": d[["maxC"]].values[index - new_k][0],
                "yyyy": d[["yyyy"]].values[index - new_k][0]
            }
            samples.append(dic)
        mean = 0
        for i in range(k):
            mean += samples[i]['ma']
        # NOTE(review): sums k samples but divides by set_size (5); this only
        # computes a true mean when k == set_size -- confirm intent.
        mean = mean / set_size
        #print(samples)
        return mean

    def calculate_fade_points(self, k, data, season, year):
        """Average daily mid-range temps over the last k years for one season.

        Returns a DataFrame with columns ['mm', 'temp'].
        """
        fade_set = pd.DataFrame()
        month_series = utils.get_data_from_season(season, utils.get_data_from_year(data, year))[['mm']]
        for i in range(k):
            y_d = utils.get_data_from_year(data, year - i)
            d = utils.get_data_from_season(season, y_d)
            # Mid-range temperature for each day: mean of max and min.
            ad = d[['maxC', 'minC']].mean(axis=1)
            m = ad.to_frame().reset_index().drop(['index'], axis=1)
            fade_set = pd.concat([fade_set, m], axis=1)
        new_avg = fade_set.mean(axis=1)
        final_df = pd.concat([month_series.reset_index().drop(['index'], axis=1), new_avg], axis=1)
        final_df.columns = ['mm', 'temp']
        return final_df
knn()
| omarali0703/MachineLearning | kNN.py | kNN.py | py | 2,412 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "matp... |
12753959980 | import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.stats.norm as norm
import sys
sys.path.append('..')
from bbvi import BaseBBVIModel
"""
This implements the example in http://www.cs.toronto.edu/~duvenaud/papers/blackbox.pdf
except using a full-size covariance matrix (rather than diagonal).
"""
class ModelTest2(BaseBBVIModel):
    """BBVI model approximating a 2D (mu, log_sigma) target density with a
    full-covariance Gaussian variational family; plots isocontours live."""
    def __init__(self, D=2):
        self.dim=D
        self.fig, self.ax=plt.subplots(1)
        plt.show(block=False)
        BaseBBVIModel.__init__(self)

    # specify the variational approximator
    def unpack_params(self, params):
        # Column 0 is the mean; remaining columns are a covariance root factor.
        return params[:, 0], params[:, 1:]
    def log_var_approx(self, z, params):
        """Log-density of the multivariate Gaussian variational approximation at z."""
        mu, root_sigma=self.unpack_params(params)
        # sigma = 0.5 * R R^T guarantees a symmetric PSD covariance.
        sigma=0.5*np.dot(root_sigma, root_sigma.T)
        return mvn.logpdf(z, mu.flatten(), sigma)
    def sample_var_approx(self, params, n_samples=2000):
        """Draw n_samples from the variational Gaussian via the root factor."""
        mu, root_sigma=self.unpack_params(params)
        # NOTE(review): `sigma` is computed but unused here; sampling uses the
        # root factor directly (x = eps * sqrt(0.5) R + mu).
        sigma=0.5*np.dot(root_sigma, root_sigma.T)
        return np.dot(npr.randn(n_samples, mu.shape[0]),np.sqrt(0.5)*root_sigma)+mu

    # specify the distribution to be approximated
    def log_prob(self, z):
        """Target log-density: hierarchical Gaussian p(log_sigma) p(mu | log_sigma)."""
        # the density we are approximating is two-dimensional
        mu, log_sigma = z[:, 0], z[:, 1]#this is a vectorized extraction of mu,sigma
        sigma_density = norm.logpdf(log_sigma, 0, 1.35)
        mu_density = norm.logpdf(mu, 0, np.exp(log_sigma))
        return sigma_density + mu_density

    def plot_isocontours(self, ax, func, xlimits=[-2, 2], ylimits=[-4, 2], numticks=101):
        """Contour-plot func over a grid (func takes an (n, 2) array of points)."""
        x = np.linspace(*xlimits, num=numticks)
        y = np.linspace(*ylimits, num=numticks)
        X, Y = np.meshgrid(x, y)
        zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
        Z = zs.reshape(X.shape)
        plt.contour(X, Y, Z)
        ax.set_yticks([])
        ax.set_xticks([])

    def callback(self, *args):
        """Optimizer callback: args = (current_params, iteration, ...); redraws
        target vs. approximation contours every 50 iterations."""
        if args[1]%50==0:
            print(args[1])
            curr_params=args[0]
            plt.cla()
            self.plot_isocontours(self.ax, lambda z:np.exp(self.log_prob(z)))
            self.plot_isocontours(self.ax, lambda z:np.exp(self.log_var_approx(z, curr_params)))

            plt.pause(1.0/30.0)
            plt.draw()
def run_BBVI_test():
    """Run BBVI on ModelTest2 with a mean + covariance-root parameterization
    and print the initial and fitted variational parameters."""
    # Column 0: mean (zeros); remaining columns: covariance root (-I).
    init_params=np.hstack([np.ones((2,1))*0, np.eye(2)*-1])

    mod=ModelTest2()
    # var_params=mod.run_VI(init_params,
    # 					step_size=0.0001,
    # 					num_iters=4000,
    # 					num_samples=1000,
    # 					# how='stochsearch'
    # 					)

    var_params=mod.run_VI(init_params,
                        step_size=0.005,
                        num_iters=1500,
                        num_samples=2000,
                        # how='reparam'
                        how='noscore'
                        )
    print('init params=')
    print(init_params)
    print('final params=')
    print(var_params)
if __name__=='__main__':
run_BBVI_test() | jamesvuc/BBVI | Examples/BBVI_test2.py | BBVI_test2.py | py | 2,606 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "bbvi.BaseBBVIModel",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subp... |
34214470629 | import logging
import modules.net as model_arch
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm
import torch
import torch.nn as nn
class Predictor():
    """Trains and evaluates a text classifier built from pretrained word
    embeddings plus an architecture chosen by name from `modules.net`."""
    def __init__(self, batch_size=64, max_epochs=100, valid=None, labelEncoder=None, device=None, metric=None,
                 learning_rate=1e-3, max_iters_in_epoch=1e20, grad_accumulate_steps=1,
                 embedding=None, loss="BCELoss", arch="rnn_net", **kwargs):
        self.batch_size = batch_size
        self.max_epochs = max_epochs
        self.valid = valid
        self.metric = metric
        self.learning_rate = learning_rate
        self.max_iters_in_epoch = max_iters_in_epoch
        self.grad_accumulate_steps = grad_accumulate_steps
        self.le = labelEncoder
        self.num_classes = len(self.le.classes_)
        if device is not None:
            self.device = torch.device(device)
        else:
            self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.epoch = 0

        # Look up the architecture class by name in modules.net.
        self.model = getattr(model_arch,arch)(embedding.size(1), self.num_classes, **kwargs)
        print(self.model)
        logging.info("Embedding size: ({},{})".format(embedding.size(0),embedding.size(1)))
        self.embedding = nn.Embedding(embedding.size(0),embedding.size(1))
        self.embedding.weight = nn.Parameter(embedding)

        self.model = self.model.to(self.device)
        self.embedding = self.embedding.to(self.device)
        logging.info("Learning_rate: {}".format(learning_rate))
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate, weight_decay=1e-5)
        # NOTE(review): the default loss="BCELoss" is not a key of this dict,
        # so the default raises KeyError -- callers must pass
        # loss="BCEWithLogitsLoss"; confirm intent.
        self.loss={'BCEWithLogitsLoss': nn.BCEWithLogitsLoss()}[loss]
        logging.info("Loss: {}".format(self.loss))

    def fit_dataset(self, data, collate_fn=default_collate, callbacks=[]):
        """Train up to max_epochs, evaluating on self.valid (if set) each epoch
        and invoking callback.on_epoch_end(log_train, log_valid, self)."""
        while self.epoch < self.max_epochs:
            logging.debug("training {}".format(self.epoch))
            dataloader = torch.utils.data.DataLoader(data, batch_size=self.batch_size, shuffle=True,
                                                     num_workers=10, collate_fn=collate_fn)
            log_train = self._run_epoch(dataloader, True)

            if self.valid is not None:
                logging.debug("evaluating {}".format(self.epoch))
                dataloader = torch.utils.data.DataLoader(self.valid, batch_size=self.batch_size, shuffle=False,
                                                         num_workers=10, collate_fn=collate_fn)
                log_valid = self._run_epoch(dataloader, False)
            else:
                log_valid = None

            for callback in callbacks:
                callback.on_epoch_end(log_train, log_valid, self)
            self.epoch += 1

    def _run_epoch(self, dataloader, training):
        """Run one epoch (train or eval); return {'loss': ..., 'Accuracy': ...}."""
        self.model.train(training)

        loss = 0
        self.metric.reset()
        if training:
            iter_in_epoch = min(len(dataloader), self.max_iters_in_epoch)
            description = "training"
        else:
            iter_in_epoch = len(dataloader)
            description = "evaluating"

        trange = tqdm(enumerate(dataloader), total=iter_in_epoch, desc=description, ncols=70)
        for i, batch in trange:
            if training:
                if i >= iter_in_epoch:
                    break
                output, batch_loss = self._run_iter(batch, training)
                # Scale the loss so accumulated gradients average correctly.
                batch_loss /= self.grad_accumulate_steps

                if i % self.grad_accumulate_steps == 0:
                    self.optimizer.zero_grad()
                batch_loss.backward()
                if (i + 1) % self.grad_accumulate_steps == 0:
                    self.optimizer.step()  # update gradient
            else:
                with torch.no_grad():
                    output, batch_loss = self._run_iter(batch, training)

            loss += batch_loss.item()
            self.metric.update(output, batch['labels'])
            trange.set_postfix(loss=loss / (i + 1), **{"Accuracy": self.metric.get_score()})

        loss /= iter_in_epoch
        score = self.metric.get_score()
        epoch_log = {}
        epoch_log['loss'] = float(loss)
        epoch_log["Accuracy"] = score
        print("loss={}".format(loss))
        print("Accuracy={}".format(score))
        return epoch_log

    def _run_iter(self, batch, training):
        """Embed one batch, run the model and return (logits, loss)."""
        x = batch['sentences']
        y = batch['labels']
        # Embedding lookup is kept out of the graph; only the model trains.
        with torch.no_grad():
            sentence = self.embedding(x.to(self.device))
        logits = self.model.forward(sentence.to(self.device))
        loss = self.loss(logits, y.float().to(self.device))
        return logits, loss

    def _predict_batch(self, x):
        """Return logits for a raw index batch (no loss computed)."""
        sentence = self.embedding(x.to(self.device))
        logits = self.model.forward(sentence.to(self.device))
        return logits

    def predict_dataset(self, data, collate_fn=default_collate, batch_size=None, predict_fn=None):  # for prediction
        """Predict the whole dataset; return (logits, labels) tensors.

        NOTE(review): the batch_size argument is accepted but the dataloader
        below always uses self.batch_size -- confirm intent.
        """
        if batch_size is None:
            batch_size = self.batch_size
        if predict_fn is None:
            predict_fn = self._predict_batch

        self.model.eval()
        dataloader = torch.utils.data.DataLoader(data, batch_size=self.batch_size, shuffle=False,
                                                 num_workers=10, collate_fn=collate_fn)

        ans = []
        solutions = []
        with torch.no_grad():
            trange = tqdm(enumerate(dataloader), total=len(dataloader), desc="predicting", ncols=70)
            for i, batch in trange:
                x = batch["sentences"]
                solution = batch["labels"]
                batch_y = predict_fn(x) #batch
                solutions.append(solution)
                ans.append(batch_y)

        ans = torch.cat(ans, 0)
        solutions = torch.cat(solutions, 0)
        return ans, solutions

    def save(self, path):
        """Checkpoint epoch counter, model weights and optimizer state to path."""
        torch.save({
            'epoch': self.epoch + 1,
            'model': self.model.state_dict(),  # A state_dict is simply a Python dictionary object
            'optimizer': self.optimizer.state_dict()  # that maps each layer to its parameter tensor
        },path)

    def load(self, path):
        """Restore a checkpoint written by save()."""
        saved = torch.load(path)
        self.epoch = saved['epoch']
        self.model.load_state_dict(saved['model'])
        self.optimizer.load_state_dict(saved['optimizer'])
| hsinlichu/Customer-Service-Data-Analysis-with-Machine-Learning-Technique | src/mypredictor.py | mypredictor.py | py | 6,299 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"l... |
74736955554 | import os
import sys
import warnings
from decouple import config
# Application settings, mostly sourced from the environment via python-decouple.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if BASE_DIR not in sys.path:
    sys.path.append(BASE_DIR)

# On-disk layout under <project>/storage.
STORAGE_DIR = os.path.join(BASE_DIR, 'storage')
RUN_DIR = os.path.join(STORAGE_DIR, 'run')
MSD_DIR = os.path.join(STORAGE_DIR, 'msd')
LOG_DIR = os.path.join(STORAGE_DIR, 'logs')

DEBUG = config('DEBUG', default=False, cast=bool)

DATABASE_ENGINE = config('SEISMIC_BULLETIN_ENGINE')
# Only MySQL is supported by this version; warn (don't fail) on other engines.
if not DATABASE_ENGINE.startswith('mysql'):
    warnings.warn('You are not using MySQL database on '
                  'SEISMIC_BULLETIN_ENGINE URL. The application may not '
                  'function properly if you are using database other than '
                  'MySQL that is supported by current version. Proceed at '
                  'your own risk.')

MIGRATED = config('MIGRATED', default=True, cast=bool)

# Maximum database connection age, in seconds.
CONN_MAX_AGE = config('CONN_MAX_AGE', default=60*60, cast=int)

MSD_EXT = '.msd'

TIMEZONE = config('TIMEZONE', default='Asia/Jakarta')

LOGGING_ROOT = LOG_DIR

LOG_LEVEL = config('LOG_LEVEL', default='info').upper()
if DEBUG:
    LOG_LEVEL = 'DEBUG'

# Standard logging dictConfig: console plus a 5 MB rotating file (7 backups).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'format': '{asctime} {levelname} {name} {message}',
            'style': '{',
        },
        'verbose': {
            'format': '{asctime} {levelname} {name} {process:d} {message}',
            'style': '{',
        },
    },
    'handlers': {
        'console': {
            'level': LOG_LEVEL,
            'class': 'logging.StreamHandler',
            'formatter': 'default'
        },
        'production': {
            'level': LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOGGING_ROOT, 'wo.log'),
            'maxBytes': 1024 * 1024 * 5,
            'backupCount': 7,
            'formatter': 'verbose',
        },
    },
    'loggers': {
        '': {
            'handlers': ['console', 'production'],
            'level': LOG_LEVEL
        },
        '__main__': {
            'handlers': ['console', 'production'],
            'level': LOG_LEVEL,
            'propagate': False,
        }
    }
}

LOCKFILE = os.path.join(RUN_DIR, 'bulletin.lock')

# How many days back to process; must be positive.
DAY_RANGE = config('DAY_RANGE', default=3, cast=int)
if DAY_RANGE <= 0:
    raise ValueError('DAY_RANGE value must be greater than 0.')

SEEDLINK_HOST = config('SEEDLINK_HOST', default='127.0.0.1')
SEEDLINK_PORT = config('SEEDLINK_PORT', default=18000, cast=int)

ARCLINK_HOST = config('ARCLINK_HOST', default='127.0.0.1')
ARCLINK_PORT = config('ARCLINK_PORT', default=18001, cast=int)

WEBOBS_USERNAME = config('WEBOBS_USERNAME')
WEBOBS_PASSWORD = config('WEBOBS_PASSWORD')
WEBOBS_HOST = config('WEBOBS_HOST', default='')

# This variable can be used to mock `wo.clients.waveform.get_waveforms()`
# function for testing purposes.
GET_WAVEFORMS_FUNCTION = None

# This variable can be use to set custom WebObs fetcher
# `wo.clients.webobs.WebObsMC3Fetcher`.
WEBOBS_MC3_FETCHER_CLASS = None
15533905597 |
# A very simple Bottle Hello World app for you to get started with...
from bottle import route, run, template, default_app
import json
import get_stream
@route("/")
@route("/index")
def index():
data = get_stream.get_stream_data()
#print(data)
return template("""
<b>
An example weather server.
</b>
%for item in data:
<b>
{{item}}
</b>
""",data=data)
@route("/table")
def temperature():
data = get_stream.get_stream_data()
#print(data)
return template("""
<table>
%for item in data:
<tr>
<td>
{{item['kentid']}}
</td>
<td>
{{item['collection_time']}}
</td>
<td>
{{item['lat']}}
</td>
<td>
{{item['lon']}}
</td>
<td>
{{item['temperature']}}
</td>
<td>
{{item['humidity']}}
</td>
</tr>
%end
</table>
""", data=data)
@route("/graph")
def graph():
data = get_stream.get_stream_data()
return template("scatter_graph", data=data)
@route("/projectgraph")
def graph1():
data = get_stream.get_stream_data()
return template("project_graph", data=data)
@route("/map")
def example_map():
data = get_stream.get_stream_data()
return template("example_map", data=data)
@route("/map1")
def example_map1():
data = get_stream.get_stream_data()
return template("example_map1", data=data)
@route("/calendar_chart")
def calendar_chart():
data = get_stream.get_stream_data()
return template("calendar_chart", data=data)
application = default_app()
| smahagos/WeatherData | mysite/bottle_app.py | bottle_app.py | py | 1,680 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "get_stream.get_stream_data",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bottle.template",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bottle.route",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bottle.route"... |
30689755641 | ####################################################################################
# Estimator Models
# Contains different estimators
# Each Estimator Contains:
# -A set of required conditional distributions (nuisance parameters) that must be trained
# -The set of parameters that are shared between nuisance parameter models (embeddings, encoders)
# -A way to combine the estimator conditional to compute quantity of interest
####################################################################################
import torch
import torch.nn as nn
import numpy as np
import math
import causalchains.models.conditional_models as cmodels
from causalchains.models.encoders.cnn_encoder import CnnEncoder
from causalchains.models.encoders.average_encoder import AverageEncoder
from causalchains.models.encoders.onehot_encoder import OneHotEncoder
from causalchains.models.encoders.rnn_encoder import RnnEncoder
from causalchains.utils.data_utils import PAD_TOK
import logging
EXP_OUTCOME_COMPONENT="_exp_outcome_component_"
PROPENSITY_COMPONENT="_propensity_component_"
class Estimator(nn.Module):
    """Base estimator: owns the embeddings and (event/text) encoders that are
    shared between the nuisance-parameter models of concrete subclasses."""
    def __init__(self, event_embed_size, text_embed_size, event_encoder_outsize, text_encoder_outsize, evocab, tvocab, config):
        super(Estimator, self).__init__()
        #Pass in None for event_embed_size to just use one hot encodings for events (make sure event_encoder_outsize is the correct size)
        #Pass in None for event_encoder_outsize if not using event information

        self.event_embed_size = event_embed_size
        self.text_embed_size = text_embed_size
        self.event_encoder_outsize= event_encoder_outsize
        self.text_encoder_outsize= text_encoder_outsize

        evocab_size = len(evocab.stoi.keys())
        tvocab_size = len(tvocab.stoi.keys())
        e_pad = evocab.stoi[PAD_TOK]
        t_pad = tvocab.stoi[PAD_TOK]

        # Event embeddings are optional: None means one-hot event encoding.
        if self.event_embed_size is not None:
            self.event_embeddings = nn.Embedding(evocab_size, self.event_embed_size, padding_idx=e_pad)
        else:
            self.event_embeddings = None

        self.text_embeddings = nn.Embedding(tvocab_size, self.text_embed_size, padding_idx=t_pad)
        if config.use_pretrained:
            logging.info("Estimator: Using Pretrained Word Embeddings")
            self.text_embeddings.weight.data = tvocab.vectors

        # Pick the event encoder: RNN > averaging > one-hot, by configuration.
        if self.event_encoder_outsize is not None:
            if self.event_embed_size is not None and config.rnn_event_encoder:
                logging.info("Estimator: Using RNN Event Encoder")
                self.event_encoder = RnnEncoder(self.event_embed_size, self.event_encoder_outsize)
            elif self.event_embed_size is not None:
                self.event_encoder = AverageEncoder(self.event_embed_size)
            else:
                assert event_encoder_outsize == len(evocab.itos), "event_encoder_outsize incorrectly specified for OneHot, should be vocab size"
                self.event_encoder = OneHotEncoder(len(evocab.itos), pad_idx=e_pad)
        else:
            self.event_encoder = None

        if self.text_encoder_outsize is not None:
            self.text_encoder = CnnEncoder(self.text_embed_size,
                                     num_filters = self.text_embed_size,
                                     output_dim = self.text_encoder_outsize)
        else:
            self.text_encoder = None


    def forward(self, instance):
        """
        Params:
            Should take in a example from data_utils.InstanceDataset which will contain:
                .e1, Tensor [batch]
                .e2, Tensor [batch]
                .e1_text, Tensor [batch, max_size]
                .e1prev_intext, Tensor[batch, max_size2]
        Outputs:
            (dictonary) Output Dictionary: A dictonary mapping a component name to the logit outputs of each
            component (for example, expected_outcome -> logits outpus of expected outcome)
        """
        raise NotImplementedError
class FineTuneEstimator(nn.Module): #Assume using rnn encoder for events, cnn encoder for text
    'An estimator with components like the event embeddings, event encoder, text encoder,... pretrained, fixed, and passed in'
    def __init__(self, config, old_model):
        """
        Params:
            old_model (Estimator) : The previous Estimator model we are fine tunning on
        """
        super(FineTuneEstimator, self).__init__()
        # Reuse the pretrained components from the old model rather than
        # creating fresh ones; sizes are recovered from their weights.
        self.event_embeddings = old_model.event_embeddings
        self.text_embeddings = old_model.text_embeddings
        self.event_encoder = old_model.event_encoder
        self.text_encoder = old_model.text_encoder

        self.event_embed_size = self.event_embeddings.weight.shape[1]
        self.text_embed_size = self.text_embeddings.weight.shape[1]
        self.event_encoder_outsize= self.event_encoder.output_dim
        self.text_encoder_outsize= self.text_encoder.output_dim

        # Fresh encoder for previous-in-text events (the only new component).
        self.out_event_encoder = AverageEncoder(self.event_embed_size)
        #self.out_event_encoder = OneHotEncoder(self.event_embeddings.weight.shape[0], pad_idx=self.event_embeddings.padding_idx)
class NaiveAdjustmentEstimator(Estimator):
    'Estimate ACE with Backdoor adjustment without considering any previous events'
    def __init__(self, config, evocab, tvocab):
        # No event encoder: this variant adjusts on text only.
        super(NaiveAdjustmentEstimator, self).__init__(config.event_embed_size,
                                                 config.text_embed_size,
                                                 event_encoder_outsize=None,
                                                 text_encoder_outsize=config.text_enc_output,
                                                 evocab=evocab, tvocab=tvocab, config=config)

        self.expected_outcome = cmodels.ExpectedOutcome(self.event_embeddings,
                                                        self.text_embeddings,
                                                        event_encoder=None,
                                                        text_encoder=self.text_encoder,
                                                        evocab=evocab,tvocab=tvocab,
                                                        config=config)

        assert not self.expected_outcome.includes_e1prev_intext()
        assert not self.expected_outcome.onehot_events()

    def forward(self, instance):
        """Return the expected-outcome logits for the batch (see Estimator.forward)."""
        exp_out = self.expected_outcome(instance)
        return {EXP_OUTCOME_COMPONENT: exp_out}
class AdjustmentEstimator(FineTuneEstimator):
    'Estimate ACE with Backdoor adjustment considering previous events that occured in and out of text, used with finetunning'
    def __init__(self, config, evocab, tvocab, old_model):
        super(AdjustmentEstimator, self).__init__(config, old_model)
        mlp_layer = old_model.expected_outcome.logits_mlp #reuse part of previous last layer
        self.expected_outcome = cmodels.ExpectedOutcome(self.event_embeddings,
                                                        self.text_embeddings,
                                                        event_encoder=self.event_encoder,
                                                        text_encoder=self.text_encoder,
                                                        evocab=evocab,tvocab=tvocab,
                                                        config=config,out_event_encoder=self.out_event_encoder, old_mlp_layer=mlp_layer)

        assert self.expected_outcome.includes_e1prev_intext()

    def forward(self, instance):
        """Return the expected-outcome logits for the batch (see Estimator.forward)."""
        exp_out = self.expected_outcome(instance)
        return {EXP_OUTCOME_COMPONENT: exp_out}
class SemiNaiveAdjustmentEstimator(Estimator):
    'Estimate ACE with Backdoor adjustment considering previous events that occured in text (but not those that didnt appear in text)'
    def __init__(self, config, evocab, tvocab):
        super(SemiNaiveAdjustmentEstimator, self).__init__(config.event_embed_size,
                                                 config.text_embed_size,
                                                 event_encoder_outsize=config.rnn_hidden_dim, #assume using rnn event encoder
                                                 text_encoder_outsize=config.text_enc_output,
                                                 evocab=evocab, tvocab=tvocab, config=config)

        self.expected_outcome = cmodels.ExpectedOutcome(self.event_embeddings,
                                                        self.text_embeddings,
                                                        event_encoder=self.event_encoder,
                                                        text_encoder=self.text_encoder,
                                                        evocab=evocab,tvocab=tvocab,
                                                        config=config)

        assert self.expected_outcome.includes_e1prev_intext()
        assert not self.expected_outcome.onehot_events()

    def forward(self, instance):
        """Return the expected-outcome logits for the batch (see Estimator.forward)."""
        exp_out = self.expected_outcome(instance)
        return {EXP_OUTCOME_COMPONENT: exp_out}
class SemiNaiveAdjustmentEstimatorOneHotEvents(Estimator):
    'Estimate ACE with Backdoor adjustment considering previous events that occured in text (but not those that didnt appear in text)'
    def __init__(self, config, evocab, tvocab):
        # event_embed_size=None selects one-hot event encoding in Estimator,
        # so the encoder output size must equal the event vocab size.
        super(SemiNaiveAdjustmentEstimatorOneHotEvents, self).__init__(None,
                                                 config.text_embed_size,
                                                 event_encoder_outsize=len(evocab.itos),
                                                 text_encoder_outsize=config.text_enc_output,
                                                 evocab=evocab, tvocab=tvocab, config=config)

        self.expected_outcome = cmodels.ExpectedOutcome(self.event_embeddings,
                                                        self.text_embeddings,
                                                        event_encoder=self.event_encoder,
                                                        text_encoder=self.text_encoder,
                                                        evocab=evocab,tvocab=tvocab,
                                                        config=config)

        assert self.expected_outcome.includes_e1prev_intext()
        assert self.expected_outcome.onehot_events()

    def forward(self, instance):
        """Return the expected-outcome logits for the batch (see Estimator.forward)."""
        exp_out = self.expected_outcome(instance)
        return {EXP_OUTCOME_COMPONENT: exp_out}
| weberna/causalchains | causalchains/models/estimator_model.py | estimator_model.py | py | 10,710 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "causalchains.utils.data_utils.PAD_TOK",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "c... |
25008123076 | from Net import Net
import torch
import torch.nn as nn
import torch.optim as optim
from typing import List
from pathlib import Path
import os
class ModelTrainEval:
    """Wraps a Net with an MSE training loop, evaluation and checkpointing."""

    def __init__(self):
        self.net = Net()
        self.criterion = nn.MSELoss()
        self.optimizer = optim.Adam(self.net.parameters(), lr=0.001)

    def train_net(self, num_epochs: int, train_loader: torch.utils.data.DataLoader) -> List[float]:
        """Train for num_epochs; return the running losses logged every 2000 mini-batches."""
        self.net.train()
        losses_over_time = []
        for epoch in range(num_epochs):
            running_loss = 0.0
            for batch_i, data in enumerate(train_loader, 0):
                # getting inputs and labels from train_loader
                inputs, labels = data
                # zeroing gradient
                self.optimizer.zero_grad()
                outputs = self.net(inputs.float())
                loss = self.criterion(outputs, labels.float())
                # calculating gradients for the iteration
                loss.backward()
                # taking a step based on calculated gradients
                self.optimizer.step()
                running_loss += loss.item()
                # printing the loss every 2000 mini batches
                if batch_i % 2000 == 1999:
                    print(f'[{epoch + 1}, {batch_i + 1:5d}] training loss: {running_loss/2000:.3f}')
                    losses_over_time.append(running_loss)
                    running_loss = 0.0
        # BUG FIX: this print originally sat *after* the return statement and
        # was unreachable.
        print('Finished Training')
        return losses_over_time

    def eval(self, test_loader: torch.utils.data.DataLoader) -> float:
        """Evaluate on test_loader; return the average per-sample test loss."""
        self.net.eval()
        test_loss_total = 0
        for data in test_loader:
            test_image, test_label = data
            y_pred = self.net(test_image.float())
            test_loss = self.criterion(y_pred, test_label.float())
            test_loss_total += test_loss.item()
        print('Average test loss is {}'.format(test_loss_total/(len(test_loader) * test_loader.batch_size)))
        return test_loss_total/(len(test_loader) * test_loader.batch_size)

    def save_model(self, model_name: str):
        """Save the network's state dict under Models/<model_name>.pth."""
        # BUG FIX: the original called .format() on an already-formatted
        # f-string (a no-op that obscured intent).
        model_path = Path(os.getcwd()) / "Models" / f"{model_name}.pth"
        torch.save(self.net.state_dict(), model_path)
| paddywardle/Computational_Chemistry_Data_Engineering_Project | main/Models/ModelTrainEval.py | ModelTrainEval.py | py | 2,392 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "Net.Net",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn.MSELoss",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.optim.Adam",
"line_number... |
4062540397 |
# Pandigital products
# Problem 32
# We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once; for example,
# the 5-digit number, 15234, is 1 through 5 pandigital.
# The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital.
# Find the sum of all products whose multiplicand/multiplier/product identity can be written as a 1 through 9 pandigital.
# HINT: Some products can be obtained in more than one way so be sure to only include it once in your sum.
# https://projecteuler.net/problem=32
import datetime
import re
def IsPanDigit(i, j, k):
    """Return True when the digits of i, j and k together are exactly 1..9.

    The sorted-digit comparison alone implies length 9, no zeros and no
    duplicates, so the original length check and regex duplicate scan were
    redundant and have been removed.
    """
    s = str(i) + str(j) + str(k)
    return sorted(s) == list('123456789')
def IsSingleNotZeroDigit(n):
    """Return True when n contains no zero digit and no repeated digit.

    Replaces the original backreference-regex duplicate scan with a set-size
    comparison, which is simpler and faster.
    """
    s = str(n)
    return '0' not in s and len(set(s)) == len(s)
def ContainTheSameDigits(i, j):
    """True when i and j share at least one decimal digit."""
    return bool(set(str(i)) & set(str(j)))
# Phase 1: collect every candidate factor (1..9999) whose digits are
# nonzero and pairwise distinct — only such numbers can appear in a
# 1-through-9 pandigital identity.
start_time = datetime.datetime.now()
a = []
for i in range(10000):
    if IsSingleNotZeroDigit(i):
        a.append(i)
stop_time = datetime.datetime.now()
print(stop_time - start_time)
start_time = datetime.datetime.now()
# b maps each found product -> "multiplicand,multiplier"; keying by product
# deduplicates products reachable in more than one way (per the hint).
b = {}
b[7254] = '39,186'
L = len(a)
i1 = 0
# dt1..dt4 are per-check timing accumulators (profiling only), seeded with
# the elapsed time of phase 1 so they start as timedelta objects.
dt1 = stop_time - start_time
dt2 = stop_time - start_time
dt3 = stop_time - start_time
dt4 = stop_time - start_time
# Phase 2: try every unordered factor pair; a 9-digit pandigital identity
# forces the product to have exactly 4 digits, so cheaper filters run first.
while i1 < L:
    i2 = i1
    while i2 < L - 1:
        i2 += 1
        m1 = a[i1]
        m2 = a[i2]
        k = m1 * m2
        if len(str(k)) != 4:
            continue
        t1 = datetime.datetime.now()
        if ContainTheSameDigits(m1, m2):
            dt1 += datetime.datetime.now() - t1
            continue
        dt1 += datetime.datetime.now() - t1
        t1 = datetime.datetime.now()
        if not IsSingleNotZeroDigit(k):
            dt2 += datetime.datetime.now() - t1
            continue
        dt2 += datetime.datetime.now() - t1
        t1 = datetime.datetime.now()
        if (k in b):
            dt3 += datetime.datetime.now() - t1
            continue
        dt3 += datetime.datetime.now() - t1
        t1 = datetime.datetime.now()
        if IsPanDigit(m1, m2, k):
            b[k] = str(m1) + ',' + str(m2)
        dt4 += datetime.datetime.now() - t1
    i1 += 1
# list(b) yields the distinct products (dict keys); their sum is the answer.
a = list(b)
print('ContainTheSameDigits - ',dt1)
print('IsSingleNotZeroDigit - ',dt2)
print('In B - ',dt3)
print('IsPanDigit - ',dt4)
stop_time = datetime.datetime.now()
print('Overal - ',stop_time - start_time)
print("{0:,d}".format(sum(a)))
#print("{0:,d}".format(len(result)))
print(b)
{
"api_name": "re.findall",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"... |
19105383629 | import os
import json
import pandas as pd
from aideme.explore import ExplorationManager, PartitionedDataset
from aideme.active_learning import KernelVersionSpace
from aideme.active_learning.dsm import FactorizedDualSpaceModel
from aideme.initial_sampling import random_sampler
import src.routes.points
from src.routes.create_manager import (
encode_and_normalize,
compute_indexes_mapping,
compute_partition_in_encoded_indexes,
)
from src.routes.endpoints import (
INITIAL_UNLABELED_POINTS,
NEXT_UNLABELED_POINTS,
)
from tests.routes.data_points import (
SIMPLE_MARGIN_CONFIGURATION,
VERSION_SPACE_CONFIGURATION,
FACTORIZED_SIMPLE_MARGIN_CONFIGURATION,
FACTORIZED_VERSION_SPACE_CONFIGURATION,
)
TEST_DATASET_PATH = os.path.join(
__file__.split(sep="tests")[0], "tests", "data", "cars_raw_20.csv"
)
SEPARATOR = ","
SELECTED_COLS = [2, 3]
def test_get_initial_points_to_label(client, monkeypatch):
    """POSTing each configuration to INITIAL_UNLABELED_POINTS returns a
    JSON list of exactly 3 integer point ids (the initial random sample)."""
    def use_config(configuration, column_ids):
        # Exercise the endpoint with one learner configuration + column choice.
        response = client.post(
            INITIAL_UNLABELED_POINTS,
            data={
                "configuration": json.dumps(configuration),
                "columnIds": json.dumps(column_ids),
            },
        )
        points_to_label = json.loads(response.data)
        assert isinstance(points_to_label, list)
        assert len(points_to_label) == 3
        assert isinstance(points_to_label[0], int)
    # Point the route at the bundled 20-row cars CSV instead of real data.
    monkeypatch.setattr(
        src.routes.points,
        "get_dataset_path",
        lambda: TEST_DATASET_PATH,
    )
    # Stub the cache: the route only needs a separator string here.
    monkeypatch.setattr(src.routes.points.cache, "get", lambda key: ",")
    use_config(SIMPLE_MARGIN_CONFIGURATION, column_ids=SELECTED_COLS)
    use_config(VERSION_SPACE_CONFIGURATION, column_ids=SELECTED_COLS)
    use_config(FACTORIZED_SIMPLE_MARGIN_CONFIGURATION, column_ids=[1, 3, 2])
    use_config(FACTORIZED_VERSION_SPACE_CONFIGURATION, column_ids=[1, 3, 2, 3])
def test_get_next_points_to_label(client, monkeypatch):
    """POSTing labeled points to NEXT_UNLABELED_POINTS returns one integer id.

    Covers both a plain learner (no factorization) and a factorized dual-space
    model; the pre-built ExplorationManager is injected through the cache stub.
    """
    cases = [
        {
            "selected_columns": SELECTED_COLS,
            "partition": None,
            "labeled_points": [
                {"id": 3, "label": 1},
                {"id": 9, "label": 1},
                {"id": 11, "label": 0},
            ],
        },
        {
            "selected_columns": [1, 2, 3],
            "partition": [[0, 2], [1, 2]],
            "labeled_points": [
                {"id": 3, "labels": [1, 0]},
                {"id": 9, "labels": [0, 1]},
                {"id": 11, "labels": [1, 1]},
            ],
        },
    ]
    for case in cases:
        dataset = pd.read_csv(
            TEST_DATASET_PATH, SEPARATOR, usecols=case["selected_columns"]
        )
        transformed_dataset = encode_and_normalize(dataset)
        if case["partition"] is not None:
            # One-hot encoding shifts column positions; remap the partition
            # from raw column indexes to encoded-column indexes.
            indexes_mapping = compute_indexes_mapping(
                list(range(len(case["selected_columns"]))), transformed_dataset.columns
            )
            new_partition = compute_partition_in_encoded_indexes(
                case["partition"], indexes_mapping
            )
            active_learner = FactorizedDualSpaceModel(
                KernelVersionSpace(), partition=new_partition
            )
        else:
            active_learner = KernelVersionSpace()
        exploration_manager = ExplorationManager(
            PartitionedDataset(
                transformed_dataset,
                copy=False,
            ),
            active_learner,
            subsampling=50000,
            initial_sampler=random_sampler(sample_size=3),
        )
        # The route fetches its manager from the cache; hand it ours.
        monkeypatch.setattr(
            src.routes.points.cache, "get", lambda key: exploration_manager
        )
        response = client.post(
            NEXT_UNLABELED_POINTS,
            data={"labeledPoints": json.dumps(case["labeled_points"])},
        )
        points_to_label = json.loads(response.data)
        assert isinstance(points_to_label, list)
        assert len(points_to_label) == 1
        assert isinstance(points_to_label[0], int)
| AIDEmeProject/AIDEme | api/tests/routes/test_points.py | test_points.py | py | 4,014 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "src.routes.endpoints.INITIAL_UNLABELED_POINTS",
"line_number": 40,
"usage_type": "argument"
},
{
"api_n... |
21415819246 | import statistics
import random
import pandas as pd
import plotly_express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
# Load the reading-time samples. Bug fix: the original read the CSV into
# `file1` but then indexed an undefined name `df`, raising NameError.
file1 = pd.read_csv('data.csv')
data = file1['reading_time'].tolist()
dataset = []
def randomdata():
    """Return the mean of 100 values sampled with replacement from `data`.

    Bug fix: the original appended into the module-level `dataset`, so each
    call averaged an ever-growing, overlapping pool — every sample mean after
    the first was correlated with the previous ones. A local list keeps the
    1000 samples independent, as a sampling-distribution demo requires.
    """
    sample = []
    for _ in range(100):
        index = random.randint(0, len(data) - 1)
        sample.append(data[index])
    return statistics.mean(sample)
def showgraph(meanlist):
    """Plot the distribution of sample means with a vertical line at the mean.

    Bug fix: the original assigned to `datframe` (typo) and then read
    `dataframe`, raising NameError on every call.
    """
    dataframe = meanlist
    mean = statistics.mean(dataframe)
    graph = ff.create_distplot([dataframe], ['Result'], show_hist = False)
    graph.add_trace(go.Scatter(x = [mean,mean], y = [0,1], mode = 'lines', name = 'Mean'))
    graph.show()
def setup():
    """Build the sampling distribution of 1000 sample means, plot it, and
    print its mean plus the population standard deviation.

    Bug fix: the original called `showfig`, which is not defined anywhere in
    this file — the plotting function is named `showgraph`.
    """
    meanlist = []
    for i in range(0, 1000):
        setofmeans = randomdata()
        meanlist.append(setofmeans)
    showgraph(meanlist)
    mean = statistics.mean(meanlist)
    print('mean of sampling distribution' ,mean)
    sd = statistics.stdev(data)
    print(sd)
setup()
| SaanviSinha/Project-110 | program.py | program.py | py | 1,003 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"... |
75224693472 | from data import *
from models import *
import argparse
import os
import pickle
parser = argparse.ArgumentParser(description='NLI training')
parser.add_argument("--data_path", type=str, default='./data', help="path to data")
# model
parser.add_argument("--encoder_type", type=str, default='GRUEncoder', help="see list of encoders")
parser.add_argument("--enc_lstm_dim", type=int, default=256, help="encoder nhid dimension")
parser.add_argument("--num_layer", type=int, default=1, help="encoder num layers")
parser.add_argument("--fc_dim", type=int, default=256, help="nhid of fc layers")
parser.add_argument("--n_classes", type=int, default=3, help="entailment/neutral/contradiction")
parser.add_argument("--pool_type", type=str, default='max', help="max or mean")
parser.add_argument("--use_cuda", type=bool, default=True, help="True or False")
# train
parser.add_argument("--n_epochs", type=int, default=20)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--dpout_model", type=float, default=0., help="encoder dropout")
parser.add_argument("--dpout_fc", type=float, default=0.2, help="classifier dropout")
parser.add_argument("--dpout_embed", type=float, default=0., help="embed dropout")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for adam")
parser.add_argument("--last_model", type=str, default="", help="train on last saved model")
parser.add_argument("--saved_model_name", type=str, default="model_new", help="saved model name")
parser.add_argument("--w2v_model", type=str, default="w2v-model.txt", help="w2v file name")
params, _ = parser.parse_known_args()
print(params)
'''
SEED
'''
np.random.seed(10)
torch.manual_seed(10)
"""
DATA
"""
train, dev, test = get_nl(params.data_path)
wv, default_wv = build_vocab(np.append(train['s1'], train['s2']), params.w2v_model)
'''
MODEL
'''
config_nli_model = {
'n_words' : len(wv),
'word_emb_dim' : 300,
'enc_lstm_dim' : params.enc_lstm_dim,
'num_layer' : params.num_layer,
'dpout_model' : params.dpout_model,
'dpout_fc' : params.dpout_fc,
'fc_dim' : params.fc_dim,
'bsize' : params.batch_size,
'n_classes' : params.n_classes,
'pool_type' : params.pool_type,
'encoder_type' : params.encoder_type,
'use_cuda' : params.use_cuda,
}
nli_net = NLINet(config_nli_model)
if params.last_model:
print("load model {}".format(params.last_model))
nli_net.load_state_dict(torch.load(os.path.join("saved_model", params.last_model)))
print(nli_net)
# loss
weight = torch.FloatTensor(3).fill_(1)
loss_fn = nn.CrossEntropyLoss(weight=weight)
loss_fn.size_average = False
# optimizer
from torch import optim
optimizer = optim.Adam(nli_net.parameters(), lr=params.lr)
# cuda
if params.use_cuda:
torch.cuda.manual_seed(10)
torch.cuda.set_device(0)
nli_net.cuda()
loss_fn.cuda()
'''
TRAIN
'''
def trainepoch(epoch):
    """Run one training epoch over shuffled SNLI-style pairs.

    Uses the module-level nli_net, optimizer, loss_fn, params, train, wv.
    Returns (mean epoch loss, epoch accuracy %), both rounded to 2 decimals.
    """
    all_costs = []   # rolling window, flushed every 100 batches for logging
    tot_costs = []   # all batch losses of the epoch
    logs = []
    correct = 0.0
    nli_net.train()
    # fresh shuffle of sentence pairs and labels each epoch
    permutation = np.random.permutation(len(train['s1']))
    s1 = train['s1'][permutation]
    s2 = train['s2'][permutation]
    target = train['label'][permutation]
    for stidx in range(0, len(s1), params.batch_size):
        s1_batch, s1_len = get_batch(s1[stidx:stidx+params.batch_size], wv, default_wv, params.dpout_embed)
        s2_batch, s2_len = get_batch(s2[stidx:stidx+params.batch_size], wv, default_wv, params.dpout_embed)
        if params.use_cuda:
            s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())
            tgt_batch = Variable(torch.LongTensor(target[stidx:stidx+params.batch_size])).cuda()
        else:
            s1_batch, s2_batch = Variable(s1_batch), Variable(s2_batch)
            tgt_batch = Variable(torch.LongTensor(target[stidx:stidx+params.batch_size]))
        k = s1_batch.size(1)  # actual size of this (possibly final, short) batch
        output = nli_net((s1_batch, s1_len), (s2_batch, s2_len))
        pred = output.data.max(1)[1]
        correct += pred.long().eq(tgt_batch.data.long()).cpu().sum().item()
        # loss + backprop step
        loss = loss_fn(output, tgt_batch)
        all_costs.append(loss.item())
        tot_costs.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # periodic progress log every 100 batches
        if len(all_costs) == 100:
            logs.append('{0}; loss: {1}; accuracy train: {2}'.format(stidx,
                        round(np.mean(all_costs), 2), round(100.*correct/(stidx+k), 2)))
            print(logs[-1])
            all_costs = []
    train_acc = round(100 * correct/len(s1), 2)
    train_loss = round(np.mean(tot_costs), 2)
    return train_loss, train_acc
val_acc_best = -1e10
adam_stop = False
stop_training = False
def evaluate(epoch, eval_type='dev', final_eval=False):
    """Evaluate nli_net on the dev or test split and return accuracy (%).

    Side effect: when eval_type == 'dev' and accuracy improves on
    val_acc_best, the model state_dict and its config are saved to
    saved_model/ and val_acc_best is updated.
    """
    nli_net.eval()
    correct = 0.0
    global val_acc_best, lr, stop_training, adam_stop
    s1 = dev['s1'] if eval_type == 'dev' else test['s1']
    s2 = dev['s2'] if eval_type == 'dev' else test['s2']
    target = dev['label'] if eval_type == 'dev' else test['label']
    for i in range(0, len(s1), params.batch_size):
        # prepare batch
        s1_batch, s1_len = get_batch(s1[i:i + params.batch_size], wv, default_wv, params.dpout_embed)
        s2_batch, s2_len = get_batch(s2[i:i + params.batch_size], wv, default_wv, params.dpout_embed)
        if params.use_cuda:
            s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())
            tgt_batch = Variable(torch.LongTensor(target[i:i + params.batch_size])).cuda()
        else:
            s1_batch, s2_batch = Variable(s1_batch), Variable(s2_batch)
            tgt_batch = Variable(torch.LongTensor(target[i:i + params.batch_size]))
        # model forward (no optimizer step: evaluation only)
        output = nli_net((s1_batch, s1_len), (s2_batch, s2_len))
        pred = output.data.max(1)[1]
        correct += pred.long().eq(tgt_batch.data.long()).cpu().sum().item()
    # save model when dev accuracy improves
    eval_acc = round(100 * correct / len(s1), 2)
    if final_eval:
        print('finalgrep: accuracy {0}: {1}'.format(eval_type, eval_acc))
    else:
        print('togrep: results: epoch {0}; mean accuracy {1}:{2}'.format(epoch, eval_type, eval_acc))
    if eval_type == 'dev' and eval_acc > val_acc_best:
        # persist the argparse namespace alongside the weights
        with open( os.path.join("saved_model", params.saved_model_name+"_config.pickle" ), 'wb') as handle:
            pickle.dump(params, handle, protocol=pickle.HIGHEST_PROTOCOL)
        print('saving model at epoch {0}'.format(epoch))
        if not os.path.exists("saved_model"): os.makedirs("saved_model")
        torch.save(nli_net.state_dict(), os.path.join("saved_model", params.saved_model_name))
        val_acc_best = eval_acc
    return eval_acc
"""
Train model
"""
### TRAINING
train_loss_ls = []
train_acc_ls = []
eval_acc_ls = []
eval_acc = 0
for i in range(params.n_epochs):
print('\nTRAINING : Epoch ' + str(i))
train_loss, train_acc = trainepoch(i)
train_loss_ls.append(train_loss)
train_acc_ls.append(train_acc)
print('results: epoch {0}; loss: {1} mean accuracy train: {2}'.format(i, train_loss, train_acc))
if i%1==0:
print("-"*100)
print('\nEVALIDATING: Epoch ' + str(i))
eval_acc = evaluate(i, eval_type='dev', final_eval=False)
eval_acc_ls.append(eval_acc)
print('results: epoch {0}; mean accuracy dev: {1}'.format(i, eval_acc))
print("-"*100)
| tingchunyeh/Sentence-Sim | train.py | train.py | py | 7,510 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
... |
8272807423 | # MAGIC CODEFORCES PYTHON FAST IO
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
    # Flush everything accumulated in the StringIO buffer to the real stdout
    # exactly once, at interpreter exit (part of the fast-IO boilerplate).
    sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
# END OF MAGIC CODEFORCES PYTHON FAST IO
import itertools
def solve():
    """CSES 'Food Division': minimum total transfers to equalize the circle.

    Classic prefix-sum argument: with partial sums of (a_i - b_i), the optimal
    cost is the sum of absolute deviations from the median partial sum.
    Reads n, array a, array b from the fast-IO `input` defined above.
    """
    n = int(input())
    a = map(int, input().split())
    b = map(int, input().split())
    delta = map(lambda x,y : x-y,a,b)
    partialSums = sorted(itertools.accumulate(delta))
    median = partialSums[n//2]
    print(sum(abs(median - x) for x in partialSums))
| elsantodel90/cses-problemset | food_division.py | food_division.py | py | 644 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin.read",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number"... |
36343414639 | # -*- coding: utf-8 -*-
import pymysql
from learn_pymysql.test_api.mysql_api import MysqlClient
from learn_pymysql.test_api.redis import RedisClient
from learn_project.my_project.test_api.test_public import Job
class StuChooseCls(object):
    """Student course-selection service: create courses, list open courses,
    and enroll a logged-in student into a course (MySQL + Redis backed)."""

    @staticmethod
    def create_course(token, course):
        """
        Create a course.
        :param token: login token of the acting user (must map to a teacher)
        :param course: object carrying name/teach_id/count/start_time/end_time
        :return: None (failures are reported via print)
        """
        # # token format validation is no longer required
        # if not ValidCheckUtils.is_en_num(token):
        #     print("token不合法")
        #     return
        # start_time must precede end_time, and end_time must be in the future
        if course.end_time > course.start_time and course.end_time > Job.get_time():
            connection = MysqlClient.get_connection()
            db = connection.cursor(pymysql.cursors.DictCursor)
            user_id = RedisClient.create_redis_client().get("user_login_" + str(token))
            if user_id is None:
                print('用户未登录')
            else:
                db.execute("select * from school_user where id = %s", user_id)
                res = db.fetchone()
                db.execute("select * from school_user where id = %s", course.teach_id)
                teach_res = db.fetchone()
                # the token's user must exist and hold teacher privileges
                if res is None:
                    print("用户不存在")
                elif res['type'] == 0:
                    print('非教师登陆')
                # teach_id must be valid (present in school_user with teacher type)
                elif teach_res is None or teach_res['type'] == 0:
                    print("teach_id不存在或权限错误")
                else:
                    # insert the course record (new_count starts at 0)
                    db.execute(
                        "insert into school_class(name,teach_id,count,new_count,start_time,end_time,create_time) values (%s,%s,%s,%s,%s,%s,%s)",
                        (course.name, course.teach_id, course.count, 0, course.start_time,
                         course.end_time, Job.get_time()))
                    connection.commit()
        else:
            print("课程时间参数不合法")

    @staticmethod
    def get_course():
        """
        List courses whose selection window is currently open.
        :return: list of course dicts, with teach_id replaced by teach_name
        """
        connection = MysqlClient.get_connection()
        db = connection.cursor(pymysql.cursors.DictCursor)
        db.execute("select * from school_class")
        res = db.fetchall()
        course_list = []
        for i in res:
            course_dict = {}
            # keep only courses open right now
            if i['start_time'] <= Job.get_time() <= i['end_time']:
                for k, v in i.items():
                    if k == 'teach_id':
                        # resolve the teacher id to a display name
                        db.execute("select * from school_user where id=%s", v)
                        teach_name = db.fetchone()['name']
                        course_dict['teach_name'] = teach_name
                    else:
                        course_dict[k] = v
                course_list.append(course_dict)
        return course_list

    @staticmethod
    def choose_course(token, course_id):
        """
        Enroll the logged-in user into a course.
        :param token: login token of the student
        :param course_id: id of the course to enroll in
        :return: True on success, False on any validation failure
        """
        connection = MysqlClient.get_connection()
        db = connection.cursor(pymysql.cursors.DictCursor)
        user_id = RedisClient.create_redis_client().get("user_login_" + str(token))
        # the token must map to a logged-in session
        if user_id is None:
            print('用户未登录')
            return False
        else:
            db.execute("select * from school_user where id = %s", user_id)
            res = db.fetchone()
            # the session's user must still exist
            if res is None:
                print("用户不存在")
                return False
            else:
                # load the course row for course_id
                db.execute("select * from school_class where id = %s", course_id)
                cls_res = db.fetchone()
                # load any existing enrollment of this user in this course
                db.execute("select * from user_class where class_id = %s and user_id=%s", (course_id, user_id))
                user_cls_res = db.fetchone()
                # reject duplicate enrollment
                if user_cls_res is not None:
                    print('用户已经选过该课程')
                    return False
                # the course must be open for selection right now
                elif not (cls_res['start_time'] < Job.get_time() < cls_res['end_time']):
                    print("当前时间该课程不可选")
                    return False
                # the course must not be full
                elif cls_res['new_count'] + 1 > cls_res['count']:
                    print("选课人数达上限")
                    return False
                else:
                    # record the enrollment
                    db.execute("insert into user_class(user_id, class_id, create_time) values (%s,%s,%s)",
                               (user_id, course_id, Job.get_time()))
                    connection.commit()
                    # bump the course's enrollment counter
                    db.execute("update school_class set new_count=%s where id = %s",
                               (cls_res['new_count'] + 1, course_id))
                    connection.commit()
                    return True
| Liabaer/Test | learn_flask/course/course_selection_project/course_selection_api/stu_choose_service.py | stu_choose_service.py | py | 5,442 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "learn_project.my_project.test_api.test_public.Job.get_time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "learn_project.my_project.test_api.test_public.Job",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "learn_pymysql.test_api.mysql_api.Mys... |
20523314908 | from telegram import Update, Bot, InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler
from tg_token import token
bot = Bot(token)
updater = Updater(token, use_context = True)
dispatcher = updater.dispatcher
def printt(update, context, message: str):
    # Send `message` back to the chat the update came from.
    context.bot.send_message(update.effective_chat.id, message)
def start(update, context):
    """Handler for /start: greet the user."""
    #context.bot.send_message(update.effective_chat.id, 'Hello')
    printt(update, context, 'Hello')
def message(update, context):
    """Plain-text handler: replies 'Goodbye' only to the exact message '1'."""
    if update.message.text == '1':
        context.bot.send_message(update.effective_chat.id, 'Goodbye')
def help_message(update, context):
    """Fallback for unrecognized commands."""
    context.bot.send_message(update.effective_chat.id, 'I don`t know this command :(')
def get_day(update, context):
    """Handler for /get_day: show a 2-row inline keyboard of weekdays.

    Each button's callback_data is '1'..'7' (Monday..Sunday), consumed
    by the `button` callback handler.
    """
    keyboard =[
        [InlineKeyboardButton('Monday', callback_data = '1'), InlineKeyboardButton('Tuesday', callback_data = '2'), \
        InlineKeyboardButton('Wednesday', callback_data = '3'), InlineKeyboardButton('Thursday', callback_data = '4')],
        [InlineKeyboardButton('Friday', callback_data = '5'), InlineKeyboardButton('Saturday', callback_data = '6'), \
        InlineKeyboardButton('Sunday', callback_data = '7')]
    ]
    update.message.reply_text('Choose the day!', reply_markup = InlineKeyboardMarkup(keyboard))
def button(update, context):
    """Acknowledge the inline-keyboard callback and echo the chosen weekday."""
    query = update.callback_query
    query.answer()
    days = {
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
        '7': 'Sunday',
    }
    day = days.get(query.data)
    if day is not None:
        printt(update, context, day)
buttons_handler = CallbackQueryHandler(button)
start_handler = CommandHandler('start', start) # command handlers must be registered first
get_day_handler = CommandHandler('get_day', get_day)
unknown_handler = MessageHandler(Filters.command, help_message) # then the unknown-command fallback
message_handler = MessageHandler(Filters.text, message) # and only then plain-text messages
dispatcher.add_handler(start_handler)
dispatcher.add_handler(buttons_handler)
dispatcher.add_handler(get_day_handler)
dispatcher.add_handler(unknown_handler)
dispatcher.add_handler(message_handler)
updater.start_polling()
updater.idle()
| letSmilesz/meet_python | seminar10/telegram_bot.py | telegram_bot.py | py | 2,548 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "telegram.Bot",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tg_token.token",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "telegram.ext.Updater",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tg_token.token",
... |
38728874269 | import speech_recognition as sr
import os
from gtts import gTTS
import datetime
import pyttsx3
import playsound
import warnings
import pyaudio
import random
import datetime
import time
import calendar
import wikipedia
warnings.filterwarnings("ignore")
engine = pyttsx3.init()
voices = engine.getProperty('rate')
engine.setProperty('rate', 175)
def talk(audio): # speak the given text through the local TTS engine
    engine.say(audio)
    engine.runAndWait()
def get_voice_command():
    """Record from the default microphone and transcribe it via Google.

    Returns the recognized text, or '' when nothing could be transcribed
    (errors are spoken through the shared TTS engine instead of raised).
    """
    record = sr.Recognizer()
    with sr.Microphone() as source:
        record.adjust_for_ambient_noise(source)
        print("Listening...")
        audio = record.listen(source, timeout=10)
        data = ''
        try:
            data = record.recognize_google(audio)
        except sr.UnknownValueError:
            engine.say("Assistant could not understand the audio")
        except sr.RequestError as ex:
            # Bug fix: the original concatenated str + RequestError, which
            # raises TypeError inside the handler; convert the error first.
            engine.say("Request Error from Google Speech Recognition" + str(ex))
    return data
def response(text): # Air responding to the user via Google TTS
    tts = gTTS(text=text, lang="en")
    audio = "Audio.mp3"
    tts.save(audio)
    playsound.playsound(audio)
    os.remove(audio)  # delete the temp file so the next call can recreate it
def call(text):
    """True when the wake word 'air' occurs anywhere in `text` (case-insensitive)."""
    return "air" in text.lower()
def today_date():
    """Return e.g. 'Today is Monday, March the 3rd' for the current local date."""
    now = datetime.datetime.now()
    weekday = calendar.day_name[datetime.datetime.today().weekday()]
    month = calendar.month_name[now.month]
    day = now.day
    # ordinal suffix: 11-13 are always 'th'; otherwise keyed by the last digit
    if day % 100 in (11, 12, 13):
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")
    return "Today is " + weekday + ", " + month + " the " + str(day) + suffix
def wiki_finder(text):
    """Strip query filler words and return the search subject.

    Iterates whitespace-separated tokens, skipping filler words, and stops
    at the first standalone '.' token. The result keeps a trailing space
    after each kept word (matching downstream expectations).
    """
    filler = {"wikipedia", "who", "is", "search", "on", "at", " "}
    subject = ""
    for word in text.split():
        if word == ".":
            break
        if word not in filler:
            subject += word + " "
    return subject
# Main assistant loop: listen, dispatch on keywords, speak the reply.
while True:
    try:
        text = get_voice_command()
        speak = ""
        # date queries
        if "date" in text or "day" in text or "month" in text or "what do you think today's date is" in text:
            get_today = today_date()
            speak = speak + " " + get_today
        # time queries: format as 12-hour clock with am/pm
        elif "time" in text:
            now = datetime.datetime.now()
            meridiem = ""
            if now.hour >= 12:
                meridiem = "pm"
                hour = now.hour - 12
            else:
                meridiem = "am"
                hour = now.hour
            if now.minute < 10:
                minute = "0" + str(now.minute)
            else:
                minute = str(now.minute)
            speak = speak + " " + "It is " + str(hour) + ":" + minute + " " + meridiem + " ."
        # Wikipedia lookups: 2-sentence summary of the extracted subject
        elif "wikipedia" in text or "Wikipedia" in text:
            object = wiki_finder(text)
            wiki = wikipedia.summary(object, sentences=2)
            speak = speak + " " + wiki
            wiki = ""
        # exit phrases end the loop
        elif "thank you" in text or "shutdown" in text:
            talk("see you again")
            break
        response(speak)
        print(speak)
    # NOTE(review): bare except hides all errors (including KeyboardInterrupt)
    # behind a retry prompt — consider narrowing.
    except:
        talk("I didn't understand what you said please try again")
| cs-darshan/AIR | main.py | main.py | py | 4,105 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyttsx3.init",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sp... |
10924485509 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
"""
在 main 方法中才会延迟输出
"""
def auto_search():
    """
    Automated search: open dianping.com in Chrome and search for hot pot.
    :return: None
    """
    driver = webdriver.Chrome()
    url = "http://www.dianping.com"
    # url = "https://www.baidu.com/"
    driver.get(url)
    # dump the page source
    print(driver.page_source)
    # dump the session cookies
    cookies = driver.get_cookies()
    print(cookies)
    # type the query into the search box and submit with Enter
    elem = driver.find_element_by_id('J-search-input')
    elem.clear()
    elem.send_keys("火锅")
    elem.send_keys(Keys.RETURN)
    # driver.close()
if __name__ == '__main__':
    # auto_search()
    # hide the "controlled by automated software" banner / automation flag
    options = webdriver.ChromeOptions()
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    driver = webdriver.Chrome(options=options)
    # driver = webdriver.Chrome()
    url = "http://www.dianping.com"
    # url = "https://www.baidu.com/"
    url = "http://www.dianping.com/shop/110281977"
    driver.get(url)
    # fetch and dump the page source
    all_page_source = driver.page_source
    print(all_page_source)
    print("sssssss", all_page_source.find("地址不对"))
    # dump the session cookies
    cookies = driver.get_cookies()
    print(cookies)
    # type the query into the search box and submit with Enter
    elem = driver.find_element_by_id('J-search-input')
    elem.clear()
    elem.send_keys("火锅")
    elem.send_keys(Keys.RETURN)
    # driver.close()
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.RETURN",
"line_number": 29,
"usage_type": "attribute"
... |
14085867754 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 15:15:36 2020
@author: Administrator
"""
import numpy as np
import torch
import torch.nn as nn
import time
#import skimage.measure as sm
import skimage.metrics as sm
import cv2
from osgeo import gdal
import matplotlib.pyplot as plt
###img read tool###############################################################
def imgread(file,mode='gdal'):
    """Read a raster file and rescale values by 1/10000 (reflectance scaling).

    mode='cv2' reads with OpenCV (unchanged flags, -1 = as-is);
    mode='gdal' reads all bands as an array via GDAL.
    NOTE(review): any other mode leaves `img` unbound -> NameError on return.
    """
    if mode=='cv2':
        img=cv2.imread(file,-1)/10000.
    if mode=='gdal':
        img=gdal.Open(file).ReadAsArray()/10000.
    return img
###weight caculate tools######################################################
def weight_caculate(data):
    """Log-scaled magnitude weight: log(|data| * 10000 + 1.00001)."""
    scaled = torch.abs(data) * 10000 + 1.00001
    return torch.log(scaled)
def caculate_weight(l1m1,m1m2):
    """Combined STARFM weight: spectral-difference weight times temporal-difference weight."""
    # spectral (Landsat vs MODIS at t1) difference weight
    wl1m1=weight_caculate(l1m1 )
    # temporal (MODIS t1 vs t2) difference weight
    wm1m2=weight_caculate(m1m2 )
    return wl1m1*wm1m2
###space distance caculate tool################################################
def indexdistance(window):
    """Relative spatial-distance weight matrix for one window (1.0 at center).

    Each cell is 1 + (euclidean distance to center) / (half window width),
    shared by every window position.
    """
    cx = (window[0] - 1) // 2
    cy = (window[1] - 1) // 2
    gx, gy = np.meshgrid(np.arange(window[0]), np.arange(window[1]))
    radial = np.sqrt((gx - cx) ** 2 + (gy - cy) ** 2)
    return 1 + radial / ((window[0] - 1) // 2)
###threshold select tool######################################################
def weight_bythreshold(weight, data, threshold):
    """Set entries of `weight` to 1 wherever `data` <= threshold (in place) and return it."""
    below = data <= threshold
    weight[below] = 1
    return weight
def weight_bythreshold_allbands(weight,l1m1,m1m2,thresholdmax):
    """Cross-band similar-pixel mask: 1 only where every band passes a threshold test.

    NOTE(review): a cell is flagged when EITHER the spectral difference
    (l1m1) or the temporal difference (m1m2) is under its threshold;
    classic STARFM similarity requires BOTH — confirm this OR is intended.
    """
    #make weight tensor
    weight[l1m1<=thresholdmax[0]]=1
    weight[m1m2<=thresholdmax[1]]=1
    # average over the band axis; only cells flagged in every band stay 1
    allweight=(weight.sum(0).view(1,weight.shape[1],weight.shape[2]))/weight.shape[0]
    allweight[allweight!=1]=0
    return allweight
###initial similar pixels tools################################################
def spectral_similar_threshold(clusters, NIR, red):
    """Similarity thresholds for (NIR, red): two standard deviations divided by the cluster count."""
    nir_threshold = NIR.std() * 2 / clusters
    red_threshold = red.std() * 2 / clusters
    return (nir_threshold, red_threshold)
def caculate_similar(l1,threshold,window):
    """Initial similar-pixel mask: per unfolded window, 1 where an element
    differs from the window's center element by at most `threshold`."""
    #read l1
    device= torch.device("cuda" if torch.cuda.is_available() else "cpu")
    l1=nn.functional.unfold(l1,window)
    #caculate similar: compare every window element against the center column
    weight=torch.zeros(l1.shape,dtype=torch.float32).to(device)
    centerloc=( l1.size()[1]-1)//2
    weight=weight_bythreshold(weight,abs(l1-l1[:,centerloc:centerloc+1,:]) ,threshold)
    return weight
def classifier(l1):
    '''Placeholder for a clustering-based similar-pixel classifier (not used).'''
    return
###similar pixels filter tools#################################################
def allband_arrayindex(arraylist, indexarray, rawindexshape):
    """Gather the `indexarray` (row, col) locations from every band of every array.

    :param arraylist: list of (1, bands, H, W) tensors
    :param indexarray: pair of index arrays (e.g. from np.meshgrid)
    :param rawindexshape: output shape (1, bands, h, w)
    :return: list of gathered (1, bands, h, w) float32 tensors

    Bug fix: the output buffer was allocated with .cuda(), crashing on
    CPU-only hosts even though the rest of the module selects its device
    dynamically; it now follows the device of each input array. The index
    pair is applied as a tuple, the unambiguous advanced-indexing form.
    """
    shape = arraylist[0].shape
    datalist = []
    for array in arraylist:
        newarray = torch.zeros(rawindexshape, dtype=torch.float32, device=array.device)
        for band in range(shape[1]):
            newarray[0, band] = array[0, band][tuple(indexarray)]
        datalist.append(newarray)
    return datalist
def similar_filter(datalist, sital, sitam):
    """Per-pixel max band difference plus sensor-uncertainty offsets.

    :param datalist: [l1, m1, m2] tensors of shape (1, bands, H, W)
    :param sital: Landsat uncertainty; sitam: MODIS uncertainty
    :return: (landsat-vs-modis-at-t1, modis-t1-vs-t2) threshold maps, each (1, H*W)
    """
    l1, m1, m2 = datalist
    diff_lm = torch.abs(l1 - m1)
    diff_mm = torch.abs(m2 - m1)
    unc_lm = (sital ** 2 + sitam ** 2) ** 0.5
    unc_mm = (sitam ** 2 + sitam ** 2) ** 0.5
    lm = nn.functional.unfold(diff_lm, (1, 1)).max(1)[0] + unc_lm
    mm = nn.functional.unfold(diff_mm, (1, 1)).max(1)[0] + unc_mm
    return (lm, mm)
###starfm for onepart##########################################################
def starfm_onepart(datalist,similar,thresholdmax,window,outshape,dist):
    """Run STARFM prediction for one image tile.

    :param datalist: [l1, m1, m2] padded tensors (Landsat t1, MODIS t1, MODIS t2)
    :param similar: initial similar-pixel mask from caculate_similar
    :param thresholdmax: (l1m1, m1m2) threshold maps from similar_filter
    :param window: moving-window size; outshape: tile (H, W); dist: spatial weights
    :return: predicted Landsat-like image at t2 for this tile, (1, bands, H, W)
    """
    #####param and data
    [l1,m1,m2]=datalist
    bandsize=l1.shape[1]
    outshape=outshape
    blocksize=outshape[0]*outshape[1]
    device= torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ##### img to col: one column per output pixel's window
    l1=nn.functional.unfold(l1,window)
    m1=nn.functional.unfold(m1,window)
    m2=nn.functional.unfold(m2,window)
    l1=l1.view(bandsize,-1,blocksize)
    m1=m1.view(bandsize,-1,blocksize)
    m2=m2.view(bandsize,-1,blocksize)
    l1m1=abs(l1-m1)
    m1m2=abs(m2-m1)
    #####caculate weights
    # combined spectral/temporal weight, then inverse with spatial distance
    w=caculate_weight(l1m1,m1m2)
    w=1/(w*dist)
    #similar pixels: 1:by threshold 2:by classifier
    wmask=torch.zeros(l1.shape,dtype=torch.float32).to(device)
    #filter similar pixels for each band: (bandsize,windowsize,blocksize)
    #wmasknew=weight_bythreshold(wmask,l1m1,thresholdmax[0])
    #wmasknew=weight_bythreshold(wmasknew,m1m2,thresholdmax[1])
    #filter similar pixels for all bands: (1,windowsize,blocksize)
    wmasknew=weight_bythreshold_allbands(wmask,l1m1,m1m2,thresholdmax)
    # keep only similar pixels, then normalize weights over each window
    w=w*wmasknew*similar
    w=w/(w.sum(1).view(w.shape[0],1,w.shape[2]))
    ##### predict and reshape back to an image
    # STARFM prediction: Landsat t1 plus MODIS temporal change, weighted
    l2=(l1+m2-m1)*w
    l2=l2.sum(1).reshape(1,bandsize,l2.shape[2])
    #col to img
    l2=nn.functional.fold(l2.view(1,-1,blocksize),outshape,(1,1))
    return l2
###starfm for allpart#########################################################
def starfm_main(l1r,m1r,m2r,
                param={'part_shape':(140,140),
                       'window_size':(31,31),
                       'clusters':5,
                       'NIRindex':3,'redindex':2,
                       'sital':0.001,'sitam':0.001}):
    """Run STARFM fusion over a full scene, processed tile by tile.

    Parameters
    ----------
    l1r, m1r, m2r : torch.Tensor
        (1, bands, H, W) tensors: fine image at t1, coarse image at t1 and
        coarse image at t2 (assumed co-registered and same shape — TODO confirm).
    param : dict
        part_shape  - tile size processed per step
        window_size - moving-window size for the similar-pixel search
        clusters    - cluster count used to derive the similarity threshold
        NIRindex/redindex - band indexes used to find similar pixels
        sital/sitam - sensor uncertainty terms passed to similar_filter

    Returns
    -------
    torch.Tensor
        Predicted fine image at t2, shape (1, bands, H, W).

    NOTE: ``param`` is a mutable default argument; it is only read here, but
    callers should still pass their own dict.
    """
    #get start time
    time_start=time.time()
    device= torch.device("cuda" if torch.cuda.is_available() else "cpu")
    #read parameters
    parts_shape=param['part_shape']
    window=param['window_size']
    clusters=param['clusters']
    NIRindex=param['NIRindex']
    redindex=param['redindex']
    sital=param['sital']
    sitam=param['sitam']
    #caculate initial similar pixels threshold
    threshold=spectral_similar_threshold(clusters,l1r[:,NIRindex:NIRindex+1],l1r[:,redindex:redindex+1])
    print('similar threshold (NIR,red)',threshold)
    ####shape
    imageshape=(l1r.shape[1],l1r.shape[2],l1r.shape[3])
    print('datashape:',imageshape)
    row=imageshape[1]//parts_shape[0]+1
    col=imageshape[2]//parts_shape[1]+1
    padrow=window[0]//2
    padcol=window[1]//2
    #####padding constant for conv;STARFM use Inverse distance weight(1/w),better to avoid 0 and NAN(1/0),or you can use another distance measure
    constant1=10
    constant2=20
    constant3=30
    l1=torch.nn.functional.pad( l1r,(padrow,padcol,padrow,padcol),'constant', constant1)
    m1=torch.nn.functional.pad( m1r,(padrow,padcol,padrow,padcol),'constant', constant2)
    m2=torch.nn.functional.pad( m2r,(padrow,padcol,padrow,padcol),'constant', constant3)
    #split parts , get index and run for every part
    row_part=np.array_split( np.arange(imageshape[1]), row , axis = 0)
    col_part=np.array_split( np.arange(imageshape[2]), col, axis = 0)
    # fixed: the part count is rows*cols and the col number comes from col_part
    # (the original message formatted len(row_part) three times)
    print('Split into {} parts,row number: {},col number: {}'.format(len(row_part)*len(col_part),len(row_part),len(col_part)))
    dist=nn.functional.unfold(torch.tensor( indexdistance(window),dtype=torch.float32).reshape(1,1,window[0],window[1]),window).to(device)
    for rnumber,row_index in enumerate(row_part):
        for cnumber,col_index in enumerate(col_part):
            ####run for part: (rnumber,cnumber)
            print('now for part{}'.format((rnumber,cnumber)))
            ####output index
            rawindex=np.meshgrid(row_index,col_index)
            ####output shape
            rawindexshape=(col_index.shape[0],row_index.shape[0])
            ####the real parts_index ,for reading the padded data
            row_pad=np.arange(row_index[0],row_index[len(row_index)-1]+window[0])
            col_pad=np.arange(col_index[0],col_index[len(col_index)-1]+window[1])
            padindex=np.meshgrid(row_pad,col_pad)
            padindexshape=(col_pad.shape[0],row_pad.shape[0])
            ####caculate initial similar pixels
            NIR_similar=caculate_similar(l1[0,NIRindex][ padindex ].view(1,1,padindexshape[0],padindexshape[1]),threshold[0],window)
            red_similar=caculate_similar(l1[0,redindex][ padindex ].view(1,1,padindexshape[0],padindexshape[1]),threshold[1],window)
            similar=NIR_similar*red_similar
            ####caculate threshold used for similar_pixels_filter
            thresholdmax=similar_filter( allband_arrayindex([l1r,m1r,m2r],rawindex,(1,imageshape[0],rawindexshape[0],rawindexshape[1])),
                                        sital,sitam)
            ####Splicing each col at rnumber-th row
            if cnumber==0:
                rowdata=starfm_onepart( allband_arrayindex([l1,m1,m2],padindex,(1,imageshape[0],padindexshape[0],padindexshape[1])),
                                       similar,thresholdmax,window,rawindexshape,dist
                                       )
            else:
                rowdata=torch.cat( (rowdata,
                                    starfm_onepart( allband_arrayindex([l1,m1,m2],padindex,(1,imageshape[0],padindexshape[0],padindexshape[1])),
                                                   similar,thresholdmax,window,rawindexshape,dist) ) ,2)
        ####Splicing each row
        if rnumber==0:
            l2_fake=rowdata
        else:
            l2_fake=torch.cat((l2_fake,rowdata),3)
    l2_fake=l2_fake.transpose(3,2)
    #time cost
    time_end=time.time()
    print('now over,use time {:.4f}'.format(time_end-time_start))
    return l2_fake
def test():
    """Demo driver: read the sample images, run STARFM and report SSIM scores.

    Requires the four GeoTIFF files below in the working directory; shows the
    t1 image, the fused prediction and the ground truth with matplotlib.
    """
    ##three band datas(sorry,just find them at home,i cant recognise the spectral response range of each band,'NIR' and 'red' are only examples)
    l1file='L72000306_SZ_B432_30m.tif'
    l2file='L72002311_SZ_B432_30m.tif'
    m1file='MOD09_2000306_SZ_B214_250m.tif'
    m2file='MOD09_2002311_SZ_B214_250m.tif'
    ##param
    param={'part_shape':(75,75),
           'window_size':(31,31),
           'clusters':5,
           'NIRindex':1,'redindex':0,
           'sital':0.001,'sitam':0.001}
    ##read images from files(numpy)
    l1=imgread(l1file)
    m1=imgread(m1file)
    m2=imgread(m2file)
    l2_gt=imgread(l2file)
    ##numpy to tensor
    # NOTE(review): all three inputs are reshaped with l1.shape, i.e. the MODIS
    # rasters are assumed to be already resampled to the Landsat grid — confirm.
    shape=l1.shape
    l1r=torch.tensor(l1.reshape(1,shape[0],shape[1],shape[2]) ,dtype=torch.float32)
    m1r=torch.tensor(m1.reshape(1,shape[0],shape[1],shape[2]) ,dtype=torch.float32)
    m2r=torch.tensor(m2.reshape(1,shape[0],shape[1],shape[2]) ,dtype=torch.float32)
    device= torch.device("cuda" if torch.cuda.is_available() else "cpu")
    l1r=l1r.to(device)
    m1r=m1r.to(device)
    m2r=m2r.to(device)
    ##predicte(tensor input —> tensor output)
    l2_fake=starfm_main(l1r,m1r,m2r,param)
    print(l2_fake.shape)
    ##tensor to numpy
    if device.type=='cuda':
        l2_fake=l2_fake[0].cpu().numpy()
    else:
        l2_fake=l2_fake[0].numpy()
    ##show results
    #transform:(chanel,H,W) to (H,W,chanel)
    l2_fake=l2_fake.transpose(1,2,0)
    l2_gt=l2_gt.transpose(1,2,0)
    l1=l1.transpose(1,2,0)
    m1=m1.transpose(1,2,0)
    m2=m2.transpose(1,2,0)
    #plot
    plt.figure('landsat:t1')
    plt.imshow(l1)
    plt.figure('landsat:t2_fake')
    plt.imshow(l2_fake)
    plt.figure('landsat:t2_groundtrue')
    plt.imshow(l2_gt)
    ##evaluation
    # SSIM of: the STARFM prediction, the unchanged t1 image, and the naive
    # l1+m2-m1 prediction (no similar-pixel weighting) against ground truth
    ssim1=sm.structural_similarity(l2_fake,l2_gt,data_range=1,multichannel=True)
    ssim2=sm.structural_similarity(l1,l2_gt,data_range=1,multichannel=True)
    ssim3=sm.structural_similarity(l1+m2-m1,l2_gt,data_range=1,multichannel=True)
    print('with-similarpixels ssim: {:.4f};landsat_t1 ssim: {:.4f};non-similarpixels ssim: {:.4f}'.format(ssim1,ssim2,ssim3))
    return
# Run the demo fusion when executed as a script.
if __name__ == "__main__":
    test()
| endu111/remote-sensing-images-fusion | STARFM_torch.py | STARFM_torch.py | py | 11,637 | python | en | code | 38 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal.Open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.log",
"line_number": ... |
5409861942 | import torch
import torch.nn as nn
import statistics
import torchvision.models as models
import torch.nn.functional as F
# Default to the first GPU when available; modules and tensors below use this.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class encoder(nn.Module):
    """CNN feature extractor: wraps a backbone, drops its last two children
    (avgpool + fc for a torchvision ResNet) and returns a grid of features.

    Args:
        embed_size: kept for interface compatibility; not used internally.
        model: backbone module whose children are sliced.
        unfreeze: freezing knob — parameters are frozen while
            ``freeze_count + unfreeze < 60``, so larger values freeze fewer
            of the trailing parameter tensors.
    """
    def __init__(self, embed_size, model, unfreeze=10):
        super(encoder, self).__init__()
        self.resnet = model
        # keep only the convolutional trunk so spatial structure survives
        self.resnet = nn.Sequential(*list(self.resnet.children())[:-2])
        self.total_trainable_layers = 0
        self.freeze_count = 0
        for name, param in self.resnet.named_parameters():
            if param.requires_grad:
                self.total_trainable_layers += 1
                if(self.freeze_count + unfreeze < 60):
                    param.requires_grad = False
                    self.freeze_count += 1
        # fixed: the original message said "distil bert layers" — this is the CNN encoder
        print("Total trainable resnet layers are : " + str(self.total_trainable_layers))
        print("Layers freezed = "+str(self.freeze_count))
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
    def forward(self, images):
        """(B, C, H, W) images -> (B, H'*W', C') feature grid, ReLU + dropout applied."""
        features = self.resnet(images)
        features = features.permute(0, 2, 3, 1)
        features = features.view(features.size(0), -1, features.size(-1)) #(B, 7*7, 512)
        return self.dropout(self.relu(features))
class Attention(nn.Module):
    """Additive attention scoring a grid of encoder features against a decoder state."""
    def __init__(self, encoder_dim, decoder_dim, attention_dim):
        super(Attention, self).__init__()
        self.attention_dim = attention_dim
        self.W = nn.Linear(decoder_dim, attention_dim)
        self.U = nn.Linear(encoder_dim, attention_dim)
        self.A = nn.Linear(attention_dim, 1)
        # projects encoder features to an initial decoder hidden state
        self.init_h = nn.Linear(encoder_dim, decoder_dim)
    def forward(self, features, hidden_state):
        """Return (alpha, context): softmax weights over locations and the
        attention-weighted sum of features."""
        projected_features = self.U(features)              # (batch, locations, attention_dim)
        projected_state = self.W(hidden_state)             # (batch, attention_dim)
        energies = torch.tanh(projected_features + projected_state.unsqueeze(1))
        scores = self.A(energies).squeeze(2)               # (batch, locations)
        alpha = F.softmax(scores, dim=1)
        context = (features * alpha.unsqueeze(2)).sum(dim=1)   # (batch, encoder_dim)
        return alpha, context
class decoder(nn.Module):
    """Attention-based decoder skeleton.

    NOTE(review): ``forward`` calls ``self.attention(features)`` with a single
    argument, but Attention.forward requires (features, hidden_state); the
    result would also be a tuple, so ``enc_attn.size()`` would raise. This
    class looks unfinished/unused — confirm before relying on it.
    """
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers, vocab, tokenizer):
        super(decoder, self).__init__()
        self.tokenizer = tokenizer
        # fixed internal dimensions; embed_size/hidden_size args are not used here
        self.encoder_dim = 512
        self.embed_dim = 512
        self.attention_dim = 256
        self.decoder_dim = 256
        self.vocab = vocab
        self.vocab_size = vocab_size
        self.attention = Attention(self.encoder_dim, self.decoder_dim, self.attention_dim)
        self.decode_step = nn.GRU(input_size=self.embed_dim + self.encoder_dim, hidden_size=self.decoder_dim, num_layers=num_layers, bidirectional=True, batch_first=True)
        self.h_lin = nn.Linear(self.encoder_dim, self.decoder_dim)
        self.c_lin = nn.Linear(self.encoder_dim, self.decoder_dim)
        self.f_beta = nn.Linear(self.decoder_dim, self.encoder_dim)
        self.sigmoid = nn.Sigmoid()
        self.fc = nn.Linear(self.decoder_dim, self.vocab_size)
    # def forward(self, features, captions):
    def forward(self, features):
        # NOTE(review): see class docstring — this call signature is broken.
        enc_attn = self.attention(features)
        print(enc_attn.size())
        return enc_attn
class decoder1(nn.Module):
    """GRU decoder mixing DistilBERT caption embeddings with encoder features.

    NOTE(review): ``DistilBertModel`` is not imported in this module's visible
    import block (no ``from transformers import DistilBertModel``) — confirm
    it is provided elsewhere, otherwise construction raises NameError.
    """
    def __init__(self, embed_size, vocab_size, num_layers, vocab,tokenizer, max_len = 32, unfreeze = 10):
        super(decoder1, self).__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.max_len = max_len
        self.gru = nn.GRU(input_size=512, hidden_size=512, num_layers=2, bidirectional=True, batch_first=True)
        self.distil_bert = DistilBertModel.from_pretrained("distilbert-base-uncased",).to(device=device)
        self.tokenizer = tokenizer
        self.total_trainable_layers_distil_bert = []
        self.freeze_count = 0
        # freeze the leading DistilBERT parameter tensors; larger `unfreeze`
        # leaves more of them trainable (threshold: freeze_count + unfreeze < 100)
        for name, param in self.distil_bert.named_parameters():
            if param.requires_grad:
                self.total_trainable_layers_distil_bert.append(name)
                if(self.freeze_count + unfreeze < 100):
                    param.requires_grad = False
                    self.freeze_count += 1
        print("Total trainable distil bert layers are : " + str(len(self.total_trainable_layers_distil_bert)))
        print("Layers freezed = "+str(self.freeze_count))
        # DistilBERT hidden size (768) -> encoder feature size (512)
        self.bert_to_enc_dim = nn.Linear(768, 512)
        # bidirectional GRU doubles the hidden size before the vocab projection
        self.l1 = nn.Linear(2*512, self.vocab_size)
    def forward(self, enc_embed, ids, mask):
        """Teacher-forced decode: returns per-position vocab softmax,
        stacked as (batch, vocab, max_len)."""
        embed = self.distil_bert(ids, attention_mask = mask)
        embed = embed[0]
        embed = self.bert_to_enc_dim(embed)
        all_words = []
        for i in range(self.max_len):
            # select the i-th caption token embedding and pair it with the image features
            cur_tens = torch.tensor([i]).to(device=device)
            word_embed = torch.index_select(embed,dim=1, index=cur_tens)
            enc_cat = torch.cat([word_embed, enc_embed], dim=1)
            e_cat, _ = self.gru(enc_cat)
            # max-pool over the sequence dimension before classification
            e_cat = e_cat.max(dim = 1)[0]
            e_cat = torch.squeeze(e_cat)
            e_cat = F.leaky_relu(self.l1(e_cat))
            e_cat = F.softmax(e_cat, dim=1)
            # max_prob = torch.argmax(e_cat, dim=1)
            all_words.append(e_cat)
        final_words = torch.stack(all_words, dim=2) ## index of words
        return final_words
    def evaluate_test(self, enc_embed, ids, mask, i):
        """Greedy single-step decode for inference; `i` is the step index
        (step 0 selects the first token embedding, later steps embed the
        previously predicted word passed in via `ids`)."""
        if(i==0):
            embed = self.distil_bert(ids, attention_mask = mask)
            embed = embed[0]
            embed = self.bert_to_enc_dim(embed)
            cur_tens = torch.tensor([i]).to(device=device)
            word_embed = torch.index_select(embed,dim=1, index=cur_tens)
        else:
            embed = self.distil_bert(ids, attention_mask = mask)
            embed = embed[0]
            word_embed = self.bert_to_enc_dim(embed)
        enc_cat = torch.cat([word_embed, enc_embed], dim=1)
        e_cat, _ = self.gru(enc_cat)
        e_cat = e_cat.max(dim = 1)[0]
        e_cat = torch.squeeze(e_cat)
        e_cat = F.leaky_relu(self.l1(e_cat))
        e_cat = F.softmax(e_cat, dim=1)
        return e_cat ## single word softmax
class fullModel(nn.Module):
    """End-to-end captioning model: CNN encoder + DistilBERT/GRU decoder.

    forward(test=False) does teacher-forced decoding of the whole caption;
    forward(test=True) decodes greedily one token at a time, feeding each
    predicted word back in as the next input.
    """
    def __init__(self, embed_size, resnet, vocab_size, num_layers, vocab, tokenizer, max_len, unfreeze1, unfreeze2, inverse_vocab, vocab_list):
        super(fullModel, self).__init__()
        self.inverse_vocab = inverse_vocab
        self.vocab_size = vocab_size
        self.vocab_list = vocab_list
        self.embed_size = embed_size
        self.max_len = max_len
        self.vocab = vocab
        self.tokenizer = tokenizer
        self.enc = encoder(embed_size, resnet, unfreeze=unfreeze1)
        self.dec = decoder1(embed_size, vocab_size, num_layers, vocab, tokenizer, max_len = max_len, unfreeze = unfreeze2)
    def forward(self, image, ids, mask, test = False):
        if(not test):
            enc_embed = self.enc(image)
            words_probability = self.dec(enc_embed, ids, mask)
            words_pred = torch.argmax(words_probability, dim=1)
            captions_strings = self.index_to_captions(words_pred, mask)
            return words_probability, captions_strings
        else:
            enc_embed = self.enc(image)
            distinct_words = []
            total_probability = []
            prev_word = None
            for i in range(self.max_len):
                if(prev_word is not None):
                    # feed the previously predicted word back as the next input
                    # NOTE(review): mask is rebuilt on CPU — confirm device handling
                    mask = torch.ones(ids.size(0), 1)
                    # fixed: the inner loop used `i` as its variable too,
                    # clobbering the outer step index passed to evaluate_test
                    for j in range(len(ids)):
                        ids[j] = torch.tensor(self.vocab_list[int(prev_word[j])])
                words_probability = self.dec.evaluate_test(enc_embed, ids, mask, i)
                words_pred = torch.argmax(words_probability, dim=1)
                # fixed: was `prev_wrod = words_pred` (typo), so the feedback
                # branch above could never trigger
                prev_word = words_pred
                distinct_words.append(words_pred)
                total_probability.append(words_probability)
            words_probability = torch.stack(total_probability, dim=2)
            all_words = torch.stack(distinct_words, dim=1)
            captions_strings = self.index_to_captions(all_words, mask)
            return words_probability, captions_strings
    def index_to_captions(self, words_tensor, mask):
        """Map predicted vocab indices back to space-joined caption strings;
        mask[i].argmin() is treated as the caption length (first padding slot)."""
        #(batch, words)
        captions = []
        for i in range(len(words_tensor)):
            cur_words = []
            ind = mask[i].argmin()
            for j in range(ind):
                cur_words.append(self.inverse_vocab[int(self.vocab_list[int(words_tensor[i][j])])])
            captions.append(' '.join(cur_words))
        return captions
class EncoderCNN(nn.Module):
    """Frozen ResNet-50 trunk that emits a 7x7 grid of 2048-d features."""
    def __init__(self):
        super(EncoderCNN, self).__init__()
        backbone = models.resnet50(pretrained=True)
        for weight in backbone.parameters():
            weight.requires_grad_(False)
        # drop avgpool + fc so the spatial grid survives for attention
        self.resnet = nn.Sequential(*list(backbone.children())[:-2])
    def forward(self, images):
        grid = self.resnet(images)                             #(batch_size,2048,7,7)
        grid = grid.permute(0, 2, 3, 1)                        #(batch_size,7,7,2048)
        return grid.view(grid.size(0), -1, grid.size(-1))      #(batch_size,49,2048)
#Bahdanau Attention
class Attention(nn.Module):
    """Additive (Bahdanau) attention over a grid of encoder features."""
    def __init__(self, encoder_dim,decoder_dim,attention_dim):
        super(Attention, self).__init__()
        self.attention_dim = attention_dim
        self.W = nn.Linear(decoder_dim,attention_dim)
        self.U = nn.Linear(encoder_dim,attention_dim)
        self.A = nn.Linear(attention_dim,1)
    def forward(self, features, hidden_state):
        """Return (alpha, context) for one decoding step."""
        # project both inputs into the shared attention space and score them
        scores = self.A(torch.tanh(self.U(features) + self.W(hidden_state).unsqueeze(1)))
        alpha = F.softmax(scores.squeeze(2), dim=1)            #(batch_size,num_layers)
        context = (features * alpha.unsqueeze(2)).sum(dim=1)   #(batch_size,features_dim)
        return alpha, context
#Attention Decoder
class DecoderRNN(nn.Module):
    """LSTM-cell decoder with Bahdanau attention over encoder features.

    forward() runs teacher-forced training over a caption batch;
    generate_caption() decodes greedily token by token until <EOS>.
    """
    def __init__(self,embed_size, vocab_size, attention_dim,encoder_dim,decoder_dim,drop_prob=0.3):
        super().__init__()
        self.vocab_size = vocab_size
        self.attention_dim = attention_dim
        self.decoder_dim = decoder_dim
        self.embedding = nn.Embedding(vocab_size,embed_size)
        self.attention = Attention(encoder_dim,decoder_dim,attention_dim)
        # h and c are initialised from the mean encoder feature
        self.init_h = nn.Linear(encoder_dim, decoder_dim)
        self.init_c = nn.Linear(encoder_dim, decoder_dim)
        self.lstm_cell = nn.LSTMCell(embed_size+encoder_dim,decoder_dim,bias=True)
        # NOTE(review): f_beta (context gating) is defined but never used below
        self.f_beta = nn.Linear(decoder_dim, encoder_dim)
        self.fcn = nn.Linear(decoder_dim,vocab_size)
        self.drop = nn.Dropout(drop_prob)
    def forward(self, features, captions):
        """Teacher-forced decode; returns (preds, alphas) of shapes
        (batch, seq_len-1, vocab) and (batch, seq_len-1, num_features)."""
        embeds = self.embedding(captions)
        h, c = self.init_hidden_state(features)  # (batch_size, decoder_dim)
        seq_length = len(captions[0])-1 #Exclude the last one
        batch_size = captions.size(0)
        num_features = features.size(1)
        preds = torch.zeros(batch_size, seq_length, self.vocab_size).to(device)
        alphas = torch.zeros(batch_size, seq_length,num_features).to(device)
        for s in range(seq_length):
            alpha,context = self.attention(features, h)
            # condition each step on the gold token s plus the attended context
            lstm_input = torch.cat((embeds[:, s], context), dim=1)
            h, c = self.lstm_cell(lstm_input, (h, c))
            output = self.fcn(self.drop(h))
            preds[:,s] = output
            alphas[:,s] = alpha
        return preds, alphas
    def generate_caption(self,features,max_len=20,vocab=None):
        """Greedy inference from <SOS>; returns (token list, attention maps)."""
        batch_size = features.size(0)
        h, c = self.init_hidden_state(features)  # (batch_size, decoder_dim)
        alphas = []
        word = torch.tensor(vocab.stoi['<SOS>']).view(1,-1).to(device)
        embeds = self.embedding(word)
        captions = []
        for i in range(max_len):
            alpha,context = self.attention(features, h)
            alphas.append(alpha.cpu().detach().numpy())
            lstm_input = torch.cat((embeds[:, 0], context), dim=1)
            h, c = self.lstm_cell(lstm_input, (h, c))
            output = self.fcn(self.drop(h))
            output = output.view(batch_size,-1)
            # greedy: take the most probable word and feed it back in
            predicted_word_idx = output.argmax(dim=1)
            captions.append(predicted_word_idx.item())
            if vocab.itos[predicted_word_idx.item()] == "<EOS>":
                break
            embeds = self.embedding(predicted_word_idx.unsqueeze(0))
        return [vocab.itos[idx] for idx in captions],alphas
    def init_hidden_state(self, encoder_out):
        # both states are projected from the mean encoder feature vector
        mean_encoder_out = encoder_out.mean(dim=1)
        h = self.init_h(mean_encoder_out)  # (batch_size, decoder_dim)
        c = self.init_c(mean_encoder_out)
        return h, c
class EncoderDecoder(nn.Module):
    """Convenience wrapper wiring EncoderCNN features into DecoderRNN."""
    def __init__(self,embed_size, vocab_size, attention_dim,encoder_dim,decoder_dim,drop_prob=0.3):
        super().__init__()
        self.encoder = EncoderCNN()
        self.decoder = DecoderRNN(
            embed_size=embed_size,
            vocab_size = vocab_size,
            attention_dim=attention_dim,
            encoder_dim=encoder_dim,
            decoder_dim=decoder_dim
        )
    def forward(self, images, captions):
        # extract the feature grid, then teacher-force the decoder over it
        return self.decoder(self.encoder(images), captions)
| bhavyanarang/Image-Captioning | scripts/models.py | models.py | py | 14,504 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
... |
32301689640 | #!/usr/bin/env python3
import os
import argparse
import copy
import time
import json
import signal
import types
from datetime import datetime
from contextlib import contextmanager
import wandb
import yaml
def main():
    """CLI entry point: stream a marian train.log into a wandb run."""
    args = parse_args()
    init_wandb(args)
    log_parser = LogParser(args.experiment_dir, wait_for_new_lines=args.wait_for_logs)
    for step, log_data in log_parser.main_loop():
        wandb.log(log_data, step=step)
    print('Parser stopped')
    # upload best checkpoints, logs and config once parsing is done
    save_best_models(args.experiment_dir)
def parse_args():
    """Define and parse the command-line interface of the runner."""
    cli = argparse.ArgumentParser(description='Weights and Biases runner')
    cli.add_argument(
        '--experiment-dir', '-d',
        type=str,
        help='Folder with experiment',
    )
    cli.add_argument(
        '--project-name',
        default='multitarget-mt',
        help='Project name to save at wandb',
    )
    cli.add_argument(
        '--run-name',
        default=None,
        help='Project name to save at wandb',
    )
    # comma-separated string on the CLI becomes a list of tag strings
    cli.add_argument(
        '--tags', '-t',
        type=lambda s: s.split(','),
        default=[],
        help='Tag of the experiment: e.g. random, mono, wals, etc.'
    )
    cli.add_argument(
        '--infer-language-tags', '-l',
        action='store_const',
        const=True,
        default=False,
        help='Infer languages from model name, save as tags'
    )
    cli.add_argument(
        '--wait-for-logs', '-w',
        action='store_const',
        const=True,
        default=False,
        help='Wait for new lines in logs (use during the training)'
    )
    return cli.parse_args()
def init_wandb(args):
    """Initialise (or resume) the wandb run for this experiment directory."""
    config = get_config(args.experiment_dir)
    run_name = os.path.basename(os.path.normpath(args.experiment_dir))
    language_tags = get_language_tags(args)
    config = add_language_info(config, language_tags)
    wandb.init(
        project=args.project_name,
        name=run_name,
        resume=get_run_id(args.experiment_dir),
        dir=args.experiment_dir,
        tags=args.tags + language_tags,
        config=config,
    )
    # remember the run id so a restarted parser resumes the same wandb run
    save_run_id(args.experiment_dir)
def get_run_id(experiment_dir):
    """Return the wandb run id persisted in `<experiment_dir>/.run_id`,
    or False when no id has been saved yet (fresh run)."""
    run_id_file = os.path.join(experiment_dir, '.run_id')
    try:
        with open(run_id_file, encoding='utf-8') as handle:
            return handle.readline()
    except FileNotFoundError:
        return False
def save_run_id(experiment_dir):
    # Persist the current wandb run id so later invocations can resume the run.
    with open(os.path.join(experiment_dir, '.run_id'), 'w', encoding='utf-8') as f:
        f.write(str(wandb.run.id))
def get_language_tags(args):
    """Language tags inferred from the model folder name, when enabled via -l."""
    if not args.infer_language_tags:
        return []
    return extract_langs(args.experiment_dir)
def extract_langs(experiment_dir):
    """ model_en2fres -> source:en, target:fr, target:es """
    folder = os.path.basename(os.path.normpath(experiment_dir))
    # the part after the last '_' encodes "<sources>2<targets>" in 2-letter codes
    source, target = folder.split('_')[-1].split('2')
    def split_codes(codes):
        return [codes[i:i + 2] for i in range(0, len(codes), 2)]
    source_tags = ['source:' + code for code in split_codes(source)]
    target_tags = ['target:' + code for code in split_codes(target)]
    counts = [
        'n_sources:' + str(len(source_tags)),
        'n_targets:' + str(len(target_tags)),
    ]
    return source_tags + target_tags + counts
def add_language_info(config, language_tags):
    """Return a copy of config with `n_targets` counted from the tags
    (None when there are no target tags, so wandb shows it as unset)."""
    updated = copy.deepcopy(config)
    target_count = len([tag for tag in language_tags if 'target:' in tag])
    updated['n_targets'] = target_count if target_count else None
    return updated
def get_config(path):
    """Load the marian model config from `<path>/model.npz.yml` and normalise
    it for wandb (drop noisy keys, fix values, flatten lists)."""
    config_path = os.path.join(path, 'model.npz.yml')
    config = get_config_when_created(config_path)
    config = filter_config(config)
    config = fix_config_values(config)
    config = convert_config_list_values(config)
    #config = set_languages(config, path)
    return config
def get_config_when_created(path):
    """Wait for the yml config file to appear, then parse it.

    The 2s sleep gives marian time to finish writing the file after creation.
    """
    def safe_load(f):
        time.sleep(2)
        return yaml.safe_load(f)
    config = read_when_created(path, safe_load, wait=10)
    # NOTE(review): debug print left in — remove once config parsing is trusted?
    print(config)
    return config
def filter_config(config):
    """Strip run-local / path-like keys that should not be logged to wandb."""
    ignored_keys = {
        'log-level', 'quiet', 'quiet-translation',
        'train-sets', 'vocabs', 'overwrite', 'no-reload',
        'keep-best', 'valid-sets', 'log', 'valid-log',
        'relative-paths', 'model', 'ignore-model-config',
        'valid-script-path', 'tempdir',
    }
    kept = {}
    for key, value in config.items():
        if key in ignored_keys:
            continue
        kept[key] = value
    return kept
def fix_config_values(config):
    """Normalise raw marian config values for logging.

    - 'devices': replace the device list with the device count
    - 'version': keep only the version number, dropping hash and compile date

    The input dict is not modified. Robustness fix: both keys are now treated
    as optional (the original raised KeyError when 'devices' was absent).
    """
    fixed_config = copy.deepcopy(config)
    # change devices enumeration to device number
    if 'devices' in fixed_config:
        fixed_config['devices'] = len(fixed_config['devices'])
    # save only version, drop hash and compile date
    if 'version' in fixed_config:
        # sometimes the last line with the version is not in the config
        # immediately, but it is not an important value to be stored
        fixed_config['version'] = fixed_config['version'].split()[0]
    return fixed_config
def convert_config_list_values(config):
    """Flatten list values: key:[v1,v2] becomes key-1:v1, key-2:v2 (1-based)."""
    flattened = {}
    for key, value in config.items():
        if type(value) is not list:
            flattened[key] = value
            continue
        for position, element in enumerate(value, 1):
            flattened[key + '-' + str(position)] = element
    return flattened
def set_languages(config, path):
    """Derive target languages from a 'model_xx2yyzz'-style folder name and
    return a copy of config with 'languages' and 'n_languages' added."""
    folder = os.path.basename(os.path.normpath(path))
    code_string = folder.split('2')[-1]
    languages = [code_string[i:i + 2] for i in range(0, len(code_string), 2)]
    updated = copy.deepcopy(config)
    updated['languages'] = languages
    updated['n_languages'] = len(languages)
    return updated
def read_when_created(path, fn, mode='r', wait=2, stop=None):
if stop is None:
stop = lambda: False
while not stop():
try:
with open(path, mode, encoding='utf-8') as f:
return fn(f)
except FileNotFoundError:
time.sleep(wait)
continue
def read_when_created_gen(path, fn, mode='r', wait=5, stop=None):
    """Generator version of read_when_created: yields from fn(file) once the
    file exists, polling every `wait` seconds until then (or `stop()` fires).

    Fix: `encoding` is only passed for text modes, matching read_when_created
    (the original raised ValueError for binary modes).
    """
    if stop is None:
        stop = lambda: False
    open_kwargs = {} if 'b' in mode else {'encoding': 'utf-8'}
    while not stop():
        try:
            with open(path, mode, **open_kwargs) as f:
                yield from fn(f)
            break
        except FileNotFoundError:
            time.sleep(wait)
            continue
def follow(thefile, stop=None):
    """Tail an open file object, yielding lines as they appear.

    When no new line is available the generator sleeps briefly and then, if
    `stop()` reports True, exits; otherwise it keeps polling forever.
    """
    if stop is None:
        stop = lambda: False
    while True:
        line = thefile.readline()
        if line:
            yield line
            continue
        time.sleep(0.1)
        # file is drained; only give up once the caller asks us to stop
        if stop():
            break
def save_best_models(experiment_dir):
    # Upload the best checkpoints, all logs and the model config to wandb storage.
    print('Saving models... ', end='')
    wandb.save(os.path.join(experiment_dir, 'model.npz.best-*'))
    wandb.save(os.path.join(experiment_dir, '*.log'))
    wandb.save(os.path.join(experiment_dir, 'model.npz.yml'))
    print('Done.')
class LogParser:
    """Follows a marian `train.log` and yields (step, log_data) pairs suitable
    for `wandb.log`. Handles plain training lines, `[valid]` validation lines
    and the block of "Best translation" lines emitted during validation.
    """
    def __init__(self, experiment_dir, wait_for_new_lines=False):
        # wait_for_new_lines=True tails the log live; False parses what
        # already exists and then stops
        self.experiment_dir = experiment_dir
        self.train_log_path = os.path.join(self.experiment_dir, 'train.log')
        self.wait_for_new_lines = wait_for_new_lines
        self.should_be_stopped = False
    def stop(self, signum, frame):
        # signal-handler signature; flips the flag checked by the generators
        print('Stopping log parser...')
        self.should_be_stopped = True
    def main_loop(self):
        """yields (step, log_dict)"""
        # SIGINT/SIGTERM are trapped for the lifetime of the loop so Ctrl-C
        # shuts the parser down cleanly instead of killing it mid-read
        with signal_handler(signal.SIGINT, self.stop),\
             signal_handler(signal.SIGTERM, self.stop):
            yield from read_when_created_gen(
                self.train_log_path,
                fn=lambda f: self._process_train_log_file(f),
                stop=lambda: self.should_be_stopped,
            )
    def _process_train_log_file(self, train_log_file):
        # TODO: refactor this monster
        def extract_time(log_line):
            '''[dtime] remaining log line'''
            dtime = datetime.strptime(log_line[1:20], "%Y-%m-%d %H:%M:%S")
            return dtime, log_line[22:]
        def extract_is_validation(log_line):
            '''
            [valid] remaining_line -> True, remaining_line
            log_line -> False, log_line
            '''
            validation = '[valid]'
            if log_line[:len(validation)] == validation:
                return True, log_line[len(validation):]
            else: return False, log_line
        def is_training_log(log_line):
            return log_line.find('Ep. ') >= 0
        def parse_train_log(line):
            """ Ep. 2 : Up. 1000 : Sen. 311,824 : Cost 4.95877838 : Time 163.05s : 33655.40 words/s : L.r. 1.0000e-04 """
            # maps a marian field label to (wandb metric name, parsed value)
            processing = {
                "Ep.": lambda s: ('train/epoch', int(s)),
                "Up.": lambda s: ('step', int(s)),
                "Sen.": lambda s: ('train/sentences', int(s.replace(',',''))),
                "Cost": lambda s: ('train/loss', float(s)),
                "Time": lambda s: ('train/time', float(s[:-1])),
                "words/s": lambda s: ('train/speed', float(s)),
                "L.r.": lambda s: ('train/learning_rate', float(s)),
            }
            def process(log_line_chunk):
                # chunks come as either "Label value" or "value label"
                first, second = log_line_chunk.strip().split(' ')
                try:
                    return processing[first](second)
                except KeyError:
                    try:
                        return processing[second](first)
                    except KeyError:
                        return None
            # NOTE(review): the filter tests `info_chunk` (a string, never
            # None) rather than the result of process(); an unrecognised chunk
            # would make dict() raise on the None element — confirm every
            # chunk in a training line matches the table above.
            log_data = dict(
                process(info_chunk)
                for info_chunk
                in line.split(':')
                if info_chunk is not None
            )
            step = log_data['step']
            del log_data['step']
            return step, log_data
        def parse_val_log(line):
            """ Ep. 26 : Up. 35000 : ce-mean-words : 1.63575 : new best """
            line = line.split(':')
            step = int(line[1].strip().split(' ')[1])
            log_data = {}
            metric_name = line[2].strip()
            metric_value = float(line[3].strip())
            # per-language metrics keep their name; global ones get 'valid/'
            if 'lang/' not in metric_name:
                metric_name = 'valid/' + metric_name
            log_data[metric_name] = metric_value
            metric_stalled = line[4].strip()
            if 'no effect' not in metric_stalled:
                if 'stalled' in metric_stalled:
                    metric_stalled = int(metric_stalled.split()[1])
                else:
                    metric_stalled = 0
                log_data[metric_name+'_stalled'] = metric_stalled
            return step, log_data
        def parse_translation(lines):
            # consumes lines from the shared generator until the
            # "Total translation time" marker; collects example translations
            data = []
            log_data = {}
            for line in lines:
                if translation_ends(line):
                    log_data['valid/translation_time'] = float(line.strip().split(' ')[-1][:-1])
                    break
                # NOTE(review): returning None here propagates None to the
                # caller, which then yields (step, None) — confirm intended.
                if not 'Best translation' in line:
                    return None
                # cut time
                line = line[line.find(']')+1:]
                colon = line.find(':')
                n = line[:colon]
                translation = line[colon+1:]
                n = int(n.strip().split(' ')[-1])
                translation = translation.strip()
                data.append([n, translation])
            log_data['valid/translation_example'] = wandb.Table(
                data=data, columns=["N","Translation"]
            )
            return log_data
        def training_finished(line):
            return "Training finished" in line
        def translation_begins(line):
            return "Translating" in line
        def translation_ends(line):
            return "Total translation time" in line
        last_step=0
        lines = follow(
            train_log_file,
            stop=lambda: self.should_be_stopped or not self.wait_for_new_lines
        )
        for line in lines:
            try:
                dtime, line = extract_time(line)
            except:
                # lines without a leading timestamp are not log records; skip
                continue
            if training_finished(line):
                break
            is_validation, line = extract_is_validation(line)
            if is_validation:
                step, log_data = parse_val_log(line)
            elif is_training_log(line):
                step, log_data = parse_train_log(line)
            elif translation_begins(line):
                # pass generator here to consume all lines with translations
                log_data = parse_translation(lines)
                # translation does not have 'step', that is why there is this 'last_step' thing
                step = last_step
            else:
                continue
            last_step = step
            yield step, log_data
@contextmanager
def signal_handler(handled_signal, new_handler):
    """Temporarily install `new_handler` for `handled_signal`; the previous
    handler is restored on exit, even if the body raises."""
    # signal.signal returns the handler it replaces
    previous = signal.signal(handled_signal, new_handler)
    try:
        yield
    finally:
        signal.signal(handled_signal, previous)
# Script entry point.
if __name__=='__main__':
    main()
| b0hd4n/multitarget_mt | scripts/wandb_runner.py | wandb_runner.py | py | 13,135 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "wandb.log",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
10785644029 | # coding:utf-8
"""
@file: .py
@author: dannyXSC
@ide: PyCharm
@createTime: 2022年05月15日 19点47分
@Function: 把协作关系转化为csv文件
"""
import pandas as pd
from Reader.CoauthorReader import read_coauthor
from utils import get_coauthor_csv_path, timer
# Destination CSV path and the in-memory accumulator filled by read_coauthor_data().
coauthor_path = get_coauthor_csv_path()
collaboration_list = []
@timer("读取协作关系")
def read_coauthor_data():
cnt = 0
for collaboration in read_coauthor():
if len(collaboration.name1.strip()) == 0 \
or len(collaboration.name2.strip()) == 0 \
or collaboration.count is None:
continue
collaboration_list.append(collaboration.to_dict())
cnt += 1
if cnt % 100 == 0:
print(f"已读取{cnt}条数据")
@timer("写入csv文件")
def write_csv():
pd.DataFrame(collaboration_list).to_csv(coauthor_path, index=False, sep=';')
def main():
    # ETL entry point: read collaborations, then persist them as CSV.
    read_coauthor_data()
    write_csv()
| dannyXSC/BusinessIntelligence | ETL/Service/TransformCoauthorToCSV.py | TransformCoauthorToCSV.py | py | 956 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.get_coauthor_csv_path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Reader.CoauthorReader.read_coauthor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "utils.timer",
"line_number": 19,
"usage_type": "call"
},
{
"api_n... |
71039993315 | import requests
import pandas as pd
def get_children_leis(targetLei):
    """
    Gets a list of LEIs for all ultimate children of a given LEI.

    Parameters
    ----------
    targetLei : str
        The LEI of the parent entity for which the ultimate children LEIs are to be retrieved.

    Returns
    -------
    df : DataFrame
        A Pandas DataFrame with a single 'LEI' column containing the LEI values
        for all ultimate children of the parent entity.
    """
    # Base URL; the page number is appended per request. Fix: the original
    # mutated the URL with `url[0:-1] + str(page)`, which corrupts it as soon
    # as the page number has more than one digit (page 10 -> "...number]=111").
    base_url = ("https://api.gleif.org/api/v1/lei-records/" + targetLei
                + "/ultimate-children?page[size]=50&page[number]=")
    headers = {'Accept': 'application/vnd.api+json'}
    # First request only to learn how many pages there are
    response = requests.request("GET", base_url + "1", headers=headers, data={})
    data = response.json()
    lastPage = data['meta']['pagination']['lastPage']
    df_list = []
    for currentPage in range(1, lastPage + 1):
        print('Processing page ' + str(currentPage) + ' of ' + str(lastPage))
        response = requests.request("GET", base_url + str(currentPage), headers=headers, data={})
        data = response.json()
        # Extract the LEI values for each ultimate child entity on this page
        ultimate_children_leis = []
        for child in data['data']:
            ultimate_children_leis.append(child['attributes']['lei'])
        df_list.append(pd.DataFrame({'LEI': ultimate_children_leis}))
    # Concatenate the per-page DataFrames into a single DataFrame
    df = pd.concat(df_list)
    return df
# Example usage — NOTE: performs live GLEIF API calls at import time.
targetLei = '5493006QMFDDMYWIAM13'
get_children_leis(targetLei)
{
"api_name": "requests.request",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
... |
19323733535 | import random
import uuid
import copy
import json
import multihash
from intergov.domain.jurisdiction import Jurisdiction
from intergov.domain.wire_protocols import generic_discrete as gd
from intergov.domain import uri as u
from intergov.serializers import generic_discrete_message as ser
def _random_multihash():
    # Random sha2-256 multihash (base58) derived from a fresh UUID — used as
    # an opaque-but-valid document identifier throughout the fixtures below.
    digest = str(uuid.uuid4()).encode()
    return multihash.to_b58_string(multihash.encode(digest, 'sha2-256'))
def _generate_msg_dict(**kwargs):
    """Build a random message dict; any kwarg overrides the generated field."""
    # sender and receiver are two distinct jurisdictions
    jurisdictions = ['AU', 'CN']
    sender = random.choice(jurisdictions)
    jurisdictions.remove(sender)
    receiver = random.choice(jurisdictions)
    # predicates contain dots so predicate__wild filters can be exercised
    predicate = ".".join(_random_multihash()[:4] for _ in range(4))
    return {
        "sender": sender,
        "receiver": receiver,
        "subject": _random_multihash(),
        "obj": _random_multihash(),
        "predicate": predicate,
        **kwargs
    }
def _generate_msg_object(**kwargs):
    # Same random payload as _generate_msg_dict, wrapped in a gd.Message.
    return gd.Message.from_dict(_generate_msg_dict(**kwargs))
def _remove_message_params(data, keys=[], copy_data=True, set_none=False):
if copy_data:
data = copy.deepcopy(data)
for key in keys:
if set_none:
data[key] = None
else:
del data[key]
return data
def _remove_required_message_params(data, index=-1, indexes=None):
    """Drop required message attributes (by position in gd.Message.required_attrs).

    Fix: `indexes` previously defaulted to a mutable list that the function
    appended to, so removed indexes leaked between calls. A fresh list is now
    built on every call; a caller-supplied iterable is copied, not mutated.
    """
    indexes = list(indexes) if indexes else []
    if index >= 0:
        indexes.append(index)
    return _remove_message_params(data, keys=[gd.Message.required_attrs[i] for i in indexes])
def _encode_message_dict(data):
    # Serialize with the project's Message JSON encoder (handles domain objects).
    return json.dumps(data, cls=ser.MessageJSONEncoder)
def _diff_message_dicts(left, right, keys=[]):
diff = []
for key in keys:
l_val = left.get(key)
r_val = right.get(key)
if l_val != r_val:
diff.append({
'key': key,
'left': l_val,
'right': r_val
})
return diff
def _generate_message_params():
    """Return (sender, receiver, subject, obj, predicate) as domain objects
    built from a random message dict."""
    fields = _generate_msg_dict()
    sender = Jurisdiction(fields["sender"])
    receiver = Jurisdiction(fields["receiver"])
    subject_uri = u.URI(fields["subject"])
    obj_uri = u.URI(fields["obj"])
    predicate_uri = u.URI(fields["predicate"])
    return (sender, receiver, subject_uri, obj_uri, predicate_uri)
def _generate_invalid_uri_list():
return (None, "invalid", 42)
def test_message_from_dict():
    """A message built from a generated dict passes validation."""
    adict = _generate_msg_dict()
    msg = gd.Message.from_dict(adict)
    assert msg.is_valid()
def test_message_to_dict():
    """from_dict followed by to_dict round-trips the original dict."""
    adict = _generate_msg_dict()
    msg = gd.Message.from_dict(adict)
    assert msg.to_dict() == adict
def test_message_comparison():
    """Two messages built from the same dict compare equal."""
    adict = _generate_msg_dict()
    m1 = gd.Message.from_dict(adict)
    m2 = gd.Message.from_dict(adict)
    assert m1 == m2
def test_validation_OK():
    """A message with all required params supplied is valid."""
    A, B, subject, obj, predicate = _generate_message_params()
    msg = gd.Message(
        sender=A,
        receiver=B,
        subject=subject,
        obj=obj,
        predicate=predicate)
    assert msg.is_valid()
def test_validation_invalid_no_sender():
    """Omitting the sender makes the message invalid."""
    A, B, subject, obj, predicate = _generate_message_params()
    msg = gd.Message(
        receiver=B,
        subject=subject,
        obj=obj,
        predicate=predicate)
    assert not msg.is_valid()
def test_validation_invalid_sender():
    """Each malformed sender value (None, non-URI string, int) is rejected."""
    A, B, subject, obj, predicate = _generate_message_params()
    for x in _generate_invalid_uri_list():
        A = x
        msg = gd.Message(
            sender=A,
            receiver=B,
            subject=subject,
            obj=obj,
            predicate=predicate)
        assert not msg.is_valid()
def test_validation_invalid_no_reciever():
    """Omitting the receiver makes the message invalid.

    NOTE(review): function name misspells "receiver"; kept as-is since
    renaming changes the collected test id.
    """
    A, B, subject, obj, predicate = _generate_message_params()
    msg = gd.Message(
        sender=A,
        subject=subject,
        obj=obj,
        predicate=predicate)
    assert not msg.is_valid()
def test_validation_invalid_receiver():
    """Each malformed receiver value is rejected."""
    A, B, subject, obj, predicate = _generate_message_params()
    for x in _generate_invalid_uri_list():
        B = x
        msg = gd.Message(
            sender=A,
            receiver=B,
            subject=subject,
            obj=obj,
            predicate=predicate)
        assert not msg.is_valid()
def test_validation_invalid_no_subject():
    """Omitting the subject makes the message invalid."""
    A, B, subject, obj, predicate = _generate_message_params()
    msg = gd.Message(
        sender=A,
        receiver=B,
        obj=obj,
        predicate=predicate)
    assert not msg.is_valid()
def test_validation_invalid_no_obj():
    """Omitting the obj makes the message invalid."""
    A, B, subject, obj, predicate = _generate_message_params()
    msg = gd.Message(
        sender=A,
        receiver=B,
        subject=subject,
        predicate=predicate)
    assert not msg.is_valid()
def test_validation_invalid_obj():
    """Each malformed obj value (None, non-URI string, int) is rejected."""
    A, B, subject, obj, predicate = _generate_message_params()
    for x in _generate_invalid_uri_list():
        obj = x
        msg = gd.Message(
            sender=A,
            receiver=B,
            subject=subject,
            obj=obj,
            predicate=predicate)
        assert not msg.is_valid()
def test_validation_invalid_no_predicate():
    """Omitting the predicate makes the message invalid."""
    A, B, subject, obj, predicate = _generate_message_params()
    msg = gd.Message(
        sender=A,
        receiver=B,
        subject=subject,
        obj=obj)
    assert not msg.is_valid()
def test_validation_invalid_predicate():
    """Each malformed predicate value is rejected."""
    A, B, subject, obj, predicate = _generate_message_params()
    for x in _generate_invalid_uri_list():
        predicate = x
        msg = gd.Message(
            sender=A,
            receiver=B,
            subject=subject,
            obj=obj,
            predicate=predicate)
        assert not msg.is_valid()
def test_validation_bogus_parameter():
    """An unexpected keyword argument makes the message invalid.

    Fix: the original also omitted ``predicate``, so the assertion would
    pass even if bogus-parameter handling were broken (the missing
    required attribute alone makes the message invalid). Supplying the
    full required set isolates the bogus parameter as the only defect.
    """
    A, B, subject, obj, predicate = _generate_message_params()
    msg = gd.Message(
        bogus=True,
        sender=A,
        receiver=B,
        subject=subject,
        obj=obj,
        predicate=predicate)
    assert not msg.is_valid()
| bizcubed/intergov | tests/unit/domain/wire_protocols/test_generic_message.py | test_generic_message.py | py | 6,256 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "uuid.uuid4",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "multihash.to_b58_string",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "multihash.encode",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.choice",
... |
14202406531 | from typing import ClassVar, Dict, List, Type, Union, TYPE_CHECKING
from typing_extensions import Annotated
from inflection import underscore
from pydantic import BaseModel, Field, ConfigDict, BeforeValidator, PlainSerializer
from pydantic.functional_validators import AfterValidator
from pydantic._internal._model_construction import ModelMetaclass
from pyparsing import ParseException
from ..privs import DatabasePriv, GlobalPriv, Privs, SchemaPriv
from ..enums import AccountEdition, Scope
from ..props import BoolProp, EnumProp, Props, IntProp, StringProp, TagsProp, FlagProp
from ..parse import _parse_create_header, _parse_props, _resolve_resource_class
from ..sql import SQL, track_ref
from ..identifiers import FQN
from ..builder import tidy_sql
from .validators import coerce_from_str
# https://stackoverflow.com/questions/62884543/pydantic-autocompletion-in-vs-code
if TYPE_CHECKING:
    # Under a type checker / editor, alias the real dataclass decorator so
    # decorated resource classes get dataclass-style signature help.
    from dataclasses import dataclass as _fix_class_documentation
else:
    # At runtime the decorator is a no-op passthrough.
    def _fix_class_documentation(cls):
        return cls
# TODO: snowflake resource name compatibility
# TODO: make this configurable
def normalize_resource_name(name: str) -> str:
    """Normalize a resource name to its canonical (uppercase) form."""
    return name.upper()
# Consider making resource names immutable with Field(frozen=True)
# A str that is uppercased on validation via normalize_resource_name.
ResourceName = Annotated[str, "str", AfterValidator(normalize_resource_name)]
# Serializer that emits a referenced resource as just its name (None when unset).
serialize_resource_by_name = PlainSerializer(lambda resource: resource.name if resource else None, return_type=str)
class _Resource(ModelMetaclass):
    """Metaclass that registers every Resource subclass in ``classes``.

    The registry key is the snake_case class name (``Database`` ->
    ``"database"``), which ``Resource.from_sql`` uses to resolve the
    concrete class for a CREATE statement.
    """
    classes: Dict[str, Type["Resource"]] = {}
    resource_key: str = None
    def __new__(cls, name, bases, attrs):
        cls_ = super().__new__(cls, name, bases, attrs)
        cls_.resource_key = underscore(name)
        # Guarantee __doc__ is a string (subclasses may omit a docstring).
        cls_.__doc__ = cls_.__doc__ or ""
        cls.classes[cls_.resource_key] = cls_
        return cls_
class Resource(BaseModel, metaclass=_Resource):
    """Base class for all resources.

    Tracks references to other resources (``_refs``), parses itself from
    SQL (``from_sql``), and renders its own CREATE/DROP lifecycle SQL.

    Fixes over the original:
      * ``_requires`` called ``self._refs.add(...)`` — ``_refs`` is a list
        (see the ``append``/``extend`` calls in ``model_post_init``), and
        lists have no ``add``; this always raised AttributeError.
      * ``__format__`` returned an FQN object; Python requires
        ``__format__`` to return ``str``, so f-string interpolation of a
        resource raised TypeError.
    """
    model_config = ConfigDict(
        from_attributes=True,
        extra="forbid",
        validate_assignment=True,
        populate_by_name=True,
        # Don't use this
        use_enum_values=True,
    )
    lifecycle_privs: ClassVar[Privs] = None
    props: ClassVar[Props]
    resource_type: ClassVar[str] = None
    serialize_as_list: ClassVar[bool] = False
    implicit: bool = Field(exclude=True, default=False, repr=False)
    stub: bool = Field(exclude=True, default=False, repr=False)
    _refs: List["Resource"] = []

    def model_post_init(self, ctx):
        # Collect references: any non-stub Resource-valued field, plus any
        # resources referenced from SQL-typed fields (whose rendered text
        # then replaces the field value).
        for field_name in self.model_fields.keys():
            field_value = getattr(self, field_name)
            if isinstance(field_value, Resource) and not field_value.stub:
                self._refs.append(field_value)
            elif isinstance(field_value, SQL):
                self._refs.extend(field_value.refs)
                setattr(self, field_name, field_value.sql)

    @classmethod
    def fetchable_fields(cls, data):
        """Drop keys whose fields are marked ``fetchable=False`` (write-only
        attributes such as passwords that cannot be read back)."""
        data = data.copy()
        for key in list(data.keys()):
            field = cls.model_fields[key]
            fetchable = field.json_schema_extra is None or field.json_schema_extra.get("fetchable", True)
            if not fetchable:
                del data[key]
        return data

    @classmethod
    def from_sql(cls, sql):
        """Parse a CREATE statement into a resource instance.

        When called on the base class, the concrete subclass is resolved
        from the statement's resource keyword.

        Raises:
            ParseException: if the props section cannot be parsed.
        """
        resource_cls = cls
        if resource_cls == Resource:
            resource_cls = Resource.classes[_resolve_resource_class(sql)]
        identifier, remainder_sql = _parse_create_header(sql, resource_cls)
        try:
            props = _parse_props(resource_cls.props, remainder_sql) if remainder_sql else {}
            return resource_cls(**identifier, **props)
        except ParseException as err:
            raise ParseException(f"Error parsing {resource_cls.__name__} props {identifier}") from err

    def __format__(self, format_spec):
        # Used inside f-string SQL templates: record the reference and
        # substitute the fully qualified name.
        track_ref(self)
        # FIX: __format__ must return a str; fully_qualified_name is an FQN.
        return str(self.fully_qualified_name)

    def _requires(self, resource):
        # FIX: _refs is a list, not a set — use append (was .add()).
        self._refs.append(resource)

    def requires(self, *resources):
        """Declare explicit dependencies; accepts resources or a single list."""
        if isinstance(resources[0], list):
            resources = resources[0]
        for resource in resources:
            self._requires(resource)
        return self

    @property
    def fully_qualified_name(self):
        return FQN(name=self.name.upper())

    @property
    def fqn(self):
        return self.fully_qualified_name

    @property
    def refs(self):
        return self._refs

    @classmethod
    def lifecycle_create(cls, fqn, data, or_replace=False, if_not_exists=False):
        """Render the CREATE statement for this resource type."""
        # TODO: modify props to split into header props and footer props
        return tidy_sql(
            "CREATE",
            "OR REPLACE" if or_replace else "",
            cls.resource_type,
            "IF NOT EXISTS" if if_not_exists else "",
            fqn,
            cls.props.render(data),
        )

    @classmethod
    def lifecycle_delete(cls, fqn, data, if_exists=False):
        """Render the DROP statement for this resource type."""
        return tidy_sql("DROP", cls.resource_type, "IF EXISTS" if if_exists else "", fqn)

    def create_sql(self, **kwargs):
        data = self.model_dump(exclude_none=True, exclude_defaults=True)
        return str(self.lifecycle_create(self.fqn, data, **kwargs))

    def drop_sql(self, **kwargs):
        data = self.model_dump(exclude_none=True, exclude_defaults=True)
        return self.lifecycle_delete(self.fqn, data, **kwargs)
@_fix_class_documentation
class Organization(Resource):
    """Top-level organization; root of the resource scope hierarchy."""
    resource_type = "ORGANIZATION"
    name: ResourceName
class OrganizationScoped(BaseModel):
    """Mixin for resources that live directly under an Organization."""
    scope: ClassVar[Scope] = Scope.ORGANIZATION
    # Owning organization; accepts an Organization or a name string.
    organization: Annotated[
        Organization,
        BeforeValidator(coerce_from_str(Organization)),
    ] = Field(default=None, exclude=True, repr=False)
    @property
    def fully_qualified_name(self):
        return FQN(name=self.name.upper())
    def has_scope(self):
        """True once this resource has been attached to an organization."""
        return self.organization is not None
    # NOTE(review): field declared after the methods — pydantic accepts
    # this, but consider moving it next to `organization` for readability.
    name: str
@_fix_class_documentation
class Account(Resource, OrganizationScoped):
    """
    CREATE ACCOUNT <name>
        ADMIN_NAME = <string>
        { ADMIN_PASSWORD = '<string_literal>' | ADMIN_RSA_PUBLIC_KEY = <string> }
        [ FIRST_NAME = <string> ]
        [ LAST_NAME = <string> ]
        EMAIL = '<string>'
        [ MUST_CHANGE_PASSWORD = { TRUE | FALSE } ]
        EDITION = { STANDARD | ENTERPRISE | BUSINESS_CRITICAL }
        [ REGION_GROUP = <region_group_id> ]
        [ REGION = <snowflake_region_id> ]
        [ COMMENT = '<string_literal>' ]
    """
    resource_type = "ACCOUNT"
    lifecycle_privs = Privs(
        create=GlobalPriv.CREATE_ACCOUNT,
    )
    props = Props(
        admin_name=StringProp("admin_name"),
        admin_password=StringProp("admin_password"),
        admin_rsa_public_key=StringProp("admin_rsa_public_key"),
        first_name=StringProp("first_name"),
        last_name=StringProp("last_name"),
        email=StringProp("email"),
        must_change_password=BoolProp("must_change_password"),
        edition=EnumProp("edition", AccountEdition),
        region_group=StringProp("region_group"),
        region=StringProp("region"),
        comment=StringProp("comment"),
    )
    name: ResourceName
    # Admin/PII fields are write-only: fetchable=False excludes them from
    # Resource.fetchable_fields comparisons.
    admin_name: str = Field(default=None, json_schema_extra={"fetchable": False})
    admin_password: str = Field(default=None, json_schema_extra={"fetchable": False})
    admin_rsa_public_key: str = Field(default=None, json_schema_extra={"fetchable": False})
    first_name: str = Field(default=None, json_schema_extra={"fetchable": False})
    last_name: str = Field(default=None, json_schema_extra={"fetchable": False})
    email: str = Field(default=None, json_schema_extra={"fetchable": False})
    must_change_password: bool = Field(default=None, json_schema_extra={"fetchable": False})
    # edition: AccountEdition = None
    # region_group: str = None
    # region: str = None
    comment: str = None
    @classmethod
    def lifecycle_create(cls, fqn, data):
        """Render CREATE ACCOUNT (no OR REPLACE / IF NOT EXISTS variants)."""
        return tidy_sql(
            "CREATE ACCOUNT",
            fqn,
            cls.props.render(data),
        )
    @classmethod
    def lifecycle_delete(cls, fqn, data, if_exists=False, grace_period_in_days=3):
        """Render DROP ACCOUNT with a restore grace period.

        NOTE(review): "GRACE_PERIOD_IN_DAYS = " and the value are passed as
        separate fragments — presumably tidy_sql joins them with a single
        space; confirm the rendered SQL has no double space.
        """
        return tidy_sql(
            "DROP ACCOUNT",
            "IF EXISTS" if if_exists else "",
            fqn,
            "GRACE_PERIOD_IN_DAYS = ",
            grace_period_in_days,
        )
    def add(self, *resources: "AccountScoped"):
        # Attach account-scoped resources to this account; accepts a list too.
        if isinstance(resources[0], list):
            resources = resources[0]
        for resource in resources:
            resource.account = self
    def remove(self, *resources: "AccountScoped"):
        # Detach account-scoped resources from this account.
        if isinstance(resources[0], list):
            resources = resources[0]
        for resource in resources:
            resource.account = None
class AccountScoped(BaseModel):
    """Mixin for resources that live directly under an Account."""
    scope: ClassVar[Scope] = Scope.ACCOUNT
    # Owning account; accepts an Account or a name string.
    account: Annotated[
        Account,
        BeforeValidator(coerce_from_str(Account)),
    ] = Field(default=None, exclude=True, repr=False)
    @property
    def fully_qualified_name(self):
        return FQN(name=self.name.upper())
    @property
    def fqn(self):
        return self.fully_qualified_name
    def has_scope(self):
        """True once this resource has been attached to an account."""
        return self.account is not None
@_fix_class_documentation
class Database(Resource, AccountScoped):
    """
    CREATE [ OR REPLACE ] [ TRANSIENT ] DATABASE [ IF NOT EXISTS ] <name>
        [ CLONE <source_db>
            [ { AT | BEFORE } ( { TIMESTAMP => <timestamp> | OFFSET => <time_difference> | STATEMENT => <id> } ) ] ]
        [ DATA_RETENTION_TIME_IN_DAYS = <integer> ]
        [ MAX_DATA_EXTENSION_TIME_IN_DAYS = <integer> ]
        [ DEFAULT_DDL_COLLATION = '<collation_specification>' ]
        [ [ WITH ] TAG ( <tag_name> = '<tag_value>' [ , <tag_name> = '<tag_value>' , ... ] ) ]
        [ COMMENT = '<string_literal>' ]
    """
    resource_type = "DATABASE"
    lifecycle_privs = Privs(
        create=GlobalPriv.CREATE_DATABASE,
        read=DatabasePriv.USAGE,
        delete=DatabasePriv.OWNERSHIP,
    )
    props = Props(
        transient=FlagProp("transient"),
        data_retention_time_in_days=IntProp("data_retention_time_in_days"),
        max_data_extension_time_in_days=IntProp("max_data_extension_time_in_days"),
        default_ddl_collation=StringProp("default_ddl_collation"),
        tags=TagsProp(),
        comment=StringProp("comment"),
    )
    name: ResourceName
    transient: bool = False
    owner: str = "SYSADMIN"
    data_retention_time_in_days: int = 1
    max_data_extension_time_in_days: int = 14
    default_ddl_collation: str = None
    tags: Dict[str, str] = None
    comment: str = None
    def model_post_init(self, ctx):
        # Every database implicitly contains PUBLIC and INFORMATION_SCHEMA;
        # marked implicit so they are not managed as user-declared resources.
        super().model_post_init(ctx)
        self.add(
            Schema(name="PUBLIC", implicit=True),
            Schema(name="INFORMATION_SCHEMA", implicit=True),
        )
    @classmethod
    def lifecycle_create(cls, fqn: FQN, data, or_replace=False, if_not_exists=False):
        """Render CREATE DATABASE, including the TRANSIENT modifier."""
        return tidy_sql(
            "CREATE",
            "OR REPLACE" if or_replace else "",
            "TRANSIENT" if data.get("transient") else "",
            "DATABASE",
            "IF NOT EXISTS" if if_not_exists else "",
            fqn,
            cls.props.render(data),
        )
    @classmethod
    def lifecycle_update(cls, fqn, change, if_exists=False):
        """Render ALTER DATABASE for a single attribute change.

        ``change`` is a one-entry dict {attr: new_value}: None unsets the
        attribute, "name" renames the database, anything else SETs it.
        NOTE(review): popitem mutates the caller's dict — confirm callers
        pass a throwaway dict.
        """
        attr, new_value = change.popitem()
        attr = attr.upper()
        if new_value is None:
            return tidy_sql(
                "ALTER DATABASE",
                "IF EXISTS" if if_exists else "",
                fqn,
                "UNSET",
                attr,
            )
        elif attr == "NAME":
            return tidy_sql(
                "ALTER DATABASE",
                "IF EXISTS" if if_exists else "",
                fqn,
                "RENAME TO",
                new_value,
            )
        else:
            # Quote string values for the SET clause.
            new_value = f"'{new_value}'" if isinstance(new_value, str) else new_value
            return tidy_sql(
                "ALTER DATABASE",
                "IF EXISTS" if if_exists else "",
                fqn,
                "SET",
                attr,
                "=",
                new_value,
            )
    def add(self, *resources: "DatabaseScoped"):
        # Attach database-scoped resources (e.g. schemas); accepts a list too.
        if isinstance(resources[0], list):
            resources = resources[0]
        for resource in resources:
            resource.database = self
    def remove(self, *resources: "DatabaseScoped"):
        # Detach database-scoped resources from this database.
        if isinstance(resources[0], list):
            resources = resources[0]
        for resource in resources:
            resource.database = None
class DatabaseScoped(BaseModel):
    """Mixin for resources that live inside a Database."""
    scope: ClassVar[Scope] = Scope.DATABASE
    # Owning database; accepts a Database or a name string.
    database: Annotated[
        Database,
        BeforeValidator(coerce_from_str(Database)),
    ] = Field(default=None, exclude=True, repr=False)
    @property
    def fully_qualified_name(self):
        # Database part is omitted until the resource is attached.
        return FQN(database=self.database.name if self.database else None, name=self.name.upper())
    @property
    def fqn(self):
        return self.fully_qualified_name
    def has_scope(self):
        """True once this resource has been attached to a database."""
        return self.database is not None
@_fix_class_documentation
class Schema(Resource, DatabaseScoped):
    """
    CREATE [ OR REPLACE ] [ TRANSIENT ] SCHEMA [ IF NOT EXISTS ] <name>
        [ CLONE <source_schema>
            [ { AT | BEFORE } ( { TIMESTAMP => <timestamp> | OFFSET => <time_difference> | STATEMENT => <id> } ) ] ]
        [ WITH MANAGED ACCESS ]
        [ DATA_RETENTION_TIME_IN_DAYS = <integer> ]
        [ MAX_DATA_EXTENSION_TIME_IN_DAYS = <integer> ]
        [ DEFAULT_DDL_COLLATION = '<collation_specification>' ]
        [ [ WITH ] TAG ( <tag_name> = '<tag_value>' [ , <tag_name> = '<tag_value>' , ... ] ) ]
        [ COMMENT = '<string_literal>' ]
    """
    resource_type = "SCHEMA"
    lifecycle_privs = Privs(
        create=DatabasePriv.CREATE_SCHEMA,
        read=SchemaPriv.USAGE,
        delete=SchemaPriv.OWNERSHIP,
    )
    props = Props(
        transient=FlagProp("transient"),
        with_managed_access=FlagProp("with managed access"),
        data_retention_time_in_days=IntProp("data_retention_time_in_days"),
        max_data_extension_time_in_days=IntProp("max_data_extension_time_in_days"),
        default_ddl_collation=StringProp("default_ddl_collation"),
        tags=TagsProp(),
        comment=StringProp("comment"),
    )
    name: ResourceName
    transient: bool = False
    owner: str = "SYSADMIN"
    with_managed_access: bool = None
    data_retention_time_in_days: int = None
    max_data_extension_time_in_days: int = None
    default_ddl_collation: str = None
    tags: Dict[str, str] = None
    comment: str = None
    def add(self, *resources: "SchemaScoped"):
        # Attach schema-scoped resources to this schema; accepts a list too.
        if isinstance(resources[0], list):
            resources = resources[0]
        for resource in resources:
            resource.schema = self
    def remove(self, *resources: "SchemaScoped"):
        # Detach schema-scoped resources from this schema.
        if isinstance(resources[0], list):
            resources = resources[0]
        for resource in resources:
            resource.schema = None
# Annotated alias: accept a Schema instance or a name string on input;
# serialize the reference as just the schema's name.
T_Schema = Annotated[Schema, BeforeValidator(coerce_from_str(Schema)), serialize_resource_by_name]
class SchemaScoped(BaseModel):
    """Mixin for resources that live inside a Schema."""
    scope: ClassVar[Scope] = Scope.SCHEMA
    # Stored as schema_ (trailing underscore) with alias "schema";
    # NOTE(review): presumably to avoid clashing with pydantic BaseModel's
    # own `schema` name — confirm against the pydantic version in use.
    schema_: Annotated[
        Schema,
        BeforeValidator(coerce_from_str(Schema)),
    ] = Field(exclude=True, repr=False, alias="schema", default=None)
    @property
    def schema(self):
        # Public accessor that hides the underscore field name.
        return self.schema_
    @schema.setter
    def schema(self, new_schema):
        self.schema_ = new_schema
    @property
    def fully_qualified_name(self):
        # Compose database.schema.name, omitting parts not yet attached.
        schema = self.schema_.name if self.schema_ else None
        database = None
        if self.schema_ and self.schema_.database:
            database = self.schema_.database.name
        return FQN(database=database, schema=schema, name=self.name.upper())
    @property
    def fqn(self):
        return self.fully_qualified_name
    def has_scope(self):
        """True once this resource has been attached to a schema."""
        return self.schema_ is not None
| teej/titan | titan/resources/base.py | base.py | py | 15,887 | python | en | code | 91 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing_extensions.Annotated",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "pydantic.functional_validators.AfterValidator",
"line_number": 37,
"usage_type": "call"
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.