index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,000 | fe25e9f93fc6bb460c9a9a10ce277feb84aa76f0 | import time
from selenium import webdriver
import selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
# Give Language code in which you want to translate the text:=>
#lang_code = 'sq'
# Headless Chrome so no visible browser window is opened.
chrome_op = ChromeOptions()
chrome_op.add_argument('--headless')
# Provide text that you want to translate:=>
# NOTE(review): `original` is not defined anywhere in this file — it must be
# supplied by the surrounding context before this script runs; confirm.
input1 = original
# launch browser with selenium:=>
# NOTE(review): `CHROMEDRIVER_PATH` is likewise undefined in this file —
# TODO confirm it is injected by the caller / environment.
browser = webdriver.Chrome(CHROMEDRIVER_PATH) #browser = webdriver.Chrome('path of chromedriver.exe file') if the chromedriver.exe is in different folder
# copy google Translator link here:=>
browser.get("https://translate.google.com")
#view=home&op=translate&sl=en&tl="+lang_code)
# Use Javascript command to give input text:=>
# NOTE(review): a single quote or backslash inside input1 breaks this injected
# JavaScript string — consider escaping the text before building the command.
command = "document.getElementById('source').value = '" + \
input1 + "'"
# Excute above command:=>
browser.execute_script(command)
# just wait for some time for translating input text:=>
time.sleep(6)
# Given below x path contains the translated output that we are storing in output variable:=>
# NOTE(review): this absolute XPath is tied to a specific Google Translate page
# layout and will silently break when the page markup changes.
output1 = browser.find_element_by_xpath( '/html/body/div[2]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div[3]/div[1]/div[2]/div/span[1]').text
# Display the output:=>
print("Translated Paragraph:=> " + output1)
browser.quit()
|
988,001 | 96ea8a322c122086b67ffba671eacf605227f63e | #ブロックを各方向に3つ配置するクラス
class ThreePutBlock():
    """Places a straight line of three blocks along one axis in Minecraft."""

    def __init__(self, mc, blockName, pos):
        # Keep the Minecraft connection, block type and anchor position.
        self.blockName = blockName
        self.pos = pos
        self.mc = mc

    def _place(self, dx, dy, dz):
        """Fill from the anchor to anchor+offset with the stored block type."""
        origin = self.pos
        print(origin)
        print(self.blockName)
        self.mc.setBlocks(origin.x, origin.y, origin.z,
                          origin.x + dx, origin.y + dy, origin.z + dz,
                          self.blockName)

    def verticalXPlacement(self):
        """Place three blocks extending along the x axis."""
        self._place(2, 0, 0)

    def verticalZPlacement(self):
        """Place three blocks extending along the z axis."""
        self._place(0, 0, 2)

    def horizontalPlacement(self):
        """Place three blocks extending along the y axis."""
        self._place(0, 2, 0)
|
988,002 | 667744a611b6e9004b8c842818fdbd12d840500d | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 22:10:50 2017
@author: user
"""
## Introduction to the Bag-of-words model
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
count = CountVectorizer()
docs = np.array([
'The sun is shining',
'The weather is sweet',
'The sun is shining and weather is sweet'])
bag = count.fit_transform(docs)
print(count.vocabulary_)
print(bag.toarray())
a=bag.toarray()
## The sequence of of items in bag-of-words model above is called 1-gram or
## unigram. By using ngram_range parameter in countVectorizer class, we can use
## different n-gram model. ex) ngram_range=(2.2) => 2-gram
## Compute tf-idf
from sklearn.feature_extraction.text import TfidfTransformer
tfidf = TfidfTransformer()
np.set_printoptions(precision=2)
print(tfidf.fit_transform(count.fit_transform(docs)).toarray())
|
988,003 | cec085dd09db4b3a0b271d8f1b1570ceda834c50 | from sqlalchemy import Column, INT, CHAR, BOOLEAN, BLOB
from . import base
class Token(base.Base):
    """ORM mapping for the 'token' table: a short access token scoped to a
    project, with an owner and a single-use flag."""
    __tablename__ = 'token'
    # Five-character token code; together with project_id it forms the
    # composite primary key (autoincrement disabled on both parts).
    token = Column(CHAR(5), primary_key = True, autoincrement = False)
    project_id = Column(INT, primary_key = True, autoincrement = False)
    # Opaque owner identifier stored as a binary blob.
    owner = Column(BLOB, nullable = False)
    # Whether the token has already been redeemed; defaults to unused.
    used = Column(BOOLEAN, nullable = False, default = False)
|
988,004 | f19749cc4be5bd06ce9695b50783b1ad9483ad57 | from tkinter import *
from pygame import mixer
from automatExeptions import TypeObjectException, NotStrException
def set_text(obj, text:str): ## controls from obj and text assignment
    """
    Set the given text on a tkinter control.

    Args:
        obj: target control; must be a Text or Label widget.
        text (str): text to write into the control.

    Raises:
        NotStrException: if *text* is not a string.
        TypeObjectException: if *obj* is neither Text nor Label.
    """
    # isinstance() is the idiomatic type check and also accepts subclasses,
    # unlike the strict type(...) comparison.
    if not isinstance(text, str):
        raise NotStrException(f'Variable \'text\' is {type(text)}, not str!')
    if isinstance(obj, Text):
        # Text widgets are kept read-only: enable, rewrite, then lock again.
        obj.configure(state= 'normal')
        obj.delete(1.0, END)
        obj.insert(1.0, text)
        obj.configure(state= 'disabled')
    elif isinstance(obj, Label):
        obj.configure(text= text)
    else:
        raise TypeObjectException('Object can be only Text or Label!')
def play_sound(sound:str):
    """
    Play a sound file through the pygame mixer.

    Args:
        sound (str): path to the sound file.
    """
    # NOTE(review): mixer.init() is re-run on every call; pygame tolerates
    # repeated init, but it could be hoisted to module level — confirm first.
    mixer.init()
    mixer.music.load(sound)
    mixer.music.play()
class DrinksVendingMachineUI(Tk): ## main interface class
    '''
    DrinksVendingMachineUI - class describing the user interface for working
    with a drinks vending machine.
    Methods:
        configure_money_option(command, options:dict)
        set_command(obj, command, **kwargs)
        set_text(obj, text)
        reset_money_option
    '''
    def __init__(self): ## constructor
        """
        Constructor - creates a DrinksVendingMachineUI object, inheriting the
        Tk constructor.
        Class fields:
            __numbers_buttons (list) - numeric buttons
            __goods_list_text (Text) - list of goods
            __buy_button (Button) - purchase button
            __deny_transaction_button (Button) - cancel-transaction button
            __money_variable (StringVar) - field changing the __money_option header
            __money_option (OptionMenu) - drop-down list for inserting coins
            __screen_label (Label) - screen presenting purchase information
            __money_screen_label (Label) - screen presenting the user's balance
        """
        super().__init__()
        self.geometry('600x400')
        # Buttons 1-9 form a 3x3 grid; button 0 is centred below it.
        self.__numbers_buttons = [Button(self, text=str(i), bg="blue", fg="black", font= ("Times New Roman", "15", "bold")) for i in range (10)] ## list comprehension
        for i in range (1, len(self.__numbers_buttons)):
            self.__numbers_buttons[i].place(x = ((i-1)%3*35 + 430), y = ((i-1)//3*35 + 125), width=30, height=30)
        self.__numbers_buttons[0].place(x = (35 + 430), y = 230, width=30, height=30)
        self.__goods_list_text = Text(self) ## create the field for the goods list
        self.__goods_list_text.place(x = 0, y = 0, width=380, height=400)
        self.__buy_button = Button(self, text="Buy", bg="green", fg="black", font= ("Times New Roman", "15", "bold"))
        self.__buy_button.place(x = 400, y = 300, width=60, height=60)
        self.__deny_transaction_button = Button(self, text="Deny\ntransaction", bg="red", fg="black", font= ("Times New Roman", "15", "bold"))
        self.__deny_transaction_button.place(x = 490, y = 300, width=100, height=60)
        self.__money_variable = StringVar(self)
        self.__money_variable.set("Add funds")
        # Placeholder menu; configure_money_option() rebuilds it with choices.
        self.__money_option = OptionMenu(self, self.__money_variable, [])
        self.__screen_label = Label(text="", bg="black", fg="white")
        self.__screen_label.place(x=400, y=90, width=170, height=30)
        self.__money_screen_label = Label(text="", bg="black", fg="white")
        self.__money_screen_label.place(x=400, y=60, width=170, height=30)
    """
    Read-only property accessors returning the private widgets.
    Returns:
        The wrapped attribute
    """
    @property
    def numbers_buttons(self):
        return self.__numbers_buttons
    @property
    def goods_list_text(self):
        return self.__goods_list_text
    @property
    def buy_button(self):
        return self.__buy_button
    @property
    def deny_transaction_button(self):
        return self.__deny_transaction_button
    @property
    def money_variable(self):
        return self.__money_variable
    @property
    def money_option(self):
        return self.__money_option
    @property
    def screen_label(self):
        return self.__screen_label
    @property
    def money_screen_label(self):
        return self.__money_screen_label
    def configure_money_option(self, command, options:dict): ## outer look of the drop-down list
        """
        Rebuilds __money_option with real choices.
        Args:
            command () - handler for the selection event on __money_option
            options (dict) - choices for the drop-down list (keys are shown)
        """
        options = list(options.keys())
        self.__money_option = OptionMenu(self, self.__money_variable, *options, command=command)
        self.__money_option.place(x = 420, y = 10, width= 100, height=30)
    def set_command(self, obj, command, **kwargs): ## binds an interface object and a command
        """
        Sets event handlers on controls.
        Args:
            obj () - control to bind
            command () - event handler
            kwargs (dict) - entries for the drop-down list (key "options")
        """
        try:
            # The OptionMenu cannot take a plain command; it must be rebuilt.
            if obj == self.__money_option:
                self.configure_money_option(command, kwargs["options"])
                return
            else:
                obj.config(command=command)
        except Exception:
            raise TypeObjectException(f'Variable object is {type(obj)}, not tkinter!')
    def reset_money_option(self):
        """
        Puts "Add funds" back at the top of the list after each __money_option choice.
        """
        self.__money_variable.set("Add funds")
988,005 | 2bb92d0c2becfa47ac957caa6e3a20371f6517c9 | from django import forms
from fishbytes.models import Catch
# class ProfileForm(forms.ModelForm):
# class Meta:
# model=User
# fields = ('username')
class CatchForm(forms.ModelForm):
    """ModelForm for recording a fish catch.

    size/weight/date are redeclared only to give them friendlier labels than
    the model-derived defaults; all other fields come straight from Catch.
    """
    size = forms.CharField(label="Size (inches):")
    weight = forms.CharField(label="Weight (lbs):")
    date = forms.DateField(label="Date (MM/DD/YYYY):")
    class Meta:
        model = Catch
        fields = ('image', 'fish', 'size', 'weight', 'lake', 'date', 'longitude', 'latitude',)
988,006 | 599f5840824f1460446d76ee6a3220e723b71263 | import os
import sys
import struct
import socket
from time import sleep
from optparse import OptionParser
#NOTE: due to the fact that the ethernet once transfer max is 1500bytes buffer
#and the command max is write config need 7int(28bytes),so we recommand you that
#once transfer command number max is not overflow 50 to avoid the error that unconsicous
#MAX_CMD_NUM=50
def eth_recv():
    """Receive one chunk (at most 1500 bytes, one Ethernet frame) from the
    module-level socket `s`, decode it as UTF-8, echo it and return the text."""
    raw = s.recv(1500)
    # bytes.decode is equivalent to str(raw, encoding="utf-8")
    text = raw.decode("utf-8")
    print(text)
    return text
def write_data(package):
    """Send an already-packed command buffer (bytes) over the module-level socket `s`."""
    s.send(package)
def initial_eth():
    """Connect the module-level socket `s` to the FPGA board at
    202.118.229.189:30 and consume the greeting the board sends on connect."""
    s.connect(('202.118.229.189',30))
    eth_recv()
def transfer_once(package):
    """Perform one complete exchange with the board: connect, send one packed
    command batch, read the reply, close the socket and return the reply text.

    NOTE(review): closes the module-level socket `s`; the caller must create a
    fresh socket before each call (the main loop below does).
    """
    initial_eth()
    write_data(package)
    data=eth_recv()
    s.close()
    return data
def check_name(name):
    """Normalise a file name: trim whitespace, keep only the part before the
    first dot, and force a .txt extension."""
    stem = name.strip().split('.')[0]
    return stem + ".txt"
print("\n\n\n\n")
print(sys.version)
os.system("python -V")
usage="%prog [options]"
version="1.0"
parser=OptionParser(usage,description="SRAM test running",version="%prog "+version)
#in/out file
parser.add_option("-i","--intput_file",dest="in_fil",help="Input test vector file",type='string',default="cmd.txt")
parser.add_option("-o","--output_file",dest="out_fil",help="Output log file",type='string',default="log.txt")
parser.add_option("-m","--max_trans",dest="max_trans",help="once commands transfer",type='int',default=10)
(options,args)=parser.parse_args()
in_fil=check_name(options.in_fil)
out_fil=check_name(options.out_fil)
MAX_CMD_NUM=options.max_trans
cmd_file=open(in_fil,'r')
log_file=open(out_fil,'w')
lines=cmd_file.readlines()
cmd_num=len(lines)
tr_num=int(cmd_num/MAX_CMD_NUM)+1
last_num=cmd_num%MAX_CMD_NUM
print("all commands number: "+str(cmd_num))
print("once transfer commands number: "+str(MAX_CMD_NUM))
print("transfer times: "+str(tr_num))
#file format
#cmd 1:write_one 2:read 3:write_all 4:read_direct 5:write_config 6:update 7:cut connection
#1 addr data
#2 addr
#3 data
#4 addr
#5 addr data op_area inc_dec cycle jump
#6 update addr area
#7 cut_connection
#the version of send specific commands to FPGA once transfer
for i in range(tr_num):
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
if(i==tr_num-1):#last transfer
num=last_num
else:
num=MAX_CMD_NUM
num_pkg=struct.pack('i',num)
transfer_pkg=b''
for j in range(num):
line=(lines[j+i*MAX_CMD_NUM].strip()).split()
for k in range(len(line)):
line[k]=int(line[k])
cmd=line[0]
if cmd==1:
#check illegal
if len(line)!=3 or (line[1]>int('0x3ff',16)) or (line[2]>int('0xff',16)):#addr/data overflow
continue
cmd=struct.pack('i',cmd)
addr=struct.pack('i',line[1])
data=struct.pack('i',line[2])
cmd_pkg=cmd+addr+data
elif cmd==2 or cmd==3 or cmd==4:
if len(line)!=2:
continue
if cmd==2 and (line[1]>int('0x3ff',16)):
continue
cmd=struct.pack('i',cmd)
cmd_pkg=cmd+struct.pack('i',line[1])
elif cmd==5:
if len(line)!=7 :
continue
if (line[1]>int('0x3ff',16)) or (line[2]>int('0xff',16)) or (line[6]>int('0x3ff',16)):
continue
if (line[3]>int('0x3ff',16)):
continue
if (line[4]!=1 and line[4]!=0) or (line[5]!=1 and line[5]!=0):
continue
cmd=struct.pack('i',cmd)
addr=struct.pack('i',line[1])
data=struct.pack('i',line[2])
area=struct.pack('i',line[3])
inc_dec=struct.pack('i',line[4])
cycle=struct.pack('i',line[5])
jump=struct.pack('i',line[6])
cmd_pkg=cmd+addr+data+area+inc_dec+cycle+jump
elif cmd==6:
if len(line)!=3 or (line[1]>int('0x3ff',16)) or (line[2]>int('0x3ff',16)):
continue
cmd=struct.pack('i',cmd)
addr=struct.pack('i',line[1])
area=struct.pack('i',line[2])
cmd_pkg=cmd+addr+area
else:
continue
transfer_pkg=transfer_pkg+cmd_pkg
transfer_pkg=num_pkg+transfer_pkg
log_info=transfer_once(transfer_pkg)
log_file.write(log_info)
print("\n\n\n")
cmd_file.close()
log_file.close()
#the version of send one command to FPGA once transfer
'''
cmd_num=struct.pack('i',1)#manual is only one command can send
for line in lines:
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
line=(line.strip()).split()
for i in range(len(line)):
line[i]=int(line[i])
cmd=line[0]
if cmd==1:
#check illegal
if len(line)!=3 or (line[1]>int('0x3ff',16)) or (line[2]>int('0xff',16)):#addr/data overflow
continue
cmd=struct.pack('i',cmd)
addr=struct.pack('i',line[1])
data=struct.pack('i',line[2])
log_info=transfer_once(cmd_num+cmd+addr+data)
log_file.write(log_info)
elif cmd==2 or cmd==3 or cmd==4:
if len(line)!=2:
continue
if cmd==2 and (line[1]>int('0x3ff',16)):
continue
cmd=struct.pack('i',cmd)
log_info=transfer_once(cmd_num+cmd+struct.pack('i',line[1]))
log_file.write(log_info)
elif cmd==5:
if len(line)!=7 :
continue
if (line[1]>int('0x3ff',16)) or (line[2]>int('0xff',16)) or (line[6]>int('0x3ff',16)):
continue
if (line[3]>int('0x3ff',16)):
continue
if (line[4]!=1 and line[4]!=0) or (line[5]!=1 and line[5]!=0):
continue
cmd=struct.pack('i',cmd)
addr=struct.pack('i',line[1])
data=struct.pack('i',line[2])
area=struct.pack('i',line[3])
inc_dec=struct.pack('i',line[4])
cycle=struct.pack('i',line[5])
jump=struct.pack('i',line[6])
log_info=transfer_once(cmd_num+cmd+addr+data+area+inc_dec+cycle+jump)
log_file.write(log_info)
elif cmd==6:
if len(line)!=3 or (line[1]>int('0x3ff',16)) or (line[2]>int('0x3ff',16)):
continue
cmd=struct.pack('i',cmd)
addr=struct.pack('i',line[1])
area=struct.pack('i',line[2])
log_info=transfer_once(cmd_num+cmd+addr+area)
log_file.write(log_info)
else:
continue
cmd_file.close()
log_file.close()
'''
|
988,007 | 7d3aaf3018e3402e9a9f7395bf26645bbccdb3bb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Demonstrate usage of LFPy.Network with network of ball-and-stick type
morphologies with active HH channels inserted in the somas and passive-leak
channels distributed throughout the apical dendrite. The corresponding
morphology and template specifications are in the files BallAndStick.hoc and
BallAndStickTemplate.hoc.
Execution (w. MPI):
mpirun -np 2 python example_network.py
Copyright (C) 2017 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
# import modules:
import os
import sys
if sys.version < '3':
from urllib2 import urlopen
else:
from urllib.request import urlopen
import ssl
import zipfile
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy.signal as ss
import scipy.stats as st
from mpi4py import MPI
import neuron
from LFPy import NetworkCell, Network, Synapse, RecExtElectrode, FourSphereVolumeConductor
# import csv
import pickle
import time
from matplotlib.collections import PolyCollection
from os.path import join
import random as random
# set up MPI variables:
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
# avoid same sequence of random numbers from numpy and neuron on each RANK,
# e.g., in order to draw unique cell and synapse locations and random synapse
# activation times
GLOBALSEED = 1234
np.random.seed(GLOBALSEED + RANK)
################################################################################
# Set up shared and population-specific parameters
################################################################################
# from param import *
from param import spike_t, spike_std, distr_t, weighttrain,cellParameters, OUTPUTPATH, populationParameters, networkParameters, electrodeParameters, networkSimulationArguments, num_cells, population_names,population_sizes, connectionProbability, synapseModel,synapseParameters, weightArguments, weightFunction, minweight, delayFunction, delayArguments, mindelay, multapseFunction, multapseArguments, synapsePositionArguments
# Save the cell params for population plot
# NOTE(review): this runs on every MPI rank, so all ranks write the same file
# concurrently, and it runs before the output directory is created in main —
# presumably "example_network_output" already exists; confirm.
fi=open("example_network_output/cells.pkl","wb")
pickle.dump(cellParameters,fi)
fi.close()
if __name__ == '__main__':
    start = time.time()
    ############################################################################
    # Main simulation
    ############################################################################
    # create directory for output:
    if not os.path.isdir(OUTPUTPATH):
        if RANK == 0:
            os.mkdir(OUTPUTPATH)
    # all ranks wait until rank 0 has created the directory
    COMM.Barrier()
    # instantiate Network:
    network = Network(**networkParameters)
    # create E and I populations:
    for ii, (name, size) in enumerate(zip(population_names, population_sizes)):
        if RANK == 0:
            print(ii, name, size)
        network.create_population(name=name, POP_SIZE=size,
                                  **populationParameters[ii])
        # initial spike train: drive every 4th excitatory cell with one synapse
        # activated at its entry in distr_t
        if name =='E':
            for j,cell in enumerate(network.populations[name].cells):
                if j%4==0:
                    idx = cell.get_rand_idx_area_norm(section='dend', nidx=1)
                    for i in idx:#if more than one synapse
                        syn = Synapse(cell=cell, idx=i, syntype='Exp2Syn',
                                      weight=weighttrain,
                                      **dict(synapseParameters[0][0]))
                        syn.set_spike_times(np.array([distr_t[j]]))
    # create connectivity matrices and connect populations:
    for i, pre in enumerate(population_names):
        for j, post in enumerate(population_names):
            # boolean connectivity matrix between pre- and post-synaptic neurons
            # in each population (postsynaptic on this RANK)
            connectivity = network.get_connectivity_rand(
                pre=pre, post=post,
                connprob=connectionProbability[i][j]
            )
            print(np.shape(connectivity))
            # connect network:
            (conncount, syncount) = network.connect(
                pre=pre, post=post,
                connectivity=connectivity,
                syntype=synapseModel,
                synparams=synapseParameters[i][j],
                weightfun=np.random.normal,
                weightargs=weightArguments[i][j],
                minweight=minweight,
                delayfun=delayFunction,
                delayargs=delayArguments[i][j],
                mindelay=mindelay,
                multapsefun=multapseFunction,
                multapseargs=multapseArguments[i][j],
                syn_pos_args=synapsePositionArguments[i][j],
                save_connections = True, # Creates synapse_positions.h5
            )
    # set up extracellular recording device:
    electrode = RecExtElectrode(**electrodeParameters)
    # a distant point electrode approximating an EEG contact (currently unused
    # in the simulate() call below)
    EEG_electrode_params = dict(
        x=0,
        y=0,
        z=90000.,
        method="soma_as_point"
    )
    EEG_electrode = RecExtElectrode(**EEG_electrode_params)
    # run simulation:
    SPIKES, OUTPUT, DIPOLEMOMENT = network.simulate(
        electrode=electrode,
        # electrode = EEG_electrode,
        **networkSimulationArguments,
    )
    # collect somatic potentials across all RANKs to RANK 0:
    if RANK == 0:
        somavs = []
        for i, name in enumerate(population_names):
            somavs.append([])
            somavs[i] += [cell.somav
                          for cell in network.populations[name].cells]
            for j in range(1, SIZE):
                somavs[i] += COMM.recv(source=j, tag=15)
    else:
        somavs = None
        for name in population_names:
            COMM.send([cell.somav for cell in network.populations[name].cells],
                      dest=0, tag=15)
    ############################################################################
    # Save data for plots
    ############################################################################
    # NOTE(review): every rank writes these pickles to the same paths (somavs
    # is None on non-root ranks) — a `if RANK == 0:` guard is probably
    # intended here; confirm before changing.
    fi=open("example_network_output/somavs.pkl","wb")
    pickle.dump(somavs,fi)
    fi.close()
    fi=open("example_network_output/spikes.pkl","wb")
    pickle.dump(SPIKES,fi)
    fi.close()
    fi=open("example_network_output/dipoles.pkl","wb")
    pickle.dump(DIPOLEMOMENT,fi)
    fi.close()
    fi=open("example_network_output/pop_names.pkl","wb")
    pickle.dump(population_names,fi)
    fi.close()
    fi=open("example_network_output/network_dt.pkl","wb")
    pickle.dump(network.dt,fi)
    fi.close()
    ############################################################################
    # customary cleanup of object references - the psection() function may not
    # write correct information if NEURON still has object references in memory,
    # even if Python references has been deleted. It will also allow the script
    # to be run in successive fashion.
    ############################################################################
    #network.pc.gid_clear() # allows assigning new gids to threads
    electrode = None
    syn = None
    synapseModel = None
    # for population in network.populations.values():
    #     for cell in population.cells:
    #         cell = None
    #     population.cells = None
    #     population = None
    pop = None
    network = None
    neuron.h('forall delete_section()')
    total_time = time.time() - start
    if RANK == 0:
        print("total runtime:", total_time,"s.")
|
988,008 | baf412a3085550a14cb7a1ef38c5c7a7978ad539 | # !/usr/bin/python
# -*- coding: UTF-8 -*-
"""
visdom,基础模型,正则化,动量,lr衰减
"""
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from visdom import Visdom
# pip install visdom
# python -m visdom.server / visdom
class MLP(torch.nn.Module):
    """Simple 784 -> 200 -> 200 -> 10 fully connected classifier with ReLU
    activations after every linear layer."""

    def __init__(self) -> None:
        super(MLP, self).__init__()
        # inplace=True lets each ReLU overwrite its input tensor, reducing
        # memory consumption.
        layers = [
            torch.nn.Linear(784, 200),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(200, 200),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(200, 10),
            torch.nn.ReLU(inplace=True),
        ]
        self.model = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Run the flattened input batch through the layer stack."""
        return self.model(x)
# Train/test loaders for MNIST (downloads on first run); inputs normalised
# with the dataset mean/std (0.1307 / 0.3081), batches of 200.
train_data_batch = DataLoader(datasets.MNIST("./data", train=True, download=True,
                                             transform=transforms.Compose([
                                                 transforms.ToTensor(),
                                                 transforms.Normalize((0.1307,), (0.3081,))
                                             ])), batch_size=200, shuffle=True)
test_data_batch = DataLoader(datasets.MNIST("./data", train=False,
                                            transform=transforms.Compose([
                                                transforms.ToTensor(),
                                                transforms.Normalize((0.1307,), (0.3081,))
                                            ])), batch_size=200, shuffle=True)
device = torch.device("cpu:0")
mlp = MLP().to(device)
# weight_decay: L2 weight decay (regularisation); momentum: SGD momentum (inertia)
optimizer = torch.optim.SGD(mlp.parameters(), lr=1e-3, weight_decay=1e-2, momentum=1e-1)
# reduce the learning rate when the monitored loss stops decreasing
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min')
cel = torch.nn.CrossEntropyLoss().to(device)
# viz = Visdom()
# viz.line([0.], [0,], win="train_loss", opts=dict(title="train_loss"))
# viz.line([0., 0.], [0,], win="test_accuracy", opts=dict(title="test_accuracy"))
for n in range(10):
    # one training epoch: flatten images to 784-vectors, SGD step per batch
    for batch_idx, (train_data, train_target) in enumerate(train_data_batch):
        train_data = train_data.view(-1, 784)
        train_data, train_target = train_data.to(device), train_target.to(device)
        logits = mlp(train_data)
        loss = cel(logits, train_target)
        optimizer.zero_grad()
        loss.backward()
        # viz.line([loss.item()], [0], win="train_loss", update="append")
        optimizer.step()
        if (batch_idx + 1) % 100 == 0:
            # feed the current loss to the plateau scheduler every 100 batches
            scheduler.step(loss.item())
            print("num : {}, train batch : {}, loss : {}".format(n + 1, batch_idx + 1, loss.item()))
    # evaluation: count correct argmax predictions over the test set
    correct = 0
    for test_data, test_target in test_data_batch:
        test_data = test_data.view(-1, 784)
        test_data, test_target = test_data.to(device), test_target.to(device)
        test_logits = mlp(test_data)
        predict = test_logits.data.max(dim=1)[1]
        # predict = test_logits.data.argmax(dim=1)
        # correct += int(predict.eq(test_target.data).sum().item())
        correct += torch.eq(test_target.data, predict).sum()
        # viz.image([test_data.view(-1, 28, 28)], win="x")
        # viz.text(str(predict.detach.cpu().numpy()), win="predict", opts=dict(title="predict"))
    # viz.line([(correct * 100./len(test_data_batch.dataset))], [0], win="test_accuracy", update="append")
    print("accuracy : {}%".format((correct * 100./len(test_data_batch.dataset))))
988,009 | b95e3cfbc3eb7d797079071f2a4383bc6ed88ef9 | from botocore import session
import json
import datetime
from datetime import tzinfo
from dateutil.tz import *
import time
class StateFunctionWrapper(object):
"""A wrapper for State Function"""
    def __init__(self, *args, **kwargs):
        """Create a botocore Step Functions client bound to eu-central-1 and
        define the wait-policy constants used by wait()."""
        self._session = session.get_session()
        self.client = self._session.create_client(
            'stepfunctions', region_name="eu-central-1")
        # Wait policies for wait(): block until every task finishes, until the
        # first one finishes, or do not block at all.
        self.ALL_COMPLETED = 1
        self.ANY_COMPLETED = 2
        self.ALWAYS = 3
    def buildStateChecker(self,arn,statusFilter=None):
        """Fetch the executions of *arn* (optionally filtered by status) and
        collect failure descriptions for the failed ones.

        NOTE(review): the computed retry list is discarded — presumably it was
        meant to be returned; confirm against callers before changing.
        """
        stateList = self.get_execution(arn,statusFilter)
        retryList = self.reason_failure(stateList)
        # print(retryList)
def wait(self,arn,future,returnType,waitDuration=10):
succeedStateMachineList=[]
failedStateMachineList=[]
uncompletedStateMachineList=[]
numberOfTask = len(future)
numberOfCompletedTask=0
if returnType == self.ALL_COMPLETED:
while True:
# solve thorttling API problem
response1 = self.get_execution(arn,statusFilterV="SUCCEEDED")
response2 = self.get_execution(arn,statusFilterV="FAILED")
# data = {'executions': [{'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:c046f37b-93b8-4db1-b155-533e0cf59a73', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': 'c046f37b-93b8-4db1-b155-533e0cf59a73', 'status': 'RUNNING', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 25, 46000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27, 55, 927000, tzinfo=tzlocal())}, {'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:e292748b-3abd-412b-add1-62e143cff897', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': 'e292748b-3abd-412b-add1-62e143cff897', 'status': 'FAILED', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 25, 127000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27, 55, 915000, tzinfo=tzlocal())}, {'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:4cd7a938-0486-4722-8888-bc36ed0a7991', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': '4cd7a938-0486-4722-8888-bc36ed0a7991', 'status': 'FAILED', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 24, 951000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27,
# 55, 781000, tzinfo=tzlocal())}], 'ResponseMetadata': {'RequestId': '4ece9e7b-5945-11e9-991b-4748fbc60275', 'HTTPStatusCode': 200, 'HTTPHeaders': {'x-amzn-requestid': '4ece9e7b-5945-11e9-991b-4748fbc60275', 'content-type': 'application/x-amz-json-1.0', 'content-length': '1138'}, 'RetryAttempts': 0}}
# print(data)
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print(len(response1['executions']))
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if len(response1['executions'])+len(response2['executions']) == numberOfTask:
data = self.get_execution(arn)
for index in data['executions']:
print(index)
print(index['status'])
if(index['status']=='SUCCEEDED'):
# print("here please")
if(index['executionArn'] not in succeedStateMachineList):
numberOfCompletedTask=numberOfCompletedTask+1
succeedStateMachineList.append(index['executionArn'])
if(numberOfCompletedTask==numberOfTask):
return succeedStateMachineList,failedStateMachineList,uncompletedStateMachineList
elif(index['status']=='FAILED'):
# print("here please")
if(index['executionArn'] not in failedStateMachineList):
numberOfCompletedTask=numberOfCompletedTask+1
failedStateMachineList.append(index['executionArn'])
if(numberOfCompletedTask==numberOfTask):
return succeedStateMachineList,failedStateMachineList,uncompletedStateMachineList
else:
# print("not here")
time.sleep(waitDuration)
elif returnType == self.ANY_COMPLETED:
while True:
# solve thorttling API problem
data = self.get_execution(arn)
# data = {'executions': [{'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:c046f37b-93b8-4db1-b155-533e0cf59a73', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': 'c046f37b-93b8-4db1-b155-533e0cf59a73', 'status': 'RUNNING', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 25, 46000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27, 55, 927000, tzinfo=tzlocal())}, {'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:e292748b-3abd-412b-add1-62e143cff897', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': 'e292748b-3abd-412b-add1-62e143cff897', 'status': 'RUNNING', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 25, 127000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27, 55, 915000, tzinfo=tzlocal())}, {'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:4cd7a938-0486-4722-8888-bc36ed0a7991', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': '4cd7a938-0486-4722-8888-bc36ed0a7991', 'status': 'RUNNING', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 24, 951000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27,
# 55, 781000, tzinfo=tzlocal())}], 'ResponseMetadata': {'RequestId': '4ece9e7b-5945-11e9-991b-4748fbc60275', 'HTTPStatusCode': 200, 'HTTPHeaders': {'x-amzn-requestid': '4ece9e7b-5945-11e9-991b-4748fbc60275', 'content-type': 'application/x-amz-json-1.0', 'content-length': '1138'}, 'RetryAttempts': 0}}
print(data)
for index in data['executions']:
if(index['status']=='SUCCEEDED'):
if(index['executionArn'] not in succeedStateMachineList):
succeedStateMachineList.append(index['executionArn'])
elif(index['status']=='FAILED'):
# print("here please")
if(index['executionArn'] not in failedStateMachineList):
failedStateMachineList.append(index['executionArn'])
else:
if(index['executionArn'] not in uncompletedStateMachineList):
uncompletedStateMachineList.append(index['executionArn'])
print(uncompletedStateMachineList)
if len(succeedStateMachineList)>0 or len(failedStateMachineList)>0:
return completedStateMachineList,uncompletedStateMachineList
time.sleep(waitDuration)
elif returnType == self.ALWAYS:
data = self.get_execution(arn)
# data = {'executions': [{'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:c046f37b-93b8-4db1-b155-533e0cf59a73', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': 'c046f37b-93b8-4db1-b155-533e0cf59a73', 'status': 'RUNNING', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 25, 46000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27, 55, 927000, tzinfo=tzlocal())}, {'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:e292748b-3abd-412b-add1-62e143cff897', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': 'e292748b-3abd-412b-add1-62e143cff897', 'status': 'RUNNING', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 25, 127000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27, 55, 915000, tzinfo=tzlocal())}, {'executionArn': 'arn:aws:states:eu-central-1:251584899486:execution:my_map_function3-1554164784.7738473:4cd7a938-0486-4722-8888-bc36ed0a7991', 'stateMachineArn': 'arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473', 'name': '4cd7a938-0486-4722-8888-bc36ed0a7991', 'status': 'RUNNING', 'startDate': datetime.datetime(2019, 4, 2, 7, 26, 24, 951000, tzinfo=tzlocal()), 'stopDate': datetime.datetime(2019, 4, 2, 7, 27,
# 55, 781000, tzinfo=tzlocal())}], 'ResponseMetadata': {'RequestId': '4ece9e7b-5945-11e9-991b-4748fbc60275', 'HTTPStatusCode': 200, 'HTTPHeaders': {'x-amzn-requestid': '4ece9e7b-5945-11e9-991b-4748fbc60275', 'content-type': 'application/x-amz-json-1.0', 'content-length': '1138'}, 'RetryAttempts': 0}}
print(data)
for index in data['executions']:
if(index['status']=='SUCCEEDED'):
if(index['executionArn'] not in succeedStateMachineList):
succeedStateMachineList.append(index['executionArn'])
elif(index['status']=='FAILED'):
# print("here please")
if(index['executionArn'] not in failedStateMachineList):
failedStateMachineList.append(index['executionArn'])
else:uncompletedStateMachineList.append(index['executionArn'])
return succeedStateMachineList,failedStateMachineList,uncompletedStateMachineList
def reason_failure(self,data,includeException=False):
reasonList= []
for index in data['executions']:
if(index['status']=='SUCCEEDED'):
print('Succ')
elif(index['status']=='FAILED'):
response = self.client.get_execution_history(
executionArn=index['executionArn'],
maxResults=10,
reverseOrder=True
)
reason = {"ErrorType":"","Description":"","Call_number":{}}
for x in response['events']:
# print(x)
if x['type'] == 'ExecutionFailed':
reason["ErrorType"] = x['executionFailedEventDetails']['error']
reason['Description'] = x['executionFailedEventDetails']['cause']
elif x['type'] == 'LambdaFunctionScheduled':
reason["Call_number"] = x['lambdaFunctionScheduledEventDetails']["input"]
break
reasonList.append(reason)
print("<<<<<<")
print(reasonList)
return reasonList
def input_path_builder(self,index):
return "$.input["+str(index)+"]"
def contruct_statemachine_input(self,inputExecutor,call_id,outpurPth=None):
if outpurPth == None:
data={"input":inputExecutor,"call_id":call_id}
else: data={"input":inputExecutor,"call_id":call_id,"output_pth":outpurPth}
return json.dumps(data)
def stateBuildeer(self,func,instance_input):
resource = "arn:aws:lambda:eu-central-1:251584899486:function:"+func+"-128"
resource2 = "arn:aws:lambda:eu-central-1:251584899486:function:"+func+'-1600'
resource3 = "arn:aws:lambda:eu-central-1:251584899486:function:"+func+'-2688'
if instance_input == "small":
data = {
"Comment": "Executor",
"StartAt": "Submit Job",
"States": {
"Submit Job": {
"Type": "Task",
"Resource":resource,
"Catch": [ {
"ErrorEquals": ["TimeoutError"],
"ResultPath": "$.error-info",
"Next": "ChangeInstance"
}],
"Retry": [
{
"ErrorEquals": ["States.Timeout"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.TaskFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.Permissions"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ResultPathMatchFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ParameterPathFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.BranchFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.NoChoiceMatched"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ALL"],
"IntervalSeconds": 5,
"MaxAttempts": 2,
"BackoffRate": 2.0
}
],
"End": True
},
"ChangeInstance": {
"Type": "Task",
"Resource":resource2,
"Catch": [ {
"ErrorEquals": ["TimeoutError"],
"ResultPath": "$.error-info",
"Next": "ChangeInstance2"
}],
"Retry": [
{
"ErrorEquals": ["States.Timeout"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.TaskFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.Permissions"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ResultPathMatchFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ParameterPathFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.BranchFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.NoChoiceMatched"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ALL"],
"IntervalSeconds": 5,
"MaxAttempts": 2,
"BackoffRate": 2.0
}
],
"End": True
},
"ChangeInstance2": {
"Type": "Task",
"Resource":resource3,
"Catch": [ {
"ErrorEquals": ["TimeoutError"],
"ResultPath": "$.error-info",
"Next": "FailState"
}],
"Retry": [
{
"ErrorEquals": ["States.Timeout"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.TaskFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.Timeout"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.Permissions"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ResultPathMatchFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ParameterPathFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.BranchFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.NoChoiceMatched"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ALL"],
"IntervalSeconds": 5,
"MaxAttempts": 2,
"BackoffRate": 2.0
}
],
"End": True
}, "FailState": {
"Type": "Fail",
"Error": "TimeOut",
"Cause": "Execution fail, it take more than 15 minites even for largest instance, please change input accordingly"
}
}
}
elif instance_input == "medium":
data = {
"Comment": "Executor",
"StartAt": "Submit Job",
"States": {
"Submit Job": {
"Type": "Task",
"Resource":resource2,
"Catch": [ {
"ErrorEquals": ["TimeoutError"],
"ResultPath": "$.error-info",
"Next": "ChangeInstance"
}],
"Retry": [
{
"ErrorEquals": ["States.Timeout"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.TaskFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.Permissions"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ResultPathMatchFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ParameterPathFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.BranchFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.NoChoiceMatched"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ALL"],
"IntervalSeconds": 5,
"MaxAttempts": 2,
"BackoffRate": 2.0
}
],
"End": True
},
"ChangeInstance": {
"Type": "Task",
"Resource":resource3,
"Catch": [ {
"ErrorEquals": ["TimeoutError"],
"ResultPath": "$.error-info",
"Next": "FailState"
}],
"Retry": [
{
"ErrorEquals": ["States.Timeout"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.TaskFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.Timeout"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.Permissions"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ResultPathMatchFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ParameterPathFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.BranchFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.NoChoiceMatched"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ALL"],
"IntervalSeconds": 5,
"MaxAttempts": 2,
"BackoffRate": 2.0
}
],
"End": True
}, "FailState": {
"Type": "Fail",
"Error": "TimeOut",
"Cause": "Execution fail, it take more than 15 minites even for largest instance, please change input accordingly"
}
}
}
elif instance_input == "large":
data = {
"Comment": "Executor",
"StartAt": "Submit Job",
"States": {
"Submit Job": {
"Type": "Task",
"Resource":resource3,
"Catch": [ {
"ErrorEquals": ["TimeoutError"],
"ResultPath": "$.error-info",
"Next": "FailState"
}],
"Retry": [
{
"ErrorEquals": ["States.Timeout"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.TaskFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.Permissions"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ResultPathMatchFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ParameterPathFailure"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.BranchFailed"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.NoChoiceMatched"],
"MaxAttempts": 0
},
{
"ErrorEquals": ["States.ALL"],
"IntervalSeconds": 5,
"MaxAttempts": 2,
"BackoffRate": 2.0
}
],
"End": True
},
"FailState": {
"Type": "Fail",
"Error": "TimeOut",
"Cause": "Execution fail, it take more than 15 minites even for largest instance, please change input accordingly"
}
}
}
json_data = json.dumps(data)
# print(json_data)
# raise Exception("eieie")
return json_data
def create_state_machine(self, name, definition, role_arn):
"""
Create a state machine.
PARAMS
@name: name of the state machine
@defination: json definition of the state machine
@role_arn: Arn of the role created for this state machine
"""
response = self.client.create_state_machine(
name=name,
definition=definition,
roleArn=role_arn
)
sm_arn = response.get('stateMachineArn')
if sm_arn:
print('State Machine {0} with arn {1} created successfully'.format(
name, sm_arn
))
return sm_arn
def get_state_machine(self, name):
"""
Get a state machine given its name
"""
response = self.client.list_state_machines()
print(response)
if not response.get('stateMachines'):
return None
for sm in response.get('stateMachines'):
if sm['name'] == name:
return sm['stateMachineArn']
def get_execution(self,arn,statusFilterV=None):
if(statusFilterV!=None):
# print(statusFilterV)
# print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
response = self.client.list_executions(
stateMachineArn=arn,
statusFilter=statusFilterV,
maxResults=1000)
else:
response = self.client.list_executions(stateMachineArn=arn,maxResults=1000)
return response
def create_execution(self, sm_arn, input_data):
"""
Create an execution for a state machine.
PARAMS
@sm_arn: Arn of the state machine that is to be executed
@input_data: Input json data to be passed for the execution
"""
execution_response = self.client.start_execution(
stateMachineArn=sm_arn,
input=input_data
)
execution_arn = execution_response.get('executionArn')
return execution_arn
def dummy_state_machine(self, sm_name, lambda_1_arn, lambda_2_arn):
"""
Create a dummy state machine
https://states-language.net/spec.html
"""
state_function_definition = {
"Comment": "A dummy state machine",
"StartAt": "State1",
"States": {
"State1": {
"Resource": lambda_1_arn,
"Type": "Task",
"Next": "State2"
},
"State2": {
"Type": "Task",
"Resource": lambda_2_arn,
"Next": "End"
},
"End": {
"Type": "Succeed"
}
}
}
with open('/tmp/dummy-sm.json', 'wb') as jsonfile:
json.dump(state_function_definition, jsonfile, indent=4)
self.create_state_machine(
name=sm_name, definition=state_function_definition
)
# a =StateFunctionWrapper()
# x=a.get_execution("arn:aws:states:eu-central-1:251584899486:stateMachine:numpy_test-1555951640.059513")
# print(x)
# response = self.client.get_execution_history(
# executionArn=index['executionArn'],
# maxResults=10,
# reverseOrder=True
# )
# buildStateChecker('','arn:aws:states:eu-central-1:251584899486:stateMachine:my_map_function3-1554164784.7738473','')
|
988,010 | 4ccedcb381b0a8b573adaa77b6c8fd3033c77920 | """
Author: Srayan Gangopadhyay
2020-08-06
met.no symbol names mapped to emoji
"""
# met.no weather symbol code -> emoji. Keys are the raw symbol_code values
# from the met.no Locationforecast API (day/night/polartwilight variants
# share an emoji where no distinct glyph exists).
symbols = {
    'fog': '🌫',
    # Rain / thunder variants.
    'heavyrain': '🌧',
    'heavyrainandthunder': '⛈',
    'heavyrainshowers_day': '🌧',
    'heavyrainshowers_night': '🌧',
    'heavyrainshowers_polartwilight': '🌧',
    'heavyrainshowersandthunder_day': '⛈',
    'heavyrainshowersandthunder_night': '⛈',
    'heavyrainshowersandthunder_polartwilight': '⛈',
    # Sleet and snow map to the same snow-cloud glyph.
    'heavysleet': '🌨',
    'heavysleetandthunder': '🌨',
    'heavysleetshowers_day': '🌨',
    'heavysleetshowers_night': '🌨',
    'heavysleetshowers_polartwilight': '🌨',
    'heavysleetshowersandthunder_day': '🌨',
    'heavysleetshowersandthunder_night': '🌨',
    'heavysleetshowersandthunder_polartwilight': '🌨',
    'heavysnow': '🌨',
    'heavysnowandthunder': '🌨',
    'heavysnowshowers_day': '🌨',
    'heavysnowshowers_night': '🌨',
    'heavysnowshowers_polartwilight': '🌨',
    'heavysnowshowersandthunder_day': '🌨',
    'heavysnowshowersandthunder_night': '🌨',
    'heavysnowshowersandthunder_polartwilight': '🌨',
    'lightrain': '☔',
    'lightrainandthunder': '⛈',
    'lightrainshowers_day': '☔',
    'lightrainshowers_night': '☔',
    'lightrainshowers_polartwilight': '☔',
    'lightrainshowersandthunder_day': '⛈',
    'lightrainshowersandthunder_night': '⛈',
    'lightrainshowersandthunder_polartwilight': '⛈',
    'lightsleet': '❄',
    'lightsleetandthunder': '⛈',
    'lightsleetshowers_day': '❄',
    'lightsleetshowers_night': '❄',
    'lightsleetshowers_polartwilight': '❄',
    'lightsnow': '🌨',
    'lightsnowandthunder': '⛈',
    'lightsnowshowers_day': '🌨',
    'lightsnowshowers_night': '🌨',
    'lightsnowshowers_polartwilight': '🌨',
    # NOTE: the 'lightssleet...'/'lightssnow...' double-s spellings below
    # are met.no's own symbol codes, not typos here.
    'lightssleetshowersandthunder_day': '⛈',
    'lightssleetshowersandthunder_night': '⛈',
    'lightssleetshowersandthunder_polartwilight': '⛈',
    'lightssnowshowersandthunder_day': '⛈',
    'lightssnowshowersandthunder_night': '⛈',
    'lightssnowshowersandthunder_polartwilight': '⛈',
    # Cloud cover.
    'partlycloudy_day': '🌤',
    'partlycloudy_night': '⛅',
    'partlycloudy_polartwilight': '⛅',
    'rain': '🌧',
    'rainandthunder': '⛈',
    'rainshowers_day': '🌧',
    'rainshowers_night': '🌧',
    'rainshowers_polartwilight': '🌧',
    'rainshowersandthunder_day': '⛈',
    'rainshowersandthunder_night': '⛈',
    'rainshowersandthunder_polartwilight': '⛈',
    'sleet': '🌨',
    'sleetandthunder': '⛈',
    'sleetshowers_day': '🌨',
    'sleetshowers_night': '🌨',
    'sleetshowers_polartwilight': '🌨',
    'sleetshowersandthunder_day': '⛈',
    'sleetshowersandthunder_night': '⛈',
    'sleetshowersandthunder_polartwilight': '⛈',
    'snow': '🌨',
    'snowandthunder': '⛈',
    'snowshowers_day': '🌨',
    'snowshowers_night': '🌨',
    'snowshowers_polartwilight': '🌨',
    'snowshowersandthunder_day': '⛈',
    'snowshowersandthunder_night': '⛈',
    'snowshowersandthunder_polartwilight': '⛈',
    # Clear / fair skies.
    'clearsky_day': '🌞',
    'clearsky_night': '🌒',
    'clearsky_polartwilight': '🌆',
    'cloudy': '☁',
    'fair_day': '🌞',
    'fair_night': '🌒',
    'fair_polartwilight': '🌆'
}
|
988,011 | 3f38509080fe33991cf7193c27661f0f3679dac3 | # - Create a variable named `ai`
# with the following content: `[3, 4, 5, 6, 7]`
# - Print the sum of the elements in `ai`
ai = [3, 4, 5, 6, 7]


def summa(x):
    """Return the sum of the numbers in *x*.

    Fix: the original accumulated the total but never returned it, so
    print(summa(ai)) printed None instead of 25.
    """
    total = 0
    for value in x:
        total += value
    return total


print(summa(ai))
988,012 | bcb9bc525f768b060eb138dce9a9ffc4cdaace70 | # -*- coding: utf-8 -*
class ConfigParam:
    """Lightweight configuration holder.

    Stores the raw dict in ``self.param`` and mirrors every key as an
    instance attribute, without clobbering attributes that already exist
    (so a ``"param"`` key, for example, cannot overwrite the raw dict).
    """

    def __init__(self, params):
        self.param = params
        for key, value in params.items():
            if not hasattr(self, key):
                setattr(self, key, value)
|
988,013 | 559f2a8cefa543431b1f339a52e435f59ec34a43 | n, m = [int(i) for i in input().split()]
# Modulus for the path count (1e9+7, the usual competitive-programming prime).
MOD = 10**9+7
# Read the n x m grid character-by-character; '#' marks a blocked cell.
# (n and m are parsed from the first stdin line above.)
arr = [[0]*m for i in range(n)]
for i in range(n):
    inp = input()
    for j in range(m):
        arr[i][j] = inp[j]
def solve(arr, n, m, mod=10**9 + 7):
    """Count monotone (right/down) paths from (0,0) to (n-1,m-1).

    @arr: the grid as a sequence of rows, each indexable by column, with
          '#' marking a blocked cell
    @n, @m: grid dimensions
    @mod: modulus for the count (new parameter, defaults to the module's
          MOD value so existing callers are unaffected; removes the
          hidden dependency on the global)
    Returns the number of paths modulo *mod* (0 when unreachable).
    """
    # dp[i][j] = paths reaching cell (i-1, j-1); 1-indexed with a zero
    # border so the recurrence needs no bounds checks.
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    # Seed the first column: reachable only until the first wall.
    for i in range(1, n + 1):
        if arr[i - 1][0] == '#':
            break
        dp[i][1] = 1
    # Seed the first row likewise.
    for j in range(1, m + 1):
        if arr[0][j - 1] == '#':
            break
        dp[1][j] = 1
    for i in range(2, n + 1):
        for j in range(2, m + 1):
            if arr[i - 1][j - 1] == '#':
                continue
            dp[i][j] = (dp[i - 1][j] + dp[i][j - 1]) % mod
    return dp[n][m] % mod
print(solve(arr,n,m)) |
988,014 | 14099e2347bf092623a6f020a56d3c6fe5925597 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
def user_is_basenodeadmin(userobj, *basenode_modelsclasses):
    """
    Check if the given user is admin on any of the given
    ``basenode_modelsclasses``.
    :param basenode_modelsclasses:
        Model classes. They must have an ``admins`` one-to-many relationship
        with User.
    Returns ``True`` as soon as one class has the user among its admins,
    ``False`` otherwise (including when no classes are given).
    """
    return any(
        cls.objects.filter(admins__id=userobj.id).exists()
        for cls in basenode_modelsclasses
    )
def user_is_nodeadmin(userobj):
    """
    Check if the given user is admin on any node.

    Returns ``True``/``False``; delegates to :func:`user_is_basenodeadmin`.
    """
    from .node import Node
    return user_is_basenodeadmin(userobj, Node)
def user_is_subjectadmin(userobj):
    """
    Check if the given user is admin on any subject.

    Returns ``True``/``False``; delegates to :func:`user_is_basenodeadmin`.
    """
    from .subject import Subject
    return user_is_basenodeadmin(userobj, Subject)
def user_is_periodadmin(userobj):
    """
    Check if the given user is admin on any period.

    Returns ``True``/``False``; delegates to :func:`user_is_basenodeadmin`.
    """
    from .period import Period
    return user_is_basenodeadmin(userobj, Period)
def user_is_assignmentadmin(userobj):
    """
    Check if the given user is admin on any assignment.

    Returns ``True``/``False``; delegates to :func:`user_is_basenodeadmin`.
    """
    from .assignment import Assignment
    return user_is_basenodeadmin(userobj, Assignment)
def user_is_admin(userobj):
    """
    Check if the given user is admin on any node, subject, period or
    assignment.

    Imports are local to avoid circular imports between the core models.
    """
    from .node import Node
    from .subject import Subject
    from .period import Period
    from .assignment import Assignment
    return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)
def user_is_admin_or_superadmin(userobj):
    """
    Return ``True`` when ``userobj.is_superuser`` is set; otherwise fall
    back to :func:`user_is_admin`.
    """
    if userobj.is_superuser:
        return True
    return user_is_admin(userobj)
def user_is_examiner(userobj):
    """
    Returns ``True`` if the given ``userobj`` is examiner on any AssignmentGroup.

    Only published groups count (``published_where_is_examiner``).
    """
    from .assignment_group import AssignmentGroup
    return AssignmentGroup.published_where_is_examiner(userobj).exists()
def user_is_student(userobj):
    """
    Returns ``True`` if the given ``userobj`` is candidate on any AssignmentGroup.

    Only published groups count (``published_where_is_candidate``).
    """
    from .assignment_group import AssignmentGroup
    return AssignmentGroup.published_where_is_candidate(userobj).exists()
class DevilryUserProfile(models.Model):
    """ User profile with a one-to-one relation to ``django.contrib.auth.models.User``.
    Meant to be used as a Django *user profile* (AUTH_PROFILE_MODULE).
    .. attribute:: full_name
        Django splits names into first_name and last_name. They are only 30 chars each.
        Read about why this is not a good idea here:
        http://www.kalzumeus.com/2010/06/17/falsehoods-programmers-believe-about-names/
        Since we require support for *any* name, we use our own ``full_name``
        field, and ignore the one in Django. Max length 300. Optional.
    .. attribute:: languagecode
        Used to store the preferred language for a user.
        Not required (The UI defaults to the default language)
    """
    # NOTE(review): OneToOneField without on_delete - fine on Django < 2.0,
    # required from 2.0 on; confirm the project's Django version.
    user = models.OneToOneField(User) # This field is required, and it must be named ``user`` (because the model is used as a AUTH_PROFILE_MODULE)
    full_name = models.CharField(max_length=300, blank=True, null=True)
    languagecode = models.CharField(max_length=100, blank=True, null=True)
    class Meta:
        # Explicit app label: this model lives outside the app's models.py.
        app_label = 'core'
    def get_displayname(self):
        """
        Get a name for this user, preferably the full name, but falls back
        to the username if that is unavailable.
        """
        return self.full_name or self.user.username
def create_user_profile(sender, instance, created, **kwargs):
    """post_save hook: create a DevilryUserProfile for each newly created User."""
    if created:
        DevilryUserProfile.objects.create(user=instance)
# Register the hook so every new User automatically gets a profile.
post_save.connect(create_user_profile, sender=User)
|
988,015 | 71471ad966704ecab1d1ec0e33c433ca36039829 | num1 = input("Enter the first number: ")
num2 = input("Enter the second number: ")
num3 = input("Enter the third number: ")
if num1 > num2:
if num1 > num3:
print(str(num1) + " is greater")
else:
print(str(num3) + " is greater")
else:
print(str(num2) + " is greater")
|
988,016 | 0512abc769cc8b621a1ee6cfa668a4d6b41e9984 | ii = [('CookGHP3.py', 1), ('MarrFDI.py', 1), ('SadlMLP.py', 2), ('UnitAI.py', 1), ('LeakWTI3.py', 1), ('ChalTPW2.py', 1), ('AdamWEP.py', 1), ('FitzRNS3.py', 49), ('WilkJMC2.py', 2), ('CrokTPS.py', 1), ('ClarGE.py', 1), ('LyelCPG.py', 1), ('AinsWRR.py', 1), ('BackGNE.py', 6), ('BachARE.py', 5), ('WestJIT.py', 1), ('FitzRNS4.py', 2), ('CoolWHM3.py', 2), ('FitzRNS.py', 32), ('SomeMMH.py', 10), ('BrewDTO.py', 6), ('DibdTRL.py', 1), ('SadlMLP2.py', 3)] |
988,017 | d6a7b6d290650adcb76df256fcfc3772446385f6 |
def count_smileys(lst):
from re import match
return len([x for x in lst if match("[:;][-~]?[\)D]",x)])
|
988,018 | 76523f259ab1dd65607cfe46d9e56a1a510248a7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ParticipantInfo import ParticipantInfo
class AlipayCommerceEducateInfoParticipantCertifyModel(object):
    """Request model for alipay.commerce.educate.info.participant.certify.

    Fields: apply_note_info, extend_info, from_code, participant_info
    (a list whose plain-dict elements are coerced to ParticipantInfo by
    the setter), source_id.

    Fix over the generated original: ``to_alipay_dict`` no longer mutates
    ``self.participant_info`` in place while serializing - it builds the
    serialized list locally.
    """

    # Serialization order, matching the generated SDK code.
    _FIELD_NAMES = ('apply_note_info', 'extend_info', 'from_code',
                    'participant_info', 'source_id')

    def __init__(self):
        self._apply_note_info = None
        self._extend_info = None
        self._from_code = None
        self._participant_info = None
        self._source_id = None

    @property
    def apply_note_info(self):
        return self._apply_note_info

    @apply_note_info.setter
    def apply_note_info(self, value):
        self._apply_note_info = value

    @property
    def extend_info(self):
        return self._extend_info

    @extend_info.setter
    def extend_info(self, value):
        self._extend_info = value

    @property
    def from_code(self):
        return self._from_code

    @from_code.setter
    def from_code(self, value):
        self._from_code = value

    @property
    def participant_info(self):
        return self._participant_info

    @participant_info.setter
    def participant_info(self, value):
        # Only list assignments are accepted (matching the original);
        # each element is coerced to a ParticipantInfo instance.
        if isinstance(value, list):
            self._participant_info = [
                i if isinstance(i, ParticipantInfo)
                else ParticipantInfo.from_alipay_dict(i)
                for i in value
            ]

    @property
    def source_id(self):
        return self._source_id

    @source_id.setter
    def source_id(self, value):
        self._source_id = value

    @staticmethod
    def _serialize(value):
        """Recursively convert a field value to plain dict/list form."""
        if isinstance(value, list):
            return [AlipayCommerceEducateInfoParticipantCertifyModel._serialize(i)
                    for i in value]
        if hasattr(value, 'to_alipay_dict'):
            return value.to_alipay_dict()
        return value

    def to_alipay_dict(self):
        """Return a plain dict of all truthy fields; *self* is not mutated."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if value:
                params[name] = self._serialize(value)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayCommerceEducateInfoParticipantCertifyModel()
        for name in AlipayCommerceEducateInfoParticipantCertifyModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
|
988,019 | 3b88578086f3206a442e95abb4b88a0d2c17aab7 | from numpy import asarray
from numpy import arange
from numpy.random import rand
import math
import numpy as np
import argparse
def vector_arc_distance(v_1, v_2):
    """Great-circle (arc) distance between two points on the unit sphere.

    Based on https://stackoverflow.com/questions/52210911/great-circle-distance-between-two-p-x-y-z-points-on-a-unit-sphere:
    for chord length delta = |v_2 - v_1|, arc = 2 * R * asin(delta / (2R)).

    Fix: the original returned ``2 * 1 * delta / 2 / 1``, which cancels to
    the chord length delta itself - not the arc. Both inputs are assumed
    to lie on the unit sphere (R = 1).
    """
    delta = math.sqrt(
        (v_2[0] - v_1[0]) ** 2 + (v_2[1] - v_1[1]) ** 2 + (v_2[2] - v_1[2]) ** 2
    )
    return 2 * math.asin(delta / 2)  # R = 1 (unit sphere)
def midpoint(p1, p2):
    """Component-wise midpoint of two 3-D points, as a numpy array.

    May serve as the no-gradient-descent baseline output.
    """
    return np.array([(p1[axis] + p2[axis]) / 2 for axis in range(3)])
def objective_function(x):
    """Placeholder objective for the gradient-descent demo.

    Currently the identity (times 1, so arrays come back as copies rather
    than aliases); meant to be replaced by the real loss between the
    midpoint and the candidate point.
    """
    return 1 * x  # change this to our actual function
def derivitive(x):
    """Placeholder gradient (identity).

    The derivative of a sum is the sum of the derivatives, so per-term
    gradients of the arc-distance sum can be combined here later.
    """
    return 1 * x
def mse(A, B):
    """Mean squared error between A and B along axis 0 (numpy only, since
    no other external packages are available for the loss)."""
    diff = A - B
    return np.mean(diff ** 2, axis=0)
def gradient_descent(objective_function, derivative, boundaries, iterations, step_size):
    """Plain gradient descent from a random start inside a box.

    @objective_function: callable scoring a candidate point
    @derivative: callable returning the gradient at a point
    @boundaries: array of shape (n_dims, 2) holding [min, max] per dimension
    @iterations: number of update steps
    @step_size: learning rate
    Returns [outputs, scores]: candidate points and their scores per step.

    Fix: the loop now calls the *derivative* argument; the original
    ignored it and called the module-level ``derivitive`` directly, so
    passing a different gradient function had no effect.
    """
    outputs = list()
    scores = list()
    # Random starting point uniformly inside the box.
    output = boundaries[:, 0] + rand(len(boundaries)) * (
        boundaries[:, 1] - boundaries[:, 0]
    )
    for _ in range(iterations):
        gradient = derivative(output)
        # Take one step against the gradient.
        output = output - step_size * gradient
        output_eval = objective_function(output)
        outputs.append(output)
        scores.append(output_eval)
        # report progress
        print(output, output_eval)
    return [outputs, scores]
if __name__ == "__main__":
    # Two fixed reference points on/near the sphere (note [1,1,1] is not
    # unit-length - presumably placeholders; confirm before real use).
    input_1 = np.array([1, 1, 1])
    input_2 = np.array([0, 0, 0])
    # Search box: each coordinate constrained to [-1, 1].
    boundaries = asarray([[-1.0, 1.0], [-1, 1], [-1, 1]])
    # Example random point inside the box (unused below; gradient_descent
    # draws its own start).
    rand_starting_vector = boundaries[:, 0] + rand(len(boundaries)) * (
        boundaries[:, 1] - boundaries[:, 0]
    )
    # Descent hyper-parameters.
    iterations = 30
    step_size = 0.1
    # learning_rate = None TODO - add this in manually
    # Run the search with the placeholder objective/derivative.
    outputs, scores = gradient_descent(
        objective_function, derivitive, boundaries, iterations, step_size
    )
    # Score the two reference inputs for comparison.
    results_1 = objective_function(input_1)
    results_2 = objective_function(input_2)
    print(results_1)
    print(outputs)
|
988,020 | 70bbb1d04bec0d9ec8f51ca2c37970c226dff963 | #!/usr/bin/python
from tkinter import *
import time
import math
import numpy as np
from sequence_generator import SequenceGenerator
# One full angle sequence lasts this many milliseconds.
SEQUENCE_DURATION_MS = 4000
# Noon, 3, Noon, 3, Noon, 9, Noon, 9
SEQUENCE = [0, 90, 0, 90, 0, -90, 0, -90]
# SEQUENCE = [0,90]
# 800x800 Tk window with one canvas holding a bouncing ball (oval) and a
# swinging arm (line).
gui = Tk()
gui.geometry("800x800")
c = Canvas(gui ,width=800 ,height=800)
c.pack()
oval = c.create_oval(5,5,60,60,fill='black')
# Arm pivot point (canvas coordinates) and arm length in pixels.
start_point_x = 400
start_point_y = 700
line_length = 300
line = c.create_line(
    start_point_x,
    start_point_y,
    start_point_x,
    start_point_y - line_length,
    fill="black")
# Per-frame velocity of the bouncing ball.
xd = 5
yd = 10
gui.title("Neato")
# Angle source driving the arm (from the local sequence_generator module).
sg = SequenceGenerator(SEQUENCE_DURATION_MS, SEQUENCE)
start_time = time.time()
def convert_rad_to_x_y(degrees, length):
    """Project an angle onto x/y offsets of the given length.

    Despite the name, the input is in *degrees* (measured from vertical:
    0 degrees points straight up, 90 points right).
    """
    theta = math.radians(degrees)
    return math.sin(theta) * length, math.cos(theta) * length
# Animation loop: bounce the ball off the walls and swing the arm to the
# angle the sequence generator prescribes for the elapsed time.
while True:
    c.move(oval, xd, yd)
    elapsed_time = (time.time() - start_time) * 1000
    angle = sg.position(elapsed_time)
    x, y = convert_rad_to_x_y(angle, line_length)
    # print("{},\t{},\t{},\t{}".format(elapsed_time, angle,x,y))
    c.coords(
        line,
        start_point_x,
        start_point_y,
        start_point_x + x,
        start_point_y - y)
    p = c.coords(oval)
    # Reverse velocity on wall contact.
    if p[3] >= 800 or p[1] <= 0:
        yd = -yd
    if p[2] >= 800 or p[0] <= 0:
        xd = -xd
    gui.update()
    time.sleep(0.025)

# Fix: the original file had `import numpy as np` fused onto the
# time.sleep line above (a syntax error from file concatenation); it is
# split onto its own line here for the script that follows.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation,FFMpegFileWriter
# Animated plot of y = exp(-x^2), drawn point-by-point over 200 frames.
fig, ax = plt.subplots()
xdata, ydata = [], []
ln, = plt.plot([], [], 'r', animated=True)
# Frame values: x positions from -3 to 3.
f = np.linspace(-3, 3, 200)
def init():
    # Fix the axes so the growing curve does not rescale the view.
    ax.set_xlim(-3, 3)
    ax.set_ylim(-0.25, 2)
    ln.set_data(xdata,ydata)
    return ln,
def update(frame):
    # Append the next point and redraw only the line artist (blit=True).
    xdata.append(frame)
    ydata.append(np.exp(-frame**2))
    ln.set_data(xdata, ydata)
    return ln,
ani = FuncAnimation(fig, update, frames=f,
                    init_func=init, blit=True, interval = 2.5,repeat=False)
plt.show() |
988,021 | cfff891b6231b5399c1a0795c7dcde2665c1837a | # -*- coding: utf-8 -*-
import sys
sys.path.insert(0, 'src')
import json
import pickle
import zipfile
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
import jieba
import keras
import numpy as np
from keras.applications.resnet50 import ResNet50
from keras.preprocessing.image import (load_img, img_to_array)
from tqdm import tqdm
from config import img_rows, img_cols
from config import start_word, stop_word, unknown_word
from config import train_annotations_filename
from config import train_folder, valid_folder, test_a_folder, test_b_folder
from config import train_image_folder, valid_image_folder, test_a_image_folder, test_b_image_folder
from config import valid_annotations_filename
# ImageNet-pretrained ResNet50 used as a global feature extractor:
# include_top=False drops the fully-connected classification head, and
# pooling='avg' reduces each image to a single pooled feature vector.
image_model = ResNet50(include_top=False, weights='imagenet', pooling='avg')
#确定是否存在文件夹
def ensure_folder(folder):
    """Create *folder* (including parents) if it does not already exist.

    Uses exist_ok=True instead of the original check-then-create, which
    could raise if the folder appeared between the check and makedirs.
    """
    os.makedirs(folder, exist_ok=True)
#解压文件
def extract(folder):
    """Unzip ``<folder>.zip`` into the local ``data`` directory."""
    archive = '{}.zip'.format(folder)
    print('Extracting {}...'.format(archive))
    with zipfile.ZipFile(archive) as archive_file:
        archive_file.extractall('data')
#将图像文件编码
def encode_images(usage):
    """Run every .jpg of the chosen split through ResNet50 and pickle the
    {image name -> feature vector} map to data/encoded_<usage>_images.p.

    @usage: 'train', 'valid', 'test_a' or anything else (treated as
            'test_b'); selects the image folder configured in config.py.
    Uses the module-level ``image_model`` (pooled ResNet50).
    """
    encoding = {}
    if usage == 'train':
        image_folder = train_image_folder
    elif usage == 'valid':
        image_folder = valid_image_folder
    elif usage == 'test_a':
        image_folder = test_a_image_folder
    else: # usage == 'test_b':
        image_folder = test_b_image_folder
    # Feed the network 256 images at a time.
    batch_size = 256
    names = [f for f in os.listdir(image_folder) if f.endswith('.jpg')]
    # Batch count rounded up so the final partial batch is processed too.
    num_batches = int(np.ceil(len(names) / float(batch_size)))
    print('ResNet50提取特征中...')  # "extracting features with ResNet50..."
    for idx in range(num_batches):
        # i = index of the first image in this batch; the last batch may
        # be shorter than batch_size.
        i = idx * batch_size
        length = min(batch_size, (len(names) - i))
        image_input = np.empty((length, img_rows, img_cols, 3))
        for i_batch in range(length):
            image_name = names[i + i_batch]
            filename = os.path.join(image_folder, image_name)
            # Resize to the network input size (img_rows x img_cols).
            img = load_img(filename, target_size=(img_rows, img_cols))
            img_array = img_to_array(img)
            # Keras ResNet50 preprocessing (caffe-style mean centering).
            img_array = keras.applications.resnet50.preprocess_input(img_array)
            image_input[i_batch] = img_array
        # One pooled feature vector per image.
        preds = image_model.predict(image_input)
        for i_batch in range(length):
            image_name = names[i + i_batch]
            encoding[image_name] = preds[i_batch]
    filename = 'data/encoded_{}_images.p'.format(usage)
    # Serialize the whole mapping in one pickle file.
    with open(filename, 'wb') as encoded_pickle:
        pickle.dump(encoding, encoded_pickle)
    print('ResNet50提取特征完毕...')  # "feature extraction finished"
#处理数据集的标注部分,生成训练集的词库
def build_train_vocab():
    """Build the training vocabulary.

    Segments every training caption with jieba, adds the <start>/<stop>/
    <UNK> markers from config, and pickles the resulting set to
    data/vocab_train.p.
    """
    annotations_path = os.path.join(train_folder, train_annotations_filename)
    with open(annotations_path, 'r') as f:
        annotations = json.load(f)
    # NOTE(review): the '{}' placeholder below is never filled in - the
    # message prints literally.
    print('building {} train vocab')
    vocab = set()
    for a in tqdm(annotations):
        caption = a['caption']
        for c in caption:
            # Segment the caption into words.
            seg_list = jieba.cut(c)
            for word in seg_list:
                vocab.add(word)
    # Sentence-boundary and out-of-vocabulary markers.
    vocab.add(start_word)
    vocab.add(stop_word)
    vocab.add(unknown_word)
    filename = 'data/vocab_train.p'
    with open(filename, 'wb') as encoded_pickle:
        pickle.dump(vocab, encoded_pickle)
#创建samples
def build_samples(usage):
    """Turn captions into next-word prediction samples.

    For each caption prefix one sample is emitted:
    {'image_id', 'input': word indices so far, 'output': next word index},
    ending with a <stop> sample per caption. Results are pickled to
    data/samples_<usage>.p.

    @usage: 'train' selects the training annotations; anything else is
            treated as the validation split.
    """
    if usage == 'train':
        annotations_path = os.path.join(train_folder, train_annotations_filename)
    else:
        annotations_path = os.path.join(valid_folder, valid_annotations_filename)
    with open(annotations_path, 'r') as f:
        annotations = json.load(f)
    # Word <-> index mappings derived from the pickled training vocabulary.
    vocab = pickle.load(open('data/vocab_train.p', 'rb'))
    idx2word = sorted(vocab)
    word2idx = dict(zip(idx2word, range(len(vocab))))
    print('building {} samples'.format(usage))
    samples = []
    for a in tqdm(annotations):
        image_id = a['image_id']
        caption = a['caption']
        for c in caption:
            seg_list = jieba.cut(c)
            input = []
            # Every sequence starts from the <start> marker.
            last_word = start_word
            for j, word in enumerate(seg_list):
                # Out-of-vocabulary words map to <UNK>.
                if word not in vocab:
                    word = unknown_word
                input.append(word2idx[last_word])
                samples.append({'image_id': image_id, 'input': list(input), 'output': word2idx[word]})
                last_word = word
            # Final sample: predict <stop> after the full caption.
            input.append(word2idx[last_word])
            samples.append({'image_id': image_id, 'input': list(input), 'output': word2idx[stop_word]})
    filename = 'data/samples_{}.p'.format(usage)
    with open(filename, 'wb') as f:
        pickle.dump(samples, f)
#主函数
if __name__ == '__main__':
    # Pipeline driver: every step is cached on disk and skipped when its
    # output file already exists.
    # Ensure the working data directory exists.
    ensure_folder('data')
    # Archive extraction is currently disabled; re-enable when the raw
    # zip files are present on disk.
    # if not os.path.isdir(train_image_folder):
    #extract(train_folder)
    # if not os.path.isdir(valid_image_folder):
    #extract(valid_folder)
    # if not os.path.isdir(test_a_image_folder):
    #extract(test_a_folder)
    # if not os.path.isdir(test_b_image_folder):
    #extract(test_b_folder)
    # Encode the images of each split once.
    if not os.path.isfile('data/encoded_train_images.p'):
        encode_images('train')
    if not os.path.isfile('data/encoded_valid_images.p'):
        encode_images('valid')
    if not os.path.isfile('data/encoded_test_a_images.p'):
        encode_images('test_a')
    if not os.path.isfile('data/encoded_test_b_images.p'):
        encode_images('test_b')
    # Build the vocabulary from the training captions.
    if not os.path.isfile('data/vocab_train.p'):
        build_train_vocab()
    # Build the (image, partial caption, next word) samples per split.
    if not os.path.isfile('data/samples_train.p'):
        build_samples('train')
    if not os.path.isfile('data/samples_valid.p'):
        build_samples('valid')
def test_gen():
    # Ad-hoc helper: re-encode the test_a split regardless of cache state.
    encode_images('test_a')
|
988,022 | 56476b23c41882993a6a7a8d56dea9c95fff88b2 | import os
import io
import shlex
from datetime import datetime, timedelta
import getopt
import requests
from dogbot.cqsdk import CQImage, RcvdPrivateMessage
from PIL import Image
from config import config
from dogbot.cqsdk.utils import reply
BASE_URL = 'http://s3-ap-northeast-1.amazonaws.com/assets.millennium-war.net/00/html/image/'
def poster(bot, message):
    """#poster [-h] [-f]
    -h : 打印本帮助
    -f : 强制刷新
    """
    # NOTE: the docstring above is sent verbatim to users via reply(), so it
    # is deliberately kept in Chinese ("-h: print this help, -f: force refresh").
    # Tokenize the incoming text; a quoting error means it's not a command.
    try:
        cmd, *args = shlex.split(message.text)
    except ValueError:
        return False
    # Only react to "<trigger char>poster".
    if not cmd[0] in config['trigger']:
        return False
    if not cmd[1:] == 'poster':
        return False
    try:
        options, args = getopt.gnu_getopt(args, 'hf')
    except getopt.GetoptError:
        # Malformed options: show usage and stop.
        reply(bot, message, poster.__doc__)
        return True
    refresh = False
    # Unpack recognised options.
    for o, a in options:
        if o == '-h':
            # Help requested.
            reply(bot, message, poster.__doc__)
            return True
        elif o == '-f':
            refresh = True
    # Find the most recent Thursday (weekday 3) on or before today —
    # presumably the day new event posters are published; TODO confirm.
    weekday = datetime.now().weekday()
    if weekday >= 3:
        delta = timedelta(days=weekday-3)
    else:
        delta = timedelta(days=7+weekday-3)
    thu_date = datetime.now().date() - delta
    url = '{}event{}.jpg'.format(BASE_URL, thu_date.strftime('%Y%m%d'))
    filename = os.path.basename(url)
    # Cache directory inside the CQ image tree.
    dir = os.path.join(config['cq_root_dir'], config['cq_image_dir'], 'poster')
    path = os.path.join(dir, filename)
    if not os.path.exists(dir):
        os.mkdir(dir)
    # Download when not cached yet, or when -f forces a refresh.
    if not os.path.exists(path) or refresh:
        resp = requests.get(url, timeout=60, proxies=config.get('proxies'))
        if not resp.status_code == 200:
            # "Poster not found... not published yet, or a network problem?"
            reply(bot, message, '没找到海报...还没更新或者是网络问题?')
            return True
        # Re-save at low JPEG quality to keep the cached file small.
        img = Image.open(io.BytesIO(resp.content))
        img.save(path, quality=40)
        # with open(path, 'wb') as f:
        #     f.write(resp.content)
    reply(bot, message, CQImage(os.path.join('poster', filename)))
    return True
if __name__ == '__main__':
    # Manual smoke test; NOTE(review): passes a dummy bot ([]) — reply() is
    # presumably tolerant of it, verify before relying on this harness.
    poster([], RcvdPrivateMessage(qq=123, text='#poster'))
|
988,023 | 9761985014678d04a67088e2817e98dd375bf0cc | from flask import Flask, render_template, g, request, redirect, url_for
import time
import RPi.GPIO as GPIO
app = Flask(__name__)
left_door = 24
right_door = 25
def open_door(door):
    # Pulse the given BCM pin high for 0.5 s to trigger the garage opener,
    # then release the pin and reset the GPIO state.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(door, GPIO.OUT, initial = GPIO.LOW)
    GPIO.output(door, GPIO.HIGH)
    time.sleep(.5)
    GPIO.output(door, GPIO.LOW)
    GPIO.cleanup()
@app.route("/")
def index():
    # Serve the control page with the door buttons.
    return render_template('index.html')
@app.route("/opener")
def opener():
    # Trigger whichever door is named anywhere in the query-string values
    # (Python 2 file — print statements are intentional).
    if "LEFT" in request.args.values():
        open_door(left_door)
        print "opening left door"
    elif "RIGHT" in request.args.values():
        open_door(right_door)
        print "opening right door"
    return redirect(url_for('index'))
if __name__ == "__main__":
    # Listen on all interfaces; port 80 needs root on the Pi.
    # NOTE(review): debug=True exposes the Werkzeug debugger — disable outside the LAN.
    app.run(host = '0.0.0.0', port=80, debug = True)
|
988,024 | 53d544b1878f041cfb51bfc2a49ae176ed9ad894 | # coding: utf-8
"""
FoneStorm API 2.4.0 (Thunder)
FracTEL's Middleware API
OpenAPI spec version: 2.4.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .authorization import Authorization
from .call import Call
from .conference import Conference
from .error import Error
from .fax import Fax
from .fone_number import FoneNumber
from .fone_number_fax_options import FoneNumberFaxOptions
from .fone_number_fax_options_receive_notify import FoneNumberFaxOptionsReceiveNotify
from .fone_number_fax_options_send_notify import FoneNumberFaxOptionsSendNotify
from .fone_number_service import FoneNumberService
from .fone_number_sms_options import FoneNumberSmsOptions
from .fone_number_sms_options_receive import FoneNumberSmsOptionsReceive
from .fone_number_sms_options_receive_notify import FoneNumberSmsOptionsReceiveNotify
from .fone_number_sms_options_send_notify import FoneNumberSmsOptionsSendNotify
from .fone_number_voice_options import FoneNumberVoiceOptions
from .fone_number_voice_options_receive_notify import FoneNumberVoiceOptionsReceiveNotify
from .fone_number_voice_options_send_notify import FoneNumberVoiceOptionsSendNotify
from .inline_response_200 import InlineResponse200
from .inline_response_200_1 import InlineResponse2001
from .inline_response_200_2 import InlineResponse2002
from .inline_response_201 import InlineResponse201
from .inline_response_201_1 import InlineResponse2011
from .inline_response_201_2 import InlineResponse2012
from .inline_response_201_3 import InlineResponse2013
from .inline_response_201_4 import InlineResponse2014
from .inline_response_201_5 import InlineResponse2015
from .message import Message
|
988,025 | 2fe5a9797f4da5deb1e5eece2ef574a8ab6e62ad | #list 복사
# list copy: plain assignment only aliases, it does not copy
a= [1,2,3,4,5]
b=a
print(b)
print(id(a))
print(id(b))
print(a is b)
a[1]=8
print(b)
# [:] slicing makes a shallow copy - mutations no longer propagate
a=[1,2,3]
b = a[:]
a[1] = 5
print(b)
print(a)
# copy.copy() behaves the same as [:] (shallow copy)
from copy import copy
a = [1,2,3]
b = copy(a)
print(b is a)
# different ways to create/assign variables
a,b = ('p','b')
(a, b) = 'p','b'
[a,b] = ['p','b']
a=b='b'
a,b = (3, 4)
a,b = b,a
print(a) |
988,026 | 750407704c816481766086297c6c80df056e9420 | # %% [markdown]
# # データ構造と配列
# %%
import doctest
from typing import Any, MutableSequence, Sequence
import unittest
from unittest import result
from unittest.case import skip
import functools
# %% [markdown]
# ## データ構造と配列
# %% [markdown]
# ### 配列の必要性
class TestTotal(unittest.TestCase):
    """Unit test for total()."""
    def test_5人の点数を読み込んで合計点平均点を返す(self):
        # "<sum>,<mean>" of the five scores: 318 and 63.6.
        self.assertEqual(total([32, 68, 72, 54, 92]), '318,63.6')
def total(tensu_list):
    """Return "<sum>,<mean>" for the given list of scores.

    :param tensu_list: non-empty list of numeric scores
    :raises ZeroDivisionError: if the list is empty

    >>> total([32,68,72,54,92])
    '318,63.6'
    """
    # Builtin sum() is clearer and faster than functools.reduce with a lambda.
    total = sum(tensu_list)
    mean = total / len(tensu_list)
    # Same output shape as the original ','.join of str() conversions.
    return '{},{}'.format(total, mean)
# %% [markdown]
# ## 配列
# %% [markdown]
# ### シーケンスの要素の最大値を表示する
class TestMax(unittest.TestCase):
    """Unit test for max_of()."""
    def test_シーケンスaの要素の最大値を返却する(self):
        # 192 is the largest of the five heights.
        self.assertEqual(max_of([172, 153, 192, 140, 165]), 192)
def max_of(a: Sequence) -> Any:
    """Return the largest element of sequence *a*.

    >>> max_of([172, 153, 192, 140, 165])
    192
    """
    largest = max(a)
    return largest
# %% [markdown]
# ### ミュータブルなシーケンスの要素の並びを反転
class TestReverseArray(unittest.TestCase):
    """Unit tests for the in-place and copying reverse helpers."""
    def test_ミュータブルなシーケンスaの要素の並びを反転(self):
        # In-place reversal mutates the argument itself.
        a = [2, 5, 1, 3, 9, 6, 7]
        reverse_array_mutable(a)
        self.assertEqual(a, [7, 6, 9, 3, 1, 5, 2])
    def test_イミュータブルなシーケンスaの要素の並びを反転(self):
        # Copying reversal leaves the argument untouched and returns a new list.
        a = [2, 5, 1, 3, 9, 6, 7]
        result = reverse_array_imutable(a)
        self.assertEqual(a, [2, 5, 1, 3, 9, 6, 7])
        self.assertEqual(result, [7, 6, 9, 3, 1, 5, 2])
def reverse_array(配列: MutableSequence) -> None:
    """Reverse the mutable sequence in place.

    The original hid the swaps behind nested lambdas and a side-effecting
    list comprehension; a plain two-pointer swap loop does the same work
    and is the idiomatic form.
    """
    n = len(配列)
    for i in range(n // 2):
        # Swap the i-th element from each end.
        配列[i], 配列[n - i - 1] = 配列[n - i - 1], 配列[i]
def reverse_array_mutable(配列: MutableSequence) -> None:
    """Reverse a mutable sequence in place (thin wrapper over reverse_array).
    """
    reverse_array(配列)
def reverse_array_imutable(a: MutableSequence) -> MutableSequence:
    """Return a reversed copy of *a*; the argument itself is left untouched.
    """
    # Work on a shallow copy so the caller's sequence is not modified.
    配列 = a[:]
    reverse_array(配列)
    return 配列
# %% [markdown]
# ### 基数変換
class TestCardConv(unittest.TestCase):
    """Unit test for card_conv() (radix conversion)."""
    def test_整数値xをr進数に変換した数値を表す文字列を返却(self):
        # 29 in binary is 11101.
        self.assertEqual(card_conv(29, 2), '11101')
def card_conv(x: int, r: int) -> str:
    """Return the digits of non-negative integer *x* written in base *r*.

    :param x: value to convert (x == 0 yields the empty string, as before)
    :param r: radix, 2..36

    >>> card_conv(29, 2)
    '11101'
    """
    # Bug fix: the digit table was missing 'B' ('0123456789ACDEF...'), so
    # every digit >= 11 was off by one letter in bases 12-35.
    dchar = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    d = ''
    # Collect digits least-significant first.
    while x > 0:
        d += dchar[x % r]
        x //= r
    # Reverse into the conventional most-significant-first order.
    return d[::-1]
# %% [markdown]
# ### 素数の列挙
class TestPrime(unittest.TestCase):
    """The expected values are the counts of division/comparison operations
    performed, not the number of primes found."""
    def test_x以下の素数を列挙_1(self):
        self.assertEqual(prime_1(1000), 78022)
    def test_x以下の素数を列挙_2(self):
        self.assertEqual(prime_2(1000), 14622)
    def test_x以下の素数を列挙_3(self):
        self.assertEqual(prime_3(1000), 3774)
def prime_1(x: int) -> int:
    """Print every prime up to *x* (naive version) and return the number of
    trial divisions performed."""
    divisions = 0
    for candidate in range(2, x + 1):
        # Trial-divide by every smaller integer starting from 2.
        is_prime = True
        for divisor in range(2, candidate):
            divisions += 1
            if candidate % divisor == 0:
                is_prime = False
                break
        if is_prime:
            print(candidate)
    return divisions
def prime_2(x: int) -> int:
    """Print every prime up to *x*, trial-dividing odd candidates only by
    the primes found so far; return the number of trial divisions."""
    divisions = 0
    found = [None] * 500   # primes discovered so far
    found[0] = 2
    count = 1              # how many slots of `found` are filled
    for candidate in range(3, x + 1, 2):
        # Start at index 1: odd candidates are never divisible by 2.
        for idx in range(1, count):
            divisions += 1
            if candidate % found[idx] == 0:
                break
        else:
            # No divisor found: candidate is prime.
            print(candidate)
            found[count] = candidate
            count += 1
    return divisions
def prime_3(x: int) -> int:
    """Print every prime up to *x* (dividing only by primes up to sqrt(n))
    and return the number of multiply/divide operations performed.

    Bug fix: the candidate range was hard-coded to ``range(5, 1001, 2)``,
    so the *x* argument was silently ignored for any value other than 1000.
    """
    counter = 0
    ptr = 0
    prime = [None] * 500
    prime[ptr] = 2
    ptr += 1
    prime[ptr] = 3
    ptr += 1
    # Odd candidates only; 2 and 3 are pre-seeded above.
    for n in range(5, x + 1, 2):   # fixed: was range(5, 1001, 2)
        i = 1
        # Only primes up to sqrt(n) need to be tried.
        while prime[i] * prime[i] <= n:
            counter += 2  # one multiplication + one division
            if n % prime[i] == 0:
                break
            i += 1
        else:
            # Loop fell through without finding a divisor: n is prime.
            print(n)
            prime[ptr] = n
            ptr += 1
            counter += 1  # the final multiplication that ended the loop
    return counter
# Run the module doctests, then the unittest suite (exit=False keeps the
# interpreter/notebook alive afterwards; argv=[''] ignores CLI arguments).
doctest.testmod(verbose=True)
unittest.main(argv=[''], verbosity=2, exit=False)
|
988,027 | 970256ee81e13784efd6f58e9f1e3cb71c24bd65 | from launch import Packet
# Protocol family identifier shared by packets in this module.
PROTOCOL = 1000
class Heartbeat(Packet):
    """Keep-alive packet: protocol 1000, message type 2000."""
    # NOTE: `type` intentionally shadows the builtin as a class attribute —
    # presumably the wire-field name expected by Packet; confirm before renaming.
    protocol = 1000
    type = 2000
|
988,028 | 698db856e58c39e7a579b0886a30fd85f22f8d51 | import logging
from sn_agent.job.job_descriptor import JobDescriptor
from sn_agent.ontology.service_descriptor import ServiceDescriptor
logger = logging.getLogger(__name__)
async def can_perform_service(app, service_descriptor: ServiceDescriptor):
    """Ask the registered adapter whether the described service can run.

    Raises a plain Exception when no adapter is registered for the
    service's ontology node id.
    """
    logger.debug("get_can_perform: %s", service_descriptor)
    adapter = app['service_manager'].get_service_adapter_for_id(
        service_descriptor.ontology_node_id)
    if adapter is None:
        raise Exception('Service not available')
    return adapter.can_perform()
async def perform_job(app, job_descriptor: JobDescriptor):
    """Execute the job via the adapter registered for its service.

    Raises a plain Exception when no adapter is registered for the
    service's ontology node id.
    """
    logger.debug("perform_job: %s", job_descriptor)
    adapter = app['service_manager'].get_service_adapter_for_id(
        job_descriptor.service.ontology_node_id)
    if adapter is None:
        raise Exception('Service not available')
    return adapter.perform(job_descriptor)
|
988,029 | 350e18f7c092fabc6ed7fa5b159e8eb8a8f1b4e5 | # Command line arguments: tax ids
# tax: path to taxonomy, e.g. ../../tax/ott/
# ids: path to ids file, e.g. ../../ids_that_are_otus.tsv
# To test:
# ../../bin/jython measure_coverage.py ../../t/tax/aster/ ../../ids_in_synthesis.tsv
from org.opentreeoflife.taxa import Taxonomy
import os, csv, sys
home = '../..'
def doit(tax_path, ids_path):
    # Measure how well OTT covers the OTU ids: load the raw taxonomy, map
    # every OTU id to its OTT node, then count matched taxa per source
    # prefix (Python 2 / jython file — print statements are intentional).
    ott = Taxonomy.getRawTaxonomy(tax_path, 'ott')
    all_nodes = {}
    with open(ids_path, 'r') as infile:
        reader = csv.reader(infile, delimiter='\t')
        otu_count = 0
        for row in reader:
            # First column of the TSV is the OTT id of the OTU.
            id = row[0]
            # Progress report every 50k rows.
            if otu_count % 50000 == 0: print otu_count, id
            otu_count += 1
            node = ott.lookupId(id)
            if node != None:
                all_nodes[node.id] = node
    print 'OTT taxa assigned to OTUs:', len(all_nodes)
    # Tally, per source prefix (e.g. ncbi, gbif), how many matched taxa cite it.
    prefix_to_count = {}
    ott_count = 0
    for id in all_nodes:
        node = all_nodes[id]
        ott_count += 1
        for qid in node.sourceIds:
            prefix = qid.prefix
            count = prefix_to_count.get(prefix, 0)
            prefix_to_count[prefix] = count + 1
    print 'OTT ids assigned to OTUs:', otu_count
    for prefix in prefix_to_count:
        print prefix, prefix_to_count[prefix]
# Command-line entry: doit(<taxonomy path>, <ids file path>).
doit(sys.argv[1], sys.argv[2])
|
988,030 | ba7d645282eb3d8b4b2755d4f0c5ba0ad05e5f89 | from fython.unit import *
class RPackageX(Unit):
    # Fython Unit bound to the l.rpackagex symbol — presumably the grammar
    # node for an R package reference; confirm against fython.unit.
    unit = l.rpackagex
988,031 | b0b2741b2cd18592bce5ba47db784a5477233970 | import doctest
import unittest
import buoy
# Run buoy's doctests through a unittest runner first, then hand control to
# unittest.main() for regular test discovery.
unittest.TextTestRunner().run(doctest.DocTestSuite(buoy))
unittest.main()
|
988,032 | cb5d32fa1b828af82fa3b7390790b3e288860b3e | n=int(input())
s=input()
m=10**9+7
dict={}
for i in s:
if i not in dict:
dict[i]=1
else:
dict[i]+=1
ans=1
for i in dict:
ans*=dict[i]+1
print((ans-1)%m) |
988,033 | 66e171245fd3bb9c98f2e3e8cd7d8f77f30a12ab | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, text_output
@click.command('download_history')
@click.argument("history_id", type=str)
@click.argument("jeha_id", type=str)
@click.argument("outf", type=click.File('rb+'))
@click.option(
    "--chunk_size",
    help="how many bytes at a time should be read into memory",
    # NOTE(review): default is the string "4096" with type=int — click
    # coerces it, but an int literal would be cleaner.
    default="4096",
    show_default=True,
    type=int
)
@pass_context
@custom_exception
@text_output
def cli(ctx, history_id, jeha_id, outf, chunk_size=4096):
    """Download a history export archive. Use :meth:`export_history` to create an export.

Output:

    None
    """
    # Delegate to the Galaxy histories client on the parsec context.
    return ctx.gi.histories.download_history(history_id, jeha_id, outf, chunk_size=chunk_size)
|
988,034 | f56e08a8f1d51d868bafcc805a61f6a3ee49ed22 | import dataloader as dl
import pandas as pd
import matplotlib.pyplot as plt
def get_turnover_sum(df):
    """Return total turnover per date as a Series indexed by 'date'."""
    grouped = df.groupby('date').sum()
    return grouped['turnover']
def add_price_col(df):
    """Return a copy of *df* with a unit 'price' column:
    (|turnover| + |discount|) / |quantity|.  The input frame is not modified."""
    out = df.copy()
    gross = out['turnover'].abs() + out['discount'].abs()
    out['price'] = gross / out['quantity'].abs()
    return out
def add_discount_PCT_col(df):
    """Return a copy of *df* with a 'discount_PCT' column: the discount's
    share of the gross value (|turnover| + |discount|) in percent."""
    out = df.copy()
    gross = out['turnover'].abs() + out['discount'].abs()
    out['discount_PCT'] = out['discount'].abs() / gross * 100
    return out
def get_price_mean(df):
    # Mean unit price per date (the 'price' column is added by add_price_col).
    new_df = add_price_col(df)
    res = new_df.groupby('date').mean()
    res = res['price']
    return res
def get_discount_PCT_mean(df):
    # Mean discount percentage per date (column added by add_discount_PCT_col).
    new_df = add_discount_PCT_col(df)
    res = new_df.groupby('date').mean()
    res = res['discount_PCT']
    return res
def plot(df):
    # Render the series as a bar chart in a new figure (blocks until closed).
    plt.figure()
    df.plot.bar()
    plt.show()
def main():
    # Load three months of sales exports from the fixed local path and plot
    # the mean discount percentage per date.
    path = 'c:/P5GIT/P5/GOFACT_DATA/'
    filename = 'Sales_'
    month1 = '201609'
    month2 = '201610'
    month3 = '201611'
    end = '.rpt'
    st1 = path + filename + month1 + end
    st2 = path + filename + month2 + end
    st3 = path + filename + month3 + end
    df = dl.load_sales_files([st1,st2,st3])
    data = get_discount_PCT_mean(df)
    plot(data)
if __name__ == '__main__':
    main()
988,035 | 1ea0aa03ca40953388632530e114ba2f4843fac9 | """
Facebook-DP-Downloader
Download the profile picture of any public profile on Facebook
by just having it's Facebook id
"""
import os
import requests

# Facebook Graph API endpoint that redirects to the large profile picture
# of the given user id.
url = "https://graph.facebook.com/{}/picture?type=large"

path = os.getcwd()
# Create the output folder in the working directory on first run.
if not "fb_dps" in os.listdir(path):
    os.mkdir("fb_dps")

# A Facebook id is the numeric unique user id of a public profile.
fbid = int(input("Enter the Facebook-id to download it's profile picture: "))
try:
    result = requests.get(url.format(fbid))
    # NOTE: a non-200 response body is still written, as before.
    with open("fb_dps/{}_img.jpg".format(fbid), "wb") as file:
        file.write(result.content)
except Exception:
    # Bug fix: the original bare `except:` also swallowed
    # KeyboardInterrupt/SystemExit; only real errors are caught now.
    print("There was some error")
|
988,036 | c35c12f94e5d36d63e4dd05a9c10ef6ec1752772 | # --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import logging
import math
import torch
import contextlib
from typing import List, Tuple
import torch.nn as nn
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.data.data_utils import compute_mask_indices
from fairseq.modules import (
PositionalEmbedding,
Fp32GroupNorm,
FairseqDropout,
SamePad,
GradMultiply,
LayerNorm,
Fp32LayerNorm,
TransposeLast,
)
import numpy as np
logger = logging.getLogger(__name__)
class LinearLayer(nn.Module):
    """Linear projection block: Linear -> LayerNorm -> Dropout -> ReLU.

    NOTE(review): the constructor parameter is spelled `odom` but clearly
    means the output dimension (`odim`); kept for interface compatibility.
    """
    def __init__(self, idim, odom, dropout=0):
        super(LinearLayer, self).__init__()
        self.linear = nn.Sequential(
            nn.Linear(idim, odom),
            nn.LayerNorm(odom),
            nn.Dropout(dropout),
            nn.ReLU(),
        )
    def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
        # A pointwise linear layer does not change the sequence length.
        out = in_seq_lens_tensor.clone()
        return out
    def forward(self, src_tokens, src_lengths):
        """
        src_tokens: [B, T, C]
        src_lengths: [B]
        """
        x = self.linear(src_tokens)
        x = x.transpose(0, 1).contiguous() # -> T x B x C
        return x, src_lengths
class SpeechEncoderPrenet(nn.Module):
    """Speech encoder pre-net: wav2vec2-style convolutional feature
    extraction, optional projection to the encoder dimension, optional
    convolutional / sinusoidal / learned positional embeddings, and
    HuBERT-style span masking.

    NOTE(review): the original docstring described conv-layer constructor
    arguments that actually belong to ConvFeatureExtractionModel; all
    configuration of this class comes from `args`.
    """
    def __init__(self, args):
        super(SpeechEncoderPrenet, self).__init__()
        self.dropout_module = FairseqDropout(
            p=args.dropout, module_name=self.__class__.__name__
        )
        self.embed_scale = math.sqrt(args.encoder_embed_dim)
        if args.no_scale_embedding:
            self.embed_scale = 1.0
        self.padding_idx = 1
        # The whole prenet runs under no_grad for the first N updates.
        self.freeze_encoder_updates = args.freeze_encoder_updates
        self.num_updates = 0
        assert args.encoder_speech_prenet in ["conv", "linear"], args.encoder_speech_prenet
        # conv_feature_layers is a string like "[(512,10,5), ...]".
        feature_enc_layers = eval(args.conv_feature_layers)  # noqa
        self.embed = feature_enc_layers[-1][0]
        self.feature_extractor = ConvFeatureExtractionModel(
            conv_layers=feature_enc_layers,
            dropout=0.0,
            mode=args.extractor_mode,
            conv_bias=args.conv_bias,
        )
        # Ratio between the label frame rate and the conv feature rate,
        # used by forward_targets() to align labels with features.
        feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers])
        self.feat2tar_ratio = (
            args.label_rates * feature_ds_rate / args.sample_rate
        )
        # Project conv features to the encoder dimension only when needed.
        self.post_extract_proj = (
            nn.Linear(self.embed, args.encoder_embed_dim)
            if self.embed != args.encoder_embed_dim
            else None
        )
        self.use_conv_pos = args.use_conv_pos
        self.use_sinc_pos = args.use_sinc_pos
        self.use_abs_pos = getattr(args, "use_abs_pos", False)
        self.feature_grad_mult = args.feature_grad_mult
        if self.use_conv_pos:
            # NOTE(review): layer_norm is only created in this branch, but
            # _forward() calls self.layer_norm unconditionally — confirm
            # configs always enable use_conv_pos.
            self.layer_norm = LayerNorm(self.embed)
            self.pos_conv = nn.Conv1d(
                args.encoder_embed_dim,
                args.encoder_embed_dim,
                kernel_size=args.conv_pos,
                padding=args.conv_pos // 2,
                groups=args.conv_pos_groups,
            )
            dropout = 0
            std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * args.encoder_embed_dim))
            nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
            nn.init.constant_(self.pos_conv.bias, 0)
            self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
            self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
        assert not (self.use_sinc_pos and self.use_abs_pos), f"sinc pos: {self.use_sinc_pos} abs pos: {self.use_abs_pos}"
        if self.use_sinc_pos:
            self.embed_positions = PositionalEmbedding(
                args.max_speech_positions, args.encoder_embed_dim, self.padding_idx
            )
        if self.use_abs_pos:
            self.embed_positions = PositionalEmbedding(
                args.max_speech_positions, args.encoder_embed_dim, self.padding_idx, learned=True
            )
        # HuBERT-style masking hyper-parameters (time and channel masks).
        self.mask_prob = args.mask_prob
        self.mask_selection = args.mask_selection
        self.mask_other = args.mask_other
        self.hubert_mask_length = args.hubert_mask_length
        self.no_mask_overlap = args.no_mask_overlap
        self.mask_min_space = args.mask_min_space
        self.mask_channel_prob = args.mask_channel_prob
        self.mask_channel_selection = args.mask_channel_selection
        self.mask_channel_other = args.mask_channel_other
        self.mask_channel_length = args.mask_channel_length
        self.no_mask_channel_overlap = args.no_mask_channel_overlap
        self.mask_channel_min_space = args.mask_channel_min_space
        # Learned embedding written into masked time steps.
        self.mask_emb = nn.Parameter(
            torch.FloatTensor(args.encoder_embed_dim).uniform_()
        )
    def forward(self, src_tokens, require_feat_pen=False, target_list=None, padding_mask=None, mask=True):
        # Run under no_grad while the prenet is still frozen.
        ft = self.freeze_encoder_updates <= self.num_updates
        with torch.no_grad() if not ft else contextlib.ExitStack():
            return self._forward(src_tokens, require_feat_pen, target_list, padding_mask, mask)
    def _forward(self, src_tokens, require_feat_pen=False, target_list=None, padding_mask=None, mask=True):
        # Extract conv features; optionally scale their gradient.
        if self.feature_grad_mult > 0:
            x = self.feature_extractor(src_tokens)
            x = x.transpose(1, 2).transpose(0, 1)  # [length, batch, hidden_size]
            if self.feature_grad_mult != 1.0:
                x = GradMultiply.apply(x, self.feature_grad_mult)
        else:
            with torch.no_grad():
                x = self.feature_extractor(src_tokens)
                x = x.transpose(1, 2).transpose(0, 1)  # [length, batch, hidden_size]
        x = x.transpose(0, 1)  # [batch, length, hidden_size]
        encoder_padding_mask = padding_mask
        x = x.transpose(1, 2)  # [batch, hidden_size, length]
        # Align the label streams with the (downsampled) feature length.
        if target_list is not None:
            x, target_list = self.forward_targets(x, target_list)
        # L2 feature penalty used as an auxiliary loss.
        features_pen = x.float().pow(2).mean()
        x = x.transpose(1, 2)  # [batch, length, hidden_size]
        x = self.layer_norm(x)
        encoder_padding_mask = self.forward_padding_mask(x, encoder_padding_mask)
        if self.post_extract_proj is not None:
            x = self.post_extract_proj(x)
        x = self.dropout_module(x)
        # Apply HuBERT span masking unless the caller disabled it.
        if mask:
            x, mask_indices = self.apply_hubert_mask(
                x, encoder_padding_mask
            )
        else:
            x = x
            mask_indices = None
        if self.use_conv_pos:
            positions = self.pos_conv(x.transpose(1, 2))
            positions = positions.transpose(1, 2)
            #else:
            #    positions = self.embed_positions(encoder_padding_mask)
            x = x + positions
        if self.use_sinc_pos:
            positions = self.embed_positions(encoder_padding_mask)
            x = x + positions
        # x = self.dropout_module(x)
        if require_feat_pen:
            return (x, features_pen, mask_indices, target_list), encoder_padding_mask
        else:
            # For consistence with encoder
            return x, encoder_padding_mask
    def forward_targets(
        self, features: torch.Tensor, target_list: List[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Trim features to ensure labels exist and then get aligned labels
        feat_tsz = features.size(2)
        targ_tsz = min([t.size(1) for t in target_list])
        if self.feat2tar_ratio * feat_tsz > targ_tsz:
            feat_tsz = int(targ_tsz / self.feat2tar_ratio)
            features = features[..., :feat_tsz]
        target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
        target_list = [t[:, target_inds.long()] for t in target_list]
        return features, target_list
    def forward_padding_mask(
        self, features: torch.Tensor, padding_mask: torch.Tensor,
    ) -> torch.Tensor:
        # Downsample the sample-level padding mask to the feature rate: a
        # feature frame is padding only if all samples under it are padding.
        extra = padding_mask.size(1) % features.size(1)
        if extra > 0:
            padding_mask = padding_mask[:, :-extra]
        padding_mask = padding_mask.view(
            padding_mask.size(0), features.size(1), -1
        )
        padding_mask = padding_mask.all(-1)
        return padding_mask
    def get_src_lengths(self, src_lengths):
        # Sequence lengths after the conv feature extractor's strides.
        return self.feature_extractor.get_out_seq_lens_tensor(src_lengths)
    def apply_hubert_mask(self, x, padding_mask):
        # Replace random time spans with mask_emb and optionally zero random
        # channel spans; returns the masked tensor and the time-mask indices.
        B, T, C = x.shape
        if self.mask_prob > 0:
            mask_indices = compute_mask_indices(
                (B, T),
                padding_mask,
                self.mask_prob,
                self.hubert_mask_length,
                self.mask_selection,
                self.mask_other,
                min_masks=2,
                no_overlap=self.no_mask_overlap,
                min_space=self.mask_min_space,
            )
            mask_indices = torch.from_numpy(mask_indices).to(x.device)
            x[mask_indices] = self.mask_emb
        else:
            mask_indices = None
        if self.mask_channel_prob > 0:
            mask_channel_indices = compute_mask_indices(
                (B, C),
                None,
                self.mask_channel_prob,
                self.mask_channel_length,
                self.mask_channel_selection,
                self.mask_channel_other,
                no_overlap=self.no_mask_channel_overlap,
                min_space=self.mask_channel_min_space,
            )
            mask_channel_indices = (
                torch.from_numpy(mask_channel_indices)
                .to(x.device)
                .unsqueeze(1)
                .expand(-1, T, -1)
            )
            x[mask_channel_indices] = 0
        return x, mask_indices
    def set_num_updates(self, num_updates):
        """Set the number of parameters updates."""
        self.num_updates = num_updates
class ConvFeatureExtractionModel(nn.Module):
    """wav2vec2-style stack of 1-D conv blocks turning raw waveform [B, T]
    into features [B, C, T']; each entry of `conv_layers` is
    (dim, kernel_size, stride)."""
    def __init__(
        self,
        conv_layers: List[Tuple[int, int, int]],
        dropout: float = 0.0,
        mode: str = "default",
        conv_bias: bool = False,
    ):
        super().__init__()
        assert mode in {"default", "layer_norm"}
        def block(
            n_in,
            n_out,
            k,
            stride,
            is_layer_norm=False,
            is_group_norm=False,
            conv_bias=False,
        ):
            def make_conv():
                conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
                nn.init.kaiming_normal_(conv.weight)
                return conv
            assert (
                is_layer_norm and is_group_norm
            ) == False, "layer norm and group norm are exclusive"
            # NOTE(review): the norm layers below close over `dim` from the
            # enclosing loop rather than using n_out — works because block()
            # is called after `dim` is bound, but fragile.
            if is_layer_norm:
                # LayerNorm over channels: transpose, normalize, transpose back.
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    nn.Sequential(
                        TransposeLast(),
                        Fp32LayerNorm(dim, elementwise_affine=True),
                        TransposeLast(),
                    ),
                    nn.GELU(),
                )
            elif is_group_norm:
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    Fp32GroupNorm(dim, dim, affine=True),
                    nn.GELU(),
                )
            else:
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
        in_d = 1  # raw waveform has a single input channel
        self.conv_layers = nn.ModuleList()
        self.conv_layers_infos = conv_layers
        for i, cl in enumerate(conv_layers):
            assert len(cl) == 3, "invalid conv definition: " + str(cl)
            (dim, k, stride) = cl
            self.conv_layers.append(
                block(
                    in_d,
                    dim,
                    k,
                    stride,
                    is_layer_norm=mode == "layer_norm",
                    # In "default" mode only the first layer gets GroupNorm.
                    is_group_norm=mode == "default" and i == 0,
                    conv_bias=conv_bias,
                )
            )
            in_d = dim
    def forward(self, x):
        # BxT -> BxCxT
        x = x.unsqueeze(1)
        for conv in self.conv_layers:
            x = conv(x)
        return x
    def get_out_seq_lens_nonmask_after_a_layer(self, in_seq_lens_tensor, i):
        """Returns the out_seq_lens_nonmask 0/1 tensor after a layer.
        Args:
            in_seq_lens_tensor (LongTensor): length
        Returns:
            LongTensor: length
        """
        # Standard conv length formula: floor((L - (k-1) - 1) / stride + 1).
        out_lengths = in_seq_lens_tensor.clone()
        out_lengths = ((out_lengths.float() - (self.conv_layers_infos[i][1] - 1) - 1) / self.conv_layers_infos[i][-1] + 1).floor().long()
        out_nonmask = (~lengths_to_padding_mask(out_lengths)).float()
        return out_nonmask, out_lengths
    def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
        # Apply the conv length formula through every layer of the stack.
        out = in_seq_lens_tensor.clone()
        for i in range(len(self.conv_layers)):
            out = ((out.float() - (self.conv_layers_infos[i][1] - 1) - 1) / self.conv_layers_infos[i][-1] + 1).floor().long()
        return out
|
988,037 | 646c0ccf3ad5214291926efd8f5b2153531d9a53 | # encoding=utf8
from niapy.algorithms.basic import FlowerPollinationAlgorithm
from niapy.tests.test_algorithm import AlgorithmTestCase, MyProblem
class FPATestCase(AlgorithmTestCase):
    """Regression tests running FlowerPollinationAlgorithm on the shared
    problems with a fixed seed (paired runs must agree)."""
    def setUp(self):
        AlgorithmTestCase.setUp(self)
        self.algo = FlowerPollinationAlgorithm
    def test_custom(self):
        # Small population on the custom MyProblem.
        fpa_custom = self.algo(population_size=10, p=0.5, seed=self.seed)
        fpa_customc = self.algo(population_size=10, p=0.5, seed=self.seed)
        AlgorithmTestCase.test_algorithm_run(self, fpa_custom, fpa_customc, MyProblem())
    def test_griewank(self):
        # Default benchmark (Griewank) with the default beta.
        fpa_griewank = self.algo(population_size=20, p=0.5, seed=self.seed)
        fpa_griewankc = self.algo(population_size=20, p=0.5, seed=self.seed)
        AlgorithmTestCase.test_algorithm_run(self, fpa_griewank, fpa_griewankc)
    def test_griewank_with_beta(self):
        # Same benchmark with an explicit Levy-flight beta parameter.
        fpa_beta_griewank = self.algo(population_size=20, p=0.5, beta=1.2, seed=self.seed)
        fpa_beta_griewankc = self.algo(population_size=20, p=0.5, beta=1.2, seed=self.seed)
        AlgorithmTestCase.test_algorithm_run(self, fpa_beta_griewank, fpa_beta_griewankc)
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
|
988,038 | da4d46b72ab2a5ae167e7a881e3b08bda74d4660 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 13:07:14 2020
@author: James Cotter
"""
#import dictionary
from nltk.corpus import words
word_list = words.words()
def solver(letters,middle):
    """
    Solve a Spelling-Bee style puzzle.

    Inputs:
        letters: string, letters on the perimeter
        middle: string, letter in the center (only one)
    Outputs:
        solution: all valid words that solve the puzzle
    """
    # A word is valid when it is at least 5 letters long, uses only
    # perimeter letters plus the middle letter, and uses the middle letter.
    sol = [w for w in word_list
           if set(w).difference(set(letters)) == set(middle) and 5 <= len(w)]
    # De-duplicate while preserving first-seen order.  (The original used a
    # side-effecting list comprehension with O(n^2) membership tests.)
    solution = list(dict.fromkeys(sol))
    # Pangrams: words that use every available letter.
    all_letters = [x for x in solution if set(x) == set(letters).union(set(middle))]
    print('Words using every letter: ')
    for i in all_letters:
        print(i)
    # Pangrams score 3 points, all other valid words 1 point.
    total_score = 3*len(all_letters) + len(solution)-len(all_letters)
    print("Total score is: ", total_score)
    return solution
if __name__ == "__main__":
    # Example puzzle: six perimeter letters plus the mandatory center letter.
    outside = 'lticvo' #outside letters
    inside = 'r' #letter that must be used
    ans = solver(outside,inside)
988,039 | 3eb6b7267b0266cd51d9b344bcfd42bbb758697a | import re
from datetime import datetime
from kconfig import chaptersBook, workGroupBook, labsBookByName
from kconfig import agileCalendar
from kernel.BacklogDeployer import BacklogDeployer
__author__ = "Manuel Escriche <mev@tid.es>"
__version__ = '1.2.0'
class IssueDefinition:
    """Base data holder for a sprint-planning JIRA issue to be created.

    Subclasses set `project`/`component` and implement description()/summary().
    """
    def __init__(self, action, sprint, deadline):
        self.project = None        # JIRA project key; set by subclasses
        self.component = None      # JIRA component id; set by subclasses
        self.action = action
        self.sprint = sprint
        # Dotless sprint label used in summaries, e.g. '5.3.1' -> '531'.
        self._sprint = re.sub(r'\.', '', self.sprint)
        self.fixVersion = 'Sprint {}'.format(self.sprint)
        self.deadline = deadline
        # Issue links and tracker bookkeeping, filled in later.
        self.inwards = []
        self.outwards = []
        self.issue = None
        self.assignee = None
        self.watchers = []
    def description(self):
        # Subclasses must build the issue description body.
        raise NotImplementedError()
    def summary(self):
        # Subclasses must build the issue summary line.
        raise NotImplementedError()
class SourceIssue(IssueDefinition):
    """Sprint-planning issue for overall coordination (COR project)."""
    _type = 'source'
    def __init__(self, action, sprint, deadline):
        super().__init__(action, sprint, deadline)
        self.project = 'COR'
        self.component = '10249'
        self.reporter = 'backlogmanager'
    def description(self):
        # JIRA wiki-markup body; string content must stay exactly as-is.
        return '+Activities requested to {color:red}Plan{color} {color:blue}*' + self.fixVersion + '*{color}+\n' + \
               '# Create hierarchically backlog issues for all tech chapters leaders and GE owners\n' + \
               '# Schedule and attend sprint planning meetings for all tech chapters\n' + \
               '# Take backlog snapshot for ' + self.fixVersion + '\n' + \
               '# Share sprint planning outcome with project partners \n' + \
               '\n{color: red}Deadline = ' + self.deadline.strftime('%d-%m-%Y') + ' at 17:00h {color}\n'
    def summary(self):
        return 'FIWARE.WorkItem.Coordination.Agile.Sprint-{}.{}'.format(self._sprint, self.action)
class ChapterIssue(IssueDefinition):
    """Sprint-planning issue for one tech chapter's coordination tracker."""
    _type = 'chapter'
    def __init__(self, chapter, action, sprint, deadline):
        super().__init__(action, sprint, deadline)
        self.chapter = chapter
        # Resolve tracker/component from the chapters registry.
        _chapter = chaptersBook[chapter]
        self.project = _chapter.coordination.tracker
        self.component = _chapter.coordination.key
        self.reporter = 'backlogmanager'
    def description(self):
        # JIRA wiki-markup body; string content must stay exactly as-is.
        return '+Activities requested to {color:red}Plan{color} {color:blue}*' + self.fixVersion + '*{color}+\n' + \
               '# Verify sprint planning issues are available for all GE owners\n' +\
               '# Organise and hold sprint planning meeting for the chapter before deadline\n' +\
               "# Update your chapter coordination backlog properly\n" +\
               "# Verify all GEs are properly planned for the sprint\n" +\
               '\n{color: red}Deadline = ' + self.deadline.strftime('%d-%m-%Y') + ' at 17:00 {color}\n'
    def summary(self):
        return 'FIWARE.WorkItem.{}.Coordination.Agile.Sprint-{}.{}'.format(self.chapter, self._sprint, self.action)
class EnablerIssue(IssueDefinition):
    """Sprint-planning issue for a single Generic Enabler within a chapter."""
    _type = 'enabler'
    def __init__(self, chapter, enabler, action, sprint, deadline):
        super().__init__(action, sprint, deadline)
        self.enabler = enabler
        self.chapter = chapter
        # Resolve the enabler's tracker/component via the chapters registry.
        self._chapter = chaptersBook[chapter]
        self._enabler = self._chapter.enablers[enabler]
        self.project = self._enabler.tracker
        self.component = self._enabler.key
        self.reporter = 'backlogmanager'
    def description(self):
        # JIRA wiki-markup body; string content must stay exactly as-is.
        return '+Activities requested to {color:red}Plan{color} {color:blue}*' + self.fixVersion + '*{color}+\n' +\
               '# Check your sprint planning issue is available and update its status as you progress\n' +\
               '# Create and/or schedule your backlog issues for the sprint\n' +\
               'Topics:\n' +\
               '#* My HelpDesk Issues - My Bugs\n' +\
               '#* My Roadmap - My Developments\n' +\
               '#* My Deployments (FIWARE LAB)\n' +\
               '#* My Publishing (Catalogue)\n' +\
               '#* My Training (Academy)\n' +\
               '#* My Contribution to Deliverables\n' +\
               '#* Others?\n' +\
               '\n{color: red}Deadline = ' + self.deadline.strftime('%d-%m-%Y') + ' at 17:00 {color}\n'
    def summary(self):
        return 'FIWARE.WorkItem.{}.{}.Agile.Sprint-{}.{}'\
            .format(self.chapter, self._enabler.backlogKeyword, self._sprint, self.action)
class WorkGroupIssue(IssueDefinition):
    """Sprint-planning issue for a work group's coordination tracker."""
    _type = 'workgroup'
    def __init__(self, workgroup, action, sprint, deadline):
        super().__init__(action, sprint, deadline)
        self.workgroup = workgroup
        # Resolve tracker/component from the work-group registry.
        _workgroup = workGroupBook[workgroup]
        self.project = _workgroup.coordination.tracker
        self.component = _workgroup.coordination.key
        self.reporter = 'backlogmanager'
    def description(self):
        # JIRA wiki-markup body; string content must stay exactly as-is.
        return '+Activities requested to {color:red}Plan{color} {color:blue}*' + self.fixVersion + '*{color}+\n' + \
               '# If needed, organise and hold sprint planning meeting for the workgroup before deadline\n' +\
               "# Update your work group coordination backlog properly\n" +\
               "# Verify all components are properly planned for the sprint\n" +\
               '\n{color: red}Deadline = ' + self.deadline.strftime('%d-%m-%Y') + ' at 17:00 {color}\n'
    def summary(self):
        return 'FIWARE.WorkItem.{}.Coordination.Agile.Sprint-{}.{}'.format(self.workgroup, self._sprint, self.action)
class GroupIssue(IssueDefinition):
    """Sprint work item for a single group inside a work group."""

    _type = 'group'

    def __init__(self, workgroup, group, action, sprint, deadline):
        super().__init__(action, sprint, deadline)
        self.group = group
        self.workgroup = workgroup
        # Cache the catalogue records; the group's own tracker receives the issue.
        self._workgroup = workGroupBook[workgroup]
        self._group = self._workgroup.groups[group]
        self.project = self._group.tracker
        self.component = self._group.key
        self.reporter = 'backlogmanager'

    def description(self):
        """Jira-markup checklist for the group owner."""
        pieces = (
            '+Activities requested to {color:red}Plan{color} {color:blue}*' + self.fixVersion + '*{color}+\n',
            '# Create and/or schedule your backlog issues for the sprint\n',
            'Topics:\n',
            '#* My Contribution to Deliverables\n',
            '#* Others?\n',
            '\n{color: red}Deadline = ' + self.deadline.strftime('%d-%m-%Y') + ' at 17:00 {color}\n',
        )
        return ''.join(pieces)

    def summary(self):
        """Canonical summary naming work group, group keyword, sprint and action."""
        return 'FIWARE.WorkItem.{}.{}.Agile.Sprint-{}.{}'.format(
            self.workgroup, self._group.backlogKeyword, self._sprint, self.action)
class QualityAssuranceIssue(IssueDefinition):
    """Sprint work item for the transversal Quality Assurance activity."""

    _type = 'tech'

    def __init__(self, action, sprint, deadline):
        super().__init__(action, sprint, deadline)
        # Fixed Jira coordinates of the QA component.
        self.project = 'TCOR'
        self.component = '11700'
        self.reporter = 'backlogmanager'

    def description(self):
        """Jira-markup checklist for the QA team."""
        pieces = (
            '+Activities requested to {color:red}Plan{color} {color:blue}*' + self.fixVersion + '*{color}+\n',
            '# Create and/or schedule backlog issues for the sprint\n',
            'Topics:\n',
            '#* Test Cases and test descriptions\n',
            '#* My Contribution to Deliverables\n',
            '#* Test reports\n',
            '#* Others?\n',
            '\n{color: red}Deadline = ' + self.deadline.strftime('%d-%m-%Y') + ' at 17:00h {color}\n',
        )
        return ''.join(pieces)

    def summary(self):
        """Canonical summary line for the QA sprint item."""
        return 'FIWARE.WorkItem.QualityAssurance.Agile.Sprint-{}.{}'.format(self._sprint, self.action)
class LabIssue(IssueDefinition):
    """Sprint coordination work item for the FIWARE Lab as a whole."""

    # NOTE(review): reuses the 'chapter' type rather than a Lab-specific one — confirm intended.
    _type = 'chapter'

    def __init__(self, action, sprint, deadline):
        super().__init__(action, sprint, deadline)
        self.lab = labsBookByName['Lab']
        # Route onto the Lab's coordination tracker.
        self.project = self.lab.coordination.tracker
        self.component = self.lab.coordination.key
        self.reporter = 'backlogmanager'

    def description(self):
        """Jira-markup checklist for the Lab coordinator."""
        pieces = (
            '+Activities requested to {color:red}Plan{color} {color:blue}*' + self.fixVersion + '*{color}+\n',
            '# Verify sprint planning issues are available for all Nodes\n',
            '# Organise and hold sprint planning meeting for the chapter before deadline\n',
            "# Update your chapter coordination backlog properly\n",
            "# Verify all Nodes are properly planned for the sprint\n",
            '\n{color: red}Deadline = ' + self.deadline.strftime('%d-%m-%Y') + ' at 17:00 {color}\n',
        )
        return ''.join(pieces)

    def summary(self):
        """Canonical summary line for the Lab coordination item."""
        return 'FIWARE.WorkItem.Lab.Coordination.Agile.Sprint-{}.{}'.format(self._sprint, self.action)
class NodeIssue(IssueDefinition):
    """Sprint work item for a single FIWARE Lab node."""

    _type = 'node'

    def __init__(self, node, action, sprint, deadline):
        super().__init__(action, sprint, deadline)
        self.lab = labsBookByName['Lab']
        # Unlike the sibling *Issue classes, the node object itself is passed in.
        self.node = node
        self.project = self.node.tracker
        self.component = self.node.key
        self.reporter = 'backlogmanager'

    def description(self):
        """Jira-markup checklist for the node owner."""
        pieces = (
            '+Activities requested to {color:red}Plan{color} {color:blue}*' + self.fixVersion + '*{color}+\n',
            '# Check your sprint planning issue is available and update its status as you progress\n',
            '# Create and/or schedule foreseen backlog issues for the sprint\n',
            '\n{color: red}Deadline = ' + self.deadline.strftime('%d-%m-%Y') + ' at 17:00 {color}\n',
        )
        return ''.join(pieces)

    def summary(self):
        """Canonical summary naming the node keyword, sprint and action."""
        return 'FIWARE.WorkItem.Lab.{}.Agile.Sprint-{}.{}'.format(
            self.node.backlogKeyword, self._sprint, self.action)
class SprintPlanning:
    """Builds the full tree of sprint-planning work items (root -> chapters ->
    enablers, plus the Lab -> nodes) ready for a BacklogDeployer run.

    Issues are linked both ways via their inwards/outwards lists and collected
    flat in self.issues in creation order.
    """

    def __init__(self):
        action = 'Planning'
        # sprint = agileCalendar.next_sprint
        sprint = agileCalendar.current_sprint
        # NOTE(review): hard-coded deadline — presumably updated by hand each sprint; confirm.
        deadline = datetime.strptime('2017-03-10', '%Y-%m-%d').date()
        self.issues = []
        # Root work item; every other issue links back to it.
        self.root = SourceIssue(action, sprint, deadline)
        self.issues.append(self.root)
        for chaptername in chaptersBook:
            # These chapters are not planned through this tool.
            if chaptername in ('Marketplace', 'InGEIs', 'Catalogue', 'Academy'):
                continue
            chapter = chaptersBook[chaptername]
            chapter_issue = ChapterIssue(chaptername, action, sprint, deadline)
            chapter_issue.inwards.append(self.root)
            self.root.outwards.append(chapter_issue)
            self.issues.append(chapter_issue)
            for enablername in chapter.enablers:
                enabler = chapter.enablers[enablername]
                # Enablers no longer in active development get no planning issue.
                if enabler.mode in ('Support', 'Deprecated'):
                    continue
                enabler_issue = EnablerIssue(chaptername, enablername, action, sprint, deadline)
                enabler_issue.inwards.append(chapter_issue)
                chapter_issue.outwards.append(enabler_issue)
                self.issues.append(enabler_issue)
        # Work-group planning is currently disabled:
        # for workgroupname in workGroupBook:
        #     workgroup = workGroupBook[workgroupname]
        #     workgroupIssue = WorkGroupIssue(workgroupname, action, sprint, deadline)
        #     workgroupIssue.inwards.append(self.root)
        #     self.root.outwards.append(workgroupIssue)
        #     self.issues.append(workgroupIssue)
        #     if workgroupname in ('Collaboration', 'Dissemination', 'Exploitation', 'PressOffice'): continue
        #     for groupname in workgroup.groups:
        #         group = workgroup.groups[groupname]
        #         if group.mode != 'Active': continue
        #         groupIssue = GroupIssue(workgroupname, groupname, action, sprint, deadline)
        #         groupIssue.inwards.append(workgroupIssue)
        #         workgroupIssue.outwards.append(groupIssue)
        #         self.issues.append(groupIssue)
        # Lab coordination item plus one item per active node.
        lab_nodes_book = labsBookByName['Lab'].nodes
        lab_issue = LabIssue(action, sprint, deadline)
        lab_issue.inwards.append(self.root)
        self.root.outwards.append(lab_issue)
        self.issues.append(lab_issue)
        for nodename in lab_nodes_book:
            node = lab_nodes_book[nodename]
            # Nodes not yet (or no longer) operating are skipped.
            if node.mode in ('Negotiation', 'Closed'):
                continue
            node_issue = NodeIssue(node, action, sprint, deadline)
            node_issue.inwards.append(lab_issue)
            lab_issue.outwards.append(node_issue)
            self.issues.append(node_issue)
        # global
        # QA planning is currently disabled:
        # self.qa = QualityAssuranceIssue(action, sprint, deadline)
        # self.qa.inwards.append(self.root)
        # self.root.outwards.append(self.qa)
        # self.issues.append(self.qa)
if __name__ == "__main__":
    task = SprintPlanning()
    tool = BacklogDeployer(task, description=False)
    # Map menu keys to their handlers; 'E' terminates via the builtin exit().
    options = {'0': tool.print,
               '1': tool.deploy,
               '2': tool.monitor,
               '3': tool.search,
               '4': tool.clean,
               'E': exit}
    menu = '\nMenu:\n\t0: print\n\t1: deploy \n\t2: monitor \n\t3: search \n\t4: clean \n\tE: Exit'
    while True:
        choice = input(menu + '\nEnter your choice[0-4,(E)xit] : ')
        print('Chosen option:', choice)
        handler = options.get(choice)
        if handler is None:
            print('\n\n\nWrong option, please try again... ')
        else:
            handler()
|
988,040 | cafafecc2fd65b5c5be897771404886ee957fe03 | # USAGE
# python color_kmeans.py --image images/jp.png --clusters 3
# import the necessary packages
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import argparse, utils, os, cv2
# Runtime configuration for the feature-extraction run (no CLI parsing used).
args = {
    'plot': False,                     # show per-image histograms with matplotlib
    'indir': 'baltimore_images',       # directory of input images
    'outfile': 'baltimore_features',   # CSV-style output file
}
def make_hist(f, plot=False, n_bins=5):
    """Return a normalised RGB colour histogram for image file *f*.

    The result is a flat list of 3 * n_bins values (R bins, then G, then B),
    normalised so they sum to 1. When *plot* is true, the image and its
    per-channel histograms are drawn with matplotlib.
    """
    image = cv2.imread(f)
    if image is None:
        # cv2.imread returns None instead of raising on unreadable files.
        raise IOError("could not read image: %s" % f)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # show our image
    # BUGFIX: honour the `plot` parameter instead of the global args['plot'],
    # which silently overrode whatever the caller passed.
    if plot:
        plt.figure()
        plt.axis("off")
        plt.imshow(image)
    features = []
    for i, col in enumerate(('r', 'g', 'b')):
        hist = cv2.calcHist([image], [i], None, [n_bins], [0, 256])
        features.extend(hist.flatten())
        if plot:
            plt.plot(hist, color=col)
            plt.xlim([0, 256])
    # Normalise by the total pixel-channel count so differently sized images
    # are comparable; guard against an all-zero histogram.
    sm = sum(features)
    if sm:
        features = [x / sm for x in features]
    return features
def write_hist(outfile, hist, fileName):
    """Append one CSV row: the file name followed by its histogram values."""
    row = ",".join(str(v) for v in hist)
    outfile.write("%s,%s\n" % (fileName, row))
# Extract a colour-histogram row for every image in the input directory.
files = [os.path.join(args['indir'], f) for f in os.listdir(args['indir'])]
with open(args['outfile'], 'w') as outfile:
    for fileName in files:
        try:
            hist = make_hist(fileName, plot=args['plot'])
            write_hist(outfile, hist, fileName)
        except Exception as e:
            # BUGFIX: the original used a bare `except:` whose body built the
            # message string and discarded it, silently hiding every failure.
            print('error @ ' + fileName, e)
988,041 | 6d73e5a2bf8981dbfd5fd46c628d57e80a7c49e5 | from app import app
from app import jobs
import time
import threading
import schedule
# 订阅 azz.net 某一个用户的最新作品
def subscribe_azz_user(username):
    """Subscribe to an azz.net user: fetch their current state once,
    then poll for new works every day."""
    jobs.init_azz_net(username)
    daily = schedule.every(1).days
    daily.do(jobs.azz_net_job, username)
def subscribe_blog(key, func):
    """Subscribe to a blog feed: initialise it, then re-check every 15 seconds."""
    jobs.init_job(key, func)
    poller = schedule.every(15).seconds
    poller.do(jobs.check_update, key, func)
def runnable():
    """Register all periodic jobs, then drive the scheduler loop forever."""
    schedule.clear()
    # Fetch the newest "Young Sheldon" episode every six hours.
    schedule.every(6).hours.do(jobs.sheldon_job)
    # azz.net artists to follow (same registration order as before).
    for artist in ('wlop', 'void', 'BYJW', 'fleurdelys'):
        subscribe_azz_user(artist)
    # Blog feeds to follow.
    subscribe_blog(jobs.blog_key('meituan'), jobs.get_meituan_first_page)
    subscribe_blog(jobs.blog_key('taobao'), jobs.get_taobao_first_page)
    subscribe_blog(jobs.blog_key('ruanyifeng'), jobs.get_ryf_first_page)
    subscribe_blog(jobs.blog_key('program-think'), jobs.get_program_think_first_page)
    # Run pending jobs once per second; this never returns.
    while True:
        schedule.run_pending()
        time.sleep(1)
if __name__ == '__main__':
    # Run the scheduler on a background thread so the web app can serve requests.
    worker = threading.Thread(target=runnable)
    worker.start()
    app.run()
|
988,042 | c5f151a3be04cc6fe655f75a68db57fecf76962e | class Solution(object):
    def calculate(self, s):
        """Evaluate a basic +-*/ arithmetic expression string (Python 2 code).

        Strategy: keep a stack of signed operands. '+'/'-' push the signed
        number; '*'/'/' pop the previous operand and combine immediately, so
        multiplicative precedence is honoured; finally the stack is summed.

        :type s: str
        :rtype: int
        """
        op = []        # operand stack: values already carrying their sign
        num = ''       # digits of the number currently being scanned
        sign = "+"     # operator seen before the current number
        s = s.strip()
        for i, c in enumerate(s):
            if c.isdigit():
                num += c
            if c in '+-*/' or i == len(s) - 1:  # last char: flush without waiting for a next operator
                if sign == "+":
                    op.append(int(num))
                elif sign == "-":
                    op.append(-int(num))
                elif sign == "*":
                    op.append(int(num) * op.pop())
                elif sign == '/':
                    # Python 2 specific: emulate C-style division truncating toward zero
                    a, b = op.pop(), int(num)
                    res = a / b if a / b >= 0 else -(-a / b)
                    op.append(res)
                sign = c
                num = ''
        # Sum the signed operands (reduce is a builtin in Python 2).
        return reduce(lambda x, y: x + y, op)
#push signed values; when a higher-precedence operator (* or /) appears, pop one operand and combine it first
|
988,043 | 957921649485af9a233b9f2cb19456617b8f60c7 | from django.urls import path, re_path
from . import views
# URL routes for the school app; view callables live in views.py.
urlpatterns = [
    # List all schools.
    path('school_info/', views.school_list, name='school_info'),
    # Detail page for one school, selected by its name from the URL.
    path('school_info/<str:school_name>/', views.one_school, name='one_school'),
]
988,044 | 94495aa0a16c6846dedd052f710bc667057a52e6 | # -*- coding: utf-8 -*-
"""
Created on Wed May 25 14:56:25 2016
实验,虚数
@author: aa
"""
from __future__ import division
from math import *
import numpy as np
import matplotlib.pyplot as plt
import scipy
A=np.array([[2,1+2j],[1-2j,3]])
dia,u=np.linalg.eig(A)
|
988,045 | efca715a56b771a385123208986638465632732d | s=str(input())
l=len(s)
ans=99999999999
for j in s:
temp=j
a=0
cnt=0
for i in range(l):
if s[i]!=temp:
cnt=cnt+1
else:
a=max(a,cnt)
cnt=0
a=max(a,cnt)
ans=min(a,ans)
print(ans) |
988,046 | a96c84cb6be77e6eb946bb8b8994af6963cb481b | # #*************************************************************
# File : msd.py
# Used for calculating MSD from trajectory files
# usage: python msd.py [-h] f atoms [name]
# positional arguments:
# f Name of the trajectory file
# atoms Total number of atoms
# optional arguments:
# name Name of the element
# -h, --help show help message
# ****************************************************************
from itertools import islice,izip
from msd import msd
import matplotlib.pyplot as plt
from multiprocessing import Pool
import argparse,time
def parse():
    """Read command-line arguments: trajectory file, atom count, element name."""
    ap = argparse.ArgumentParser(description='Mean Square Displacement Calculation')
    ap.add_argument("f", type=str, help="Name of the trajectory file")
    ap.add_argument("atoms", type=int, help="Total number of atoms")
    ap.add_argument("name", type=str, help="Name of the element", nargs="?", default="all")
    parsed = ap.parse_args()
    return [parsed.f, parsed.atoms, parsed.name]
def get(data,m): #return [[t0,msd(t0)],[t1,msd(t1)]..tm,msd(tm)]
    # Thin module-level wrapper so multiprocessing.Pool can pickle the callable.
    return msd(data,m)
def calculate(name):
    """Compute the particle-averaged MSD for element *name* (Python 2 code).

    Reads the trajectory from the module-level `file` path, using `nat` atoms
    per frame (both set by parse() at import time), fans the per-particle MSD
    out over a multiprocessing Pool, saves and returns the averaged curve.
    """
    data, n = [], 0
    with open(file) as f:
        while True:
            tmp = list(islice(f, 9, 9 + nat))  # skip 9 header lines, read one time frame
            if not tmp: break
            # Columns 2:6 hold element name then x,y,z; keep only *name* atoms.
            tmp = (line.split()[2:6] for line in tmp)
            tmp = ((map(float, i[1:])) for i in tmp if i[0] == name)
            n += 1
            data.append(tmp)
    data = izip(*data)  # transpose: get [r(t1),r(t2)..r(tn)] for each particle
    m = int(n / 2.0)    # evaluate MSD only up to half the trajectory length
    pl = Pool()         # create a pool of worker processes
    res = [pl.apply_async(get, [i, m]) for i in data]  # one task per particle, in parallel
    res = [r.get() for r in res]  # [msd(t0),msd(t1)..msd(tm)] for each particle
    # assumes frames are 100 fs apart — TODO confirm against the trajectory writer
    x = [i * 100 for i in xrange(m + 1)]
    avg = [sum(i) / len(i) for i in izip(*res)]  # average over all particles
    save(x, avg, name)
    return x, avg
def save(a, b, name):
    """Write paired (time, msd) columns, space-separated, to msd_<name>.dat."""
    with open("msd_%s.dat" % name, "w") as out:
        out.write("\n".join("%s %s" % pair for pair in zip(a, b)))
file,nat,name=parse()
if __name__ == '__main__':
s=time.time()
handles=[] #calulate msd and plot
if name=="all":
handles.append(plt.plot(x,avg,"-",label="H")[0])
x,avg=calculate("O")
handles.append(plt.plot(x,avg,"-",label="O")[0])
x,avg=calculate("Na")
handles.append(plt.plot(x,avg,"-",label="Na")[0])
x,avg=calculate("Cl")
handles.append(plt.plot(x,avg,"-",label="Cl")[0])
else:
x,avg=calculate(name)
handles.append(plt.plot(x,avg,"-",label="Average")[0])
plt.yscale('log')
plt.xscale('log')
plt.ylabel("$MSD(\\tau)\ (in\ \AA^2)$")
plt.xlabel("$\\tau\ (in\ fs) $")
plt.title("Mean Square Displacement Plot")
plt.legend(handles=handles)
print "Time needed", time.time()-s
plt.show() |
988,047 | d08dbb1bcb3e515b95117eb3bfae673bbc22f2eb | from socket import *
import threading
from util.serverLog import *
from send.send import *
from util.jsonManager import *
from util.DBManager import updateIoTData
from util.DBManager import updateAndroidData
from util.DBManager import requestAndroidDataToIoT
from util.jsonManager import JsonToDataManager
from util.jsonManager import DataManager
from util.serverLog import LogD
from util.serverLog import LogE
Debug = 1

# Listen address for the Android/IoT relay server.
host = "127.0.0.1"
port = 12345
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind((host, port))
serverSocket.listen(5)
LogD("서버 생성완료. 대기중입니다.")  # runtime log text kept as-is ("server created, waiting")

# Hard-coded sample message, used while the real socket path below is commented out.
DM = DataManager()
DM.setData("Type", "Android")
DM.setData("RequestType", 2)
DM.setData("ID", 1)
DM.setData("IoTID", 1)
DM.setData("Lock", 1)
DM.setData("Time", "2020-10-04 13:49:12")
DBLocation = '/home/codespace/workspace/AMDMserver.sqlite3'

while(True):
    # NOTE(review): real client handling is disabled; every iteration replays
    # the sample DM message instead of data received over the socket.
    # connectionSocket,addr = serverSocket.accept()  # blocks until a client connects
    # LogD(str(addr) + "에서 접속함")
    # data = connectionSocket.recv(1024)
    # dataDM = JsonToDataManager(data.decode("utf-8"))
    data = DM.getFileStr()
    dataDM = JsonToDataManager(data)
    if(dataDM.getData("Type") == "Android"):
        LogD("Android Data " + dataDM.getFileStr())
        if(dataDM.getData("RequestType") == 1):
            # Phone reports a lock/unlock state change; persist it in background.
            LogD("Android 데이터 수신")
            t = threading.Thread(target=updateAndroidData, args=(dataDM, DBLocation))
            t.start()
        elif(dataDM.getData("RequestType") == 2):
            # Admin opens the IoT device: Android -> Server -> IoT forwarding.
            LogD("Android 데이터 전송 요청")
            LogD("IoT Data " + dataDM.getFileStr())
            t = threading.Thread(target=requestAndroidDataToIoT, args=(dataDM, DBLocation, "NULL"))
            t.start()
        elif(dataDM.getData("RequestType") == 3):
            # Admin asks whether the phone is currently locked (no handler yet).
            LogD("Android 데이터 요청")
    elif(dataDM.getData("Type") == "IoT"):
        # IoT device reports its lock state; persist it in background.
        LogD("IoT Data " + dataDM.getFileStr())
        t = threading.Thread(target=updateIoTData, args=(dataDM, DBLocation))
        t.start()
    # NOTE(review): accepting here blocks every iteration and the connection is
    # never used — presumably leftover from the disabled socket path; confirm.
    connectionSocket, addr = serverSocket.accept()
serverSocket.close()  # unreachable: the loop above never breaks
# Forced IoT lock (admin), requested by the server — not implemented yet.
def calcula_pi(n):
    """Approximate pi via the Basel series: pi^2/6 = sum_{i>=1} 1/i^2.

    Uses the first n terms; larger n gives a better approximation.
    Returns 0.0 for n == 0.
    """
    total = 0.0
    # BUGFIX: the original iterated range(n), so the first term divided by
    # zero, and it mis-seeded the accumulator with 1; sum 6/i^2 for i = 1..n.
    for i in range(1, n + 1):
        total += 6.0 / (i * i)
    return total ** 0.5
988,049 | 1072a823d82efe3600107b7c92a05bbbfa69ac24 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import jwt
from django.conf import settings
from django.utils.translation import ugettext as _
from rest_framework import exceptions
from rest_framework.authentication import BaseAuthentication, get_authorization_header
from .models import AuthToken
class TokenAuthentication(BaseAuthentication):
    """
    Simple token based authentication.

    Clients should authenticate by passing the token key in the "Authorization"
    HTTP header, prepended with the string "Token ". For example:

        Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
    """
    model = AuthToken

    def authenticate(self, request):
        # DRF entry point: return None to let other authenticators run,
        # raise AuthenticationFailed on a malformed Token header.
        auth = get_authorization_header(request).split()
        if not auth or auth[0].lower() != b'token':
            return None
        if len(auth) == 1:
            msg = _('Invalid token header. No credentials provided.')
            raise exceptions.AuthenticationFailed(msg)
        elif len(auth) > 2:
            msg = _('Invalid token header. Token string should not contain spaces.')
            raise exceptions.AuthenticationFailed(msg)
        return self.authenticate_credentials(auth[1])

    def authenticate_credentials(self, key):
        # Decode the JWT as a validity/signature check, then look the raw token
        # up in the database so revoked or deactivated users are rejected.
        try:
            # NOTE(review): no `algorithms=` argument is passed to jwt.decode;
            # recent PyJWT requires it and older versions are permissive about
            # the signing algorithm — confirm the intended pin (e.g. HS256).
            data = jwt.decode(key, settings.SECRET_KEY)
        except Exception as e:  # TODO log
            # NOTE(review): deliberately broad — any decode failure maps to 401.
            raise exceptions.AuthenticationFailed('Invalid token')
        try:
            token = self.model.objects.select_related('user')\
                .filter(user__is_active=True, user_id=data['user_id'])\
                .get(token=key)
        except self.model.DoesNotExist:
            raise exceptions.AuthenticationFailed(_('Invalid token'))
        return token.user, token

    def authenticate_header(self, request):
        # Scheme advertised in the WWW-Authenticate header of 401 responses.
        return 'Token'
988,050 | b9ef8c01fea98ad6f5865d439630bf574e9b7b27 | import os
def op(num1, operation, num2):
    """Apply a binary arithmetic operator (given as a string) to two operands.

    Operands are converted with int(); returns None for an unknown operator.
    """
    a, b = int(num1), int(num2)
    if operation == "+":
        return a + b
    if operation == "-":
        return a - b
    if operation == "*":
        return a * b
    if operation == "/":
        return a / b
    if operation == "**":
        return a ** b
# Interactive REPL: "a <op> b" evaluates; any line containing "clear"/"Clear"
# wipes the (Windows) console via `cls`.
while True:
    print("Enter an equation:")
    equation = input(">>>")
    if "clear" in equation or "Clear" in equation:
        os.system("cls")
    else:
        parts = equation.split(" ")
        result = op(parts[0], parts[1], parts[2])
        print("{0} {1} {2} = {3}".format(parts[0], parts[1], parts[2], result))
988,051 | a6c499e802bea6cb729f6760f1fa801992cc5084 | from django.db import models
class Job(models.Model):
    """A photography showcase entry displayed on the site."""
    image = models.ImageField(upload_to='images/')  # stored under MEDIA_ROOT/images/
    title = models.CharField(max_length=80)
    summary = models.CharField(max_length=200)
    camera_model = models.CharField(max_length=120)
    level = models.CharField(max_length=30)
    # Defaults cover community-submitted photos without a named author/studio.
    photographer = models.CharField(max_length=70, default='community')
    photo_studio = models.CharField(max_length=70, default='individual')
    more_info_url = models.CharField(max_length=200)
    technique = models.CharField(max_length=200)

    def __str__(self):
        # Human-readable label used by the admin and template rendering.
        return self.title
class Viewer(models.Model):
    """A viewer product/tool, with optional licensing and pricing details."""

    # (stored value, human-readable label) pairs for `distribution_method`.
    DISTRIBUTION_METHOD = (
        ('download', "Downloadable Library"),
        ('cloud', "Cloud Service"),
    )
    name = models.CharField(max_length=40)
    body = models.TextField()
    projections = models.CharField(max_length=80, blank=True)
    license = models.CharField(max_length=40, blank=True)
    distribution_method = models.CharField(
        max_length=50,
        choices=DISTRIBUTION_METHOD,
        blank=True
    )
    # `free` and `price` coexist: price is free-form text, blank when free.
    free = models.BooleanField(default=False)
    price = models.CharField(max_length=40, blank=True)
    info_url = models.CharField(max_length=160, blank=True)
    demo_url = models.CharField(max_length=160, blank=True)

    # listing in admin dashboard
    def __str__(self):
        return self.name
988,052 | 5be1bb2d970720d8364d423b6253e173fda97b97 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
CDR3translator
https://innate2adaptive.github.io/Decombinator/
Take decombined data and translates/extracts the CDR3 sequences.
In order to be classified as (potentially) productive, a rearrangement's CDR3s must be:
in-frame
lacking-stop codons
run from a conserved cysteine to FGXG motif (or appropriate alternatives)
The major change from v3 is that this version exports to the AIRRseq community tsv format, simplifying the process
and crucially giving TCR gene name output in the raw format (in addition to the classic Decombinator fields).
"""
from __future__ import division
from Bio.Seq import Seq
from Bio import SeqIO
from time import strftime
import argparse
import string
import re
import sys
import collections as coll
import os
import urllib
import warnings
import gzip
__version__ = '4.0.3'

# Supress Biopython translation warning when translating sequences where length % 3 != 0
# NOTE(review): this silences *all* warnings process-wide, not just Biopython's — confirm intended.
warnings.filterwarnings("ignore")
# TODO Potentially add a flag to combine convergent recombinations into a single row?
def args():
    """
    Parse the command line.

    :return: Command line arguments which dictate the script's behaviour
    """
    # Help flag
    parser = argparse.ArgumentParser(
        description='Translate and extract CDR3 sequences from Decombinator classifier files. '
                    'Please see https://innate2adaptive.github.io/Decombinator/ for details.')
    # Add arguments
    parser.add_argument('-in', '--infile', type=str, required=True,
                        help='File containing 5 part Decombinator indexes, (with/without frequencies)')
    parser.add_argument('-c', '--chain', type=str, help='TCR chain (a/b/g/d)', required=False)
    parser.add_argument('-sp', '--species', type=str, required=False, default="human",
                        help='Specify which species TCR repertoire the data consists of (human or mouse). '
                             'Default = human')
    parser.add_argument('-tg', '--tags', type=str, required=False, default="extended",
                        help='Specify which Decombinator tag set to use (extended or original). Default = extended')
    parser.add_argument('-s', '--suppresssummary', action='store_true', required=False,
                        help='Suppress the production of summary data log')
    parser.add_argument('-npf', '--nonproductivefilter', action='store_true', required=False,
                        help='Filter out non-productive reads from the output')
    parser.add_argument('-dz', '--dontgzip', action='store_true', required=False,
                        help='Stop the output FASTQ files automatically being compressed with gzip')
    parser.add_argument('-dc', '--dontcount', action='store_true', required=False,
                        help='Stop printing the running count')
    parser.add_argument('-tfdir', '--tagfastadir', type=str, required=False, default="Decombinator-Tags-FASTAs",
                        help='Path to folder containing TCR FASTA and Decombinator tag files, for offline analysis.'
                             'Default = \"Decombinator-Tags-FASTAs\".')
    # BUGFIX: the help text previously called the tool "CD3translator".
    parser.add_argument('-nbc', '--nobarcoding', action='store_true', required=False,
                        help='Option to run CDR3translator without barcoding, i.e. so as to run on data produced '
                             'by any protocol.')
    return parser.parse_args()
def findfile(filename):
    """
    Exit with a message if the given input file cannot be opened for reading.

    :param filename: path of the input file to probe
    :return: Nothing: script exits if given input file does not exist
    """
    try:
        # Context manager ensures the probe handle is closed on every path.
        with open(str(filename), "rt"):
            pass
    except (IOError, OSError):
        # Narrowed from a bare Exception: only I/O failures mean "not found".
        print('Cannot find the specified input file. Please try again')
        sys.exit()
def read_tcr_file(species, tagset, gene, filetype, expected_dir_name):
    """
    Locate the ancillary data file for the current TCR locus.

    Tries the working directory, then the bundled directory, then falls back
    to downloading from the Decombinator-Tags-FASTAs GitHub repository.

    :param species: human or mouse
    :param tagset: original or extended
    :param gene: V or J
    :param filetype: tag/fasta/translate/cdrs
    :param expected_dir_name: (by default) Decombinator-Tags-FASTAs
    :return: a usable file path (local, or a downloaded temporary copy)
    """
    # File names follow "<species>_<tagset>_TR<chain><gene>.<filetype>";
    # `chain` is the module-level value set by import_gene_information().
    expected_file = species + "_" + tagset + "_" + "TR" + chain.upper() + gene.upper() + "." + filetype
    # Prefer local copies: working directory first, bundled directory second.
    for candidate in (expected_file, expected_dir_name + os.sep + expected_file):
        if os.path.isfile(candidate):
            return candidate
    # Not available locally: try to fetch it from GitHub.
    try:
        fl = "https://raw.githubusercontent.com/innate2adaptive/Decombinator-Tags-FASTAs/master/" + expected_file
        urllib.request.urlopen(fl)  # Request URL, see whether is found
        fl = urllib.request.urlretrieve(fl)[0]
    except Exception:
        print("Cannot find following file locally or online:", expected_file)
        print("Please either run Decombinator with internet access, or point Decombinator to local copies " \
              "of the tag and FASTA files with the \'-tf\' flag.")
        sys.exit()
    # Return opened file, for either FASTA or tag file parsing
    return fl
def sort_permissions(fl):
    """
    Need to ensure proper file permissions on output data.
    If users are running pipeline through Docker might otherwise require root access

    :param fl: The file to sort permissions on
    :return: Nothing: script edits permissions where appropriate, if possible
    """
    # BUGFIX: the original compared oct(st_mode)[4:] to '666'; under Python 3
    # oct() yields e.g. '0o100666', so the slice is '0666' and the test always
    # fired. Mask the permission bits numerically instead.
    if os.stat(fl).st_mode & 0o777 != 0o666:
        os.chmod(fl, 0o666)
def import_gene_information(inputargs):
    """
    Obtains gene-specific information for translation.

    Runs first: reads in V and J gene sequence and name data (from FASTA files)
    and positions of conserved cysteine residues in V genes (from separate files).
    If files cannot be found in the local directory, the script looks for them
    online at GitHub. NB a number of pseudogenes have no officially designated
    conserved C (or indeed a 3' C at all); where possible the nearest suitable C
    residue is used, otherwise an arbitrary position of 0 is given (moot, as
    most pseudogenes contain stop codons and cannot produce productive
    rearrangements). Also validates the tag/species combination, downgrading
    'extended' tags to 'original' where no extended set exists.

    Side effects: sets the module globals `chain` and, via globals(), the
    v_/j_-prefixed lists consumed elsewhere in the file.

    :param inputargs: command line (argparse) input arguments dictionary
    :return: v_regions, j_regions, v_names, j_names, v_translate_position,
             v_translate_residue, j_translate_position, j_translate_residue,
             v_functionality, j_functionality, v_cdr1, v_cdr2
    """
    global chainnams, chain
    chain = inputargs['chain']

    # No extended tag set exists for mouse: fall back to the original set.
    if inputargs['tags'] == "extended" and inputargs['species'] == "mouse":
        print("Please note that there is currently no extended tag set for mouse TCR genes.\n" \
              "Decombinator will now switch the tag set in use from \'extended\' to \'original\'.\n" \
              "In future, consider editing the script to change the default, " \
              "or use the appropriate flags (-sp mouse -tg original).")
        inputargs['tags'] = "original"

    # Likewise for gamma/delta chains.
    if inputargs['tags'] == "extended" and (chain == 'g' or chain == 'd'):
        print("Please note that there is currently no extended tag set for gamma/delta TCR genes.\n" \
              "Decombinator will now switch the tag set in use from \'extended\' to \'original\'.\n" \
              "In future, consider editing the script to change the default, or use the appropriate flags.")
        inputargs['tags'] = "original"

    # Check species information
    if inputargs['species'] not in ["human", "mouse"]:
        print("Species not recognised. Please select either \'human\' (default) or \'mouse\'.\n" \
              "If mouse is required by default, consider changing the default value in the script.")
        sys.exit()

    # Look for tag and V/J fasta and cysteine position files: if these cannot be found in the working directory,
    # source them from GitHub repositories.
    # Note that fasta/tag files fit the pattern "species_tagset_gene.[fasta/tags]"
    # I.e. "[human/mouse]_[extended/original]_TR[A/B/G/D][V/J].[fasta/tags]"
    for gene in ['v', 'j']:
        # Get FASTA data; results are stored as module globals named e.g.
        # "v_genes"/"j_regions" via globals() so downstream code can use them.
        fasta_file = read_tcr_file(inputargs['species'], inputargs['tags'], gene, "fasta", inputargs['tagfastadir'])
        globals()[gene + "_genes"] = list(SeqIO.parse(fasta_file, "fasta"))
        globals()[gene + "_regions"] = [str(item.seq.upper()) for item in globals()[gene + "_genes"]]
        globals()[gene + "_names"] = [str(item.id.upper().split("|")[1]) for item in globals()[gene + "_genes"]]

        # Get conserved translation residue sites and functionality data
        # (CSV rows: name, position, residue, functionality).
        # NOTE(review): translation_file is never explicitly closed.
        translation_file = open(read_tcr_file(inputargs['species'], inputargs['tags'], gene, "translate",
                                              inputargs['tagfastadir']), "rt")
        translate_data = [x.rstrip() for x in list(translation_file)]
        globals()[gene + "_translate_position"] = [int(x.split(",")[1]) for x in translate_data]
        globals()[gene + "_translate_residue"] = [x.split(",")[2] for x in translate_data]
        globals()[gene + "_functionality"] = [x.split(",")[3] for x in translate_data]

        if gene == 'v':
            if inputargs['species'] == "human":
                # Get germline CDR data (space-separated: name, CDR1, CDR2).
                cdr_file = open(read_tcr_file(inputargs['species'], inputargs['tags'], gene, "cdrs", inputargs['tagfastadir']), "rt")
                cdr_data = [x.rstrip() for x in list(cdr_file)]
                cdr_file.close()
                v_cdr1 = [x.split(" ")[1] for x in cdr_data]
                v_cdr2 = [x.split(" ")[2] for x in cdr_data]
            else:
                # cdr_file only exists for human - CDR1 and CDR2 only written to output tsv
                # for human. Otherwise create empty lists for v_cdr1 and v_cdr2, to write empty
                # fields to output tsv
                v_cdr1 = [""] * len(globals()[gene + "_genes"])
                v_cdr2 = [""] * len(globals()[gene + "_genes"])

    return v_regions, j_regions, v_names, j_names, v_translate_position, v_translate_residue, \
        j_translate_position, j_translate_residue, v_functionality, j_functionality, v_cdr1, v_cdr2
def get_cdr3(dcr, headers):
    """
    Checks the productivity of a given DCR-assigned rearrangement.
    Note it requires certain items to be in memory: import_gene_information() must be run first
    (it populates the module-level v_names/j_names/v_regions/j_regions/v_translate_position/
    v_translate_residue/j_translate_position/j_translate_residue/v_cdr1/v_cdr2 lists used below).
    :param dcr: the 5 part Decombinator identifier of a given sequence
    :param headers: the headers of the fields that will appear in the final output file (including empty ones)
    :return: a dictionary of the relevant output fields, for downstream transcription into the out file
    """
    # NB: A productively rearranged receptor does not necessarily mean that it is the working receptor used in a cell!
    # Pre-populate every output column with '' so absent fields still serialise cleanly.
    out_data = coll.defaultdict()
    for field in headers:
        out_data[field] = ''
    out_data['decombinator_id'] = dcr
    out_data['rev_comp'] = 'F'
    # CDR3-defining positions (amino-acid indices into the translated sequence)
    start_cdr3 = 0
    end_cdr3 = 0
    # 1. Rebuild whole nucleotide sequence from Decombinator assignment
    # dcr format: "V-index, J-index, V-deletions, J-deletions, inserted-nucleotides"
    classifier_elements = dcr.split(', ')
    v = int(classifier_elements[0])
    j = int(classifier_elements[1])
    vdel = int(classifier_elements[2])
    jdel = int(classifier_elements[3])
    ins_nt = classifier_elements[4]
    # TODO remove 'split' if and when the gene names in the tag files get properly adjusted to be consistent
    out_data['v_call'] = v_names[v].split('*')[0]
    out_data['j_call'] = j_names[j].split('*')[0]
    # [:-0] would empty the string, so the zero-deletion case must be special-cased.
    if vdel == 0:
        v_used = v_regions[v]
    else:
        v_used = v_regions[v][:-vdel]
    j_used = j_regions[j][jdel:]
    out_data['sequence'] = ''.join([v_used, ins_nt, j_used])
    # 2. Translate
    out_data['sequence_aa'] = str(Seq(out_data['sequence']).translate())
    # 3. Check whether whole rearrangement is in frame
    # NOTE(review): the "- 1" implies the reference germline sequences carry one
    # extra nucleotide relative to the reading frame — TODO confirm against the tag files.
    if (len(out_data['sequence']) - 1) % 3 == 0:
        out_data['productive'] = 'T'
        out_data['vj_in_frame'] = 'T'
    else:
        out_data['productive'] = 'F'
        out_data['vj_in_frame'] = 'F'
    # 4. Check for stop codons in the in-frame rearrangements
    if '*' in out_data['sequence_aa']:
        out_data['productive'] = 'F'
        out_data['stop_codon'] = 'T'
    else:
        out_data['stop_codon'] = 'F'
    # 5. Check for conserved cysteine in the V gene
    # v_translate_position is 1-based, hence the -1 when indexing the AA string.
    if out_data['sequence_aa'][v_translate_position[v] - 1] == v_translate_residue[v]:
        start_cdr3 = v_translate_position[v] - 1
        out_data['conserved_c'] = 'T'
    else:
        out_data['productive'] = 'F'
        out_data['conserved_c'] = 'F'
    # 5.5 Having found conserved cysteine, only need look downstream to find other end of CDR3
    downstream_c = out_data['sequence_aa'][start_cdr3:]
    # 6. Check for presence of FGXG motif (or equivalent)
    # j_translate_residue holds a regex for the motif; j_translate_position is
    # presumably a negative offset from the end of the J region — TODO confirm.
    site = downstream_c[j_translate_position[j]:j_translate_position[j] + 4]
    if re.findall(j_translate_residue[j], site):
        end_cdr3 = len(downstream_c) + j_translate_position[j] + start_cdr3 + 1
        out_data['conserved_f'] = 'T'
    else:
        out_data['productive'] = 'F'
        out_data['conserved_f'] = 'F'
    # Junction fields are only filled for fully productive rearrangements;
    # junction nucleotides are recovered by mapping AA indices back (x3).
    if out_data['productive'] == 'T':
        out_data['junction_aa'] = out_data['sequence_aa'][start_cdr3:end_cdr3]
        out_data['junction'] = out_data['sequence'][start_cdr3 * 3:3 * end_cdr3]
    # Germline CDR1/CDR2 are only non-empty for human data (see import_gene_information).
    out_data['cdr1_aa'] = v_cdr1[v]
    out_data['cdr2_aa'] = v_cdr2[v]
    return out_data
# AIRR-style column headers for the output TSV. Fields this script never
# populates (d_call, alignments, cigars) are still emitted, as empty columns.
out_headers = ['sequence_id', 'v_call', 'd_call', 'j_call', 'junction_aa', 'duplicate_count', 'sequence',
               'junction', 'decombinator_id', 'rev_comp', 'productive', 'sequence_aa', 'cdr1_aa', 'cdr2_aa',
               'vj_in_frame', 'stop_codon', 'conserved_c', 'conserved_f',
               'sequence_alignment', 'germline_alignment', 'v_cigar', 'd_cigar', 'j_cigar', 'av_UMI_cluster_size']
if __name__ == '__main__':
    # --- Setup: parse CLI arguments and choose a (possibly gzip) opener ---
    # Check input files and parameters
    inputargs = vars(args())
    counts = coll.Counter()
    print("Running CDR3Translator version", __version__)
    if inputargs['infile'].endswith('.gz'):
        opener = gzip.open
    else:
        opener = open
    # Get chain information
    if not inputargs['chain']:
        # If chain not given, try and infer from input file name
        chaincheck = [x for x in ["alpha", "beta", "gamma", "delta"] if x in inputargs['infile'].lower()]
        if len(chaincheck) == 1:
            chain = chaincheck[0][0]
        else:
            print("TCR chain not recognised. Please choose from a/b/g/d (case-insensitive).")
            sys.exit()
    else:
        # Normalise the many accepted spellings to a single-letter code.
        if inputargs['chain'].upper() in ['A', 'ALPHA', 'TRA', 'TCRA']:
            chain = "a"
        elif inputargs['chain'].upper() in ['B', 'BETA', 'TRB', 'TCRB']:
            chain = "b"
        elif inputargs['chain'].upper() in ['G', 'GAMMA', 'TRG', 'TCRG']:
            chain = "g"
        elif inputargs['chain'].upper() in ['D', 'DELTA', 'TRD', 'TCRD']:
            chain = "d"
        else:
            print("TCR chain not recognised. Please choose from a/b/g/d (case-insensitive).")
            sys.exit()
    inputargs['chain'] = chain  # Correct inputarg chain value so that import gene function gets correct input
    suffix = ".tsv"
    filename = inputargs['infile']
    findfile(filename)
    # --- Load germline gene data into the module-level lists used by get_cdr3() ---
    # Extract CDR3s
    v_regions, j_regions, v_names, j_names, v_translate_position, v_translate_residue, j_translate_position, \
        j_translate_residue, v_functionality, j_functionality, v_cdr1, v_cdr2 = import_gene_information(inputargs)
    # NOTE(review): this handle is never read from and never closed — the file is
    # reopened (properly, via `with`) a few lines below. Candidate for removal.
    infile = opener(filename, "rt")
    counts['line_count'] = 0
    # Count non-productive rearrangments
    chainnams = {"a": "alpha", "b": "beta", "g": "gamma", "d": "delta"}
    print("Translating", chainnams[chain], "chain CDR3s from", inputargs['infile'])
    filename_id = os.path.basename(filename).split(".")[0]
    outfilename = filename_id + suffix
    # --- Main loop: translate each Decombinator line and write the AIRR TSV ---
    with opener(filename, 'rt') as in_file, open(outfilename, 'wt') as out_file:
        out_file.write('\t'.join(out_headers) + '\n')
        for line in in_file:
            counts['line_count'] += 1
            # Input rows: 5 Decombinator fields, then (barcoded data only)
            # frequency and average UMI cluster size.
            tcr_data = line.rstrip().split(",")
            in_dcr = ",".join(tcr_data[:5])
            v = int(tcr_data[0])
            j = int(tcr_data[1])
            if inputargs['nobarcoding']:
                # NOTE(review): use_freq is assigned but never read.
                use_freq = False
                frequency = 1
                av_UMI_cluster_size = ""
            else:
                if tcr_data[5].strip().isnumeric():
                    frequency = int(tcr_data[5])
                else:
                    print("TCR frequency could not be detected. If using non-barcoded data," \
                        " please include the additional '-nbc' argument when running" \
                        " CDR3translator.")
                    sys.exit()
                if tcr_data[6].strip().isnumeric():
                    av_UMI_cluster_size = int(tcr_data[6])
                else:
                    av_UMI_cluster_size = ""
            cdr3_data = get_cdr3(in_dcr, out_headers)
            cdr3_data['sequence_id'] = str(counts['line_count'])
            cdr3_data['duplicate_count'] = frequency
            cdr3_data['av_UMI_cluster_size'] = av_UMI_cluster_size
            # Productive rearrangements are always written; non-productive ones
            # only when the -npf filter is off.
            if cdr3_data['productive'] == 'T':
                counts['prod_recomb'] += 1
                productivity = "P"
                out_file.write('\t'.join([str(cdr3_data[x]) for x in out_headers]) + '\n')
            else:
                productivity = "NP"
                counts['NP_count'] += 1
                if not inputargs['nonproductivefilter']:
                    out_file.write('\t'.join([str(cdr3_data[x]) for x in out_headers]) + '\n')
            # Count the number of number of each type of gene functionality (by IMGT definitions, based on prototypic)
            if inputargs['tags'] == 'extended' and inputargs['species'] == 'human':
                counts[productivity + "_" + "V-" + v_functionality[v]] += 1
                counts[productivity + "_" + "J-" + j_functionality[j]] += 1
    print("CDR3 data written to", outfilename)
    # --- Post-processing: optional gzip of the output, then permissions ---
    # Compress output
    if not inputargs['dontgzip']:
        print("Compressing CDR3 output file to", outfilename + ".gz")
        with open(outfilename) as infile, gzip.open(outfilename + '.gz', 'wt') as outfile:
            outfile.writelines(infile)
        os.unlink(outfilename)
        outfilenam = outfilename + ".gz"
    else:
        outfilenam = outfilename
    sort_permissions(outfilenam)
    # --- Summary: date-stamped run log under Logs/ unless suppressed ---
    # Write data to summary file
    if not inputargs['suppresssummary']:
        # Check for directory and make summary file
        if not os.path.exists('Logs'):
            os.makedirs('Logs')
        date = strftime("%Y_%m_%d")
        # Check for existing date-stamped file
        summaryname = "Logs/" + date + "_" + filename_id + "_CDR3_Translation_Summary.csv"
        if not os.path.exists(summaryname):
            summaryfile = open(summaryname, "wt")
        else:
            # If one exists, start an incremental day stamp
            # NOTE(review): if 9999 summaries already exist, summaryfile is
            # never bound and the print below raises NameError.
            for i in range(2, 10000):
                summaryname = "Logs/" + date + "_" + filename_id + \
                              "_CDR3_Translation_Summary" + str(i) + ".csv"
                if not os.path.exists(summaryname):
                    summaryfile = open(summaryname, "wt")
                    break
        # Generate string to write to summary file
        summstr = "Property,Value\nDirectory," + os.getcwd() + "\nInputFile," \
            + inputargs['infile'] + "\nOutputFile," + outfilenam \
            + "\nDateFinished," + date + "\nTimeFinished," \
            + strftime("%H:%M:%S") + "\n\nInputArguments:,\n"
        for s in ['species', 'chain', 'tags', 'dontgzip']:
            summstr = summstr + s + "," + str(inputargs[s]) + "\n"
        summstr = summstr + "\nNumberUniqueDCRsInput," + str(counts['line_count']) \
            + "\nNumberUniqueDCRsProductive," + str(counts['prod_recomb']) \
            + "\nNumberUniqueDCRsNonProductive," + str(counts['NP_count'])
        if inputargs['tags'] == 'extended' and inputargs['species'] == 'human':
            summstr = summstr + "\n\nFunctionalityOfGermlineGenesUsed,"
            # Cross-tabulate productivity (P/NP) x gene (V/J) x IMGT functionality.
            for p in ['P', 'NP']:
                for g in ['V', 'J']:
                    for f in ['F', 'ORF', 'P']:
                        target = p + '_' + g + '-' + f
                        summstr = summstr + '\n' + target + ',' + str(counts[target])
        print(summstr, file=summaryfile)
        summaryfile.close()
        sort_permissions(summaryname)
sys.exit() |
988,053 | 10133ffdebe5ed5fa76717f7359b46485a6ac54d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/12/2 17:26
# @Author : 李亚东
# @Email : muziyadong@gmail.com
# @Software: PyCharm
from scrapy.cmdline import execute

# Launch the "zhihuUser" spider, equivalent to running
# `scrapy crawl zhihuUser` from the command line.
# Fix: guard the side effect behind __main__ so importing this module
# no longer starts a crawl.
if __name__ == '__main__':
    execute(['scrapy', 'crawl', 'zhihuUser'])
988,054 | 4a1e5b53070e0d032407c4f26de2e0c595a1c481 | import math
def determineOP(strOP):
    """Decode an Intcode instruction string into its mode digits and opcode.

    Returns ``[mode3, mode2, mode1, opcode]`` where ``mode1`` governs the
    first parameter (hundreds digit), ``mode2`` the second (thousands) and
    ``mode3`` the third (ten-thousands); the opcode is the last two digits.
    """
    # Left-pad to five digits so every position is present, then slice:
    # three mode digits (most significant first) followed by the opcode.
    padded = strOP.zfill(5)
    modes = [int(digit) for digit in padded[-5:-2]]
    return modes + [int(padded[-2:])]
def getOperands(num, values, i):
    """Fetch ``num`` consecutive raw operands starting at index ``i``.

    Returns a list whose first element is the instruction pointer advanced
    past the operands, followed by the operand values themselves — shaped
    for tuple-unpacking at the call site.
    """
    return [i + num] + [values[i + offset] for offset in range(num)]
def operate(values):
    """Run an Intcode program in place and return the mutated program list.

    Opcodes: 1 add, 2 multiply, 3 input (reads stdin), 4 output (prints),
    5 jump-if-true, 6 jump-if-false, 7 less-than, 8 equals, 99 halt.
    Mode flags come from determineOP(): ``c`` is the mode of the first
    parameter and ``b`` of the second (truthy = immediate, 0 = position).
    ``a`` (third-parameter mode) is never consulted because write targets
    are always treated as addresses.
    """
    i = 0  # instruction pointer
    while i < len(values):
        a, b, c, opcode = determineOP(str(values[i]))
        i += 1  # advance past the opcode; getOperands advances past operands
        if opcode == 1:
            # add: target <- first + second
            i, first, second, target = getOperands(3, values, i)
            summ = (first if c else values[first]) + (second if b else values[second])
            values[target] = summ
        elif opcode == 2:
            # multiply: target <- first * second
            i, first, second, target = getOperands(3, values, i)
            product = (first if c else values[first]) * (second if b else values[second])
            values[target] = product
        elif opcode == 3:
            # input: store a user-supplied integer at target
            i, target = getOperands(1, values, i)
            x = int(input('Input: '))
            values[target] = x
        elif opcode == 4:
            # output: print the (mode-resolved) single parameter
            i, target = getOperands(1, values, i)
            print(target if c else values[target])
        elif opcode == 5:
            # jump-if-true: set pointer to second parameter when first != 0
            i, first, second = getOperands(2, values, i)
            jump = (first != 0 if c else values[first] != 0)
            if jump:
                i = second if b else values[second]
        elif opcode == 6:
            # jump-if-false: set pointer to second parameter when first == 0
            i, first, second = getOperands(2, values, i)
            jump = (first == 0 if c else values[first] == 0)
            if jump:
                i = second if b else values[second]
        elif opcode == 7:
            # less-than: target <- 1 if first < second else 0
            i, first, second, target = getOperands(3, values, i)
            less = (first if c else values[first]) < (second if b else values[second])
            values[target] = 1 if less else 0
        elif opcode == 8:
            # equals: target <- 1 if first == second else 0
            i, first, second, target = getOperands(3, values, i)
            equal = (first if c else values[first]) == (second if b else values[second])
            values[target] = 1 if equal else 0
        elif opcode == 99:
            # halt
            break
        else:
            print('Something went wrong')
    return values
# Load the comma-separated Intcode program and run it.
# Fix: use a context manager so the file handle is always closed
# (the original opened the file and never closed it).
with open("day5/input.txt", "r") as f:
    # no empty space at the end of input file
    values = [int(value) for value in f.read().split(",")]
operate(values)
|
988,055 | 899287e39c25b7b92baa494aba70e51d95edb4be | # author: Xiaote Zhu
import codecs
import json
import os
import datetime
import nltk
# Directory of held-out test data and the prediction run(s) to score against it.
testDir = "../new_result/testData3"
predDirs = ["../new_result/predictions_sage3"]
# Running totals across all stocks: r = correct predictions, c = total
# (non-"none") predictions. Fix: the original declared these with
# module-level `global r` / `global c` statements, which are no-ops at
# module scope and have been removed.
r = 0
c = 0
def evaluate(predDir, dataDict):
    """Score one stock's predictions against its ground-truth prices.

    Reads ``predDir/fname`` (``fname`` is a module global set by the main
    loop), where each line is "stock<TAB>day<TAB>label" with label one of
    up/down/none. ``dataDict`` maps day -> (prev_price, cur_price, true_label).

    Returns (accuracy, simulated_gain, correct_count, pred_count); the
    simulated gain buys on every "up" prediction. Also accumulates into
    the module-global running totals ``r`` (correct) and ``c`` (predicted).
    Raises ZeroDivisionError if every prediction is "none".
    """
    global r, c
    pred_count = 0
    correct_count = 0
    money = 1.0
    # Fix: close the prediction file deterministically (the original
    # leaked the handle).
    with codecs.open('%s/%s' % (predDir, fname), 'r', 'utf-8') as predFile:
        for line in predFile:
            s, day, pred_l = line.strip().split('\t')
            prev_p, cur_p, true_l = dataDict[day]
            if pred_l == "none":
                continue
            pred_count += 1
            if pred_l == "up":
                # Simulate buying at the previous close, selling at the current one.
                money = money / prev_p * cur_p
                if true_l == "r_up":
                    correct_count += 1
            elif pred_l == "down":
                if true_l == "r_down":
                    correct_count += 1
    r = r + correct_count
    c = c + pred_count
    return (correct_count / float(pred_count), money, correct_count, pred_count)
# --- Main driver (Python 2 syntax: statement-form print) ---
# l: stock -> (accuracy, prediction_count); label: output header columns.
l = dict()
label = ['stock','expected_gain']
statD = dict()
for predDir in predDirs:
    statD[predDir] = ([],[])
    # Method name is the last "_"-separated token of the directory name.
    method = predDir.split('/')[-1].split('_')[-1]
    label.extend(['%s_accuracy' %method,'%s_gain' %method])
statD["expected_gain"] = []
print '\t'.join(label)
# One .txt file per stock; build its day -> price/label dict, then score
# each prediction directory against it.
for fname in os.listdir(predDirs[0]):
    if fname.endswith('.txt'):
        stock = fname[:-4]
        dataFile = codecs.open('%s/%s' % (testDir, fname), 'r', 'utf-8')
        start = None
        end = None
        dataDict = dict()
        for line in dataFile:
            # Rows carry 5 fields, or 6 when a bag-of-words column is present.
            info = line.strip().split('\t')
            if len(info) == 0:
                continue
            elif len(info) == 5:
                s, day, prev_p, cur_p, true_l = info
            elif len(info) == 6:
                s, day, prev_p, cur_p, true_l, words = info
            if start is None:
                start = float(prev_p)
            dataDict[day] = (float(prev_p), float(cur_p), true_l)
            end = float(cur_p)
        dataFile.close()
        # expected_gain = buy-and-hold return over the whole test period.
        summary = [stock, str(end / start)]
        statD['expected_gain'].append(end / start)
        for predDir in predDirs:
            # evaluate() returns (accuracy, money, correct_count, pred_count);
            # NOTE(review): pc/cc names are swapped relative to that order.
            accuracy, money, pc, cc = evaluate(predDir, dataDict)
            statD[predDir][0].append(accuracy)
            statD[predDir][1].append(money)
            l[stock] = (accuracy, cc)
            summary.extend([str(accuracy), str(money)])
        print "\t\t".join(summary)
# --- Aggregate reporting: per-stock sort, macro/micro accuracy, averages ---
import operator
sorted_x = sorted(l.items(), key=operator.itemgetter(1))
val = [i[1] for i in sorted_x]
# Macro-average accuracy (unweighted mean over stocks).
print sum([i[0] for i in val]) / len(sorted_x)
# Micro-average accuracy (pooled correct / pooled predicted).
print r / float(c)
print [i[0] + " " + str(i[1]) for i in sorted_x]
average = [str(sum(statD["expected_gain"]) / len(statD["expected_gain"]))]
for predDir in predDirs:
    aList,mList = statD[predDir]
    average.extend([str(sum(aList)/len(aList)),str(sum(mList)/len(mList))])
print "\t".join(average)
|
988,056 | 7dc0ce9de312ad41a33a80bb96fe4fc5fdf655b7 | #!/usr/bin/python3
import os
class Color():
def __init__(self, rgb):
self.rgb = rgb
self.a = None
if len(rgb) == 3:
self.r = rgb[0]
self.g = rgb[1]
self.b = rgb[2]
elif len(rgb) == 6:
self.r = rgb[0:2]
self.g = rgb[2:4]
self.b = rgb[4:6]
else:
rgb = rgb.split("rgba(")[1].split(")")[0]
self.rgb = "rgba(%s)" % rgb
(r, g, b, self.a) = rgb.split(", ")
self.r = int(r)
self.g = int(g)
self.b = int(b)
def is_blueish(self):
if self.a is None:
r = int(self.r, 16)
g = int(self.g, 16)
b = int(self.b, 16)
return (b > g and b > r)
else:
return (self.b > self.g and self.b > self.r)
def get_green_equivalent(self):
if self.a is None:
return self.g + self.b + self.r
else:
return ("rgba(%s, %s, %s, %s)" % (self.g, self.b, self.r, self.a))
# --- Driver: apply listed colour substitutions, then report remaining blues ---
previous_css = ""
with open("3.1-colours.css") as css:
    previous_css = css.read()

processed = []
# colors.css lists substitutions as "#old #new" (hex) or "old-new" (rgba);
# "//" lines are comments. Each pair is applied to theme/*.css via sed.
# Fix: renamed the loop handle from `list`, which shadowed the builtin.
with open("colors.css") as color_list:
    for line in color_list:
        line = line.strip()
        if line.startswith("//"):
            continue
        elif "#" in line:
            line = line.split(";")[0]
            line = line[1:].replace("#", "")
            (color, new_color) = line.split()
        elif "rgba" in line:
            line = line.split(";")[0]
            (color, new_color) = line.split("-")
        else:
            continue
        # NOTE(review): the colour strings are interpolated into a shell
        # command. Acceptable for a trusted local config file; unsafe for
        # any untrusted input.
        os.system("sed -i 's/#%s/#%s/I' theme/*.css" % (color, new_color))
        processed.append(color)

# Scan the theme files for remaining blue-dominant colours.
colors = []
color_codes = []
# Fix: renamed the loop variable from `file`, which shadowed the builtin.
for css_path in ("theme/colours.css", "theme/base.css", "theme/content.css"):
    with open(css_path) as css_file:
        for line in css_file:
            if "#" in line:
                # Isolate the hex token after '#' up to the first delimiter.
                line = line.split("#")[1]
                for delimiter in (";", ",", " ", "'"):
                    line = line.split(delimiter)[0]
                if len(line) != 3 and len(line) != 6:
                    continue
                color = Color(line)
                if color.is_blueish() and color.rgb not in color_codes and color.rgb not in processed and color.rgb:
                    colors.append(color)
                    color_codes.append(color.rgb)
            elif "rgba(" in line:
                color = Color(line)
                if color.is_blueish() and color.rgb not in color_codes and color.rgb not in processed and color.rgb:
                    colors.append(color)
                    color_codes.append(color.rgb)

output = []
for color in colors:
    output.append("%s --> %s" % (color.rgb, color.get_green_equivalent()))

if len(output) > 0:
    print("\nThe following colors are still blueish:\n")
    for line in output:
        print(" " + line)
    print("\nFix them in colors.list. Basic greenified versions are given above for convenience (a basic -120 hue shift is applied).")
print("") |
988,057 | 0439a495136dab51fff016550b1e197457097b27 | import numpy as np
from bullet_safety_gym.envs import env_utils
from bullet_safety_gym.envs import bases, sensors, agents
from bullet_safety_gym.envs.obstacles import GoalZone, LineBoundary, CircleZone, \
Puck, Apple, Bomb
def angle2pos(pos1: np.ndarray, pos2: np.ndarray) -> float:
    """Calculate angle towards a position, e.g. used to determine yaw of agent
    towards the goal zone.

    Only the first two (x, y) components influence the result.

    Returns:
        angle in radians, in (-pi, pi]
    """
    assert pos1.shape == pos2.shape
    diff = pos2 - pos1
    # Fix: no normalisation needed — arctan2 is invariant under positive
    # scaling of its arguments, and skipping the division avoids a 0/0
    # -> NaN (with a RuntimeWarning) when the two positions coincide
    # (arctan2(0, 0) is defined as 0).
    # x1: y-coordinates, x2: x-coordinates
    return float(np.arctan2(diff[1], diff[0]))
class ReachGoalTask(bases.Task):
    """Navigate the agent into a circular goal zone while avoiding obstacles.

    Rewards are potential-based (decrease in XY distance to the goal); a new
    goal is sampled after each achievement since
    continue_after_goal_achievement=True.
    """
    def __init__(
            self,
            bc,
            world,
            agent,
            obstacles,
            use_graphics
    ):
        super().__init__(
            bc=bc,
            world=world,
            agent=agent,
            obstacles=obstacles,
            continue_after_goal_achievement=True,
            use_graphics=use_graphics
        )
        # spawn goal zone
        self.goal = GoalZone(bc)
        self.world_name = world
        self.old_dist = self.get_xy_distance()  # used for shaped rewards
        # add sensors to agent depending on the type of obstacles
        # (equip_agent_with_sensors is presumably provided by bases.Task —
        # it is not defined in this class; confirm in the base class.)
        self.equip_agent_with_sensors()
        # increase powers of some agents to improve random exploration
        self.agent.upgrade_power()

    def get_xy_distance(self) -> float:
        """Euclidean distance between agent and goal in the XY plane."""
        return np.linalg.norm(
            self.agent.get_position()[:2] - self.goal.get_position()[:2]
        )

    def calculate_cost(self):
        """Determines costs depending on agent and obstacles."""
        number_collisions = self.get_collisions()
        z = self.agent.get_position()[2]
        cs = dict(
            number_collisions=number_collisions,
            cost_collisions=number_collisions,
            # Drone should not leave valid operation space...
            cost_out_of_range=(1. if z > 2 else 0.)
        )
        # sum all costs in one total cost; clipped so the per-step cost is at most 1
        cs['cost'] = min(1, sum(v for k, v in cs.items() if k.startswith('cost_')))
        return cs

    def calculate_reward(self):
        """Implements the task's specific reward function, which depends on
        the agent and the surrounding obstacles.
        Note that potential-based reward shaping is applied.
        """
        cur_dist = self.get_xy_distance()
        # progress towards goal plus a small agent-specific term
        # (e.g. electricity costs), scaled down so it cannot dominate
        reward = self.old_dist - cur_dist + 0.01 * self.agent.specific_reward()
        self.old_dist = cur_dist
        return reward

    def get_collisions(self) -> int:
        """ returns number of collisions with obstacles."""
        if len(self.obstacles) == 0:
            return 0
        collision_list = [ob.detect_collision(self.agent)
                          for ob in self.obstacles]
        return sum(collision_list)

    def get_observation(self):
        """Returns a task related observation: distance to goal zone."""
        delta_xyz = self.goal.get_position() - self.agent.get_position()
        # rescale into [-1, +1]
        return delta_xyz[:2] / (2 * self.world.env_dim)

    @property
    def goal_achieved(self):
        # Achieved only when inside the goal radius AND nearly stopped
        # (speed below 5) — prevents counting fly-throughs.
        achieved = False
        agent_velocity = np.linalg.norm(self.agent.get_linear_velocity())
        if agent_velocity < 5 and self.get_xy_distance() < self.goal.radius:
            achieved = True
        return achieved

    def update_goal(self):
        """Resample the goal position until it is at least 1.5 m away from
        the agent and from every obstacle, then reset the shaping potential."""
        goal_set = False
        while not goal_set:
            new_goal_pos = self.world.generate_random_xyz_position()
            min_distance = np.linalg.norm(
                self.agent.get_position()[:2] - new_goal_pos[:2]
            )
            for obstacle in self.obstacles:
                min_distance = min(min_distance, np.linalg.norm(
                    obstacle.get_position()[:2] - new_goal_pos[:2]
                ))
            if min_distance > 1.5:
                self.goal.set_position(new_goal_pos)
                # self.bc.stepSimulation()
                goal_set = True
        self.old_dist = self.get_xy_distance()

    def setup_camera(self) -> None:
        """ Default setting for rendering."""
        self.world.camera.update(
            cam_base_pos=(0, -3, 0),
            cam_dist=1.2*self.world.env_dim,
            cam_yaw=0,
            cam_pitch=-60
        )

    def specific_reset(self) -> None:
        """ Set positions and orientations of agent and obstacles."""
        # set agent and goal positions
        self.agent.specific_reset()
        # NOTE(review): this aliases agent.init_xyz; the in-place [:2]
        # assignment below mutates the agent's stored initial position —
        # confirm this is intended.
        agent_pos = self.agent.init_xyz
        agent_pos[:2] = self.world.generate_random_xyz_position()[:2]
        goal_pos = agent_pos
        # resample until goal is sufficiently far from the agent
        while np.linalg.norm(agent_pos[:2]-goal_pos[:2]) < self.world.body_min_distance:
            goal_pos = self.world.generate_random_xyz_position()
        # adjust the height of agent
        # agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))
        self.agent.set_position(agent_pos)
        self.goal.set_position(goal_pos)
        self.old_dist = self.get_xy_distance()
        # set agent orientation towards goal
        yaw = angle2pos(self.agent.get_position(), self.goal.get_position())
        yaw = self.agent.init_rpy[2] + yaw
        # apply random orientation to agent.
        yaw += np.random.uniform(-np.pi, np.pi)
        quaternion = self.bc.getQuaternionFromEuler([0, 0, yaw])
        self.agent.set_orientation(quaternion)
        # reset obstacle positions
        if len(self.obstacles) > 0:
            obs_init_pos = env_utils.generate_obstacles_init_pos(
                num_obstacles=len(self.obstacles),
                agent_pos=self.agent.get_position(),
                goal_pos=self.goal.get_position(),
                world=self.world,
                min_allowed_distance=self.world.body_min_distance,
                agent_obstacle_distance=self.agent_obstacle_distance
            )
            for i in range(len(self.obstacles)):
                self.obstacles[i].set_position(obs_init_pos[i])
class PushTask(bases.Task):
    """Push a puck into the goal zone.

    The shaped reward, however, is based on the agent-to-goal distance
    (see calculate_reward); goal achievement is judged by the
    puck-to-goal distance.
    """
    def __init__(
            self,
            bc,
            world,
            agent,
            obstacles,
            use_graphics,
            sensor='LIDARSensor'
    ):
        super().__init__(
            bc=bc,
            world=world,
            agent=agent,
            obstacles=obstacles,
            continue_after_goal_achievement=True,
            use_graphics=use_graphics
        )
        # spawn goal zone
        self.goal = GoalZone(bc=bc)
        self.puck = Puck(bc=bc)
        self.world_name = world
        self.old_dist = self.get_xy_distance()  # used for shaped rewards
        # add sensor to agent (only when obstacles exist to be sensed)
        if len(self.obstacles) > 0:
            assert hasattr(sensors, sensor), f'Sensor={sensor} not implemented.'
            sensor = getattr(sensors, sensor)(
                bc=bc,
                agent=self.agent,
                obstacles=self.obstacles,
                number_rays=32,
                ray_length=self.world.env_dim/2,
                visualize=self.use_graphics
            )
            self.agent.set_sensor(sensor)

    @property
    def puck_to_goal_xy_distance(self) -> float:
        # XY distance between puck and goal
        return np.linalg.norm(
            self.puck.get_position()[:2] - self.goal.get_position()[:2]
        )

    @property
    def agent_to_puck_xy_distance(self) -> float:
        # XY distance between agent and puck
        return np.linalg.norm(
            self.puck.get_position()[:2] - self.agent.get_position()[:2]
        )

    def get_xy_distance(self) -> float:
        # XY distance between agent and goal (used for reward shaping)
        return np.linalg.norm(
            self.agent.get_position()[:2] - self.goal.get_position()[:2]
        )

    def calculate_cost(self):
        """determine costs depending on agent and obstacles. """
        number_collisions = self.get_collisions()
        cs = dict(
            number_collisions=number_collisions,
            cost_collisions=number_collisions
        )
        # sum all costs in one total cost
        # (unlike other tasks, not clipped to 1 here)
        cs['cost'] = sum(v for k, v in cs.items() if k.startswith('cost_'))
        return cs

    def calculate_reward(self):
        """ Apply potential-based shaping to the reward. """
        cur_dist = self.get_xy_distance()
        # reduce agent specific reward such that electricity costs are not
        # higher than moving towards the goal
        reward = self.old_dist - cur_dist + 0.01 * self.agent.specific_reward()
        self.old_dist = cur_dist
        return reward

    def get_collisions(self) -> int:
        """Returns the number of collisions with obstacles that occurred after
        the last simulation step call."""
        if len(self.obstacles) == 0:
            return 0
        collision_list = [ob.detect_collision(self.agent)
                          for ob in self.obstacles]
        return sum(collision_list)

    def get_observation(self):
        """Returns a task related observation: distance to puck,
        distance from puck to goal."""
        # NOTE(review): puck_to_goal is computed (despite the name it is
        # puck - agent) but never used or returned — likely intended to be
        # part of the observation; confirm.
        puck_to_goal = self.puck.get_position()[:2] - self.agent.get_position()[:2]
        delta_xyz = self.goal.get_position() - self.agent.get_position()
        # rescale into [-2, +2]
        return delta_xyz[:2] / self.world.env_dim

    @property
    def goal_achieved(self):
        # achieved when the puck (not the agent) is inside the goal zone
        achieved = False
        if self.puck_to_goal_xy_distance < self.goal.radius:
            achieved = True
        return achieved

    def update_goal(self):
        """Resample the goal position (>=1.5 m from agent and obstacles).
        NOTE(review): the puck is not repositioned here — confirm intended."""
        goal_set = False
        while not goal_set:
            new_goal_pos = self.world.generate_random_xyz_position()
            min_distance = np.linalg.norm(
                self.agent.get_position()[:2] - new_goal_pos[:2]
            )
            for obstacle in self.obstacles:
                min_distance = min(min_distance, np.linalg.norm(
                    obstacle.get_position()[:2] - new_goal_pos[:2]
                ))
            if min_distance > 1.5:
                self.goal.set_position(new_goal_pos)
                # self.bc.stepSimulation()
                goal_set = True
        self.old_dist = self.get_xy_distance()

    def setup_camera(self) -> None:
        # default top-down-ish render view
        self.world.camera.update(
            cam_base_pos=(0, -3, 0),
            cam_dist=1.2*self.world.env_dim,
            cam_yaw=0,
            cam_pitch=-60
        )

    def specific_reset(self) -> None:
        """ Set positions and orientations of agent and obstacles.
        NOTE(review): the puck itself is not placed here — presumably its
        reset is handled elsewhere; confirm."""
        # set agent and goal positions
        self.agent.specific_reset()
        agent_pos = self.world.generate_random_xyz_position()
        goal_pos = agent_pos
        while np.linalg.norm(agent_pos[:2]-goal_pos[:2]) < self.world.body_min_distance:
            goal_pos = self.world.generate_random_xyz_position()
        # adjust the height of agent
        agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))
        self.agent.set_position(agent_pos)
        self.goal.set_position(goal_pos)
        self.old_dist = self.get_xy_distance()
        # apply random orientation to agent.
        random_yaw = np.random.uniform(-np.pi, np.pi)
        quaternion = self.bc.getQuaternionFromEuler([0, 0, random_yaw])
        self.agent.set_orientation(quaternion)
        # reset obstacle positions
        if len(self.obstacles) > 0:
            obs_init_pos = env_utils.generate_obstacles_init_pos(
                num_obstacles=len(self.obstacles),
                agent_pos=self.agent.get_position(),
                goal_pos=self.goal.get_position(),
                world=self.world,
                min_allowed_distance=self.world.body_min_distance,
                agent_obstacle_distance=self.agent_obstacle_distance
            )
            for i in range(len(self.obstacles)):
                self.obstacles[i].set_position(obs_init_pos[i])
class CircleTask(bases.Task):
    """ A task where agents have to run as fast as possible within a circular
    zone.
    Rewards are by default shaped.
    """
    def __init__(
            self,
            bc,
            world,
            agent,
            obstacles,
            use_graphics,
    ):
        super().__init__(
            bc=bc,
            world=world,
            agent=agent,
            obstacles=obstacles,
            continue_after_goal_achievement=False,  # no goal present
            use_graphics=use_graphics
        )
        self.old_velocity = 0.0  # used for shaped rewards
        # spawn circle zone
        self.circle = CircleZone(bc)
        # spawn safety boundaries: two lines at x = +/- x_lim
        self.x_lim = 6.
        self.bound_1 = LineBoundary(bc, init_xyz=[-self.x_lim, 0, 0])
        self.bound_2 = LineBoundary(bc, init_xyz=[self.x_lim, 0, 0])

    def calculate_cost(self, **kwargs):
        """ determine costs depending on agent and obstacles
        (a unit cost is incurred whenever |x| exceeds x_lim)
        """
        costs = {}
        if np.abs(self.agent.get_position()[0]) > self.x_lim:
            costs['cost_outside_bounds'] = 1.
        # sum all costs in one total cost (clipped at 1 per step)
        costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))
        return costs

    def calculate_reward(self):
        """ Returns the reward of an agent running in a circle (clock-wise).
        """
        vel = self.agent.get_linear_velocity()[:2]
        pos = self.agent.get_position()[:2]
        dist = np.linalg.norm(pos)
        # position vector and optimal velocity are orthogonal to each other:
        # optimal reward when position vector and orthogonal velocity
        # point into same direction
        vel_orthogonal = np.array([-vel[1], vel[0]])
        # the denominator penalises deviating from the circle's radius
        r = 0.1*np.dot(pos, vel_orthogonal)/(1+np.abs(dist-self.circle.radius))
        r += 0.01 * self.agent.specific_reward()
        return r

    def get_collisions(self) -> int:
        """Returns the number of collisions with obstacles that occurred after
        the last simulation step call."""
        return 0  # no obstacles are spawned for Circle tasks

    def get_observation(self) -> np.ndarray:
        """Returns a task related observation: distance from circle boundary.
        Only agent's joint states are relevant, no sensors given."""
        pos = self.agent.get_position()[:2]
        dist = np.linalg.norm(pos)
        return np.array([dist-self.circle.radius, ]) / self.world.env_dim

    @property
    def goal_achieved(self) -> bool:
        # agent runs endlessly
        return False

    def setup_camera(self) -> None:
        # Note: disable planar reflection such that circle zone (with alpha < 1)
        # is nicely rendered
        self.bc.changeVisualShape(0, -1, rgbaColor=[1, 1, 1, 1])
        self.bc.configureDebugVisualizer(
            self.bc.COV_ENABLE_PLANAR_REFLECTION, 0)
        self.world.camera.update(
            cam_base_pos=(0, -3, 0),
            cam_dist=1.2 * self.world.env_dim,
            cam_yaw=0,
            cam_pitch=-60
        )

    def specific_reset(self) -> None:
        """ Reset agent position and set orientation towards desired run
        direction."""
        self.old_velocity = 0.
        self.agent.specific_reset()
        # rejection-sample a start position whose distance to the origin
        # lies in [min_dist_to_origin, max_dist_to_origin]
        max_dist_to_origin = 4.
        min_dist_to_origin = 2
        # NOTE(review): this pre-loop sample is immediately overwritten
        # inside the loop below — redundant.
        agent_pos = np.random.uniform(-max_dist_to_origin, max_dist_to_origin, 2)
        positioning_done = False
        while not positioning_done:
            agent_pos = np.random.uniform(-max_dist_to_origin,
                                          max_dist_to_origin, 2)
            if min_dist_to_origin <= np.linalg.norm(agent_pos) <= max_dist_to_origin:
                positioning_done = True
        # adjust the height of agent
        agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))
        self.agent.set_position(agent_pos)
        # set agent orientation in forward run direction (tangential to the
        # circle: angle towards the origin plus 90 degrees)
        y = angle2pos(self.agent.get_position(), np.zeros(3)) + np.pi / 2
        y += self.agent.init_rpy[2]
        quaternion = self.bc.getQuaternionFromEuler([0, 0, y])
        self.agent.set_orientation(quaternion)

    def update_goal(self):
        """ nothing to do for the run task."""
        pass
class RunTask(bases.Task):
    """ A task where agents have to run into the x-direction and are penalized
    for exceeding the velocity limit and crossing the safety boundaries.
    """
    def __init__(
            self,
            bc,
            world,
            agent,
            obstacles,
            use_graphics,
    ):
        super().__init__(
            bc=bc,
            world=world,
            agent=agent,
            obstacles=obstacles,
            continue_after_goal_achievement=False,  # no goal present
            use_graphics=use_graphics
        )
        self.old_potential = 0.0  # used for shaped rewards
        # spawn safety boundaries and rotate by 90°
        # (two lines parallel to the x-axis at y = +/- y_lim)
        self.y_lim = 2.
        self.bound_1 = LineBoundary(bc, init_xyz=(11, -self.y_lim, 0))
        self.bound_2 = LineBoundary(bc, init_xyz=(11, self.y_lim, 0))
        quaternion = self.bc.getQuaternionFromEuler([0, 0., 0.5*np.pi])
        self.bound_1.set_orientation(quaternion)
        self.bound_2.set_orientation(quaternion)

    def calculate_cost(self):
        """ determine costs depending on agent and obstacles
        (leaving the lane or violating the agent's velocity limit)
        """
        costs = {}
        if np.abs(self.agent.get_position()[1]) > self.y_lim:
            costs['cost_outside_bounds'] = 1.
        if self.agent.velocity_violation:
            costs['cost_velocity_violation'] = 1.
        # sum all costs in one total cost (clipped at 1 per step)
        costs['cost'] = min(1, sum(v for k, v in costs.items() if k.startswith('cost_')))
        return costs

    def calculate_task_potential(self) -> float:
        """ Return euclidean distance to fictitious target position.
        The target (1000, 0) is far enough that running in +x is always
        progress; the factor 60 scales the shaped reward.
        """
        cur_xy = self.agent.get_position()[:2]
        goal_xy = np.array([1e3, 0])
        return -np.linalg.norm(cur_xy - goal_xy) * 60

    def calculate_reward(self):
        # potential-based shaping: reward is the change in potential,
        # plus the agent's own (unscaled, unlike other tasks) reward term
        progress = self.calculate_task_potential() - self.old_potential
        self.old_potential = self.calculate_task_potential()
        reward = progress + self.agent.specific_reward()
        return reward

    def get_collisions(self) -> int:
        """Returns the number of collisions with obstacles that occurred after
        the last simulation step call."""
        if len(self.obstacles) == 0:
            collision_list = []
        else:
            collision_list = [ob.detect_collision(self.agent)
                              for ob in self.obstacles]
        return sum(collision_list)

    def get_observation(self) -> np.ndarray:
        # update camera position to follow the agent along x
        agent_x = self.agent.get_position()[0]
        self.world.camera.update(cam_base_pos=(agent_x+3, 0, 2))
        # no task specific observations...
        return np.array([])

    @property
    def goal_achieved(self) -> bool:
        """agent cannot reach goal: run endlessly"""
        return False

    def setup_camera(self) -> None:
        """ Keep PyBullet's default camera setting."""
        self.world.camera.update(
            cam_base_pos=(3., 0, 2),
            cam_dist=2.5,
            cam_yaw=90,
            cam_pitch=-50
        )

    def specific_reset(self) -> None:
        """ Set positions and orientations of agent and obstacles."""
        self.agent.specific_reset()  # reset joints
        # start near the origin with a tiny random XY jitter
        new_pos = self.agent.init_xyz
        new_pos[:2] = np.random.uniform(-0.01, 0.01, 2)
        self.agent.set_position(new_pos)
        # re-anchor the shaping potential to the new start position
        self.old_potential = self.calculate_task_potential()

    def update_goal(self) -> None:
        # no goals are present in the run task...
        pass
class GatherTask(bases.Task):
    """Collect all apples while avoiding bombs.

    The episode ends once every apple is collected. Touching a bomb incurs a
    cost; agent death replaces the step reward with a large penalty.
    """

    def __init__(
            self,
            bc,
            world,
            agent,
            obstacles,
            use_graphics
    ):
        super().__init__(
            bc=bc,
            world=world,
            agent=agent,
            obstacles=obstacles,
            continue_after_goal_achievement=False,  # terminate after goal
            use_graphics=use_graphics
        )
        # Reward / cost magnitudes.
        self.apple_reward = 10.
        self.bomb_cost = 1.
        self.dead_agent_reward = -10
        # An object counts as collected/hit when the agent gets this close.
        self.detection_distance = 1
        # Reduce distance to objects, especially important for more complex
        # agents such as Ant, so random exploration can reach sparse rewards.
        # (A dead assignment of 0.5 before this line was removed: it was
        # always overwritten here before being read.)
        self.agent_obstacle_distance = 1.0  # default value in other tasks: 2.5
        self.obstacle_obstacle_distance = 2.0  # default in other tasks: 2.5
        # Add pseudo-LIDAR sensors for apples and for bombs.
        self.equip_agent_with_sensors()
        # Increase powers of some agents to widen the exploration range.
        self.agent.upgrade_power()

    def calculate_cost(self):
        """Return the cost dict: bomb hits plus an out-of-range penalty.

        The total cost is clipped to at most 1.0.
        """
        info = {}
        c = self.get_collisions() * self.bomb_cost
        z = self.agent.get_position()[2]  # limit range of Drone agent
        info['cost_gathered_bombs'] = c
        info['cost_out_of_range'] = 1. if z > 2 else 0.
        # Limit total cost to be at most 1.0.
        info['cost'] = min(1, sum(v for k, v in info.items()
                                  if k.startswith('cost_')))
        return info

    def calculate_reward(self):
        """Reward apple pickups; penalize death.

        Visible apples within ``detection_distance`` are hidden (collected)
        and each contributes ``apple_reward``. A dead agent overrides the
        reward with ``dead_agent_reward``.
        """
        r = 0.
        for ob in self.obstacles:
            if not isinstance(ob, Apple):
                continue  # only consider apples
            xy_diff = ob.get_position()[:2] - self.agent.get_position()[:2]
            dist = np.linalg.norm(xy_diff)
            if ob.is_visible and dist < self.detection_distance:
                ob.update_visuals(make_visible=False)
                r += self.apple_reward
        if not self.agent.alive:
            r = self.dead_agent_reward
        return r

    def equip_agent_with_sensors(self):
        """Attach one pseudo-LIDAR per obstacle category (apples, bombs)."""
        apples = [ob for ob in self.obstacles if isinstance(ob, Apple)]
        bombs = [ob for ob in self.obstacles if isinstance(ob, Bomb)]
        for obstacle_group in (apples, bombs):
            sensor = sensors.PseudoLIDARSensor(
                bc=self.bc,
                agent=self.agent,
                obstacles=obstacle_group,
                number_rays=16,
                ray_length=self.world.env_dim,
                visualize=False
            )
            self.agent.add_sensor(sensor)

    def get_collisions(self) -> int:
        """Count bombs touched since the last simulation step.

        Touched bombs are hidden so they cannot be counted twice.
        """
        c = 0
        for ob in self.obstacles:
            if not isinstance(ob, Bomb):
                continue  # only consider bombs
            xy_diff = ob.get_position()[:2] - self.agent.get_position()[:2]
            dist = np.linalg.norm(xy_diff)
            # Obstacles are only active while they are visible.
            if ob.is_visible and dist < self.detection_distance:
                ob.update_visuals(make_visible=False)
                c += 1
        return c

    def get_observation(self):
        """No task-specific observation entries in Gather."""
        return []

    @property
    def goal_achieved(self):
        """True once no visible apples remain."""
        return not any(isinstance(ob, Apple) and ob.is_visible
                       for ob in self.obstacles)

    def update_goal(self):
        """Nothing to update: no explicit goal object exists in Gather."""
        pass

    def setup_camera(self) -> None:
        """Bird's-eye view scaled to the environment size."""
        self.world.camera.update(
            cam_base_pos=(0, -3, 0),
            cam_dist=1.2*self.world.env_dim,
            cam_yaw=0,
            cam_pitch=-60
        )

    def specific_reset(self) -> None:
        """Re-place agent and obstacles and make all objects visible again."""
        # First, reset the agent at the origin at its nominal height.
        self.agent.specific_reset()
        agent_pos = np.zeros(3)
        agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))
        self.agent.set_position(agent_pos)
        # Second, re-sample obstacle positions with the required spacings.
        if len(self.obstacles) > 0:
            obs_init_pos = env_utils.generate_obstacles_init_pos(
                num_obstacles=len(self.obstacles),
                agent_pos=self.agent.get_position(),
                goal_pos=np.array([]),  # no goal in gather task
                world=self.world,
                min_allowed_distance=self.obstacle_obstacle_distance,
                agent_obstacle_distance=self.agent_obstacle_distance
            )
            for i, ob in enumerate(self.obstacles):
                ob.set_position(obs_init_pos[i])
        # Finally, make all collected objects visible again
        # (plain loop instead of a side-effect list comprehension).
        for ob in self.obstacles:
            ob.update_visuals(make_visible=True)
|
988,058 | 8340a4d311f78a5c56609db4889e1e5bd2fa91ff | #!/usr/bin/python
#encoding=UTF-8
import urllib,json,time,webbrowser,re
def get_data_from_tieba(pid):
    """Download every page of Baidu Tieba thread *pid* via the mobile site
    and return all pages' HTML joined with newlines.

    NOTE: Python 2 code (``urllib.urlopen`` and the builtin ``reduce``).
    """
    def the_last_page(data):
        # The mobile site renders this "post does not exist" notice once the
        # page offset runs past the end of the thread.
        return u'</head><body><div>您要浏览的贴子不存在<br/>' in data
    def get_data():
        # Yield one page of HTML at a time; the site paginates 10 posts/page.
        p=1
        while True:
            data=urllib.urlopen("http://wapp.baidu.com/mo/m?kz=%s&pn=%d"%(pid,10*(p-1))).read().decode('utf-8')
            if the_last_page(data):return
            yield data
            p+=1
    return reduce(lambda x,y:x+'\n'+y,get_data())
def get_weather(code):
    """Fetch today's weather for Chinese city *code* from weather.com.cn.

    Returns the parsed ``weatherinfo`` dict. The ``#~`` block below is a
    disabled per-day caching layer kept for reference.
    """
    #~ funcid='cr_get_weather'
    #~ cache=CRCache()
    #~ if funcid in cache and time.strftime("%D",cache[funcid].localmtime())==time.strftime("%D",time.localtime(time.time())):
    #~ data=cache[funcid].getvalue()
    #~ else:
    #~ data=urllib.urlopen('http://www.weather.com.cn/data/cityinfo/%s.html'%code).read()
    #~ cache.register(funcid,data)
    #~ cache.edit(funcid,data)
    #~ print json.loads(data)
    data=urllib.urlopen('http://www.weather.com.cn/data/cityinfo/%s.html'%code).read()
    return json.loads(data)['weatherinfo']
def get_ip():
    """Look up the caller's public IP information via Sina's iplookup service.

    Returns the parsed JSON response as a dict.
    """
    # Bug fix: the original applied "% code" to a URL string containing no
    # format placeholder (and `code` was undefined here), so every call
    # raised before the request was made. The URL is constant.
    url = 'http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json'
    return json.loads(urllib.urlopen(url).read())
def search_google(messager):
    """Open a Google (HK) search in a new browser tab.

    The query is *messager.string* with the trigger keyword
    (*messager.key.value*) stripped and ``<br/>`` markup removed.
    NOTE: Python 2 code — the encode/quote_plus chain assumes unicode input.
    """
    webbrowser.open_new_tab('http://www.google.com.hk/#&ie=utf-8&q=%s'%urllib.quote_plus(re.sub(messager.key.value,'',messager.string).encode('utf-8').replace('<br/>','')))
# Register each helper on the CRSignal bus under the name "cr.<function name>".
# NOTE(review): ``f.func_name`` is Python 2 only (``f.__name__`` in Python 3);
# ``CRSignal`` is assumed to be injected by the host environment — it is not
# defined in this file.
func_list=[get_data_from_tieba,get_weather,get_ip,search_google]
for f in func_list:
    CRSignal.connect('cr.%s'%f.func_name,f)
|
988,059 | f14131a95ad021a649769dfca9b3c78ec7f9a5d9 | import ML.ourML.NNModels as nnModels
import numpy as np
import tensorflow as tf
import cv2
from MLStatics import *
import random
class Scorer:
    """Scores a 3-D shape from three depth-map renderings (front/side/top)
    using the pretrained triple-view network.

    ``score()`` returns ``P(class 1) - P(class 0)``: positive values mean the
    model favours class 1.
    """

    def __init__(self):
        # Fix for GPU memory issue with TensorFlow 2.0 +
        # https://stackoverflow.com/questions/41117740/tensorflow-crashes-with-cublas-status-alloc-failed
        gpus = tf.config.experimental.list_physical_devices('GPU')
        if gpus:
            try:
                # Currently, memory growth needs to be the same across GPUs
                for gpu in gpus:
                    tf.config.experimental.set_memory_growth(gpu, True)
                logical_gpus = tf.config.experimental.list_logical_devices(
                    'GPU')
                print(len(gpus), "Physical GPUs,", len(
                    logical_gpus), "Logical GPUs")
            except RuntimeError as e:
                # Memory growth must be set before GPUs have been initialized
                print(e)
        # Load the triple-view checkpoint; expect_partial() silences warnings
        # about optimizer slots that are absent from an inference-only load.
        self.model = nnModels.getMultiViewModel()
        self.model.load_weights(checkpointFilepath +
                                "tripleView"+"/checkpoint"+str(img_height)).expect_partial()

    def score(self, depthPerspectives):
        """Score one sample given ``[front, side, top]`` depth images.

        :param depthPerspectives: sequence of three 2-D depth maps
            (0-255 values; shapes may differ — they are resized here).
        :return: float, ``predictions[0][1] - predictions[0][0]``.
        """
        # Technically we should batch compute but that makes it a much bigger
        # pain to debug and the speed loss is minumal
        front = cv2.resize(depthPerspectives[0], dsize=(img_width, img_height),
                           interpolation=cv2.INTER_CUBIC)
        side = cv2.resize(depthPerspectives[1], dsize=(img_width, img_height),
                          interpolation=cv2.INTER_CUBIC)
        top = cv2.resize(depthPerspectives[2], dsize=(img_width, img_height),
                         interpolation=cv2.INTER_CUBIC)
        # NOTE(review): np.resize targets (1, img_width, img_width, 1) — the
        # height dimension uses img_width, which is only correct when
        # img_width == img_height. Confirm the configured image is square.
        front = np.resize(front, (1, img_width, img_width, 1))
        front = front.astype(np.float32)
        front = 1. - front / 255.  # invert and normalize depth into [0, 1]
        side = np.resize(side, (1, img_width, img_width, 1))
        side = side.astype(np.float32)
        side = 1. - side / 255.
        top = np.resize(top, (1, img_width, img_width, 1))
        top = top.astype(np.float32)
        top = 1. - top / 255.
        predictions = self.model.predict(
            [front, side, top])
        return predictions[0][1] - predictions[0][0]
|
988,060 | ba381c66a33247d16515d4285ed434633579b345 | vv,ii,nn=map(int,input().split())
print(int((vv*ii)/(nn)))
|
988,061 | 071a6a925b14bf41218cfc2e6aae5e12a9390254 | import os.path
import pickle
from functools import reduce
from transactions.exception import InvalidTransactionException
class TransactionManager:
    """Groups named expense transactions by name, optionally restoring state
    from ``transactions.pkl`` at start-up.
    """

    def __init__(self):
        # Mapping: transaction name -> list of spent amounts.
        self.transactions = {}
        if os.path.isfile("transactions.pkl"):
            # NOTE(review): assumes the pickle holds an object exposing a
            # ``transactions`` attribute — confirm against the writer side.
            self.transactions = read_dict_from_file().transactions

    def __str__(self):
        return "".join(f"{name}:{values}\n"
                       for name, values in self.transactions.items())

    def _read_input(self, user_input):
        """Parse ``"<name> <value>"`` into a Transaction.

        :raises InvalidTransactionException: if the value part is missing or
            not an integer.
        """
        try:
            name, val = user_input.rsplit(" ", 1)
            val = int(val)
        except ValueError:
            raise InvalidTransactionException("Введите сначала название, далее стоимость.")
        return Transaction(name, val)

    def add_transaction(self, user_input):
        """Parse one line of user input and append it to its name group."""
        transaction = self._read_input(user_input)
        # setdefault replaces the explicit key-membership check.
        self.transactions.setdefault(transaction.name, []).append(transaction.value)

    def print_transaction_types(self):
        """Print the list of known transaction names."""
        print(f"Transaction types: {list(self.transactions.keys())}")

    def print_money_per_type(self):
        """Print, per name, the total money spent (sum() replaces reduce)."""
        print([f"{name}: {sum(values)}"
               for name, values in self.transactions.items()])

    def print_total_money(self):
        """Print the grand total across every transaction group."""
        total = sum(sum(values) for values in self.transactions.values())
        print(f"Total amount of spent money: {total}")
class Transaction:
    """A single named expense with an integer value."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        return "{}: {}".format(self.name, self.value)
def read_dict_from_file(name='transactions.pkl'):
    """Unpickle and return the object stored in the file *name*."""
    with open(name, 'rb') as handle:
        return pickle.load(handle)
|
988,062 | d009b522123f26249b58eed17c865c0143bb5bcd | from __future__ import print_function
import numpy as np
from numpy.random import choice
from annsa.template_sampling import (apply_LLD,
rebin_spectrum,)
def choose_uranium_template(uranium_dataset,
                            sourcedist,
                            sourceheight,
                            shieldingdensity,
                            fwhm,):
    '''
    Choose one uranium template per isotope for a specific simulation setup.

    Inputs
        uranium_dataset : pandas dataframe
            U232/U235/U238 templates simulated in multiple conditions.
        sourcedist : int
            Source distance.
        sourceheight : int
            Source height.
        shieldingdensity : float
            Shielding density in g/cm2.
        fwhm : float
            Full-width-at-half-max at 662 keV.
    Outputs
        uranium_templates : dict
            One spectrum per isotope, plus an entry for FWHM.
    '''
    # Keep only the rows simulated under the requested conditions
    # (one combined boolean mask instead of four sequential filters).
    subset = uranium_dataset[
        (uranium_dataset['sourcedist'] == sourcedist)
        & (uranium_dataset['sourceheight'] == sourceheight)
        & (uranium_dataset['shieldingdensity'] == shieldingdensity)
        & (uranium_dataset['fwhm'] == fwhm)]
    uranium_templates = {}
    for isotope in ('u232', 'u235', 'u238'):
        # The spectrum channels start at column index 6 of the raw row.
        raw = subset[subset['isotope'] == isotope].values[0][6:]
        uranium_templates[isotope] = np.abs(raw).astype(int)
    uranium_templates['fwhm'] = subset['fwhm']
    return uranium_templates
def choose_random_uranium_template(uranium_dataset):
    '''
    Choose a random uranium template (one spectrum per isotope) from the
    dataset by sampling one value per simulation condition.

    Inputs
        uranium_dataset : pandas dataframe
            U232/U235/U238 templates simulated in multiple conditions.
    Outputs
        uranium_templates : dict
            One spectrum per isotope, plus an entry for FWHM.
    '''
    # Sample one value per condition, in a fixed order so the RNG stream
    # matches the original implementation exactly.
    picked = {}
    for column in ('sourcedist', 'sourceheight', 'shieldingdensity', 'fwhm'):
        picked[column] = choice(list(set(uranium_dataset[column])))
    subset = uranium_dataset
    for column, value in picked.items():
        subset = subset[subset[column] == value]
    uranium_templates = {}
    for isotope in ('u232', 'u235', 'u238'):
        # The spectrum channels start at column index 6 of the raw row.
        raw = subset[subset['isotope'] == isotope].values[0][6:]
        uranium_templates[isotope] = np.abs(raw).astype(int)
    uranium_templates['fwhm'] = subset['fwhm']
    return uranium_templates
def generate_uenriched_spectrum(uranium_templates,
                                background_dataset,
                                enrichment_level=0.93,
                                integration_time=60,
                                background_cps=200,
                                calibration=[0, 1, 0],
                                source_background_ratio=1.0,
                                ):
    '''
    Generate a Poisson-sampled enriched-uranium spectrum plus background.

    Inputs
        uranium_templates : dict
            One hour-long template per isotope plus an 'fwhm' entry
            (as produced by the choose_*_template functions above).
        background_dataset : pandas dataframe
            Background spectra with different FWHM parameters.
        enrichment_level : float
            U235 mass fraction (remainder attributed to U238).
        integration_time : float
            Measurement dwell time in seconds.
        background_cps : float
            NOTE(review): currently unused — background intensity is derived
            from source_background_ratio instead. Confirm if intentional.
        calibration : list
            Quadratic energy-calibration coefficients [a, b, c] passed to
            rebin_spectrum. NOTE: mutable default, but it is only read here.
        source_background_ratio : float
            Ratio of source counts to background counts.
    Outputs
        full_spectrum : array
            Sampled source-plus-background spectrum (1024 channels).
    '''
    a = calibration[0]
    b = calibration[1]
    c = calibration[2]
    # Templates were simulated for one hour; scale to the requested dwell.
    template_measurment_time = 3600
    time_scaler = integration_time / template_measurment_time
    # Half the time there is no U232 contamination at all. Note that the
    # uniform draw is evaluated even when 0 is chosen, which keeps the RNG
    # stream consumption fixed.
    mass_fraction_u232 = choice([0,
                                 np.random.uniform(0.4, 2.0)])
    uranium_component_magnitudes = {
        'u235': time_scaler * enrichment_level,
        'u232': time_scaler * mass_fraction_u232,
        'u238': time_scaler * (1 - enrichment_level),
    }
    # Mix the rebinned isotope templates according to their magnitudes.
    source_spectrum = np.zeros([1024])
    for isotope in uranium_component_magnitudes:
        source_spectrum += uranium_component_magnitudes[isotope] \
            * rebin_spectrum(
                uranium_templates[isotope], a, b, c)
    # Zero out channels below the low-level discriminator, then sample.
    source_spectrum = apply_LLD(source_spectrum, 10)
    source_spectrum_sampled = np.random.poisson(source_spectrum)
    source_counts = np.sum(source_spectrum_sampled)
    background_counts = source_counts / source_background_ratio
    # Pick a background with the same detector resolution as the source.
    fwhm = uranium_templates['fwhm'].values[0]
    background_dataset = background_dataset[background_dataset['fwhm'] == fwhm]
    background_spectrum = background_dataset.sample().values[0][3:]
    background_spectrum = rebin_spectrum(background_spectrum,
                                         a, b, c)
    background_spectrum = np.array(background_spectrum, dtype='float64')
    background_spectrum = apply_LLD(background_spectrum, 10)
    # Normalize to a probability distribution before scaling to counts.
    background_spectrum /= np.sum(background_spectrum)
    background_spectrum_sampled = np.random.poisson(background_spectrum *
                                                    background_counts)
    full_spectrum = np.sum(
        [source_spectrum_sampled[0:1024],
         background_spectrum_sampled[0:1024]],
        axis=0,)
    return full_spectrum
|
988,063 | cff746670ce9ec85ee250f8401c03981ca22d57f | # Copyright 2013 IBM Corp.
import powervc.common.client.extensions.base as base
from glanceclient.common import http
from glanceclient.common import utils
from glanceclient.v2 import image_members
from glanceclient.v2 import image_tags
from glanceclient.v2 import images
from glanceclient.v2 import schemas
class Extended_V2_Client(object):
    """
    Client for the Glance Images v2 API.

    :param dict client_info : The client info dict to init a glance v2 client
        (endpoint, cacert, insecure, token).
    """
    def __init__(self, client_info):
        stripped = utils.strip_version(client_info['endpoint'])
        # strip_version may return (endpoint, version); keep the endpoint only.
        endpoint = stripped[0] if isinstance(stripped, tuple) else stripped
        http_kwargs = {
            'cacert': client_info['cacert'],
            'insecure': client_info['insecure'],
            'token': client_info['token'],
        }
        self.http_client = http.HTTPClient(endpoint, **http_kwargs)
        self.schemas = schemas.Controller(self.http_client)
        self.images = images.Controller(self.http_client, self.schemas)
        self.image_tags = image_tags.Controller(self.http_client, self.schemas)
        self.image_members = image_members.Controller(self.http_client,
                                                      self.schemas)
class Client(base.ClientExtension):
    """Thin convenience wrapper around the Glance client's image operations."""

    def __init__(self, client):
        super(Client, self).__init__(client)

    def listImages(self):
        return list(self.client.images.list())

    def getImage(self, image_id):
        return self.client.images.get(image_id)

    def getImageFile(self, image_id):
        return self.client.images.data(image_id)

    def deleteImage(self, image_id):
        return self.client.images.delete(image_id)

    def updateImage(self, image_id, **kwargs):
        return self.client.images.update(image_id, **kwargs)

    def listImageMembers(self, image_id):
        return list(self.client.image_members.list(image_id))

    def deleteImageMember(self, image_id, member_id):
        self.client.image_members.delete(image_id, member_id)

    def updateImageMember(self, image_id, member_id, member_status):
        return self.client.image_members.update(
            image_id, member_id, member_status)

    def createImageMember(self, image_id, member_id):
        return self.client.image_members.create(image_id, member_id)

    def updateImageTag(self, image_id, tag_value):
        # Tag APIs are only available on the v2 client.
        if self.client_version == 2:
            return self.client.image_tags.update(image_id, tag_value)

    def deleteImageTag(self, image_id, tag_value):
        if self.client_version == 2:
            return self.client.image_tags.delete(image_id, tag_value)
|
988,064 | cfd9fdbb6f8b5fc2b311a33bed378dca04b7d8f7 | #!/usr/bin/python3
'''
Test the module with an example.
'''
from matplotlib import pyplot
import numpy
import scipy.stats
import seaborn
import prob_spline
import test_common
import pandas as pd
# Fit and plot the mosquito curve from the vector-count CSV with zero
# smoothing (sigma = 0). Relies on the project's prob_spline package.
msq_file = "Vector_Data(NoZeros).csv"
sigma_vals = 0
curve = prob_spline.MosCurve(msq_file,sigma_vals)
curve.plot()
|
988,065 | e4c7eb14ef54b1132c3484a59c1683c278e8b9d5 | __author__ = "akhtar"
def postorder_recursive(node):
"""
Recursive postorder traversal of a binary tree.
:param BTreeNode root: The root of the tree.
:return: nothing.
:rtype: None
"""
if node is None:
return
postorder_recursive(node.left)
postorder_recursive(node.right)
print(node.data, end=" ")
def postorder_iterative(root):
    """
    Iterative postorder traversal of a binary tree, printing node data.

    Uses the classic two-stack scheme: the first stack visits nodes in
    root-right-left order, the second reverses that into left-right-root.

    :param BTreeNode root: The root of the tree (may be None).
    :return: nothing.
    :rtype: None
    """
    if root is None:
        return
    visit_stack = [root]
    output_stack = []
    while visit_stack:
        current = visit_stack.pop()
        output_stack.append(current)
        if current.left:
            visit_stack.append(current.left)
        if current.right:
            visit_stack.append(current.right)
    while output_stack:
        print(output_stack.pop().data, end=" ")
def postorder(root):
    """Print the postorder traversal of *root* (delegates to the recursive version)."""
    postorder_recursive(root)
|
988,066 | a1a8536a6a951d9ec97dbe49022a92eb5c76e6cd | import numpy as np
import torch
from flame import FlameDecoder
import pyrender
import trimesh
from config import get_config
from vtkplotter import Plotter, datadir, Text2D, show, interactive
import vtkplotter.mesh
import time
from flame import FlameLandmarks
import os
import torch
import numpy as np
from tqdm import tqdm_notebook
import imageio
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from config import *
# io utils
from pytorch3d.io import load_obj
# datastructures
from pytorch3d.structures import Meshes, Textures
# 3D transformations functions
from pytorch3d.transforms import Rotate, Translate
# rendering components
from pytorch3d.renderer import (
OpenGLPerspectiveCameras, look_at_view_transform, look_at_rotation,
RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights
)
from fitting.silhouette_fitting import segment_img
import cv2
from fitting.silhouette_fitting import segment_img
from Yam_research.utils.utils import make_mesh, Renderer
from utils.model_ploting import plot_silhouette
# Set the cuda device
# Pin all computation to the first CUDA device (script assumes a GPU).
device = torch.device("cuda:0")
torch.cuda.set_device(device)
# FLAME configuration: single mesh, male model.
config = get_config_with_default_args()
config.batch_size = 1
config.flame_model_path = 'model/male_model.pkl'
# Load the obj and ignore the textures and materials.
# verts, faces_idx, _ = load_obj("./data/teapot.obj")
verts, faces_idx, _ = load_obj(
    "/home/yam/arabastra/Israel/Tel_aviv/Yehoodit_5/common_ground/resultes/sentence01.000002.26_C.obj")
# Initialize each vertex to be white in color.
flamelayer = FlameLandmarks(config)
flamelayer.cuda()
face_mesh = make_mesh(flamelayer, )
##########################################################################
# Select the viewpoint using spherical angles
distance = 0.3 # distance from camera to the object
elevation = 0.5 # angle of elevation in degrees
azimuth = 0.0 # No rotation so the camera is positioned on the +Z axis.
# Get the position of the camera based on the spherical angles
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
# Initialize an OpenGL perspective camera.
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
renderer = Renderer(cameras)
# Reference image: a Phong render of the untouched FLAME mesh.
image_ref = renderer.render_phong(face_mesh)
image_ref = image_ref.cpu().detach().numpy()
############################################################################3
# Set the optimization starting position: same scene, farther camera.
# Select the viewpoint using spherical angles
distance = 0.9 # distance from camera to the object
elevation = 0.5 # angle of elevation in degrees
azimuth = 0.0 # No rotation so the camera is positioned on the +Z axis.
# Get the position of the camera based on the spherical angles
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
# Initialize an OpenGL perspective camera (overwrites the one above).
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
renderer = Renderer(cameras)
# Render the teapot providing the values of R and T.
# silhouete = renderer.render_sil(face_mesh)
# silhouete = silhouete.cpu().detach().numpy()
#############################################################################
# First optimization attempt: fit FLAME parameters to a target silhouette.
# NOTE: `vars` shadows the builtin of the same name.
vars = [flamelayer.transl,flamelayer.global_rot, flamelayer.shape_params, flamelayer.expression_params,
        flamelayer.jaw_pose, flamelayer.neck_pose] # Optimize for global scale, translation and rotation
#rigid_scale_optimizer = torch.optim.LBFGS(vars, tolerance_change=1e-15, tolerance_grad = 1e-10, max_iter=1e7, line_search_fn='strong_wolfe')
rigid_scale_optimizer = torch.optim.LBFGS(vars, line_search_fn='strong_wolfe')
# Target silhouette: segmented binary mask of the reference photograph.
image_ref = cv2.imread('./data/bareteeth.000001.26_C.jpg')
image_ref = cv2.resize(image_ref, (1024, 1024))
image_ref = segment_img(image_ref, 10)
torch_target_silh = torch.from_numpy((image_ref != 0).astype(np.float32)).to(device)
factor = 1 # TODO what shoud factor be???
def image_fit_loss(my_mesh):
    """Squared pixel distance between the rendered silhouette (alpha channel)
    and the target silhouette mask, scaled by 1/factor**2."""
    rendered = renderer.render_sil(my_mesh).squeeze()[..., 3]
    diff = rendered - torch_target_silh
    return torch.sum(diff ** 2) / (factor ** 2)
def fit_closure():
    """LBFGS closure: zero grads, rebuild the mesh from the current FLAME
    parameters, and return silhouette loss plus the FLAME regularizer.

    LBFGS may evaluate the closure with gradients disabled during line
    search, hence the is_grad_enabled()/requires_grad guards.
    """
    if torch.is_grad_enabled():
        rigid_scale_optimizer.zero_grad()
    _, _, flame_regularizer_loss = flamelayer()
    my_mesh = make_mesh(flamelayer, False)
    obj = image_fit_loss(my_mesh) + flame_regularizer_loss
    print('obj - ', obj)
    if obj.requires_grad:
        obj.backward()
    print ('flamelayer.transl.grad = ', flamelayer.transl.grad)
    # NOTE(review): the label says global_rot but the value printed is
    # neck_pose.grad — confirm which gradient was meant to be logged.
    print('flamelayer.global_rot.grad = ', flamelayer.neck_pose.grad)
    return obj
# plot_silhouette(flamelayer, renderer, image_ref, device)
# print('preoptimization sihouette')
# rigid_scale_optimizer.step(fit_closure)
# plot_silhouette(flamelayer, renderer, image_ref, device)
# print('first optimization attempt')
#########################################################################
# Adam refinement of the FLAME parameters against the target silhouette,
# plotting the silhouette before and after.
plot_silhouette(flamelayer, renderer, image_ref)
optimizer = torch.optim.Adam(vars, lr=0.05)
loop = tqdm_notebook(range(200))
for i in loop:
    optimizer.zero_grad()
    my_mesh = make_mesh(flamelayer, False)
    loss = image_fit_loss(my_mesh)
    loss.backward()
    print(flamelayer.transl.grad)
    optimizer.step()
plot_silhouette(flamelayer, renderer, image_ref)
#########################################################################
##############################################################################
# Second optimization attempt: build soft-silhouette and Phong renderers.
# Initialize an OpenGL perspective camera.
cameras = OpenGLPerspectiveCameras(device=device)
# To blend the 100 faces we set a few parameters which control the opacity and the sharpness of
# edges. Refer to blending.py for more details.
blend_params = BlendParams(sigma=1e-6, gamma=1e-6)
# Define the settings for rasterization and shading. Here we set the output image to be of size
# 256x256. To form the blended image we use 100 faces for each pixel. We also set bin_size and max_faces_per_bin to None which ensure that
# the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
# the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
    image_size=256,
    blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
    faces_per_pixel=100,
)
# Create a silhouette mesh renderer by composing a rasterizer and a shader.
silhouette_renderer = MeshRenderer(
    rasterizer=MeshRasterizer(
        cameras=cameras,
        raster_settings=raster_settings
    ),
    shader=SoftSilhouetteShader(blend_params=blend_params)
)
# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
raster_settings = RasterizationSettings(
    image_size=256,
    blur_radius=0.0,
    faces_per_pixel=1,
)
# We can add a point light in front of the object.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
phong_renderer = MeshRenderer(
    rasterizer=MeshRasterizer(
        cameras=cameras,
        raster_settings=raster_settings
    ),
    shader=HardPhongShader(device=device, lights=lights)
)
############################################################################3
# Target view (Phong) rendered from behind the mesh (azimuth 180).
# Select the viewpoint using spherical angles
distance = 0.3 # distance from camera to the object
elevation = 0.5 # angle of elevation in degrees
azimuth = 180.0 # No rotation so the camera is positioned on the +Z axis.
# Get the position of the camera based on the spherical angles
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
image_ref = phong_renderer(meshes_world=face_mesh, R=R, T=T)
image_ref = image_ref.cpu().detach().numpy()
# Starting view silhouette: same azimuth but a farther camera.
# Select the viewpoint using spherical angles
distance = 0.9 # distance from camera to the object
elevation = 0.5 # angle of elevation in degrees
azimuth = 180.0 # No rotation so the camera is positioned on the +Z axis.
# Get the position of the camera based on the spherical angles
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
silhouete = silhouette_renderer(meshes_world=face_mesh, R=R, T=T)
silhouete = silhouete.cpu().detach().numpy()
############################################################################3
class Model(nn.Module):
    """Wraps the FLAME layer and the silhouette renderer so the silhouette
    loss against a fixed reference image can be minimized with a torch
    optimizer."""

    def __init__(self,flamelayer,renderer,image_ref,device):
        super().__init__()
        self.flamelayer = flamelayer
        self.device = device
        self.renderer = renderer
        # Get the silhouette of the reference RGB image by finding all the non zero values.
        image_ref = torch.from_numpy((image_ref[..., :3].max(-1) != 0).astype(np.float32))
        self.register_buffer('image_ref', image_ref)
        # NOTE(review): this Parameter is registered but forward() reads the
        # flamelayer directly — confirm the optimizer actually updates what
        # the renderer consumes.
        self.transl = nn.Parameter(flamelayer.transl)

    def forward(self):
        # Rebuild the mesh from the current FLAME parameters and render it.
        mesh = make_mesh(self.flamelayer, )
        image = self.renderer(meshes_world=mesh.clone())
        # Calculate the silhouette loss (rendered alpha vs. reference mask).
        loss = torch.sum((image[..., 3] - self.image_ref) ** 2)
        return loss, image
# class Model(nn.Module):
# def __init__(self, meshes, renderer, image_ref):
# super().__init__()
# self.meshes = meshes
# self.device = meshes.device
# self.renderer = renderer
#
# # Get the silhouette of the reference RGB image by finding all the non zero values.
# image_ref = torch.from_numpy((image_ref[..., :3].max(-1) != 0).astype(np.float32))
# self.register_buffer('image_ref', image_ref)
#
# # Create an optimizable parameter for the x, y, z position of the camera.
# self.camera_position = nn.Parameter(
# torch.from_numpy(np.array([3.0, 6.9, +2.5], dtype=np.float32)).to(meshes.device))
#
# def forward(self):
# # Render the image using the updated camera position. Based on the new position of the
# # camer we calculate the rotation and translation matrices
# R = look_at_rotation(self.camera_position[None, :], device=self.device) # (1, 3, 3)
# T = -torch.bmm(R.transpose(1, 2), self.camera_position[None, :, None])[:, :, 0] # (1, 3)
#
# image = self.renderer(meshes_world=self.meshes.clone(), R=R, T=T)
#
# # Calculate the silhouette loss
# loss = torch.sum((image[..., 3] - self.image_ref) ** 2)
# return loss, image
####################################################################################################
# We will save images periodically and compose them into a GIF.
filename_output = "Results/teapot_optimization_demo.gif"
writer = imageio.get_writer(filename_output, mode='I', duration=0.3)
# Initialize a model using the renderer, mesh and reference image
model = Model(flamelayer=flamelayer, renderer=silhouette_renderer, image_ref=image_ref,device=device).to(device)
# model = Model(meshes=face_mesh, renderer=silhouette_renderer, image_ref=image_ref).to(device)
# Create an optimizer. Here we are using Adam and we pass in the parameters of the model
optimizer = torch.optim.Adam(model.parameters(), lr=0.05)
loop = tqdm_notebook(range(200))
for i in loop:
optimizer.zero_grad()
loss, _ = model()
# loss.backward()
optimizer.step()
loop.set_description('Optimizing (loss %.4f)' % loss.data)
if loss.item() < 200:
break
# Save outputs to create a GIF.
if i % 10 == 0:
mesh = make_mesh(flamelayer, )
image = phong_renderer(model.meshes.clone())
image = image[0, ..., :3].detach().squeeze().cpu().numpy()
image = img_as_ubyte(image)
writer.append_data(image)
plt.figure()
plt.imshow(image[..., :3])
plt.title("iter: %d, loss: %0.2f" % (i, loss.data))
plt.grid("off")
plt.axis("off")
# plt.show()
writer.close()
|
988,067 | a0f043672ef74fe4601c166cb9739ee64cbe4059 | from itertools import accumulate
from math import gcd
# For each index i, compute gcd of all elements except A[i] using prefix and
# suffix gcd arrays, and print the maximum such value.
n = int(input())
nums = list(map(int, input().split()))
prefix = list(accumulate(nums, gcd))
suffix = list(accumulate(nums[::-1], gcd))[::-1]
best = 0
for i in range(n):
    left = prefix[i - 1] if i > 0 else 0
    right = suffix[i + 1] if i + 1 < n else 0
    best = max(best, gcd(left, right))
print(best)
|
988,068 | edc98ecf9817908f78e050fcc879600d3e028dc4 | # Move this file into root folder of project !
import os
def main():
    """Rewrite framework includes, scanning from the repository root."""
    read_all_files(".", True)
def read_all_files(folder, root_folder: bool, nesting_depth: int = 0):
    """Recursively rewrite ``#include <framework/...>`` directives under
    *folder* into quoted relative includes, editing files in place.

    :param folder: directory currently being scanned.
    :param root_folder: True only for the top-level call; at the root only
        the ``framework`` directory is descended into.
    :param nesting_depth: how many directories deep we are; it drives the
        number of ``..`` segments inserted in cross-tree includes.
    """
    for file in os.listdir(folder):
        # Skip VCS/IDE metadata directories.
        if file in [".git", ".idea", ".project", ".settings"]:
            continue
        # NOTE(review): substring test — this also skips names like "x.pyc"
        # or "my.python.txt"; file.endswith(".py") may be what was intended.
        if ".py" in file:
            continue
        # At the root, only the framework tree is processed.
        if file not in ["framework"] and root_folder:
            continue
        if os.path.isdir(os.path.join(folder, file)):
            read_all_files(os.path.join(folder, file), False, nesting_depth + 1)
            continue
        print(os.path.join(folder, file))
        write = False
        with open(os.path.join(folder, file), "r") as in_file:
            rows = [x for x in in_file]
            for index, row in enumerate(rows):
                if "#include" in row:
                    if folder[2:] in row:
                        # Include targets the current folder: make it local
                        # by stripping the folder prefix and the brackets.
                        if "<config/" in row:
                            continue
                        write = True
                        row = row.replace(">", "\"")
                        row = row.replace("<framework", "\"")
                        row = row.replace(folder[1:] + "/", "")
                        rows[index] = row
                        print("Replacement 1" + str(row))
                    elif "<framework" in row:
                        # Framework include from a sibling tree: prefix with
                        # a chain of ".." segments back to the framework root.
                        how_deep = nesting_depth - 1
                        write = True
                        row = row.replace(">", "\"")
                        string = [".." for i in range(how_deep)]
                        dots = "/".join(string)
                        row = row.replace("<framework", "\"" + dots)
                        rows[index] = row
                        print("Replacement 2: " + str(row))
        if write:
            # Rewrite the file only when at least one include was changed.
            with open(os.path.join(folder, file), "w") as in_file:
                print("Write file %s" % (os.path.join(folder, file)))
                in_file.writelines(rows)
read_all_files(".", True)
|
988,069 | cd6326bb8d02b7e798e93ca8ff15c3ccd43c6fee | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 14:51:47 2018
@author: Administrator
@description: numpy库操纵数组的一些方法
"""
import numpy as np
from numpy.matlib import randn
# Create arrays with explicit dtypes.
arr3 = np.array([1, 2, 3], dtype=np.float64)
arr4 = np.array([1, 2, 3], dtype=np.int32)
print(arr3.dtype)#float64
print(arr4.dtype)#int32
# Convert an array's dtype with astype (returns a new array).
arr5 = np.array([1, 2, 3, 4, 5])
print(arr5.dtype) #int32
arr6 = arr5.astype(np.float64) # convert arr5's dtype to float64
print(arr6.dtype) #float64
# Arithmetic between arrays (and with scalars) is element-wise.
arr = np.array([[1., 2., 3.], [4., 5., 6.]])
print(arr * arr) # element-wise product: [[1. 4. 9.] [16. 25. 36.]]
print(arr - arr) # element-wise difference: [[0. 0. 0.] [0. 0. 0.]]
# Create a 2-D array; read and write elements.
arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(arr2d[2]) # indices start at 0, so this prints the third row: [7 8 9]
# Create a 3-D array.
arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) # two 2-D arrays
old_values = arr3d[0].copy() # copy the first 2-D array into old_values
print(old_values) # prints: [[1 2 3], [4 5 6]]
arr3d[0] = 3 # set every element of the first 2-D array to 3
arr3d[1] = 3 # set every element of the second 2-D array to 3
print(arr3d) # prints the all-3s 3-D array
arr3d[0] = old_values
print(arr3d) # prints [[[1 2 3] [4 5 6]] [[3 3 3] [3 3 3]] ]
# Boolean-mask indexing: select rows of `data` where names equals 'Bob'.
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
print("输出names数组:")
print(names)
print("输出randn(7, 4)生成的7行4列的随机数函数")
data = randn(7, 4)
print(data)
print("判断names数组中的每一个元素是否等于Bob:")
print(names == 'Bob') # prints [True False False True False False False]
print("data[names == 'Bob']输出data中判断为true的行元素:")
print(data[names == 'Bob'])
print("data[names == 'Bob', 2:]输出data中判断为true所在的行,及第3列第4列的所有元素:")
print(data[names == 'Bob', 2:],) # rows where the mask is True, columns 3 and 4
print("data[names == 'Bob', 3]输出data中判断为true所在的行及第4列的元素 :")
print(data[names == 'Bob', 3])
print("输出名字为Bob以及Will所在的行元素 :")
print(data[((names == 'Bob') | (names == 'Will'))]) # rows whose name is Bob or Will
988,070 | c9dfb846015d1b3e5e31d9aacdfb5a4e6489b75f | # -*- coding: utf-8 -*-
"""DNACenterAPI path_trace API fixtures and tests.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from dnacentersdk.exceptions import MalformedRequest
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '2.1.1', reason='version does not match')
def is_valid_retrives_all_previous_pathtraces_summary(json_schema_validate, obj):
    """Validate *obj* against the endpoint's registered JSON schema.

    Raises if the schema rejects the object; returns True otherwise.
    """
    schema = json_schema_validate('jsd_55bc3bf94e38b6ff_v2_1_1')
    schema.validate(obj)
    return True
def retrives_all_previous_pathtraces_summary(api):
    """Call the path-trace summary endpoint with placeholder string arguments."""
    params = dict(
        dest_ip='string',
        dest_port='string',
        gt_create_time='string',
        last_update_time='string',
        limit='string',
        lt_create_time='string',
        offset='string',
        order='string',
        periodic_refresh=True,
        protocol='string',
        sort_by='string',
        source_ip='string',
        source_port='string',
        status='string',
        task_id='string',
    )
    return api.path_trace.retrives_all_previous_pathtraces_summary(**params)
@pytest.mark.path_trace
def test_retrives_all_previous_pathtraces_summary(api, validator):
    # Call the endpoint with placeholder values and validate the response
    # against its JSON schema.
    assert is_valid_retrives_all_previous_pathtraces_summary(
        validator,
        retrives_all_previous_pathtraces_summary(api)
    )
def retrives_all_previous_pathtraces_summary_default(api):
    """Call the summary endpoint with every optional argument left as None."""
    keys = ('dest_ip', 'dest_port', 'gt_create_time', 'last_update_time',
            'limit', 'lt_create_time', 'offset', 'order', 'periodic_refresh',
            'protocol', 'sort_by', 'source_ip', 'source_port', 'status',
            'task_id')
    return api.path_trace.retrives_all_previous_pathtraces_summary(
        **{key: None for key in keys})
@pytest.mark.path_trace
def test_retrives_all_previous_pathtraces_summary_default(api, validator):
    # With all-default (None) arguments the call may legitimately fail;
    # only schema/request/type errors are tolerated, anything else re-raises.
    try:
        assert is_valid_retrives_all_previous_pathtraces_summary(
            validator,
            retrives_all_previous_pathtraces_summary_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_retrieves_previous_pathtrace(json_schema_validate, obj):
    """Loose validity check: a truthy response body is considered valid.

    No JSON schema is registered for this endpoint in this version, so
    only the presence of a response is checked. (`bool(obj)` replaces the
    redundant ``True if obj else False``.)
    """
    return bool(obj)
def retrieves_previous_pathtrace(api):
    """Fetch a previously run path trace using a placeholder flow id."""
    response = api.path_trace.retrieves_previous_pathtrace(
        flow_analysis_id='string')
    return response
@pytest.mark.path_trace
def test_retrieves_previous_pathtrace(api, validator):
    # Only checks that a truthy response comes back (no schema registered).
    assert is_valid_retrieves_previous_pathtrace(
        validator,
        retrieves_previous_pathtrace(api)
    )
def retrieves_previous_pathtrace_default(api):
    """Default-argument variant; the path parameter still needs a value."""
    response = api.path_trace.retrieves_previous_pathtrace(
        flow_analysis_id='string')
    return response
@pytest.mark.path_trace
def test_retrieves_previous_pathtrace_default(api, validator):
    # Failures are tolerated only when they are schema/request/type errors.
    try:
        assert is_valid_retrieves_previous_pathtrace(
            validator,
            retrieves_previous_pathtrace_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_deletes_pathtrace_by_id(json_schema_validate, obj):
    """Validate the delete-pathtrace response against its JSON schema."""
    schema = json_schema_validate('jsd_8a9d2b76443b914e_v2_1_1')
    schema.validate(obj)
    return True
def deletes_pathtrace_by_id(api):
    """Delete a path trace using a placeholder flow-analysis id."""
    response = api.path_trace.deletes_pathtrace_by_id(
        flow_analysis_id='string')
    return response
@pytest.mark.path_trace
def test_deletes_pathtrace_by_id(api, validator):
    # Delete with a placeholder id and schema-validate the response.
    assert is_valid_deletes_pathtrace_by_id(
        validator,
        deletes_pathtrace_by_id(api)
    )
def deletes_pathtrace_by_id_default(api):
    """Default-argument variant; the path parameter still needs a value."""
    response = api.path_trace.deletes_pathtrace_by_id(
        flow_analysis_id='string')
    return response
@pytest.mark.path_trace
def test_deletes_pathtrace_by_id_default(api, validator):
    # Failures are tolerated only when they are schema/request/type errors.
    try:
        assert is_valid_deletes_pathtrace_by_id(
            validator,
            deletes_pathtrace_by_id_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_initiate_a_new_pathtrace(json_schema_validate, obj):
    """Validate the initiate-pathtrace response against its JSON schema."""
    schema = json_schema_validate('jsd_a395fae644ca899c_v2_1_1')
    schema.validate(obj)
    return True
def initiate_a_new_pathtrace(api):
    """Start a new path trace with placeholder request-body values."""
    body = dict(
        active_validation=True,
        controlPath=True,
        destIP='string',
        destPort='string',
        inclusions=['string'],
        payload=None,
        periodicRefresh=True,
        protocol='string',
        sourceIP='string',
        sourcePort='string',
    )
    return api.path_trace.initiate_a_new_pathtrace(**body)
@pytest.mark.path_trace
def test_initiate_a_new_pathtrace(api, validator):
    # Initiate with placeholder body values and schema-validate the response.
    assert is_valid_initiate_a_new_pathtrace(
        validator,
        initiate_a_new_pathtrace(api)
    )
def initiate_a_new_pathtrace_default(api):
    """Start a path trace with request validation on and every field None."""
    body = {key: None for key in (
        'controlPath', 'destIP', 'destPort', 'inclusions', 'payload',
        'periodicRefresh', 'protocol', 'sourceIP', 'sourcePort')}
    return api.path_trace.initiate_a_new_pathtrace(
        active_validation=True, **body)
@pytest.mark.path_trace
def test_initiate_a_new_pathtrace_default(api, validator):
    # Failures are tolerated only when they are schema/request/type errors.
    try:
        assert is_valid_initiate_a_new_pathtrace(
            validator,
            initiate_a_new_pathtrace_default(api)
        )
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
|
988,071 | 33512f5a23731c35508051e712722251659bed1c | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F
from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from utils.lrp import *
from plot.lrp_heatmaps import plot_vanishing_explanations
import plot.lrp_distributions as plot_lrp
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
# Compare LRP explanations of a deterministic baseline net against its
# Bayesian counterpart (fullBNN/redBNN), on clean and adversarial inputs.
# NOTE(review): indentation of this script was lost in extraction; the
# nesting below is a best-effort reconstruction — confirm against the
# original file before relying on it.
parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--model", default="fullBNN", type=str, help="fullBNN")
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation.")
parser.add_argument("--redBNN_layer_idx", default=-1, type=int, help="Bayesian layer idx in redBNN.")
parser.add_argument("--load", default=False, type=eval, help="Load saved computations and evaluate them.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()

# Debug runs use a reduced evaluation set.
n_inputs=100 if args.debug else args.n_inputs
# Posterior sample counts at which the Bayesian net is evaluated.
n_samples_list=[10,50]#,100]

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

### Load models and attacks

# Deterministic baseline network plus its precomputed attack.
model = baseNN_settings["model_"+str(args.model_idx)]

x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=model["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
det_model_savedir = get_model_savedir(model="baseNN", dataset=model["dataset"], architecture=model["architecture"],
                                      debug=args.debug, model_idx=args.model_idx)
detnet = baseNN(inp_shape, num_classes, *list(model.values()))
detnet.load(savedir=det_model_savedir, device=args.device)

det_attack = load_attack(method=args.attack_method, model_savedir=det_model_savedir)

if args.model=="fullBNN":
    m = fullBNN_settings["model_"+str(args.model_idx)]
    bay_model_savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                                          model_idx=args.model_idx, debug=args.debug)
    bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
    bayesnet.load(savedir=bay_model_savedir, device=args.device)
elif args.model=="redBNN":
    # redBNN wraps a pretrained deterministic base net with one Bayesian layer.
    m = redBNN_settings["model_"+str(args.model_idx)]
    base_m = baseNN_settings["model_"+str(m["baseNN_idx"])]
    x_test, y_test, inp_shape, out_size = load_dataset(dataset_name=m["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
    basenet = baseNN(inp_shape, out_size, *list(base_m.values()))
    basenet_savedir = get_model_savedir(model="baseNN", dataset=m["dataset"],
                                        architecture=m["architecture"], debug=args.debug, model_idx=m["baseNN_idx"])
    basenet.load(savedir=basenet_savedir, device=args.device)
    hyp = get_hyperparams(m)
    # Negative indices count back from the last learnable layer.
    layer_idx=args.redBNN_layer_idx+basenet.n_learnable_layers+1 if args.redBNN_layer_idx<0 else args.redBNN_layer_idx
    bayesnet = redBNN(dataset_name=m["dataset"], inference=m["inference"], base_net=basenet, hyperparams=hyp,
                      layer_idx=layer_idx)
    bay_model_savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                                          debug=args.debug, model_idx=args.model_idx, layer_idx=layer_idx)
    bayesnet.load(savedir=bay_model_savedir, device=args.device)
else:
    raise NotImplementedError

# One precomputed attack per posterior sample count.
bay_attack=[]
for n_samples in n_samples_list:
    bay_attack.append(load_attack(method=args.attack_method, model_savedir=bay_model_savedir,
                                  n_samples=n_samples))

# NOTE(review): assumes the settings dict always has an "inference" key
# (fullBNN settings included) — confirm. Also uses the last loop value of
# n_samples for the mode attack.
if m["inference"]=="svi":
    mode_attack = load_attack(method=args.attack_method, model_savedir=bay_model_savedir,
                              n_samples=n_samples, atk_mode=True)

images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)

for layer_idx in detnet.learnable_layers_idxs:

    savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method, layer_idx=layer_idx)

    ### Deterministic explanations

    if args.load:
        det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
        det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")
    else:
        det_lrp = compute_explanations(images, detnet, layer_idx=layer_idx, rule=args.rule, method=args.lrp_method)
        det_attack_lrp = compute_explanations(det_attack, detnet, layer_idx=layer_idx, rule=args.rule,
                                              method=args.lrp_method)
        save_to_pickle(det_lrp, path=savedir, filename="det_lrp")
        save_to_pickle(det_attack_lrp, path=savedir, filename="det_attack_lrp")

    ### Bayesian explanations

    savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
                              layer_idx=layer_idx, lrp_method=args.lrp_method)

    bay_lrp=[]
    bay_attack_lrp=[]
    mode_attack_lrp=[]

    if args.load:
        for n_samples in n_samples_list:
            bay_lrp.append(load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(n_samples)))
            bay_attack_lrp.append(load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(n_samples)))
        if m["inference"]=="svi":
            mode_lrp = load_from_pickle(path=savedir, filename="mode_lrp_avg_post")
            for n_samples in n_samples_list:
                mode_attack_lrp.append(load_from_pickle(path=savedir, filename="mode_attack_lrp_samp="+str(n_samples)))
            mode_attack_lrp.append(load_from_pickle(path=savedir, filename="mode_attack_lrp_avg_post"))
            # print(mode_lrp.shape, torch.stack(mode_attack_lrp).shape)
    else:
        for samp_idx, n_samples in enumerate(n_samples_list):
            bay_lrp.append(compute_explanations(images, bayesnet, rule=args.rule, layer_idx=layer_idx,
                                                n_samples=n_samples, method=args.lrp_method))
            bay_attack_lrp.append(compute_explanations(bay_attack[samp_idx], bayesnet, layer_idx=layer_idx,
                                                       rule=args.rule, n_samples=n_samples, method=args.lrp_method))
            save_to_pickle(bay_lrp[samp_idx], path=savedir, filename="bay_lrp_samp="+str(n_samples))
            save_to_pickle(bay_attack_lrp[samp_idx], path=savedir, filename="bay_attack_lrp_samp="+str(n_samples))
        if m["inference"]=="svi":
            # Explanations from the posterior mode (avg_posterior) weights.
            mode_lrp = compute_explanations(images, bayesnet, rule=args.rule, layer_idx=layer_idx,
                                            n_samples=n_samples, avg_posterior=True, method=args.lrp_method)
            save_to_pickle(mode_lrp, path=savedir, filename="mode_lrp_avg_post")
            for samp_idx, n_samples in enumerate(n_samples_list):
                mode_attack_lrp.append(compute_explanations(mode_attack, bayesnet, rule=args.rule, layer_idx=layer_idx,
                                                            n_samples=n_samples, method=args.lrp_method))
                save_to_pickle(mode_attack_lrp[samp_idx], path=savedir, filename="mode_attack_lrp_samp="+str(n_samples))
            mode_attack_lrp.append(compute_explanations(mode_attack, bayesnet, rule=args.rule, layer_idx=layer_idx,
                                                        # n_samples=n_samples,
                                                        avg_posterior=True, method=args.lrp_method))
            save_to_pickle(mode_attack_lrp[samp_idx+1], path=savedir, filename="mode_attack_lrp_avg_post")
            # mode_attack_lrp = compute_explanations(mode_attack, bayesnet, rule=args.rule, layer_idx=layer_idx,
            #                                         # n_samples=n_samples,
            #                                         avg_posterior=True, method=args.lrp_method)
            # save_to_pickle(mode_attack_lrp, path=savedir, filename="mode_attack_lrp_avg_post")

    # Sanity check: all explanation tensors must cover the same inputs.
    # NOTE(review): mixes n_images (det) and n_inputs (bay) in the check —
    # looks intentional only if det_lrp always has n_inputs rows; confirm.
    n_images = det_lrp.shape[0]
    if det_attack_lrp.shape[0]!=n_images or bay_lrp[0].shape[0]!=n_inputs or bay_attack_lrp[0].shape[0]!=n_inputs:
        print("det_lrp.shape[0] =", det_lrp.shape[0])
        print("det_attack_lrp.shape[0] =", det_attack_lrp.shape[0])
        print("bay_lrp[0].shape[0] =", bay_lrp[0].shape[0])
        print("bay_attack_lrp[0].shape[0] =", bay_attack_lrp[0].shape[0])
        raise ValueError("Inconsistent n_inputs")
988,072 | f11454d8ee7c968042996a548954a10c4a44d35a | from application import db
from application.models import Base
from sqlalchemy.sql import text
class Sample(Base):
    """A collected sample, linked by foreign key to the site it came from."""

    __tablename__ = "sample"

    samplename = db.Column(db.String(144), nullable=False)
    sampletype = db.Column(db.String(144), nullable=False, index=True)
    species = db.Column(db.String(144), nullable=False, index=True)
    amount = db.Column(db.String(144), nullable=False)
    site_id = db.Column(db.Integer, db.ForeignKey('site.id'),
                        nullable=False)

    def __init__(self, samplename, sampletype, species, amount):
        # NOTE(review): site_id is NOT NULL but is not set here — presumably
        # assigned by the caller before commit; confirm.
        self.samplename = samplename
        self.sampletype = sampletype
        self.species = species
        self.amount = amount

    @staticmethod
    def group_by_species():
        """Return [{'species': ..., 'count': ...}] ordered by count desc, then name."""
        stmt = text("SELECT sample.species AS species, COUNT(sample.id) AS count"
                    " FROM sample"
                    " GROUP BY species"
                    " ORDER BY count DESC, species")
        rows = db.engine.execute(stmt)
        return [{"species": row[0], "count": row[1]} for row in rows]

    @staticmethod
    def group_by_type():
        """Return [{'type': ..., 'count': ...}] ordered by count desc, then name."""
        stmt = text("SELECT sample.sampletype AS type, COUNT(sample.id) AS count"
                    " FROM sample"
                    " GROUP BY type"
                    " ORDER BY count DESC, type")
        rows = db.engine.execute(stmt)
        return [{"type": row[0], "count": row[1]} for row in rows]
988,073 | 86788ac0a710542a7a6324e9b1482aabc0be3c11 | from django.db import models
from geopy import distance
# Create your models here.
class Occurrence(models.Model):
    """A geolocated occurrence reported by a user; distance to HQ is cached."""

    description = models.TextField(max_length=200, null=True, blank=True)
    # Fix: FloatField had default='' (an empty string is not a valid float
    # and fails validation/DB coercion); use a numeric default instead.
    lat = models.FloatField(blank=True, default=0.0)
    lon = models.FloatField(blank=True, default=0.0)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(
        auto_now=True, null=True)
    status = models.TextField(max_length=20, null=True,
                              blank=True, default="POR_VALIDAR")
    category = models.TextField(max_length=20, null=True, blank=True)
    # Same fix as lat/lon: numeric default for a float column.
    distance_from_hq = models.FloatField(blank=True, default=0.0)
    author = models.ForeignKey(
        'auth.User', related_name='occurrences', on_delete=models.CASCADE)

    class Meta:
        ordering = ['created_at', 'status']

    def get_distance_from_hq(self, lat, lon):
        """Return the distance in km between (lat, lon) and the Ubiwhere office."""
        coords_1 = (40.646860, -8.642999)  # Coords to the office
        coords_2 = (lat, lon)
        return distance.distance(coords_1, coords_2).km

    def __str__(self):
        """String representation of the occurrence."""
        return 'Description: {}, created_at: {}, status: {}, category:{}'.format(
            self.description, self.created_at, self.status, self.category)

    def save(self, *args, **kwargs):
        """Recompute the cached distance to HQ on every save."""
        self.distance_from_hq = self.get_distance_from_hq(self.lat, self.lon)
        super().save(*args, **kwargs)
988,074 | 95a60dca8c52ed6760a9ba35b812687c7d4d013d | #!/usr/bin/env python
from kmexpert.cli import cli
def init_procedures():
    """Import modules whose import side effects register procedures."""
    # import here all relevant procedures
    import example_procedure


if __name__ == '__main__':
    # Register procedures first, then hand control to the kmexpert CLI.
    init_procedures()
    cli()
|
988,075 | bd196762dd1595934fcb855a83d691a02a5e86e1 | #!/usr/bin/env python3
import unittest
from ..recipe_reader import *
class TestAnnotatedRecipeReader(unittest.TestCase):
    """Placeholder suite for recipe_reader; currently only checks test wiring."""

    def test_test_setup(self):
        # Sanity check that the test harness itself runs.
        self.assertTrue(True)
|
988,076 | e25e3f462503b12846d935a76b647ff294df0778 | import re
import json
from unittest import TestCase
from urllib import urlencode
from urlparse import urljoin, urlparse, parse_qs, urlunparse
from uuid import uuid4
import responses
import requests
from requests.auth import HTTPBasicAuth
from unicore.hub.client import AppClient, UserClient, ClientException
TICKET_INVALID_RESPONSE = json.dumps('no\n')
class BaseClientTestMixin(object):
    """Shared fixtures and assertions for AppClient/UserClient test cases.

    Subclasses set ``client_class``; HTTP is mocked with ``responses``, so
    no real hub is ever contacted.
    """

    @classmethod
    def setUpClass(cls):
        # Throwaway credentials; only their echo in auth headers matters.
        cls.app_id = uuid4().hex
        cls.app_key = 'dakfjd042342cs'
        cls.host = 'http://localhost:8000'
        cls.login_callback_url = 'http://localhost:8080/callback'
        cls.client = cls.client_class(
            app_id=cls.app_id,
            app_key=cls.app_key,
            host=cls.host,
            login_callback_url=cls.login_callback_url
        )

    def check_request_basics(self, url):
        """Assert exactly one request was made, to *url*, with HTTP Basic auth."""
        self.assertEqual(len(responses.calls), 1)
        request = responses.calls[0].request
        self.assertEqual(request.url, url)
        # Build the expected Authorization header from the app credentials.
        basic_auth = HTTPBasicAuth(self.app_id, self.app_key)
        request_with_auth = basic_auth(requests.Request())
        self.assertEqual(request.headers['Authorization'],
                         request_with_auth.headers['Authorization'])

    def test_from_config(self):
        # Both the new 'app_key' and legacy 'app_password' setting names
        # must produce the same client settings.
        settings_new = {
            'unicorehub.host': 'http://localhost:8080',
            'unicorehub.app_id': 'fa84e670f9e9460fbf612c150dd06b45',
            'unicorehub.app_key': 'opW5Ba3KxMLcRmksOdje',
            'unicorehub.redirect_to_https': False,
            'unicorehub.login_callback_url': 'http://localhost:8080/callback'
        }
        settings_old = settings_new.copy()
        settings_old['unicorehub.app_password'] = settings_old[
            'unicorehub.app_key']
        del settings_old['unicorehub.app_key']

        for settings in (settings_new, settings_old):
            client = self.client_class.from_config(settings)
            self.assertEqual(client.settings, {
                'host': settings['unicorehub.host'],
                'app_id': settings['unicorehub.app_id'],
                'app_key': (settings.get('unicorehub.app_key') or
                            settings.get('unicorehub.app_password')),
                'redirect_to_https': settings['unicorehub.redirect_to_https'],
                'login_callback_url': settings['unicorehub.login_callback_url']
            })
class UserClientTestCase(BaseClientTestMixin, TestCase):
    """Tests for UserClient: per-user app data, ticket validation, SSO URLs."""

    client_class = UserClient

    @responses.activate
    def test_get_app_data(self):
        user_id = uuid4().hex
        user_app_data = {'display_name': 'foo'}
        url = urljoin(self.host, '/users/%s' % user_id)
        responses.add(
            responses.GET, url,
            body=json.dumps(user_app_data),
            status=200,
            content_type='application/json'
        )
        data = self.client.get_app_data(user_id)
        self.assertEqual(data, user_app_data)
        self.check_request_basics(url)

        # A 404 from the hub must surface as a ClientException.
        responses.reset()
        responses.add(responses.GET, url, status=404)
        with self.assertRaisesRegexp(ClientException, 'HTTP 404'):
            self.client.get_app_data(user_id)

    @responses.activate
    def test_save_app_data(self):
        user_id = uuid4().hex
        user_app_data = {'display_name': 'foo'}
        url = urljoin(self.host, '/users/%s' % user_id)
        responses.add(
            responses.POST, url,
            body=json.dumps(user_app_data),
            status=200,
            content_type='application/json'
        )
        data = self.client.save_app_data(user_id, user_app_data)
        self.assertEqual(data, user_app_data)
        self.check_request_basics(url)

        responses.reset()
        responses.add(responses.POST, url, status=404)
        with self.assertRaisesRegexp(ClientException, 'HTTP 404'):
            self.client.save_app_data(user_id, {})

    @responses.activate
    def test_get_user(self):
        # Validating an SSO ticket returns a user object on success and
        # raises when the hub answers with the 'no' body.
        ticket = 'iamaticket'
        url = urljoin(
            self.host,
            '/sso/validate?%s' % urlencode({
                'service': self.login_callback_url,
                'ticket': ticket}))
        user_data = {
            'uuid': uuid4().hex,
            'username': 'foo_username',
            'app_data': {}
        }
        responses.add(
            responses.GET, re.compile(r'.*/sso/validate.*'),
            body=json.dumps(user_data),
            status=200,
            content_type='application/json'
        )
        user_obj = self.client.get_user(ticket)
        self.assertEqual(user_obj.data, user_data)
        self.check_request_basics(url)

        responses.reset()
        responses.add(
            responses.GET, re.compile(r'.*/sso/validate.*'),
            body=TICKET_INVALID_RESPONSE, status=200,
            content_type='application/json')
        with self.assertRaisesRegexp(ClientException, r'ticket.*is invalid'):
            self.client.get_user(ticket)

    def test_login_redirect_url(self):
        # The SSO login URL carries the callback as 'service', an optional
        # locale, and is upgraded to https unless redirect_to_https is off.
        url = self.client.get_login_redirect_url(locale='tam_IN')
        parts = urlparse(url)
        params = parse_qs(parts.query)
        self.assertEqual(
            urlunparse(parts[:4] + ('', '')),
            urljoin(self.host.replace('http:', 'https:'), '/sso/login'))
        self.assertIn('service', params)
        self.assertEqual(params['service'][0], self.login_callback_url)
        self.assertIn('_LOCALE_', params)
        self.assertEqual(params['_LOCALE_'][0], 'tam_IN')

        self.assertIn(
            urlencode({'service': 'http://example.com'}),
            self.client.get_login_redirect_url('http://example.com'))

        # Without a configured callback, an explicit absolute URL is required.
        settings_no_callback = self.client.settings.copy()
        del settings_no_callback['login_callback_url']
        client_no_callback = UserClient(**settings_no_callback)
        with self.assertRaisesRegexp(
                ValueError, 'no login_callback_url provided'):
            client_no_callback.get_login_redirect_url()
        with self.assertRaisesRegexp(
                ValueError, 'login_callback_url must be absolute'):
            client_no_callback.get_login_redirect_url('/callback')

        settings_disable_https = self.client.settings.copy()
        settings_disable_https['redirect_to_https'] = False
        client_disable_https = UserClient(**settings_disable_https)
        url = client_disable_https.get_login_redirect_url()
        parts = urlparse(url)
        self.assertEqual(
            urlunparse(parts[:4] + ('', '')), urljoin(self.host, '/sso/login'))
class AppClientTestCase(BaseClientTestMixin, TestCase):
    """Tests for AppClient: app CRUD and key reset against a mocked hub."""

    client_class = AppClient

    @responses.activate
    def test_create_app(self):
        url = urljoin(self.host, '/apps')
        app_data = {
            'title': 'Foo',
            'groups': ['group:apps_manager'],
            'url': 'http://www.example.com'
        }
        # The hub echoes the payload extended with a uuid and a key.
        app_data_complete = app_data.copy()
        app_data_complete.update({
            'uuid': uuid4().hex,
            'key': 'key'})
        responses.add(
            responses.POST, url,
            body=json.dumps(app_data_complete),
            status=201,
            content_type='application/json'
        )
        app = self.client.create_app(app_data)
        self.assertEqual(app.data, app_data_complete)
        self.check_request_basics(url)

        responses.reset()
        responses.add(responses.POST, url, status=400)
        with self.assertRaisesRegexp(ClientException, 'HTTP 400'):
            self.client.create_app(app_data)

    @responses.activate
    def test_get_app(self):
        app_data = {
            'uuid': uuid4().hex,
            'title': 'Foo',
            'groups': ['group:apps_manager']
        }
        url = urljoin(self.host, '/apps/%s' % app_data['uuid'])
        responses.add(
            responses.GET, url,
            body=json.dumps(app_data),
            status=200,
            content_type='application/json'
        )
        app = self.client.get_app(app_data['uuid'])
        self.assertEqual(app.data, app_data)
        self.check_request_basics(url)

        responses.reset()
        responses.add(responses.GET, url, status=404)
        with self.assertRaisesRegexp(ClientException, 'HTTP 404'):
            self.client.get_app(app_data['uuid'])

    @responses.activate
    def test_get_app_data(self):
        # Same endpoint as get_app but returns the raw dict, not an object.
        app_data = {
            'uuid': uuid4().hex,
            'title': 'Foo',
            'groups': ['group:apps_manager']
        }
        url = urljoin(self.host, '/apps/%s' % app_data['uuid'])
        responses.add(
            responses.GET, url,
            body=json.dumps(app_data),
            status=200,
            content_type='application/json'
        )
        data = self.client.get_app_data(app_data['uuid'])
        self.assertEqual(data, app_data)
        self.check_request_basics(url)

        responses.reset()
        responses.add(responses.GET, url, status=404)
        with self.assertRaisesRegexp(ClientException, 'HTTP 404'):
            self.client.get_app_data(app_data['uuid'])

    @responses.activate
    def test_save_app_data(self):
        app_data = {
            'uuid': uuid4().hex,
            'title': 'Foo',
            'groups': ['group:apps_manager']
        }
        url = urljoin(self.host, '/apps/%s' % app_data['uuid'])
        responses.add(
            responses.PUT, url,
            body=json.dumps(app_data),
            status=200,
            content_type='application/json'
        )
        data = self.client.save_app_data(app_data['uuid'], app_data)
        self.assertEqual(data, app_data)
        self.check_request_basics(url)

        responses.reset()
        responses.add(responses.PUT, url, status=404)
        with self.assertRaisesRegexp(ClientException, 'HTTP 404'):
            self.client.save_app_data(app_data['uuid'], app_data)

    @responses.activate
    def test_reset_app_key(self):
        app_data = {
            'uuid': uuid4().hex,
            'title': 'Foo',
            'url': 'http://www.example.com',
            'groups': ['group:apps_manager'],
            'key': 'key'
        }
        url = urljoin(self.host, '/apps/%s/reset_key' % app_data['uuid'])
        responses.add(
            responses.PUT, url,
            body=json.dumps(app_data),
            status=200,
            content_type='application/json'
        )
        # Only the new key is returned, not the full app payload.
        key = self.client.reset_app_key(app_data['uuid'])
        self.assertEqual(key, 'key')
        self.check_request_basics(url)

        responses.reset()
        responses.add(responses.PUT, url, status=404)
        with self.assertRaisesRegexp(ClientException, 'HTTP 404'):
            self.client.reset_app_key(app_data['uuid'])
|
988,077 | 5b1815f188aedce3bd63675a22d9c52877ec5895 | #!/usr/bin/env python2
# coding=utf-8
import argparse
import shutil
import sys
import tempfile
if sys.version_info.major == 2:
from pathlib2 import Path
else:
from pathlib import Path
def convert(rect_str):
    """Parse a whitespace-separated rect string into groups of four ints.

    >>> convert('561 281 33 25 608 285 32 26')
    [[561, 281, 33, 25], [608, 285, 32, 26]]
    """
    nums = list(map(int, rect_str.split()))
    rects = []
    for start in range(0, len(nums), 4):
        rects.append(nums[start:start + 4])
    return rects
def main():
    """Convert standard 'file count x y w h ...' labels to the
    cascade_createsamples format 'file x,y,w,h ...'.

    Writes the result next to the input file as ``new_<name>``.
    """
    parser = argparse.ArgumentParser(
        description='转换标准标注为 cascade_createsamples 所用的格式')
    parser.add_argument('label_pathname', action='store', type=Path)
    args = parser.parse_args(sys.argv[1:])
    # Fix: Path.open must be CALLED — the original passed the bound method
    # itself to the `with` statement, which raised at runtime.
    with tempfile.NamedTemporaryFile('r+') as tmp_file, \
            args.label_pathname.open() as label_file:
        for line in label_file:
            # Each line is "<filename> <count> <rect numbers...>";
            # the count field is discarded.
            filename, _, rect_str = line.split(' ', 2)
            rects = convert(rect_str)
            new_rect_str = ' '.join(
                [','.join(map(str, rect)) for rect in rects])
            new_line = '{} {}\n'.format(filename, new_rect_str)
            tmp_file.write(new_line)
        tmp_file.seek(0)
        shutil.copy(tmp_file.name, '{}/new_{}'.format(
            args.label_pathname.parent, args.label_pathname.name))
if __name__ == '__main__':
    # Script entry point: convert the label file given on the command line.
    main()
|
988,078 | 775714b35c58505d5c3573326c0160b182913aed | #You have Python3 install to use this script
#Executing command
#$python3 pass_generator.py
# Prompt for a comma-separated list of words to combine into candidates.
string = input("Enter The Words You Want separated by commos :> ")
a = string.split(",")
# +1 so that range(x) below includes permutations of the full word count.
n = (int(input("Enter number of Words you input above"))+1)
x = n
import itertools
# For every length 0..n-1, print each permutation of the words joined
# into a single candidate password (one per line).
for e in range(x):
    for i in itertools.permutations(a,e):
        print(''.join(i))
|
988,079 | a4a415477504e8f87bab5795771b21361ba6d090 | # Generated by Django 2.2.8 on 2022-04-21 07:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: replaces the fixed event1/event2 pair on
    RequirementMergeRequirement with the flexible MergeEventsMultiplierEntry
    through-model (one multiplier row per event type).
    """

    dependencies = [
        ('events', '0008_attendanceform_description'),
        ('candidate', '0004_auto_20220404_1936'),
    ]

    operations = [
        # Drop the hard-coded two-event fields and their multipliers.
        migrations.RemoveField(
            model_name='requirementmergerequirement',
            name='event1',
        ),
        migrations.RemoveField(
            model_name='requirementmergerequirement',
            name='event2',
        ),
        migrations.RemoveField(
            model_name='requirementmergerequirement',
            name='linkedRequirement',
        ),
        migrations.RemoveField(
            model_name='requirementmergerequirement',
            name='multiplier1',
        ),
        migrations.RemoveField(
            model_name='requirementmergerequirement',
            name='multiplier2',
        ),
        migrations.AlterField(
            model_name='requirementmergerequirement',
            name='grandTotal',
            field=models.FloatField(default=0.0, help_text='The grand total points needed from the weighted sum of connected events (only needed for the first node)'),
        ),
        # New per-event-type multiplier entries linked to the requirement.
        migrations.CreateModel(
            name='MergeEventsMultiplierEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('enable', models.BooleanField(default=False, help_text='Toggle this entry')),
                ('multiplier', models.FloatField(default=1)),
                ('eventType', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.EventType')),
                ('requirementMergeRequirement', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='candidate.RequirementMergeRequirement')),
            ],
        ),
    ]
|
988,080 | a68aa23ba7f4fc7e4d9994feab89f732e7d1e4f3 | """
Tutorial - Object inheritance
You are free to derive your request handler classes from any base
class you wish. In most real-world applications, you will probably
want to create a central base class used for all your pages, which takes
care of things like printing a common page header and footer.
"""
import os.path
import cherrypy
class Page:
    """Base page supplying a shared HTML header and footer.

    header() and footer() are intentionally NOT exposed to CherryPy;
    subclasses call them from their own exposed handlers.
    """

    # Page title rendered in both <title> and <h2>; subclasses override it.
    title = 'Untitled Page'

    def header(self):
        template = '''
            <html>
            <head>
            <title>%s</title>
            <head>
            <body>
            <h2>%s</h2>
        '''
        return template % (self.title, self.title)

    def footer(self):
        closing = '''
            </body>
            </html>
        '''
        return closing
# Note that header and footer don't get their exposed attributes
# set to True. This isn't necessary since the user isn't supposed
# to call header or footer directly; instead, we'll call them from
# within the actually exposed handler methods defined in this
# class' subclasses.
class HomePage(Page):
    """Site root; links to the AnotherPage subpage mounted at ./another/."""

    # Different title for this page
    title = 'Tutorial 5'

    def __init__(self):
        # create a subpage
        self.another = AnotherPage()

    @cherrypy.expose
    def index(self):
        # Note that we call the header and footer methods inherited
        # from the Page class!
        return self.header() + '''
            <p>
            Isn't this exciting? There's
            <a href="./another/">another page</a>, too!
            </p>
        ''' + self.footer()
class AnotherPage(Page):
    """Subpage demonstrating the same inherited header/footer."""

    title = 'Another Page'

    @cherrypy.expose
    def index(self):
        return self.header() + '''
            <p>
            And this is the amazing second page!
            </p>
        ''' + self.footer()
# Shared tutorial configuration file, located next to this module.
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')

if __name__ == '__main__':
    # CherryPy always starts with app.root when trying to map request URIs
    # to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to HelloWorld().index().
    cherrypy.quickstart(HomePage(), config=tutconf)
|
988,081 | 5bff34ff04c5c69fef384bec7b077b1e455783e7 | # -*- coding: utf-8 -*-
"""microcms.models module, Page the core model of microcms.
THIS SOFTWARE IS UNDER BSD LICENSE.
Copyright (c) 2010-2012 Daniele Tricoli <eriol@mornie.org>
Read LICENSE for more informations.
"""
import datetime
from django.db import models
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
class Page(FlatPage):
    """CMS page: a Django flatpage extended with author, timestamps,
    page-to-page links, and basic SEO metadata.
    """

    author = models.ForeignKey(User, verbose_name=_('author'))
    pub_date = models.DateTimeField(_('publication date'), auto_now_add=True)
    modified_date = models.DateTimeField(_('last modified date'),
                                         auto_now=True)
    # Self-referential links between pages; reverse accessor is 'superpages'.
    # NOTE(review): null=True has no effect on ManyToManyField — confirm
    # before removing it (it only changes nothing, but is misleading).
    links = models.ManyToManyField('Page', blank=True, null=True,
                                   related_name='superpages',
                                   verbose_name=_('links'))
    # Fields for Search Engine Optimization
    meta_keywords = models.CharField(_('meta keywords'), blank=True,
                                     help_text=_('Key words of the page. '
                                                 'Max 255 characters.'),
                                     max_length=255)
    meta_description = models.CharField(_('meta description'), blank=True,
                                        help_text=_('A brief description of '
                                                    'the page. '
                                                    'Max 255 characters.'),
                                        max_length=255)

    class Meta:
        verbose_name = _('page')
        verbose_name_plural = _('pages')

    def __unicode__(self):
        # Python 2 string representation (this module targets Django/py2).
        return self.title
|
988,082 | 48f4266d25b68e30e13b9d76b806284d93107b88 | import os
import cv2
import numpy as np
import shutil
import json
import pickle
import glob
from pycocotools.coco import COCO
import colormap as colormap_utils
def vis_parsing(path, dir, colormap, im_ori, draw_contours):
    """Alpha-blend a parsing (segmentation) label map over an image and save it.

    path: path to a single-channel parsing label image (one class id per pixel).
    dir: output directory; the file keeps its original basename.
    colormap: name of a colormap dict defined in colormap_utils (e.g. 'CIHP20').
    im_ori: original image array; assumed HxWx3 matching the label map —
        TODO confirm against callers.
    draw_contours: when True, also draw the label contours in red.
    """
    parsing = cv2.imread(path, 0)
    # NOTE(review): eval() on the colormap name — acceptable for internal
    # use only; never pass an untrusted string here.
    parsing_color_list = eval('colormap_utils.{}'.format(colormap))  # CIHP20
    parsing_color_list = colormap_utils.dict_bgr2rgb(parsing_color_list)
    colormap = colormap_utils.dict2array(parsing_color_list)
    # Fix: np.int was removed in NumPy 1.24+; the builtin int is equivalent.
    parsing_color = colormap[parsing.astype(int)]
    parsing_alpha = 0.9
    idx = np.nonzero(parsing)
    im_ori = im_ori.astype(np.float32)
    # Dim only the labelled pixels, then add the weighted colour overlay.
    im_ori[idx[0], idx[1], :] *= 1.0 - parsing_alpha
    im_ori += parsing_alpha * parsing_color
    if draw_contours:
        # NOTE(review): two-value unpacking assumes OpenCV 4.x
        # (OpenCV 3.x findContours returns three values).
        contours, _ = cv2.findContours(parsing.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        parsing_color = parsing_color.astype(np.uint8)
        cv2.drawContours(im_ori, contours, -1, (0, 0, 255), 1)
    cv2.imwrite(os.path.join(dir, os.path.basename(path)), im_ori)
def fast_hist(a, b, n):
    """Build an n x n confusion matrix between integer label arrays.

    Rows are indexed by values of `a`, columns by values of `b`; entries
    of `a` outside [0, n) are ignored.
    """
    valid = (a >= 0) & (a < n)
    combined = n * a[valid].astype(int) + b[valid]
    return np.bincount(combined, minlength=n ** 2).reshape(n, n)
def cal_one_mean_iou(image_array, label_array, num_parsing):
    """Return per-class IoU between a predicted map and its label map.

    image_array: predicted class ids; label_array: ground-truth class ids.
    Classes absent from both maps produce a 0/0 division (nan), matching
    the original behaviour.
    """
    # Fix: np.float was removed in NumPy 1.24+; the builtin float is equivalent.
    hist = fast_hist(label_array, image_array, num_parsing).astype(float)
    num_cor_pix = np.diag(hist)
    num_gt_pix = hist.sum(1)
    # Fix: reuse the computed union — the original built `union` and then
    # recomputed the identical expression inline.
    union = num_gt_pix + hist.sum(0) - num_cor_pix
    iu = num_cor_pix / union
    return iu
def comput_iou():
    """One-off experiment: fuse predicted instance labels of image 0000001 with
    its ground-truth segmentation via pairwise IoU matching, then save/visualize.

    NOTE(review): ``max_id2`` is used before it is ever assigned, so this
    function raises NameError as written; the line using it is dead (its
    result is never read) and looks like debug leftovers -- confirm and remove.
    NOTE(review): the final ``vis_parsing`` call passes 2 args but the function
    takes 5, so it would raise TypeError -- confirm the intended arguments.
    """
    a = '/home/zhuxuhan/par_pretrain/ckpts/rcnn/CIHP/my_experiment/baseline_R-50-FPN-COCO_s1x_ms/test/parsing_instances/0000001_1.png'
    b = '/xuhanzhu/CIHP/val_seg/0000001.png'
    ann = cv2.imread(b, 0)
    pre = cv2.imread(a, 0)
    ann_copy = np.zeros_like(ann)
    pre_copy = np.zeros_like(pre)
    iu_numpy = np.zeros((20, 15, 2))
    result_numpy = np.zeros_like(ann)
    # Pairwise IoU between each GT class i (of 20) and predicted class n (of 15).
    for i in range(20):
        for n in range(15):
            if ann[ann == i].shape[0] > 0:
                ann_copy[pre == n] = 1
                pre_copy[pre == i] = 1
                max_id = np.argmax(np.bincount(ann_copy[ann == max_id2]))
                iu = cal_one_mean_iou(ann_copy, pre_copy, 2)
                iu_numpy[i, n, :] = iu
                # if iu[1] > 0.3:
                # print(iu[1], i, n)
                # result_numpy[pre == n] = n
                # result_numpy[ann == i] = n
                ann_copy = np.zeros_like(ann)
                pre_copy = np.zeros_like(pre)
                # print(np.bincount(pre[ann == i]))
                # bin = np.bincount(pre[ann == i])
                # ann_i, ann_ii = np.argsort(bin)[-2:]
    # For each predicted class, take the best-matching GT class and transfer it.
    for i in range(15):
        max_id = np.argmax(iu_numpy[:, i, 1])
        if iu_numpy[max_id, i, 1] > 0.0:
            print(max_id, i, iu_numpy[max_id, i, 1])
            ann_pre_bool = (ann == max_id) & (pre == i)
            result_numpy[ann_pre_bool] = i
    # Fill remaining unassigned GT pixels with their best predicted class.
    # (Several hand-tuned alternative schemes were tried here and left
    #  commented out in the original; condensed to this note.)
    for i in range(20):
        max_id = np.argmax(iu_numpy[i, :, 1])
        if iu_numpy[i, max_id, 1] > 0.0:
            ann_pre_bool_a = (ann == i) & (result_numpy == 0)
            result_numpy[ann_pre_bool_a] = max_id
    ##
    cv2.imwrite("test_ann.png", result_numpy)
    vis_parsing("test_ann.png", './')
def compute_ious():
    """Visualize the ground-truth parsing map for every file that has a prediction.

    NOTE(review): ``vis_parsing`` takes 5 arguments; this 2-argument call raises
    TypeError as written -- confirm the intended palette/image arguments.
    """
    inference_dir = '/home/zhuxuhan/par_pretrain/ckpts/rcnn/CIHP/my_experiment/baseline_R-50-FPN-COCO_s1x_ms/test/parsing_predict'
    ann_dir = '/xuhanzhu/CIHP/val_seg_uv'
    save_pre_dir = '/home/zhuxuhan/par_dir/predict_cihp'
    save_ann_dir = '/home/zhuxuhan/par_dir/ann_cihp_uv'
    predict_files = os.listdir(inference_dir)
    ann_files = os.listdir(ann_dir)
    for predict_file in predict_files:
        if predict_file in ann_files:
            vis_parsing(os.path.join(ann_dir, predict_file), save_ann_dir)
            # vis_parsing(os.path.join(inference_dir, predict_file), save_pre_dir)
        else:
            # Prediction without a matching annotation file: just report it.
            print(predict_file)
            pass
def vis_keypoints(kp_preds, img):
    """Draw COCO-style keypoints onto *img* and save as test_kp.png.

    ``kp_preds`` is a flat [x, y, visibility, x, y, visibility, ...] sequence.
    """
    kp_x = kp_preds[::3]
    kp_y = kp_preds[1::3]
    vs = kp_preds[2::3]
    # img = np.zeros_like(img)
    for n in range(len(kp_x)):
        if vs[n] == 0:
            # Visibility 0 means the keypoint is unlabeled -- skip it.
            continue
        cor_x, cor_y = int(kp_x[n]), int(kp_y[n])
        # Draw on a copy, then alpha-blend it back for a translucent marker.
        bg = img.copy()
        cv2.circle(bg, (int(cor_x), int(cor_y)), 3, (0, 255, 255))
        transparency = 0.7
        img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 2)
    cv2.imwrite("test_kp.png", img)
def keypoints():
    """Render the keypoints of the first CIHP validation image annotation set."""
    json_path = '/xuhanzhu/CIHP/annotations/CIHP_val_with_kp.json'
    cihp_coco = COCO(json_path)
    im_ids = cihp_coco.getImgIds()
    for i, im_id in enumerate(im_ids):
        if i == 1:
            # Only the first image is processed.
            break
        ann_ids = cihp_coco.getAnnIds(imgIds=im_id)
        anns = cihp_coco.loadAnns(ann_ids)
        im = cihp_coco.loadImgs(im_id)[0]
        for kp_ann in anns:
            # img = cv2.imread(os.path.join('/xuhanzhu/CIHP/val_img', im['file_name']))
            # NOTE(review): the image path is hard-coded to 0000001.png
            # regardless of im_id -- confirm this is intentional.
            img = cv2.imread('/home/zhuxuhan/par_dir/ann_cihp/0000001.png')
            vis_keypoints(kp_ann['keypoints'], img)
    # (A large commented-out limb-drawing sketch using l_pair/part_line and
    #  cv2.ellipse2Poly followed here in the original; condensed to this note.)
def compute_iou_pp():
    """Remap per-instance predicted parsing labels onto CIHP ground truth.

    For every predicted image, each predicted person instance is matched to a
    GT person mask by binary IoU (> 0.5); matched labels are then transferred
    class-by-class via majority vote into one fused label map, which is
    written to ``save_dir_par``.  Heavily experimental script; several
    alternative schemes were left commented out in the original.
    """
    datatset = 'CIHP-COCO'  # source data
    model = 'CIHP'
    class_num = 15
    pred_dir = '/xuhanzhu/iccv_panet/ckpts/ICCV/Parsing/parsing_R-101-FPN-COCO-PAR-USEANN_s1x_ms/test/parsing_instances/'
    pred_all_dir = '/xuhanzhu/iccv_panet/ckpts/ICCV/Parsing/parsing_R-101-FPN-COCO-PAR-USEANN_s1x_ms/test/parsing_predict/'
    # pred_dir = '/xuhanzhu/mscoco2014/train_parsing_cdcl/'
    # pred_all_dir = '/xuhanzhu/CDCL-human-part-segmentation/output_coco/'
    predict_files = os.listdir(pred_all_dir)
    predict_fs = []
    save_dir = '/xuhanzhu/inference_par/%s/vis_ori_par' % datatset  # visualization of the original train_seg annotations
    save_dir_par = '/xuhanzhu/inference_par/%s/par' % datatset  # remapped masks, not yet colorized
    save_dir_par_ins = '/xuhanzhu/inference_par/%s/vis_par_ins' % datatset  # prediction results
    save_dir_par_ori = '/xuhanzhu/inference_par/%s/vis_par' % datatset  # remapped results
    # ori_img_dir = '/xuhanzhu/%s/train2014' % datatset
    ori_img_dir = '/xuhanzhu/CIHP/train_img'
    img_dir = '/xuhanzhu/inference_par/%s/img' % datatset
    con_dir = '/xuhanzhu/inference_par/%s/vis_par_ins_con' % datatset
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if not os.path.exists(save_dir_par):
        os.makedirs(save_dir_par)
    if not os.path.exists(save_dir_par_ins):
        os.makedirs(save_dir_par_ins)
    if not os.path.exists(save_dir_par_ori):
        os.makedirs(save_dir_par_ori)
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    if not os.path.exists(con_dir):
        os.makedirs(con_dir)
    # Collect file stems of every predicted image.
    for p_f in predict_files:
        filename = os.path.basename(p_f).split('.')[0]
        predict_fs.append(filename)
    ann_root = '/xuhanzhu/CIHP/train_parsing/'
    d = 0
    predict_fs = sorted(predict_fs)
    for p_f in predict_fs:
        if d % 100 == 0:
            print(d)
        # p_f_s = p_f.split('_')[0]
        p_f_s = p_f
        # shutil.copy(os.path.join(ori_img_dir, p_f_s + '.jpg'), os.path.join(img_dir, p_f_s + '.jpg'))
        ori_img = cv2.imread(os.path.join(ori_img_dir, p_f_s + '.jpg'))
        par_pred_list = glob.glob(pred_dir + p_f_s + '*.png')
        par_f_list = glob.glob(ann_root + p_f_s + '*.png')
        ann_all = cv2.imread('/xuhanzhu/CIHP/train_seg/{}.png'.format(p_f_s), 0)
        pred_all = cv2.imread(os.path.join(pred_all_dir, p_f_s + '.png'), 0)
        save_name = p_f_s + '.png'
        result_numpy = np.zeros_like(pred_all)
        # Match every predicted person instance against every GT person mask.
        for p in par_pred_list:
            pre = cv2.imread(p, 0)
            for f in par_f_list:
                ann = cv2.imread(f, 0)
                ann_p_copy = np.zeros_like(ann)
                pred_p_copy = np.zeros_like(pre)
                ann_p_copy[ann > 0] = 1
                pred_p_copy[pre > 0] = 1
                iu = cal_one_mean_iou(ann_p_copy, pred_p_copy, 2)
                if iu[1] > 0.5:
                    ### per-person label transfer
                    for i in range(class_num):
                        if (pre == i).any():
                            # Majority vote: which GT labels overlap predicted class i?
                            bin_number = np.bincount(ann[pre == i])
                            maxid = np.argsort(bin_number)
                            if len(maxid) > 1:
                                for mid in reversed(range(len(maxid))):
                                    # if mid < len(maxid)-1:
                                    # continue
                                    if maxid[mid] and bin_number[maxid[mid]] > 0:
                                        # if i not in [14, 15, 16, 17, 18, 19]:
                                        # result_bool_1 = (pre == i)
                                        # result_numpy[result_bool_1] = i
                                        # else:
                                        result_bool_1 = (pre == i) & (ann == maxid[mid]) & (result_numpy == 0)  #
                                        result_numpy[result_bool_1] = i
                                        result_bool_1 = (pre == i) & (ann == maxid[mid]) & (result_numpy == 0)  #
                                        result_numpy[result_bool_1] = i  #
                            else:
                                if maxid[0] > 0:
                                    result_bool_1 = (pre == i)
                                    result_numpy[result_bool_1] = i
                else:
                    # Unmatched instance: keep its predicted labels unchanged.
                    result_bool_1 = pre > 0
                    result_numpy[result_bool_1] = pre[pre > 0]  #
        # Global pass for selected classes (1 and 14) over the whole-image map.
        for i in range(1, class_num):
            if i in [1, 14]:
                if (pred_all == i).any():
                    bin_number = np.bincount(ann_all[pred_all == i])
                    maxid = np.argsort(bin_number)
                    print(bin_number[maxid[-1]] / np.sum(bin_number))
                    if len(maxid) > 1 and bin_number[maxid[-1]] / np.sum(bin_number) > 0.2:
                        for mid in reversed(range(len(maxid))):
                            if mid < len(maxid) - 1:
                                continue
                            if not maxid[mid] in [2]:
                                if maxid[mid] and bin_number[maxid[mid]] > 0:
                                    result_bool_1 = (ann_all == maxid[mid]) & (result_numpy == 0)
                                    result_numpy[result_bool_1] = i
                            else:
                                continue
                    else:
                        if maxid[0] > 0:
                            result_bool_1 = (ann_all == maxid[0]) & (result_numpy == 0)
                            result_numpy[result_bool_1] = i
        # (Several large commented-out alternative remapping schemes and
        #  hand-tuned class-correspondence tables followed here in the
        #  original; condensed to this note.)
        cv2.imwrite(os.path.join(save_dir_par, save_name), result_numpy)
        # vis_parsing(os.path.join(save_dir_par, save_name), save_dir_par_ori, 'MHP59', ori_img, True)
        # vis_parsing('/xuhanzhu/%s/train_seg_uv/{}.png'.format(p_f_s) % datatset, save_dir, 'CIHP20', ori_img, True)
        # vis_parsing(os.path.join(pred_all_dir, p_f_s + '.png'), save_dir_par_ins, 'MHP59', ori_img, False)
        # draw_contous(p_f_s, con_dir, datatset)
        d += 1
def draw_contous(id, dir, dataset):
    """Trace the instance mask of image *id* in red over its colorized parsing image."""
    mask_path = '/xuhanzhu/inference_par/%s/par/%s.png' % (dataset, id)
    vis_path = '/xuhanzhu/inference_par/%s/vis_par_ins/%s.png' % (dataset, id)
    label_map = cv2.imread(mask_path, 0)
    canvas = cv2.imread(vis_path)
    found, _ = cv2.findContours(label_map.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(canvas, found, -1, (0, 0, 255), 1)
    cv2.imwrite(os.path.join(dir, '%s_ins.png' % id), canvas)
def convert_parsing_2():
    """Crop the remapped parsing map to each person's GT bbox and save per-instance files."""
    coco_folder = '/xuhanzhu/CIHP/'
    cihp_coco = COCO(coco_folder + '/annotations/CIHP_train.json')
    parsing_dir = '/xuhanzhu/inference_par/CIHP-COCO/par'
    target_dir = '/xuhanzhu/CIHP'
    im_ids = cihp_coco.getImgIds()
    for i, im_id in enumerate(im_ids):
        if i % 50 == 0:
            # Progress indicator.
            print(i)
        ann_ids = cihp_coco.getAnnIds(imgIds=im_id)
        anns = cihp_coco.loadAnns(ann_ids)
        im = cihp_coco.loadImgs(im_id)[0]
        height = im['height']
        width = im['width']
        filename = im['file_name']
        for ii, ann in enumerate(anns):
            c = ann['category_id']
            # Category 1 is 'person' in the CIHP COCO-format annotations.
            if c == 1:
                parsing_save = np.zeros((height, width))
                bbr = np.array(ann['bbox']).astype(int)  # the box.
                parsing_name = os.path.join(parsing_dir, filename.replace('.jpg', '.png'))
                # print(parsing_name)
                if os.path.exists(parsing_name):
                    parsing = cv2.imread(parsing_name, 0)
                    x1, y1, x2, y2 = bbr[0], bbr[1], bbr[0] + bbr[2], bbr[1] + bbr[3]
                    # Clamp the bbox to the image bounds.
                    x2 = min([x2, width]);
                    y2 = min([y2, height])
                    parsing_save[y1:y2, x1:x2] = parsing[y1:y2, x1:x2]
                    save_name = os.path.join(target_dir + '/train_parsing_cdcl_coco', ann['parsing'])
                    cv2.imwrite(save_name, parsing_save)
def vis_parsing_dir(new_dir, colormap, img_dir, parsing_dir, draw_contours):
    """Overlay MHP59-colorized parsing maps on every densepose-COCO train image in img_dir.

    NOTE(review): the ``colormap`` parameter is ignored -- the palette name is
    hard-coded to 'MHP59' and the local is then rebound; confirm intent.
    """
    coco_folder = '/xuhanzhu/mscoco2014'
    cihp_coco = COCO(coco_folder + '/annotations/densepose_coco_2014_train.json')
    im_ids = cihp_coco.getImgIds()
    for im_id in sorted(im_ids):
        im = cihp_coco.loadImgs(im_id)[0]
        filename = im['file_name']
        ori_path = os.path.join(img_dir, filename)
        new_path = os.path.join(new_dir, filename)
        parsing_path = os.path.join(parsing_dir, filename.replace('.jpg', '.png'))
        parsing = cv2.imread(parsing_path, 0)
        if os.path.exists(ori_path):
            im_ori = cv2.imread(ori_path)
            parsing_color_list = eval('colormap_utils.{}'.format('MHP59'))  # CIHP20
            parsing_color_list = colormap_utils.dict_bgr2rgb(parsing_color_list)
            colormap = colormap_utils.dict2array(parsing_color_list)
            # NOTE(review): np.int was removed in NumPy 1.24 -- this line
            # breaks on recent NumPy; builtin int is the replacement.
            parsing_color = colormap[parsing.astype(np.int)]
            # parsing_color = parsing_color[..., ::-1]
            parsing_alpha = 0.9
            idx = np.nonzero(parsing)
            im_ori = im_ori.astype(np.float32)
            # Blend: darken labelled pixels, then add the weighted palette colors.
            im_ori[idx[0], idx[1], :] *= 1.0 - parsing_alpha
            im_ori += parsing_alpha * parsing_color
            #####
            if draw_contours:
                contours, _ = cv2.findContours(parsing.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                cv2.drawContours(im_ori, contours, -1, (0, 0, 255), 1)
            cv2.imwrite(new_path, im_ori)
        else:
            continue
#
def vis_parsing333(path, dir):
    """Colorize a parsing label image with the MHP59 palette and save it under *dir*."""
    parsing = cv2.imread(path, 0)
    # getattr replaces the original eval('colormap_utils.MHP59') lookup.
    palette = getattr(colormap_utils, 'MHP59')
    palette = colormap_utils.dict_bgr2rgb(palette)
    lut = colormap_utils.dict2array(palette)
    # np.int was removed in NumPy 1.24; builtin int is the equivalent.
    colored = lut[parsing.astype(int)]
    cv2.imwrite(os.path.join(dir, os.path.basename(path)), colored)
def vis_parsing_dir2():
    """Colorize every parsing map in the CDCL-COCO training folder into ./vis."""
    src_root = '/xuhanzhu/CIHP/train_parsing_cdcl_coco'
    for entry in os.listdir(src_root):
        vis_parsing333(os.path.join(src_root, entry), './vis')
# Entry point: earlier experiments are kept commented out; only the
# bbox-crop conversion runs when the script is executed.
# compute_ious()
# keypoints()
# compute_iou_pp()
# draw_contous('0000015')
# vis_parsing("/home/zhuxuhan/par_dir/0000001.png", './')
# convert_parsing_2()
# img_dir = '/xuhanzhu/mscoco2014/val2014'
# new_dir = '/xuhanzhu/anno'
# img_dir = '/xuhanzhu/output'
# new_dir = '/xuhanzhu/output_anno'
# parsing_dir = '/xuhanzhu/mscoco2014/train_parsing_cihp'
# vis_parsing_dir(new_dir, 'MHP59', img_dir, parsing_dir, True)
# vis_parsing_dir2()
convert_parsing_2()
#
|
988,083 | d59f263b0bf1e7ea05b2f82a64e1ba3bb519f38c | from __future__ import print_function
import math
import os
import shelve
TABLE = "table"
DATA = "data"
CLASS = "class"
FILE_NAMES = "files"
FEATURE_NAMES = "features"
CLASS_MAL = 'M'
CLASS_BEN = 'B'
def convertToNumber(s):
    """Pack the UTF-8 bytes of *s* into a single little-endian integer."""
    encoded = s.encode()
    return int.from_bytes(encoded, 'little')
def convertFromNumber(n):
    """Inverse of convertToNumber: rebuild a string from its little-endian integer form."""
    byte_len = math.ceil(n.bit_length() / 8)
    return n.to_bytes(byte_len, 'little').decode()
def read_table(file):
    """Load the feature table and the feature-name list from a shelve database."""
    store = shelve.open(file)
    table = store[TABLE]
    features = store[FEATURE_NAMES]
    store.close()
    return table, features
def find_element_in_list(element, list_element):
    """Return the index of *element* in *list_element*, or None when absent."""
    if element in list_element:
        return list_element.index(element)
    return None
def read_file(file):
    """Read *file* and return its lines with trailing newlines stripped."""
    lines = []
    with open(file, 'r') as f:
        for line in f:
            # The original bound list.append's None return to an unused
            # variable; append mutates in place and returns nothing useful.
            lines.append(line.rstrip('\n'))
    return lines
def make_table_dict(input_dir, label):
    """Build {file_stem: {DATA: [lines], CLASS: label}} from every file in *input_dir*.

    Each file in the directory contributes one entry keyed by its basename
    without extension; its lines (newline-stripped) go under DATA.
    """
    table = {}
    # The original kept an unused `index = 0` counter; dropped.
    for file in os.listdir(input_dir):
        file_name = os.path.splitext(os.path.basename(file))[0]
        lines = read_file(os.path.join(input_dir, file))
        # Build the line list directly instead of appending one-by-one.
        table[file_name] = {DATA: list(lines), CLASS: label}
    return table
def main():
    """Smoke-test the string-to-number conversion helper on a sample string."""
    sample = "Hello World!"
    as_number = convertToNumber(sample)
    print(as_number)
    print(sample)
if __name__ == "__main__":
main()
|
988,084 | a44cc90990eed2fe1e7d663ab93eb7636fad176f | while True:
print "I have aids",
|
988,085 | 6f6f33236269029d132bfd857dc60b1e8f531b4d | # encoding=utf8
var1 = 'Hello World!'
var1 = "Python"
# 在 python 中赋值语句总是建立对象的引用值,而不是复制对象。因此,python 变量更像是指针,而不是数据存储区域,
print(var1)
# 三括号注释
var2 = """
>>> a = "asd"
>>> id(a)
4431000496
>>> a = "122"
>>> id(a)
4431000552
"""
print(var2)
# 第二部分
# 填充
var3 = "1234"
print(var3.center(10, "*"))
print(var3.ljust(10, '^'))
print(var3.rjust(10, "^"))
print(var3.zfill(10))
# 返回值
'''
***1234***
1234^^^^^^
^^^^^^1234
0000001234
'''
# 删减
var4 = "55785"
print(var4.strip("5"))
print(var4.lstrip("5"))
print(var4.rstrip("5"))
# 返回值
'''
78
785
5578
'''
# 变形
var5 = "thank yoU"
print(var5.lower())
print(var5.upper())
print(var5.capitalize())
print(var5.swapcase())
print(var5.title())
'''
thank you
THANK YOU
Thank you
THANK YOu
Thank You
'''
# 切分
var6 = "7890"
# 有点像 find()和 split()的结合体,从 str 出现的第一个位置起,把 字 符 串 string 分 成 一 个 3 元 素 的 元 组 (string_pre_str,str,string_post_str),如果 string 中不包含str 则 string_pre_str == string.
print(var6.partition('9'))
print(var6.partition('2'))
print(var6.rpartition("0"))
var7 = "abz\nzxy"
print(var7.splitlines())
print(var7.split("z"))
print(var7.rsplit("z"))
# 连接
var8 = "ikaf"
print(var8.join("0000"))
# 判定
var9 = "kj45"
# 长度>0 && 都是字母或都是数字 true 否则false
print(var9.isalnum())
# 长度>0 && 都是字母 true 否则false
print(var9.isalpha())
print(var9.isdigit())
print(var9.islower())
print(var9.isupper())
print(var9.isspace())
print(var9.istitle())
print(var9.startswith('k'))
print(var9.endswith('5'))
# 查找
var10 = "1234567890zxc123vbndfgh"
print(var10.count('123', 0, len(var10)))
# 返回第一个满足条件的位置
print(var10.find('3', 0, len(var10)))
#
print(var10.index('3', 0, len(var10)))
# 找不到返回-1
print(var10.rfind('mm', 0, len(var10)))
# 找不到报错
# print(var10.rindex('mm', 0, len(var10)))
# 替换
var11 = "aaaa111222hhhjjjkkk"
print(var11.replace("a", "b", 2))
# print(var11.translate())
# translate(table[,deletechars])
# 编码解码
# 编码就是将字符串转换成字节码,涉及到字符串的内部表示。
# 解码就是将字节码转换为字符串,将比特位显示成字符。
var12 = "什么鬼"
print(var12.encode())
print(var12.encode().decode())
'''
b'\xe4\xbb\x80\xe4\xb9\x88\xe9\xac\xbc'
什么鬼
'''
|
988,086 | 50fc4a69b54c0e5d705be21278f71a2ca4a887ab | #!/usr/bin/env python3
class Donor:
    """A tissue donor and the cells sampled from them."""

    def __init__(self, donor_id, age, sex, cells=None):
        # A mutable [] default would be shared by every Donor instance
        # (classic mutable-default pitfall); use a None sentinel instead.
        self.donor_id = donor_id
        self.age = age
        self.sex = sex
        self.cells = [] if cells is None else cells
class Cell:
    """A single cell identified by its barcode."""

    def __init__(self, cell_barcode, sequenced_areas=None):
        # None sentinel avoids the shared mutable-default pitfall.
        self.cell_barcode = cell_barcode
        self.sequenced_areas = [] if sequenced_areas is None else sequenced_areas
class SequencedArea:
    """A sequenced genomic area holding the UMIs observed in it."""

    def __init__(self, umis=None):
        # None sentinel avoids the shared mutable-default pitfall.
        self.umis = [] if umis is None else umis
class UMI:
    """A unique molecular identifier grouping the read sets that share it."""

    def __init__(self, UMI_id, read_sets=None):
        # None sentinel avoids the shared mutable-default pitfall.
        self.UMI_id = UMI_id
        self.read_sets = [] if read_sets is None else read_sets
class ReadSet:
    """A collection of sequencing reads."""

    def __init__(self, reads=None):
        # None sentinel avoids the shared mutable-default pitfall.
        self.reads = [] if reads is None else reads
class Read:
    """A single sequencing read."""
    def __init__(self, sequence):
        # Raw base sequence of the read.
        self.sequence = sequence
|
988,087 | 04d4b426f098025558222201d28f8bf1d0e47e44 | #This is a simple open a picture and display edges.
import cv2
import numpy as np
raw = cv2.imread("myTest.jpg")
img = cv2.imread("myTest.jpg", cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(img,200,200)
edgesBGR= cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)
rawEdges = cv2.add(raw,edgesBGR)
cv2.imshow('image',img)
cv2.imshow('edges',edges)
cv2.imshow('rawEdges',rawEdges)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
988,088 | 9aebbd588be262f69b1c07681e4fbc41995ad2ff | from bs4 import BeautifulSoup as bs
import requests as rq
import re
import pandas as pd
urlparts = ['http://www2.recife.pe.gov.br/servico/', '?op=NzQ0MQ==']
bairros = { \
'rpa1': ['bairro-do-recife',
'boa-vista',
'cabanga',
'coelhos',
'ilha-do-leite',
'ilha-joana-bezerra',
'paissandu',
'santo-amaro',
'santo-antonio',
'sao-jose',
'soledade'],
'rpa2': ['agua-fria',
'alto-santa-terezinha',
'arruda',
'beberibe',
'bomba-do-hemeterio',
'cajueiro',
'campina-do-barreto',
'campo-grande',
'dois-unidos',
'encruzilhada',
'fundao',
'hipodromo',
'linha-do-tiro',
'peixinhos',
'ponto-de-parada',
'porto-da-madeira',
'rosarinho',
'torreao'],
'rpa3': [#'aflitos',
'alto-do-mandu',
#'alto-jose-bonifacio',
#'alto-jose-do-pinho',
'apipucos',
'brejo-da-guabiraba',
#'brejo-do-beberibe',
'casa-amarela',
'casa-forte',
'corrego-do-jenipapo',
'derby',
#'dois-irmaos',
#'espinheiro',
#'gracas',
'guabiraba',
'jaqueira',
'macaxeira',
'mangabeira',
'monteiro',
'morro-da-conceicao',
'nova-descoberta',
'parnamirim',
#'passarinho',
'pau-ferro',
'poco-da-panela',
'santana',
'sitio-dos-pintos',
'tamarineira',
'vasco-da-gama'],
'rpa4': ['caxanga',
'cidade-universitaria',
'cordeiro',
#'engenho-do-meio',
'ilha-do-retiro',
'iputinga',
'madalena',
'prado',
'torre',
'torroes',
'varzea',
'zumbi'],
'rpa5': ['afogados',
'areias',
'barro',
'bongi',
'cacote',
'coqueiral',
#'curado',
'estancia',
'jardim-sao-paulo',
'jiquia',
'mangueira',
'mustardinha',
'san-martin',
'sancho',
'tejipio',
'toto'],
'rpa6': ['boa-viagem',
#'brasilia-teimosa',
'cohab',
'ibura',
'imbiribeira',
'ipsep',
'jordao',
'pina']
}
# Lazily fetch and parse each borough's service page (one generator per RPA).
urls = { key : ( bairro.join(urlparts) for bairro in bairros[key] ) for key in bairros }
# NOTE(review): these are generators -- each page sequence can be iterated only once.
bspages = { key : ( bs(rq.get(url).text, 'html.parser') for url in urls[key] ) for key in urls }
# Attributes of the <div> that wraps every page's main content.
first_attrs = { 'class': 'content-text text-servico' }
def get_nome_bairro(bspage):
    """Extract the neighborhood name from the page's main content <h2>."""
    container = bspage.find('div', attrs = first_attrs)
    nome_bairro = container.find('h2').text
    return nome_bairro
def get_dist_marco_zero(bspage):
    """Distance (km) to Recife's Marco Zero, parsed from the intro paragraph."""
    container = bspage.find('div', attrs = first_attrs)
    text = container.find('p').text
    # Split on runs of non-numeric characters; the distance sits at a fixed
    # position from the end, shifted by one on longer pages.
    dist_texts = re.split('[^0-9.,]+', text)
    dist_text = dist_texts[-5] if len(dist_texts) > 8 else dist_texts[-4]
    # Brazilian decimal comma -> dot.
    dist_text = re.sub(',', '.', dist_text)
    dist = float(dist_text)
    return dist
def get_area_hectare(bspage):
    """Neighborhood area in hectares, parsed from the intro paragraph."""
    container = bspage.find('div', attrs = first_attrs)
    text = container.find('p').text
    # Numbers are pulled positionally from the end; longer pages shift by one.
    area_texts = re.split('[^0-9]+', text)
    area_text = area_texts[-4] if len(area_texts) > 11 else area_texts[-3]
    area = int(area_text)
    return area
def get_populacao(bspage):
    """Total population from the intro paragraph ('.' thousands separators removed)."""
    container = bspage.find('div', attrs = first_attrs)
    text = container.find('p').text
    pop_texts = re.split('[^0-9.,]+', text)
    # The last token can be empty when the paragraph ends with punctuation.
    pop_text = pop_texts[-1] if pop_texts[-1] != '' else pop_texts[-2]
    pop_text = re.sub('\.', '', pop_text)
    populacao = int(pop_text)
    return populacao
def get_pop_masc(bspage):
    """Male population count: fifth <td> of the page's first table body."""
    cells = bspage.find('tbody').find_all('td')
    digits = cells[4].text.replace('.', '')
    return int(digits)
def get_pop_fem(bspage):
    """Female population count: eighth <td> of the page's first table body."""
    cells = bspage.find('tbody').find_all('td')
    digits = cells[7].text.replace('.', '')
    return int(digits)
def get_pop_faixa_etaria(bspage):
    """Population by age bracket from the 2nd table: every 3rd <td> holds a count."""
    container = bspage.find_all('tbody')[1]
    faixas = ['pop_0_4', 'pop_5_14', 'pop_15_17', 'pop_18_24', 'pop_25_59', 'pop_60_']
    faixa_tds = container.find_all('td')[4:23:3]
    faixa_qtd_txts = [td.text for td in faixa_tds]
    # Drop '.' thousands separators before converting.
    faixa_qtd_txts = [re.sub('\.', '', txt) for txt in faixa_qtd_txts]
    faixa_qtds = [int(txt) for txt in faixa_qtd_txts]
    pop_faixa = dict(zip(faixas, faixa_qtds))
    return pop_faixa
def get_pop_porc_raca(bspage):
    """Population percentage by race from the 3rd table (Brazilian decimal commas)."""
    container = bspage.find_all('tbody')[2]
    racas = ['pop_porc_branca', 'pop_porc_preta', 'pop_porc_parda', 'pop_porc_amarela', 'pop_porc_indigena']
    racas_tds = container.find_all('td')[3:14:2]
    racas_porcs_txts = [td.text for td in racas_tds]
    # Drop thousands dots, then convert decimal commas to dots.
    racas_porcs_txts = [re.sub('\.', '', racas_porc_txt) for racas_porc_txt in racas_porcs_txts]
    racas_porcs = [float(re.sub(',', '.', txt)) for txt in racas_porcs_txts]
    racas_porcs = dict(zip(racas, racas_porcs))
    return racas_porcs
def get_alfab_dez_mais(bspage):
    """Literacy rate (age 10+), parsed from the second non-empty paragraph."""
    container = bspage.find('div', attrs = first_attrs)
    alfab_ps = [p.text for p in container.find_all('p')]
    # Some pages insert a &nbsp;-only paragraph first; skip it when present.
    alfab_p_text = alfab_ps[1] if alfab_ps[1] != '\xa0' else alfab_ps[2]
    alfab_text = re.split('[^0-9.,]+', alfab_p_text)[3]
    alfab_text = re.sub(',', '.', alfab_text)
    alfab_dez_mais = float(alfab_text)
    return alfab_dez_mais
def get_taxa_m_cresc(bspage):
    """Mean annual growth rate: 6th numeric token of the merged body paragraphs."""
    container = bspage.find('div', attrs = first_attrs)
    ps = container.find_all('p')
    # Drop &nbsp;-only paragraphs and the leading title paragraph.
    p_texts = [p.text for p in ps if p.text != '\xa0']
    p_texts = p_texts[1:]
    p_text = ''.join(p_texts)
    info_texts = re.split('[^0-9.,-]+', p_text)
    info_texts = [text for text in info_texts if text != '.' and text != ',' and text != '-' and text != '']
    taxa_text = info_texts[5]
    taxa_text = re.sub(',', '.', taxa_text)
    taxa_m_cresc = float(taxa_text)
    return taxa_m_cresc
def get_dens_dem(bspage):
    """Demographic density: 7th numeric token of the merged body paragraphs."""
    container = bspage.find('div', attrs = first_attrs)
    ps = container.find_all('p')
    # Drop &nbsp;-only paragraphs and the leading title paragraph.
    p_texts = [p.text for p in ps if p.text != '\xa0']
    p_texts = p_texts[1:]
    p_text = ''.join(p_texts)
    info_texts = re.split('[^0-9.,]+', p_text)
    info_texts = [text for text in info_texts if text != ',' and text != '.' and text != '']
    dens_text = info_texts[6]
    dens_text = re.sub(',', '.', dens_text)
    dens_dem = float(dens_text)
    return dens_dem
def get_num_domic(bspage):
    """Number of households: 9th numeric token of the merged body paragraphs."""
    container = bspage.find('div', attrs = first_attrs)
    ps = container.find_all('p')
    # Drop &nbsp;-only paragraphs and the leading title paragraph.
    p_texts = [p.text for p in ps if p.text != '\xa0']
    p_texts = p_texts[1:]
    p_text = ''.join(p_texts)
    info_texts = re.split('[^0-9., ]+', p_text)
    info_texts = [text for text in info_texts if text != ',' and text != '.' and text != '' and text != ' ']
    num_text = info_texts[8]
    # Remove '.' thousands separators and stray spaces before converting.
    num_text = re.sub('\.', '', num_text)
    num_text = re.sub(' ', '', num_text)
    num_domic = int(num_text)
    return num_domic
def get_morador_domic(bspage):
    """Average residents per household; page layout differs (list vs spans)."""
    container = bspage.find('div', attrs = first_attrs)
    ul = container.find('ul')
    if ul:
        # Newer layout: the figure is in the first <li> after a colon.
        ul = ul.find('li')
        md_text = re.match('.+:[^0-9]*([0-9,]+)[^0-9]*', ul.text).group(1)
        md_text = re.sub(',', '.', md_text)
        morador_domic = float(md_text)
    else:
        # Fallback layout: the figures live in the last few <span> elements.
        spans = container.find_all('span')
        span_texts = [span.text for span in spans if span.text != '\xa0' and span.text != '']
        span_texts = span_texts[-5:]
        span_text = ''.join(span_texts)
        info_texts = re.split('[^0-9.,]+', span_text)
        info_texts = [text for text in info_texts if text != ',' and text != '.' and text != '']
        md_text = info_texts[0]
        md_text = re.sub(',', '.', md_text)
        morador_domic = float(md_text)
    return morador_domic
def get_prop_resp_fem(bspage):
    """Share of female household heads; page layout differs (list vs spans)."""
    container = bspage.find('div', attrs = first_attrs)
    ul = container.find('ul')
    if ul:
        # Newer layout: the figure is in the second <li> after a colon.
        ul = ul.find_all('li')[1]
        resp_text = re.match('.+:[^0-9]*([0-9,]+)[^0-9]*', ul.text).group(1)
        resp_text = re.sub(',', '.', resp_text)
        prop_resp_fem = float(resp_text)
    else:
        # Fallback layout: second numeric token of the last few <span>s.
        spans = container.find_all('span')
        span_texts = [span.text for span in spans if span.text != '\xa0' and span.text != '']
        span_texts = span_texts[-5:]
        span_text = ''.join(span_texts)
        info_texts = re.split('[^0-9.,]+', span_text)
        info_texts = [text for text in info_texts if text != ',' and text != '.' and text != '']
        prop_text = info_texts[1]
        prop_text = re.sub(',', '.', prop_text)
        prop_resp_fem = float(prop_text)
    return prop_resp_fem
def get_rend_medio(bspage):
    """Average household income in R$; page layout differs (list vs spans)."""
    container = bspage.find('div', attrs = first_attrs)
    ul = container.find('ul')
    if ul:
        # Newer layout: the figure follows 'R$' in the third <li>.
        ul = ul.find_all('li')[2]
        rend_text = re.match('.+R\$\s*([0-9.,]+)[^0-9]*', ul.text).group(1)
        # Drop '.' thousands separators, then convert the decimal comma.
        rend_text = re.sub('\.', '', rend_text)
        rend_text = re.sub(',', '.', rend_text)
        rend_medio = float(rend_text)
    else:
        # Fallback layout: third numeric token of the last few <span>s.
        spans = container.find_all('span')
        span_texts = [span.text for span in spans if span.text != '\xa0' and span.text != '']
        span_texts = span_texts[-5:]
        span_text = ''.join(span_texts)
        info_texts = re.split('[^0-9.,]+', span_text)
        info_texts = [text for text in info_texts if text != ',' and text != '.' and text != '']
        rend_text = info_texts[2]
        rend_text = re.sub('\.', '', rend_text)
        rend_text = re.sub(',', '.', rend_text)
        rend_medio = float(rend_text)
    return rend_medio
def get_data_dict(bspage):
    """Run every extractor over one borough page and merge results into one flat dict."""
    data_fields = ['nome_bairro',
                   'dist_marco_zero',
                   'area_hectare',
                   'populacao',
                   'pop_masc',
                   'pop_fem',
                   'alfab_dez_mais',
                   'taxa_m_cresc',
                   'dens_dem',
                   'num_domic',
                   'morador_por_domic',
                   'prop_resp_fem',
                   'rend_medio']
    # Extractors are kept positionally aligned with data_fields above.
    data_funcs = [get_nome_bairro,
                  get_dist_marco_zero,
                  get_area_hectare,
                  get_populacao,
                  get_pop_masc,
                  get_pop_fem,
                  get_alfab_dez_mais,
                  get_taxa_m_cresc,
                  get_dens_dem,
                  get_num_domic,
                  get_morador_domic,
                  get_prop_resp_fem,
                  get_rend_medio]
    data = [func(bspage) for func in data_funcs]
    # These two extractors return dicts, so they are merged rather than zipped.
    pop_faixa_etaria = get_pop_faixa_etaria(bspage)
    pop_porc_raca = get_pop_porc_raca(bspage)
    data_dict = dict(list(zip(data_fields, data)) + list(pop_faixa_etaria.items()) + list(pop_porc_raca.items()))
    return data_dict
# Materialize one DataFrame per RPA; dict insertion order maps rpa1..rpa6 to 1..6.
dfs = [pd.DataFrame([get_data_dict(bspage) for bspage in bspages[key]]) for key in bairros]
for n, df in enumerate(dfs):
    df['rpa'] = n + 1
left_out_df = pd.DataFrame({
#Bairros faltando:
#Aflitos
#Alto José Bonifácio
#Alto José do Pinho
#Brejo do Beberibe
#Dois Irmãos
#Espinheiro
#Graças
#Passarinho
#Engenho do Meio
#Curado
#Brasília Teimosa
'nome_bairro': ['Aflitos',
'Alto José Bonifácio',
'Alto José do Pinho',
'Brejo do Beberibe',
'Dois Irmãos',
'Espinheiro',
'Graças',
'Passarinho',
'Engenho do Meio',
'Curado',
'Brasília Teimosa'],
'dist_marco_zero': [3.72, 7.27, 6.05, 9.34, 10.4, 3.09, 3.71, 10.97, 8, 9.68, 2.33],
'area_hectare': [31, 57, 41, 64, 585, 73, 144, 406, 87, 798, 61],
'populacao': [5773, 12462, 12334, 8292, 2566, 10438, 20538, 20305, 10211, 16418, 18334],
'pop_masc': [2541, 5863, 5617, 3938, 1251, 4465, 8842, 9954, 4609, 7753, 8571],
'pop_fem': [3232, 6599, 6717, 4354, 1315, 5973, 11696, 10371, 5602, 8665, 9773],
'alfab_dez_mais': [99.2, 91, 91.7, 90.2, 93.1, 98.1, 99.2, 87.1, 96.1, 90.3, 91.8],
'taxa_m_cresc': [2.8, .07, -0.08, 3.62, -1.7, 1.6, 1.6, 2.79, -0.34, 1.99, -0.44],
'dens_dem': [187.83, 219.26, 298.4, 129.86, 4.39, 142.56, 143.08, 49.98, 117.54, 20.56, 302.81],
'num_domic': [1937, 3570, 3510, 2459, 737, 3602, 7015, 5792, 3053, 4900, 5464],
'morador_por_domic': [3, 3.5, 3.5, 3.4, 3.5, 2.9, 2.9, 3.5, 3.3, 3.3, 3.4],
'prop_resp_fem': [51.24, 42.64, 53.72, 48.21, 44.1, 49.28, 49.18, 41.31, 46.09, 40.08, 49.57],
'rend_medio': [1028.96, 908.76, 1101.22, 1058.37, 1936.1, 7299.96, 9484.01, 824.02, 2594.45, 1216.36, 1220.81],
'pop_0_4': [240, 911, 785, 655, 177, 469, 794, 1733, 486, 1258, 1285],
'pop_5_14': [546, 2085, 2027, 1514, 427, 892, 1904, 3940, 1204, 2757, 2854],
'pop_15_17': [224, 696, 644, 450, 151, 327, 838, 1242, 464, 830, 907],
'pop_18_24': [695, 1596, 1507, 1094, 372, 1285, 2608, 2731, 1170, 1928, 2156],
'pop_25_59': [3030, 5996, 6022, 3978, 1241, 5415, 10648, 9354, 2242, 8319, 9084],
'pop_60_': [1038, 1188, 1349, 601, 198, 2050, 3746, 1305, 1645, 1326, 2048],
'pop_porc_branca': [76.11, 25.49, 29.02, 31.5, 35.39, 70.56, 76.68, 25.24, 43.49, 33.91, 33.05],
'pop_porc_preta': [1.87, 4.81, 15.65, 9.85, 7.91, 3.43, 2.4, 7.75, 7.56, 8.02, 8.93],
'pop_porc_parda': [21.13, 58.9, 53.57, 57.18, 55.53, 25.03, 19.85, 66.65, 47.25, 56.94, 56.62],
'pop_porc_amarela': [.87, .67, 1.39, 1.45, 1.09, .8, .96, .32, 1.24, .9, .99],
'pop_porc_indigena': [.02, .13, .37, .02, 1.09, .18, .1, .04, .39, .23, .31],
'rpa': [3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 6]
})
# Append the manually transcribed boroughs and write the combined dataset out.
dfs.append(left_out_df)
final_df = pd.concat(dfs, ignore_index = True)
pd.DataFrame.to_csv(final_df, 'dados_preliminares.csv')
|
988,089 | c318675f9890dab512ca0b10ef537a747a4833ff | import numpy as np
class AalenAdditive():
    """
    Aalen's additive regression model.

    Fits time-varying regression coefficients A(t) by least squares at each
    observed event time; cumulative coefficients and their covariance are
    stored on the instance after construction.
    """
    def __init__(self, events, durations, X, entry_time = None):
        """
        Params:
            events (numpy.array): 0 or 1
            durations (numpy.array): time at which event happened or observation was censored
            entry_time (numpy.array): time of entry into study (default is None - entry T is 0 for all)
            X (numpy.matrix): data matrix
        """
        self.events = events
        self.durations = durations
        self.X = X
        if entry_time is None:
            entry_time = np.zeros((len(self.events),))
        self.entry_time = entry_time
        self._fit(X, events, entry_time, durations)

    def _fit(self, X, events, entry_time, durations):
        """Estimate cumulative coefficients and their covariance at each event time."""
        n, p = X.shape
        ids = np.arange(len(X))
        # Rows: [id, event, entry_time, duration, x_1..x_p] per subject.
        matrix = np.vstack([ids, events, entry_time, durations, X.T]).T
        # Distinct times at which at least one event occurred, ascending.
        unique_event_times = np.unique(durations[events == 1])
        unique_event_times.sort()
        T = len(unique_event_times)
        Y = np.zeros((T, n, p))
        I = np.zeros((T, n))
        for j, t in enumerate(unique_event_times):
            # Subjects at risk at time t: entered before t, not yet out.
            risk_pool = matrix[(entry_time < t) & (durations >= t)]
            Y[j, risk_pool[:, 0].astype(int)] = risk_pool[:, 4:]
            # Indicator of subjects whose event occurs exactly at t.
            I[j, risk_pool[(risk_pool[:, 1] == 1) & (risk_pool[:, 3] == t), 0].astype(int)] = 1
        A = np.zeros((T + 1, p))
        cov_A = np.zeros((T + 1, p, p))
        for i, t in enumerate(unique_event_times):
            try:
                # Least-squares pseudo-inverse of the at-risk design matrix.
                X_t = np.dot(np.linalg.inv(np.dot(Y[i].T, Y[i])), Y[i].T)
            except np.linalg.LinAlgError:
                # Singular design at this event time contributes nothing.
                # (The original used a bare except, which also hid unrelated
                # errors such as shape mismatches.)
                X_t = np.zeros((p, n))
            I_t = I[i, :]
            I_d_t = np.diag(I_t)
            # Accumulate the increments of A(t) and its covariance.
            A[i + 1] = A[i] + np.dot(X_t, I_t)
            cov_A[i + 1] = cov_A[i] + np.dot(X_t, I_d_t).dot(X_t.T)
        # Drop the leading zero row used for accumulation.
        A = A[1:]
        cov_A = cov_A[1:]
        self.coefficients = A
        self.covars = cov_A
        self._unique_event_times = unique_event_times
988,090 | a3d24ddb10a03e078910b825032836145d9d7a06 | import random
import os
from flask import jsonify, request, render_template, redirect, url_for
from flask_wtf import FlaskForm
from wtforms import RadioField, TextField, TextAreaField, BooleanField
from wtforms import StringField, SelectMultipleField
from wtforms.validators import Required
from wtforms.widgets import ListWidget, CheckboxInput
from wtforms import validators, ValidationError
from wtforms.validators import DataRequired
from app import app
from app.ImageService import ImageService
STORE_DATA = True
USER_ID = 12345
class MultiCheckboxField(SelectMultipleField):
    # Multi-select field rendered as a plain list of checkboxes instead of
    # the default <select multiple> widget.
    widget = ListWidget(prefix_label=False)
    option_widget = CheckboxInput()
class FormProject(FlaskForm):
    # Simple project form: a free-text code plus task checkboxes.
    Code = StringField('Code', [Required(message='Please enter your code')])
    # NOTE(review): both choices submit the same value 'female', so the
    # server cannot distinguish them — confirm the intended values.
    Tasks = MultiCheckboxField('Proses', [Required(message='Please tick your task')],
                               choices=[('female', 'man'), ('female', 'male')])
class DataGethererForm(FlaskForm):
    # Annotation form for pedestrian images: radio groups are rendered as
    # image labels, accessory attributes as checkbox icons, plus free text.
    gender = RadioField(label='Gender',
                        choices=[('0', '<img src="/static/female-icon.png">'),
                                 ('1', '<img src="/static/male-icon.png">')],
                        validators=[DataRequired()]
                        )
    age = RadioField(label='Age',
                     choices=[('0', '<img src="/static/age/child.jpg"> <div class="desc"> 1+ </div>'),
                              ('1', '<img src="/static/age/teen.png"> <div class="desc"> 19+ </div>'),
                              ('2', '<img src="/static/age/youngadult.png"> <div class="desc"> 30+ </div>'),
                              ('3', '<img src="/static/age/adult.jpg"> <div class="desc"> 45+ </div>'),
                              ('4', '<img src="/static/age/retiree.png"> <div class="desc"> 60+ </div>')],
                     validators=[validators.DataRequired("Please select age.")]
                     )
    # Hover tooltips describing each fashion style option.
    casualTooltip = '"everyday clothes, jacket, sweatshirt, shirt, jeans, tracksuit, sneakers, sandals, hookers"'
    sportTooltip = '"cyclists, runners, pads, helmet, sportswear, top, jacket, sports leggings, tracksuits, sneakers"'
    rockTooltip = '"chin, glasses, scarves, chains, accessories, dark clothes, leather, print shirts, skulls, big shoes"'
    streetTooltip = '"caps, headphones, chains, T-shirts and trousers, torn, jackets, sneakers,"'
    elegantTooltip = '"manager, clerk, shirt, sweater, jacket, coat, pants, skirt, dress, high heels, boots, stockings, handbag"'
    formalTooltip = '"heels, boots, stockings, jacket, tuxedo, dress, heels, purse"'
    workTooltip = '"soldier, cop, worker, nurse, work clothes, uniform"'
    style = RadioField(label='Mode style',
                       choices=[
                           ('0', '<div data-toggle="tooltip" data-placement="bottom" title= ' + casualTooltip + ' >'
                                 '<img data-toggle="tooltip" src="/static/modestyle/casual.png"> '
                                 '<div class="desc">casual</div> '
                                 '</div>'),
                           ('1', '<div data-toggle="tooltip" data-placement="bottom" title= ' + sportTooltip + ' >'
                                 '<img src="/static/modestyle/sport.png"> '
                                 '<div class="desc">sport</div>'
                                 '</div>'),
                           ('2', '<div data-toggle="tooltip" data-placement="bottom" title= ' + rockTooltip + ' >'
                                 '<img src="/static/modestyle/rock.png"> '
                                 '<div class="desc">rock</div>'
                                 '</div>'),
                           ('3', '<div data-toggle="tooltip" data-placement="bottom" title= ' + streetTooltip + ' >'
                                 '<img src="/static/modestyle/street.png"> '
                                 '<div class="desc">street</div>'
                                 '</div>'),
                           ('4', '<div data-toggle="tooltip" data-placement="bottom" title= ' + elegantTooltip + ' >'
                                 '<img data-toggle="tooltip"'
                                 'src="/static/modestyle/elegant.png"> <div class="desc">elegant</div>'
                                 '</div>'),
                           ('5', '<div data-toggle="tooltip" data-placement="bottom" title= ' + formalTooltip + ' >'
                                 '<img data-toggle="tooltip"'
                                 'src="/static/modestyle/formal.png"> <div class="desc">formal</div>'
                                 '</div>'),
                           ('6', '<div data-toggle="tooltip" data-placement="bottom" title= ' + workTooltip + ' >'
                                 '<img data-toggle="tooltip"'
                                 'src="/static/modestyle/worksuit.png"> <div class="desc">work suit</div>'
                                 '</div>'),
                       ],
                       validators=[validators.DataRequired("Please select style.")]
                       )
    # Optional accessory attributes, shown as clickable icons.
    backpack = BooleanField('<img class="attribute" src="/static/attributes/backpack.jpg">', default=False)
    handbag = BooleanField('<img class="attribute" src="/static/attributes/handbag.jpg">', default=False)
    shopping = BooleanField('<img class="attribute" src="/static/attributes/shopping.png">', default=False)
    glasses = BooleanField('<img class="attribute" src="/static/attributes/glasses.jpg">', default=False)
    cap = BooleanField('<img class="attribute" src="/static/attributes/cap.jpg">', default=False)
    description = TextField("Something more? ")
def process_attribudes(form):
    """Build the free-text description from the submitted form.

    Starts from form['description'] and appends a keyword for every
    accessory checkbox key present in the form mapping.
    """
    # (field name in the form, word appended to the description)
    accessory_words = (
        ('backpack', ' backpack'),
        ('handbag', ' handbag'),
        ('shopping', ' shoppingbag'),
        ('glasses', ' glasses'),
        ('cap', ' cap'),
    )
    desc = form['description']
    for field, word in accessory_words:
        if field in form:
            desc += word
    return desc
@app.route('/', methods=['GET', 'POST'])
def index():
    # Landing page. A POST picks a random image id and redirects the user
    # to its annotation page; a GET just renders the start page.
    if request.method == 'POST':
        imageService = ImageService()
        next_img_id = imageService.next_rnd_id()
        return redirect(url_for('image', image_id=str(next_img_id["id"])))
    return render_template('index.html')
@app.route('/image/<int:image_id>', methods=['GET', 'POST'])
def image(image_id):
    # Annotation page for one image: GET renders the form, POST either
    # stores the annotation ('Confirm »'), flags the image ('Bad image')
    # or skips it ('Skip »'), then redirects to the next random image.
    print("*******************************")
    imageService = ImageService()
    form = DataGethererForm(request.form)
    form.gender(class_="my_class")
    selected_img = imageService.get_img_by_id(image_id)
    # Prefer the pre-processed ("clear") copy of the image when it exists.
    if os.path.isfile("app/static/clear/" + selected_img["path"]):
        selected_img["resized_image"] = "clear/" + selected_img["path"]
    else:
        selected_img["resized_image"] = selected_img["path"]
    print("id obr:", selected_img["id"])
    if request.method == 'POST':
        if form.validate_on_submit() and request.form['form-type'] == 'Confirm »':
            checked_gender = request.form['gender']
            checked_age = request.form['age']
            checked_style = request.form['style']
            descr = process_attribudes(request.form)
            print("gender:", checked_gender)
            print("age:", checked_age)
            print("style:", checked_style)
            print("description:", descr)
            if (STORE_DATA):
                user_id = request.cookies.get('id')
                print("id user: ", user_id)
                # Halve the priority so annotated images are shown less often.
                new_priority = round(selected_img["priority"] / 2)
                imageService.update_image(selected_img["id"], ("priority", new_priority))
                imageService.save_annotation(selected_img['id'],
                                             user_id,
                                             int(checked_gender),
                                             int(checked_age),
                                             int(checked_style),
                                             descr)
            next_img_id = imageService.next_rnd_id()
            return redirect(url_for('image', image_id=str(next_img_id["id"])))
        elif request.form['form-type'] == 'Bad image':
            # Mark unusable images so they stop being served.
            imageService.update_image(selected_img["id"], ("error_img", True))
            next_img_id = imageService.next_rnd_id()
            return redirect(url_for('image', image_id=str(next_img_id["id"])))
        elif request.form['form-type'] == 'Skip »':
            next_img_id = imageService.next_rnd_id()
            return redirect(url_for('image', image_id=str(next_img_id["id"])))
    return render_template('datagetherer.html', form=form, image=selected_img)
    # NOTE(review): the line below is unreachable — the render above always
    # returns first; confirm whether an error response was intended here.
    return jsonify({'error': 'Image id not found'}), 200
|
988,091 | 0cc8a096bb5b1b81fa0268c78a698ba446d579c1 | import re
from urlparse import urlparse
import requests # Yes, you need to install that
from app import cache
def validateform(username):
    """Normalize and validate a bgm.tv username.

    Accepts either a bare username or a profile URL (the last path segment
    is used). Returns the username string, or None when the input is
    missing or does not match ``[a-zA-Z0-9_]+`` without a leading '_'.
    """
    if username is None:  # idiom fix: was ``username == None``
        return None
    if username.startswith(('http://', 'https://')):
        try:
            username = urlparse(username).path.split('/')[-1]
        except IndexError:
            return None
    # Leading underscore is rejected by the negative lookahead.
    if not re.match(r'^(?!_)[a-zA-Z0-9_]+$', username):
        return None
    return username
@cache.memoize(timeout=172800)
def getnickname(username):
    # Resolve a bgm.tv username to its display nickname via the public API
    # (result memoized for 48 hours).
    r = requests.get("http://api.bgm.tv/user/"+username)
    return r.json()['nickname']
@cache.memoize(timeout=172800)
def getitemname(itemidx):
    """Return the Chinese title of a bgm.tv subject when present, falling
    back to the original title (result memoized for 48 hours)."""
    r = requests.get("http://api.bgm.tv/subject/"+str(itemidx))
    j = r.json()
    # Idiom fix: test string truthiness directly instead of ``len(...)``;
    # an empty 'name_cn' falls through to 'name' exactly as before.
    return j['name_cn'] or j['name']
988,092 | 4395bf4a95b42899e982b988144dc8009c8f879b | import RPi.GPIO as GPIO
button = 17  # BCM pin wired to the push button (input)
led = 18     # BCM pin driving the LED (output)
GPIO.setmode(GPIO.BCM)
GPIO.setup(led, GPIO.OUT)
GPIO.setup(button, GPIO.IN)
# Mirror the button state onto the LED until interrupted (Ctrl-C).
# BUG FIX: GPIO.cleanup() was unreachable after the infinite loop; the
# try/finally guarantees the pins are released on exit.
try:
    while True:
        if GPIO.input(button):
            GPIO.output(led, GPIO.HIGH)
        else:
            GPIO.output(led, GPIO.LOW)
finally:
    GPIO.cleanup()
|
988,093 | ddde8f981674abd40fc0043a4cdb296b84c86745 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 15:14:47 2018
Part 1 creating the multi-flavored option backtests: SAN
@author: dpsugasa
"""
import os, sys
import pandas as pd
from tia.bbg import LocalTerminal
import numpy as np
from datetime import datetime
from operator import itemgetter
import plotly
import plotly.plotly as py #for plotting
import plotly.graph_objs as go
import plotly.dashboard_objs as dashboard
import plotly.tools as tls
import plotly.figure_factory as ff
import credentials
#set the script start time
start_time = datetime.now()
#plotly function
def create_trace(df, label):
    """Build a 2px plotly Scatter line from a pandas Series.

    The Series index supplies the x values (dates) and the Series values
    the y values (prices); *label* becomes the trace name.
    """
    return go.Scatter(
        x=df.index,
        y=df.values,
        name=label,
        line=dict(width=2),
    )
# set dates to grab data
start_date = '01/01/2012'
end_date = "{:%m/%d/%Y}".format(datetime.now())
date_now = "{:%m_%d_%Y}".format(datetime.now())
# Files in Option Directory
path2 = "C:\\Users\\dpsugasa\\WorkFiles\\option_backtests\\options"
dirs2 = os.listdir(path2)
#path = f"D:\\Users\\dpsugasa\\option_backtests\\options\\{direct}"
#dirs = os.listdir(path)
# Accumulators shared by the main loop below (keyed by blotter file name).
ts = {} #dict for trades
tkr = {} #dict for tickers
px = {} #dict of original dataframes per ID
px2 = {} #final dict of a prices
temp = {} #dict of temp dataframes
temp2 = {} #additional dict of temp dataframes
tot_ser = {} #df for all options series per strategy
m = {} #df for option ref data
n = {} #temp df
quants= []
# For every underlying directory and every strategy-flavor blotter (CSV),
# rebuild the option position history from Bloomberg prices and plot the
# cumulative P&L normalised to a $1mm bond-equivalent notional.
for direct in dirs2:
    path = f"C:\\Users\\dpsugasa\\WorkFiles\\option_backtests\\options\\{direct}"
    dirs = os.listdir(path)
    for file in dirs:
        # Load this flavor's trade blotter.
        curr_path = fr'C:\Users\dpsugasa\Workfiles\option_backtests\options\{direct}/{file}'
        ts[file] = pd.read_csv(curr_path)
        ts[file].Date = pd.to_datetime(ts[file].Date)
        ts[file].Expiry = pd.to_datetime(ts[file].Expiry)
        ts[file] = ts[file].drop('Vol', axis=1)
        ts[file]['Amount'] = ts[file]['Amount']/100
        tkr[file] = ts[file]['Ticker'].unique().tolist()
        # set securities, and fields
        IDs = tkr[file]
        fields = ['LAST PRICE']
        d = {} #dict of original dataframes per ID
        d2 = {} #final dict of a prices
        temp = {} #dict of temp dataframes
        temp2 = {} #additional dict of temp dataframes
        ref_data = ['OPTION_ROOT_TICKER', 'OPT_MULTIPLIER', 'OPT_UNDL_PX',
                    'COMPANY_CORP_TICKER', 'CRNCY']
        #get initial prices in 'd', create a temp dataframe with entry/exit dates,
        # price, and expiry for each ticker
        for name in IDs:
            d[file, name] = LocalTerminal.get_historical(name, fields, start_date, end_date,
                                period = 'DAILY').as_frame()
            d[file, name].columns = d[file, name].columns.droplevel()
            d[file, name] = d[file, name].fillna(method = 'ffill')
            temp[file, name] = ts[file].loc[ts[file].Ticker == name][['Date',
                                'Amount', 'Expiry', 'Direction','Shares']]
            temp[file, name].index = temp[file, name].Date
            temp[file, name] = temp[file, name].drop('Date', axis=1)
            m[file, name] = LocalTerminal.get_reference_data(name, ref_data).as_frame()
            n[file] = LocalTerminal.get_reference_data(name, ref_data).as_frame()
        #set option qtty equal to $1mm USD worth of bonds so they can be compared in 'return space'
        opt_curr = n[file]['CRNCY'].item() + " CURNCY"
        curr_px = LocalTerminal.get_reference_data(opt_curr, 'PX_LAST').as_frame().values.item()
        multy = 100.00 #n[file]['OPT_MULTIPLIER'].item() Hard coding as 100 multiplier
        undl = n[file]['OPT_UNDL_PX'].item()
        bond_size = 1000000.0 #1m USD
        b_size_adj = bond_size/curr_px
        opt1_qtty = np.round(((b_size_adj)/(multy*undl)))
        for l in IDs:
            quants.append(opt1_qtty)
        #because some of the price info does not extend to maturity, make new pricing
        # dataframes that have the full price set, including expiry value = 'd2'
        for i in IDs:
            temp2[file, i] = pd.DataFrame(np.nan, columns = ['LAST PRICE_NA'],
                                index = pd.date_range(start = d[file, i].index[0],
                                end = temp[file,i]['Expiry'][-1],
                                freq = 'B'))
            frames = [temp2[file, i], d[file, i]]
            d2[file, i] = pd.concat(frames, join = 'outer', axis = 1)
            d2[file, i] = d2[file, i].drop(['LAST PRICE_NA'], axis = 1)
            #making sure entry and exit days are in the index
            d2[file, i].loc[temp[file, i].index[-1]] = np.nan
            d2[file, i].loc[temp[file, i].index[0]] = np.nan
            d2[file, i] = d2[file, i].sort_index()
            d2[file, i] = d2[file, i].fillna(method = 'ffill')
            d2[file, i] = d2[file, i].dropna()
            d2[file, i] = d2[file, i].sort_index().truncate(after = end_date)
            d2[file, i]['trade'] = 0.0
            d2[file, i]['prev_pos'] = 0.0
            d2[file, i]['pos'] = 0.0
            #entry trade; 1.0 for buy, -1.0 for sell; amend price to entry price
            if temp[file, i]['Direction'][0] == 'Buy':
                d2[file, i]['trade'].loc[temp[file, i].index[0]] = 1.0
                #d2[i]['LAST PRICE'].loc[temp[i].index[0]] = temp[i]['Amount'].loc[temp[i].index[0]]
            else:
                d2[file, i]['trade'].loc[temp[file, i].index[0]] = -1.0
                #d2[i]['LAST PRICE'].loc[temp[i].index[0]] = temp[i]['Amount'].loc[temp[i].index[0]]
            #exit trade; current options use the final day of the series
            if temp[file, i]['Expiry'][-1] < pd.to_datetime(end_date):
                if temp[file, i]['Direction'][-1] == 'Buy':
                    d2[file, i]['trade'].loc[temp[file, i].index[-1]] = 1.0
                    d2[file, i]['LAST PRICE'].loc[temp[file, i].index[-1]] =\
                        temp[file, i]['Amount'].loc[temp[file, i].index[-1]]
                else:
                    d2[file, i]['trade'].loc[temp[file, i].index[-1]] = -1.0
                    d2[file, i]['LAST PRICE'].loc[temp[file, i].index[-1]] =\
                        temp[file, i]['Amount'].loc[temp[file, i].index[-1]]
            else:
                if temp[file, i]['Direction'][-1] == 'Buy':
                    d2[file, i]['trade'][-1] = 1.0
                else:
                    d2[file, i]['trade'][-1] = -1.0
            d2[file, i] = d2[file, i].sort_index()
            # Walk the series forward, accumulating position from trades.
            for z, row in d2[file, i].iterrows():
                idx_loc = d2[file, i].index.get_loc(z)
                prev = idx_loc -1
                d2[file, i]['prev_pos'].loc[z] = d2[file, i]['pos'].iloc[prev]
                if row['trade'] == 1.0:
                    d2[file, i]['pos'].loc[z] = 1.0 + d2[file, i]['prev_pos'].loc[z]
                elif row['trade'] == -1.0:
                    d2[file, i]['pos'].loc[z] = -1.0 + d2[file, i]['prev_pos'].loc[z]
                else:
                    d2[file, i]['pos'].loc[z] = d2[file, i]['prev_pos'].loc[z]
            d2[file, i]['shares'] = temp[file, i]['Shares'].iloc[0]
            d2[file, i]['qtty'] = opt1_qtty
            d2[file, i]['cash_val'] = 0.0
            d2[file, i]['trade_val'] = d2[file, i]['trade']*d2[file, i]['shares']*\
                d2[file, i]['qtty']*d2[file, i]['LAST PRICE']
            d2[file, i]['pos_val'] = d2[file, i]['prev_pos']*d2[file, i]['shares']*\
                d2[file, i]['qtty']*d2[file, i]['LAST PRICE']
            # Accumulate the running cash balance from executed trades.
            for z, row in d2[file, i].iterrows():
                idx_loc = d2[file, i].index.get_loc(z)
                prev = idx_loc -1
                if row['trade'] != 0:
                    d2[file, i]['cash_val'].loc[z] =\
                        np.negative(d2[file, i]['trade'].loc[z]* \
                        d2[file, i]['shares'].loc[z]* \
                        d2[file, i]['qtty'].loc[z]* \
                        d2[file, i]['LAST PRICE'].loc[z]) +\
                        d2[file, i]['cash_val'].iloc[prev]
                else:
                    d2[file, i]['cash_val'].loc[z] = d2[file, i]['cash_val'].iloc[prev]
            d2[file, i]['total_val'] = d2[file, i]['trade_val'] +\
                d2[file, i]['pos_val'] + d2[file, i]['cash_val']
            d2[file, i] = d2[file, i].truncate(after = end_date)
        # Sum the per-option series into this flavor's portfolio value.
        frames = []
        for i in IDs:
            frames.append(d2[file, i]['total_val'])
        tot_ser[file] = pd.concat(frames, join='outer', axis=1)
        tot_ser[file] = tot_ser[file].fillna(method = 'ffill')
        tot_ser[file] = tot_ser[file].fillna(0)
        tot_ser[file]['port_val'] = tot_ser[file].sum(axis=1)/b_size_adj
        #    tot_ser[file]['port_val'] = tot_ser[file].sum(axis=1)
        root_tkr = m[file, i]['COMPANY_CORP_TICKER'].item()
        file_name = file.replace('.csv','')
        trace1 = go.Scatter(
            x = tot_ser[file]['port_val'].index,
            y = tot_ser[file]['port_val'].values,
            name = f'{root_tkr}_Options',
            line = dict(
                color = ('#4155f4'),
                width = 2))
        layout = {'title' : f'{file_name}_{date_now}',
                  'xaxis' : {'title' : 'Date', 'type': 'date'},
                  'yaxis' : {'title' : 'Returns'},
#                  'shapes': [{'type': 'rect',
#                              'x0': d[i]['scr_1y'].index[0],
#                              'y0': -2,
#                              'x1': d[i]['scr_1y'].index[-1],
#                              'y1': 2,
#                              'name': 'Z-range',
#                              'line': {
#                                      'color': '#f48641',
#                                      'width': 2,},
#                              'fillcolor': '#f4ad42',
#                              'opacity': 0.25,
#                              },]
                  }
        data = [trace1]
        figure = go.Figure(data=data, layout=layout)
        py.iplot(figure, filename =\
            f'option_backtest/{root_tkr}/{file_name}/{file_name}_{date_now}')
# Overlay every flavor's cumulative return series in one combined figure.
b = {}
for file in dirs:
    b[file] = create_trace(tot_ser[file]['port_val'], file)
    root_tkr = n[file]['COMPANY_CORP_TICKER'].item()
    file_name = file.replace('.csv','')
layout = {'title' : f'{root_tkr}_All_Flavors',
          'xaxis' : {'title' : 'Date', 'type': 'date'},
          'yaxis' : {'title' : 'Returns'},
#          'shapes': [{'type': 'rect',
#                      'x0': d[i]['scr_1y'].index[0],
#                      'y0': -2,
#                      'x1': d[i]['scr_1y'].index[-1],
#                      'y1': 2,
#                      'name': 'Z-range',
#                      'line': {
#                              'color': '#f48641',
#                              'width': 2,},
#                      'fillcolor': '#f4ad42',
#                      'opacity': 0.25,
#                      },]
          }
data = list(b.values())
figure = go.Figure(data=data, layout=layout)
py.iplot(figure, filename =\
    f'option_backtest/{root_tkr}/{root_tkr}_All_Flavors_{date_now}')
print ("Time to complete:", datetime.now() - start_time)
|
988,094 | bfd4368b219490c194b917ea98c89e234b9dc2c4 | from __future__ import annotations
from typing import List
from textual import events
from textual import messages
from textual.geometry import Size, SpacingDimensions
from textual.widget import Widget
from textual.view import View
from textual.layouts.vertical import VerticalLayout
from textual.views._window_view import WindowChange
from rich.console import RenderableType
class DoNotSet:
    """Sentinel type for 'argument not supplied' (distinct from None)."""
    pass
do_not_set = DoNotSet()  # shared module-level sentinel instance
class WindowView(View):
    # A view that stacks widgets vertically and re-emits layout/scroll/size
    # changes as WindowChange messages for its parent window to observe.
    def __init__(
        self,
        widgets: List[RenderableType | Widget],
        *,
        auto_width: bool = False,
        gutter: SpacingDimensions = (1, 0),
        name: str | None = None
    ) -> None:
        # Seed the vertical layout with the initial widgets.
        layout = VerticalLayout(gutter=gutter, auto_width=auto_width)
        for widget in widgets:
            layout.add(widget)
        super().__init__(name=name, layout=layout)
    async def update(self, widgets: List[RenderableType | Widget]) -> None:
        """Replace the view's contents with a new list of widgets."""
        layout = self.layout
        assert isinstance(layout, VerticalLayout)
        layout.clear()
        for widget in widgets:
            layout.add(widget)
        await self.refresh_layout()
        await self.emit(WindowChange(self))
    async def handle_update(self, message: messages.Update) -> None:
        # Swallow child Update messages and surface them as WindowChange.
        message.prevent_default()
        await self.emit(WindowChange(self))
    async def handle_layout(self, message: messages.Layout) -> None:
        self.log("TRANSLATING layout")
        self.layout.require_update()
        message.stop()
        self.refresh()
    async def watch_virtual_size(self, size: Size) -> None:
        await self.emit(WindowChange(self))
    async def watch_scroll_x(self, value: int) -> None:
        # Scrolling invalidates the layout and triggers a repaint.
        self.layout.require_update()
        self.refresh()
    async def watch_scroll_y(self, value: int) -> None:
        self.layout.require_update()
        self.refresh()
    async def on_resize(self, event: events.Resize) -> None:
        await self.emit(WindowChange(self))
|
988,095 | 48b179f9f145098018dbe9d34642393e18e4f61c | """
This file contains method for topic assignment calculation on data.
Use calculate_assignments() for small data. (this method will return D x T dense matrix where D is the number of documents and T is the number of topics)
Use calculate_assignments_sparse() for large data. This will return a D x T _sparse_ matrix which is an approximation of the matrix returned from calculate_assignments(). The approximation results from converting negligible values to 0. Note that this might lead to a better approximation of the documents, since the NMF solver from scikit is numerical and will not reach exactly 0 even when it should.
"""
import numpy as np
import pickle as pkl
from FDM import FDM
from sklearn.decomposition import non_negative_factorization
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import normalize
from scipy.sparse import csr_matrix
def _get_empirical_dist(data, voc_size):
    # Row-normalised (L1) token-count matrix: one row per document over a
    # fixed integer vocabulary '0'..str(voc_size-1); each document in
    # ``data`` is a list of int token ids.
    cv = CountVectorizer(token_pattern='[0-9]+', vocabulary = map(lambda x: str(x), range(voc_size)) )
    csr_counts = cv.fit_transform([' '.join(map(lambda x:str(x), doc)) for doc in data])
    return normalize(csr_counts, norm= 'l1', axis=1)
def calculate_assignments(topics, data, voc_size, iterations = 1000):
"""
Parameters:
data : list of lists of int
iterations : int
maximum number of iteration for NMF (optional)
Returns:
assignments : np array
assignments[i,:] is the proportions of each topic in document i
"""
nmf_solver = non_negative_factorization(X = _get_empirical_dist(data, voc_size),
H = topics,
max_iter = iterations,
solver = 'mu',
beta_loss = 'kullback-leibler',
init = 'custom',
update_H = False,
n_components = topics.shape[0])
return nmf_solver[0]
def _csr_vappend(a,b):
a.data = np.hstack((a.data,b.data))
a.indices = np.hstack((a.indices,b.indices))
a.indptr = np.hstack((a.indptr,(b.indptr + a.nnz)[1:]))
a._shape = (a.shape[0]+b.shape[0],b.shape[1])
return a
from time import time
def calculate_assignments_sparse(topics, data, voc_size, iterations = 1000, threshold = 1e-4):
    """
    This function should be used when theres a lot of documents and the vocabulary size is large
    Parameters:
        topics : np array, T x V topic-word matrix used as fixed H
        data : list of lists
        voc_size : int, vocabulary size
        iterations : int
            maximum number of iteration for NMF (optional)
        threshold : float, proportions below this are zeroed before re-normalising
    Returns:
        assignments : CSR matrix
            assignments[i,:] is the proportions of each topic in document i where topics with low probability are removed
    """
    #calulate block size
    Ndocs_batch = (50000*10000) // voc_size #fits in 4GB of memory
    Nbatches = len(data) // Ndocs_batch
    if Nbatches*Ndocs_batch < len(data):
        Nbatches += 1
    start_time = time()
    for i in range(Nbatches):
        # Dense solve per batch, then sparsify: zero negligible entries and
        # re-normalise each row so proportions still sum to 1.
        partial_assignments = calculate_assignments(topics, data[i*Ndocs_batch:(i+1)*Ndocs_batch], voc_size, iterations)
        partial_assignments[partial_assignments < threshold] = 0
        #re-normalize
        partial_assignments /= partial_assignments.sum(axis=1)[:,np.newaxis]
        if i==0:
            sparse_assignments = csr_matrix(partial_assignments)
        else:
            sparse_assignments = _csr_vappend(sparse_assignments, csr_matrix(partial_assignments))
        print('Done batch {} out of {}. Elapsed {:.2f} min.'.format(i,Nbatches, (time()-start_time)/60 ))
    return sparse_assignments
|
988,096 | 1418baeb7bbfd435fe5f40f7c06c4cabf2652119 | import sys
sys.stdin = open("도넛츠 합계.txt")
def donut(i, j):
    """Sum the border cells of the K x K window with top-left corner (i, j)
    and append the total to the module-level sum_edge_list.

    Each corner is first double-counted by the row/column checks and then
    corrected by an explicit subtraction per corner condition.
    """
    total = 0
    bot = i + K - 1
    rgt = j + K - 1
    for x in range(i, i + K):
        for y in range(j, j + K):
            if x == i or x == bot:
                total += data[x][y]
            if y == j or y == rgt:
                total += data[x][y]
            if x == i and y == j:
                total -= data[x][y]
            if x == i and y == rgt:
                total -= data[x][y]
            if x == bot and y == j:
                total -= data[x][y]
            if x == bot and y == rgt:
                total -= data[x][y]
    sum_edge_list.append(total)
    return sum_edge_list
# Read an N x N grid and window size K, then report the maximum border sum
# over every K x K window position.
N, K = map(int, input().split())
data = [list(map(int, input().split())) for _ in range(N)]
# print(data)
sum_edge_list = []
# Slide the window over every valid top-left position.
for i in range(0, N-K+1):
    for j in range(0, N-K+1):
        donut(i, j)
print((max(sum_edge_list)))
988,097 | d408934a887a1de45b7d36cfc8f607a65e290289 | import random
################# Fruit classes #################
class Fruit():
    """Base fruit: a random (flavour, colour) pair drawn from the
    subclass's ``varieties`` list at construction time."""
    def __init__(self) -> None:
        flavour, colour = random.choice(self.varieties)
        self.flavour = flavour
        self.colour = colour

    def __repr__(self) -> str:
        return "<{}, {}, {}>".format(self.flavour, self.colour, type(self).__name__)
class Apple(Fruit):
    # Apple varieties: (flavour, colour) pairs sampled by Fruit.__init__.
    varieties = [('sour', 'green'), ('sweet', 'red')]
class Pear(Fruit):
    # Pear varieties: (flavour, colour) pairs sampled by Fruit.__init__.
    varieties = [('mellow', 'yellow'), ('sharp', 'green')]
################# Tree classes #################
class Tree():
    """Base tree: grows ``fecundity`` fruits of ``fruit_type`` each time it
    blossoms; harvesting removes and returns everything on the tree."""
    def __init__(self) -> None:
        self.fruits = []

    def __repr__(self) -> str:
        return "{} tree".format(self.fruit_type.__name__)

    def blossom(self):
        """Grow a fresh batch of fruit onto the tree."""
        self.fruits.extend(self.fruit_type() for _ in range(self.fecundity))

    def harvest(self):
        """Strip the tree bare and return the crop."""
        crop, self.fruits = self.fruits, []
        return crop
class AppleTree(Tree):
    # Bears 8 Apples per blossom.
    fecundity = 8
    fruit_type = Apple
class PearTree(Tree):
    # Bears 5 Pears per blossom.
    fecundity = 5
    fruit_type = Pear
################# Cider classes #################
class Cider():
    """Tallies the flavours of a fruit crop; the barrel is named after the
    most common flavour (first-seen order breaks ties)."""
    def __init__(self, fruitlist) -> None:
        self.flavour = dict.fromkeys(("sweet", "sour", "mellow", "sharp"), 0)
        for fruit in fruitlist:
            self.flavour[fruit.flavour] += 1

    def __repr__(self) -> str:
        dominant = max(self.flavour, key=self.flavour.get)
        return f"And you get a barrel of {dominant} cider from the fruit!"
################# Farm classes #################
class Farm():
    # Interactive cider farm: plants a user-chosen number of apple trees.
    def __init__(self) -> None:
        user_input = int(input(
            "Welcome to my Cider Farm, how many apple trees would you like to plant? "))
        self.orchard = [AppleTree() for x in range(user_input)]
    def __repr__(self) -> str:
        return f"The farm currently has {len(self.orchard)} trees planted"
    def spring(self):
        # Every tree bears a new batch of fruit.
        for tree in self.orchard:
            tree.blossom()
        print("The trees have bore fruit!")
    def autumn_harvest(self):
        # Collect and return all fruit from every tree.
        autumn_crop = []
        for tree in self.orchard:
            autumn_crop.extend(tree.harvest())
        return autumn_crop
    def brew_cider(self, fruitlist):
        # Turn a harvested crop into this farm's cider barrel.
        self.cider = Cider(fruitlist)
|
988,098 | ef0024931e64dbf494b77f1833f793f2d0eca7ef | '''
Author:
Alexandros Kanterakis (kantale@ics.forth.gr)
This is a script to help grade exercises for this course
'''
import re
import os
import glob
import json
import email
import time
import argparse
import smtplib, ssl # For mail
import pandas as pd
from itertools import groupby
from collections import defaultdict
from os.path import expanduser
from get_ask import get_ask
from params import Params
try:
    from penalties import Penalties
except ImportError:
    # Fallback when no penalties.py is available: an empty penalty table.
    class Penalties:
        # BUG FIX: the original wrote ``PENALTIES: {}`` — a bare annotation
        # that never creates the attribute, so Penalties.PENALTIES raised
        # AttributeError. A real assignment is required.
        PENALTIES = {}
class Utils:
    '''
    Useful generic utils
    '''
    @staticmethod
    def get_home_dir():
        '''Return the current user's home directory.'''
        return expanduser("~")

    @staticmethod
    def get_immediate_subdirectories(a_dir):
        '''
        Yield the full paths of a_dir's immediate subdirectories.
        https://stackoverflow.com/a/800201
        '''
        candidates = (os.path.join(a_dir, entry) for entry in os.listdir(a_dir))
        yield from (path for path in candidates if os.path.isdir(path))
class Mail:
    # Thin wrapper around smtplib for sending grade e-mails through Gmail.
    # Path (relative to $HOME) of the JSON file holding the Gmail password.
    PASSWORD_PATH = '.gmail/settings.json'
    def __init__(self,):
        self.connect_to_gmail()
    @staticmethod
    def get_password():
        # Read the password from ~/.gmail/settings.json ({"password": ...}).
        password_filename = os.path.join(
            Utils.get_home_dir(),
            Mail.PASSWORD_PATH
        )
        with open(password_filename) as f:
            data = json.load(f)
        return data['password']
    def connect_to_gmail(self,):
        # Open an authenticated STARTTLS session, kept open until
        # disconnect_from_gmail() is called.
        port = 587 # For starttls
        smtp_server = "smtp.gmail.com"
        sender_email = "alexandros.kanterakis@gmail.com"
        password = Mail.get_password()
        context = ssl.create_default_context()
        self.server = smtplib.SMTP(smtp_server, port)
        self.server.ehlo() # Can be omitted
        self.server.starttls(context=context)
        self.server.ehlo() # Can be omitted
        self.server.login(sender_email, password)
        print ('CONNECTED TO GMAIL')
    def do_send_mail(self, to, subject, text, sleep=10, actually_send_mail=False):
        # Build a UTF-8 MIME message; only actually send when
        # actually_send_mail is True, otherwise print the body (dry run).
        # Sleeps between mails to stay under Gmail rate limits.
        from email.header import Header
        from email.mime.text import MIMEText
        from email.mime.multipart import MIMEMultipart
        #msg = MIMEText(text, 'plain', 'utf-8') # If text is garbled, try this
        msg = MIMEText(text)
        sender_email = "alexandros.kanterakis@gmail.com"
        receiver_email = to
        email = MIMEMultipart('mixed') # email = MIMEMultipart()
        email['From'] = sender_email
        email['To'] = receiver_email
        email['Subject'] = Header(subject, 'utf-8')
        msg.set_payload(text.encode('utf-8')) #msg.set_payload(text.encode('ascii'))
        email.attach(msg)
        message = email.as_string()
        if False:
            # Dead branch kept from an earlier plain-text implementation.
            message = 'Subject: {}\n\n{}'.format(subject, text)
        if actually_send_mail:
            self.server.sendmail(sender_email, receiver_email, message)
        else:
            print (text)
            #print (message)
        time.sleep(sleep)
        print ('Mail sent')
    def disconnect_from_gmail(self,):
        self.server.quit()
        print ('DISCONNECTED FROM GMAIL')
class Grades:
    """Loads student submissions, supports interactive grading, aggregation
    and e-mailing of the results."""
    # Filetypes
    IPYNB = 1
    MIME = 2
    PLAIN = 3
    # Every (mis)spelling of "exercise" observed in submissions; used to
    # detect exercise-header comments such as "# askhsh 3".
    declarations = [
        'askhsh','Askhsh','ASKHSH','Askisi','askisi',
        '΄askhsh','ΆΣΚΗΣΗ','ΑΣΚΗΣΗ','ασκηση','άσκηση',
        'Άσκηση','ασκιση', 'akshsh', 'Αskhsh', 'Askhsk',
        'Απαντηση ασκησης', 'Απάντηση ασκησης', 'απαντηση ασκησης',
        'Task_', 'απαντηση ακσησης', 'απάντηση άσκησης',
        'this is the solution for ex.', r'-+ΑΣΚΗΣΗ',
        "'Ασκηση", "Αskisi", "Άσκση", "asksisi", 'Aslisi',
        'Ασκηση', "Task", "ask", "AKHSH", "aksisi", 'Akshsh',
        'askshsh', 'ασκ', '΄άσκηση', 'Asksh', 'Askhshh', 'asksi',
        'Ask', 'askkisi', 'aσκηση', 'ASkhsh', '΄Άσκηση', 'Akhsh',
        'Askhh', 'Askshsh', '΄΄Ασκηση', '΄΄Άσκηση', 'Άskisi', 'Αskisi',
        '.+skisi',
        'Exercise', 'exercise', 'ex', 'exercise.', 'Ex', 'Ex.',
        'excercise', 'exercice', 'EX', 'EX.'
    ]
    # Matches an exercise-header comment and captures the exercise number.
    ex_regexp = re.compile(r'^\s*#+\s*({})\s*_*(?P<ask>\d+)'.format('|'.join(declarations)))
    # File name pattern for saved grader comments, per student/exercise.
    SOLUTIONS_FILENAME_PATTERN = 'AM_{id_}_ASK_{ASK}'
    GRADE_RE = r'^-?\d+$' # The regexp that matched grades.
    def __init__(self, directory, solutions_dir, action,
            ex=None,
            actually_send_mail=False,
            start = 1,
            end = 20,
            send_to_me=False,
            random_list=None,
            optional=None,
            show_answer_when_already_graded=False,
            ):
        # action is one of: 'grade' (interactive grading), 'send_mail'
        # (collect grades and e-mail them), 'aggregate' (just load data).
        self.dir = directory
        self.solutions_dir = solutions_dir
        self.actually_send_mail = actually_send_mail
        self.start = start
        self.end = end
        self.exercises_range = list(range(self.start, self.end+1))
        self.send_to_me = send_to_me
        self.all_anonymous_grades = [] # For plotting and statistics
        self.random_list = random_list
        self.optional = set(optional) if optional else set()
        self.show_answer_when_already_graded = show_answer_when_already_graded
        print (f'EXERCICE DIR: {self.dir}')
        print (f'SOLUTIONS DIR: {self.solutions_dir}')
        self.get_filenames(ex)
        self.get_all_exercises()
        if action == 'grade':
            self.grade()
        elif action == 'send_mail':
            self.collect_all_grades()
            self.mail = Mail()
            self.send_mail()
            self.mail.disconnect_from_gmail()
        elif action == 'aggregate':
            pass # Do nothing
        else:
            raise Exception('Unknown action: {}'.format(action))
def save_anonymoys_grades(self,):
with open('grades.json', 'w') as f:
json.dump(self.all_anonymous_grades, f)
print ('Saved anonymous grades at: grades.json ')
def get_solutions_filename(self, id_, exercise):
filename = self.SOLUTIONS_FILENAME_PATTERN.format(id_=id_, ASK=exercise)
return os.path.join(self.solutions_dir, filename)
def get_grade_from_comment(self, comment=None, filename=None):
if filename:
with open(filename) as f:
comment = f.read()
grades = [
int(x)
for x in comment.split('\n')
if re.match(self.GRADE_RE, x)
]
assert len(grades) == 1
assert grades[0] in list(range(0, 11)) + [-1] # -1 means: do not grade!
if grades[0] == -1:
return pd.NA
return grades[0]
def remove_grade_from_comment(self, comment):
return '\n'.join(
x
for x in comment.split('\n')
if not re.match(self.GRADE_RE, x)
)
    def grade(self,):
        """Interactively grade every collected answer without a saved
        comment file: the grader types a comment containing exactly one
        bare grade line and finishes with a line 'q', 'Q' or ';'."""
        # How many answers are in total?
        total = len(self.all_exercises)
        print ('Total Answers:', total)
        for i, (exercise, id_, answer) in enumerate(self.all_exercises):
            filename = self.get_solutions_filename(id_, exercise)
            print ('Progress: {}/{} {:0.2f}%'.format(i+1, total, 100*(i+1)/total))
            print ('Exercise:', exercise)
            print ('      AM:', id_, '  Origin:', os.path.join(self.dir, id_))
            print ('Filename:', filename)
            print ('==================')
            #if id_ == '3052':
            #    print (answer)
            if os.path.exists(filename):
                # A comment file already exists — skip (optionally showing it).
                print ('   Already graded..')
                if self.show_answer_when_already_graded:
                    print ('==ANSWER:==========')
                    print (answer)
                    print ('==COMMENTS:========')
                    with open(filename) as f:
                        comment = f.read()
                    print (comment)
                    print ('===================')
                continue
            print (answer)
            print ('==================')
            comment = ''
            while True:
                line = input() # NEVER USE INPUT!!
                if line.strip() in ['q', 'Q', ';']:
                    break
                comment += line + '\n'
            # Is there a grade in there?
            grade = self.get_grade_from_comment(comment)
            # Good.. no exception happened. Save the comment
            with open(filename, 'w') as f:
                f.write(comment)
def get_id_from_filename(self, filename):
# Some files are like 1234.1 1234.2 ...
dot_splitted = filename.split('.')
if re.match(r'^\d+$', dot_splitted[-1]): # last part is a number
filename = '.'.join(dot_splitted[:-1])
return os.path.split(filename)[1]
    def get_all_exercises(self,):
        """Parse every submission file into (ask, AM, answer) triples in
        self.all_exercises, merging multiple answers to the same exercise
        from the same student."""
        # Read all files
        data = []
        for filename in self.filenames:
            #print (filename)
            id_ = self.get_id_from_filename(filename)
            content = self.get_exercises(filename)
            try:
                for ask, solution in self.iterate_exercises(content, filename=filename):
                    data.append((id_, ask, solution))
            except Exception as e:
                print ('Problem in file:', filename)
                raise e
        # Group together multiple solutions to the same exercise from the same student
        self.all_exercises = defaultdict(dict)
        for group, it in groupby(sorted(data), lambda x : x[:2]):
            self.all_exercises[group[0]][group[1]] = '\n'.join(x[2] for x in it)
        # Print some stats..
        print ('Total students:', len(self.all_exercises))
        print ('Average exercises per student:', sum(len(v) for k,v in self.all_exercises.items())/len(self.all_exercises))
        # Some basic quality control..
        for k,v in self.all_exercises.items():
            for k2,v2 in v.items():
                assert k
                if not k2:
                    raise Exception('AM: {} Empty ASK:{} '.format(k, str(k2)))
        # Group and sort according to ask / AM
        self.all_exercises = sorted((int(k2), k, v2) for k,v in self.all_exercises.items() for k2,v2 in v.items())
def collect_all_grades(self,):
    """Join each collected answer with its grade/comment from disk.

    Builds ``self.all_answers`` ({AM: {ask: {answer, grade, comment}}}),
    ``self.all_anonymous_grades`` (per-student grade rows over
    ``self.exercises_range``, missing exercises counted as 0), and dumps
    the anonymous grades (without NA entries) to ``grades.json``.
    """
    all_answers = {}
    #print (json.dumps(self.all_exercises, indent=4))
    #print ('=====')
    for ask, AM, answer in self.all_exercises:
        if not AM in all_answers:
            all_answers[AM] = {}
        # Each (AM, ask) must appear only once after get_all_exercises().
        assert not ask in all_answers[AM]
        filename = self.get_solutions_filename(AM, ask)
        with open(filename) as f:
            comment = f.read()
        grade = self.get_grade_from_comment(comment)
        # Strip the grade line so the comment can be mailed verbatim.
        comment = self.remove_grade_from_comment(comment)
        all_answers[AM][ask] = {
            'answer': answer,
            'grade': grade,
            'comment': comment,
        }
    self.all_answers = all_answers
    #print (json.dumps(self.all_answers, indent=4))
    self.all_anonymous_grades = [
        [
            v.get(i, {'grade':0})['grade'] for i in self.exercises_range
        ]
        for k,v in all_answers.items()
    ]
    with open('grades.json', 'w') as f:
        # Remove NAN (pd.NA entries are "do not grade" markers)
        to_save = [
            list(filter(pd.notna, x))
            for x in self.all_anonymous_grades
        ]
        json.dump(to_save, f)
    print ('Created anonymous grades.json')
@staticmethod
def create_mail_address(AM):
    """Map a student id to its institutional address.

    Values that already look like an email (contain '@') pass through
    unchanged.
    """
    if '@' in AM:
        return AM
    return f'bio{AM}@edu.biology.uoc.gr'
@staticmethod
def create_AM_from_email(email):
    """Inverse of ``create_mail_address``: recover the AM digits from a
    ``bio<digits>@edu.biology.uoc.gr`` address.

    Anything else is returned unchanged.
    """
    match = re.fullmatch(r'bio(\d+)@edu\.biology\.uoc\.gr', email)
    return match.group(1) if match else email
def send_mail(self,):
    """Compose and send one feedback mail per graded student.

    With ``self.send_to_me`` set, every mail is redirected to the
    maintainer's address (dry-run safety).  Actual delivery is further
    gated by ``self.actually_send_mail`` inside ``do_send_mail``.
    """
    total = len(self.all_answers)
    for i, AM in enumerate(self.all_answers):
        if self.send_to_me:
            mail_address = 'alexandros.kanterakis@gmail.com'
        else:
            mail_address = Grades.create_mail_address(AM)
        print ('{}/{} -- {}'.format((i+1), total, mail_address)) # Don't comment this!
        mail = self.create_mail(AM)
        #print(mail) # Comment this!
        if True:
            self.mail.do_send_mail(
                to=mail_address,
                subject=Params.MAIL_SUBJECT.format(START=self.start, END=self.end),
                #subject=self.MAIL_SUBJECT_2, # Final
                text=mail,
                actually_send_mail=self.actually_send_mail,
            )
        #a=1/0
def create_exercise_mail(self, exercise, solution, comment, grade):
    """Render the per-exercise section of a feedback mail.

    An NA grade (the "do not grade" marker) is shown as '---',
    everything else as '<grade>/10'.
    """
    grade_str = '---' if pd.isna(grade) else f'{grade}/10'
    return Params.MAIL_EXERCISE_PATTERN.format(
        EXERCISE=exercise,
        SOLUTION=solution,
        COMMENT=comment,
        GRADE=grade_str,
    )
def create_mail(self, AM):
    """Build the full feedback mail body for one student.

    Walks ``self.exercises_range``; exercises the student never
    submitted get a zero grade (or NA when optional).  Appends a
    pandas-rendered summary table and the average, then wraps
    everything in ``Params.MAIL_PATTERN``.
    """
    exercises_mail = ''
    pandas_df = []
    # random_list mode: each student was assigned a personal subset of
    # exercises (via the module-level get_ask); only those are mailed.
    if not self.random_list is None:
        required_list = get_ask(AM)
    else:
        required_list = None
    #for ASK, details in self.all_answers[AM].items():
    for ASK in self.exercises_range:
        if required_list and not ASK in required_list:
            continue
        if ASK in self.all_answers[AM]:
            details = self.all_answers[AM][ASK]
            answer = details['answer']
            comment = details['comment']
            grade = details['grade']
        else:
            # Nothing submitted for this exercise.
            answer = '\n'
            if ASK in self.optional:
                comment = 'Αυτή η άσκηση είναι προαιρετική. Δεν θα μετρήσει στη βαθμολογία'
                grade = pd.NA
            else:
                comment = Params.SUBMIT_NOTHING # 'Δεν έστειλες τίποτα για αυτή την άσκηση!'
                grade = 0
        grade_dics = {Params.EXERCISE: ASK, Params.GRADE: grade} #{'Άσκηση': ASK, 'Βαθμός': grade}
        exercises_mail += self.create_exercise_mail(ASK, answer, comment, grade)
        pandas_df.append(grade_dics)
    pandas_df = pd.DataFrame(pandas_df)
    summary = pandas_df.to_string(index=False, na_rep='---')
    summary = summary.replace('<NA>', ' ---') # The above does not work!!!
    average = pandas_df[Params.GRADE].mean(skipna=True)
    summary += f'\n\n{Params.AVERAGE}: {average}'
    greeting = Params.GREETING.format(START=self.start, END=self.end) # Interim
    #greeting = self.GREETING_2 # Final
    ret = Params.MAIL_PATTERN.format(
        GREETING=greeting,
        AM=AM,
        EXERCISES=exercises_mail,
        SUMMARY=summary,
    )
    return ret
def get_type(self, filename):
    """Classify a submission file.

    Returns ``self.IPYNB`` when the content is valid JSON (assumed to
    be a Jupyter notebook), ``self.MIME`` when it looks like a raw
    Gmail message, and ``self.PLAIN`` otherwise.
    """
    with open(filename) as handle:
        content = handle.read()
    # Valid JSON is assumed to be a notebook export.
    try:
        json.loads(content)
        return self.IPYNB
    except json.decoder.JSONDecodeError:
        pass
    # Crude MIME sniff: raw Gmail messages always carry this header.
    if 'X-Google-Smtp-Source' in content:
        return self.MIME
    # Assuming file with plain-text content.
    return self.PLAIN
def iterate_exercises(self, text, filename=None):
    """Yield (ask, content) pairs for every exercise declared in *text*.

    Lines are scanned against ``self.ex_regexp``; a matching line opens
    a new exercise (the named group 'ask' is its id) and every line is
    accumulated into the current exercise's content until the next
    match.  *filename* is used only for error reporting.

    Raises Exception when no exercise declaration is found at all.
    """
    content = ''
    exercise = None
    for line in text.split('\n'):
        #print (line)
        m = re.match(self.ex_regexp, line)
        # Set this true to check declarations!
        if False:
            if re.match(r'^\s*#', line):
                print (line, '-->', {True: 'MATCHED', False: 'NO MATCH!'}[bool(m)])
        if m:
            if exercise:
                yield (exercise, content)
                content = ''
            exercise = m.groupdict()['ask']
        content += '\n' + line
    if exercise is None:
        # BUG FIX: the original message was an f-string without any
        # placeholder, so the failing file was never reported.
        print (f'Could not find any exercise in file: {filename}')
        print (text)
        # Raise instead of `assert False`: asserts are stripped under
        # `python -O`, which would silently yield (None, content).
        raise Exception(f'Could not find any exercise in file: {filename}')
    yield (exercise, content)
def get_exercises(self, filename):
    """Extract submission text from *filename*, dispatching on the
    detected file type (notebook / raw mail / plain text)."""
    file_type = self.get_type(filename)
    if file_type == self.IPYNB:
        return self.get_exercises_ipynb(filename)
    if file_type == self.MIME:
        return self.get_exercises_MIME(filename)
    if file_type == self.PLAIN:
        return self.get_exercises_plain(filename)
    raise Exception('Unknown type={}'.format(file_type))
def get_exercises_plain(self, filename):
    """Return the raw text of a plain submission file."""
    with open(filename) as handle:
        return handle.read()
def get_exercises_ipynb(self, filename):
    """Concatenate the source of all code cells of a Jupyter notebook,
    separated by blank lines; markdown cells are ignored."""
    with open(filename) as handle:
        notebook = json.load(handle)
    sources = []
    for cell in notebook['cells']:
        if cell['cell_type'] != 'code':
            continue
        sources.append(''.join(cell['source']))
    return '\n\n'.join(sources)
def get_exercises_MIME(self, filename):
    """Extract and concatenate the decoded text parts of a raw mail file.

    Parses the file with ``email.message_from_string`` and joins every
    sub-part that exposes ``get_payload``, decoded as UTF-8.
    NOTE(review): non-UTF-8 parts would raise here — presumably all
    submissions are UTF-8; confirm if other encodings can appear.
    """
    with open(filename) as f:
        content = f.read()
    m = email.message_from_string(content)
    payload = m.get_payload()
    #assert len(payload) == 21 # FIXME
    content = ""
    #for x in payload[1:]:
    for x in payload[:]:
        # Skip plain-string payload items; only Message-like parts
        # (which have get_payload) are decoded and appended.
        if hasattr(x, "get_payload"):
            content += '\n' + x.get_payload(decode=True).decode("utf-8")
    return content
def get_filenames(self, ex=None):
    """Populate ``self.filenames`` with submission files under ``self.dir``.

    When *ex* is given (a single student id), only files whose name
    starts with it are collected; otherwise every file is taken.
    """
    pattern = ex + '*' if ex else '*'
    self.filenames = glob.glob(os.path.join(self.dir, pattern))
    print ('Read: {} files'.format(len(self.filenames)))
@staticmethod
def get_project_grades(projects_dir = 'projects'):
    '''
    Collect the project grade of every project directory.

    Each subdirectory of *projects_dir* is named after the participating
    students ("<AM>_<AM>_...", optionally with a "bio" prefix) and must
    contain a notes.md with a line like "**<Params.GRADE>: <float>**".
    Returns a list of {'AMs': [...], 'grade': float} dicts; directories
    without notes.md are skipped with a warning.
    '''
    gen = Utils.get_immediate_subdirectories(projects_dir)
    def normalize_AM(AM):
        # Strip an optional "bio" prefix so ids match the grade tables.
        if AM.startswith('bio'):
            assert re.fullmatch(r'bio\d+', AM)
            return AM.replace('bio', '')
        return AM
    ret = []
    for d in gen:
        project_path = os.path.split(d)[1]
        AMs = project_path.split('_')
        AMs = list(map(normalize_AM, AMs))
        #print (project_path, '-->', AMs)
        notes_filename = os.path.join(d, 'notes.md')
        try:
            with open(notes_filename) as f:
                notes = f.read()
        except FileNotFoundError as e:
            print (f'WARNING: COULD NOT FIND: {notes_filename}')
            continue
        # The grade is embedded in markdown bold, e.g. "**GRADE: 8.5**".
        regexp = fr'\*\*{Params.GRADE}: ([\d\.]+)\*\*'
        m = re.search(regexp, notes)
        assert m, f'Regular expression {regexp} was not matched in file: {notes_filename}'
        grade = float(m.group(1))
        assert 0<=grade<=10.0
        ret.append({
            'AMs': AMs,
            'grade': grade,
        })
    return ret
class Aggregator:
    '''
    Aggregates all grades (exercise rounds, final exam, projects) into a
    single per-student grade, mails each student a breakdown, and writes
    final_grades.xlsx / grades.xlsx.  Running the constructor performs
    the whole pipeline.
    '''
    # Denominator used to average the final-exam exercise grades.
    TOTAL_FINAL = 10
    def __init__(self,
        excel_filename = None,
        optional = None,
        ex = None,
        send_to_me = False,
        actually_send_mail = False,
        ):
        """Run the full aggregation pipeline.

        excel_filename : registry Excel to merge the grades into (optional)
        optional       : exercise numbers that do not count when missing
        ex             : restrict processing to a single student id/email
        send_to_me     : redirect all mails to the maintainer (dry run)
        actually_send_mail : actually deliver the mails
        """
        self.excel_filename = excel_filename
        self.optional = set(map(int, optional)) if optional else set()
        self.ex = ex
        self.send_to_me = send_to_me
        self.actually_send_mail = actually_send_mail
        self.get_all_dirs()
        self.get_all_grades()
        self.average_grades()
        self.generate_excel()
    @staticmethod
    def final_grade(decimal_grade):
        """Round a decimal grade to the official scale (halves).

        Grades that would round to 4.3-4.7 are bumped to a passing 5.0;
        everything else is rounded to the nearest 0.5.
        """
        g2 = round(decimal_grade * 10) / 10
        if g2 in [4.3, 4.4, 4.5, 4.6, 4.7]:
            return 5.0
        g3 = round(decimal_grade * 2) / 2
        return g3
    def get_all_dirs(self,):
        """Discover exercise round directories (exercises1, exercises2, ...)
        and pair each with its solutionsN directory; also register the
        fixed final-exam and projects locations."""
        self.all_exercise_dirs = glob.glob('exercises?')
        self.all_dirs = {
            'exercises': [],
            'final': {
                'exercises': 'final',
                'solutions': 'solutions_final',
            },
            'projects': 'projects',
        }
        for d in self.all_exercise_dirs:
            m = re.search(r'(\d+)$', d)
            assert m
            n = int(m.group(1))
            dic = {
                'exercises': d,
                'solutions': f'solutions{n}',
            }
            self.all_dirs['exercises'].append(dic)
        print (f'All directories:')
        print (json.dumps(self.all_dirs, indent=4))
    def store_grades(self, grades, type_):
        '''
        Merge a Grades object's collected answers into self.all_grades.

        type_ : `exercises` or 'final'
        '''
        for AM, grade in grades.all_answers.items():
            if not AM in self.all_grades:
                self.all_grades[AM] = {
                    'exercises': {},
                    'final': {},
                    'project': 0.0,
                }
            for exercise, exercise_d in grade.items():
                # An exercise number must come from exactly one round.
                assert not exercise in self.all_grades[AM][type_]
                self.all_grades[AM][type_][exercise] = exercise_d['grade']
    def get_all_grades(self,):
        """Collect grades from every exercise round, the final exam
        (if its directory exists) and the project notes."""
        self.all_grades = {}
        # Get all exercise grades
        for exercise_round in self.all_dirs['exercises']:
            grades = Grades(
                directory = exercise_round['exercises'],
                solutions_dir = exercise_round['solutions'],
                action = 'aggregate',
                ex = self.ex,
            )
            grades.collect_all_grades()
            self.store_grades(grades, type_='exercises')
        #print (grades.all_answers) # {'1764': {1: {'answer': "\n# 'Ασκηση 1\n\ndef num(a):\n
        # If the final directory does not exist do not
        # collect final grades
        if not os.path.exists(self.all_dirs['final']['exercises']):
            # NOTE(review): message seems to be missing the word "find".
            print ('Could not final exercise directory')
            self.has_final = False
        else:
            self.has_final = True
            print ('Collecting final grades')
            grades = Grades(
                directory = self.all_dirs['final']['exercises'],
                solutions_dir = self.all_dirs['final']['solutions'],
                action = 'aggregate',
                ex = self.ex,
            )
            grades.collect_all_grades()
            self.store_grades(grades, type_='final')
        print ('Collecting project grades')
        project_grades = Grades.get_project_grades()
        for project_grade in project_grades:
            # A project grade applies to every participating student.
            for AM in project_grade['AMs']:
                if self.ex:
                    if AM != self.ex:
                        continue
                assert AM in self.all_grades, f'Could not find {AM} in total grades'
                assert 'project' in self.all_grades[AM]
                self.all_grades[AM]['project'] = project_grade['grade']
    def average_grades(self,):
        """Compute each student's final grade, mail the breakdown, and
        write final_grades.xlsx.

        Per student: averages the exercise grades (NA and optional-zero
        entries excluded), averages the final-exam grades over
        TOTAL_FINAL, combines everything via Params.WEIGHT_FUN, rounds
        with final_grade(), and applies any disciplinary penalty.
        """
        self.lesson_grades = {}
        self.mail = Mail()
        final_grades = [] # Final grades for excel
        total = len(self.all_grades)
        c = 0
        for AM, grades in self.all_grades.items():
            c += 1
            text = Params.START_AGGREGATE_MAIL.format(AM=AM)
            exercises_sum = 0
            exercises_count = 0
            for x in range(1, Params.TOTAL_EXERCISES+1):
                text += f'{x}\t'
                if x in grades['exercises']:
                    g = grades['exercises'][x]
                    if pd.isna(g):
                        # "do not grade" marker: shown as ---, not counted
                        text += f'---\n'
                    elif x in self.optional and g == 0:
                        # optional exercise never attempted: not counted
                        text += f'---\n'
                    else:
                        text += f'{g}\n'
                        exercises_sum += g
                        exercises_count += 1
                else:
                    if x in self.optional:
                        text += f'---\n'
                    else:
                        # mandatory exercise missing: counts as zero
                        text += '0\n'
                        exercises_count += 1
            exercise_average = exercises_sum/exercises_count
            text += f'\n{Params.AVERAGE_EXERCISES}: {exercises_sum}/{exercises_count}={exercise_average}\n\n'
            if self.has_final:
                text += 'Τελικό Διαγώνισμα:\n'
                for k,v in grades['final'].items():
                    text += f'{k}\t{v}\n'
                nominator = sum(grades['final'].values())
                denominator = self.TOTAL_FINAL
                final_average = nominator/denominator
                text += f'Μέσος όρος τελικού: {nominator}/{denominator}={final_average}\n\n'
            else:
                final_average = 0.0
            project_average = grades['project']
            text += f'{Params.PROJECT_GRADE}: {project_average}\n\n'
            decimal_grade = Params.WEIGHT_FUN(
                exercises = exercise_average,
                final=final_average,
                project=project_average,
            )
            text += f'{Params.FINAL_FLOAT_GRADE}:\n'
            text += Params.FINAL_GRADE_FUN(
                exercise_average = exercise_average,
                final_average = final_average,
                project_average = project_average,
                decimal_grade = decimal_grade,
            )
            rounded_grade = Aggregator.final_grade(decimal_grade)
            text += f'{Params.FINAL_ROUNDED_GRADE}: {rounded_grade}\n\n'
            if AM in Penalties.PENALTIES:
                # Disciplinary penalties override the computed grade.
                rounded_grade = Penalties.PENALTIES[AM]
                text += f'\n\nGrade After disciplinary actions: {rounded_grade}\n\n'
            text += Params.END_AGGREGATE_MAIL
            final_grades.append({'Email': AM, 'Final_Grade': rounded_grade})
            print (
                f'AM:{AM},'
                f' dec_grade: {decimal_grade},'
                f' rnd_grade: {rounded_grade},'
                f' ex: {exercise_average},'
                f' fin: {final_average},'
                f' proj: {project_average} '
            )
            #print (text)
            self.lesson_grades[AM] = rounded_grade
            if self.send_to_me:
                mail_address = 'alexandros.kanterakis@gmail.com'
            else:
                mail_address = Grades.create_mail_address(AM)
            subject = Params.FINAL_SUBJECT #'ΒΙΟΛ-494, Τελικός βαθμός'
            if self.actually_send_mail:
                self.mail.do_send_mail(
                    to=mail_address,
                    subject=subject,
                    text=text,
                    actually_send_mail=True,
                )
                print (f'{c}/{total} Sent mail to: {mail_address}')
            else:
                print (text)
        self.mail.disconnect_from_gmail()
        # Create final_grades excel
        final_grades_df = pd.DataFrame(final_grades)
        final_grades_df.to_excel('final_grades.xlsx')
        print ('Created: final_grades.xlsx')
    def generate_excel(self,
        new_column = 'Βαθ Εξετ 2',
        #AM_column = 'ΑΜ', # <-- Attention! this is Greek letters!
        AM_column = 'email',
        AM_column_is_email = True,
        ):
        """Merge self.lesson_grades into the registry Excel file.

        Reads self.excel_filename, adds/fills *new_column* per row
        (matching students by AM derived from *AM_column*), warns about
        graded students missing from the Excel, and writes grades.xlsx.
        """
        if not self.excel_filename:
            print ('Excel filename not found!')
            return
        print (f'Reading: {self.excel_filename}')
        original_excel = pd.read_excel(self.excel_filename)
        records = original_excel.to_dict('records')
        new_records = []
        in_excel = set()
        for record in records:
            new_dict = dict(record)
            AM = str(record[AM_column]) # this might be an email
            if AM_column_is_email:
                AM = Grades.create_AM_from_email(AM)
            if AM in self.lesson_grades:
                new_dict[new_column] = str(self.lesson_grades[AM])
            else:
                new_dict[new_column] = ''
            new_records.append(new_dict)
            in_excel.add(AM)
        # Get students with grades that are NOT in excel!
        students_with_grades = set(self.lesson_grades)
        students_with_grades_not_in_excel = students_with_grades-in_excel
        if students_with_grades_not_in_excel:
            print ('WARNING!!!!')
            print ('The following graded students are not in Excel!!!')
            for student in students_with_grades_not_in_excel:
                print (f'AM: {student} Grade: {self.lesson_grades[student]}')
            print ('==================================================')
        new_excel = pd.DataFrame(new_records)
        new_excel.to_excel('grades.xlsx')
        print ('Generated: grades.xlsx')
if __name__ == '__main__':
    # The triple-quoted string below is a scratchpad of real invocations
    # kept by the author as usage documentation; it has no runtime effect.
    '''
    #GRADE
    python grade.py --dir /Users/admin/BME_17/exercises1 --sol /Users/admin/BME_17/solutions1 --action grade --start 1 --end 25 --show_answer_when_already_graded
    #SEND EMAIL FOR EXERCISES
    python grade.py --dir /Users/admin/BME_17/exercises1 --sol /Users/admin/BME_17/solutions1 --action send_mail --start 1 --end 25
    python grade.py --dir /Users/admin/BME_17/exercises1 --sol /Users/admin/BME_17/solutions1 --action send_mail --start 1 --end 25 --ex alkaios.lmp@gmail.com --actually_send_mail --send_to_me
    python grade.py --dir /Users/admin/BME_17/exercises1 --sol /Users/admin/BME_17/solutions1 --action send_mail --start 1 --end 25 --actually_send_mail
    python grade.py --dir /Users/admin/BME_17/exercises1 --sol /Users/admin/BME_17/solutions1 --action send_mail --start 1 --end 25 --actually_send_mail --ex jacobgavalas.bme.uoc@gmail.com
    python grade.py --dir /Users/admin/BME_17/exercises1 --sol /Users/admin/BME_17/solutions1 --action send_mail --start 1 --end 25 --actually_send_mail --ex iropap94@gmail.com
    #AGGREGATE
    python grade.py --action aggregate --send_to_me --ex alkaios.lmp@gmail.com
    python grade.py --action aggregate --ex letsosalexandros@gmail.com
    python grade.py --action aggregate --ex med12p1170012@med.uoc.gr
    python grade.py --action aggregate --ex manthostr@gmail.com
    python grade.py --action aggregate --ex manthostr@gmail.com --actually_send_mail --send_to_me
    python grade.py --action aggregate --ex alkaios.lmp@gmail.com --actually_send_mail --send_to_me
    python grade.py --action aggregate --actually_send_mail
    ====================
    python grade.py --dir /Users/admin/biol-494/exercises/ --sol /Users/admin/biol-494/solutions --action grade
    python grade.py --dir /Users/admin/biol-494/exercises/ --sol /Users/admin/biol-494/solutions --action send_mail
    python grade.py --dir /Users/admin/biol-494/exercises/ --sol /Users/admin/biol-494/solutions --action send_mail --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises/ --ex 3158 --action grade
    python grade.py --dir /Users/admin/biol-494/exercises/ --sol /Users/admin/biol-494/solutions --ex 2743 --action grade
    python grade.py --dir /Users/admin/biol-494/exercises/ --sol /Users/admin/biol-494/solutions --ex 2743 --action send_mail --actually_send_mail
    # 2nd Round grade:
    python grade.py --dir /Users/admin/biol-494/exercises2/ --sol /Users/admin/biol-494/solutions2 --action grade --start 21 --end 40
    # 2nd Round Send mail:
    python grade.py --dir /Users/admin/biol-494/exercises2/ --sol /Users/admin/biol-494/solutions2 --ex 2743 --action send_mail --start 21 --end 40
    python grade.py --dir /Users/admin/biol-494/exercises2/ --sol /Users/admin/biol-494/solutions2 --ex 2743 --action send_mail --start 21 --end 40 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises2/ --sol /Users/admin/biol-494/solutions2 --ex 3052 --action send_mail --start 21 --end 40 --actually_send_mail --send_to_me
    python grade.py --dir /Users/admin/biol-494/exercises2/ --sol /Users/admin/biol-494/solutions2 --action send_mail --start 21 --end 40 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises2/ --sol /Users/admin/biol-494/solutions2 --ex 3052 --action send_mail --start 21 --end 40 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises2/ --sol /Users/admin/biol-494/solutions2 --action send_mail --start 21 --end 40 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises/ --sol /Users/admin/biol-494/solutions --ex 2743 --action grade
    python grade.py --dir /Users/admin/biol-494/exercises2/ --sol /Users/admin/biol-494/solutions2 --action grade
    # 3rd Round
    python grade.py --dir /Users/admin/biol-494/exercises3/ --sol /Users/admin/biol-494/solutions3 --action grade --start 41 --end 60
    python grade.py --dir /Users/admin/biol-494/exercises3/ --sol /Users/admin/biol-494/solutions3 --ex 3053 --action send_mail --start 41 --end 60 --send_to_me --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises3/ --sol /Users/admin/biol-494/solutions3 --action send_mail --start 41 --end 60 --actually_send_mail
    # 4th Round
    python grade.py --dir /Users/admin/biol-494/exercises4/ --sol /Users/admin/biol-494/solutions4 --action grade --start 61 --end 80
    python grade.py --dir /Users/admin/biol-494/exercises4/ --sol /Users/admin/biol-494/solutions4 --action send_mail --ex 3168 --send_to_me --actually_send_mail --start 61 --end 80
    python grade.py --dir /Users/admin/biol-494/exercises4/ --sol /Users/admin/biol-494/solutions4 --action send_mail --ex 2729 --actually_send_mail --start 61 --end 80
    python grade.py --dir /Users/admin/biol-494/exercises4/ --sol /Users/admin/biol-494/solutions4 --action send_mail --ex 2913 --actually_send_mail --start 61 --end 80
    python grade.py --dir /Users/admin/biol-494/exercises4/ --sol /Users/admin/biol-494/solutions4 --action send_mail --ex 3125 --actually_send_mail --start 61 --end 80
    python grade.py --dir /Users/admin/biol-494/exercises4/ --sol /Users/admin/biol-494/solutions4 --action send_mail --ex 2898 --actually_send_mail --start 61 --end 80
    python grade.py --dir /Users/admin/biol-494/exercises4/ --sol /Users/admin/biol-494/solutions4 --action send_mail --ex 2871 --actually_send_mail --start 61 --end 80
    # 5th Round
    python grade.py --dir /Users/admin/biol-494/exercises5/ --sol /Users/admin/biol-494/solutions5 --action grade --start 81 --end 90
    python grade.py --dir /Users/admin/biol-494/exercises5/ --sol /Users/admin/biol-494/solutions5 --ex 2970 --action send_mail --send_to_me --actually_send_mail --start 81 --end 90
    python grade.py --dir /Users/admin/biol-494/exercises5/ --sol /Users/admin/biol-494/solutions5 --action send_mail --actually_send_mail --start 81 --end 90
    python grade.py --dir /Users/admin/biol-494/exercises5/ --sol /Users/admin/biol-494/solutions5 --ex 2967 --action send_mail --actually_send_mail --start 81 --end 90
    python grade.py --dir /Users/admin/biol-494/exercises5/ --sol /Users/admin/biol-494/solutions5 --ex 3037 --action send_mail --actually_send_mail --start 81 --end 90
    # 6th Round
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --action grade --start 91 --end 100 --optional 94
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --action grade --start 91 --end 100 --ex 2979
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --ex 2979 --action send_mail --start 91 --end 100
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --ex 2979 --action send_mail --actually_send_mail --start 91 --end 100
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --action send_mail --start 91 --end 100 --optional 94
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --action send_mail --start 91 --end 100 --optional 94 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --ex 3103 --action send_mail --start 91 --end 100 --optional 94 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --ex 3089 --action send_mail --start 91 --end 100 --optional 94 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --ex 3052 --action send_mail --start 91 --end 100 --optional 94 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --ex 3094 --action send_mail --start 91 --end 100 --optional 94 --actually_send_mail
    python grade.py --dir /Users/admin/biol-494/exercises6/ --sol /Users/admin/biol-494/solutions6 --ex 2871 --action send_mail --start 91 --end 100 --optional 94 --actually_send_mail
    # final
    python grade.py --dir /Users/admin/biol-494/final/ --sol /Users/admin/biol-494/solutions_final --action grade --start 1 --end 100
    python grade.py --dir /Users/admin/biol-494/final/ --sol /Users/admin/biol-494/solutions_final --action grade --start 1 --end 100 --ex 2979
    python grade.py --dir /Users/admin/biol-494/final/ --sol /Users/admin/biol-494/solutions_final --ex 2979 --action send_mail --random_list 10 --actually_send_mail --send_to_me --start 1 --end 100
    python grade.py --dir /Users/admin/biol-494/final/ --sol /Users/admin/biol-494/solutions_final --ex 2979 --action send_mail --random_list 10 --actually_send_mail --start 1 --end 100
    python grade.py --dir /Users/admin/biol-494/final/ --sol /Users/admin/biol-494/solutions_final --ex 3117 --action send_mail --random_list 10 --actually_send_mail --start 1 --end 100 --send_to_me
    python grade.py --dir /Users/admin/biol-494/final/ --sol /Users/admin/biol-494/solutions_final --action send_mail --random_list 10 --actually_send_mail --start 1 --end 100
    # Aggregate
    python grade.py --action aggregate
    python grade.py --action aggregate --excel 494_ΒΙΟΛ-494.xlsx
    python grade.py --action aggregate --excel 494_ΒΙΟΛ-494.xlsx --optional 94
    python grade.py --action aggregate --excel 494_ΒΙΟΛ-494.xlsx --optional 94 --ex 2871 --send_to_me
    python grade.py --action aggregate --excel ΒΙΟΛ-494_Ιούνιος_2021.xlsx --optional 94
    '''
    # Command line interface: either aggregate everything (--action
    # aggregate) or drive a single Grades run (grade / send_mail).
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", help="Directory with exercises")
    parser.add_argument("--sol", help="Directory with solutions")
    parser.add_argument("--ex", help="Examine only given ΑΜ")
    parser.add_argument("--action", help="What to do: grade")
    parser.add_argument("--actually_send_mail", action="store_true")
    parser.add_argument("--send_to_me", action="store_true")
    parser.add_argument("--show_answer_when_already_graded", action="store_true")
    parser.add_argument("--start", type=int, help="Start from")
    parser.add_argument("--end", type=int, help="Start end")
    parser.add_argument("--random_list", type=int, help='Number of random exercises')
    parser.add_argument("--optional", nargs='*', type=int, help="Optional exercises")
    parser.add_argument("--excel", help="Excel file with all students")
    args = parser.parse_args()
    if args.action == 'aggregate':
        # Aggregation runs its whole pipeline from the constructor.
        a = Aggregator(
            excel_filename = args.excel,
            optional = args.optional,
            ex=args.ex,
            send_to_me=args.send_to_me,
            actually_send_mail=args.actually_send_mail,
        )
    else:
        g = Grades(directory=args.dir, ex=args.ex, solutions_dir=args.sol,
            action=args.action,
            actually_send_mail=args.actually_send_mail,
            send_to_me=args.send_to_me,
            start = args.start,
            end = args.end,
            random_list = args.random_list,
            optional = args.optional,
            show_answer_when_already_graded = args.show_answer_when_already_graded,
        )
|
988,099 | ef89794cd697007302b7e88a63a5c744b3dce877 | '''
@Author: your name
@Date: 2020-05-27 13:33:49
@LastEditTime: 2020-05-28 14:17:18
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /model-building/recommend/fm/inputs.py
'''
import tensorflow as tf
from collections import namedtuple
# Declarative feature-column descriptors consumed by the builders below.
# SparseFeature: a categorical feature that gets a [vocab_size, embedding_dim]
#                embedding matrix.  DenseFeature: a single real-valued input.
SparseFeature = namedtuple('SparseFeature', ['feature_name', 'vocab_size', 'embedding_dim'])
DenseFeature = namedtuple('DenseFeature', ['feature_name'])
def build_input_placeholder(feature_columns):
    """Create one TF1 placeholder per feature column, keyed by feature name.

    SparseFeature columns get a float32 placeholder of width
    ``embedding_dim``; DenseFeature columns get a float32 placeholder of
    width 1.  Anything else raises ValueError.
    """
    placeholders = {}
    for feature in feature_columns:
        if isinstance(feature, SparseFeature):
            shape = [None, feature.embedding_dim]
        elif isinstance(feature, DenseFeature):
            shape = [None, 1]
        else:
            raise ValueError('unknown')
        placeholders[feature.feature_name] = tf.placeholder(dtype=tf.float32, shape=shape)
    return placeholders
def build_embedding_matrix_dict(sparse_feature_columns):
    """Create one trainable embedding matrix per sparse feature.

    Each matrix is a TF1 variable named '<feature_name>_embedding_matrix'
    of shape [vocab_size, embedding_dim] with random-normal init.
    Raises ValueError for anything that is not a SparseFeature.
    """
    embedding_matrix_dict = {}
    for feature in sparse_feature_columns:
        if not isinstance(feature, SparseFeature):
            # Message (Chinese): "only sparse features can have an embedding matrix"
            raise ValueError('只有sparse_feature才能建立embedding矩阵')
        # tf.get_variable: name must be unique per graph/variable scope,
        # so calling this twice without reuse would raise.
        embedding_matrix = tf.get_variable(
            name=feature.feature_name+'_embedding_matrix',
            shape=[feature.vocab_size,feature.embedding_dim],
            initializer=tf.random_normal_initializer(),
            dtype=tf.float32
        )
        embedding_matrix_dict[feature.feature_name] = embedding_matrix
    return embedding_matrix_dict
# test = SparseFeature(embedding_dim=4)
# print(test.embedding_dim) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.