index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
987,200 | b71441c671b5afc9207a3d47d5845ea90f5a66f8 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-02 08:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `duration` and `nb_detection` integer columns to FishDetection.

    `default=1` back-fills existing rows; `preserve_default=False` means the
    default is dropped from the schema after the migration, so new rows must
    supply explicit values.
    """

    dependencies = [
        ('fish', '0005_fishdetection_aquarium_id'),
    ]

    operations = [
        migrations.AddField(
            model_name='fishdetection',
            name='duration',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='fishdetection',
            name='nb_detection',
            field=models.IntegerField(default=1),
            preserve_default=False,
        ),
    ]
|
987,201 | 133732564f8f125343a897eebfadbfbaa39b3898 | from PIL import Image
# Convert font.png to a plain-text matrix of grey levels (one row per pixel row).
im = Image.open('font.png', 'r')
pix_val = list(im.getdata())
w, h = im.size
for i in range(len(pix_val)):
    # Average the colour channels of each pixel down to a single grey value.
    pix_val[i] = int(round(sum(pix_val[i]) / float(len(pix_val[i]))))
import numpy as np
pix_val = np.array(pix_val)
# BUG FIX: the original hard-coded reshape(512, 512); use the real image
# dimensions (rows = height, cols = width) so any image size works.
# Also removed the unused `pix_val_flat` list comprehension.
pix_val = pix_val.reshape(h, w)
np.savetxt("font.txt", pix_val, delimiter=" ")
|
987,202 | 6d590643753538b9ca7fad9162465a14c216d00d | #! /usr/bin/python3.5
import os
import re
from config import token
import telebot
from sql import Db
from system_func import get_ip
from system_func import is_admin
from system_func import list_to_str
bot = telebot.TeleBot(token)
db_dir = os.path.dirname(os.path.abspath(__file__)) + "/words.db"
def get_cmd_param(message, default):
    """Return the first integer argument of a slash-command, or *default*.

    E.g. for "/topwords 5" this returns 5; with no numeric argument the
    supplied default is returned unchanged.
    """
    match = re.search(r'/\w+ (\d+)', message.text)
    return int(match.group(1)) if match else default
@bot.message_handler(commands=["start"])
def start(message):
    # /start is simply an alias for /help.
    cmd_help(message)
@bot.message_handler(commands=["ip"])
def ip(message):
    # Reply with the host's IP wrapped in backticks (Markdown code span).
    bot.send_message(message.chat.id, 'Мой ip: `' + get_ip() + '`', parse_mode='Markdown')
@bot.message_handler(commands=["allwords"])
def all_words(message):
    """Send the overall word-frequency list for the chat as HTML."""
    db = Db(db_dir)
    words = db.get_frequency()
    # Columns [0, 1] select which fields of each row go into the message.
    _str = list_to_str('Топ слов', words, [0, 1])
    if _str:
        bot.send_message(message.chat.id, str(_str), parse_mode='HTML')
@bot.message_handler(commands=["topwords"])
def top_words(message):
    """Send the top words; the optional numeric command argument (default 3)
    is forwarded to Db.get_frequency_long — presumably a length/count
    threshold (TODO confirm in sql.Db)."""
    db = Db(db_dir)
    words = db.get_frequency_long(get_cmd_param(message, 3))
    _str = list_to_str('Топ слов', words, [0, 1])
    if _str:
        bot.send_message(message.chat.id, str(_str), parse_mode='HTML')
@bot.message_handler(commands=["mywords"])
def my_words(message):
    """Send the sender's personal word-frequency list (same optional numeric
    argument as /topwords, default 3)."""
    db = Db(db_dir)
    words = db.get_frequency_by_id(message.from_user.id, get_cmd_param(message, 3))
    _str = list_to_str('Топ слов', words, [0, 1])
    if _str:
        bot.send_message(message.chat.id, str(_str), parse_mode='HTML')
@bot.message_handler(commands=["die"])
def die(message):
    # Admin-only: stop the long-polling loop, which shuts the bot down.
    if is_admin(message):
        bot.stop_polling()
@bot.message_handler(commands=["test"])
def test(message):
    # Debug helper: dump the raw telebot message object to stdout.
    print(message)
@bot.message_handler(commands=["help"])
def cmd_help(message):
    """Send the command list; the ``` fence renders it monospaced in Markdown."""
    text = '''
Мои возможные команды:```
help - Показывает этот список
mywords - Частота употребляемых мной слов
topwords - Самые часто употребляемые слова
allwords - Самые часто употребляемые слова
```'''
    bot.send_message(message.chat.id, text, parse_mode='Markdown')
@bot.message_handler(content_types=["text"])
def text_request(message):
    """Tokenise every plain-text message and update per-user word counts."""
    db = Db(db_dir)
    user_id = message.from_user.id
    # First strip URLs, then punctuation, then split on whitespace.
    text = re.sub(r'(http.+?\s)|(http.+$)', r' ', message.text)
    text = re.sub(r'[_+-.,!@#$%^&*();/|<>"\']', r' ', text).split()
    if text:
        for word in text:
            word = word.lower()
            # NOTE(review): a truthy get_word_by_user appears to mean "word
            # already recorded" -> increment count, else insert a fresh row.
            # Confirm against sql.Db.
            if db.get_word_by_user(user_id, word):
                db.add_word(user_id, word)
            else:
                db.new_word(user_id, word)
if __name__ == '__main__':
    # Polling and webhooks are mutually exclusive; drop any stale webhook first.
    bot.remove_webhook()
    print('Бот работает!')
    bot.polling(none_stop=True)
|
987,203 | e44a78c4ed824700a582bee0f1567e5ae8bef8ee | import numpy as np
import pandas as pd
from scipy import stats
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + exp(-x))."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def rvec(x):
    """View *x* as a 2-D array; 1-D input becomes a single row (1 x n)."""
    as_row = np.atleast_2d(x)
    return as_row
def cvec(x):
    """Column-vector counterpart of rvec: the transpose of the 2-D view."""
    return np.atleast_2d(x).T
def to_3d(mat):
    """Promote *mat* to 3-D and move the new trailing axis to the front."""
    cube = np.atleast_3d(mat)
    return cube.transpose(2, 0, 1)
def srho(x, y):
    """Spearman rank correlation of x and y (statistic only, no p-value)."""
    result = stats.spearmanr(x, y)
    return result[0]
"""
VECTORIZED PAIRWISE CORRELATION
A : (n x p x s)
n: sample size (rows of the data)
p: columns of the data (could be bootstrapped columns)
s: copies of the n x p matrix (could be studentized copies)
"""
def pairwise_cor(A, B):
    """Pearson correlation between matching columns of A and B.

    A, B : identically-shaped arrays, (n x p) or (n x p x s), where n is the
    sample size (rows). The correlation is computed column-by-column (and
    slice-by-slice in the 3-D case) using the sample covariance / std (ddof=1).
    """
    assert A.shape == B.shape
    n = A.shape[0]
    # Means need an explicit broadcastable shape; the sample stds already do.
    if len(A.shape) == 2:
        mu_A = np.atleast_2d(A.mean(0))
        mu_B = np.atleast_2d(B.mean(0))
    else:
        mu_A = np.atleast_3d(A.mean(0)).transpose(2, 0, 1)
        mu_B = np.atleast_3d(B.mean(axis=0)).transpose(2, 0, 1)
    se_A = A.std(axis=0, ddof=1)
    se_B = B.std(axis=0, ddof=1)
    cov = np.sum((A - mu_A) * (B - mu_B), 0) / (n - 1)
    return cov / (se_A * se_B)
def bs_student_spearman(x, y, n_bs, n_s, alpha=0.05):
    """Bootstrap confidence intervals for the Spearman correlation of x and y.

    Three interval types are returned in the 'tt' column: 'student'
    (studentized bootstrap), 'normal' (normal approximation) and 'quant'
    (percentile). n_bs is the number of bootstrap replicates; n_s the number
    of inner replicates used to studentize. Returns a DataFrame with columns
    rho, lb, ub, tt, alpha.
    """
    # alpha = rvec([0.05, 0.1, 0.2])
    tt = ['student','normal','quant']
    # NOTE: `|` is a bitwise-or of two bools here; it works, but `or` is the idiom.
    if isinstance(alpha, float) | isinstance(alpha,list):
        alpha = np.array([alpha])
        alpha = rvec(alpha)
    assert len(x) == len(y)
    assert np.all(alpha > 0) & np.all(alpha < 0.5)
    # (i) Get baseline statistic
    rho = stats.spearmanr(x, y)[0]
    n = len(x)
    # One (lower, upper) tail-probability pair per alpha level.
    pvals = np.r_[alpha/2,1-alpha/2].T
    # (ii) Transform data into ranks and sample with replacement.
    # frac=n_bs draws n*n_bs rows; y is indexed by the SAME draw so (x, y)
    # pairs stay together.
    x_r, y_r = stats.rankdata(x), stats.rankdata(y)
    x_bs = pd.Series(x_r).sample(frac=n_bs,replace=True)
    y_bs = pd.Series(y_r).iloc[x_bs.index]
    x_bs = x_bs.values.reshape([n,n_bs])
    y_bs = y_bs.values.reshape([n,n_bs])
    rho_bs = pairwise_cor(x_bs, y_bs)
    se_bs = rho_bs.std(ddof=1)
    # (iii) Bootstrap the bootstraps (studentize) to get standard error
    x_s = pd.DataFrame(x_bs).sample(frac=n_s,replace=True)
    y_s = pd.DataFrame(y_bs).iloc[x_s.index]
    x_s = x_s.values.reshape([n_s,n,n_bs]).transpose(1,2,0)
    y_s = y_s.values.reshape([n_s,n,n_bs]).transpose(1,2,0)
    se_s = pairwise_cor(x_s, y_s).std(axis=1,ddof=1)
    # Free the large 3-D arrays before building the result.
    del x_s, y_s
    # Get the confidence intervals for the different approaches.
    # Student/normal intervals flip the quantile order ([:,[1,0]]) because
    # rho - se*z_upper gives the LOWER bound; the percentile rows z_q are
    # used directly as the interval.
    z_q = np.quantile(rho_bs,pvals.flat).reshape(pvals.shape)
    z_n = stats.norm.ppf(pvals)
    t_s = (rho_bs-rho)/se_s
    z_s = np.quantile(t_s,pvals.flat).reshape(pvals.shape)
    df = pd.DataFrame(np.r_[rho - se_bs*z_s[:,[1,0]], rho - se_bs*z_n[:,[1,0]], z_q],columns=['lb','ub'])
    df.insert(0,'rho',rho)
    df = df.assign(tt=np.repeat(tt,len(pvals)),alpha=np.tile(2*pvals[:,0],len(tt)))
    return df
|
987,204 | ce78dd874ffd82fd2a10a06a214eb9b3005dd1f8 | '''
A test simulation involving the SEIR flu model in isolation.
'''
from pram.data import GroupSizeProbe, ProbeMsgMode
from pram.entity import Group, Site
from pram.rule import SEIRFluRule
from pram.sim import Simulation
# Fixed RNG seed so repeated runs reproduce the same trajectories.
rand_seed = 1928

# Probe that displays the population mass across SEIR flu states each step.
probe_grp_size_flu = GroupSizeProbe.by_attr('flu', SEIRFluRule.ATTR, SEIRFluRule.State, msg_mode=ProbeMsgMode.DISP, memo='Mass distribution across flu states')

# Fluent builder: seed the RNG, attach the flu rule and probe, create one
# 1000-agent group, print a pre-run summary, run 16 iterations, compact the
# group space, then print a post-run summary.
(Simulation().
    set().
        rand_seed(rand_seed).
        done().
    add().
        rule(SEIRFluRule()).
        probe(probe_grp_size_flu).
        done().
    new_group(1000).
        done().
    summary(True, 0,0,0,0, (0,1)).
    run(16).
    compact().
    summary(False, 8,0,0,0, (1,0))
)
# (Simulation().
# set().
# rand_seed(rand_seed).
# pragma_analyze(False).
# pragma_autocompact(True).
# done().
# add().
# rule(SEIRFluRule()).
# probe(probe_grp_size_flu).
# done().
# new_group(1000).
# done().
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0).
# run().summary(False, 8,0,0,0)
# )
|
987,205 | ef5d894c5edbe85c7961b9537b6f56b2eb9ac831 | from .graph_node import GraphNode
from .graph import Graph
from .square_grid import SquareGrid, Point
# BUG FIX: __all__ must be a sequence of *names* (strings). The original bound
# it to a tuple of the imported objects themselves, which breaks tooling that
# reads __all__ and makes `from package import *` consumers see wrong entries.
__all__ = ["GraphNode", "Graph", "SquareGrid", "Point"]
|
987,206 | dd7d8a18f8c1c27db24bb92ba7067ab01a50b452 | """
This script converts CEU.BEAM.txt (which is in BEAM format: first line is 0 or 1 if subject is case or control,
then one line per SNP and 0 if homozygous for the major allele, 1 if heterozygous, 2 if homozygous for the minor allele).
The output is two matrices: cases and controls
and a description of the implanted biclusters.
cases and controls a are transposed matrices:
Each row corresponds to a person. Each column corresponds to a SNP. Cells are 0, 1 or 2 (same meaning as in BEAM format)
SNPs in the biclusters are set to 2.
"""
import json
import os
import cPickle as pickle
import random
#from scipy.stats import pearsonr
from bicluster import bit_encode
from utils import *
def generate_random_cases_and_controls():
    """Build a fully synthetic GWAS dataset (Python 2 code: xrange, print stmt).

    Rows are SNPs, columns individuals; genotypes are 0/1/2 copies of the
    minor allele. One bicluster of forced homozygous-minor (2) cells is
    implanted into the cases, and everything is dumped to random_GWAS_300k.json.
    """
    SNPs = 300000        # number of SNP rows
    CASES = 1000         # case individuals (columns of case_m)
    CONTROLS = 100
    BICLUSTERS = 1
    BI_MAX_SNPs = 101    # SNPs per implanted bicluster
    BI_MAX_INDs = 100    # individuals per implanted bicluster
    # matrix() comes from the star-import of utils — TODO confirm signature.
    case_m = matrix(SNPs, CASES)
    cont_m = matrix(SNPs, CONTROLS)
    implanted_biclusters = []
    # Background noise: genotype = Bernoulli(maf) + Bernoulli(maf), in {0,1,2}.
    MAFs = [0.1] * SNPs
    # MAFs = [0.05 + (float(i)/(3*SNPs)) for i in xrange(SNPs)]
    for i, maf in enumerate(MAFs):
        for j in xrange(CASES):
            case_m[i][j] = (1 if random.random() < maf else 0) + (1 if random.random() < maf else 0)
        for j in xrange(CONTROLS):
            cont_m[i][j] = (1 if random.random() < maf else 0) + (1 if random.random() < maf else 0)
    for bc in xrange(BICLUSTERS):
        bc_i = BI_MAX_INDs #random.randint(BI_MAX_INDs - 2, BI_MAX_INDs)
        bc_s = BI_MAX_SNPs #random.randint(BI_MAX_SNPs - 2, BI_MAX_SNPs)
        case_inds = sorted(random.sample(xrange(CASES), bc_i))
        case_snps = sorted(random.sample(xrange(SNPs), bc_s))
        implanted_biclusters.append((case_snps, case_inds))
        # Implant: force homozygous-minor on every (snp, person) cell.
        for i in case_snps:
            for j in case_inds:
                case_m[i][j] = 2
    json.dump({'cases': case_m, 'controls': cont_m, 'implanted_biclusters' : implanted_biclusters}, open('random_GWAS_300k.json', 'w'))
def generate_from_BEAM():
    """Convert SIMLD/CEU.BEAM.txt into cases/controls matrices with one
    implanted bicluster, dumped to CEU_GWAS.json (Python 2 code).

    Matrices stay SNP-major: row = SNP, column = individual.
    """
    PPL_TO_TAKE = 1000
    CASE_CONTROL_RATIO = 0.5 # the ratio of cases vs controls
    BI_CASES = 100 # fraction of cases that are in one bicluster
    BI_SNPS = 100 # number of SNPs per bicluster
    BICLUSTERS = 1 # number of biclusters
    snp_file = open('SIMLD/CEU.BEAM.txt')
    # read out disease status. this information is irrelevant.
    _ = snp_file.readline()
    snps = [map(int, l.split()) for l in snp_file]
    snp_file.close()
    total_snps = len(snps)
    pop_size = len(snps[0])
    total_cases = int(CASE_CONTROL_RATIO * pop_size)
    # create cases and controls matrices and transpose them:
    # cases = [list(row) for row in zip(*[snp[:total_cases] for snp in snps])][:PPL_TO_TAKE]
    # controls = [list(row) for row in zip(*[snp[total_cases:] for snp in snps])][:PPL_TO_TAKE]
    # don't transpose anything
    cases = [snp[:total_cases] for snp in snps]
    controls = [snp[total_cases:] for snp in snps]
    # NOTE(review): total_cases is capped AFTER slicing columns above, so the
    # implanted individuals are limited to the first PPL_TO_TAKE cases while
    # the matrix may hold more columns — confirm this is intentional.
    total_cases = min(PPL_TO_TAKE, total_cases)
    implanted_biclusters = [[ random.sample(xrange(total_snps), BI_SNPS),
                              random.sample(xrange(total_cases), int(BI_CASES))]
                            for _ in xrange(BICLUSTERS)]
    for bi_snps, bi_ppl in implanted_biclusters:
        print 'implanting - people:', len(bi_ppl), ', snps:', len(bi_snps)
        for snp_id in bi_snps:
            for person_id in bi_ppl:
                cases[snp_id][person_id] = 2
    # snp_freq = [sum(snp)/float(2*pop_size) for snp in snps]
    # cc = [[0 for i in xrange(total_snps)] for j in xrange(total_snps)]
    #
    # for i in xrange(total_snps):
    #     for j in xrange(total_snps):
    #         cc[i][j] = pearsonr(snps[i], snps[j])[0]
    # print min(snp_freq), max(snp_freq)
    # print total_snps, pop_size
    json.dump({'cases': cases, 'controls': controls, 'implanted_biclusters' : implanted_biclusters}, open('CEU_GWAS.json', 'w'))
def generate_from_BEAM_chunks():
    """Assemble a dataset from a random subset of 10k-SNP BEAM chunk files,
    implant biclusters while preserving each SNP's minor-allele frequency,
    and pickle the bit-encoded result (Python 2 code: xrange, print stmt).
    """
    # NOTE(review): genotypes are RECODED here — input '1' (heterozygous)
    # becomes 0 and '2' (homozygous minor) becomes 1, i.e. a binary
    # carrier/non-carrier encoding for bit_encode. Confirm against bicluster.py.
    HOMOZYGOUS = 1
    HETEROZYGOUS = 0
    CASE_CONTROL_RATIO = 0.5 # the ratio of cases vs controls
    BI_CASES = 100 # fraction of cases that are in one bicluster
    BI_SNPS = 100 # number of SNPs per bicluster
    BICLUSTERS = 1 # number of biclusters
    TOTAL_SNPS = 300000
    # Each chunk file holds 10k SNPs; pick enough files to reach TOTAL_SNPS.
    FILES_TO_TAKE = random.sample(range(30), TOTAL_SNPS/10000)
    print 'files to take:', FILES_TO_TAKE
    TOTAL_CASES = 1000
    TOTAL_INDIVIDUALS = 2000
    # Randomly assign individuals to cases; the rest are controls.
    case_ids = random.sample(xrange(TOTAL_INDIVIDUALS), TOTAL_CASES)
    control_ids = [pid for pid in xrange(TOTAL_INDIVIDUALS) if pid not in case_ids]
    snp_dir = 'SIMLD/CEU_300k_10k_chunked'
    cases = []
    controls = []
    file_index = 0
    for snp_fname in map(lambda f: os.path.join(snp_dir, f), sorted([f for f in os.listdir(snp_dir) if f.endswith('.txt')])):
        file_index += 1
        if (file_index - 1) not in FILES_TO_TAKE:
            print 'skipping', file_index
            continue
        print 'processing', snp_fname
        snp_file = open(snp_fname)
        # read out disease status to determine population size.
        pop_size = len(snp_file.readline().split())
        total_cases = int(CASE_CONTROL_RATIO * pop_size)
        for l in snp_file:
            snps = [0 if v == '0' else
                    (HETEROZYGOUS if v == '1' else
                     (HOMOZYGOUS if v == '2' else None)) for v in l.split()]
            cases.append([snps[pid] for pid in case_ids])
            controls.append([snps[pid] for pid in control_ids])
        snp_file.close()
    total_snps = len(cases)
    implanted_biclusters = [[ sorted(random.sample(xrange(total_snps), BI_SNPS)),
                              sorted(random.sample(xrange(total_cases), BI_CASES))]
                            for _ in xrange(BICLUSTERS)]
    for bi_snps, bi_ppl in implanted_biclusters:
        print 'implanting - people:', len(bi_ppl), ', snps:', len(bi_snps)
        for snp_id in bi_snps:
            # first delete an equal amount of minor alleles of the snp to keep the
            # MAF the same after implanting the bicluster
            carriers = [person_id for person_id in xrange(total_cases) if cases[snp_id][person_id] == HOMOZYGOUS]
            to_delete = random.sample(carriers, min(len(carriers), len(bi_ppl)))
            for person_id in to_delete:
                cases[snp_id][person_id] = 0
            for person_id in bi_ppl:
                cases[snp_id][person_id] = HOMOZYGOUS
    out_fname = os.path.join(snp_dir, 'CEU_%dk_%d_SNPs_by_%d_INDS.pickle' % (len(cases)/1000, BI_SNPS, BI_CASES))
    print 'output:', out_fname
    # pickle.dump(cases, open(out_fname+'.cases.pickle'))
    # pickle.dump(controls, open(out_fname+'.controls.pickle'))
    # pickle.dump(implanted_biclusters, open(out_fname+'.implanted_biclusters.pickle'))
    total_controls = len(controls[0])
    total_cases = len(cases[0])
    # Persist everything as one pickled 5-tuple with bit-encoded matrices.
    pickle.dump( [bit_encode(cases),
                  total_cases,
                  bit_encode(controls),
                  total_controls,
                  implanted_biclusters],
                 open(out_fname, 'w'),
                 pickle.HIGHEST_PROTOCOL)
    # boost = open(out_fname+ '.BOOST', 'w')
    # boost.write('\n'.join(' '.join(map(str, [1] + [cases[snp_id][person_id] for snp_id in xrange(TOTAL_SNPS)])) for person_id in xrange(total_cases)))
    # boost.write('\n'.join(' '.join(map(str, [0] + [controls[snp_id][person_id] for snp_id in xrange(TOTAL_SNPS)])) for person_id in xrange(total_controls)))
    # print implanted_biclusters
    # boost.close()
    # pickle.dump({ 'cases': cases,
    #               'controls': controls,
    #               'implanted_biclusters' : implanted_biclusters},
    #             open(out_fname, 'w'),
    #             pickle.HIGHEST_PROTOCOL)
    #
#
if __name__ == '__main__':
    # Only the chunked generator is active; the others are kept for reference.
    # generate_from_BEAM()
    generate_from_BEAM_chunks()
    # generate_random_cases_and_controls()
|
987,207 | 35d48342d315269a47d0facc0ec01dd8764a0cfd | import os
from unittest import TestCase
from musicscore.musicstream.streamvoice import SimpleFormat
from musicscore.musictree.treeinstruments import Violin, Cello, Viola
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from tests.score_templates.xml_test_score import TestScore
path = str(os.path.abspath(__file__).split('.')[0])
class Test(TestCase):
    """Part-group (bracket) rendering tests on a four-part string score."""

    def setUp(self) -> None:
        self.score = TreeScoreTimewise()
        sf = SimpleFormat(quarter_durations=[4])
        # One whole-note voice per part, parts numbered 1..4.
        for part_number in range(1, 5):
            sf.to_stream_voice().add_to_score(self.score, part_number=part_number)
        instruments = [Violin(1), Violin(2), Viola(), Cello()]
        for score_part, instrument in zip(self.score.get_score_parts(), instruments):
            score_part.instrument = instrument

    def _write_and_check(self, suffix, start_index, stop_index):
        # Open a bracketed part group at `start_index`, close it at
        # `stop_index`, write the score and diff against the stored template.
        xml_path = path + suffix
        parts = self.score.get_score_parts()
        parts[start_index].add_part_group(number=1, type='start', name='', symbol='bracket', barline='yes')
        parts[stop_index].add_part_group(number=1, type='stop')
        self.score.write(path=xml_path)
        TestScore().assert_template(result_path=xml_path)

    def test_1(self):
        self._write_and_check('_test_1.xml', 0, 2)

    def test_2(self):
        self._write_and_check('_test_2.xml', 1, 2)

    def test_3(self):
        self._write_and_check('_test_3.xml', 0, 3)
|
987,208 | 12d65c2a577a3852b315a595a95f0b8170988c01 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import gtk
import os.path
import manager
import db
# import shell
# import terminal
# import filemanager
class Null():
    """Main window controller: shows the stored shell list in a GTK TreeView
    with a right-click context menu (PyGTK / Python 2 code).
    """
    def __init__(self, builder):
        """
        Arguments:
        - `builder`: gtk.Builder already loaded with ui/main.glade
        """
        self.builder = builder
        self.liststore_shell = self.builder.get_object("liststore_shell")
        self.treeview_shell = self.builder.get_object("treeview_shell")
        # Double-click / Enter on a row opens a manager tab for that shell.
        self.treeview_shell.connect("row-activated", self.shell_choose)
        self.textview_res = self.builder.get_object("textview_res")
        self.notebook = self.builder.get_object("notebook")
        self.statusbar_shell = self.builder.get_object("statusbar_shell")
        # self.notebook.connect("switch-page", self.switch_page)
        self.set_shell_title()
        self.create_shell_list()
        # self.terminal = terminal.Terminal(self.builder)
        # self.filemanager = filemanager.Filemanager(self.builder)
        # Build the right-click context menu.
        menu = gtk.Menu()
        #Fill it with menu items
        item_terminal = gtk.MenuItem("Terminal")
        item_file = gtk.MenuItem("File Manager")
        item_db = gtk.MenuItem("DataBase")
        item_refresh = gtk.MenuItem("Refresh")
        item_new = gtk.MenuItem("New")
        item_del = gtk.MenuItem("Del")
        item_sep = gtk.SeparatorMenuItem()
        item_sep2 = gtk.SeparatorMenuItem()
        item_edit = gtk.MenuItem("Edit")
        menu.append(item_terminal)
        menu.append(item_file)
        menu.append(item_db)
        menu.append(item_sep)
        menu.append(item_refresh)
        menu.append(item_sep2)
        menu.append(item_new)
        menu.append(item_edit)
        menu.append(item_del)
        item_terminal.show()
        item_file.show()
        item_db.show()
        item_sep.show()
        item_sep2.show()
        item_refresh.show()
        item_new.show()
        item_edit.show()
        item_del.show()
        # Only "New" is wired up; the other items have no handlers yet.
        item_new.connect("activate", self.run_add_dialog)
        self.treeview_shell.connect_object("event", self.right_button_press, menu)

    def right_button_press(self, widget, event):
        # Pop the context menu up on right-click (button 3) only.
        if event.type == gtk.gdk.BUTTON_PRESS and event.button == 3:
            #make widget popup
            widget.popup(None, None, None, event.button, event.time)
        pass

    def set_shell_title(self):
        """Set up the TreeView column headers.

        NOTE(review): the "SCRIPT"/"COUNTRY" labels are bound to model columns
        1 and 2, but create_shell_list stores country at 1 and script at 2 —
        the two header labels appear swapped relative to the data. Confirm
        against the liststore definition in the glade file.
        """
        head_title = gtk.TreeViewColumn("ID", gtk.CellRendererText(), text = 0)
        head_title.set_sort_column_id(0)
        self.treeview_shell.append_column(head_title)
        head_title = gtk.TreeViewColumn("SCRIPT", gtk.CellRendererText(), text = 1)
        self.treeview_shell.append_column(head_title)
        head_title = gtk.TreeViewColumn("COUNTRY", gtk.CellRendererText(), text = 2)
        self.treeview_shell.append_column(head_title)
        head_title = gtk.TreeViewColumn("SHELL", gtk.CellRendererText(), text = 3)
        self.treeview_shell.append_column(head_title)
        head_title = gtk.TreeViewColumn("PASS", gtk.CellRendererText(), text = 4)
        self.treeview_shell.append_column(head_title)
        head_title = gtk.TreeViewColumn("INFO", gtk.CellRendererText(), text = 5)
        self.treeview_shell.append_column(head_title)

    def run_add_dialog(self,widget):
        # NOTE(review): `shell` refers to the module whose import is commented
        # out at the top of the file — as written this raises NameError.
        dialog = shell.Add()
        dialog.run()
        self.create_shell_list()

    # def switch_page(self, notebook, move_focus, test):
    def shell_choose(self, treeview, path, view_column):
        """Open a manager tab for the activated row."""
        (model, iter) = treeview.get_selection().get_selected()
        # Model columns: 3 = addr (URL), 2 = script, 4 = pass.
        url = model.get_value(iter, 3)
        script = model.get_value(iter, 2)
        p = model.get_value(iter,4)
        # Strip the leading "http://" and take the host part.
        host = url[7:].split('/')[0]
        print url,p,script
        m = manager.Manager()
        l = gtk.Label(host)
        self.notebook.append_page(m.vbox, l)
        # self.notebook.set_current_page(1)

    def create_shell_list(self):
        """Reload the shell list from the database into the liststore."""
        self.liststore_shell.clear() # clear existing rows
        shell = db.Db('null.db')
        shell_list = shell.get_all_shells()
        slen = len(shell_list)
        #print self.shell_list, len(self.shell_list)
        for s in range(slen):
            li_iter = self.liststore_shell.append()
            self.liststore_shell.set(li_iter,
                                     0, s+1,
                                     1, shell_list[s]['country'],
                                     2, shell_list[s]['script'],
                                     3, shell_list[s]['addr'],
                                     4, shell_list[s]['pass'],
                                     5, shell_list[s]['info'],
                                     )
        self.statusbar_shell.push(1,"Your have %s Shells." % slen)
if __name__ == '__main__':
    # Load the glade UI, wire the main window, and enter the GTK main loop.
    builder = gtk.Builder()
    builder.add_from_file('ui/main.glade')
    win_main = builder.get_object("window_main")
    win_main.connect("destroy",gtk.main_quit)
    Null(builder)
    gtk.main()
|
987,209 | 940b25cff6bc8c06c89e070a3b87d20c01ef3ed9 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 14:56:37 2013
@author: daniel
"""
#programa: subcadena_1.py
#ejercicio 199 y 200
#data input
cadena = raw_input('Escribe una cadena: ')
i = int(raw_input('Numero A: '))
j = int(raw_input('Numero B: '))
flag = 0
subcadena = ''
if i < 0:
i = 0
if j > len(cadena):
final = len(cadena)
if j < len(cadena):
final = j
if i >= final:
flag = 1
for k in range(i, final):
subcadena += cadena[k]
if flag == 0:
print 'Subcadena entre %i y %i es %s' %(i, final, subcadena)
else:
print 'Subcadena en blanco' |
987,210 | 5c1e1981e194ba0c252608e9fc356c0dda9d9c61 | from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from model_utils import Choices
@python_2_unicode_compatible
class User(AbstractUser):
    """Custom auth user with a single free-form name field and a role flag."""

    # Account role: librarian / student / visitor.
    TYPE_OF_USER = Choices(
        (0, 'bibliotecario', _('bibliotecario')),
        (1, 'estudiante', _('estudiante')),
        (2, 'visitante', _('visitante')),
    )

    # First Name and Last Name do not cover name patterns
    # around the globe.
    name = models.CharField(_('Name of User'), blank=True, max_length=255)
    # New accounts default to the least-privileged role (visitante).
    type_of_user = models.IntegerField(choices=TYPE_OF_USER,
                                       default=TYPE_OF_USER.visitante)

    def __str__(self):
        return self.username

    def get_absolute_url(self):
        return reverse('users:detail', kwargs={'username': self.username})

    def is_visitor(self):
        # Role checks compare against the Choices enum values.
        return self.type_of_user == self.TYPE_OF_USER.visitante

    def is_student(self):
        return self.type_of_user == self.TYPE_OF_USER.estudiante

    def is_librarian(self):
        return self.type_of_user == self.TYPE_OF_USER.bibliotecario

    def has_visitor_profile(self):
        # NOTE(review): presumably reverse one-to-one relations named
        # 'visitor'/'student'/'librarian' — confirm against the profile models.
        return hasattr(self, 'visitor')

    def has_student_profile(self):
        return hasattr(self, 'student')

    def has_librarian_profile(self):
        return hasattr(self, 'librarian')
|
987,211 | 70d6a3345edbfe0d8495853d8d5dd1d0173eb64e | # -*- coding: utf-8 -*-
"""
ECoG Channel Localization (Joseph Tseung)
Trying SimpleITK
Created on Sun Nov 11 16:53:26 2018
@author: josep
"""
# from tkinter import Tk
# from tkinter.filedialog import askopenfilename
# from skimage import measure
import SimpleITK as sitk
# from matplotlib import pyplot as plt
import os
""" Read images """
def run(cursor, subject_id, CT_filename, MR_filename):
    """Register a CT volume onto an MR volume, detect ECoG electrodes in the
    registered CT, and return their corrected coordinates.

    Side effects: writes 'registered.nii.gz' next to the CT file and updates
    the subjects table with its path via `cursor`.

    Returns a list of {"x": ..., "y": ..., "z": ...} dicts, one per electrode.
    """
    moving_image = sitk.ReadImage(CT_filename)
    fixed_image = sitk.ReadImage(MR_filename)
    #interact(display_images, fixed_image_z=(0,fixed_image.GetSize()[2]-1), moving_image_z=(0,moving_image.GetSize()[2]-1), fixed_npa = fixed(sitk.GetArrayViewFromImage(fixed_image)), moving_npa=fixed(sitk.GetArrayViewFromImage(moving_image)));
    # fix up some image characteristics: identity orientation, unit spacing.
    fixed_image.SetOrigin((0, 0, 0))
    fixed_image.SetDirection([1, 0, 0, 0, 1, 0, 0, 0, 1])
    fixed_image.SetSpacing([1, 1, 1])
    """ Initial alignment """
    initial_transform = sitk.CenteredTransformInitializer(fixed_image,
                                                          moving_image,
                                                          sitk.Euler3DTransform(),
                                                          sitk.CenteredTransformInitializerFilter.GEOMETRY)
    moving_resampled = sitk.Resample(moving_image, fixed_image, initial_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())
    #interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));
    """ registration """
    registration_method = sitk.ImageRegistrationMethod()
    # Similarity metric settings.
    registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
    registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
    registration_method.SetMetricSamplingPercentage(0.01)
    registration_method.SetInterpolator(sitk.sitkLinear)
    # Optimizer settings.
    registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100, convergenceMinimumValue=1e-6, convergenceWindowSize=10)
    registration_method.SetOptimizerScalesFromPhysicalShift()
    # Setup for the multi-resolution framework.
    registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
    registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])
    registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
    # Don't optimize in-place, we would possibly like to run this cell multiple times.
    registration_method.SetInitialTransform(initial_transform, inPlace=False)
    # Connect all of the observers so that we can perform plotting during registration.
    # registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
    # registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
    # registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations)
    # #registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))
    final_transform = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
                                                  sitk.Cast(moving_image, sitk.sitkFloat32))
    """ Post-registration analysis """
    print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
    print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
    moving_resampled = sitk.Resample(moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())
    #interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));
    # Save the registered CT next to the original and record its path.
    rct_file = os.path.join(os.path.dirname(os.path.realpath(CT_filename)), 'registered'+'.nii.gz')
    sitk.WriteImage(moving_resampled, rct_file)
    cursor.execute("UPDATE subjects SET rct_path = %s WHERE sid = %s", (rct_file, subject_id))
    ################## ELECTRODE DETECTION #####################
    # Tk().withdraw()
    # filename = askopenfilename(title = "Select registered image")
    image = sitk.ReadImage(rct_file)
    # fix up some image characteristics
    image.SetOrigin((0, 0, 0))
    image.SetDirection([1, 0, 0, 0, 1, 0, 0, 0, 1])
    image.SetSpacing([1, 1, 1])
    # Cast/normalize to an image with a 0-255 range
    image_255 = sitk.Cast(sitk.RescaleIntensity(image), sitk.sitkUInt8)
    # Get information on image intensity distribution (voxel-by-voxel; slow).
    intensities = []
    for i in range(image.GetWidth()):
        for j in range(image.GetHeight()):
            for k in range(image.GetDepth()):
                intensities.append(image_255.GetPixel(i, j, k))
    #fig = plt.figure()
    #plt.title('Intensity Histogram')
    #plt.xlabel("Pixel intensity")
    #plt.ylabel("Number of pixels")
    #plt.hist(intensities)
    #plt.show()
    #print("Done getting intensities")
    # Hard-coded threshold based on intensity histogram results
    thresholded_image = image_255 > 250
    # Gaussian blurring to take out high-resolution noise
    gaussian = sitk.SmoothingRecursiveGaussianImageFilter()
    gaussian_blurred = gaussian.Execute(thresholded_image)
    # Cast/normalize to an image with a 0-255 range
    gaussian_blurred_255 = sitk.Cast(sitk.RescaleIntensity(gaussian_blurred), sitk.sitkUInt8)
    print("Done blurring")
    # Display connected component sizes
    stats = sitk.LabelShapeStatisticsImageFilter()
    stats.Execute(sitk.ConnectedComponent(gaussian_blurred_255))
    label_sizes = [stats.GetNumberOfPixels(l) for l in stats.GetLabels()]
    #plt.hist(label_sizes)
    #plt.title("Distribution of Object Sizes")
    #plt.xlabel("Size in Pixels")
    #plt.ylabel("Number of Objects")
    # output electrode locations s tuples
    # Components of 100-1000 voxels are assumed to be electrodes.
    electrodes = []
    for l in stats.GetLabels():
        if (100 < stats.GetNumberOfPixels(l) < 1000):
            electrodes.append(stats.GetCentroid(l))
    n = len(electrodes)
    print(str(n) + " electrodes found")
    # modify electrode coordinates here:
    x = []
    y = []
    z = []
    # first switch x and z axes to convert from SimpleITK format to numpy array format
    for i in range(len(electrodes)):
        x.append(float(electrodes[i][2]))
        y.append(float(electrodes[i][1]))
        z.append(float(electrodes[i][0]))
    import numpy as np
    attempt = np.column_stack([x, y, z])
    # rotate by 90 degrees towards the xy plane and 45 degrees towards yz plane
    theta = np.radians(90)
    c, s = np.cos(theta), np.sin(theta)
    rotation_matrix = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
    rotated = np.dot(attempt, rotation_matrix)
    # rotate yz plane clockwise 45 degrees
    yz_tempt = rotated[:,[1,2]]
    theta = np.radians(45)
    c,s = np.cos(theta), np.sin(theta)
    rotation_matrix = np.array([[c,-s], [s,c]])
    yz_rotated = np.dot(yz_tempt, rotation_matrix)
    rotated[:,[1,2]] = yz_rotated
    # NOTE(review): hard-coded translation offsets tuned for a particular
    # acquisition — confirm before reusing on other scanners.
    rotated[:, 0] += image.GetWidth()-30
    rotated[:, 1] -= 75
    rotated[:, 2] += 130
    # save electrode locations in csv format
    # import csv
    # with open('electrode_coordinates.csv', mode='w', newline='') as electrode_coordinates:
    #     writer = csv.writer(electrode_coordinates)
    #     writer.writerows(rotated)
    # print('csv done')
    ## update to push to database channels table
    ## then, Yannan's electrode_position_correction will update these values again
    #####################################################################
    ###################Update Database###################################
    #####################################################################
    results = []
    for i in range(len(rotated)):
        results.append({"x": rotated[:, 0][i], "y": rotated[:, 1][i], "z": rotated[:, 2][i]})
    return results
|
987,212 | aa60f7417e51b9ffd722f6815a0811ea69e3a4e4 | #6
import functools
# Read a space-separated list of integers from stdin and print their sum.
lst = list(map(int, input("Enter the list: ").split(" ")))
print("The sum of the list elements is : ", end="")
# sum() is the idiomatic (and C-speed) replacement for
# functools.reduce(lambda a, b: a + b, lst); identical for non-empty lists,
# which is the only case reachable here (int('') would already have raised).
print(sum(lst))
987,213 | d86d20fee2a59917b6cd98a225bb909c18f0b93b | from datetime import datetime
user_data = []
class User:
    """In-memory user record backed by the module-level `user_data` list."""

    # Keyword arguments copied verbatim onto the instance.
    _FIELDS = ("firstname", "lastname", "othernames",
               "email", "phone_number", "username")

    def __init__(self, **kwargs):
        # Sequential id derived from how many users already exist.
        self._id = len(user_data) + 1
        for field in self._FIELDS:
            setattr(self, field, kwargs[field])
        self.registered = datetime.now()
        self.is_admin = False

    def format_user_record(self):
        """Return the record as a JSON-style dict with camelCase keys."""
        record = {
            'id': self._id,
            'firstname': self.firstname,
            'lastname': self.lastname,
            'othernames': self.othernames,
            'email': self.email,
            'phoneNumber': self.phone_number,
            'username': self.username,
            'registered': self.registered,
            'isAdmin': self.is_admin,
        }
        return record
987,214 | b09056bec6639ee7c5d658ff38be15e193511b43 | import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
torch.manual_seed(seed=0)
def logistic_regression(X, y, W_init, lr, max_nsteps):
    """Fit logistic regression by full-batch SGD.

    Args:
        X: (N, D) array-like of features (must expose .shape).
        y: length-N array-like of 0/1 labels.
        W_init: optional initial weights with D+1 entries (bias weight first,
            matching the returned shape); None keeps torch's default init.
        lr: SGD learning rate.
        max_nsteps: number of gradient steps to run.

    Returns:
        (weights, history): weights is a detached (D+1, 1) tensor; history is
        a list of (weights, loss) tuples, one entry per step.
    """
    N, D = X.shape
    # Prepend a constant-1 column so the first weight acts as the bias.
    X_ = torch.ones(N, D + 1)
    X_[:, 1:] = torch.tensor(X)
    y = torch.FloatTensor(y).unsqueeze(1)
    # Setup: bias is folded into the weight matrix, so bias=False.
    model = nn.Linear(D + 1, 1, bias=False)
    # BUG FIX: W_init was accepted but silently ignored; honour it when given.
    if W_init is not None:
        with torch.no_grad():
            model.weight.copy_(
                torch.as_tensor(W_init, dtype=torch.float32).reshape(1, D + 1))
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr)
    # Log history; a counted for-loop replaces the original
    # `while True` + manual step counter (same number of iterations).
    history = []
    for _ in range(max_nsteps):
        logits = model(X_)
        loss = criterion(logits, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        history.append((model.weight.t().detach(), loss.item()))
    return model.weight.t().detach(), history
def test():
    """Smoke test: fit on a synthetic 2-blob dataset and save the loss curve."""
    from sklearn.datasets import make_blobs
    # Data
    N, C = 1000, 2
    X, y = make_blobs(n_samples=N, centers=C, n_features=2)
    # Hyperparameters
    lr = 0.1
    max_nsteps = 10000
    # Solve Softmax Regression by Gradient Descent
    W, h = logistic_regression(X, y, None, lr, max_nsteps)
    # Unzip history into weight and loss trajectories.
    Wh, Lh = list(zip(*h))
    # Visualize Result
    print('Result:')
    print('Final loss:', Lh[-1])
    sns.lineplot(x=range(len(Lh)), y=Lh)
    plt.savefig('logreg_torch')
    plt.close()
if __name__ == '__main__':
    import traceback
    try:
        test()
    except Exception as e:
        # Print the full traceback instead of letting the interpreter abort,
        # so failures are visible in captured stdout during batch runs.
        traceback.print_exc()
|
987,215 | 6114967a43296defe3dd21061d1443e964c016cd | #!/usr/bin/env python
import uvicorn
from fastapi import FastAPI, WebSocket
from src.servers.config import host, port
from src.servers.utils import random_greeting
app = FastAPI()
@app.websocket("/greeting")
async def greeting(websocket: WebSocket):
    """Reply with a random greeting for each JSON message {'name': ...}.

    The loop ends (socket closed) on any receive/parse failure, which
    includes a normal client disconnect.
    """
    await websocket.accept()
    while True:
        try:
            data = await websocket.receive_json()
            greeting = random_greeting(data['name'])
            await websocket.send_json({ 'greeting': greeting })
        except Exception:
            # BUG FIX: the bare `except:` also trapped SystemExit and
            # KeyboardInterrupt; `except Exception` keeps the original
            # best-effort close-on-any-error behaviour without masking
            # interpreter shutdown signals.
            await websocket.close()
            break
def run_test():
    """Announce the websocket endpoint, then serve the app (blocking)."""
    endpoint = 'ws://{}:{}/greeting'.format(host, port)
    print('Server starting at: ' + endpoint)
    uvicorn.run(app, host=host, port=port, log_level='error')
if __name__ == '__main__':
run_test() |
987,216 | 7b92d679b1c637d6e4515d754c7aed9f481faacf | T = int(input())
# For each test case: locate a barcode in an N x M grid of '0'/'1'
# characters, decode its 8 digits, and print their sum if the checksum
# passes, otherwise 0.
for q in range(T):
    N, M = map(int, input().split())
    arr = [input() for _ in range(N)]
    result =[]
    tmp_1 = 0  # sum of digits at even positions (0,2,4,6)
    tmp_2 = 0  # sum of digits at odd positions (1,3,5,7)
    # 7-bit pattern -> decimal digit lookup table
    num = {'0001101': 0, '0011001': 1,'0010011': 2,'0111101': 3,'0100011': 4,'0110001': 5,'0101111': 6,'0111011': 7,'0110111': 8,'0001011': 9}
    # Scan each row right-to-left for the last '1'; the code occupies the
    # 56 columns ending there (8 digits x 7 bits).
    for i in range(N):
        for j in range(M-1, -1, -1):
            if arr[i][j] == '1':
                for k in range(j-55, j+1, 7):
                    tmp = ''
                    for m in range(7):
                        tmp += arr[i][k+m]
                    for key, value in num.items():
                        if key == tmp:
                            result.append(value)
                if result:
                    break
        if result:
            break
    # Checksum: 3 * (even-position sum) + (odd-position sum) must be
    # divisible by 10 for a valid code.
    for i in range(8):
        if i % 2 == 0:
            tmp_1 += result[i]
        else:
            tmp_2 += result[i]
    ans = tmp_1 * 3 + tmp_2
    if ans % 10 == 0:
        print('#{} {}'.format(q+1, sum(result)))
else:
print('#{} {}'.format(q+1, 0)) |
987,217 | 6397c816cb14f76445ae4618bb2632867d235441 | import logging
import re
from typing import Optional
from pydantic import BaseModel
from opennem.core.unit_single import facility_unit_numbers_are_single
logger = logging.getLogger(__name__)
# Entirely digits
__is_number = re.compile(r"^\d+$")
# Exactly one digit
__is_single_number = re.compile(r"^\d$")
# 1-6 letter unit alias anywhere (e.g. "GT").
# NOTE(review): the '|' inside the character class is literal, so these
# also match a pipe character — presumably unintended; confirm before fixing.
__is_unit_alias = re.compile(r"([A-Z|a-z]{1,6})")
# Letter alias immediately followed by 1-2 digits (e.g. "GT2")
__is_unit_alias_forced = re.compile(r"([A-Z|a-z]{1,6}\d{1,2})")
# Numeric range such as "1-50"
__unit_range_parse = re.compile(r"(\d+)\-(\d+)")
class UnitSchema(BaseModel):
    """Parsed representation of a facility unit designation."""
    # The unit id
    id: int = 1
    # The unit alias
    alias: Optional[str] = None
    # The number of units
    number: int = 1
    # Nameplate capacity. No explicit default: pydantic v1 treats a bare
    # Optional field as defaulting to None.
    capacity: Optional[int]
def is_number(v: str) -> bool:
    """Return True when *v* consists entirely of digits."""
    return __is_number.match(v) is not None
def is_single_number(v: str) -> bool:
    """Return True when *v* is exactly one digit."""
    return __is_single_number.match(v) is not None
# Has a unit alias like A or GT
def unit_has_alias(v: str) -> bool:
    """Return True when *v* contains a 1-6 letter alias anywhere."""
    return __is_unit_alias.search(v) is not None
def unit_has_alias_forced(v: str) -> bool:
    """Return True when *v* contains a letter alias immediately followed
    by one or two digits (e.g. "GT2")."""
    return __is_unit_alias_forced.search(v) is not None
def strip_whitespace(v: str) -> str:
    """Return *v* with every whitespace character removed."""
    trimmed = v.strip()
    return str(re.sub(r"\s+", "", trimmed))
def parse_unit_duid(unit_input: str, unit_duid: str) -> UnitSchema:
    """Parse *unit_input*, forcing single-unit alias handling when the
    DUID is known to use single unit numbers (Hallett-style "GT 2-4")."""
    return parse_unit_number(unit_input, facility_unit_numbers_are_single(unit_duid))
def parse_unit_number(unit_input: str, force_single: bool = False) -> UnitSchema:
    """
    Parses unit number string into a UnitSchema model

    Returns a UnitSchema with the starting unit id, the number of units
    covered by the designation, and an optional letter alias.

    force_single is a hack for units like Hallett where "GT 2-4" means
    unit alias GT2-4 rather than alias GT with id 2 and 2 units
    AEMO put a unit no in sometimes when they mean a unit ID (ie. 8) and
    sometimes it means the number of units (ie. 40)
    """
    unit_id = 1
    unit_no = 0
    unit_alias = None
    has_alias = False
    if unit_input == None:
        unit_input = ""
    # Normalize to string
    if type(unit_input) is not str:
        unit_input = str(unit_input)
    # Strip whitespace and capitalize
    unit_input = strip_whitespace(unit_input)
    unit_input = unit_input.upper()
    if unit_input == "":
        unit_input = "1"
    # @TODO handle the silly multi unit lines
    if "," in unit_input:
        uc = unit_input.split(",")
        # This is a bit of a hack - we use the first unit and
        # count the additionals as extra unit numbers. It works
        # for now
        unit_input = uc[0]
        uc = uc[1:]
        unit_no += len(uc)
        # "&"-joined components after a comma count as one extra unit each
        for unit_component in uc:
            if "&" in unit_component:
                unit_no += 1
    if "&" in unit_input:
        unit_no += len(unit_input.split("&"))
    if force_single and unit_has_alias_forced(unit_input):
        has_alias = True
        # extract the unit alias
        unit_alias_search = re.search(__is_unit_alias_forced, unit_input)
        if unit_alias_search and unit_alias_search.lastindex == 1:
            unit_alias = unit_alias_search.group(1)
        if not unit_alias or not type(unit_alias) is str:
            raise Exception(
                "Error extracting alias from {}: Got {}".format(unit_input, unit_alias)
            )
        # remove the unit alias
        unit_input = re.sub(r"[A-Za-z]{1,6}\d{1,2}\-", "", unit_input)
    if not has_alias and unit_has_alias(unit_input):
        has_alias = True
        # extract the unit alias
        unit_alias_search = re.search(__is_unit_alias, unit_input)
        if unit_alias_search and unit_alias_search.lastindex == 1:
            unit_alias = unit_alias_search.group(1)
        if not unit_alias or not type(unit_alias) is str:
            raise Exception(
                "Error extracting alias from {}: Got {}".format(unit_input, unit_alias)
            )
        # remove the unit alias
        unit_input = re.sub(r"[A-Za-z\ ]", "", unit_input)
    # Simple single number matches
    if is_number(unit_input):
        unit_id = int(unit_input)
        unit_no += 1
        # This is the crazy hack for when AEMO mix unit_no and unit_id
        # in the same field
        if unit_id > 8:
            unit_id = 1
            # NOTE(review): unit_id was just reset to 1, so unit_no becomes
            # 1 here; if "a big number means a count" was intended, this may
            # have wanted the original value — confirm.
            unit_no = unit_id
    # Range matches (ex. 1-50)
    unit_range_match = re.search(__unit_range_parse, unit_input)
    if unit_range_match and unit_range_match.lastindex == 2:
        unit_id = int(unit_range_match.group(1))
        unit_max = int(unit_range_match.group(2))
        if unit_max < unit_id:
            raise Exception(
                "Invalid max unit number {} on id {} for range {}".format(
                    unit_max, unit_id, unit_input
                )
            )
        # inclusive range: "1-50" covers 50 units
        unit_no += unit_max - unit_id + 1
    unit = UnitSchema(
        id=unit_id,
        number=unit_no,
        alias=unit_alias,
    )
    return unit
|
987,218 | c2b377558df8f5b7321ef91b295dbc745832b18e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : test_pytest.py
# @Author: yubo
# @Date : 2019/12/6
# @Desc :
import pytest
# Module-level counter used to observe how often the fixture body runs.
variable_module = 0
@pytest.fixture(scope='module')
def variable_to_module():
    """Increment the counter; with scope='module' the fixture runs once
    and its result is cached for every test in this file."""
    global variable_module
    variable_module += 1
    return variable_module
def test_variable_module1(variable_to_module):
    # First request triggers the fixture body: counter becomes 1.
    assert variable_module == 1
def test_variable_module2(variable_to_module):
    # Module scope reuses the cached fixture, so the counter is still 1.
    assert variable_module == 1
|
987,219 | 71a7389408a5620d24c9473da17b97ae1cbc0bd9 | from imutils.video import VideoStream
import face_recognition
import argparse
import imutils
import pickle
import time
import cv2
#from live_cam import live
# Live face recognition: match webcam faces against a pickled database of
# known encodings and draw labelled boxes. Press "q" to quit.
cv2.namedWindow("Image",cv2.WINDOW_NORMAL)
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--encodings", required=True,
	help="path to serialized db of facial encodings")
ap.add_argument("-o", "--output", type=str,
	help="path to output video")
ap.add_argument("-y", "--display", type=int, default=1,
	help="whether or not to display output frame to screen")
ap.add_argument("-d", "--detection-method", type=str, default="cnn",
	help="face detection model to use: either `hog` or `cnn`")
args = vars(ap.parse_args())
# load the known faces and embeddings (use a context manager so the file
# handle is always closed)
print("[INFO] loading encodings...")
with open(args["encodings"], "rb") as enc_file:
	data = pickle.loads(enc_file.read())
# start the camera and give the sensor time to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
writer = None
time.sleep(2.0)
while True:
	image = vs.read()
	# BUG FIX: resize the *converted* frame. The original resized `image`
	# (BGR), overwriting the conversion and feeding BGR pixels to
	# face_recognition, which expects RGB.
	rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
	rgb = imutils.resize(rgb, width=750)
	# ratio to map detections back to full-resolution coordinates
	r = image.shape[1] / float(rgb.shape[1])
	# detect face bounding boxes, then compute an embedding per face
	print("[INFO] recognizing faces...")
	boxes = face_recognition.face_locations(rgb,
		model=args["detection_method"])
	encodings = face_recognition.face_encodings(rgb, boxes)
	# one recognized name per detected face
	names = []
	for encoding in encodings:
		# compare against every known encoding
		matches = face_recognition.compare_faces(data["encodings"],
			encoding)
		name = "Unknown"
		if True in matches:
			# vote: count how many known encodings of each person matched
			matchedIdxs = [i for (i, b) in enumerate(matches) if b]
			counts = {}
			for i in matchedIdxs:
				name = data["names"][i]
				counts[name] = counts.get(name, 0) + 1
			# ties resolve to the first name encountered in the dict
			name = max(counts, key=counts.get)
		names.append(name)
	# draw labelled boxes on the original (full-resolution) frame
	for ((top, right, bottom, left), name) in zip(boxes, names):
		top = int(top * r)
		right = int(right * r)
		bottom = int(bottom * r)
		left = int(left * r)
		cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
		# keep the label on-screen when the box touches the top edge
		y = top - 15 if top - 15 > 15 else top + 15
		cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,0.75, (0, 255, 0), 2)
	cv2.imshow("Image", image)
	key = cv2.waitKey(1) & 0xFF
	if key == ord("q"):
		break
cv2.destroyAllWindows()
vs.stop()
# writer stays None unless video output is re-enabled
if writer is not None:
	writer.release()
|
987,220 | 8ef04a64fa9de57295da32e46b8eac88129e43d2 | from time import sleep
import os
# Snapshot the kernel ring buffer into LogFirewall once per second, forever.
while True:
    sleep(1)
    os.system("dmesg > LogFirewall")
987,221 | aa5f0e999c5212472388d13fbf9f7aaca2a2fb3f | #! /usr/bin/env python
from ROOT import *
import sys
import script_utils as script_utils
sys.path.append("/home/irfulx204/mnt/tmain/Desktop/Run308_Analyse_ERA/Scripts_ERA/")
import create_extrapolated_bckg as create_bckg
import subprocess as subp
import BDT_utils as BDT_ut
import BDT_file_handler as BDT_fh
import PyROOTPlots as PyRPl
def launch_analysis(bolo_name, analysis_type, d_cut, d_overwrite, nsimu, exposure, d_event_type_num_event, list_mass):
    """Launch BDT simulations
    Detail:
        Prepares extrapolated-background files, generates the configured
        event populations (most generation stages are currently disabled
        below), then compiles and runs the TMVA analysis executables.
    Args:
        bolo_name = (str) bolometer name
        analysis_type = (str) type of analysis (cuts on heat and ion and veto)
        d_cut       = (dict) indicates the cuts (inf/sup) on heat and ion
        d_overwrite  = (dict) indicates if standard files need to be overwritten
        nsimu        = (int) number of true event simulations
        exposure     = (float) desired exposure
        d_event_type_num_event = (dict) indicates how many events to simulate
        list_mass = (list) list of WIMP masses (currently unused in the body)
    Returns:
        void
    Raises:
        AssertionError
    """
    gen_path = "/home/irfulx204/mnt/tmain/Desktop/Run308_BDT_simu_corr/"
    #Create empty directories needed for chosen analysis
    BDT_ut.create_BDT_directories(bolo_name, analysis_type, "corr")
    #First check if the files for Heat + Gamma simulations have been created
    Ana_path = "/home/irfulx204/mnt/tmain/Desktop/Run308_Analyse_ERA/"
    file_Gamma = Ana_path + "Analyse_" + bolo_name + "/ROOT_files/" + bolo_name + "_Gamma_spectrum_extrapol.root"
    file_heat = Ana_path + "Analyse_" + bolo_name + "/ROOT_files/" + bolo_name + "_heatonly_spectrum_extrapol.root"
    # Recreate the extrapolated backgrounds if missing, or when forced
    try :
        assert( script_utils.bool_file_exist([file_heat, file_Gamma]) )
        if d_overwrite["Extra"]==1: create_bckg.create_extrapolated_bckg(bolo_name)
    except AssertionError:
        create_bckg.create_extrapolated_bckg(bolo_name)
    # Relevant imports (deferred: the generator modules live on paths that
    # only exist relative to this script)
    sys.path.append("../Event_generation/Beta_and_Pb_generation/")
    sys.path.append("../Event_generation/Gamma_generation/")
    sys.path.append("../Event_generation/Heatonly_generation/")
    sys.path.append("../Event_generation/True_events_generation/")
    sys.path.append("../Event_generation/WIMP_generation/")
    sys.path.append("../MVA/")
    import adjust_BetaPb_file as adjust_BetaPb
    import adjust_Gamma_file as adjust_Gamma
    import adjust_heat_file as adjust_heat
    import heatonly_KDE as heatonly_KDE
    import adjust_true_events_file as adjust_true_events
    import adjust_WIMP_files as adjust_WIMP
    import adjust_MVA_scaling_file as adjust_MVA_scaling
    import adjust_MVA_files as adjust_MVA
    #Get the correct root config options for compilation:
    cflags = subp.Popen(["root-config", "--cflags"], stdout = subp.PIPE).communicate()[0].rstrip()
    libs = subp.Popen(["root-config", "--libs"], stdout = subp.PIPE).communicate()[0].rstrip()
    #Prepare the bckg cut file (create a skeleton listing each event type
    #if it does not exist yet)
    list_evt_type = ["heatonly", "FidGamma", "S1Gamma", "S2Gamma", "S1Beta", "S2Beta", "S1Pb", "S2Pb"]
    file_eff = Ana_path + "Analyse_" + bolo_name + "/Text_files/" + bolo_name + "_bckg_cuteff_" + analysis_type + ".txt"
    try :
        assert( script_utils.bool_file_exist([file_eff]) )
    except AssertionError:
        with open(file_eff, "w") as feff:
            for event_type in list_evt_type:
                feff.write(event_type + "\n")
# ##############
# #Beta and Pb
# ##############
# script_utils.print_utility("Beta and Pb")
# adjust_BetaPb.adjust_BetaPb_file(bolo_name, d_cut, analysis_type, d_event_type_num_event)
# BetaPb_path = gen_path + "BDT_" + bolo_name + "/" +analysis_type + "/Beta_and_Pb/ROOT_files/" + bolo_name + "_"
# list_BetaPb_files = [BetaPb_path + event_type + "_tree.root" for event_type in d_event_type_num_event.keys() if ("Beta" in event_type or "Pb" in event_type) ]
# list_subp_args = ["g++", "-o", "../Event_generation/Beta_and_Pb_generation/essai.exe", "../Event_generation/Beta_and_Pb_generation/build_BetaPb_tree.C", "-Wl,--no-as-needed"]
# list_subp_args.extend(cflags.split(" "))
# list_subp_args.extend(libs.split(" "))
# try :
# assert( script_utils.bool_file_exist(list_BetaPb_files) )
# if d_overwrite["BetaPb"] == 1:
# subp.call(list_subp_args)
# subp.call("../Event_generation/Beta_and_Pb_generation/essai.exe")
# except AssertionError:
# subp.call(list_subp_args)
# subp.call("../Event_generation/Beta_and_Pb_generation/essai.exe")
# script_utils.print_utility("Done")
# #########
# #Gamma
# #########
# script_utils.print_utility("Gamma")
# adjust_Gamma.adjust_Gamma_file(bolo_name, d_cut, analysis_type, d_event_type_num_event)
# Gamma_path = gen_path + "BDT_" + bolo_name + "/" +analysis_type + "/Gamma/ROOT_files/" + bolo_name + "_"
# list_Gamma_files = [Gamma_path + event_type + "_tree.root" for event_type in d_event_type_num_event.keys() if "Gamma" in event_type]
# list_subp_args = ["g++", "-o", "../Event_generation/Gamma_generation/essai.exe", "../Event_generation/Gamma_generation/build_Gamma_tree.C", "-Wl,--no-as-needed"]
# list_subp_args.extend(cflags.split(" "))
# list_subp_args.extend(libs.split(" "))
# try :
# assert( script_utils.bool_file_exist(list_Gamma_files) )
# if d_overwrite["Gamma"] == 1:
# subp.call(list_subp_args)
# subp.call("../Event_generation/Gamma_generation/essai.exe")
# except AssertionError:
# subp.call(list_subp_args)
# subp.call("../Event_generation/Gamma_generation/essai.exe")
# script_utils.print_utility("Done")
# sys.exit()
# ###########
# #Heatonly
# ############
# script_utils.print_utility("Heatonly")
# adjust_heat.adjust_heat_file(bolo_name, d_cut, analysis_type, d_event_type_num_event["heatonly"])
# heatonly_path = gen_path + "BDT_" + bolo_name + "/" +analysis_type + "/Heatonly/ROOT_files/" + bolo_name + "_"
# heatonly_txt_path = gen_path + "BDT_" + bolo_name + "/" +analysis_type + "/Heatonly/Text_files/" + bolo_name + "_"
# list_heatonly_files = [heatonly_path + "heatonly_tree.root", heatonly_txt_path + "heatonly_2D_time.txt"]
# list_subp_args = ["g++", "-o", "../Event_generation/Heatonly_generation/essai.exe", "../Event_generation/Heatonly_generation/build_heatonly_tree_fromhist.C", "-Wl,--no-as-needed"]
# list_subp_args.extend(cflags.split(" "))
# list_subp_args.extend(libs.split(" "))
# try :
# assert( script_utils.bool_file_exist(list_heatonly_files) )
# if d_overwrite["Heatonly"] == 1:
# # heatonly_KDE.generate_heatonly_from_KDE(bolo_name, analysis_type, d_event_type_num_event["heatonly"])
# subp.call(list_subp_args)
# subp.call("../Event_generation/Heatonly_generation/essai.exe")
# except AssertionError:
# # heatonly_KDE.generate_heatonly_from_KDE(bolo_name, analysis_type, d_event_type_num_event["heatonly"])
# subp.call(list_subp_args)
# subp.call("../Event_generation/Heatonly_generation/essai.exe")
# script_utils.print_utility("Done")
# ############################
# #Simulated data events
# ############################
# script_utils.print_utility("simulated data")
# adjust_true_events.adjust_true_events_file(bolo_name, d_cut, analysis_type, nsimu, exposure)
# true_events_path = gen_path + "BDT_" + bolo_name + "/" +analysis_type + "/True_events/ROOT_files/" + bolo_name + "_"
# list_true_events_files = [true_events_path + "true_events_tree.root"]
# list_subp_args = ["g++", "-o", "../Event_generation/True_events_generation/essai.exe", "../Event_generation/True_events_generation/build_true_events_tree.C", "-Wl,--no-as-needed"]
# list_subp_args.extend(cflags.split(" "))
# list_subp_args.extend(libs.split(" "))
# try :
# assert( script_utils.bool_file_exist(list_true_events_files) )
# if d_overwrite["True"] == 1:
# subp.call(list_subp_args)
# subp.call("../Event_generation/True_events_generation/essai.exe")
# except AssertionError:
# subp.call(list_subp_args)
# subp.call("../Event_generation/True_events_generation/essai.exe")
# script_utils.print_utility("Done")
# ############################
# #MVA scaling
# ############################
# script_utils.print_utility("MVA scaling")
# MVA_scaling_path = Ana_path + "Analyse_" + bolo_name + "/Text_files/" + bolo_name + "_MVA_scaling_" +analysis_type + ".txt"
# list_MVA_scaling_file = [MVA_scaling_path]
# list_subp_args = ["g++", "-o", "../Event_generation/True_events_generation/essai.exe", "../Event_generation/True_events_generation/get_MVA_scaling.C", "-Wl,--no-as-needed"]
# list_subp_args.extend(cflags.split(" "))
# list_subp_args.extend(libs.split(" "))
# try :
# assert( script_utils.bool_file_exist(list_MVA_scaling_file) )
# if d_overwrite["MVA_scaling"] == 1:
# adjust_MVA_scaling.adjust_MVA_scaling_file(bolo_name, d_cut, analysis_type, 10000)
# subp.call(list_subp_args)
# subp.call("../Event_generation/True_events_generation/essai.exe")
# except AssertionError:
# adjust_MVA_scaling.adjust_MVA_scaling_file(bolo_name, d_cut, analysis_type, 10000)
# subp.call(list_subp_args)
# subp.call("../Event_generation/True_events_generation/essai.exe")
# script_utils.print_utility("Done")
#################
# WIMP events
# ###############
    # --- WIMP event generation: compile and run the ROOT tree builder ---
    script_utils.print_utility("WIMP")
    adjust_WIMP.adjust_WIMP_files(bolo_name, d_cut, analysis_type, d_event_type_num_event)
    #Generate the event tree
    list_subp_args = ["g++", "-o", "../Event_generation/WIMP_generation/essai.exe", "../Event_generation/WIMP_generation/build_WIMP_tree.C", "-Wl,--no-as-needed"]
    list_subp_args.extend(cflags.split(" "))
    list_subp_args.extend(libs.split(" "))
    if d_overwrite["WIMP"] == 1:
        subp.call(list_subp_args)
        subp.call("../Event_generation/WIMP_generation/essai.exe")
    # NOTE(review): this sys.exit() terminates the whole process here, so
    # the MVA stages below never run — looks like a leftover debugging
    # stop; confirm before removing.
    sys.exit()
    # #Generate the no cut event tree
    # list_subp_args = ["g++", "-o", "../Event_generation/WIMP_generation/essai.exe", "../Event_generation/WIMP_generation/build_WIMP_tree_nocut.C", "-Wl,--no-as-needed"]
    # list_subp_args.extend(cflags.split(" "))
    # list_subp_args.extend(libs.split(" "))
    # if d_overwrite["WIMP_nocut"] == 1:
    #     subp.call(list_subp_args)
    #     subp.call("../Event_generation/WIMP_generation/essai.exe")
    # script_utils.print_utility("Done")
    # #############
    # #MVA analysis
    # #############
    script_utils.print_utility("Launching MVA analysis")
    adjust_MVA.adjust_MVA_files(bolo_name, analysis_type, nsimu)
    list_subp_args = ["g++", "-o", "../MVA/essai.exe", "../MVA/TMVAClassification.C"]
    list_subp_args.extend(cflags.split(" "))
    list_subp_args.extend(libs.split(" "))
    list_subp_args.append("-lTMVA")
    # Launch TMVAClassification
    subp.call(list_subp_args)
    subp.call("../MVA/essai.exe")
    #Launch TMVAindividual (same flags, only the source file changes)
    list_subp_args[3] = "../MVA/TMVAindividual.C"
    subp.call(list_subp_args)
    subp.call("../MVA/essai.exe")
    # #Launch TMVAtruedata
    # list_subp_args[3] = "../MVA/TMVAtruedata.C"
    # subp.call(list_subp_args)
    # subp.call("../MVA/essai.exe")
    # script_utils.print_utility("Done")
    # #Launch TMVAtruedata
    # list_subp_args[3] = "../MVA/TMVAtruedata_xcheck.C"
    # subp.call(list_subp_args)
    # subp.call("../MVA/essai.exe")
    # script_utils.print_utility("Done")
    #Launch TMVArealtruedata (= real non simulated data)
    list_subp_args[3] = "../MVA/TMVArealtruedata.C"
    subp.call(list_subp_args)
    subp.call("../MVA/essai.exe")
    script_utils.print_utility("Done")
# #Launch TMVArealtruedata (= real non simulated data with strict veto cut)
# list_subp_args[3] = "../MVA/TMVArealtruedata_xcheck.C"
# subp.call(list_subp_args)
# subp.call("../MVA/essai.exe")
# script_utils.print_utility("Done")
# #Launch TMVArealtrueneutron (= real non simulated data with strict veto cut)
# list_subp_args[3] = "../MVA/TMVArealtrueneutron.C"
# subp.call(list_subp_args)
# subp.call("../MVA/essai.exe")
# script_utils.print_utility("Done")
# --- run configuration ------------------------------------------------------
bolo_name = "FID837"
#convention ana_u_v_w_x : cut @ u keV Heat, v sigma heat only ion band width, w sigma veto
analysis_type = "ana_min2_min2_5"
FWHM_type = "standard_resolution"
# inf/sup cuts on heat (EC) and ionisation (EI), plus veto width in sigma
d_cut = {"ECinf": -2, "ECsup": 15, "EIinf": -2, "EIsup": 15, "sigma_vet": 5}
# 1 = regenerate the corresponding intermediate files even if they exist
d_overwrite = {"Gamma": 1, "BetaPb": 1, "Heatonly":1, "WIMP":1, "WIMP_nocut":0, "True":0, "MVA_scaling":1, "Extra":0}
#Heat cut at min2 min2 keV (enormous increase of 3GeV data)
# number of events to simulate per background/WIMP-mass population
d_event_type_num_event ={"S1Beta":600000, "S2Beta":600000, "S1Pb":300000, "S2Pb":250000,
                        "S1Gamma":1200000, "S2Gamma":1200000, "FidGamma":80000, "heatonly":200000,
                        "3GeV":40, "4GeV":4800, "5GeV":14000, "6GeV":800000, "7GeV":400000, "10GeV":250000, "25GeV":120000}
# #Heat cut at 0.5 keV (enormous increase of 3GeV data)
# d_event_type_num_event ={"S1Beta":600000, "S2Beta":600000, "S1Pb":300000, "S2Pb":250000,
#                         "S1Gamma":1200000, "S2Gamma":1200000, "FidGamma":160000, "heatonly":800000,
#                         "3GeV":80000000, "4GeV":4800000, "5GeV":1400000, "6GeV":800000, "7GeV":400000, "10GeV":250000, "25GeV":120000}
# #Heat cut at 1.5 keV
# d_event_type_num_event ={"S1Beta":1200000, "S2Beta":2000000, "S1Pb":300000, "S2Pb":280000,
#                         "S1Gamma":20000000, "S2Gamma":100000000, "FidGamma":100000, "heatonly":5000000,
#                         "3GeV":1, "4GeV":1, "5GeV":200000000, "6GeV":10000000, "7GeV":5000000, "10GeV":750000, "25GeV":150000}
# WIMP masses in GeV
list_mass = [3,4,5,6,7,10,25]
# list_mass = [5,6,7,10,25]
nsimu = 10
exposure = 66
launch_analysis(bolo_name, analysis_type, d_cut, d_overwrite, nsimu, exposure, d_event_type_num_event, list_mass) |
987,222 | 76e7c03215f969d8635c42b931451a756f53f6a8 | # coding=utf8
import pytest
# coding=utf8
import pytest
# With autouse=True every test would use this fixture automatically
# (function scope). To receive the value produced by yield, pass the
# fixture name "login" as a test parameter instead.
@pytest.fixture(params=["tom","jerry"])
def login(request):
    # acts as the setup phase
    print("登录操作")
    # yield behaves like return: the value below is handed to the test,
    # and the code after yield runs as teardown
    # yield ["1111","2222"]
    username= request.param
    yield username
    print("登出操作")
@pytest.fixture
def conn_db():
    # setup: open the database connection
    print("数据库连接!!!")
    yield
    # teardown: close the database connection
    print("数据库断开连接~~~")
def test_case1(): #pass the fixture name "login" as a parameter to receive the yielded value
    print("用例1")
def test_case2(login):
    print(login) #print the value yielded by the fixture
    print("用例2")
@pytest.mark.usefixtures("conn_db") #with the decorator form the yielded value cannot be accessed
def test_case3():
print("用例3") |
987,223 | fcc1953db017bafd949161c1decf4f25a17e0fa0 | #python 3.7.0 64-bit
#py -3 -m pip install numpy==1.19.3
#py -3 -m pip install spacy==2.1.0
#py -3 -m pip install neuralcoref==4.0
#py -3 -m spacy download en_core_web_sm
#py -3 -m spacy download en_core_web_lg
import sys
import spacy
import neuralcoref
import re
import random
def get_aux_bin(sent):
    """Build a yes/no question by fronting an auxiliary verb
    ("She can swim" -> "Can she swim?"); returns None when the sentence
    has no ROOT verb or no subject+auxiliary pair."""
    subj_found = False
    verb_found = False
    question = None
    for idx, token in enumerate(sent):
        #locate verb index
        if token.dep_ == "ROOT":
            verb_found = True
    #locate subj index
    if verb_found:
        for idx, token in enumerate(sent):
            if token.dep_ == "nsubj":
                subj_found = True
                subj_index = idx
            if subj_found and token.pos_ == "AUX" and token.dep_ == "aux":
                question = str(token).capitalize() + " " + str(sent[subj_index]).lower() + " " + str(sent[idx+1:])
                if question[-1] == '\n':
                    # NOTE(review): rstrip's result is discarded (strings are
                    # immutable), so this line is a no-op; the [:-1] below
                    # removes the trailing newline instead — confirm intent.
                    question.rstrip('\n')
                    question = question[:-1].strip(".") + "?"
    return question
def get_vb_bin(sent): #not done yet
    """Stub for do/does/did questions; currently ALWAYS returns None —
    the partially-built `question` is never used."""
    subj_found = False
    verb_found = False
    question = None
    for idx, token in enumerate(sent):
        if token.dep_ == "ROOT":
            verb_found = True
            verb_index = idx
    if verb_found:
        for idx, token in enumerate(sent):
            if token.dep_ == "nsubj":
                subj_found = True
                subj_index = idx
            # VBZ --> Does, VBP --> Do, VBD --> Did
            if subj_found and token.pos_ == 'VERB':
                if token.tag_ == "VBZ":
                    question = "Does" + " " + str(sent[subj_index])
    return None
def get_who(sent): #also gets what
    """Build a Who/What question by replacing a PERSON/ORG subject with
    the question word; returns None (implicitly) when no ROOT verb,
    subject, and recognised entity type are all present."""
    verb_found = False
    subj_found = False
    type_found = False
    #locate verb index
    for idx, token in enumerate(sent):
        if token.dep_ == "ROOT":
            #verb_index = idx
            verb_found = True
    #locate subj index
    for idx, token in enumerate(sent):
        if token.dep_ == "nsubj":
            subj_index = idx
            subj_found = True
            # ORG subjects become "What", PERSON subjects become "Who"
            if token.ent_type_ == "ORG":
                question_type = "What"
                type_found = True
            if token.ent_type_ == "PERSON":
                question_type = "Who"
                type_found = True
    if verb_found and subj_found and type_found:
        question = question_type + " " + str(sent[subj_index+1]) + " " + str(sent[subj_index+2:])
        if question[-1] == "\n":
            # NOTE(review): no-op — the rstrip result is discarded; the
            # unconditional [:-1] below drops the last char either way.
            question.rstrip("\n")
        question = question[:-1] + "?"
        return question
def get_what(sent): #feel like this is not needed since it's covered in get_who
    # Placeholder: "what" questions are produced by get_who; the draft
    # below is disabled (triple-quoted out) and this always returns None.
    '''
    verb_found = False
    subj_found = False
    type_found = False
    for idx, token in enumerate(sent):
        if token.dep_ == "ROOT":
            verb_found = True
    for idx, token in enumerate(sent):
    '''
    return None
def get_where(sent): #
    # Placeholder: the draft "where"-question logic below is disabled
    # (triple-quoted out); this currently always returns None.
    '''
    subj_found = False
    type_found = False
    verb_found = False
    start = None
    end = None
    prepositions = ["at", "in", "from", "to", "on"]
    for ent in sent.ents:
        if (ent.label_ == "GPE" or ent.label_ == "LOC" or ent.label_ == "ORG"):
            print(ent)
            prev_word = str(sent[ent.start-1]) # <-- error here for some reason
            print(prev_word, ent, ent.start)
            if prev_word in prepositions:
                print(prev_word)
    '''
    return None
def get_when(sent):
    """Placeholder for "when" question generation; not implemented yet,
    so every sentence yields None."""
    return None
#not tested yet
def generate_questions(doc):
    """Generate candidate questions from every sentence of a spaCy doc.

    Sentences containing parentheses, or lacking a sentence-final period,
    are skipped entirely. Each remaining sentence is run through every
    enabled question builder; builders that cannot handle the sentence
    return None and are filtered out.

    Returns a list of question strings.
    """
    question_list = []
    #parse/tokenize document
    for sent in doc.sents:
        token_list = [token.text for token in sent]
        # BUG FIX: these guards previously ended in `pass`, which is a
        # no-op, so parenthesised sentences were still processed whenever
        # they contained a period. `continue` skips them as intended.
        if ("(" in token_list or ")" in token_list):
            continue
        if "." not in token_list:
            continue
        #if some other rule
        temp_qs = [
            get_aux_bin(sent),
            get_vb_bin(sent),
            # get_who(sent) intentionally disabled, as in the original
            get_what(sent),
            get_where(sent),
            get_when(sent),
        ]
        question_list.extend(q for q in temp_qs if q is not None)
    return question_list
def print_questions(question_list, n_questions):
    """Print *n_questions* entries drawn at random, without replacement,
    from *question_list* (the list is mutated in place). Prints an error
    message instead when the list is too short."""
    if len(question_list) < n_questions:
        print("Error: Not enough questions generated from text.")
        return
    for _ in range(n_questions):
        pick = random.randint(0, len(question_list) - 1)
        print(question_list.pop(pick))
def main():
    """CLI entry: ./ask article.txt nquestions — generate questions from
    an article and print a random selection of them."""
    print("Starting...")
    if len(sys.argv) != 3:
        print("**USAGE ERROR*** ./ask article.txt nquestions")
        sys.exit(1)
    article_text = sys.argv[1]
    n_questions = sys.argv[2]
    n_questions = int(n_questions)
    with open(article_text, 'r', encoding = 'utf8') as f:
        text = f.read()
    newtext = text.split('\n\n') #list of sections
    # small English pipeline + coreference resolution component
    nlp = spacy.load('en_core_web_sm')
    neuralcoref.add_to_pipe(nlp)
    print("makes past neuralcoref")
    final_question_list = []
    #newtext is a list of sections split by double newlines; running nlp
    #per section keeps documents small
    for section in newtext:
        #print("NEW SECTION")
        #run nlp on each section
        doc = nlp(section)
        #print("SECTION DONE")
        #doc = doc._.coref_resolved
        question_list = generate_questions(doc)
        for question in question_list:
            final_question_list.append(question)
    #question_list = generate_questions(text)
    #question_list_test = ["what is my name?", "who are you?", "sup?", "gang?"]
    #function to print selected questions from question_list
    #print_questions(question_list_test, n_questions)
    print("_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\n")
    print_questions(final_question_list, n_questions)
if __name__ == "__main__":
    main()
|
987,224 | a9997307f82838f57ee90bbc6ad4d5bab13cc31b | import time
def compute(n):
    """Populate the global ``dic`` with every i in [2, n] whose totient
    phi(i) is a digit permutation of i (Project Euler 70).

    phi is computed for all values up to n with a sieve: for each prime p
    (detected by phi[p] still equal to p), apply Euler's product factor
    (p - 1) / p to p and all of its multiples.
    """
    phi = list(range(n + 2))
    for p in range(2, n + 1):
        if phi[p] == p:  # p is prime: untouched by any smaller prime
            phi[p] = p - 1
            for i in range(2 * p, n + 1, p):
                phi[i] = (phi[i] // p) * (p - 1)
    for i in range(2, n + 1):
        # permutation test: equal sorted digit multisets
        if sorted(str(i)) == sorted(str(phi[i])):
            dic[i] = phi[i]


def main(n=10000000):
    """Time the search and print the i that minimises i / phi(i)."""
    start = time.time()
    compute(n)
    print(min(dic, key=lambda x: x / dic[x]))
    print(time.time() - start)


# i -> phi(i) for every permuted-totient pair found (filled by compute)
dic = dict()

# Guarded so importing this module does not trigger the 10^7 sieve.
if __name__ == "__main__":
    main()
987,225 | f4dac99bd494a9f202b7da1ed265f52a817775c8 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 1 00:26:04 2016
@author: hy
"""
import tensorflow as tf
# Build a tiny TensorFlow 1.x graph computing mul = input1 * (input2 + input3).
# NOTE(review): legacy code — the Python 2 print statement just below and
# tf.mul (renamed tf.multiply in TF >= 1.0) mean this only runs on an old
# Python 2 / TF 0.x stack.
input1=tf.constant(3.0)
input2=tf.constant(2.0)
input3=tf.constant(5.0)
intermed=tf.add(input2,input3)
mul=tf.mul(input1,intermed)
# Session.run with a list fetches both tensors in one graph execution.
with tf.Session() as sess:
    result=sess.run([mul,intermed])
print result |
987,226 | 525374d02696c5f78c771fdcc6685c826023fd21 |
# 输入函数:input
# num1 = input("num >>>")
# num2 = input("num >>>")
# print(int(num1)+int(num2))
# 输出函数:print
# print("yuan",end="")
# print("alvin")
print("张三","李四",sep=":::")
|
987,227 | 50238be2d274446ab842353ed93558372911ea32 | import numpy as np
import torch
class DataNormalizer(object):
    """Computes affine scale/shift factors that map spectrogram magnitude
    and IF channels into roughly [-margin, margin], by scanning the whole
    training loader once at construction time."""
    def __init__(self, dataloader):
        self.dataloader = dataloader
        self._range_normalizer(magnitude_margin=0.8, IF_margin=1.0)
        print("s_a:", self.s_a )
        print("s_b:", self.s_b )
        print("p_a:", self.p_a)
        print("p_b:", self.p_b)
    def _range_normalizer(self, magnitude_margin, IF_margin):
        """Scan the train loader for global min/max of the mel spectrogram
        and mel IF, then derive x -> a*x + b factors mapping
        [min, max] to [-margin, margin]."""
        # sentinels wide enough for the expected data range
        min_spec = 10000
        max_spec = -10000
        min_IF = 10000
        max_IF = -10000
        for batch_idx, (spec, IF, pitch_label, mel_spec, mel_IF) in enumerate(self.dataloader.train_loader):
            # training mel: the raw spec/IF are ignored in favour of mel
            spec = mel_spec
            IF = mel_IF
            if spec.min() < min_spec: min_spec=spec.min()
            if spec.max() > max_spec: max_spec=spec.max()
            if IF.min() < min_IF: min_IF=IF.min()
            if IF.max() > max_IF: max_IF=IF.max()
        self.s_a = magnitude_margin * (2.0 / (max_spec - min_spec))
        self.s_b = magnitude_margin * (-2.0 * min_spec / (max_spec - min_spec) - 1.0)
        self.p_a = IF_margin * (2.0 / (max_IF - min_IF))
        self.p_b = IF_margin * (-2.0 * min_IF / (max_IF - min_IF) - 1.0)
    def normalize(self, feature_map):
        """Apply the per-channel affine map to a feature map.
        Assumes feature_map is a CUDA tensor shaped (batch, 2, H, W) with
        channel 0 = magnitude, channel 1 = IF — TODO confirm against caller."""
        a = np.asarray([self.s_a, self.p_a])[None, :, None, None]
        b = np.asarray([self.s_b, self.p_b])[None, :, None, None]
        a = torch.FloatTensor(a).cuda()
        b = torch.FloatTensor(b).cuda()
        feature_map = feature_map *a + b
        return feature_map
def denormalize(spec, IF, s_a, s_b, p_a, p_b):
    """Invert the affine normalisation (x_norm = a * x + b) for the
    magnitude (s_a, s_b) and IF (p_a, p_b) channels."""
    spec_raw = (spec - s_b) / s_a
    if_raw = (IF - p_b) / p_a
    return spec_raw, if_raw
987,228 | efbe83307a35e9e2623d46766c86605323d3026e | from bson import ObjectId
from bson.errors import InvalidId
def id_generator(doctype):
    """Build a loader that fetches one document by its ObjectId string.

    The returned callable maps an oid string to a wrapped ``doctype``
    instance, or None when doctype/oid is falsy, the oid is not a valid
    ObjectId, or no matching record exists.
    """
    def id_gen(oid):
        if doctype is None or not oid:
            return None
        try:
            data = doctype.datatype.one({'_id':ObjectId(oid)})
            if data:
                return doctype(data)
            return None
        except InvalidId:
            # malformed ObjectId string
            return None
    return id_gen
def index_generator(doctype):
    """Build a lookup helper wrapping ``doctype.datatype.find_one``.

    The returned callable forwards its arguments to find_one and wraps a
    hit in ``doctype``; a miss (falsy result) yields None.
    """
    def index_gen(*args, **kwargs):
        record = doctype.datatype.find_one(*args, **kwargs)
        return doctype(record) if record else None
    return index_gen
# Registry mapping class names and aliases to document classes.
__cls_mapper = dict()
def cls_mapper_reg(cls_obj):
    """Class decorator: register ``cls_obj`` under its cls_name and under
    every alias in cls_alias (a single string or an iterable of strings).
    Returns cls_obj unchanged so it can wrap a class definition."""
    __cls_mapper[cls_obj.cls_name] = cls_obj
    cls_alias = cls_obj.cls_alias
    # basestring: this module targets Python 2
    if isinstance(cls_alias, basestring):
        __cls_mapper[cls_alias] = cls_obj
    elif isinstance(cls_alias, tuple) or isinstance(cls_alias, list):
        for each in cls_alias:
            __cls_mapper[each] = cls_obj
    return cls_obj
def cls_gen(objtype):
    """Resolve a registered document class from a (case-insensitive) type
    name; returns None for unknown names or non-string input."""
    try:
        key = objtype.lower()
    except AttributeError:
        # objtype is not string-like
        return None
    return __cls_mapper.get(key)
def generator(objid, objtype):
    """Load one document given its id string and type name; None when the
    type is unknown or the id does not resolve."""
    return id_generator(cls_gen(objtype))(objid)
def list_generator(objinfos):
    '''input list of [objid, objtype]
    output list of objs, None if not exist

    Batches the lookups: ids are grouped per document class so each class
    issues a single $in query, and results are re-ordered to match the
    input (invalid or unresolved entries become None).
    '''
    cls_map = dict()   # class name -> list of ObjectIds to fetch
    objs_map = dict()  # str(ObjectId) -> loaded object
    if not isinstance(objinfos, list):
        objinfos = list(objinfos)
    # Validate entries up front; invalid ones become None placeholders so
    # output positions still line up with the input.
    objinfos = [(ObjectId(each[0]), each[1])
            if each and len(each) == 2 and ObjectId.is_valid(each[0])
            else None
            for each in objinfos]
    for each in objinfos:
        if each is None:
            continue
        tmp_cls = cls_gen(each[1])
        if tmp_cls is None:
            continue
        tmp_cls_name = tmp_cls.cls_name
        if tmp_cls_name not in cls_map:
            cls_map[tmp_cls_name] = list([each[0]])
        else:
            cls_map[tmp_cls_name].append(each[0])
    # One $in query per class, keyed back by each object's uid
    for ek, ev in cls_map.items():
        ccls = cls_gen(ek)
        tmps = [ccls(data=each) for each in ccls.datatype.find(
            {'_id':{'$in':ev}})]
        objs_map.update((each.uid, each) for each in tmps)
    return [objs_map[str(each[0])]
            if each and str(each[0]) in objs_map
            else None
            for each in objinfos]
def ungenerator(obj):
    """Inverse of ``generator``: reduce an object to its (id, type-name) pair."""
    return (obj._id, obj.cls_name)
|
987,229 | f3e0a17dda7c95527940d1301cd55279d816730c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Author: Sergio Araujo
# Last Change: 2018 mai 24 17:44
# Created: qui 26 jan 2017 07:22:20 BRT
# email: <voyeg3r ✉ gmail.com>
# Github: https://github.com/voyeg3r
# twitter: @voyeg3r
# References:
# the first version of this script came from:
# https://gist.github.com/fmasanori/4673017
# to get the file:
# lynx -dump http://www.gutenberg.org/cache/epub/11/pg11.txt > alice.txt
# Count how often the word "alice" occurs in the book and report it.
with open('alice.txt', 'r') as f:
    words = f.read().lower().split()

# list.count scans the list once; the original dict comprehension called
# f.count(i) for EVERY occurrence of the word (accidentally quadratic),
# and crashed with StopIteration when the word was absent.
name = 'alice'
num = words.count(name)
print(f'{name} aparece {num} vezes no texto')
|
987,230 | 698a0fc43f2dfa304ef5b4443bacc6ae2ab45474 | import os
import csv
# Demo: dump os.stat metadata for a CSV, list csv dialects, then print the
# first column of every non-empty row.  Path is Windows-specific.
zFile = 'C:\\Users\\viresh.patel\\Documents\\Excel Docs\\global_superstore.csv'
zInfo = os.stat(zFile)
print(zInfo)
print(zInfo.st_atime)   # last access time
print(zInfo.st_ctime)   # metadata-change (Windows: creation) time
print(zInfo.st_dev)
# print(zInfo.st_file_attributes)
print(zInfo.st_gid)
print(zInfo.st_ino)
print(zInfo.st_mode)
print(zInfo.st_mtime)   # last modification time
print(zInfo.st_nlink)
print(zInfo.st_size)    # size in bytes
print(zInfo.st_uid)
# Registered csv dialects (excel, excel-tab, unix, ...).
for zLang in csv.list_dialects():
    print(zLang)
zFileR = open(zFile, 'r')
reader = csv.reader(zFileR)
# Print the first field of each non-empty row.
for line in reader:
    if len(line) != 0:
        print(line[0])
zFileR.close()
|
987,231 | 317a112d47850fe2140b293ecca2a97bb3ca4f24 | import graphene
from graphene_django import DjangoObjectType
from ..apps.data.models import Project
class Projects(DjangoObjectType):
    """GraphQL object type auto-derived from the Project Django model."""
    class Meta:
        model = Project
class ProjectQuery(graphene.ObjectType):
    """Root GraphQL query fields for projects."""
    get_all_projects = graphene.List(Projects)
    get_project_by_id = graphene.Field(Projects, id=graphene.String())
    def resolve_get_all_projects(self, args):
        """Return every Project row."""
        return Project.objects.all()
    def resolve_get_project_by_id(self, args, **kwargs):
        """Look up a Project by its unique_id; None when missing or id absent."""
        try:
            return Project.objects.get(unique_id=kwargs["id"])
        except (Project.DoesNotExist, KeyError):
            return None
|
987,232 | 5dc3308cbc0c24fd4d562e679319a760bd5eba85 | import os
import gdal
from glob import glob
# Working directory holding the CSV point files to be gridded (Windows path).
dir_with_csvs = r"C:/anhHoangKTTV/PythonScripts_DFS0/Project_NTB/"
os.chdir(dir_with_csvs)
def find_csv_filenames(path_to_dir, suffix=".csv"):
    """Return relative paths of every file in *path_to_dir* ending in *suffix*.

    *path_to_dir* must end with a path separator (callers pass it that way).
    The original declared *suffix* but always globbed "*.csv"; the parameter
    is now honoured -- the default keeps the previous behaviour.
    """
    pattern = path_to_dir + "*" + suffix
    print(pattern)
    # filenames = glob.glob(relativePath)
    filenames = [os.path.relpath(x) for x in glob(pattern)]
    return filenames
csvfiles = find_csv_filenames(dir_with_csvs)
for fn in csvfiles:
    # Derive the companion VRT, OGR layer name and output GeoTIFF from the CSV name.
    vrt_fn = fn.replace(".csv", ".vrt")
    lyr_name = fn.replace('.csv', '')
    out_tif = fn.replace('.csv', '.tiff')
    # Write an OGR VRT wrapper exposing the CSV's Lon/Lat/Ref columns as XYZ points.
    with open(vrt_fn, 'w') as fn_vrt:
        fn_vrt.write('<OGRVRTDataSource>\n')
        fn_vrt.write('\t<OGRVRTLayer name="%s">\n' % lyr_name)
        fn_vrt.write('\t\t<SrcDataSource>%s</SrcDataSource>\n' % fn)
        fn_vrt.write('\t\t<GeometryType>wkbPoint</GeometryType>\n')
        fn_vrt.write('\t\t<GeometryField encoding="PointFromColumns" x="Lon" y="Lat" z="Ref"/>\n')
        fn_vrt.write('\t</OGRVRTLayer>\n')
        fn_vrt.write('</OGRVRTDataSource>\n')
    # Interpolate the point layer to a raster with gdal's default algorithm.
    output = gdal.Grid(out_tif,vrt_fn)
    # below using your settings - I don't have sample large enough to properly test it, but it is generating file as well
    # NOTE(review): 'name.vrt' is a fixed filename -- presumably leftover from
    # experimentation; confirm it should not be vrt_fn.
    output2 = gdal.Grid('outcome2.tif','name.vrt', algorithm='invdist:power=2.0:smoothing=1.0')
987,233 | 62e84c17900afdaa8ee39f5ec3d8536d8c5b02a6 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 22:24:24 2020
@author: T SRIDHAR
"""
from TOPSIS-Divij-101816056 import topsis |
987,234 | e3d8d27d292ed0a9b6aa04ecf65eacb5dd3aedd8 | from flask import Blueprint
# Flask blueprint grouping the sample routes; registered by the app factory.
sample = Blueprint('sample', __name__)
#profile = Blueprint('profile', __name__,
#                        template_folder='templates',
#                        static_folder='static')
@sample.route('/hello')
def hello():
    """Simple smoke-test endpoint returning a static greeting."""
    # Do some stuff
    return "Hello , It's a sample page"
987,235 | 38a36d19f136d68b30d47f4090a3e481649677e5 | from base.activity import Activity
from base.titlelayout import TitleLayout
class WebViewActivity(Activity):
    """UI-automation activity that exercises scrolling inside a web view."""
    def __init__(self):
        # Register under the concrete class name, then immediately run the scenario.
        Activity.__init__(self, self.__class__.__name__)
        self.scrollTest()
    def scrollTest(self):
        """Swipe up five times, then call finish() on the title layout.

        NOTE(review): presumably TitleLayout.finish() navigates back/closes
        the screen -- confirm against the base framework.
        """
        title = TitleLayout()
        self.swipeUp(n=5)
        title.finish()
|
987,236 | 0eda77fc4dfed7ee40ac2df8599cb6f391dde76f | #!/usr/bin/python
import jinja2, json, re
from functools import reduce
import subprocess
import nanodurationpy as durationpy
import csv
import time
import datetime
import os
import shutil
import glob
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--wasmoutdir', help='full path of dir containing wasm files')
parser.add_argument('--csvresults', help='full path of csv result file')
# The two help strings below were copy-pasted from an unrelated flag
# ("comma-separated list of engines"); they now describe what the code reads.
parser.add_argument('--rustcodedir', help='full path of dir containing the rust benchmark source folders')
parser.add_argument('--inputvectorsdir', help='full path of dir containing the <bench>-inputs.json vectors')
args = vars(parser.parse_args())

# how many times to run native exec
RUST_BENCH_REPEATS = 50
def get_rust_bytes(hex_str):
    """Render a hex string as a Rust byte-array literal.

    e.g. "0aff" -> "[ 10u8, 255u8 ]".  A trailing odd nibble is ignored,
    matching the original zip-based pairing; an empty input now yields
    "[  ]" instead of crashing (reduce() with no initializer raised
    TypeError on an empty sequence).
    """
    pairs = (hex_str[i:i + 2] for i in range(0, len(hex_str) - 1, 2))
    body = ', '.join('{}u8'.format(int(p, 16)) for p in pairs)
    return '[ ' + body + ' ]'
def bench_rust_binary(rustdir, input_name, native_exec):
    """Run *native_exec* in *rustdir* RUST_BENCH_REPEATS times.

    Parses "Time elapsed in bench() is: <dur>" from the first stdout line of
    each run and returns the elapsed times as a list of seconds (floats).
    """
    print("running rust native {}...\n{}".format(input_name, native_exec))
    bench_times = []
    # range(1, N) only produced N-1 runs; iterate exactly RUST_BENCH_REPEATS times.
    for _ in range(RUST_BENCH_REPEATS):
        rust_process = subprocess.Popen(native_exec, cwd=rustdir, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
        rust_process.wait(None)
        stdoutlines = [str(line, 'utf8') for line in rust_process.stdout]
        print(("").join(stdoutlines), end="")
        elapsedline = stdoutlines[0]
        # raw string so \( \) \. are literal regex escapes, not string escapes
        elapsedmatch = re.search(r"Time elapsed in bench\(\) is: ([\w\.]+)", elapsedline)
        elapsed_time = durationpy.from_str(elapsedmatch[1])
        bench_times.append(elapsed_time.total_seconds())
    return bench_times
def do_rust_bench(benchname, input, rust_code_dir, wasm_out_dir):
    """Fill the rust template for one input vector, build the native and wasm
    targets, copy the wasm artifact to *wasm_out_dir*, and benchmark the
    native binary.

    Returns {'bench_times': [...seconds...], 'exec_size': bytes}, or False
    when the benchmark has no rust source directory.
    """
    #rustsrc = "{}/rust-code/src/bench.rs".format(os.path.abspath(benchname))
    #rustsrc = "{}/rust-code".format(os.path.abspath(benchname))
    rust_code_path = os.path.abspath(os.path.join(rust_code_dir, benchname))
    #rustsrc = "{}/rust-code".format(os.path.abspath(benchname))
    rustsrc = rust_code_path
    #rusttemplate = "{}/src/bench.rs".format(rustsrc)
    rusttemplate = os.path.join(rust_code_path, "src/bench.rs")
    if not os.path.exists(rustsrc):
        return False
    # Work on a throwaway copy so the pristine sources are never modified.
    #filldir = os.path.abspath("{}/rust-code-filled".format(benchname))
    filldir = os.path.abspath(os.path.join("./rust-code-filled/", benchname))
    if os.path.exists(filldir):
        shutil.rmtree(filldir)
    shutil.copytree(rustsrc, filldir)
    # Build the jinja2 context: "input"/"expected" hex strings become rust
    # byte-array declarations; other keys pass through verbatim.
    template_args = {}
    for key in input.keys():
        if key == "name":
            continue
        if key == "input":
            input_len = int(len(input['input']) / 2)
            input_str = "let input: [u8; {}] = {};".format(input_len, get_rust_bytes(input['input']))
            template_args["input"] = input_str
        elif key == "expected":
            expected_len = int(len(input['expected']) / 2)
            expected_str = "let expected: [u8; {}] = {};".format(expected_len, get_rust_bytes(input['expected']))
            template_args["expected"] = expected_str
        else:
            template_args[key] = input[key]
    # fill template if necessary
    # NOTE(review): "> 1" means a single template arg skips rendering -- confirm
    # whether that threshold is intentional.
    if len(template_args.keys()) > 1:
        print("filling template for {}".format(input['name']))
        with open(rusttemplate) as file_:
            template = jinja2.Template(file_.read())
        filledrust = template.render(**template_args)
        #rustfileout = "{}/src/bench.rs".format(filldir)
        rustfileout = os.path.join(filldir, "src/bench.rs")
        with open(rustfileout, 'w') as outfile:
            outfile.write(filledrust)
    # compile rust code (cargo binary names use underscores, not hyphens)
    benchname_rust = benchname.replace("-", "_")
    rust_native_cmd = "cargo build --release --bin {}_native".format(benchname_rust)
    print("compiling rust native {}...\n{}".format(input['name'], rust_native_cmd))
    rust_process = subprocess.Popen(rust_native_cmd, cwd=filldir, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    rust_process.wait(None)
    stdoutlines = [str(line, 'utf8') for line in rust_process.stdout]
    print(("").join(stdoutlines), end="")
    # native binary is at ./target/release/sha1_native
    exec_path = "{}/target/release/{}_native".format(filldir, benchname_rust)
    exec_size = os.path.getsize(exec_path)
    # TODO: get rustc compile time
    # TODO: also build with optimization turned off
    # TODO: run wasm through wasm-gc
    rust_wasm_cmd = "cargo build --release --lib --target wasm32-unknown-unknown"
    print("compiling rust wasm {}...\n{}".format(input['name'], rust_wasm_cmd))
    rust_process = subprocess.Popen(rust_wasm_cmd, cwd=filldir, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    rust_process.wait(None)
    stdoutlines = [str(line, 'utf8') for line in rust_process.stdout]
    print(("").join(stdoutlines), end="")
    # wasm is at ./target/wasm32-unknown-unkown/release/sha1_wasm.wasm
    wasmbin = "{}/target/wasm32-unknown-unknown/release/{}_wasm.wasm".format(filldir, benchname_rust)
    wasmdir = os.path.abspath(wasm_out_dir)
    wasmoutfile = os.path.join(wasmdir, "{}.wasm".format(input['name']))
    if not os.path.exists(wasmdir):
        os.mkdir(wasmdir)
    shutil.copy(wasmbin, wasmoutfile)
    # TODO: get cargo build compiler time and report along with exec time.
    # run rust binary
    native_times = bench_rust_binary(filldir, input['name'], "./target/release/{}_native".format(benchname_rust))
    return { 'bench_times': native_times, 'exec_size': exec_size }
def saveResults(native_benchmarks, result_file):
    """Write the benchmark results to *result_file* as CSV.

    Any pre-existing results file is first moved into a freshly created,
    timestamp-named sibling folder so old runs are preserved.
    """
    #result_file = os.path.join(RESULT_CSV_OUTPUT_PATH, RESULT_CSV_FILENAME)
    # move existing files to old-datetime-folder
    now = time.time()
    stamp = "{}-{}".format(
        datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d'), round(now))
    backup_dir = os.path.join(os.path.dirname(result_file), stamp)
    os.makedirs(backup_dir)
    #for file in glob.glob(r"{}/*.csv".format(RESULT_CSV_OUTPUT_PATH)):
    #    print("backing up existing {}".format(file))
    #    shutil.move(file, dest_backup_path)
    if os.path.isfile(result_file):
        print("backing up existing {}".format(result_file))
        shutil.move(result_file, backup_dir)
    print("existing csv file backed up to {}".format(backup_dir))
    with open(result_file, 'w', newline='') as bench_result_file:
        writer = csv.DictWriter(
            bench_result_file,
            fieldnames=['test_name', 'elapsed_times', 'native_file_size'])
        writer.writeheader()
        for test_name, test_results in native_benchmarks.items():
            writer.writerow({
                "test_name": test_name,
                "elapsed_times": ", ".join(str(t) for t in test_results['bench_times']),
                "native_file_size": test_results['exec_size'],
            })
def main():
    """Entry point: for every benchmark folder under --rustcodedir, run each
    input vector from --inputvectorsdir and save the timings to --csvresults."""
    wasm_out_dir = args['wasmoutdir']
    csv_file_path = args['csvresults']
    rust_code_dir = args['rustcodedir']
    input_vectors_dir = args['inputvectorsdir']
    # Every sub-directory of rust_code_dir is treated as one benchmark.
    rustcodes = [dI for dI in os.listdir(rust_code_dir) if os.path.isdir(os.path.join(rust_code_dir,dI))]
    #benchdirs = [dI for dI in os.listdir('./') if os.path.isdir(os.path.join('./',dI))]
    native_benchmarks = {}
    for benchname in rustcodes:
        if benchname in ["__pycache__"]:
            continue
        print("start benching: ", benchname)
        #rust_code_path = os.path.join(RUST_CODES_DIR, benchname)
        ## TODO: move input vectors to their own "standalone" folder
        # use "ewasm" folder
        inputvecs_path = os.path.join(input_vectors_dir, "{}-inputs.json".format(benchname))
        with open(inputvecs_path) as f:
            bench_inputs = json.load(f)
        for input in bench_inputs:
            print("bench input:", input['name'])
            # do_rust_bench returns False when the benchmark has no rust source
            native_input_times = do_rust_bench(benchname, input, rust_code_dir, wasm_out_dir)
            if native_input_times:
                native_benchmarks[input['name']] = native_input_times
            print("done with input:", input['name'])
        print("done benching: ", benchname)
    print("got native_benchmarks:", native_benchmarks)
    saveResults(native_benchmarks, csv_file_path)
if __name__ == "__main__":
    main()
987,237 | 3260da175b41644aeaf00c743ae946ea0789e0b5 | from KMCLib import *
from .constant import *
from .rate import *
# rate_constant set here refers to prefactor
# Desorption processes, keyed by species then adsorption site.
p_des = { 'N2O':{} }
# N2O desorbing from a 'top' site leaves an 'o' behind at the origin.
p_des['N2O']['top'] = [KMCProcess(
    coordinates = [Origin],
    elements_before = ['top'],
    elements_after = ['o'],
    basis_sites = [Bp_map['t']],
    rate_constant = pre_des('N2O', A_uc, Vib_ad['N2O']['top']))]
# Tag the process so it can be identified in analysis output.
p_des['N2O']['top'][-1].name = ('des', 'N2O', 'top', 't')
|
987,238 | f3a18dadc546cfbd3d1c0be66d55e1ed707ff860 | #! /usr/bin/env python
import textile
import sys, os, shutil, random
from optparse import OptionParser
from BeautifulSoup import BeautifulSoup
from Cheetah.Template import Template
from xml.sax.saxutils import escape
# Site-generator version and filesystem layout (hard-coded install prefix).
VERSION = '0.1.1'
HOME = '/home/curtis/working/reptile'
TEMPLATE = HOME + '/templates/reptile.tpl'
# FIXME - bad name here
CSS = HOME + '/css'
class TextileFile:
    """A single textile source file rendered to HTML (Python 2).

    On construction the file is read, <notextile> blocks are protected,
    textile is rendered, the blocks are restored, and a table of contents
    is collected from the h1-h4 headers.
    """
    def __init__(self, path):
        self.path = path
        self.html = ''
        self.TOC = ''
        self.toc_list = []
        self.toc_string = ''
        self.HEADERS = ['h1', 'h2', 'h3', 'h4']
        # Contains {somerandomnumber: notextile_text}
        self.notextiles = {}
        try:
            self.text = open(self.path).read()
        except:
            print "ERROR opening " + self.path
            sys.exit(1)
        self.create_html()
    def create_html(self):
        """Render self.text to self.html and build the TOC."""
        self.html = self.extract_notextile()
        self.html = textile.textile(self.html)
        self.html = self.replace_notextile()
        self.TOC = self.create_toc()
    def recurse_header(self,h):
        """Walk forward through siblings of *h*, collecting header tags."""
        try:
            # If it doesn't have a name in headers, we skip it
            if h.nextSibling.name in self.HEADERS:
                last_header = h.nextSibling.name
                self.toc_list.append(str(h.nextSibling))
        except:
            pass
        try:
            # if no nextSibling we're at the end, so quit
            h.nextSibling
        except:
            return False
        self.recurse_header(h.nextSibling)
    def make_ordered_list_toc(self):
        """Placeholder: would turn toc_string into an ordered list (FIXME)."""
        soup = BeautifulSoup(self.toc_string)
        # FIXME
        #for h in soup.findAll():
        #    print h
    def create_toc(self):
        """Collect every h1 and its following headers into toc_string."""
        soup = BeautifulSoup(self.html)
        # FIXME If there's no h1 this'll error out
        for h in soup.findAll('h1'):
            self.toc_list.append(str(h))
            self.recurse_header(h)
        for i in self.toc_list:
            self.toc_string = self.toc_string + '\n' + i
        self.make_ordered_list_toc()
        return self.toc_string
    def extract_notextile(self):
        """Replace each <notextile> block with a random token so textile skips it."""
        soup = BeautifulSoup(self.text)
        for idx, nt in enumerate(soup.findAll('notextile')):
            rand = str(random.random())
            # FIXME
            nt.replaceWith(rand)
            self.notextiles[rand] = nt
        #return soup.prettify()
        return soup.renderContents()
    def replace_notextile(self):
        """Swap the random tokens back for the original <notextile> content."""
        soup = BeautifulSoup(self.html)
        for k, v in self.notextiles.items():
            for nt in soup.findAll(text=k):
                # escape xml characters...
                nt.replaceWith(v)
        #return soup.prettify()
        return soup.renderContents()
class Reptile:
    """Static-site generator (Python 2): renders each textile file found one
    directory level below *in_directory* into HTML under *out_directory*,
    copying the CSS folder alongside."""
    def __init__(self, template, in_directory, out_directory):
        self.in_directory = in_directory
        self.out_directory = out_directory
        self.template = template
        self.css_dir = CSS
        # Will be a list of noTextile objects
        self.files = []
        self.set_files(in_directory)
        # FIXME - Should also set 1st level dirs too
    def run(self):
        """Full build: wipe/create the output dir, copy css, render pages."""
        self.create_out_directory()
        self.copy_css()
        self.export_html()
    def copy_css(self):
        """Copy the shared CSS tree into <out_directory>/css."""
        try:
            shutil.copytree(self.css_dir, os.path.join(self.out_directory, 'css'))
        except:
            print "ERROR: Could not copy " + CSS + " to " + os.path.join(self.out_directory, 'css')
            sys.exit(1)
    # FIXME
    def set_files(self, directory):
        """Load every file exactly one level below *directory* as a TextileFile."""
        dirs = os.listdir(directory)
        # The point is this mumbo-jumbo is to only go down one level into the main dir
        for dir in dirs:
            dir_files = os.listdir(os.path.join(directory, dir))
            for dir_file in dir_files:
                if os.path.isfile(os.path.join(directory, dir, dir_file)):
                    f = open(os.path.join(directory, dir, dir_file))
                    # Create new textilefile
                    t = TextileFile(os.path.join(directory, dir, dir_file))
                    self.files.append(t)
    # FIXME
    def create_out_directory(self):
        """Remove any existing output directory, then recreate it."""
        if os.path.isdir(self.out_directory):
            try:
                shutil.rmtree(self.out_directory)
            except:
                print "ERROR: Could not remove " + self.out_directory
                sys.exit(1)
        try:
            os.mkdir(self.out_directory)
        except:
            print "ERROR: Could not create " + self.out_directory
            # FIXME Need to only try to make once
            #sys.exit(1)
    # FIXME
    def create_sub_directory(self, dir):
        """Best-effort mkdir; already-existing dirs just print an error."""
        try:
            os.mkdir(dir)
        except:
            print "ERROR: Could not create " + dir
    def export_html(self):
        """Render each loaded file through the template into mirrored .html paths."""
        for nt in self.files:
            first_level_dir, file_name = os.path.split(nt.path)
            root_level_dir, first_level_dir = os.path.split(first_level_dir)
            file_name, extension = os.path.splitext(file_name)
            file_name = file_name + '.html'
            output_html_file = os.path.join(self.out_directory, first_level_dir, file_name)
            self.create_sub_directory(os.path.join(self.out_directory, first_level_dir))
            if os.path.isfile(nt.path):
                html_file = open(output_html_file, 'w')
                # Send the html as the "body" in the template
                self.template.body = nt.html
                self.template.file_name = file_name
                self.template.toc = nt.TOC
                # Finally write the template to the html file
                html_file.write(self.template.respond())
def main(args):
    """CLI entry point: reptile <input_dir> <output_dir> (Python 2)."""
    try:
        # Make sure required args are set
        sys.argv[1]
        sys.argv[2]
    except:
        print "ERROR: Usage: " + str(sys.argv[0]) + " input_dir output_dir"
        sys.exit(1)
    in_directory = sys.argv[1]
    out_directory = sys.argv[2]
    t = Template(file=TEMPLATE)
    r = Reptile(t, in_directory, out_directory)
    r.run()
if __name__ == '__main__':
    main(sys.argv)
987,239 | 6e62a834acc45b0c7e8e95beb1c45b81c7d555a7 | import csv
# PyBank: summarise monthly revenue figures from the budget CSV.
file_to_load = "Resources/budget_data2.csv"

total_months = 0
total_revenue = 0
prev_revenue = 0
max_i = ["", 0]                       # [date, amount] of greatest month-over-month increase
min_d = ["", 9999999999999999999999]  # [date, amount] of greatest month-over-month decrease
revenue_changes = []                  # month-over-month deltas (first month has none)

with open(file_to_load) as revenue_data:
    reader = csv.DictReader(revenue_data)
    for row in reader:
        revenue = int(row["Revenue"])
        total_months = total_months + 1
        total_revenue = total_revenue + revenue
        # The first month has no previous value to diff against; the original
        # compared it to 0 (skewing the extremes) and appended raw revenues to
        # revenue_changes, so "Average Change" was really average revenue.
        if total_months > 1:
            revenue_change = revenue - prev_revenue
            revenue_changes.append(revenue_change)
            if revenue_change > max_i[1]:
                max_i = [row["Date"], revenue_change]
            if revenue_change < min_d[1]:
                min_d = [row["Date"], revenue_change]
        prev_revenue = revenue

# Guard against a single-month file, which has no changes to average.
average_change = round(sum(revenue_changes) / len(revenue_changes), 2) if revenue_changes else 0

print()
print()
print()
print("Financial Analysis")
print("-------------------------")
print("Total Months: " + str(total_months))
print("Total Revenue: " + "$" + str(total_revenue))
print("Average Change: " + "$" + str(average_change))
print("Greatest Increase: " + str(max_i[0]) + " ($" + str(max_i[1]) + ")")
print("Greatest Decrease: " + str(min_d[0]) + " ($" + str(min_d[1]) + ")")
987,240 | 3e141e01eb00927d090c4d10195506577f338afd |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from rest_framework import viewsets
from rest_framework.response import Response
import datetime
from .forms import DoorCombinationForm
from .models import *
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
def index(request):
    """Booking home page; gated behind the door-combination session check."""
    if not request.session.get('door-combination', False):
        return HttpResponseRedirect('/booking/door-combination/')
    context = { }
    return render(request, 'booking/index.html', context)
def export(request):
    """Export page; same door-combination session gate as index."""
    if not request.session.get('door-combination', False):
        return HttpResponseRedirect('/booking/door-combination/')
    context = { }
    return render(request, 'booking/export.html', context)
def latest_hobs(request):
    """Return the most recent hobs reading for ?aircraft=<rego> as JSON.

    Responds with {'latest_hobs': 0} when the aircraft has no bookings.
    """
    if not request.session.get('door-combination', False):
        return HttpResponseRedirect('/booking/door-combination/')
    aircraft = request.GET.get('aircraft')
    # Highest hobs_end wins; from_time breaks ties between equal readings.
    last=Booking.objects.filter(aircraft__rego=aircraft).order_by('-hobs_end', '-from_time').first()
    if last:
        data = { 'latest_hobs': last.hobs_end }
    else:
        data = { 'latest_hobs': 0 }
    return JsonResponse(data)
def door_combination(request):
    """Ask for the door combination; on success mark the session and redirect.

    ?target=members sends the user to the members page instead of the index.
    """
    if request.session.get('door-combination', False):
        return HttpResponseRedirect('/booking/')
    if request.method == 'POST':
        form = DoorCombinationForm(request.POST)
        if form.is_valid():
            # The form itself validates the combination; just flag the session.
            request.session['door-combination'] = True
            if request.GET.get('target') == 'members':
                return HttpResponseRedirect('/booking/members')
            else:
                return HttpResponseRedirect('/booking/')
    else:
        form = DoorCombinationForm()
    return render(request, 'booking/door-combination.html', { 'form': form })
def members(request):
    """Member list; redirects through the gate with target=members so the
    user comes back here after entering the combination."""
    if not request.session.get('door-combination', False):
        return HttpResponseRedirect('/booking/door-combination/?target=members')
    context = { 'members': Member.objects.all() }
    return render(request, 'booking/members.html', context)
def reset(request):
    """Forget the door-combination gate and send the user back to it.

    ``pop`` with a default avoids the KeyError the bare ``del`` raised when
    the session key was already absent (expired session, repeated reset).
    """
    request.session.pop('door-combination', None)
    return HttpResponseRedirect('/booking/door-combination/')
def popup(request):
    """Render the popup template; no session gate on this view."""
    context = { }
    return render(request, 'booking/popup.html', context)
|
987,241 | c83eaa7ba3ab76f1d31f97e449c5f5d3232379c7 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 9 20:11:21 2017
@author: ERIC
"""
#import the best module ever
from twython import Twython
#config file is in same directory
from TWITTER_CONFIG import APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET
#import keys from config file
twitter =Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
def twitter_bot(number_of_tweets=10):
    """Search twitter for a user-supplied topic, then favourite, retweet and
    reply to each result with a user-supplied message.

    number_of_tweets: how many search results to request (default 10 --
    the module-level call below passed no argument and raised TypeError).
    """
    topic = input("What do you want to find? ")
    result = twitter.search(q=topic, count=number_of_tweets)
    retweet_message = input("What would you like the message to be? ")
    for i in result['statuses']:
        print(i['text'].encode('ascii', 'ignore'))
        tweet_id = i['id']
        # was print(id): printed the builtin function, not the tweet id
        print(tweet_id)
        twitter.create_favorite(id=tweet_id)
        twitter.retweet(id=tweet_id)
        twitter.update_status(status=retweet_message, in_reply_to_status_id=tweet_id)

twitter_bot()
987,242 | 839d14a11451b16cfde7628eb3a9454a52279388 | from suds.client import Client
from nova import exception
from nova import db
import logging
logging.getLogger('suds').setLevel(logging.INFO)
def update_for_run_instance(service_url, region_name, server_port1, server_port2, dpid1, dpid2):
    """Register one server port on each switch (dpid1/dpid2) with the OFC
    under *region_name*, then persist the configuration."""
    # check region name
    client = Client(service_url + "?wsdl")
    client.service.setServerPort(dpid1, server_port1, region_name)
    client.service.setServerPort(dpid2, server_port2, region_name)
    client.service.save()
def update_for_terminate_instance(service_url, region_name, server_port1, server_port2, dpid1, dpid2, vlan_id):
    """Unregister the instance's server ports; if the region has no server
    ports left on any switch, tear the region down as well."""
    client = Client(service_url + "?wsdl")
    client.service.clearServerPort(dpid1, server_port1)
    client.service.clearServerPort(dpid2, server_port2)
    client.service.save()
    # Scan every switch: a remaining ServerPort in this region means the
    # region is still in use, so bail out without removing it.
    dpid_datas = client.service.showSwitchDatapathId()
    for dpid_data in dpid_datas:
        ports = client.service.showPorts(dpid_data.dpid)
        for port in ports:
            if port.type != "ServerPort":
                continue
            if port.regionName == region_name:
                return
    remove_region(service_url, region_name, vlan_id)
def create_region(service_url, region_name, vlan_id):
    """Create *region_name* on the OFC and bind every switch's outer port
    to *vlan_id*; rolls the region back if the binding fails.

    Raises OFCRegionCreationFailed or OFCRegionSettingOuterPortAssocFailed.
    """
    client = Client(service_url + "?wsdl")
    try:
        client.service.createRegion(region_name)
        client.service.save()
    except Exception:
        # narrowed from a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt
        raise exception.OFCRegionCreationFailed(region_name=region_name)
    try:
        switches = db.switch_get_all(None)
        for switch in switches:
            client.service.setOuterPortAssociationSetting(switch["dpid"], switch["outer_port"], vlan_id, 65535, region_name)
            client.service.save()
    except Exception:
        # undo the half-created region before reporting the failure
        client.service.destroyRegion(region_name)
        client.service.save()
        raise exception.OFCRegionSettingOuterPortAssocFailed(region_name=region_name, vlan_id=vlan_id)
def remove_region(service_url, region_name, vlan_id):
    """Best-effort teardown: clear every switch's outer-port association for
    *vlan_id*, then destroy the region on the OFC."""
    client = Client(service_url + "?wsdl")
    try:
        switches = db.switch_get_all(None)
        for switch in switches:
            client.service.clearOuterPortAssociationSetting(switch["dpid"], switch["outer_port"], vlan_id)
            client.service.save()
    except Exception:
        # deliberately best-effort, but no longer a bare "except:" that
        # swallowed SystemExit/KeyboardInterrupt
        pass
    client.service.destroyRegion(region_name)
    client.service.save()
def has_region(service_url, region_name):
    """Return True if the OFC already knows a region called *region_name*."""
    client = Client(service_url + "?wsdl")
    regions = client.service.showRegion()
    return any(entry.regionName == region_name for entry in regions)
|
987,243 | 2f267c35e3b984bba50ce62bb69770324fe018c8 | class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
    def delete_dupliate_list_elements(self, dlist):
        """Collapse adjacent pairs of equal-valued nodes in-place.

        For each adjacent pair with equal .val, only the FIRST index of the
        pair is deleted, leaving a single representative behind.
        """
        delete_indices = set()
        i = 0
        while i < len(dlist) - 1:
            if dlist[i].val == dlist[i + 1].val:
                delete_indices.add(i)
                i += 2
            else:
                i += 1
        # Delete from the back so earlier indices stay valid.
        for i in range(len(dlist) - 1, -1, -1):
            if i in delete_indices:
                del dlist[i]
    def subtreeWithAllDeepest(self, root):
        """Return the root of the smallest subtree containing all deepest
        nodes (LeetCode 865-style); None for an empty tree."""
        if not root:
            return
        # NOTE: named "stack" but consumed with pop(0), i.e. a FIFO queue -> BFS.
        stack = [(root, 0, None)]
        parentMap = {root: None}
        dlist = []       # nodes at the current deepest level seen so far
        maxDepth = 0
        while stack:
            node, depth, parent = stack.pop(0)
            parentMap[node] = parent
            if depth > maxDepth:
                # deeper level found: restart the candidate list
                dlist = []
                dlist.append(node)
                maxDepth = depth
            elif depth == maxDepth:
                dlist.append(node)
            if node.left:
                stack.append((node.left, depth+1, node))
            if node.right:
                stack.append((node.right, depth+1, node))
        # Walk all deepest nodes upward in lockstep, merging siblings that
        # meet at a common parent, until a single ancestor remains.
        while len(dlist) > 0:
            self.delete_dupliate_list_elements(dlist)
            if len(dlist) ==1:
                return dlist[0]
            newList = [parentMap[node] for node in dlist]
            dlist = newList
        return dlist[0]
#
# while not all(elem.val == maxDepthList[0] for elem in maxDepthList):
# maxDepthList = [parentMap[node] for node in maxDepthList ]
# return maxDepthList
# depth = {None: -1}
# def dfs(node, parent = None):
# if node:
# depth[node] = depth[parent] +1
# dfs(node.left, node)
# dfs(node.right, node)
# dfs(root)
# return [ node.val for node, nodeDepth in depth.items() if nodeDepth==max(depth.values()) ]
# Build the standard example tree and run the solver on it.
root = TreeNode(3)
root.left = TreeNode(5)
root.right = TreeNode(1)
root.left.left = TreeNode(6)
root.left.left.left = TreeNode(8)
root.left.right = TreeNode(2)
root.left.right.left = TreeNode(7)
root.left.right.right = TreeNode(4)
root.right.left = TreeNode(0)
root.right.right = TreeNode(8)
solution = Solution()
dlist = solution.subtreeWithAllDeepest(root)
|
987,244 | e9d05b9902f59e61461843037094d72626dfe972 | #!/usr/bin/env python3
'''Extract gene nucleotide sequence in FASTA format from genbank records'''
import argparse
import pathlib
import Bio.SeqIO
# FASTA header template: >name_counter record-name
FASTA_DESC_TEMPL = '>%s_%s %s'
def get_arguments():
    """Parse command-line arguments; --input_fp must point to an existing file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_fp', required=True, type=pathlib.Path,
            help='Input genabnk filepath')
    args = parser.parse_args()
    if not args.input_fp.exists():
        parser.error('Input file %s does not exist' % args.input_fp)
    return args
def main():
    """Print each CDS feature of the genbank input as FASTA to stdout.

    Gene names are taken from 'gene', then 'locus_tag', then 'note'
    qualifiers; repeated names within a record get an incrementing counter.
    """
    # Get command line arguments
    args = get_arguments()
    # Iterate genbank records; giving unique name to each gene
    seen_names = dict()   # (name, record-name) -> occurrence count
    with args.input_fp.open('r') as fh:
        for genbank_record in Bio.SeqIO.parse(fh, 'genbank'):
            for feature in genbank_record.features:
                if feature.type != 'CDS':
                    continue
                name = str()
                if 'gene' in feature.qualifiers:
                    name = feature.qualifiers['gene'][0]
                elif 'locus_tag' in feature.qualifiers:
                    name = feature.qualifiers['locus_tag'][0]
                elif 'note' in feature.qualifiers:
                    name = feature.qualifiers['note'][0]
                number = int()
                # EAFP counter: increment if seen, otherwise start at 1.
                try:
                    seen_names[(name, genbank_record.name)] += 1
                except KeyError:
                    seen_names[(name, genbank_record.name)] = 1
                finally:
                    number = seen_names[(name, genbank_record.name)]
                fasta_desc = FASTA_DESC_TEMPL % (name, number, genbank_record.name)
                sequence = feature.extract(genbank_record).seq
                print(fasta_desc)
                # wrap the sequence at 80 characters per FASTA convention
                for line in [sequence[i:i+80] for i in range(0, len(sequence), 80)]:
                    print(line)
if __name__ == '__main__':
    main()
|
987,245 | 5ca8f1cc7a273c07e0143d63b7e414845153ad2b | TOKEN = '1772291199:AAGqhnEVGIpLHDV3xe_u1BVqgAJYn5IBlX4' |
987,246 | d4af0066d6125b62f9b0de6d93f3a0c709566f2d | import matplotlib as matplt
# Headless matplotlib backend must be selected before any pyplot import.
matplt.use('Agg')
import os, sys
# Make the repo root and the easy_reg package importable when run in-place.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../easy_reg'))
import tools.module_parameters as pars
from abc import ABCMeta, abstractmethod
from easyreg.piplines import run_one_task
from easyreg.reg_data_utils import write_list_into_txt, get_file_name, read_txt_into_list
import torch
# Let cuDNN pick the fastest kernels for fixed-size inputs.
torch.backends.cudnn.benchmark=True
class BaseTask(metaclass=ABCMeta):
    """Abstract base for json-backed task descriptions.

    The original assigned ``__metaclass__ = ABCMeta``, which is Python 2
    syntax and has NO effect under Python 3; the metaclass is now applied
    for real, so subclasses must implement :meth:`save` and BaseTask itself
    can no longer be instantiated.
    """

    def __init__(self, name):
        # human-readable task name carried by all subclasses
        self.name = name

    @abstractmethod
    def save(self):
        """Persist the task's settings to disk."""
        pass
class DataTask(BaseTask):
    """
    base module for data setting files (.json)
    """
    def __init__(self, name, path='../settings/base_data_settings.json'):
        super(DataTask, self).__init__(name)
        # ParameterDict holds the parsed json settings tree
        self.data_par = pars.ParameterDict()
        self.data_par.load_JSON(path)
    def save(self, path='../settings/data_settings.json'):
        """Write the (possibly modified) settings back out as extended JSON."""
        self.data_par.write_ext_JSON(path)
class ModelTask(BaseTask):
    """
    base module for task setting files (.json)
    """
    def __init__(self, name, path='../settings/base_task_settings.json'):
        super(ModelTask, self).__init__(name)
        # ParameterDict holds the parsed json settings tree
        self.task_par = pars.ParameterDict()
        self.task_par.load_JSON(path)
    def save(self, path='../settings/task_settings.json'):
        """Write the (possibly modified) settings back out as extended JSON."""
        self.task_par.write_ext_JSON(path)
def force_test_setting(dm, tsm, output_path):
    """
    To run in test mode, force set related param in datapro and tsk_set.
    The updated param are saved in output_path/cur_data_setting.json and output_path/cur_task_setting.json
    :param dm: ParameterDict, settings for data proprecessing (disabled if the settings have already put in tsk_set)
    :param tsm: ParameterDict, settings for the task
    :param output_path:
    :return: None
    """
    if dm is not None:
        data_json_path = os.path.join(output_path, 'cur_data_setting.json')
        dm.data_par['datapro']['dataset']['prepare_data'] = False
        # [train, val, test, debug] loading caps; -1 = load all test cases
        dm.data_par['datapro']['reg']['max_num_for_loading'] = [1, 1, -1, 1]
        dm.save(data_json_path)
    else:
        tsm.task_par['dataset']['max_num_for_loading'] = [1, 1, -1, 1]
    # disable any training/resume behaviour for pure evaluation
    tsm.task_par['tsk_set']['train'] = False
    tsm.task_par['tsk_set']['continue_train'] = False
    tsk_json_path = os.path.join(output_path, 'cur_task_setting.json')
    tsm.save(tsk_json_path)
def init_test_env(setting_path, output_path, file_list,fname_list):
    """
    create test environment, the file list would be saved into output_path/seg/test/file_path_list.txt,
    a corresponding auto-parsed filename list would also be saved in output_path/seg/test/file_name_list.txt
    :param setting_path: the path to load 'cur_task_setting.json' and 'cur_data_setting.json' (optional if the related settings are in cur_task_setting)
    :param output_path: the output path of the task
    :param file_list: the image list; each item is either a path or an [image, label] pair
    :param fname_list: optional pre-computed file names; derived from file_list when None
    :return: tuple of ParameterDict, datapro (optional) and tsk_set
    """
    dm_json_path = os.path.join(setting_path, 'cur_data_setting.json')
    tsm_json_path = os.path.join(setting_path, 'cur_task_setting.json')
    assert os.path.isfile(tsm_json_path), "task setting not exists"
    # data settings are optional; they may already live inside the task settings
    dm = DataTask('task_reg', dm_json_path) if os.path.isfile(dm_json_path) else None
    tsm = ModelTask('task_reg', tsm_json_path)
    file_num = len(file_list)
    os.makedirs(os.path.join(output_path, 'seg/test'), exist_ok=True)
    os.makedirs(os.path.join(output_path, 'seg/res'), exist_ok=True)
    file_txt_path = os.path.join(output_path, 'seg/test/file_path_list.txt')
    fn_txt_path = os.path.join(output_path, 'seg/test/file_name_list.txt')
    # a 2-item entry means (image, label)
    has_label = len(file_list[0])==2
    if fname_list is None:
        if has_label:
            fname_list = [get_file_name(file_list[i][0]) for i in range(file_num)]
        else:
            fname_list = [get_file_name(file_list[i]) for i in range(file_num)]
    write_list_into_txt(file_txt_path, file_list)
    write_list_into_txt(fn_txt_path, fname_list)
    data_task_name = 'seg'
    cur_task_name = 'res'
    if dm is not None:
        dm.data_par['datapro']['dataset']['output_path'] = output_path
        dm.data_par['datapro']['dataset']['task_name'] = data_task_name
    tsm.task_par['tsk_set']['task_name'] = cur_task_name
    tsm.task_par['tsk_set']['output_root_path'] = os.path.join(output_path, data_task_name)
    return dm, tsm
def do_segmentation_eval(args, segmentation_file_list):
    """
    set running env and run the task
    :param args: the parsed arguments
    :param segmentation_file_list: list of segmentation file list, [image_list, label_list]
    :return: None
    """
    task_output_path = args.task_output_path
    os.makedirs(task_output_path, exist_ok=True)
    setting_folder_path = args.setting_folder_path
    file_txt_path = ''
    if args.file_txt_path:
        file_txt_path = args.file_txt_path
        # a sibling file_name_list.txt, when present, supplies display names
        fname_txt_path = os.path.join(os.path.split(file_txt_path)[0],"file_name_list.txt")
        fname_list = read_txt_into_list(fname_txt_path) if os.path.isfile(fname_txt_path) else None
    else:
        print(segmentation_file_list)
        # derive names from the image basenames (duplicated for image+label)
        fname_list = [[f.split('/')[-1].split('.')[0] for f in segmentation_file_list[0]]]*2
    dm, tsm = init_test_env(setting_folder_path, task_output_path, segmentation_file_list, fname_list)
    tsm.task_par['tsk_set']['gpu_ids'] = args.gpu_id
    model_path= args.model_path
    if model_path is not None:
        # was str.format_map(model_path): format_map requires a mapping and
        # would itself raise instead of producing the assertion message
        assert os.path.isfile(model_path), "the model {} not exist".format(model_path)
        tsm.task_par['tsk_set']['model_path'] = model_path
    force_test_setting(dm, tsm, task_output_path)
    dm_json_path = os.path.join(task_output_path, 'cur_data_setting.json') if dm is not None else None
    tsm_json_path = os.path.join(task_output_path, 'cur_task_setting.json')
    run_one_task(tsm_json_path, dm_json_path)
if __name__ == '__main__':
    """
    A evaluation interface for segmentation network with pre-trained models.
    Arguments:
        input related:two input styles are supported,
            1. given txt
             --file_txt_path/-txt: the txt file recording the paths of images to segmentation
            2. given image
            --image_list/ -i: the image list, s1 s2 s3..sn
            --limage_list/ -li: optional, the label list, ls1,ls2,ls3..lsn
        other arguments:
             --setting_folder_path/-ts :path of the folder where settings are saved
             --task_output_path/ -o: the path of output folder
             --model_path/ -m: the path of pretrained model, can be set here or set in setting file
             --gpu_id/ -g: gpu_id to use
    """
    import argparse
    parser = argparse.ArgumentParser(description='An easy interface for evaluate various segmentation methods')
    parser.add_argument('-ts', '--setting_folder_path', required=False, type=str,
                        default=None,
                        help='path of the folder where settings are saved,should include cur_task_setting.json')
    parser.add_argument('-txt', '--file_txt_path', required=False, default=None, type=str,
                        help='the txt file recording the paths of images for segmentation')  # 2
    parser.add_argument('-i', '--image_list', nargs='+', required=False, default=None,
                        help='the image list, s1 s2 s3..sn')
    parser.add_argument('-li', '--limage_list', nargs='+', required=False, default=None,
                        help='the image label list, ls1,ls2,ls3..lsn')
    parser.add_argument('-o', "--task_output_path", required=True, default=None, help='the output path')
    parser.add_argument('-m', "--model_path", required=False, default=None, help='the path of trained model')
    parser.add_argument('-g', "--gpu_id", required=False, type=int, default=0, help='gpu_id to use')
    args = parser.parse_args()
    print(args)
    file_txt_path = args.file_txt_path
    image_list = args.image_list
    limage_list = args.limage_list
    image_label_list = []
    # exactly one of --file_txt_path / --image_list must be supplied
    assert file_txt_path is not None or image_list is not None, "either file_txt_path or source/target_list should be provided"
    assert file_txt_path is None or image_list is None, " file_txt_path and source/target_list cannot be both provided"
    if file_txt_path is not None:
        image_label_list = read_txt_into_list(file_txt_path)
    if limage_list is not None:
        assert len(image_list) == len(limage_list), "the image_list and limage_list should be the same length"
        # NOTE(review): only the FIRST image/label pair is written to the txt
        # file even when several were passed -- looks like a latent bug; confirm.
        with open('file_path_list.txt', 'w+') as f:
            f.write('{}\t{}'.format(image_list[0], limage_list[0]))
        args.file_txt_path = 'file_path_list.txt'
        image_label_list = read_txt_into_list('file_path_list.txt')
    args.image_list = None
    args.limage_list = None
    do_segmentation_eval(args, image_label_list)
|
987,247 | 8e625caa2db8fa2436e03422d9a4206761a8720f | # Copyright 2015-2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_nec.nwa.nwalib import client_fwaas
from networking_nec.nwa.nwalib import client_l2
from networking_nec.nwa.nwalib import client_l3
from networking_nec.nwa.nwalib import client_lbaas
from networking_nec.nwa.nwalib import client_tenant
from networking_nec.nwa.nwalib import nwa_restclient
class NwaClient(nwa_restclient.NwaRestClient):
    '''Client class of NWA.

    Aggregates one sub-client per NWA API area (tenant, L2, L3, FWaaS,
    LBaaS); every constructor argument is forwarded unchanged to
    NwaRestClient.
    '''
    def __init__(self, *args, **kwargs):
        super(NwaClient, self).__init__(*args, **kwargs)
        # Each sub-client is constructed with a reference to this client.
        self.tenant = client_tenant.NwaClientTenant(self)
        self.l2 = client_l2.NwaClientL2(self)
        self.l3 = client_l3.NwaClientL3(self)
        self.fwaas = client_fwaas.NwaClientFWaaS(self)
        self.lbaas = client_lbaas.NwaClientLBaaS(self)
|
987,248 | 3d83ad15a32a573bbc3e3f8f23a51544d8be923f | import json
# Round-trip a list of numbers through a JSON file.
numbers = [2, 3, 4, 5, 6, 7, 11, 12]
filename = 'numbers.json'
with open(filename, 'w') as f_obj:
    json.dump(numbers, f_obj)
with open(filename) as f_obj:
    numbers = json.load(f_obj)
# bugfix: `print numbers` is Python-2-only syntax; the call form works in both.
print(numbers)
|
987,249 | 90e94392faf1c9a16d856e335c23908f25899223 | # Machine Learning Online Class
# Exercise 7 | Principle Component Analysis and K-Means Clustering
from scipy.io import loadmat
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from featureNormalize import featureNormalize
from pca import pca
from projectData import *
from recoverData import *
from displayData import displayData
import matplotlib.image as mpimg
from kMeansInitCentroids import *
from runkMeans import *
from plotDataPoints import *
# ================== Part 1: Load Example Dataset  ===================
# We start this exercise by using a small dataset that is easily to
# visualize
# The following command loads the dataset. You should now have the
# variable X in your environment
data = loadmat('ex7data1.mat')
X = data['X']
# Visualize the example dataset
fig, ax = plt.subplots()
ax.plot(X[:, 0], X[:, 1], 'bo', fillstyle='none')
ax.set(aspect='equal', xlim=(0.5, 6.5), ylim=(2, 8))
fig.show()
input('Program paused. Press enter to continue.\n')
# =============== Part 2: Principal Component Analysis ===============
# You should now implement PCA, a dimension reduction technique.
print('\nRunning PCA on example dataset.\n\n')
# Before running PCA, it is important to first normalize X
X_norm, mu, sigma = featureNormalize(X)
# Run PCA
U, S = pca(X_norm)
# Draw the eigenvectors centered at mean of data. These lines show the
# directions of maximum variations in the dataset.
# (S[0] and S[1] scale the arrow lengths; U columns are the directions.)
eigen1 = mu + 1.5 * S[0]*U[:, 0]
eigen2 = mu + 1.5 * S[1]*U[:, 1]
# Visualize the example dataset
fig, ax = plt.subplots()
ax.plot(X[:, 0], X[:, 1], 'bo', fillstyle='none')
ax.set(aspect='equal', xlim=(0.5, 6.5), ylim=(2, 8))
ax.plot([mu[0], eigen1[0]], [mu[1], eigen1[1]], '-k', linewidth=2)
ax.plot([mu[0], eigen2[0]], [mu[1], eigen2[1]], '-k', linewidth=2)
fig.show()
print('Top eigenvector: \n')
print(' U(:,0) = %f %f \n' % (U[0, 0], U[1, 0]))
print('\n(you should expect to see -0.707107 -0.707107)\n')
input('Program paused. Press enter to continue.\n')
# =================== Part 3: Dimension Reduction ===================
# You should now implement the projection step to map the data onto the
# first k eigenvectors. The code will then plot the data in this reduced
# dimensional space.  This will show you what the data looks like when
# using only the corresponding eigenvectors to reconstruct it.
print('\nDimension reduction on example dataset.\n\n')
# Project the data onto K = 1 dimension
K = 1
Z = projectData(X_norm, U, K)
print('Projection of the first example: %f\n' % (Z[0]))
print('\n(this value should be about 1.481274)\n\n')
X_rec = recoverData(Z, U, K)
print('Approximation of the first example: %f %f\n' %
      (X_rec[0, 0], X_rec[0, 1]))
print('\n(this value should be about  -1.047419 -1.047419)\n\n')
# Plot the normalized dataset (returned from pca)
fig, ax = plt.subplots()
ax.plot(X_norm[:, 0], X_norm[:, 1], 'bo', fillstyle='none')
ax.set(aspect='equal', xlim=(-4, 3), ylim=(-4, 3))
# Draw lines connecting the projected points to the original points
ax.plot(X_rec[:, 0], X_rec[:, 1], 'ro', fillstyle='none')
for i in range(np.size(X_norm, 0)):
    ax.plot([X_norm[i, 0], X_rec[i, 0]], [
            X_norm[i, 1], X_rec[i, 1]], '--k', linewidth=1)
fig.show()
input('Program paused. Press enter to continue.\n')
# =============== Part 4: Loading and Visualizing Face Data =============
# We start the exercise by first loading and visualizing the dataset.
# The following code will load the dataset into your environment
print('\nLoading face dataset.\n\n')
# Load Face dataset
data = loadmat('ex7faces.mat')
X = data['X']
# Display the first 100 faces in the dataset
frame = displayData(X[:100, :])
plt.imshow(frame, cmap='gray')
plt.axis('off')
plt.show()
# bugfix: every other "Program paused" in this script uses input() so the
# run actually waits; the original print() here fell straight through.
input('Program paused. Press enter to continue.\n')
# =========== Part 5: PCA on Face Data: Eigenfaces  ===================
# Run PCA and visualize the eigenvectors which are in this case eigenfaces
# We display the first 36 eigenfaces.
print('\nRunning PCA on face dataset.\n'
      '(this might take a minute or two ...)\n\n')
# Before running PCA, it is important to first normalize X by subtracting
# the mean value from each feature
X_norm, mu, sigma = featureNormalize(X)
# Run PCA
U, S = pca(X_norm)
# Visualize the top 36 eigenvectors found
frame = displayData(U[:, :36].transpose())
plt.imshow(frame, cmap='gray')
plt.axis('off')
plt.show()
# ============= Part 6: Dimension Reduction for Faces =================
# Project images to the eigen space using the top k eigenvectors
print('\nDimension reduction for face dataset.\n\n')
K = 100
Z = projectData(X_norm, U, K)
print('The projected data Z has a size of: ')
print(Z.shape)
input('\n\nProgram paused. Press enter to continue.\n')
# ==== Part 7: Visualization of Faces after PCA Dimension Reduction ====
# Project images to the eigen space using the top K eigen vectors and
# visualize only using those K dimensions
# Compare to the original input, which is also displayed
print('\nVisualizing the projected (reduced dimension) faces.\n\n')
K = 100
X_rec = recoverData(Z, U, K)
# Creates two subplots and unpacks the output array immediately
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
# Display normalized data (original faces, side by side with the recovery)
frame = displayData(X_norm[:100, :])
ax1.imshow(frame, cmap='gray', aspect = 'equal')
ax1.set_title('Original faces')
ax1.set_axis_off()
# Display reconstructed data from only k eigenfaces
frame = displayData(X_rec[:100, :])
ax2.imshow(frame, cmap='gray', aspect = 'equal')
ax2.set_title('Recovered faces')
ax2.set_axis_off()
fig.show()
input('Program paused. Press enter to continue.\n')
## === Part 8(a): Optional (ungraded) Exercise: PCA for Visualization ===
#  One useful application of PCA is to use it to visualize high-dimensional
#  data. In the last K-Means exercise you ran K-Means on 3-dimensional
#  pixel colors of an image. We first visualize this output in 3D, and then
#  apply PCA to obtain a visualization in 2D.
#  Reload the image from the previous exercise and run K-Means on it
#  Load an image of a bird
A = mpimg.imread('bird_small.png')
#  Size of the image
img_size = A.shape
# Reshape the image into an Nx3 matrix where N = number of pixels.
# Each row will contain the Red, Green and Blue pixel values
# This gives us our dataset matrix X that we will use K-Means on.
X = np.reshape(A, (img_size[0]*img_size[1], img_size[2]))
# Run your K-Means algorithm on this data
K = 16
max_iters = 10
# When using K-Means, it is important the initialize the centroids
# randomly.
initial_centroids = kMeansInitCentroids(X, K)
# Run K-Means
centroids, idx = runkMeans(X, initial_centroids, max_iters)
# Sample 1000 random indexes (since working with all the data is
# too expensive. If you have a fast computer, you may increase this.
# bugfix: the original added 1 (a MATLAB 1-based-indexing leftover), which
# shifted the sample and could index one past the end of X; valid Python
# indexes are 0 .. np.size(X, 0) - 1.
sel = np.floor(np.random.rand(1000, 1)*np.size(X,0)).astype(int)
# Setup Color Palette
colors = idx[sel, 0].flatten()
# Visualize the data and centroid memberships in 3D
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.scatter(X[sel, 0], X[sel, 1], X[sel,2], c = colors, cmap = 'prism')
ax.set_title('Pixel dataset plotted in 3D. Color shows centroid memberships')
fig.show()
input('Program paused. Press enter to continue.\n')
## === Part 8(b): Optional (ungraded) Exercise: PCA for Visualization ===
# Use PCA to project this cloud to 2D for visualization
# Subtract the mean to use PCA
X_norm, mu, sigma = featureNormalize(X)
# PCA and project the data to 2D
U, S = pca(X_norm)
Z = projectData(X_norm, U, 2)
# Plot in 2D (same sampled pixels and cluster colors as the 3D plot above)
plotDataPoints(Z[sel.flatten(), :], idx[sel,0], K)
plt.title('Pixel dataset plotted in 2D, using PCA for dimensionality reduction');
plt.show()
input('Program paused. Press enter to continue.\n')
987,250 | 1064b303a0273a0647c11e770eb64e1a9039367d | import json
from abc import ABC, abstractmethod
import numpy
import talib
import websockets
# RSI parameters: lookback window and the classic overbought/oversold levels.
RSI_PERIOD = 14
RSI_OVERBOUGHT = 70
RSI_OVERSOLD = 30
class Watcher(ABC):
    """
    Responsible for watching symbol candles and deciding when to order.

    Subclasses implement _order() to actually place buy/sell orders.
    """
    def __init__(self, symbol):
        # symbol is the base asset, e.g. "BTC"; quoted against USDT.
        self.SYMBOL = symbol + "/USDT"
        self._ws = None
        self._loop = None
        # Closing prices of finished 1-minute candles, in arrival order.
        self._closes = []
        # True while we hold a position (bought, not yet sold).
        self._bought = False
        self.SOCKET = f"wss://stream.binance.com:9443/ws/{symbol.lower()}usdt@kline_1m"
    async def listen(self, loop):
        """Connect to the Binance kline stream and process messages forever."""
        self._ws = await websockets.connect(self.SOCKET, ping_interval=None)
        self._loop = loop
        async for message in self._ws:
            await self._receive(message)
    async def _receive(self, message):
        """Handle one stream message; act only on closed candles (kline['x'])."""
        kline = json.loads(message)['k']
        if kline['x']:
            print(kline)
            self._closes.append(float(kline['c']))
            # RSI needs more than RSI_PERIOD closes before it is defined.
            if len(self._closes) > RSI_PERIOD:
                np_closes = numpy.array(self._closes)
                rsi = talib.RSI(np_closes, RSI_PERIOD)
                print(rsi)
                # Decide on the most recent RSI value only.
                return await self._check(rsi[-1])
    async def _check(self, rsi):
        """Sell when overbought while holding; buy when oversold while flat.

        Returns True if an order succeeded, False if it failed; returns None
        when RSI is between the thresholds (no action).
        """
        if rsi > RSI_OVERBOUGHT:
            if self._bought:
                print("==SELL==")
                if await self._order("sell"):
                    self._bought = False
                    return True
                else:
                    return False
        elif rsi < RSI_OVERSOLD:
            if not self._bought:
                print("==BUY==")
                if await self._order("buy"):
                    self._bought = True
                    return True
                else:
                    return False
    @abstractmethod
    async def _order(self, side):
        # side is "buy" or "sell"; implementations return truthy on success.
        pass
|
987,251 | ec80412850dbe8365a6568fa9122160678586b4c | from Query import Query
import re
class ParserQuery():
    """Parsers for query (.QRY) files and their relevance (.REL) judgments."""
    def parseQRY(chemin):
        """
        Parse a QRY file (queries with their identifiers and text).

        Returns a dict mapping query identifier -> Query.
        """
        res = {}
        currentBalise = None
        currentQuery = None
        # `with` guarantees the file is closed even if a parse error occurs
        # (the original open()/close() leaked the handle on exceptions).
        with open(chemin, 'r') as file:
            for line in file:
                words = line.split()
                # Skip blank lines
                if len(words) > 0:
                    # Check whether we are on a tag line, and which tag
                    if words[0] == ".I":
                        if currentQuery is not None:
                            # Save the current query before creating a new one
                            res[currentQuery.getIdentifiant()] = currentQuery
                        # Create a query with its identifier
                        currentQuery = Query(words[1])
                        currentBalise = 'I'
                    elif words[0] == ".W":
                        currentBalise = 'W'  # now inside a W tag
                    elif words[0][0] == '.':
                        currentBalise = 'unknown'  # inside an unknown tag
                    else:
                        # Inside tag content: only W content is kept
                        if currentBalise == 'W':
                            currentQuery.addTexte(line)  # append line to the query text
        # bugfix: guard the final save — an empty file previously crashed with
        # AttributeError on None.
        if currentQuery is not None:
            res[currentQuery.getIdentifiant()] = currentQuery
        return res
    def parseREL(chemin, reqs):
        """
        Parse a REL file and attach each relevant document id to its query.

        `reqs` is the dict returned by parseQRY; it is updated and returned.
        """
        with open(chemin, 'r') as file:
            for line in file:
                words = line.split()
                # bugfix: blank lines inside the file previously raised
                # IndexError (only EOF was handled).
                if not words:
                    continue
                # bugfix: int() already ignores leading zeros; the previous
                # re.split('^0*', ...) crashed on an all-zero id like "0".
                docPertinent = int(words[1])
                if int(words[0][0]) == 0:
                    # Leading-zero query id, e.g. "01" -> query 1
                    reqs[int(words[0][1])].addDocspertinents(docPertinent)
                else:
                    reqs[int(words[0])].addDocspertinents(docPertinent)
        return reqs
987,252 | a3eb3dc6e6a3a2f215b760cd0cdd9014b1a192eb | from genmod.commands import filter_command
from click.testing import CliRunner
# Fixture: a VCF that has already been annotated, used as CLI input below.
ANNOTATED_VCF_FILE = "tests/fixtures/test_vcf_annotated.vcf"
from genmod import logger
from genmod.log import init_log
# Configure genmod's logger once for the whole test module.
init_log(logger, loglevel="INFO")
def test_genmod_filter():
    """Run the filter command on an annotated VCF and expect a clean exit."""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(filter_command, [ANNOTATED_VCF_FILE])
    assert outcome.exit_code == 0
|
987,253 | 5370676b064e69e7eff1df1c15d04343c15f0650 | # Generated by Django 2.2.7 on 2019-12-25 15:13
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: make Course.pub_date nullable with a
    # default of today's date.

    dependencies = [
        ('courses', '0004_auto_20191129_1720'),
    ]
    operations = [
        migrations.AlterField(
            model_name='course',
            name='pub_date',
            field=models.DateField(default=datetime.date.today, null=True),
        ),
    ]
|
987,254 | dc54c1b0aadbfecdfa743fd98e1ba0c0133fa669 | # Generated by Django 3.2 on 2021-04-12 21:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: Lunch.details becomes a CharField with an
    # empty-string default and max length 200.

    dependencies = [
        ("menu", "0005_auto_20210412_2137"),
    ]
    operations = [
        migrations.AlterField(
            model_name="lunch",
            name="details",
            field=models.CharField(default="", max_length=200),
        ),
    ]
|
987,255 | 385aee105ef70b0597d7b7110dd3fece93ab0f46 | from string import ascii_lowercase
import numpy as np
from ref_optimize import ref_optimize
def read_train(filename):
    """Read handwriting training data.

    Each space-separated line holds a letter label in column 1, a
    next-fragment id in column 2 (negative marks the end of a word), and
    feature values from column 5 onward.  Returns a list of
    (features, labels) tuples, one per word.
    """
    # a -> 0, b -> 1, ..., z -> 25
    letter_index = {ch: idx for idx, ch in enumerate(ascii_lowercase)}
    with open(filename, "r") as handle:
        lines = handle.read().split("\n")
    words_x, words_y = [], []
    cur_x, cur_y = [], []
    # The file ends with a newline, so the last split element is empty.
    for line in lines[:-1]:
        fields = line.split(" ")
        cur_y.append(letter_index[fields[1]])
        cur_x.append(np.array(fields[5:], dtype=float))
        if int(fields[2]) < 0:
            # End of word: flush the accumulated letters.
            words_x.append(np.array(cur_x))
            words_y.append(np.array(cur_y, dtype=int))
            cur_x, cur_y = [], []
    return list(zip(words_x, words_y))
def read_test(filename):
    """Read handwriting test data.

    The test file shares the training-file format, so delegate to
    read_train (the previous implementation was a verbatim copy of it).
    """
    return read_train(filename)
def read_model(filename="../data/model.txt"):
    """Load model parameters for exercise 2a.

    Generalized: `filename` defaults to the previously hard-coded path, so
    existing callers are unaffected.

    Returns:
        W: (26, 128) float array, one weight row per letter.
        T: (26, 26) float array of pairwise weights, transposed from the
           order stored in the file.
    """
    with open(filename, "r") as f:
        raw_data = f.read()
    raw_data = raw_data.split("\n")
    # First 26*128 values are W, row per letter.
    W = np.array(raw_data[:26*128], dtype=float).reshape(26, 128)
    # Remaining values (minus the trailing empty split element) are T,
    # which is stored transposed relative to how it is used.
    T = np.array(raw_data[26*128:-1], dtype=float).reshape(26, 26)
    T = np.swapaxes(T, 0, 1)
    return W, T
def main():
    """Smoke-test the data readers on the small train set and the test set."""
    train_filename = "../data/train_small.txt"
    test_filename = "../data/test.txt"
    train_data = read_train(train_filename)
    # NOTE(review): read_train is used for the test file too; read_test is an
    # identical parser, so behavior is the same — confirm that is intended.
    test_data = read_train(test_filename)
    # print ref_optimize(train_data, test_data, c=1000)
# NOTE(review): the entry-point call is commented out, so importing or
# running this module does nothing — confirm that is intended.
#main()
|
987,256 | d77180854792ffd31298ff5b61b669aa721a6a30 | import ROOT, sys, os
import numpy as np
import time
start_time = time.time()
# Command-line flags (anything starting with "-"):
#   -b  : MC sample (isData = 0, the default)
#   -dj : data, JetHT sample
#   -de : data, SingleElectron sample
opts = [opt for opt in sys.argv[1:] if opt.startswith("-")]
outputTitle = "h_studyETauChannel"
isData = 0
if "-b" in opts:
    isData = 0
if "-dj" in opts:
    isData = 1
    isJetHTSample = 1
    isSingleElectronSample = 0
# bugfix: was `"de" in opts` — option strings all start with "-", so the
# SingleElectron branch could never be selected.
if "-de" in opts:
    isData = 1
    isJetHTSample = 0
    isSingleElectronSample = 1
# Make the skimmer's data-format classes known to the ROOT interpreter.
ROOT.gInterpreter.Declare('#include "../../MiniAODSkimmer/interface/JetInfoDS.h"')
ROOT.gInterpreter.Declare('#include "../../MiniAODSkimmer/interface/MuonInfoDS.h"')
ROOT.gInterpreter.Declare('#include "../../MiniAODSkimmer/interface/ElectronInfoDS.h"')
ROOT.gInterpreter.Declare('#include "../../MiniAODSkimmer/interface/TauInfoDS.h"')
# First positional argument: text file listing the input ROOT files.
inputFileListName=sys.argv[1]
inputFileList=inputFileListName
# Optional second positional argument: output directory.
if len(sys.argv)>2 and not sys.argv[2].startswith("-"):
    outputFileDir=sys.argv[2]
else:
    outputFileDir = "./output/"
outputFileName = outputFileDir+outputTitle+"_"+inputFileListName.split("/")[-1].replace(".txt",".root")
out=ROOT.TFile.Open(outputFileName,'recreate')
print(outputFileName)
fchain = ROOT.TChain('tcpNtuples/analysisTree')
chain2 = ROOT.TChain('tcpTrigNtuples/triggerTree')
if isData == 0:
    # Lumi and generator trees exist only for MC.
    chain3 = ROOT.TChain('lumiSummary/lumiTree')
    chain4 = ROOT.TChain('tcpGenNtuples/genTree')
pi = np.pi
# All histograms are kept in this dict, keyed by name.
h = {}
# Selection thresholds used throughout the event selection below.
event_cut = {
    'jetPt': 70,
    'dRl': 0.4,
    'dRltau': 0.05,
    'dRlj': 0.8,
    'metcut': 100.0,
    'mtcut': 50.0,
    'dPhiml': 1,
    'dPhimj': 2,
    'mass' : 5
}
#define histograms here
def book_histogram():
    """Book the per-object and bookkeeping histograms into the global dict h."""
    h['hEvents'] = ROOT.TH1F ("NEvents", "Number of Events; ;N", 2, 0, 2)
    h['hWeights'] = ROOT.TH1F ("hWeights", "Weights per events; weight; N", 100, 0, 2)
    h['hGenWeights'] = ROOT.TH1F ("hGenWeights", "Genweights per events; genweight; N", 100, 0, 2)
    h['hPuWeights'] = ROOT.TH1F ("hPuWeights", "PUweights per events; PUweight; N", 100, 0, 2)
    # ---------- Objects ---------- #
    h['hJetPt'] = ROOT.TH1F ("hJetPt", "Jet P_{T} ; P_{T} ; N", 1500, 0, 1500)
    h['hDeepjet'] = ROOT.TH1F ("hDeepjet", "deepjet score ; score ; N", 100, 0, 1)
    h['hBJetPt'] = ROOT.TH1F ("hBJetPt", "BJet P_{T} ; P_{T} ; N", 1500, 0, 1500)
    h['hMuonPt'] = ROOT.TH1F ("hMuPt", "Muon P_{T} ; P_{T} ; N", 500, 0, 500)
    h['hIsoMuonPt'] = ROOT.TH1F ("hIsoMuPt", "Isolated Muon P_{T} ; P_{T} ; N", 500, 0, 500)
    h['hNonIsoMuonPt'] = ROOT.TH1F ("hNonIsoMuPt", "Non-Isolated Muon P_{T} ; P_{T} ; N", 500, 0, 500)
    h['hElectronPt'] = ROOT.TH1F ("hEPt", "Electron P_{T} ; P_{T} ; N", 500, 0, 500)
    h['hIsoElectronPt'] = ROOT.TH1F ("hIsoEPt", "Isolated Electron P_{T} ; P_{T} ; N", 500, 0, 500)
    h['hNonIsoElectronPt'] = ROOT.TH1F ("hNonIsoEPt", "Non-Isolated Electron P_{T} ; P_{T} ; N", 500, 0, 500)
    h['hTauECleanedPt'] = ROOT.TH1F ("hTauECleanedPt", "Electron-Cleaned Tau P_{T} ; P_{T} ; N", 500, 0, 500)
    h['hTauMuCleanedPt'] = ROOT.TH1F ("hTauMuCleanedPt", "Muon-Cleaned Tau P_{T} ; P_{T} ; N", 500, 0, 500)
def book_event_histogram(region):
    """Book the standard set of per-region event histograms, prefixed by region."""
    h[region+"_Count"] = ROOT.TH1F (region+"_Count", region+"_Count ; Events ; Events ", 1, 0, 1)
    h[region+"_Mass"] = ROOT.TH1F (region+"_Mass", region+"_Mass ; M_{vis.} (GeV) ; Events ", 100, 0, 100)
    h[region+"_Lepton1Pt"] = ROOT.TH1F (region+"_Lepton1Pt", region+"_Lepton1Pt ; P_{T} (GeV) ; Events ", 500, 0, 500)
    h[region+"_Lepton2Pt"] = ROOT.TH1F (region+"_Lepton2Pt", region+"_Lepton2Pt ; P_{T} (GeV) ; Events ", 500, 0, 500)
    h[region+"_JetPt"] = ROOT.TH1F (region+"_JetPt", region+"_JetPt ; JetP_{T} (GeV) ; Events ", 2000, 0, 2000)
    h[region+"_MetPt"] = ROOT.TH1F (region+"_MetPt", region+"_MetPt ; MET (GeV) ; Events ", 500, 0, 500)
    h[region+"_Mt"] = ROOT.TH1F (region+"_Mt", region+"_Mt ; M_{T} (GeV) ; Events ", 150, 0, 150)
    h[region+"_Nj"] = ROOT.TH1F (region+"_Nj", region+"_Nj ; N_{j} ; Events ", 10, 0, 10)
    h[region+"_dRl"] = ROOT.TH1F (region+"_dRl", region+"_dRl ; dR(leptons) ; Events", 100, 0, 5)
    h[region+"_dRj"] = ROOT.TH1F (region+"_dRj", region+"_dRj ; dR(jet, ditau) ; Events", 100, 0, 5)
    h[region+"_dPhil"] = ROOT.TH2F (region+"_dPhil", region+"_dPhil ; dPhi(met,lepton1) ; dPhi(met,lepton2)", 100, -pi, pi, 100, -pi, pi)
    h[region+"_dPhi"] = ROOT.TH2F (region+"_dPhi", region+"_dPhi ; dPhi(met,ditau) ; dPhi(met,jet)", 100, -pi, pi, 100, -pi, pi)
def get_TLorentzVector(obj):
    """Build a ROOT TLorentzVector from an ntuple object's pt/eta/phi/mass."""
    four_vec = ROOT.TLorentzVector()
    four_vec.SetPtEtaPhiM(obj.pt, obj.eta, obj.phi, obj.mass)
    return four_vec
def pass_deltaR(l1, l2, j, channel):
    """Angular-separation selection.

    Returns 1 when the two leptons are close (dR < dRl) and both are far
    from the jet (dR > dRlj); tau channels additionally require a minimum
    lepton separation (dR > dRltau).  Returns -9999 when the cuts fail and
    None (implicitly) for an unrecognized channel.
    """
    if channel in ("MuTau", "ETau"):
        if l1.DeltaR(l2) < event_cut["dRl"] and j.DeltaR(l1) > event_cut["dRlj"] \
           and j.DeltaR(l2) > event_cut["dRlj"] and l1.DeltaR(l2) > event_cut["dRltau"]:
            return 1
        return -9999
    if channel in ("MuMu", "EMu", "EE"):
        if l1.DeltaR(l2) < event_cut["dRl"] and j.DeltaR(l1) > event_cut["dRlj"] \
           and j.DeltaR(l2) > event_cut["dRlj"]:
            return 1
        return -9999
def Mt(lepton, met):
    """Transverse mass of the lepton-MET system: sqrt(2*pT_l*MET*(1-cos dphi))."""
    dphi_cos = np.cos(met.DeltaPhi(lepton))
    return np.sqrt(2 * lepton.Pt() * met.Pt() * (1 - dphi_cos))
def plot_variable(region, l1, l2, j, m):
    """Fill all per-region histograms for one selected event.

    Relies on module-level state: the histogram dict h, the current event
    weight, and the selected-jet list s_jet.
    """
    h[region+"_Count"].Fill(0, weight)
    h[region+"_Mass"].Fill((l1+l2).M(), weight)
    h[region+"_Lepton1Pt"].Fill(l1.Pt(), weight)
    h[region+"_Lepton2Pt"].Fill(l2.Pt(), weight)
    h[region+"_JetPt"].Fill(j.Pt(), weight)
    h[region+"_MetPt"].Fill(m.Pt(), weight)
    h[region+"_Mt"].Fill(Mt(l1, m), weight)
    h[region+"_Nj"].Fill(len(s_jet), weight)
    h[region+"_dRl"].Fill(l1.DeltaR(l2), weight)
    h[region+"_dRj"].Fill(j.DeltaR(l1+l2), weight)
    h[region+"_dPhil"].Fill(m.DeltaPhi(l1), m.DeltaPhi(l2), weight)
    h[region+"_dPhi"].Fill(m.DeltaPhi(l1+l2), m.DeltaPhi(j), weight)
def mumu_channel():
    """Select an opposite-sign di-muon event; fill 'MuMu' plots.

    Uses module-level s_isomuon, s_jet, met and the trigger flags.
    Returns 1 if the event was selected, else 0.
    """
    isMuMu = 0
    if s_isomuon[0].charge*s_isomuon[1].charge < 0 :
        mu1 = get_TLorentzVector(s_isomuon[0])
        mu2 = get_TLorentzVector(s_isomuon[1])
        jet = get_TLorentzVector(s_jet[0])
        # Trigger requirement: Mu50, IsoMu27 or HT (high-pT jet) path.
        if ( mu1.Pt() > 50 and isMu == 1 ) or ( mu1.Pt() > 27 and isIsoMu == 1 )\
        or ( jet.Pt() > 500 and isHT == 1 ) :
            if pass_deltaR(mu1, mu2, jet, 'MuMu') == 1 :
                if met.Pt() > event_cut['metcut'] :
                    plot_variable('MuMu', mu1, mu2, jet, met)
                    isMuMu = 1
    return isMuMu
def etau_channel():
    """Fill 'ETau' plots with no selection cuts.

    NOTE(review): the main event loop implements the e-tau selection inline
    and never calls this helper — confirm whether it is dead code.
    """
    e = get_TLorentzVector(s_electron[0])
    tau = get_TLorentzVector(s_tauEclean[0])
    jet = get_TLorentzVector(s_jet[0])
    plot_variable('ETau', e, tau, jet, met)
def mutau_channel():
    """Select an opposite-sign muon + muon-cleaned-tau event; fill 'MuTau' plots.

    Uses module-level s_muon, s_tauMuclean, s_jet, met and trigger flags.
    Returns 1 if the event was selected, else 0.
    """
    isMuTau = 0
    if s_muon[0].charge*s_tauMuclean[0].charge < 0:
        mu = get_TLorentzVector(s_muon[0])
        tau = get_TLorentzVector(s_tauMuclean[0])
        jet = get_TLorentzVector(s_jet[0])
        # Trigger requirement: Mu50, IsoMu27 or HT (high-pT jet) path.
        if ( mu.Pt() > 50 and isMu == 1 ) or ( mu.Pt() > 27 and isIsoMu == 1 )\
        or ( jet.Pt() > 500 and isHT == 1 ) :
            if pass_deltaR(mu, tau, jet, 'MuTau') == 1 :
                # Suppress W+jets with a low-Mt requirement.
                if Mt(mu,met) < 50 :
                    if met.Pt() > event_cut['metcut'] :
                        plot_variable('MuTau', mu, tau, jet, met)
                        isMuTau = 1
    return isMuTau
def ee_channel():
    """Select an opposite-sign di-electron event; fill 'EE' plots.

    Uses module-level s_isoelectron, s_jet, met and trigger flags.
    Returns 1 if the event was selected, else 0.
    """
    isEE = 0
    if s_isoelectron[0].charge*s_isoelectron[1].charge < 0 :
        e1 = get_TLorentzVector(s_isoelectron[0])
        e2 = get_TLorentzVector(s_isoelectron[1])
        jet = get_TLorentzVector(s_jet[0])
        # Trigger requirement: HT (high-pT jet) or single-isolated-electron path.
        if ( jet.Pt() > 500 and isHT == 1 ) or ( e1.Pt() > 35 and isIsoEle == 1 ) :
            if pass_deltaR(e1, e2, jet, 'EE') == 1 :
                if met.Pt() > event_cut['metcut'] :
                    plot_variable('EE', e1, e2, jet, met)
                    isEE = 1
    return isEE
def emu_channel():
    """Select an opposite-sign electron-muon event; fill 'EMu' plots.

    Uses module-level s_isomuon, s_isoelectron, s_jet, met and trigger flags.
    Returns 1 if the event was selected, else 0.
    """
    isEMu = 0
    if s_isomuon[0].charge*s_isoelectron[0].charge < 0:
        e = get_TLorentzVector(s_isoelectron[0])
        mu = get_TLorentzVector(s_isomuon[0])
        jet = get_TLorentzVector(s_jet[0])
        # Trigger requirement: Mu50, IsoMu27, the two MuonEG pT combinations,
        # or the HT (high-pT jet) path.
        if ( mu.Pt() > 50 and isMu == 1 ) or ( mu.Pt() > 27 and isIsoMu == 1 ) \
        or ( ( ( mu.Pt() > 8 and e.Pt() > 23 ) or ( mu.Pt() > 23 and e.Pt() > 12 ) ) and isMuonEG == 1 ) \
        or ( jet.Pt() > 500 and isHT == 1 ) :
            if pass_deltaR(e, mu, jet, 'EMu') == 1 :
                if met.Pt() > event_cut['metcut'] :
                    plot_variable('EMu', e, mu, jet, met)
                    isEMu = 1
    return isEMu
# Build the full list of histogram regions: the four control channels plus
# every ETau OS/SS x dR x MET x Mt combination.
regions = ['MuMu', 'MuTau', 'EMu', 'EE']
etauR = ['ETau_OS','ETau_SS']
etauM = ['highMt', 'lowMt']
for e in etauR :
    regions.append(e)
    regions.append(e+'_dRcut')
    regions.append(e+'_dRcut_highMET')
    regions.append(e+'_dRcut_lowMET')
    for m in etauM :
        regions.append(e+'_dRcut_highMET_'+m)
        regions.append(e+'_dRcut_lowMET_'+m)
book_histogram()
for r in regions:
    book_event_histogram(r)
# Enable per-bin sum-of-weights errors on every booked histogram.
for key in h.keys():
    h[key].Sumw2()
#-------- File loop --------#
# NOTE(review): the file-list handle is never closed — harmless for a
# batch job, but `with open(...)` would be tidier.
inputFileNames=open(inputFileList, 'r')
for inputFileName in inputFileNames:
    inputFileName=inputFileName.replace("\n","")
    print(inputFileName.replace("\n",""))
    fchain.Add(inputFileName)
    chain2.Add(inputFileName)
    if isData == 0:
        chain3.Add(inputFileName)
        chain4.Add(inputFileName)
#------- Adding friends to the main chain -------#
fchain.AddFriend(chain2)
if isData == 0:
    fchain.AddFriend(chain3)
    fchain.AddFriend(chain4)
# Containers the branches are deserialized into, reused for every event.
jets = ROOT.JetInfoDS()
muons = ROOT.MuonInfoDS()
electrons = ROOT.ElectronInfoDS()
tausUnCleaned = ROOT.TauInfoDS()
tausECleaned = ROOT.TauInfoDS()
tausMCleaned = ROOT.TauInfoDS()
tausBoosted = ROOT.TauInfoDS()
fchain.SetBranchAddress("Jets", ROOT.AddressOf(jets))
fchain.SetBranchAddress("Muons", ROOT.AddressOf(muons))
fchain.SetBranchAddress("Electrons", ROOT.AddressOf(electrons))
fchain.SetBranchAddress("TausUnCleaned", ROOT.AddressOf(tausUnCleaned))
fchain.SetBranchAddress("TausECleaned", ROOT.AddressOf(tausECleaned))
fchain.SetBranchAddress("TausMCleaned", ROOT.AddressOf(tausMCleaned))
fchain.SetBranchAddress("TausBoosted", ROOT.AddressOf(tausBoosted))
if isData == 0:
    genParticle = ROOT.GenParticleInfoDS()
    fchain.SetBranchAddress("GenParticleInfo", ROOT.AddressOf(genParticle))
#----------- Event loop ----------#
for iev in range(fchain.GetEntries()): # Be careful!!!
    fchain.GetEntry(iev)
    mets = fchain.GetBranch("Mets")
    met_pt = mets.GetLeaf('pt').GetValue()
    met_phi = mets.GetLeaf('phi').GetValue()
    # Build a massless MET four-vector in the transverse plane.
    met = ROOT.TLorentzVector()
    met.SetPtEtaPhiM(met_pt, 0, met_phi, 0)
    # MC gets generator and pile-up weights; data is weighted by 1.
    if isData == 0:
        genweight = fchain.GetLeaf('genWeight').GetValue()
        puweight = fchain.GetLeaf('puWeight').GetValue()
    else :
        genweight = 1
        puweight = 1
    weight = genweight*puweight
    h['hEvents'].Fill(0.5, 1)
    h['hEvents'].Fill(1.5, weight)
    h['hWeights'].Fill(weight)
    h['hPuWeights'].Fill(puweight)
    h['hGenWeights'].Fill(genweight)
    # Trigger decision flags for this event.
    isSingleJet = fchain.GetLeaf('isSingleJet').GetValue()
    isHT = fchain.GetLeaf('isHT').GetValue()
    isHTMHT = fchain.GetLeaf('isHTMHT').GetValue()
    isMu = fchain.GetLeaf('isMu').GetValue()
    isIsoMu = fchain.GetLeaf('isIsoMu').GetValue()
    isIsoMuTau = fchain.GetLeaf('isIsoMuTau').GetValue()
    isIsoEle = fchain.GetLeaf('isIsoEle').GetValue()
    isEleTau = fchain.GetLeaf('isEleTau').GetValue()
    isMuonEG = fchain.GetLeaf('isMuonEG').GetValue()
    #------------ Objects loop ------------#
    # Select jets (|eta|<2.5, id>=2) and tag b-jets by deepjet score.
    s_jet = []
    s_bjet = []
    if jets.size() > 0:
        for i in range(jets.size()):
            ijet = jets.at(i)
            if abs(ijet.eta) < 2.5 :
                if ijet.id >= 2:
                    h['hJetPt'].Fill(ijet.pt, weight)
                    h['hDeepjet'].Fill(ijet.deepjet, weight)
                    s_jet+=[ijet]
                    if ijet.deepjet >= 0.7476:
                        h['hBJetPt'].Fill(ijet.pt, weight)
                        s_bjet+=[ijet]
    # Select loose muons and split them by isolation (iso <= 0.25).
    s_muon = []
    s_isomuon = []
    s_nonisomuon = []
    if muons.size() > 0:
        for i in range(muons.size()):
            imuon = muons.at(i)
            if abs(imuon.eta) < 2.4 :
                if imuon.id >= 1: #loose Muons
                    h['hMuonPt'].Fill(imuon.pt, weight)
                    s_muon+=[imuon]
                    if imuon.iso <= 0.25:
                        h['hIsoMuonPt'].Fill(imuon.pt, weight)
                        s_isomuon+=[imuon]
                    if imuon.iso > 0.25:
                        h['hNonIsoMuonPt'].Fill(imuon.pt, weight)
                        s_nonisomuon+=[imuon]
    # Select electrons and split them by the iso field.
    # NOTE(review): unlike muons, electron isolation is tested as iso >= 1
    # vs iso == 0 — it looks like a flag rather than a relative isolation
    # value; confirm against the skimmer's ElectronInfoDS definition.
    s_electron = []
    s_isoelectron = []
    s_nonisoelectron = []
    if electrons.size() > 0:
        for i in range(electrons.size()):
            ielectron = electrons.at(i)
            if abs(ielectron.eta) < 2.5 :
                if ielectron.id >= 1 :
                    h['hElectronPt'].Fill(ielectron.pt, weight)
                    s_electron+=[ielectron]
                    if ielectron.iso >= 1:
                        h['hIsoElectronPt'].Fill(ielectron.pt, weight)
                        s_isoelectron+=[ielectron]
                    if ielectron.iso == 0:
                        h['hNonIsoElectronPt'].Fill(ielectron.pt, weight)
                        s_nonisoelectron+=[ielectron]
    # Electron-cleaned taus passing the MVA id.
    s_tauEclean = []
    if tausECleaned.size()>0:
        for i in range(tausECleaned.size()):
            itau = tausECleaned.at(i)
            if abs(itau.eta) < 2.3 :
                if itau.mvaid >= 4:
                    h['hTauECleanedPt'].Fill(itau.pt, weight)
                    s_tauEclean+=[itau]
    # Muon-cleaned taus passing the MVA id.
    s_tauMuclean = []
    if tausMCleaned.size()>0:
        for i in range(tausMCleaned.size()):
            itau = tausMCleaned.at(i)
            if abs(itau.eta) < 2.3 :
                if itau.mvaid >= 4 :
                    h['hTauMuCleanedPt'].Fill(itau.pt, weight)
                    s_tauMuclean+=[itau]
    # ---------- Event Selections --------- #
    # Channels are tried in priority order; the first match consumes the
    # event (continue).  All channels veto events containing a b-jet.
    if len(s_isomuon) >= 2 and len(s_jet) >= 1 and len(s_bjet) == 0 :
        if mumu_channel() == 1: continue
    if len(s_isomuon) >= 1 and len(s_isoelectron) >= 1 and len(s_jet) >= 1 and len(s_bjet) == 0 :
        if emu_channel() == 1 : continue
    if len(s_muon) >= 1 and len(s_tauMuclean) >= 1 and len(s_jet) >= 1 and len(s_bjet) == 0 :
        if mutau_channel() == 1 : continue
    if len(s_isoelectron) >=2 and len(s_jet) >= 1 and len(s_bjet) == 0 :
        if ee_channel() == 1 : continue
    # Signal e-tau selection, done inline: OS/SS, dR, high/low MET and
    # high/low Mt regions, with data split by triggering sample.
    if len(s_electron) >= 1 and len(s_tauEclean) >= 1 and len(s_jet) >= 1 and len(s_bjet) == 0 :
        isJetHTEvent = 0
        isSingleElectronEvent = 0
        e = get_TLorentzVector(s_electron[0])
        tau = get_TLorentzVector(s_tauEclean[0])
        jet = get_TLorentzVector(s_jet[0])
        if ( jet.Pt() > 500 and isHT == 1 ) : isJetHTEvent = 1
        if ( e.Pt() > 35 and isIsoEle == 1 ) : isSingleElectronEvent = 1
        if (e+tau).M() > event_cut['mass'] :
            # For data, avoid double counting: JetHT takes JetHT-triggered
            # events; SingleElectron takes only non-JetHT-triggered ones.
            if ( isData == 0 and ( isJetHTEvent == 1 or isSingleElectronEvent == 1 ) ) \
               or ( isData == 1 and ( isJetHTSample == 1 and isJetHTEvent == 1 ) ) \
               or ( isData == 1 and ( isSingleElectronSample == 1 and ( isJetHTEvent == 0 and isSingleElectronEvent == 1 ) ) ) :
                if s_electron[0].charge*s_tauEclean[0].charge < 0 : #OS
                    plot_variable('ETau_OS', e, tau, jet, met)
                    if pass_deltaR(e, tau, jet, 'ETau') == 1:
                        plot_variable('ETau_OS_dRcut', e, tau, jet, met)
                        if met.Pt() > event_cut['metcut'] : #OS highMET
                            plot_variable('ETau_OS_dRcut_highMET', e, tau, jet, met)
                            if Mt(e, met) < event_cut['mtcut'] : #OS highMET lowMt
                                plot_variable('ETau_OS_dRcut_highMET_lowMt', e, tau, jet, met)
                            if Mt(e, met) > event_cut['mtcut'] : #OS highMET highMt
                                plot_variable('ETau_OS_dRcut_highMET_highMt', e, tau, jet, met)
                        if met.Pt() < event_cut['metcut'] : #OS lowMET
                            plot_variable('ETau_OS_dRcut_lowMET', e, tau, jet, met)
                            if Mt(e, met) < event_cut['mtcut'] : #OS lowMET lowMt
                                plot_variable('ETau_OS_dRcut_lowMET_lowMt', e, tau, jet, met)
                            if Mt(e, met) > event_cut['mtcut'] : #OS lowMET highMt
                                plot_variable('ETau_OS_dRcut_lowMET_highMt', e, tau, jet, met)
                if s_electron[0].charge*s_tauEclean[0].charge > 0 : #SS
                    plot_variable('ETau_SS', e, tau, jet, met)
                    if pass_deltaR(e, tau, jet, 'ETau') == 1:
                        plot_variable('ETau_SS_dRcut', e, tau, jet, met)
                        if met.Pt() > event_cut['metcut'] : #SS highMET
                            plot_variable('ETau_SS_dRcut_highMET', e, tau, jet, met)
                            if Mt(e, met) < event_cut['mtcut'] : #SS highMET lowMt
                                plot_variable('ETau_SS_dRcut_highMET_lowMt', e, tau, jet, met)
                            if Mt(e, met) > event_cut['mtcut'] : #SS highMET highMt
                                plot_variable('ETau_SS_dRcut_highMET_highMt', e, tau, jet, met)
                        if met.Pt() < event_cut['metcut'] : #SS lowMET
                            plot_variable('ETau_SS_dRcut_lowMET', e, tau, jet, met)
                            if Mt(e, met) < event_cut['mtcut'] : #SS lowMET lowMt
                                plot_variable('ETau_SS_dRcut_lowMET_lowMt', e, tau, jet, met)
                            if Mt(e, met) > event_cut['mtcut'] : #SS lowMET highMt
                                plot_variable('ETau_SS_dRcut_lowMET_highMt', e, tau, jet, met)
# Write every histogram to the output file and report timing.
out.cd()
for key in h.keys():
    h[key].Write()
out.Close()
print("--- %s seconds ---" % (time.time() - start_time))
print(regions)
987,257 | ffe911b4598a8c26c2df5f53aff853789b19ceb9 | import sys
# Compute the binomial coefficient C(n, r) for n and r read from stdin.
n, r = map(int, sys.stdin.readline().split())
# Symmetry: C(n, r) == C(n, n-r); use the smaller r to shorten the loop.
r = min(n-r, r)
res = 1
# bugfix: the original multiplied float ratios and truncated with int(),
# which loses precision for large n.  Multiplying before the floor division
# keeps every intermediate value an exact integer (res is always C(n, i)).
for i in range(1, r + 1):
    res = res * (n - i + 1) // i
print(res)
987,258 | f9169f47597c351594c66321ed40e2010035fd6e | import torch
import os
from torch.utils.tensorboard import SummaryWriter
class Logger(object):
    """Thin wrapper around ``torch.utils.tensorboard.SummaryWriter``.

    Adds helpers for scalars, text, model graphs/weights and incrementally
    built videos.  Video frames are buffered per tag in ``self.imgs_dict``
    until a call marks the video as finished.
    """
    def __init__(self, log_dir, comment=''):
        # One writer per Logger; imgs_dict maps tag -> list of frame tensors.
        self.writer = SummaryWriter(log_dir=log_dir, comment=comment)
        self.imgs_dict = {}
    def scalar_summary(self, tag, value, step):
        """Log one scalar under *tag* at *step*, flushing immediately."""
        self.writer.add_scalar(tag, value, global_step=step)
        self.writer.flush()
    def combined_scalars_summary(self, main_tag, tag_scalar_dict, step):
        """Log several related scalars on a single chart under *main_tag*."""
        self.writer.add_scalars(main_tag, tag_scalar_dict, step)
        self.writer.flush()
    def log(self, tag, text_string, step=0):
        """Log a free-form text entry."""
        self.writer.add_text(tag, text_string, step)
        self.writer.flush()
    def log_model(self, model, inputs):
        """Log the model graph; *inputs* is a sample batch used for tracing."""
        self.writer.add_graph(model, inputs)
        self.writer.flush()
    def get_dir(self):
        """Return the directory this writer logs into."""
        return self.writer.get_logdir()
    def log_model_state(self, model, name='tmp'):
        """Save the model's ``state_dict`` inside the log directory."""
        path = os.path.join(self.writer.get_logdir(), type(model).__name__ + '_%s.pt' % name)
        torch.save(model.state_dict(), path)
    def log_video(self, tag, global_step=None, img_tns=None,finished_video=False, video_tns=None, debug=False):
        '''Log a video to TensorBoard.

        Frames passed via *img_tns* are buffered per *tag*; once
        *finished_video* is True the buffered frames are written out as one
        video and the buffer is reset.  If *video_tns* is given it is
        logged directly and the other arguments are ignored.
        '''
        if debug:
            import pdb; pdb.set_trace()
        if img_tns is None and video_tns is None:
            # No new frame: only act if asked to finish an existing buffer.
            if not finished_video or tag not in self.imgs_dict.keys():
                return None
            lst_img_tns = self.imgs_dict[tag]
            self.writer.add_video(tag, torch.tensor(lst_img_tns), global_step=global_step, fps=4)
            self.writer.flush()
            self.imgs_dict[tag] = []
            return None
        elif video_tns is not None:
            # A complete video tensor was supplied: log it as-is.
            self.writer.add_video(tag, video_tns, global_step=global_step, fps=4)
            self.writer.flush()
            return None
        # Buffer the new frame for this tag (creating the buffer on first use).
        if tag in self.imgs_dict.keys():
            lst_img_tns = self.imgs_dict[tag]
        else:
            lst_img_tns = []
            self.imgs_dict[tag] = lst_img_tns
        lst_img_tns.append(img_tns)
        if finished_video:
            self.writer.add_video(tag, torch.tensor(lst_img_tns), global_step=global_step, fps=4)
            self.writer.flush()
            self.imgs_dict[tag].clear()
    def close(self):
        self.writer.close()
|
987,259 | ef7e100bb1425d3aaf817141b4ca2f4b896070b6 | """learn_django_pro_V_2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from app.views import home, view_books, view_cached_books
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('', home, name='home'),  # landing page
    path('books/', view_books),  # uncached book listing
    path('view_cached_books/', view_cached_books, name='view_cached_books'),  # cached variant
    path('products/', include('payment.urls'))  # payment app routes
]
|
987,260 | e2c01fce7ad1afb4e864ea50502cacadaa223dd4 | import re
# 處理繁體中文
import jieba2 as jieba
import jieba2.analyse
import sqlite3
import pickle
from collections import defaultdict
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.svm import LinearSVC
# Load the stop-word list (one word per line).
def stopwordslist(filepath):
    """Return the stop words stored in *filepath*, one entry per line."""
    with open(filepath, 'r', encoding='utf-8') as handle:
        return [entry.strip() for entry in handle.readlines()]
# Remove stop words from a piece of text.
def removestopwords(content, stopwords):
    """Tokenise *content* with jieba and return the non-stop-word tokens."""
    return [token for token in jieba2.cut(content) if token not in stopwords]
# Build a per-post sentiment score from PTT push tags and train a linear
# SVM that maps each post's word frequencies to that score.
stopwords = stopwordslist('./stopwords.txt')
conn = sqlite3.connect('./dataSet/PTT Data Set.db')
cursor = conn.cursor()
pushes = cursor.execute('SELECT Title, Push_tag FROM PTT_Gossiping')
# Title, Push_tag
# Aggregate push tags into one score per post: '推' (upvote) counts +1,
# '噓' (boo) counts -1.  Rows are assumed to arrive grouped by title.
scores = []
temp = None
temp_score = 0
count = 0
for push in pushes:
    if temp is None:
        temp = push[0]
    if push[0] in temp:
        if push[1] == '推':
            temp_score += 1
        elif push[1] == '噓':
            temp_score -= 1
    elif push[0] not in temp:
        # A new title begins: flush the score of the previous post.
        scores.append(temp_score)
        temp = push[0]
        if count % 150 == 0:
            print('temp_score: ', temp_score)
        temp_score = 0
    count += 1
# NOTE(review): the final post's score is never appended after the loop;
# the word-count loop below has the same shape, so the lengths stay in
# step, but the last post is silently dropped — verify this is intended.
print(len(scores))
rlt = []
corpus = cursor.execute('SELECT Title, Content FROM PTT_Gossiping ')
temp_title = None
words = defaultdict(int)
for content in corpus:
    if temp_title is None:
        temp_title = content[0]
    if content[0] in temp_title:
        if content[1] is None:
            continue
        else:
            old_sentence = str(content[1]).replace("\\n", '')
            # Keep Traditional Chinese characters only.
            new_sentence = re.sub(r'[^\u4e00-\u9fa5]', '', old_sentence)
            non_stopword_sentence = removestopwords(new_sentence, stopwords)
            for w in non_stopword_sentence:
                words[w] += 1
    elif content[0] not in temp_title:
        # New title: store the previous post's word-frequency dict.
        rlt.append(words)
        words = defaultdict(int)
        temp_title = content[0]
print(len(rlt))
# Vectorise word counts, apply TF-IDF weighting and fit a linear SVM.
dvec = DictVectorizer()
tfidf = TfidfTransformer()
X = tfidf.fit_transform(dvec.fit_transform(rlt))
svc = LinearSVC()
svc.fit(X, scores)
# Persist the vocabulary and the learned coefficients for later scoring.
with open('./SVM_LABEL.pickle', 'wb') as f:
    pickle.dump(dvec.get_feature_names(), f)
with open('./SVM_coef.pickle', 'wb') as f:
    t = []
    for i in svc.coef_[0]:
        t.append(i)
    pickle.dump(t, f)
print(dvec.get_feature_names()[:10])
print(svc.coef_[0][:10])
|
987,261 | 72548887be201de4d8b279dcb1fff83d6ab89d33 | """ crear una fucnion que muestre el nombre y el apellido """
""" def nom (name, apellido):
print("Hola", name,apellido)
name = input("Digite su Nombre: ")
ape = input("Digite su Apellido: ")
nom(name, ape)
"""
"""
def f1():
x=100
print(x)
x=+1
f1()
"""
"""
EJERCICIOS DE CLASE
1. Solicitar al usuario que ingrese su dirección email. Imprimir un mensaje indicando si la dirección es válida o no, valiéndose de una función para decidirlo. Una dirección se considerará válida si contiene el símbolo "@".
"""
""" def validarCorreo(a):
buscar = a.rfind("@")
if buscar < 0:
print("Correo Invalido")
else:
print("Correo Valido")
correo = input("Ingrese su correo: ")
validarCorreo(correo)
"""
"""
2 Escribir una función que, dado un número de CC y el nombre, retorne el documento y el nombre si el número es válido y False retorne el mensaje "documento equivocado". Para que un número de CC sea válido debe tener entre 9 y 11 dígitos.
"""
def validarCorreo(docu,name):
    """Print the person's data when the CC document number is valid.

    A CC number is valid when it has between 9 and 11 digits inclusive;
    otherwise an error message is printed.
    """
    cuenta = len(docu)
    # range(9, 12) covers lengths 9, 10 and 11; the original range(9, 11)
    # wrongly rejected 11-digit documents despite the stated 9-11 rule.
    if cuenta in range(9, 12):
        print(f"---------\nDatos\nN° documento:{docu}\nNombre: {name}")
    else:
        print("Documento Equivocado")
docu = input("Ingrese su número de documento: ")  # document number, kept as text
name = input("Ingrese su nombre completo: ")  # full name
validarCorreo(docu,name)  # prints the data or an error message
987,262 | d558dbaf4e20def09e7eeeae8fa522752d192bfd | import logging
import tornado.ioloop
import tornado.web
import tornado.websocket
SERVER_HOST = '127.0.0.1'  # bind address (localhost only)
SERVER_PORT = 8646  # listening port
logging.basicConfig(level=logging.WARNING)  # suppress tornado info logs
class EchoWebSocket(tornado.websocket.WebSocketHandler):
    """WebSocket handler that echoes every message back to the sender."""
    def on_message(self, message):
        # Preserve the frame type: binary input is echoed as a binary frame.
        self.write_message(message, binary=isinstance(message, bytes))
# Route '/' to the echo handler; allow frames up to 32 MiB (2 ** 25 bytes).
app = tornado.web.Application(
    [
        (r'/', EchoWebSocket)
    ],
    websocket_max_message_size=2 ** 25)
app.listen(SERVER_PORT, SERVER_HOST)
tornado.ioloop.IOLoop.current().start()
|
987,263 | 26ea434eb570d3a9ec89ba2558116a873f497617 | def period(num, div):
assert(div > num)
idx = 0
nums = {}
while num != 0:
num = num % div
while num < div and num != 0:
num = num * 10
digit = num // div
if nums.get(num) is not None:
return idx - nums[num]
nums[num] = idx
idx += 1
return 0
def test_period():
    """Sanity checks for period() against known decimal expansions."""
    cases = [((1, 2), 0), ((1, 3), 1), ((1, 7), 6), ((1, 8), 0), ((1, 10), 0)]
    for args, expected in cases:
        assert(period(*args) == expected)
def longest_rep(fl):
    # NOTE(review): this stub is broken — `num` is undefined (NameError if
    # called), the `fl` argument is unused and nothing is returned.
    dg = int(num) % 10
def max_idx_period_under(n):
    """Return the divisor i in [2, n) for which 1/i has the longest decimal
    period (the first such i on ties), or 0 when no divisor has a cycle."""
    best_period = 0
    best_divisor = 0
    for divisor in range(2, n):
        cycle = period(1, divisor)
        if cycle > best_period:
            best_period = cycle
            best_divisor = divisor
    return best_divisor
if __name__ == '__main__':
    test_period()  # run the sanity checks before the search
    print(max_idx_period_under(1000))  # divisor under 1000 with longest 1/d cycle
|
987,264 | 2d3fd113a5f2e91c8b10a1e0218d472bf1a05741 | # Generated by Django 3.0.3 on 2020-07-21 00:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add ``Board.b_id``: a FK to the owning user (related_name='boards').

    ``default=1`` back-fills existing rows with user pk 1 and is then
    dropped from the schema (``preserve_default=False``).
    """
    dependencies = [
        ('users', '0004_auto_20200719_1404'),
    ]
    operations = [
        migrations.AddField(
            model_name='board',
            name='b_id',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='boards', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
|
987,265 | c25f0e08f235f4f00ac134b44fa6b50849f1e5d2 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
import os
import jpype
from future.utils import iteritems
from pyathenajdbc import (
ATHENA_CONNECTION_STRING,
ATHENA_DRIVER_CLASS_NAME,
ATHENA_JAR,
LOG4J_PROPERTIES,
)
from pyathenajdbc.converter import JDBCTypeConverter
from pyathenajdbc.cursor import Cursor
from pyathenajdbc.error import NotSupportedError, ProgrammingError
from pyathenajdbc.formatter import ParameterFormatter
from pyathenajdbc.util import attach_thread_to_jvm, synchronized
_logger = logging.getLogger(__name__)
class Connection(object):
    """DB-API style connection to AWS Athena via the Simba JDBC driver.

    Boots a JVM (once per process) through jpype, loads the bundled Athena
    driver jar, and opens a ``java.sql`` connection configured from keyword
    arguments and environment variables.
    """
    # Environment variables consulted when building driver properties.
    _ENV_S3_STAGING_DIR = "AWS_ATHENA_S3_STAGING_DIR"
    _ENV_S3_OUTPUT_LOCATION = "AWS_ATHENA_S3_OUTPUT_LOCATION"
    _ENV_WORK_GROUP = "AWS_ATHENA_WORK_GROUP"
    # Package directory: the driver jar and log4j config ship alongside it.
    _BASE_PATH = os.path.dirname(os.path.abspath(__file__))
    def __init__(
        self,
        jvm_path=None,
        jvm_options=None,
        converter=None,
        formatter=None,
        driver_path=None,
        log4j_conf=None,
        **driver_kwargs
    ):
        """Start the JVM if needed and open the JDBC connection.

        Extra keyword arguments are forwarded to the driver as connection
        properties (e.g. ``AwsRegion``, ``Schema``, ``Workgroup``).
        """
        self._start_jvm(jvm_path, jvm_options, driver_path, log4j_conf)
        self._driver_kwargs = driver_kwargs
        self.region_name = self._driver_kwargs.get(
            "AwsRegion", os.getenv("AWS_DEFAULT_REGION", None)
        )
        self.schema_name = self._driver_kwargs.get("Schema", "default")
        self.work_group = self._driver_kwargs.get(
            "Workgroup", os.getenv(self._ENV_WORK_GROUP, None)
        )
        props = self._build_driver_args()
        # Touching the class forces the driver to register itself with
        # java.sql.DriverManager.
        jpype.JClass(ATHENA_DRIVER_CLASS_NAME)
        if self.region_name:
            self._jdbc_conn = jpype.java.sql.DriverManager.getConnection(
                ATHENA_CONNECTION_STRING.format(region=self.region_name), props
            )
        else:
            # NOTE(review): getConnection() is called without a URL or
            # properties here; java.sql.DriverManager cannot resolve a
            # driver from a no-arg call, so this branch looks broken when
            # no region is configured — verify.
            self._jdbc_conn = jpype.java.sql.DriverManager.getConnection()
        self._converter = converter if converter else JDBCTypeConverter()
        self._formatter = formatter if formatter else ParameterFormatter()
    @classmethod
    @synchronized
    def _start_jvm(cls, jvm_path, jvm_options, driver_path, log4j_conf):
        """Boot the JVM once per process and install a class loader that
        can see the Athena driver jar (also when the JVM pre-exists)."""
        if jvm_path is None:
            jvm_path = jpype.get_default_jvm_path()
        if driver_path is None:
            driver_path = os.path.join(cls._BASE_PATH, ATHENA_JAR)
        if log4j_conf is None:
            log4j_conf = os.path.join(cls._BASE_PATH, LOG4J_PROPERTIES)
        if not jpype.isJVMStarted():
            _logger.debug("JVM path: %s", jvm_path)
            args = [
                "-server",
                "-Djava.class.path={0}".format(driver_path),
                "-Dlog4j.configuration=file:{0}".format(log4j_conf),
            ]
            if jvm_options:
                args.extend(jvm_options)
            _logger.debug("JVM args: %s", args)
            # jpype 0.6.x has a different startJVM signature.
            if jpype.__version__.startswith("0.6"):
                jpype.startJVM(jvm_path, *args)
            else:
                jpype.startJVM(
                    jvm_path, *args, ignoreUnrecognized=True, convertStrings=True
                )
            cls.class_loader = (
                jpype.java.lang.Thread.currentThread().getContextClassLoader()
            )
        if not jpype.isThreadAttachedToJVM():
            jpype.attachThreadToJVM()
        if not cls.class_loader:
            cls.class_loader = (
                jpype.java.lang.Thread.currentThread().getContextClassLoader()
            )
        # Layer a URLClassLoader over the current one so the driver jar is
        # visible even when the JVM was started outside this class.
        class_loader = jpype.java.net.URLClassLoader.newInstance(
            [jpype.java.net.URL("jar:file:{0}!/".format(driver_path))],
            cls.class_loader,
        )
        jpype.java.lang.Thread.currentThread().setContextClassLoader(class_loader)
    def _build_driver_args(self):
        """Assemble ``java.util.Properties`` for the driver from instance
        settings, environment variables and the extra keyword arguments."""
        props = jpype.java.util.Properties()
        props.setProperty(
            "AwsCredentialsProviderClass",
            "com.simba.athena.amazonaws.auth.DefaultAWSCredentialsProviderChain",
        )
        s3_staging_dir = os.getenv(self._ENV_S3_STAGING_DIR, None)
        if s3_staging_dir:
            props.setProperty("S3OutputLocation", s3_staging_dir)
        # NOTE(review): both env vars map onto the same "S3OutputLocation"
        # property, so AWS_ATHENA_S3_OUTPUT_LOCATION silently overrides
        # AWS_ATHENA_S3_STAGING_DIR when both are set — confirm intended.
        s3_output_location = os.getenv(self._ENV_S3_OUTPUT_LOCATION, None)
        if s3_output_location:
            props.setProperty("S3OutputLocation", s3_output_location)
        if self.region_name:
            props.setProperty("AwsRegion", self.region_name)
        if self.schema_name:
            props.setProperty("Schema", self.schema_name)
        if self.work_group:
            props.setProperty("Workgroup", self.work_group)
        # Remaining keyword arguments are forwarded verbatim.
        for k, v in iteritems(self._driver_kwargs):
            if k and v:
                props.setProperty(k, v)
        return props
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit always closes the JDBC connection.
        self.close()
    @attach_thread_to_jvm
    def cursor(self):
        """Return a new Cursor; raises ProgrammingError once closed."""
        if self.is_closed:
            raise ProgrammingError("Connection is closed.")
        return Cursor(self._jdbc_conn, self._converter, self._formatter)
    @attach_thread_to_jvm
    @synchronized
    def close(self):
        """Close the underlying JDBC connection (idempotent)."""
        if not self.is_closed:
            self._jdbc_conn.close()
            self._jdbc_conn = None
    @property
    @attach_thread_to_jvm
    def is_closed(self):
        # Closed when never opened, explicitly closed here, or closed on
        # the Java side.
        return self._jdbc_conn is None or self._jdbc_conn.isClosed()
    def commit(self):
        """Athena JDBC connection is only supported for auto-commit mode."""
        pass
    def rollback(self):
        raise NotSupportedError(
            "Athena JDBC connection is only supported for auto-commit mode."
        )
|
987,266 | 907c242c055d28432945418379fff46392a3d062 | # Validate BST: Implement a function to check if a binary tree is a binary search tree.
from typing import Tuple
# Option 1 - do in-order traversal and copy values to array then check that array is sorted
class Node:
    """A binary-tree node holding an integer value and two child links."""
    def __init__(self, val: int) -> None:
        self.val = val
        self.left = None
        self.right = None
# Not my solution
# O(N) time and space
# This question is tricky, I just wanted to compare parent to children, but that's not correct
def validate_binary_tree(head: Node) -> bool:
    """Return True when the tree rooted at *head* satisfies the BST
    property.  An empty tree (head is None) reports False, matching the
    original contract."""
    return head is not None and is_bst(head, None, None)
def is_bst(node: Node, min: int, max: int) -> bool:
    """Recursively check the BST invariant while narrowing the (min, max]
    bounds inherited from ancestors.

    A value equal to the inherited minimum fails, while one equal to the
    maximum passes — so duplicates are only tolerated in left subtrees.
    """
    if node is None:
        return True
    too_big = max is not None and node.val > max
    too_small = min is not None and node.val <= min
    if too_big or too_small:
        return False
    return is_bst(node.left, min, node.val) and is_bst(node.right, node.val, max)
node = Node(5)
node.left = Node(5)  # duplicate value on the left is allowed
node.right = Node(8)
node.left.left = Node(2)
node.right.left = Node(6)
node.right.right = Node(10)
node.right.right = Node(12)  # NOTE(review): overwrites the Node(10) just assigned
print(validate_binary_tree(node)) # True
print(validate_binary_tree(Node(6))) # True
node.left.right = Node(6)  # 6 under the left subtree violates its max bound of 5
print(validate_binary_tree(node)) # False
987,267 | d413c43d396dd9877352570a08eab4454b999bec | nilai = [{'nim' : 'A01', 'nama' : 'Agustina', 'mid' : 50, 'uas' : 80},
{'nim' : 'A02', 'nama' : 'Budi', 'mid' : 40, 'uas' : 90},
{'nim' : 'A03', 'nama' : 'Chicha', 'mid' : 100, 'uas' : 50},
{'nim' : 'A04', 'nama' : 'Donna', 'mid' : 20, 'uas' : 100},
{'nim' : 'A05', 'nama' : 'Fatimah', 'mid' : 70, 'uas' : 100}]
print("="*60)
print("NIM".ljust(10), "NAMA".ljust(15), "N. MID".rjust(5), "N. UAS".rjust(10))
print("="*60)
m = 0
for a in nilai:
print(nilai[m]["nim"],end="")
x = len(nilai[m]["nama"])
print(nilai[m]["nama"].rjust(5+x),end="")
y = str(nilai[m]["mid"])
print(y.rjust(21-x),end="")
y= str(nilai[m]["uas"])
print(y.rjust(16))
m = m+1
print("="*60)
|
987,268 | e68987180add0eab8abe669e0833fa63cabea811 | from tkinter import *
from functools import partial
from Resolution.resolution_hidato import *
def main_window_hidato():
    """Open the main menu window with 'enter a grid' and 'quit' buttons."""
    root = Tk()
    root.title("Résolution de Hidato")
    root.resizable(0,0)
    root.geometry('150x200')
    button_frame = Frame(root)
    saisir_button = Button(button_frame, text="Saisir la grille",
                           command=partial(saisir_grille_hidato, root))
    quit_button = Button(root, text="Quitter", command=quit)
    # The original issued two .grid() calls on saisir_button; the second
    # superseded the first, so only the effective geometry is kept.  An
    # unused local grid template was removed as well.
    saisir_button.grid(row=0, column=0, ipady=15, padx=15, sticky=N+S+E+W)
    button_frame.grid(row=0, column=0)
    quit_button.grid(row=1, column=0)
    Grid.rowconfigure(root, 0, weight=1)
    Grid.rowconfigure(root, 1, weight=1)
    Grid.columnconfigure(button_frame, 0, weight=1)
    root.mainloop()
def saisir_grille_hidato(root):
    """Open the grid-entry window and wire up the solve/check actions."""
    # Template grid: '/' marks cells outside the playable diamond shape.
    grid = [['','','','','','/','/','/'],['','','','','','/','/','/'],['','','','','','','/','/'],['','','','','','','/','/'],['','','','','','','','/'],['/','/','','','','','','/'],['/','/','/','/','','','',''],['/','/','/','/','/','/','','']]
    def grid_to_list():
        # Convert the on-screen Entry widgets into a plain Python grid,
        # display the solver's result, then close this entry window.
        hidato_grid = []
        for i in range(8):
            hidato_grid.append([])
            for j in range(8):
                if len(graphical_grid[i][j])==1:
                    hidato_grid[i].append('/')
                elif graphical_grid[i][j][1].get()=='':
                    hidato_grid[i].append('')
                else:
                    value = graphical_grid[i][j][1].get()
                    hidato_grid[i].append(int(value))
        print(hidato_grid)
        affiche_grille_hidato(root,renvoie(hidato_grid))
        window.destroy()
    def check_grid():
        """Check whether the grid currently entered is correct."""
        hidato_grid = []
        for i in range(8):
            hidato_grid.append([])
            for j in range(8):
                if len(graphical_grid[i][j])==1:
                    hidato_grid[i].append('/')
                elif graphical_grid[i][j][1].get()=='':
                    hidato_grid[i].append('')
                else:
                    value = graphical_grid[i][j][1].get()
                    hidato_grid[i].append(int(value))
        correcte = is_grille_correcte(hidato_grid)
        def popupcheck(correcte):
            """Popup that reports whether the grid is correct."""
            popup = Tk()
            def destroy():
                popup.destroy()
                if correcte:
                    # NOTE(review): root.destroy is referenced but never
                    # called (missing parentheses) — likely a bug.
                    root.destroy
            if correcte:
                popup.wm_title("Hidato")
                label = Label(popup, text="La grille est correcte vous avez gagné!")
            else:
                popup.wm_title("Hidato")
                label = Label(popup, text="La grille est incorrecte, veuillez rééssayer")
            label.pack(side="top", fill="x", pady=10)
            B1 = Button(popup, text="Okay", command = destroy)
            B1.pack()
            popup.mainloop()
        popupcheck(correcte)
    window = Toplevel(root)
    window.title("Saisir une grille de Hidato")
    window.grid()
    grid_frame = Frame(window)
    button_frame = Frame(window)
    solve_button = Button(button_frame,text="Résoudre",command=grid_to_list)
    check_button = Button(button_frame,text="Vérifier la grille",command=check_grid)
    grid_frame.grid(row=0,column=0,sticky=N+S+E+W)
    button_frame.grid(row=1,column=0,sticky=N+S+E+W)
    solve_button.grid(row=0,column=1,sticky=N+S+E+W,pady=10)
    Grid.rowconfigure(window,0,weight=1)
    Grid.columnconfigure(window,0,weight=1)
    # Build the editable widget grid: playable cells get (Frame, Entry)
    # pairs, blocked '/' cells get a bare one-element [Frame] list.
    graphical_grid = []
    for i in range(8):
        ligne = []
        for j in range(8):
            if grid[i][j]!='/':
                f = Frame(grid_frame, bd=1, relief='solid',bg="white", height =80, width = 80)
                e = Entry(f, font="Arial 20",justify="center",bd=0)
                e.insert(0,str(grid[i][j]))
                ligne.append((f,e))
                ligne[j][1].pack(expand = YES)
            else:
                f = Frame(grid_frame,bd=0,height=100,width=100)
                ligne.append([f])
        graphical_grid.append(ligne)
    for i in range(8):
        for j in range(8):
            graphical_grid[i][j][0].grid(row=i, column = j, sticky=N+S+E+W)
    for x in range(8):
        Grid.columnconfigure(grid_frame, x, weight=1)
    for y in range(8):
        Grid.rowconfigure(grid_frame, y, weight=1)
def affiche_grille_hidato(root,grid):
    """Display a (solved) Hidato grid in a new read-only window."""
    print_grid = Toplevel(root)
    print_grid.title("Hidato")
    print(grid)  # debug output left in place
    graphical_grid=[]
    for i in range(len(grid)):
        ligne = []
        for j in range(len(grid)):
            print(grid[i][j])  # debug
            if grid[i][j] != '/':
                # Playable cell: white frame with a centered value label.
                f = Frame(print_grid,bg="white", bd=1, relief='solid', height =80, width = 80)
                ligne.append((f,Label(f,font="Arial 20",justify="center",bg="white",bd=0,text=str(grid[i][j]))))
                ligne[j][1].pack(expand = YES)
            else:ligne.append([Frame(print_grid,bg="white",bd=0,height=100,width=100)])
        graphical_grid.append(ligne)
    print("ok")  # debug marker
    for i in range(len(grid)):
        for j in range(len(grid)):
            print(graphical_grid[i][j])  # debug
            graphical_grid[i][j][0].grid(row=i, column = j,sticky=N+S+E+W)
        Grid.columnconfigure(print_grid, i ,weight=1)
        Grid.rowconfigure(print_grid, i ,weight=1)
    print("ok")  # debug marker
|
987,269 | 58158d04182032ce4e81844993cc1cc34eb5f9da | from __future__ import division
import numpy as np
from pysd import functions
from pysd import builder
class Components(builder.ComponentClass):
    """Auto-generated (PySD-style) system-dynamics test model.

    Each ``test_*`` method is a flow/auxiliary expression exercising one
    translated Vensim builtin; ``dummy`` is the only stock, integrated
    from ``flow`` via ``ddummy_dt``.
    """
    def test_modulo(self):
        """Type: Flow or Auxiliary
        """
        return np.mod(self.test_pulse_train(), 17)
    def flow(self):
        """Type: Flow or Auxiliary
        """
        return self.test_tan()
    def test_pulse_train(self):
        """Type: Flow or Auxiliary
        """
        return self.functions.pulse_train(self.test_min(), 2 , 5 , 50 )
    def test_cos(self):
        """Type: Flow or Auxiliary
        """
        return np.cos(self.test_pulse())
    def test_exp(self):
        """Type: Flow or Auxiliary
        """
        return np.exp(self.test_step())
    def test_if_then_else(self):
        """Type: Flow or Auxiliary
        """
        return self.functions.if_then_else(self.test_sqrt()>0, 65.5 , -9.2 )
    def test_integer(self):
        """Type: Flow or Auxiliary
        """
        return int(self.test_sin())
    def test_ln(self):
        """Type: Flow or Auxiliary
        """
        return self.test_random_uniform()+ np.log(4)
    def test_max(self):
        """Type: Flow or Auxiliary
        """
        return max(self.test_random_normal(), 4)
    def test_min(self):
        """Type: Flow or Auxiliary
        """
        return min(self.test_ramp(), 71.993)
    def test_random_uniform(self):
        """Type: Flow or Auxiliary
        """
        # NOTE(review): np.random.rand takes dimension sizes, not
        # (low, high, seed); this call yields an empty array and likely
        # mistranslates Vensim's RANDOM UNIFORM — verify against the model.
        return np.random.rand(self.test_integer(), 2* self.test_integer(), 0 )
    def test_pulse(self):
        """Type: Flow or Auxiliary
        """
        return self.functions.pulse(self.test_modulo(), 5 )
    def test_sqrt(self):
        """Type: Flow or Auxiliary
        """
        return np.sqrt(self.test_exp())
    def test_ramp(self):
        """Type: Flow or Auxiliary
        """
        return self.functions.ramp(self.test_max(), 5 , 10 )
    def test_random_normal(self):
        """Type: Flow or Auxiliary
        """
        return self.functions.bounded_normal(0 , 1000 , self.test_ln(), 5 , 0 )
    def test_tan(self):
        """Type: Flow or Auxiliary
        """
        return np.tan(self.test_cos())
    def test_sin(self):
        """Type: Flow or Auxiliary
        """
        return np.sin(self.test_if_then_else())
    def test_step(self):
        """Type: Flow or Auxiliary
        """
        return self.functions.step(self.test_abs(), 10 )
    def ddummy_dt(self):
        # Derivative of the `dummy` stock.
        return self.flow()
    def dummy_init(self):
        return 0
    def dummy(self):
        """ Stock: dummy =
                 self.flow()
                 Initial Value: 0
        Do not overwrite this function
        """
        return self.state["dummy"]
    def test_abs(self):
        """Type: Flow or Auxiliary
        """
        return abs(-5)
    def final_time(self):
        """Type: Flow or Auxiliary
        """
        return 100
    def initial_time(self):
        """Type: Flow or Auxiliary
        """
        return 0
    def saveper(self):
        """Type: Flow or Auxiliary
        """
        return self.time_step()
    def time_step(self):
        """Type: Flow or Auxiliary
        """
        return 1
987,270 | 2570a2cd12aea6ef848e4759441095d89ff8a5e8 | import uuid
from django.db import models
# Create your models here.
class Contract(models.Model):
    """A service contract tying a client to a metered address."""
    # Surrogate primary key, generated server-side and immutable.
    contract_id = models.UUIDField(primary_key=True, unique=True, default=uuid.uuid4, editable=False)
    # Owning client; not editable after creation.
    client_id = models.UUIDField(editable=False)
    # NOTE(review): presumably the cadastral/property-tax number — confirm.
    predial = models.FloatField()
    address = models.CharField(max_length=30)
    # Identifier of the physical meter installed at the address.
    meter = models.UUIDField()
    # Whether the contract is currently in force.
    active = models.BooleanField()
|
987,271 | eeac73ed8e9bfc1f6e673efab3919e4c78c95638 | from __future__ import print_function, division
import chronostar.component
"""
A script which gathers all plotting of all relevant figures into
one spot to facilitate quick and simple replotting as needed.
TODO: Maybe do again, but with the range buffer lowered to 0.1 (from 0.2)
"""
import corner
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.insert(0, '../..')
import chronostar.retired2.datatool as dt
import chronostar.fitplotter as fp
# When True, some (currently commented-out) code forces fixed XY ranges.
debugging_circles=False
# PLOTTING FLAGS
# Each flag enables one figure set below; flip the paired commented line
# to toggle.
PLOT_CORNER = False
# PLOT_CORNER = True
PLOT_FED_STARS = False
# PLOT_FED_STARS = True
PLOT_MUTLI_SYNTH = False
# PLOT_MUTLI_SYNTH = True
PLOT_SYNTH_BPMG2 = False
# PLOT_SYNTH_BPMG2 = True
# PLOT_BPMG_REAL = False
PLOT_BPMG_REAL = True
PLOT_FAILURE = False
# Pairs of phase-space dimensions plotted per figure: (X,Y), (X,U), (Y,V), (Z,W).
DEFAULT_DIMS = ((0,1), (0,3), (1,4), (2,5))
# Legend labels for fitted components.
COLOR_LABELS = ['Fitted {}'.format(ch) for ch in 'ABCDEF']
# BANYAN association name -> short label used in plot legends.
acronyms = {
    'beta Pictoris':r'$\beta$PMG',
    'Tucana-Horologium':'Tuc-Hor',
    # 'Columba':'Columba',
    # 'Carina':'CAR',
    # 'TW Hya':'TWA',
    'Upper Centaurus Lupus':'UCL',
}
def displayRanges(ranges):
    """Print the span (max - min) of each of the six phase-space axes."""
    spans = []
    for axis in (0, 1, 2, 3, 4, 5):
        low, high = ranges[axis][0], ranges[axis][1]
        spans.append(high - low)
    print(spans)
def calcRanges(star_pars, sep_axes=False, scale=True):
    """Compute per-dimension [min, max] spans of the stars, padded by 5%
    on each side.

    When *sep_axes* is True, independent copies are returned for the x-
    and y-axes of a plot grid; when *scale* is True the position axes
    (0-2) and the velocity axes (3-5) are each rescaled to a common span.
    """
    data = star_pars['xyzuvw']
    ranges = {}
    for dim in range(data.shape[1]):
        lo = np.min(data[:, dim])
        hi = np.max(data[:, dim])
        pad = 0.05 * (hi - lo)
        ranges[dim] = [lo - pad, hi + pad]
    # Debug print of the velocity spans (kept from the original).
    for dim in (3, 4, 5):
        print(ranges[dim][1] - ranges[dim][0])
    if not sep_axes:
        if scale:
            scaleRanges(ranges, dims=(0, 1, 2))
            scaleRanges(ranges, dims=(3, 4, 5))
        return ranges
    # Separate (shallow-copied) range dicts for the two plot axes.
    xranges = {key: ranges[key][:] for key in ranges.keys()}
    yranges = {key: ranges[key][:] for key in ranges.keys()}
    if scale:
        scaleRanges(xranges, dims=(0, 1, 2))
        scaleRanges(xranges, dims=(3, 4, 5))
        scaleRanges(yranges, dims=(0, 1, 2))
        scaleRanges(yranges, dims=(3, 4, 5))
    return xranges, yranges
def scaleRanges(ranges, dims=(0,1,2)):
    """Rescale *ranges* in place so every dimension in *dims* shares the
    widest span found among them, centred on its own midpoint."""
    widest = np.max([ranges[d][1] - ranges[d][0] for d in dims])
    # Normalise every entry to a mutable list before editing in place.
    for key in ranges:
        ranges[key] = list(ranges[key])
    half = 0.5 * widest
    for d in dims:
        centre = 0.5 * (ranges[d][0] + ranges[d][1])
        ranges[d][0] = centre - half
        ranges[d][1] = centre + half
LABELS = 'xyzuvw'
if PLOT_CORNER:
chain_files = [
'../../results/em_fit/beta_Pictoris_wgs_inv2_5B_res/final_chain.npy',
'../../results/em_fit/beta_Pictoris_wgs_inv2_5B_tuc-hor_res/final_chain.npy',
]
plot_names = [
'bpmg_5B_corner.pdf',
'tuc-hor_5B_corner.pdf',
]
for chain_file, plot_name in zip(chain_files, plot_names):
axis_labels = [
'X [pc]',
'Y [pc]',
'Z [pc]',
'U [km/s]',
'V [km/s]',
'W [km/s]',
r'$\sigma_{xyz}$ [pc]',
r'$\sigma_{uvw}$ [km/s]',
't [Myr]',
]
print("Plotting {}".format(plot_name))
chain = np.load(chain_file).reshape(-1,9)
chain[:,6:8] = np.exp(chain[:,6:8])
# plt.tick_params(direction='in')
fig = corner.corner(
chain,
labels=axis_labels,
# reverse=True,
label_kwargs={'fontsize':'xx-large'},
max_n_ticks=4,
)
print("Applying tick parameters")
for ax in fig.axes:
ax.tick_params(direction='in', labelsize='x-large', top=True,
right=True)
print("... saving")
plt.savefig(plot_name)
if PLOT_BPMG_REAL:
for iteration in ['5B']: #, '6C']:
star_pars_file = '../../data/beta_Pictoris_with_gaia_small_xyzuvw.fits'
star_pars = dt.loadXYZUVW(star_pars_file)
fit_name = 'bpmg_and_nearby'
rdir = '../../results/em_fit/beta_Pictoris_wgs_inv2_{}_res/'.format(iteration)
memb_file = rdir + 'final_membership.npy'
groups_file = rdir + 'final_groups.npy'
z = np.load(memb_file)
groups = dt.loadGroups(groups_file)
# Assign markers based on BANYAN membership
gt_sp = dt.loadDictFromTable('../../data/banyan_with_gaia_near_bpmg_xyzuvw.fits')
banyan_membership = len(star_pars['xyzuvw']) * ['N/A']
for i in range(len(star_pars['xyzuvw'])):
master_table_ix = np.where(gt_sp['table']['source_id']==star_pars['gaia_ids'][i])
banyan_membership[i] = gt_sp['table']['Moving group'][master_table_ix[0][0]]
# assign markers based on present moving groups, keep track of
# assoc -> marker relationship incase a legend is called for
banyan_membership=np.array(banyan_membership)
banyan_markers = np.array(len(banyan_membership) * ['.'])
banyan_memb_set = set(banyan_membership)
for bassoc in set(gt_sp['table']['Moving group']):
if bassoc not in acronyms.keys():
acronyms[bassoc] = bassoc
banyan_markers[np.where(banyan_membership=='beta Pictoris')] = 'v'
marker_label = []
banyan_memb_set.remove('beta Pictoris')
marker_label.append(acronyms['beta Pictoris'])
marker_style = []
marker_style.append('v')
banyan_markers[np.where(banyan_membership=='Tucana-Horologium')] = '*'
banyan_memb_set.remove('Tucana-Horologium')
marker_label.append(acronyms['Tucana-Horologium'])
marker_style.append('*')
banyan_memb_set.remove('N/A')
for banyan_assoc, marker in zip(banyan_memb_set, ('s', 'p', 'D', 'X', 'H', 'D')): #''''''^', '<', '>', '8', 's', 'p', 'h', 'H', 'D', 'd', 'P', 'X')):
banyan_markers[np.where(banyan_membership==banyan_assoc)] = marker
marker_label.append(acronyms[banyan_assoc])
marker_style.append(marker)
# First do all, then just do possible membs of BPMG
if True:
x_nearby_ranges, y_nearby_ranges =\
calcRanges(star_pars, sep_axes=True, scale=False)
# nearby_star_pars = {}
# for key in ['xyzuvw', 'xyzuvw_cov']:
# nearby_star_pars[key] = np.copy(star_pars[key])
#
# # Replace cov matrices with None for bg stars
# nearby_star_pars['xyzuvw_cov'][
# np.where(z.argmax(axis=1)==z.shape[1]-1)
# ] = None
# Set to None all covariance matrices not part of BPMG or THOR
bpmg_ix = 0
thor_ix = 3
bg_mask = np.where(np.logical_not(
np.isin(np.argmax(z, axis=1), [bpmg_ix,thor_ix])
))
nearby_star_pars = {}
nearby_star_pars['xyzuvw'] = star_pars['xyzuvw']
nearby_star_pars['xyzuvw_cov'] = np.copy(star_pars['xyzuvw_cov'])
nearby_star_pars['xyzuvw_cov'][bg_mask] = None
nearby_star_pars['indices'] = np.array(star_pars['indices'])
for dim1, dim2 in DEFAULT_DIMS: #[(0,1), (0,3), (1,4), (2,5)]: #, 'yv', 'zw']:
# # force the XY plot to have same scales
# if dim1==0 and dim2==1 and debugging_circles:
# temp_range = nearby_range[1]
# nearby_range[1] = [-120,80]
x_nearby_ranges[dim1], y_nearby_ranges[dim2] = fp.plotPane(
dim1,
dim2,
groups=groups,
star_pars=nearby_star_pars,
group_now=True,
membership=z,
# true_memb=true_memb,
savefile='{}_{}_{}{}.pdf'.format(fit_name, iteration,
LABELS[dim1], LABELS[dim2]),
with_bg=True,
range_1=x_nearby_ranges[dim1], #range_1,
range_2=y_nearby_ranges[dim2], #range_2,
markers=banyan_markers,
marker_style=marker_style,
marker_labels=marker_label if dim1 == 2 else None,
color_labels=COLOR_LABELS[:len(groups)] if
dim1 == 2 else None,
isotropic=(int(dim1/3) == int(dim2/3)),
)
# # undo forced change
# if dim1 == 0 and dim2 == 1 and debugging_circles:
# nearby_range[1] = temp_range
scaleRanges(x_nearby_ranges, (0,1,2))
scaleRanges(x_nearby_ranges, (3,4,5))
# scaleRanges(y_nearby_ranges, (0,1,2))
scaleRanges(y_nearby_ranges, (3,4,5))
# Only include stars that, if they weren't bg, they'd most likely be BPMG
if False:
if iteration == '5B':
fit_name = 'bpmg_candidates'
# extract_group_ix = [0,2]
extract_group_ixs_by_iteration = {
'5B':[0,3],
'6C':[0,2],
}
extract_group_ix = extract_group_ixs_by_iteration[iteration]
# bpmg_mask = np.where(z[:,extract_group_ix]>0.1)
bpmg_star_pars = {}
# bpmg_mask = np.where(np.isin(np.argmax(z[:,:-1], axis=1), extract_group_ix))# == extract_group_ix)
bpmg_mask = np.where(np.isin(np.argmax(z, axis=1), extract_group_ix))# == extract_group_ix)
bg_mask = np.where(np.logical_not(
np.isin(np.argmax(z, axis=1), extract_group_ix)
))
bpmg_star_pars['xyzuvw'] = star_pars['xyzuvw'] #[bpmg_mask]
bpmg_star_pars['xyzuvw_cov'] = np.copy(star_pars['xyzuvw_cov']) #[bpmg_mask]
bpmg_star_pars['xyzuvw_cov'][bg_mask] = None
bpmg_star_pars['indices'] = np.array(star_pars['indices']) #[bpmg_mask]
# z = z[bpmg_mask]#, (0,-1),]
z = z[:,(extract_group_ix+[-1]),]
# bpmg_range = calcRanges(bpmg_star_pars)
# import pdb; pdb.set_trace()
for dim1, dim2 in DEFAULT_DIMS: #[(0,1), (0,3), (1,4)]: #, (2,5)]: #, 'yv', 'zw']:
# force the XY plot to have same scales
# if dim1==0 and dim2==1 and debugging_circles:
# temp_range = bpmg_range[1]
# bpmg_range[1] = [-120,80]
# import pdb; pdb.set_trace()
dim1_range, dim2_range = fp.plotPane(
dim1,
dim2,
groups=groups[extract_group_ix],
star_pars=bpmg_star_pars,
group_now=True,
membership=z,
savefile='{}_{}_{}{}.pdf'.format(fit_name,
iteration,
LABELS[dim1],
LABELS[dim2]),
with_bg=True,
# range_1=bpmg_range[dim1],
range_1=x_nearby_ranges[dim1],
# range_2=bpmg_range[dim2],
range_2=y_nearby_ranges[dim2],
# residual=True,
markers=banyan_markers,
marker_style=marker_style,
marker_labels=marker_label if dim1==2 else None,
color_labels=[r'Fitted $\beta$PMG'] if dim1==2 else None,
# isotropic=(int(dim1/3) == int(dim2/3))
)
# # undo forced change
# if dim1 == 0 and dim2 == 1 and debugging_circles:
# bpmg_range[1] = temp_range
# To ensure consistency, we now plot the BANYAN bpmg stars only,
# and use the ragnes from previous plot
fit_name = 'banyan_bpmg'
rdir = '../../results/em_fit/beta_Pictoris/'
memb_file = rdir + 'final_membership.npy'
groups_file = rdir + 'final_best_groups.npy'
star_pars_file = '../../data/beta_Pictoris_with_gaia_small_xyzuvw.fits'
z = np.load(memb_file)
groups = dt.loadGroups(groups_file)
star_pars = dt.loadDictFromTable(star_pars_file, 'beta Pictoris')
nstars = len(star_pars['xyzuvw'])
# First do all, then just do possible membs of BPMG
for dim1, dim2 in DEFAULT_DIMS: #[(0,1), (0, 3), (1, 4), (2,5)]: #, (2, 5)]: # , 'yv', 'zw']:
# if dim1 == 0 and dim2 == 1 and debugging_circles:
# temp_range = bpmg_range[1]
# bpmg_range[1] = [-120, 80]
# import pdb; pdb.set_trace()
fp.plotPane(
dim1,
dim2,
groups=groups,
star_pars=star_pars,
group_now=True,
membership=z,
# true_memb=true_memb,
savefile='{}_{}{}.pdf'.format(fit_name, LABELS[dim1],
LABELS[dim2]),
with_bg=True,
range_1=x_nearby_ranges[dim1],
range_2=y_nearby_ranges[dim2],
markers=nstars*['v'],
marker_labels=[r'BANYAN $\beta$PMG'] if dim1==2 else None,
color_labels=[r'Chronostar $\beta$PMG'] if dim1==2 else None,
isotropic=(int(dim1/3) == int(dim2/3)),
)
# undo forced change
# if dim1 == 0 and dim2 == 1 and debugging_circles:
# bpmg_range[1] = temp_range
# plotting federrath stars
if PLOT_FED_STARS:
    # Plot the Federrath (simulated sink-particle) stars: overlay the single
    # maximum-likelihood component on both current-day and origin positions.
    print("Plotting fed stars)")
    synth_fit = 'fed_stars'
    # rdir = '../../results/fed_fits/30/gaia/'
    rdir = '../../results/fed_fits/20/gaia/'
    origins_file = rdir + 'origins.npy'
    chain_file = rdir + 'final_chain.npy'
    lnprob_file = rdir + 'final_lnprob.npy'
    star_pars_file = rdir + 'xyzuvw_now.fits'
    # init_xyzuvw_file = '../../data/sink_init_xyzuvw.npy'
    init_xyzuvw_file = rdir + '../xyzuvw_init_offset.npy'
    # perf_xyzuvw_file = rdir + '../perf_xyzuvw.npy'
    # star_pars_file = '../../data/fed_stars_20_xyzuvw.fits'
    # Flatten the emcee chain (walkers x steps x 9 pars) and pick the single
    # highest-lnprob sample as the best-fit component parameters.
    chain = np.load(chain_file).reshape(-1,9)
    lnprobs = np.load(lnprob_file)
    # best_fit_pars = np.load(chain_file)[np.unravel_index(np.argmax(lnprobs), lnprobs.shape)]
    best_fit_pars = chain[np.argmax(lnprobs)]
    groups = [chronostar.component.Component(best_fit_pars, internal=True)]
    origins = dt.loadGroups(origins_file)
    raw_init_xyzuvw = np.load(init_xyzuvw_file)
    # perf_xyzuvw = np.load(perf_xyzuvw_file)
    # init_xyzuvw = torb.traceManyOrbitXYZUVW(perf_xyzuvw, -origins[0].age,
    #                                         single_age=True)
    init_xyzuvw = np.load(init_xyzuvw_file)
    star_pars = dt.loadXYZUVW(star_pars_file)
    # Axis ranges are computed over current-day AND initial positions so both
    # marker sets fit in frame.
    fed_xranges, fed_yranges = calcRanges(
        {'xyzuvw':np.vstack((star_pars['xyzuvw'],init_xyzuvw))},
        sep_axes=True,
    )
    # import pdb; pdb.set_trace()
    for dim1, dim2 in DEFAULT_DIMS: #[(0,1), (0,3), (1,4), (2,5)]:
        # plt.clf()
        # plotPane returns the ranges it actually used; feed them back in so
        # later panes stay mutually consistent.
        fed_xranges[dim1], fed_yranges[dim2] = fp.plotPane(
            dim1,
            dim2,
            groups=groups,
            star_pars=star_pars_file,
            origin_star_pars={'xyzuvw':init_xyzuvw},
            group_then=True,
            group_now=True,
            star_orbits=True,
            savefile='{}_both_{}{}.pdf'.format(synth_fit,
                                               LABELS[dim1],
                                               LABELS[dim2]),
            # Legend only on the Z-W pane (dim1 == 2).
            marker_legend={'current-day':'.', 'origin':'s'} if dim1==2 else None,
            color_legend={'current-day':'xkcd:blue', 'origin':'xkcd:blue'} if dim1==2 else None,
            star_pars_label='current-day',
            origin_star_pars_label='origin',
            # Equal aspect only when both dims are positions or both velocities.
            isotropic=(int(dim1/3) == int(dim2/3)),
            range_1=fed_xranges[dim1],
            range_2=fed_yranges[dim2],
        )
    # Force matching scales across the position axes, then the velocity axes.
    scaleRanges(fed_xranges, (0, 1, 2))
    scaleRanges(fed_xranges, (3, 4, 5))
    # scaleRanges(fed_yranges, (0, 1, 2))
    # scaleRanges(fed_yranges, (3, 4, 5))
    # scaleRanges(fed_xranges, (0,1,2))
    # scaleRanges(fed_xranges, (3,4,5))
# plotting Multi-component synth fits
if PLOT_MUTLI_SYNTH:
    # Multi-component synthetic-fit plots.  Each dataset in `synth_fits` is
    # plotted across all DEFAULT_DIMS panes, with per-dataset options looked
    # up from the dicts below.
    print("Plotting synth plots")
    synth_fits = [
        # 'synth_bpmg',
        'four_assocs',
        'assoc_in_field',
        'same_centroid',
        # 'synth_bpmg2',
    ]
    # Suffix of each result directory under ../../results/em_fit/.
    rdir_suffix = {
        # 'synth_bpmg':'',
        'four_assocs':'_res',
        'assoc_in_field':'_res',
        'same_centroid':'_res',
        'synth_bpmg2':'_res',
    }
    # Historical per-dataset pane selections; the loop below iterates
    # DEFAULT_DIMS instead, so this dict is currently unused.
    planes = {
        # 'synth_bpmg':['xu', 'zw'], #['xu', 'zw', 'xy']#, 'yz'],
        'four_assocs':['xu', 'zw'], #['xy', 'yv'],
        'assoc_in_field':['xu', 'zw'], #['uv', 'xu'],
        'same_centroid':['xu', 'zw'], #['xu', 'yv'],
        'synth_bpmg2':['xu', 'zw'], #['xu', 'zw', 'xy']#, 'yz'],
    }
    # Whether the fit included a background component.
    with_bg = {
        # 'synth_bpmg':True,
        'four_assocs':False,
        'assoc_in_field':False,
        'same_centroid':False,
        'synth_bpmg2':True,
    }
    # Maps fitted-component order onto true-component order so colours and
    # markers line up between truth and fit.
    ordering = {
        # 'synth_bpmg':[1, 0],
        'assoc_in_field':[1, 0],
        'four_assocs':[3, 2, 0, 1],
        'same_centroid':[1, 0],
        'synth_bpmg2':[1, 0],
    }
    # The single (dim1, dim2) pane that carries the legend for each dataset.
    legend_proj = {
        # 'synth_bpmg':(0,3),
        'assoc_in_field':(2,5),
        'four_assocs':(2,5),
        'same_centroid':(2,5),
        'synth_bpmg2':(0,3),
    }
    MARKER_LABELS = np.array(['True {}'.format(ch) for ch in 'ABCD'])
    for synth_fit in synth_fits:
        print(" - plotting {}".format(synth_fit))
        rdir = '../../results/em_fit/{}{}/'.format(synth_fit,
                                                   rdir_suffix[synth_fit])
        groups_file = rdir + 'final_best_groups.npy'
        # star_pars_file = rdir + '{}_xyzuvw.fits'.format(synth_fit)
        groups = dt.loadGroups(groups_file)
        star_pars_file = '../../data/{}_xyzuvw.fits'.format(synth_fit)
        memb_file = rdir + 'final_membership.npy'
        origins_file = rdir + 'synth_data/origins.npy'
        true_memb = dt.getZfromOrigins(origins_file, star_pars_file)
        ranges = calcRanges(dt.loadXYZUVW(star_pars_file))
        xaxis_ranges, yaxis_ranges = calcRanges(dt.loadXYZUVW(star_pars_file),
                                                sep_axes=True, scale=True)
        # yaxis_ranges = {}
        # for key in ranges.keys():
        #     xaxis_ranges[key] = ranges[key][:]
        #     yaxis_ranges[key] = ranges[key][:]
        for dim1, dim2 in DEFAULT_DIMS: #planes[synth_fit]:
            print(" - {} and {}".format(dim1, dim2))
            # import pdb; pdb.set_trace()
            # plotPaneWithHists returns the ranges actually used; feed them
            # back so subsequent panes share consistent axes.
            xaxis_ranges[dim1], yaxis_ranges[dim2] = fp.plotPaneWithHists(
                dim1,
                dim2,
                groups=groups_file,
                star_pars=star_pars_file,
                group_now=True,
                membership=memb_file,
                true_memb=true_memb,
                savefile='{}_{}{}.pdf'.format(synth_fit,
                                              LABELS[dim1],
                                              LABELS[dim2]),
                with_bg=with_bg[synth_fit],
                group_bg=(synth_fit == 'assoc_in_field'),
                isotropic=(int(dim1/3) == int(dim2/3)),
                range_1=xaxis_ranges[dim1],
                range_2=yaxis_ranges[dim2],
                # Legends only on this dataset's designated pane.
                color_labels=COLOR_LABELS[:len(groups)]
                    if (dim1, dim2) == legend_proj[synth_fit]
                    else None,
                marker_labels=MARKER_LABELS[:len(groups)] #[ordering[synth_fit]]
                    if (dim1, dim2) == legend_proj[synth_fit]
                    else None,
                ordering=ordering[synth_fit],
                marker_order=ordering[synth_fit],
                no_bg_covs=with_bg[synth_fit],
            )
        # import pdb; pdb.set_trace()
        # Match scales across position axes, then across velocity axes.
        scaleRanges(xaxis_ranges, (0, 1, 2))
        scaleRanges(xaxis_ranges, (3, 4, 5))
        # scaleRanges(yaxis_ranges, (0, 1, 2))
        # scaleRanges(yaxis_ranges, (3, 4, 5))
# plotting Multi-component synth fits
if PLOT_SYNTH_BPMG2:
    # Same plotting pass as PLOT_MUTLI_SYNTH but for the synth_bpmg2 dataset
    # only, and using plotPane (no marginal histograms).
    print("Plotting synth plots")
    synth_fits = [
        'synth_bpmg2',
    ]
    rdir_suffix = {
        'synth_bpmg2':'_res',
    }
    # Retained for reference; the loop iterates DEFAULT_DIMS instead.
    planes = {
        'synth_bpmg2':['xu', 'zw'], #['xu', 'zw', 'xy']#, 'yz'],
    }
    with_bg = {
        'synth_bpmg2':True,
    }
    # Fitted-component to true-component index mapping.
    ordering = {
        'synth_bpmg2':[1, 0],
    }
    # The (dim1, dim2) pane that carries the legend.
    legend_proj = {
        'synth_bpmg2':(0,3),
    }
    MARKER_LABELS = np.array(['True {}'.format(ch) for ch in 'ABCD'])
    for synth_fit in synth_fits[-1:]:
        print(" - plotting {}".format(synth_fit))
        rdir = '../../results/em_fit/{}{}/'.format(synth_fit,
                                                   rdir_suffix[synth_fit])
        groups_file = rdir + 'final_best_groups.npy'
        # star_pars_file = rdir + '{}_xyzuvw.fits'.format(synth_fit)
        groups = dt.loadGroups(groups_file)
        star_pars_file = '../../data/{}_xyzuvw.fits'.format(synth_fit)
        memb_file = rdir + 'final_membership.npy'
        origins_file = rdir + 'synth_data/origins.npy'
        true_memb = dt.getZfromOrigins(origins_file, star_pars_file)
        ranges = calcRanges(dt.loadXYZUVW(star_pars_file))
        xaxis_ranges, yaxis_ranges = calcRanges(dt.loadXYZUVW(star_pars_file),
                                                sep_axes=True, scale=True)
        # yaxis_ranges = {}
        # for key in ranges.keys():
        #     xaxis_ranges[key] = ranges[key][:]
        #     yaxis_ranges[key] = ranges[key][:]
        for dim1, dim2 in DEFAULT_DIMS: #planes[synth_fit]:
            print(" - {} and {}".format(dim1, dim2))
            # import pdb; pdb.set_trace()
            # Feed the returned ranges back in so panes stay consistent.
            xaxis_ranges[dim1], yaxis_ranges[dim2] = fp.plotPane(
                dim1,
                dim2,
                groups=groups_file,
                star_pars=star_pars_file,
                group_now=True,
                membership=memb_file,
                true_memb=true_memb,
                savefile='{}_{}{}.pdf'.format(synth_fit,
                                              LABELS[dim1],
                                              LABELS[dim2]),
                with_bg=with_bg[synth_fit],
                group_bg=(synth_fit == 'assoc_in_field'),
                isotropic=(int(dim1/3) == int(dim2/3)),
                range_1=xaxis_ranges[dim1],
                range_2=yaxis_ranges[dim2],
                # Legends only on the designated pane.
                color_labels=COLOR_LABELS[:len(groups)]
                    if (dim1, dim2) == legend_proj[synth_fit]
                    else None,
                marker_labels=MARKER_LABELS[:len(groups)] #[ordering[synth_fit]]
                    if (dim1, dim2) == legend_proj[synth_fit]
                    else None,
                ordering=ordering[synth_fit],
                # marker_order=ordering[synth_fit],
                no_bg_covs=with_bg[synth_fit],
            )
        # import pdb; pdb.set_trace()
        # Match scales across position axes, then across velocity axes.
        scaleRanges(xaxis_ranges, (0, 1, 2))
        scaleRanges(xaxis_ranges, (3, 4, 5))
        # scaleRanges(yaxis_ranges, (0, 1, 2))
        # scaleRanges(yaxis_ranges, (3, 4, 5))
if PLOT_FAILURE:
    # Illustrate a failure mode: two single-component fits ('a' and 'b') to
    # the same synthetic data, drawn together on each pane.
    synth_fit='failure_mode'
    labels = ['a', 'b']
    groups = []
    for label in labels:
        rdir = '../../results/synth_fit/30_2_1_25_{}_double/'.format(label)
        # rdir = '../../results/new_fed_stars_20/gaia/'
        # origins_file = rdir + 'origins.npy'
        chain_file = rdir + 'final_chain.npy'
        lnprob_file = rdir + 'final_lnprob.npy'
        # init_xyzuvw_file = '../../data/sink_init_xyzuvw.npy'
        # init_xyzuvw_file = rdir + '../xyzuvw_init_offset.npy'
        # perf_xyzuvw_file = rdir + '../perf_xyzuvw.npy'
        # star_pars_file = '../../data/fed_stars_20_xyzuvw.fits'
        # Flatten the chain and take the maximum-likelihood sample.
        chain = np.load(chain_file).reshape(-1,9)
        lnprobs = np.load(lnprob_file)
        # best_fit_pars = np.load(chain_file)[np.unravel_index(np.argmax(lnprobs), lnprobs.shape)]
        best_fit_pars = chain[np.argmax(lnprobs)]
        groups.append(
            chronostar.component.Component(best_fit_pars, internal=True))
        # origins = dt.loadGroups(origins_file)
        # raw_init_xyzuvw = np.load(init_xyzuvw_file)
        # perf_xyzuvw = np.load(perf_xyzuvw_file)
        # init_xyzuvw = torb.traceManyOrbitXYZUVW(perf_xyzuvw, -origins[0].age,
        #                                         single_age=True)
        # init_xyzuvw = np.load(init_xyzuvw_file)
    # `rdir` is left pointing at the last label; this luckily picks out
    # sample 'b' which is what we want.
    star_pars_file = rdir + 'xyzuvw_now.fits'
    for dim1, dim2 in DEFAULT_DIMS: #['xy', 'xu', 'yv', 'zw', 'uv', 'uw']:
        fp.plotPane(
            dim1,
            dim2,
            groups=groups[::-1], # reverse groups so failure is coloured
            star_pars=star_pars_file,
            group_now=True,
            group_then=True,
            star_orbits=True,
            group_orbit=True,
            membership=None,
            true_memb=None,
            savefile='{}_{}{}.pdf'.format(synth_fit,
                                          LABELS[dim1],
                                          LABELS[dim2]),
            isotropic=(int(dim1/3) == int(dim2/3)),
        )
|
987,272 | a4c50e20578681884054d7be779e1ce92caf732a | #!/usr/bin/python2.4 -tt
# Copyright 2008 Google Inc. All Rights Reserved.
"""Mimic pyquick exercise -- optional extra exercise.
"""
__author__ = 'nparlante@google.com (Nick Parlante)'
import random
import sys
def A(arg1, arg2):
    """Return a mimic dict mapping each word to the list of words that follow it.

    Exercise stub: not yet implemented, so it currently always returns None.
    """
    # +++your code here+++
    return None
def B(arg1, arg2):
    """Given a mimic dict and a start word, print 200 random words.

    Exercise stub: currently just reports that it is unimplemented.
    """
    # +++your code here+++
    print('implement me')
# Provided main(), calls MimicDict() and Mimic()
def main():
    """Read a coin-toss string from stdin and count matching windows (Python 2).

    Fixes relative to the original: the input was stored in `strings` but
    read back as `string` (NameError at runtime), the count was printed as
    the tuple ('%d', count) instead of being formatted, and the unused
    `inputs` list is dropped.
    """
    seq = ['TTT', 'TTH', 'THT', 'THH', 'HTT', 'HTH', 'HHT', 'HHH']
    string = raw_input("")
    count = 0
    # NOTE(review): string[x:x+len(seq)] is an 8-character str slice while
    # `seq` is a list of triples, so this comparison can never be True --
    # the intended target sequence needs confirming against the exercise.
    for x in xrange(len(string) - len(seq) + 1):
        if string[x:x+len(seq)] == seq:
            count += 1
    print('%d' % count)
    return count
# Script entry point.
if __name__ == '__main__':
    main()
'''
#!/usr/bin/env python
# encoding: utf-8
"""
penny.py
penny's game
Created by André Baumann on 2011-08-24.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import sys
import os
def main():
seq = ['TTT', 'TTH', 'THT', 'THH', 'HTT', 'HTH', 'HHT', 'HHH']
string = raw_input('')
inputs = []
count = 0
for x in xrange(len(string) - len(seq) + 1):
if string[x:x+len(seq)] == seq:
count += 1
print(%d, count)
return count
sets = raw_input('')
if (sets < 1 or sets > 1000):
compare(line1)
else:
sys.exit(1)
iterations = int(raw_input'')
line = raw_input('')
def compare(string, line):
print('compare')
def function(string, str_to_search_for):
function('1011101111','11')
returns 5
import re
>>> aString = 'this is a string where the substring "is" is repeated several times'
>>> print [(a.start(), a.end()) for a in list(re.finditer('is', astring))]
[(2, 4), (5, 7), (38, 40), (42, 44)]
''' |
987,273 | ccc1a7d4d895d7fe9e971ed66b1895914df8ef72 | from math import *
# Condition-flag bit positions: Negative, oVerflow, Zero, Carry.
N = 8
V = 4
Z = 2
C = 1
# Sign bit and value mask for 12-bit "word" mode...
wordSign = 1 << 11
wordMask = (1 << 12) - 1
# ...and for 8-bit "byte" mode.
byteSign = 1 << 7
byteMask = (1 << 8) - 1
def adder(op, a, b, cin, siz=False):
    """Simulate a fixed-width two's-complement adder/subtractor.

    op:  0 = add, 1 = subtract (implemented as a + ~b + cin)
    a, b: operand values (masked to the active width)
    cin: carry-in bit (0 or 1)
    siz: False = 8-bit byte mode, True = 12-bit word mode

    Returns (nvzc, result, left_input, right_input), where nvzc packs the
    module-level N/V/Z/C flag bits.
    """
    sign, mask = (wordSign, wordMask) if siz else (byteSign, byteMask)
    left = a & mask
    # Subtraction feeds the inverted operand into the adder.
    right = (~b if op == 1 else b) & mask
    raw = left + right + cin
    flags = 0
    if raw & sign:
        flags |= N   # sign bit of the result is set
    if (~(left ^ right) & (left ^ raw)) & sign:
        flags |= V   # signed overflow: inputs agree in sign, result differs
    if not (raw & mask):
        flags |= Z   # result is zero within the active width
    if raw > mask:
        flags |= C   # carry out of the top bit
    return flags, raw & mask, left, right
# Sanity print of the flag constants and the word/byte sign bits and masks.
print(N, V, Z, C, '%04X %04X %02X %02X' % (wordSign, wordMask, \
    byteSign, byteMask))
# Human-readable rendering of each possible NVZC flag combination.
stat ={0 :'----',
       1 :'---C',
       2 :'--Z-',
       3 :'--ZC',
       4 :'-V--',
       5 :'-V-C',
       6 :'-VZ-',
       7 :'-VZC',
       8 :'N---',
       9 :'N--C',
       10:'N-Z-',
       11:'N-ZC',
       12:'NV--',
       13:'NV-C',
       14:'NVZ-',
       15:'NVZC' }
# Exhaustive 8-bit test-vector dump: every (a, b) pair, add and subtract,
# both carry-in values -- one line per case.
with open("adder08b.txt", "wt") as fout:
    k = 0
    for i in range(256):
        for j in range(256):
            nvzc, sum, auL, auR = adder(0, i, j, 0)
            print('%s, %1d, %02X, %02X, %02X, %02X, %02X, %1d' % \
                (stat[nvzc], 0, sum, i, j, auL, auR, 0), file=fout)
            nvzc, sum, auL, auR = adder(0, i, j, 1)
            print('%s, %1d, %02X, %02X, %02X, %02X, %02X, %1d' % \
                (stat[nvzc], 0, sum, i, j, auL, auR, 1), file=fout)
            nvzc, sum, auL, auR = adder(1, i, j, 1)
            print('%s, %1d, %02X, %02X, %02X, %02X, %02X, %1d' % \
                (stat[nvzc], 1, sum, i, j, auL, auR, 1), file=fout)
            nvzc, sum, auL, auR = adder(1, i, j, 0)
            print('%s, %1d, %02X, %02X, %02X, %02X, %02X, %1d' % \
                (stat[nvzc], 1, sum, i, j, auL, auR, 0), file=fout)
        # Console progress display: 16 `i` values per line.
        if k == 0:
            print('%02X' % (i), end='')
        else:
            print(', %02X' % (i), end='')
        k += 1
        if k == 16:
            print()
            k = 0
|
987,274 | 1a79baaf526ec0dede78ea1d4d7b105d50c3775f | #permite agrupar datos ys u comportamiento
#es como seguridad por que nos prmite controlar las modificaciones
#tambien es aplicada la programacion defensiva,para saver cuando y como se
#mofica una calase
#controla el acceso a dicho datos
#previene modificaciones no autorizadas
#decoradores se definesn con el simbolo @
# Getter: Se encargará de interceptar la lectura del atributo. (get = obtener)
# Setter : Se encarga de interceptar cuando se escriba. (set = definir o escribir)
# Deleter : Se encarga de interceptar cuando es borrado. (delete = borrar)
# doc : Recibirá una cadena para documentar el atributo. (doc = documentación)
class CasillaDeVotacion:
    """A polling station tied to a country and, optionally, one of its regions.

    The region is exposed through a property so assignments can be
    validated: only regions listed for the station's country are accepted.
    """

    def __init__(self, identificador, pais):
        self._identificador = identificador
        self._pais = pais
        self._region = None  # unset until a valid region is assigned

    @property
    def region(self):
        """The currently assigned region, or None if none has been set."""
        return self._region

    @region.setter
    def region(self, region):
        # Reject any region that is not registered for this country.
        if region not in self._pais:
            raise ValueError(f'La region {region} no es valido en el {self._pais}')
        self._region = region
# Demo: a station valid for Mexico/Morelos starts with no region assigned.
casilla = CasillaDeVotacion(123,['Mexico','Morelos'])
print(casilla.region)
# Assigning a listed region passes validation and is stored.
casilla.region = 'Mexico'
print(casilla.region)
987,275 | ee3aeccf4e7209f8d1da0969d38fa2a51f09fbfc | """Evaluate models on ImageNet-A"""
import os
import sys
import time
import argparse
import numpy as np
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torch.utils.data
import torchvision
import torchvision.models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from calibration_tools import show_calibration_results
# Command-line interface: dataset path, model choice and loader parameters.
parser = argparse.ArgumentParser(description='Evaluate models on ImageNet-A')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--model-name', type=str, default='resnext101_32x16d_wsl',
                    choices=['resnext101_32x8d', 'resnext101_32x8d_wsl', 'resnext101_32x16d_wsl',
                             'resnext101_32x32d_wsl', 'resnext101_32x48d_wsl'], help='evaluated model')
parser.add_argument('--workers', default=4, type=int, help='no of data loading workers')
parser.add_argument('--batch-size', default=64, type=int, help='mini-batch size')
# NOTE(review): --gpu appears unused below (the model is wrapped in
# DataParallel, which uses all visible GPUs) -- confirm before relying on it.
parser.add_argument('--gpu', default=0, type=int, help='GPU id to use')
def load_model(model_name):
    """Load a pretrained classifier by name, wrap it in DataParallel on GPU.

    Raises ValueError for unrecognised model names.
    """
    wsl_variants = {'resnext101_32x8d_wsl', 'resnext101_32x16d_wsl',
                    'resnext101_32x32d_wsl', 'resnext101_32x48d_wsl'}
    if model_name == 'resnext101_32x8d':
        net = torchvision.models.resnext101_32x8d(pretrained=True)
    elif model_name in wsl_variants:
        # WSL weights are distributed through torch.hub.
        net = torch.hub.load('facebookresearch/WSL-Images', model_name)
    else:
        raise ValueError('Model not available.')
    net = torch.nn.DataParallel(net).cuda()
    print('Loaded model:', model_name)
    return net
def to_np(x):
    """Detach `x` from autograd, move it to host memory, return a NumPy array."""
    host_tensor = x.data.to('cpu')
    return host_tensor.numpy()
def get_net_results(net, loader):
    """Evaluate `net` on `loader`, restricted to the ImageNet-A class subset.

    Returns (accuracy, confidences, correctness): `confidences` is the max
    softmax probability per example, `correctness` the per-example boolean
    correctness flags, both as flat Python lists.
    """
    from utils import indices_in_1k
    confidence = []
    correct = []
    net.eval()
    num_correct = 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.cuda(), target.cuda()
            # Restrict the 1000-way output to the ImageNet-A classes.
            output = net(data)[:, indices_in_1k]
            # accuracy; compute the argmax prediction once (the original
            # recomputed it twice identically).
            pred = output.data.max(1)[1]
            num_correct += pred.eq(target.data).sum().item()
            # reshape(-1) instead of squeeze(): squeeze() on a final batch of
            # one example yields a 0-d array whose tolist() is a scalar, and
            # list.extend(scalar) raises TypeError.
            confidence.extend(
                to_np(F.softmax(output, dim=1).max(1)[0]).reshape(-1).tolist())
            correct.extend(pred.eq(target).to('cpu').numpy().reshape(-1).tolist())
    return num_correct / len(loader.dataset), confidence.copy(), correct.copy()
if __name__ == "__main__":
args = parser.parse_args()
model = load_model(args.model_name)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(args.data, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# evaluate
acc, test_confidence, test_correct = get_net_results(model, val_loader)
print('ImageNet-A Accuracy (%):', round(100 * acc, 4))
cal_err, aur = show_calibration_results(np.array(test_confidence), np.array(test_correct))
np.savez('imaageneta_' + args.model_name + '.npz', acc=acc, cal_err=cal_err, aur=aur) |
987,276 | 1dec761b5e51c20d44dc2e6d27080cc94a0a0a23 | ## HEADS == 1
## TAILS == 0
import random
def coin():
heads = 0
tails = 0
for x in range(1, 5001):
rand = random.randint(0,1)
if (rand == 0):
tails += 1
print "Attempt #", x , ": Throwing a coin. . . It's a tail!.... Got ", heads, " heads and ", tails, " tails so far"
else:
heads += 1
print "Attempt #", x , ": Throwing a coin. . . It's a head!.... Got ", heads, " heads and ", tails, " tails so far"
# Script entry point: run the simulation on import/execution.
coin()
|
987,277 | 331808d8f58c9d5329b47623788a1e08d4875831 | '''attack an image classification model'''
import os
import argparse
import random
import logging
import pickle as pkl
from datetime import datetime
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
import numpy as np
# visualization packages
from sklearn.manifold import TSNE
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import network
import attack_func
from prep_data import get_dataset
from train import test
# create logger
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
def run_attack(args, model, attack, device, test_loader):
    '''Attack `model` over every example in `test_loader` (batch size 1).

    Returns a 7-tuple:
        final_acc              -- accuracy of `model` on the perturbed inputs
        adv_examples           -- perturbed input arrays
        adv_gt_labels          -- ground-truth labels of attacked examples
        attack_success_labels  -- per-example 1 if the attack flipped the
                                  prediction, else 0
        h_clean, h_adv         -- bottleneck features of clean / adv. inputs
        grads                  -- input gradients produced by the attack
    Examples the clean model already misclassifies are skipped entirely.
    '''
    # Frozen label set used to draw random targets for targeted attacks.
    y_space = set(range(args.n_classes))
    # Counts examples still classified correctly after the attack.
    correct = 0
    # save adversarial examples
    adv_examples = []
    adv_gt_labels = []          # ground truth labels of adversarial examples
    attack_success_labels = []  # whether the attack is successful, 0=fail, 1=success
    h_clean = []                # bottleneck features of clean examples
    h_adv = []                  # bottleneck features of adv. examples
    grads = []                  # grad w.r.t input images
    # Loop over all examples in test set
    for data, target in tqdm(test_loader):
        # Send the data and label to the device
        data, target = data.to(device), target.to(device)
        # The attack differentiates w.r.t. the input, so gradients must flow.
        data.requires_grad = True
        # Forward pass the data through the model
        h_data, output = model(data, return_feat=True)
        # get the index of the max log-probability
        init_pred = output.max(1, keepdim=True)[1]
        # If the initial prediction is wrong, dont bother attacking, just move
        # on
        if init_pred.item() != target.item():
            continue
        # non-targeted attack
        if args.targeted == False:
            perturbed_data, data_grad = attack.attack_batch(
                model, data, target)
        else:
            # Targeted attack: pick a random label other than the truth.
            # print('target: {}, candidates: {}'.format(target, y_space.difference(target.item())))
            random_target = torch.LongTensor(
                [random.choice(list(y_space.difference([target.item()])))]).to(device)
            perturbed_data, data_grad = attack.attack_batch(
                model, data, random_target)
        # Re-classify the perturbed image
        h_perturbed, output = model(perturbed_data, return_feat=True)
        # Sanity check: perturbation must respect the attack's eps budget.
        assert(torch.abs(torch.max(perturbed_data - data)) <= attack.eps)
        # Check for success
        # get the index of the max log-probability
        final_pred = output.max(1, keepdim=True)[1]
        adv_examples.append(perturbed_data.detach().cpu().numpy())
        h_adv.append(h_perturbed.detach().cpu().numpy())
        h_clean.append(h_data.detach().cpu().numpy())
        adv_gt_labels.append(target.cpu().item())
        grads.append(data_grad.detach().cpu().numpy())
        if final_pred.item() == target.item():
            correct += 1
            attack_success_labels.append(0)
        else:
            attack_success_labels.append(1)
    # Denominator is the number of batches; with batch size 1 this equals the
    # number of test examples (including the skipped ones).
    final_acc = correct / float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(attack.eps,
                                                             correct, len(test_loader), final_acc))
    # Return the accuracy and an adversarial example
    return final_acc, adv_examples, adv_gt_labels, attack_success_labels, h_clean, h_adv, grads
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='White-box Attack PyTorch classification models')
    parser.add_argument('--dataset', type=str,
                        choices=['mnist', 'svhn', 'usps', "syn_digits",
                                 'cifar10', 'stl10', ],
                        help='dataset')
    parser.add_argument('--arch', type=str, help='network architecture')
    parser.add_argument('--ckpt_file', type=str,
                        help='path to load model ckpt. ')
    parser.add_argument('--attack_method', default='FGSM',
                        help='attack method for adversarial training')
    parser.add_argument('--targeted', action='store_true',
                        help='if set, targeted attack')
    parser.add_argument('--train_ratio', type=float, default=1.0,
                        help='sampling ratio of training data')
    parser.add_argument(
        '--eps', type=str, help='attack eps values, integers [0,255], split with comma')
    parser.add_argument('-g', action='store_true',
                        help='generate and save adversarial examples')
    args = parser.parse_args()
    # Fixed seeds for reproducibility of target sampling and shuffling.
    torch.manual_seed(1)
    random.seed(1)
    # Results live next to the checkpoint, organised by attack configuration
    # and timestamped per run.
    log_dir = os.path.split(args.ckpt_file)[0]
    output_path = os.path.join(log_dir, 'white-box', args.attack_method, 'targeted' if args.targeted else 'non_targeted',
                               datetime.strftime(datetime.now(), "%d%b%y-%H%M%S"))
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    logger.info('output to {}'.format(output_path))
    # eps values arrive on the [0, 255] integer scale; convert to [0, 1].
    epsilons = [float(v) / 255. for v in args.eps.split(',')]
    pretrained_model = args.ckpt_file
    use_cuda = True
    cudnn.benchmark = True
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # Batch size 1: run_attack processes one example at a time.
    test_loader = torch.utils.data.DataLoader(
        get_dataset(args.dataset, 'test', True,
                    train_size=args.train_ratio, test_size=0.),
        batch_size=1, shuffle=True, **kwargs)
    # Define what device we are using
    print("CUDA Available: ", torch.cuda.is_available())
    device = torch.device("cuda" if (
        use_cuda and torch.cuda.is_available()) else "cpu")
    if args.dataset in ["cifar10", "stl10"]:
        args.n_classes = 9
    elif args.dataset in ["usps", "mnist", "svhn", "syn_digits"]:
        args.n_classes = 10
    else:
        raise ValueError('invalid dataset option: {}'.format(args.dataset))
    # Initialize the network
    if args.arch == "DTN":
        model = network.DTN().to(device)
    elif args.arch == 'wrn':
        model = network.WideResNet(
            depth=28, num_classes=args.n_classes, widen_factor=10, dropRate=0.0).to(device)
    else:
        raise ValueError('invalid network architecture {}'.format(args.arch))
    # Load the pretrained model
    model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
    # Set the model in evaluation mode. In this case this is for the Dropout
    # layers
    model.eval()
    print('test accuracy w/ input transform. ')
    clean_accuracy = test(None, model, device, test_loader)
    accuracies = []
    # Run test for each epsilon
    for eps in epsilons:
        if args.attack_method == "FGSM":
            # TODO: magic number -- clip range assumes inputs normalised to [-1, 1]
            attacker = attack_func.FGSM(
                epsilon=eps, clip_min=-1., clip_max=1., targeted=args.targeted)
        else:
            raise NotImplementedError(
                'attack method {} not implemented. '.format(args.attack_method))
        acc, adv_ex, adv_gt, attack_success_labels, h_clean, h_adv, grads = run_attack(
            args, model, attacker, device, test_loader)
        attack_success_labels = np.array(attack_success_labels)
        accuracies.append(acc)
        # save adversarial examples
        if args.g:
            with open(os.path.join(log_dir, '{}_{}_eps{:.4f}_adv_ex.pkl'.format(args.attack_method, 'targeted' if args.targeted else 'non_targeted', eps)), 'wb') as pkl_file:
                pkl.dump({"data": np.concatenate(adv_ex, axis=0),
                          "target": np.array(adv_gt),
                          "attack_success_flag": attack_success_labels,
                          "features": np.concatenate(h_adv, axis=0),
                          "grads": np.concatenate(grads, axis=0)}, pkl_file)
    # summarize results
    logger.info('eps: {}'.format(",".join([str(eps) for eps in epsilons])))
    logger.info('accuracy: {}'.format(",".join([str(acc) for acc in accuracies])))
    with open(os.path.join(output_path, 'attack_results.txt'.format()), 'w') as log_file:
        log_file.write('configs: {}\n'.format(args))
        print('clean accuracy: {:.4f}'.format(clean_accuracy), file=log_file)
        print('eps: {}'.format(",".join([str(eps)
                                         for eps in epsilons])), file=log_file)
        print('accuracy: {}'.format(
            ",".join([str(acc) for acc in accuracies])), file=log_file)
987,278 | 855a40fd30eff3b85a0f242608155d202abba9d7 | from django.db import models
from apps.gallery.models import Gallery
class Section(models.Model):
    """A CMS page section: headings, two text bodies, and an optional gallery."""

    slug = models.CharField(max_length=100, unique=True)  # unique URL identifier
    name = models.CharField(max_length=200, null=False)
    title = models.CharField(max_length=35, null=False)
    sub_title = models.TextField(max_length=200, null=True, blank=True)
    text1 = models.TextField(max_length=800, null=True, blank=True)
    text2 = models.TextField(max_length=800, null=True, blank=True)
    # Optional gallery; deleting the gallery cascades to its sections.
    gallery = models.ForeignKey(Gallery, related_name='section', null=True, blank=True, on_delete=models.CASCADE)
# class GenericSection(models.Model):
# name = models.CharField(max_length=15, null=False, blank=False)
# title = models.CharField(max_length=35, null=False, blank=False)
# sub_title = models.TextField(max_length=200, null=False, blank=False)
# text1 = models.TextField(max_length=800, blank=False)
# text2 = models.TextField(max_length=800, blank=False)
# @property
# def section(self):
# return Section.objects.get(name=self.name).
|
987,279 | 7a2f856fb9cb087fd772dadaa08ce8da2acba349 | # banner = list("Congratulations")
# print(banner)
# temperatures = []
# temperatures.append(99.6)
# temperatures.append(98.4)
# print(temperatures)
# er_temp = [101.1, 100.8, 99.9]
# print(er_temp)
# total_temps = temperatures + er_temp
# print(total_temps)
# attendees = ["Ken", "Alena", "Treasure"]
# attendees.append("Ashley")
# attendees.extend(["James", "Guil"])
# optional_invitees = ["Ben J.", "Dave"]
# potential_attendees = attendees + optional_invitees
# print("There are", len(potential_attendees), "potential attendees currently.")
# books = [
# "Automate the Boring Stuff with Python: Practical Programming for Total Beginners - Al Sweigart",
# "Python for Data Analysis - Wes McKinney",
# "Fluent Python: Clear, Concise, and Effective Programming - Luciano Ramalho",
# "Python for Kids: A Playful Introduction To Programming - Jason R. Briggs",
# "Hello Web App: Learn How to Build a Web App - Tracy Osborn",
# ]
# video_games = [
# "The Legend of Zelda: Breath of the Wild",
# "Splatoon 2",
# "Super Mario Odyssey",
# ]
# print("Suggested gift: {}".format(books[0]))
# print("Books:")
# for book in books:
# print("* " + book)
# def display_wishlist(display_name, wishes):
# items = wishes.copy()
# print(display_name + ":")
# suggested_gift = items.pop(0)
# print("=====>", suggested_gift, "<=====")
# for item in items:
# print("* " + item)
# print()
# display_wishlist("Books", books)
# display_wishlist("Video Games", video_games)
#insert at beginning of list
# books.insert(0, "Learning Python: Powerful Object-Oriented Programming - Mark Lutz")
# print(books)
# lauries_taco = "\N{TACO}"
# print(lauries_taco)
# continents = [
# 'Asia',
# 'South America',
# 'North America',
# 'Africa',
# 'Europe',
# 'Antarctica',
# 'Australia',
# ]
# # Your code here
# for continent in continents:
# if continent[0] == "A":
# print("* " + continent)
# inventory = ["shield", "apple", "sword", "bow", "boomerang"]
# for item in inventory.copy():
# inventory.remove(item)
# Mutable list shared with shredder() below -- the call mutates it in place.
turtles = [
    "Michelangelo",
    "Leonardo",
    "Raphael",
    "Donatello",
]
def shredder(names):
    """Mutate `names` in place: overwrite its first entry with "Bebop".

    Empty lists are left untouched; nothing is returned -- the caller's
    list object is modified directly.
    """
    if names:
        names[0] = "Bebop"
# Demonstrate in-place mutation through a function argument: the first
# turtle is replaced before printing.
shredder(turtles)
for turtle in turtles:
    print("* " + turtle)
987,280 | 01dc823e4501a3890df1c6a54abd8ee12f4e1bc0 | import pandas as pd
import numpy as np
df=pd.read_csv("AAAI.csv")
a=df["Topics"]
b=[]
for i, v in a.iteritems():
c=v.splitlines()
d=set(c)
b.append(d)
o=0
e=np.zeros((len(b), len(b)))
for x in range(150):
for i in range(len(b)):
for j in range(len(b)):
e[i][j]=(len(b[i].intersection(b[j]))/len(b[i].union(b[j])))
frst_max = secnd_max =0
for i in range(len(b)):
for j in range(len(b)):
if(e[i][j] > frst_max):
secnd_max = frst_max
frst_max = e[i][j]
elif(e[i][j] > secnd_max and e[i][j] != frst_max):
secnd_max = e[i][j]
for m in range(o):
for n in range(m):
if(e[m][n]==secnd_max): #frst_max is always 1
try:
b[m]=(b[m].union(b[n]))
del b[n]
except:
continue
o=len(b)
if(len(b)==9):
break |
987,281 | a99019b798d5bb924f9184bf0467916ab5bf2e80 | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Face crops to histogram; paths are relative to this script's location.
images = ['../facedetect/images_resize/face_of_1.jpg','../facedetect/images_resize/face_of_2.jpg','../facedetect/images_resize/face_of_3.jpg']
for image in images:
    # Load directly as a single-channel grayscale image.
    gray_img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    cv2.imshow('1',gray_img)
    # NOTE(review): `hist` is computed but never used -- the plot below
    # recomputes the histogram from the raw pixels via plt.hist.
    hist = cv2.calcHist([gray_img],[0],None,[256],[0,256]) # Y - is number of pixels, X - pixel value (gray)
    plt.hist(gray_img.ravel(),256,[0,256])
    plt.title('Histogram for gray scale picture')
    plt.show()
    # Block until ESC is pressed before moving to the next image.
    while True:
        k = cv2.waitKey(0) & 0xFF
        if k == 27: break # ESC key to exit
    cv2.destroyAllWindows()
987,282 | 79d580bb349669ede61ec3bdd5332dd1a89d9df8 | # Generated by Django 3.1.1 on 2021-03-03 15:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add three optional document-photo uploads to the Post model."""

    dependencies = [
        ('Post', '0025_auto_20210303_1234'),
    ]

    operations = [
        # NOTE(review): default=True on an ImageField is unusual (defaults
        # are normally a path string or omitted) -- confirm intent.
        migrations.AddField(
            model_name='post',
            name='citizenship_photo',
            field=models.ImageField(blank=True, default=True, null=True, upload_to='images/'),
        ),
        migrations.AddField(
            model_name='post',
            name='land_map_photo',
            field=models.ImageField(blank=True, default=True, null=True, upload_to='images/'),
        ),
        migrations.AddField(
            model_name='post',
            name='land_ownership_document_photo',
            field=models.ImageField(blank=True, default=True, null=True, upload_to='images/'),
        ),
    ]
987,283 | ce902b5d88b54f949ed95bf1fadcbe17d8fce169 | import spacy
from elasticsearchapp.query_results import get_all_analyzed_data, elastic_greek_stemmer
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import model_selection, naive_bayes, svm
from sklearn.metrics import accuracy_score
np.random.seed(500)
nlp = spacy.load('el_core_news_lg')
def export_dataset_df():
    """Export the analysed corpus (tokens + crime-type labels) as the training CSV."""
    tokenized_data, raw_type = get_all_analyzed_data()
    # Pair tokens with their labels; zip truncates to the shorter sequence,
    # matching the original pairing behaviour.
    paired = list(zip(tokenized_data, raw_type))
    total_data = [tokens for tokens, _ in paired]
    total_types = [label for _, label in paired]
    print(total_data)
    print(total_types)
    frame = pd.DataFrame({'article_tokens': total_data, 'crime_type': total_types})
    frame.to_csv('../dfs/newsbomb_article.csv', encoding='utf-8-sig', index=False)
def classify_crime_type(content):
    """Train a linear SVM on the stored article corpus and predict the crime
    type of the given article text.

    :param content: raw article text; it is stemmed/analyzed the same way as
        the training corpus before prediction.
    :return: the predicted crime-type label (str), or None if the predicted
        class id cannot be mapped back to a label.
    """
    corpus = pd.read_csv('../../dfs/newsbomb_article.csv')
    corpus = corpus[corpus['crime_type'] != 'ΑΛΛΟ ΕΓΚΛΗΜΑ']  # todo: might have to put that back in

    train_X, test_X, train_Y, test_Y = model_selection.train_test_split(corpus['article_tokens'],
                                                                        corpus['crime_type'],
                                                                        test_size=0.2)
    Encoder = LabelEncoder()
    train_Y = Encoder.fit_transform(train_Y)
    # BUG FIX: use transform(), not fit_transform(), on the test labels so the
    # test set reuses the class->id mapping learned from the training labels;
    # refitting could assign different ids when the label sets differ, which
    # silently corrupts the reported accuracy.
    test_Y = Encoder.transform(test_Y)

    # check the given class id
    integer_mapping = {l: i for i, l in enumerate(Encoder.classes_)}
    print(integer_mapping)

    Tfidf_vect = TfidfVectorizer(max_features=5000)
    Tfidf_vect.fit(corpus['article_tokens'])
    Train_X_Tfidf = Tfidf_vect.transform(train_X)
    Test_X_Tfidf = Tfidf_vect.transform(test_X)

    # SVM Classifier
    SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
    SVM.fit(Train_X_Tfidf, train_Y)
    predictions_SVM = SVM.predict(Test_X_Tfidf)
    print("SVM Accuracy Score: ", accuracy_score(predictions_SVM, test_Y) * 100)

    # test unknown dataset -> not in db
    stemmed_and_analyzed = elastic_greek_stemmer(content)
    df = pd.DataFrame({'article_tokens': [stemmed_and_analyzed]})
    df.to_csv('../../dfs/newsbomb_article_predict.csv', encoding='utf-8-sig', index=False)
    no_label_corpus = pd.read_csv('../../dfs/newsbomb_article_predict.csv')
    test_unknown = no_label_corpus['article_tokens']
    test_unknown_Tfidf = Tfidf_vect.transform(test_unknown)
    predictions_SVM = SVM.predict(test_unknown_Tfidf)
    for crime_type, crime_key in integer_mapping.items():
        if crime_key == predictions_SVM[0]:
            return crime_type
    return None  # predicted id not found in the mapping (should not happen)
|
987,284 | 7467f0f21cec578f5ebc144d443199e8076dd3bd | """Default values."""
# Fallback input file when the caller supplies none.
DEFAULT_FILE = "data.csv"
# Fallback number of topics when the caller supplies none.
DEFAULT_NUM_OF_TOPICS = 3
|
987,285 | 2d7906bfd73ea52be0cb167fef239da792a1f874 | # -*- coding:utf-8 -*-
"""
__author__ = 'lijianbin'
这个网站老变态了
"""
from __future__ import unicode_literals
import re
import json
from PIL import Image
from io import BytesIO
from EnterpriseCreditCrawler.common import image_recognition
from EnterpriseCreditCrawler.common import url_requests
from EnterpriseCreditCrawler.common.uf_exception import RequestError
# Fetch the captcha image
def get_cookies():
    """Fetch the captcha as a PIL Image and return it with the response cookies."""
    url = 'https://www.szcredit.org.cn/web/WebPages/Member/CheckCode.aspx'
    headers = {
        'Host': 'www.szcredit.org.cn',
        'User-Agent':('Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/54.0.2840.87 Safari/537.36'),
        'Referer': 'https://www.szcredit.org.cn/web/GSPT/ShowCheckCode.aspx'
    }
    # NOTE(review): ``proxies`` is a module global assigned inside main();
    # calling this before main() raises NameError — confirm intended.
    resp = url_requests.get(url, headers=headers, verify=False,
                            proxies=proxies)
    cookies = resp.cookies
    file = BytesIO(resp.content)
    image = Image.open(file)
    # image.show()
    return image, cookies
# Submit the recognized captcha answer for server-side validation
def verifyCode(checkCode, cookies):
    """
    :param checkCode: the recognized captcha text
    :param cookies: cookies returned together with the captcha image
    :return: the response body text (the caller checks it for an error marker)
    """
    url = 'https://www.szcredit.org.cn/web/AJax/Ajax.ashx'
    headers = {
        'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
        'Host':'www.szcredit.org.cn',
        'Origin':'https://www.szcredit.org.cn',
        'Referer':'https://www.szcredit.org.cn/web/GSPT/ShowCheckCode.aspx',
        'User-Agent':('Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/54.0.2840.87 Safari/537.36'),
        'X-Requested-With':'XMLHttpRequest'
    }
    data = {
        'action': 'GetCheckCode',
        'checkcode': checkCode
    }
    judge = url_requests.post(url, data=data, headers=headers,
                              cookies=cookies, proxies=proxies)
    judge = judge.text
    return judge
# Check whether the captcha was recognized correctly
def recognize_code():
    """Fetch a captcha, OCR it, and validate the answer with the server.

    Returns the session cookies on success, None on recognition failure.
    """
    image, cookies = get_cookies()
    # image.show()
    code = image_recognition.image_recognition(image, 'shenzhen')
    if not code:
        return None
    judge = verifyCode(checkCode=code, cookies=cookies)
    # The response contains this marker ("calculation error") when wrong.
    if '计算错误' in judge:
        return None
    return cookies
def get_results(company, cookies):
    """Search the site for *company* using a validated captcha session.

    Returns a list of dicts: {'company': name,
                              'detail': {'company_name': name, 'link': url}}.
    Returns [] when the site reports no matching enterprise.
    (Python 2 code: uses print statements.)
    """
    url = 'https://www.szcredit.org.cn/web/AJax/Ajax.ashx'
    headers = {
        'Host':'www.szcredit.org.cn',
        'Origin':'https://www.szcredit.org.cn',
        'User-Agent':('Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/54.0.2840.87 Safari/537.36'),
        'X-Requested-With':'XMLHttpRequest'
    }
    data = {
        'action': 'GetEntList',
        'keyword': company,
        'type': 'load'
    }
    resp = url_requests.post(url=url,
                             data=data,
                             cookies=cookies,
                             headers=headers,
                             proxies=proxies)
    # Marker text: "no information found for this enterprise".
    if '未查询到该企业的信息' in resp.text:
        print '未查询到该企业的信息,请重新输入! '
        return []
    dic = json.loads(resp.text)
    msg = dic['msg'].split('(')[0]
    print msg
    resultList = dic['resultlist']
    results = []
    for each in resultList:
        item = {}
        # extract the company name (strip HTML highlight tags)
        patter = re.compile('<.*?>', re.S)
        item['company'] = re.sub(patter, '', each['EntName']).strip()
        # build the detail link
        detail = {}
        patter = re.compile('<.*?>', re.S)
        detail['company_name'] = re.sub(patter, '', each['EntName']).strip()
        detail['link'] = (
            'https://www.szcredit.org.cn/web/gspt/newGSPTDetail3.aspx?ID='+
            each['RecordID'])
        item['detail'] = detail
        results.append(item)
    return results
def main(**kwargs):
    """Entry point: solve the captcha (up to ~20 attempts) and search.

    Keyword args: name (company name to search), proxies (requests-style
    proxies dict, stored in a module global used by the helpers).
    Raises RequestError when the captcha could not be solved.
    """
    name = kwargs.get('name')
    global proxies
    proxies = kwargs.get('proxies')
    # proxies = None  # dynamic proxy IPs disabled for now
    # Cap the number of captcha-recognition attempts.
    times = 1
    cookies = None
    while times < 20 and cookies == None:
        times += 1
        cookies = recognize_code()
    if cookies == None:
        # "Shenzhen captcha still wrong after 20 recognition attempts"
        raise RequestError('深圳验证码识别20次依然错误'.encode('utf-8'))
    results = get_results(company=name, cookies=cookies)
    return results
# Ad-hoc manual test entry point with three sample company names.
if __name__ == '__main__':
    name = ['深圳市兴业丰田汽车销售服务有限公司',
            '深圳市益群实业有限公司',
            '深圳市华星光电技术有限公司']
    main(name=name[2])
|
987,286 | bad1b8f77214b66edde61c99036b2ba971bb96eb | import re
# The pipe character makes the group match exactly one of the alternatives.
bat_regex = re.compile(r'bat(man|mobile|women)')
mo = bat_regex.findall("I am batman vs batwomen")
print(type(mo))
for match in mo:
    print(match)
|
987,287 | 0442d83409b28fe8a6a3352e1ec9df222a17331b | ######################################
# Time:2020/02/11
# alter: ZWQ
######################################
from flask_restplus import Api
from flask import Blueprint
from .views import find_task,add_task,del_task,updata_task
# Swagger security scheme: API key taken from the Authorization header.
authorizations = {
    'apikey': {
        'type': 'apiKey',
        'in': 'header',
        'name': 'Authorization'
    }
}
# Blueprint + flask-restplus Api exposing the to-do task CRUD endpoints.
task_mold = Blueprint('task_mold',__name__)
api = Api(task_mold, version='1.0', title='Task API',
          description='ToDoList 任务列表增删改查 API',authorizations =authorizations,security='apikey')
# Route each resource class (imported from .views) to its path.
api.add_resource(find_task,'/find')
api.add_resource(add_task,'/add')
api.add_resource(del_task,'/del')
api.add_resource(updata_task,'/updata')
|
987,288 | 1cfb1aec0158e66ce443f68ba1667e67b698e599 | # -*- coding: UTF-8 -*-
'''
Task
Given a positive integral number n, return a strictly increasing sequence
(list/array/string depending on the language) of numbers, so that the sum of the
squares is equal to n².
If there are multiple solutions (and there will be), return the result with the
largest possible values:
Examples
decompose(11) must return [1,2,4,10]. Note that there are actually two ways to
decompose 11², 11² = 121 = 1 + 4 + 16 + 100 = 1² + 2² + 4² + 10² but don't
return [2,6,9], since 9 is smaller than 10.
For decompose(50) don't return [1, 1, 4, 9, 49] but [1, 3, 5, 8, 49] since [1,
1, 4, 9, 49] doesn't form a strictly increasing sequence.
decompose 50 `shouldBe` Just [1,3,5,8,49]
decompose 4 `shouldBe` Nothing
Note
Neither [n] nor [1,1,1,…,1] are valid solutions. If no valid solution exists,
return nil, null, Nothing, None or "".
The function "decompose" will take a positive integer n and return the
decomposition of N = n² as:
[x1 ... xk]
Hint
Very often xk will be n-1.
'''
import math
def decompose(n):
    """Return a strictly increasing list [x1..xk] (k >= 2) of positive ints
    with x1**2 + ... + xk**2 == n**2, preferring the largest possible values,
    or None when no such decomposition exists (e.g. decompose(4) is None).
    """
    def _recurse(s, i):
        # s: remaining sum of squares to cover; i: exclusive upper bound for
        # the next (largest-first) candidate term.
        if s < 0:
            return None
        if s == 0:
            return []
        # Largest candidates first so the result maximises the big values.
        # FIX: range() instead of Python-2-only xrange() (and idiomatic
        # `is not None` instead of `!= None`).
        for j in range(i - 1, 0, -1):
            sub = _recurse(s - j ** 2, j)
            if sub is not None:
                return sub + [j]
    return _recurse(n ** 2, n)
import math

def decompose(n, s=None):
    """Alternative top-down implementation (shadows the version above).

    :param n: number whose square is decomposed
    :param s: remaining sum of squares — internal recursion parameter
    :return: decomposition as a strictly increasing list, or None (implicit)
    """
    # BUG FIX: test for None explicitly; ``if not s`` also fired on s == 0
    # and would wrongly reset the remaining sum to n*n.
    if s is None:
        s = n * n
    m = int(math.sqrt(s))
    # If the remainder is a perfect square smaller than n, take it directly.
    if m * m == s and m != n:
        return [m]
    # Try the largest admissible next term first.
    for i in range(min(n, int(math.ceil(math.sqrt(s)))) - 1, 1, -1):
        sol = decompose(i, s - i * i)
        if sol:
            return sol + [i]

# FIX: print() call form works on both Python 2 and 3 (was a py2-only
# print statement).
print(decompose(50))
|
987,289 | 70b811a3323d52c40c24cf54d0692960c77a7353 | # 给定一个只包含 '(' 和 ')' 的字符串,找出最长的包含有效括号的子串的长度。
#
# 示例 1:
#
# 输入: "(()"
# 输出: 2
# 解释: 最长有效括号子串为 "()"
#
#
# 示例 2:
#
# 输入: ")()())"
# 输出: 4
# 解释: 最长有效括号子串为 "()()"
#
# Related Topics 字符串 动态规划
# 👍 948 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# A substring is valid iff: 1. it has equal counts of '(' and ')'
# 2. every prefix has at least as many '(' as ')'
class Solution:
    def longestValidParentheses(self, s: str) -> int:
        """Length of the longest well-formed '()' substring of s.

        Indices of unmatched '(' are kept on a stack; ``last_bad`` remembers
        the most recent unmatched ')', which bounds any valid run on the left.
        """
        open_positions = []
        longest = 0
        last_bad = -1
        for idx, ch in enumerate(s):
            if ch == '(':
                open_positions.append(idx)
                continue
            # ch == ')'
            if not open_positions:
                # Unmatched ')': nothing valid can cross this index.
                last_bad = idx
                continue
            open_positions.pop()
            if open_positions:
                longest = max(longest, idx - open_positions[-1])
            else:
                longest = max(longest, idx - last_bad)
        return longest
# leetcode submit region end(Prohibit modification and deletion)
|
987,290 | c18d6c124f5e0af562fccc01b13e23c9406478a8 | from snippet.lazy_dict import LazyDict, lazy_property
from nose import tools
class Book(LazyDict):
    """Demo LazyDict subclass: properties are materialized on first access.

    NOTE(review): ``lazy_property`` and ``__subset__`` semantics come from the
    project-local ``snippet.lazy_dict`` module — presumably ``__subset__``
    names the nested LazyDict attributes; confirm against that module.
    """
    def __init__(self, book_id):
        super(Book, self).__init__()
        self.id = book_id
        self.__subset__ = {'review'}

    @lazy_property
    def author(self):
        # Delegates to a helper that pretends to read book metadata.
        return self._get_author()

    def _get_author(self):
        # read file content from book_id.json
        info = {'author': 'author'}
        return info.get('author')

    @lazy_property
    def content(self):
        # Simulates an expensive remote fetch; only runs on first access.
        cmd = 'scp remote_host:/data/book/book_id.pdf /data/book/book_id/content.pdf'
        # run cmd
        return 'binary stream of /data/book/book_id/content.pdf'

    @lazy_property
    def review(self):
        # Nested lazy object sharing the same book id.
        return Review(self.id)
class Review(LazyDict):
    """Nested LazyDict holding review statistics for one book."""
    def __init__(self, book_id):
        super(Review, self).__init__()
        self.id = book_id

    @lazy_property
    def count(self):
        # Simulated DB query; the literal stands in for the query result.
        sql = 'select count(*) from db.review where book_id=self.id'
        # run sql
        return 10

    @lazy_property
    def latest(self):
        # Simulated DB query for the newest review.
        sql = 'select * from db.review where book_id=self.id order by time desc limit 1'
        # run sql
        return 'Latest review'
def test():
    """Exercise lazy materialization: each attribute access should add
    exactly one entry to the owner's __dict__."""
    bk = Book('book1 id')
    tools.eq_(len(bk.__dict__), 2, 'Book should have only 2 properties, id and _subset')
    _author = bk['author']
    tools.eq_(len(bk.__dict__), 3, 'Book should have 1 more property, author')
    _review = bk.review
    tools.eq_(len(bk.__dict__), 4, 'Book should have 1 more property, review')
    tools.eq_(len(bk.review.__dict__), 2, 'Review should have only 2 properties, id and _subset')
    _latest = bk.review.latest
    tools.eq_(len(bk.review.__dict__), 3, 'Review should have 1 more property, review')
    shape = bk._get_structure()
    tools.eq_(len(shape), 4, 'Structure should have 4 properties.')
    tools.eq_(shape['id'], None, 'Structure should have id with no value.')
    # structure should not have _subset
    tools.assert_raises(KeyError, shape.__getitem__, '__subset__')
    snapshot = bk._to_dict()
    tools.eq_(len(snapshot), 4, 'Data should have 4 properties.')
    tools.eq_(snapshot['review']['count'], 10, 'Data should have actual data.')
987,291 | c46d64d71d4a52719af8d719961336edeff2bea3 | import tensorflow as tf
import DS_input as deepSpeech_input
import rnn_cell
from helper_routines import _variable_on_cpu
from helper_routines import _variable_with_weight_decay
from helper_routines import _activation_summary
# Global constants describing the speech data set.
NUM_CLASSES = deepSpeech_input.NUM_CLASSES
NUM_PER_EPOCH_FOR_TRAIN = deepSpeech_input.NUM_PER_EPOCH_FOR_TRAIN
NUM_PER_EPOCH_FOR_EVAL = deepSpeech_input.NUM_PER_EPOCH_FOR_EVAL
NUM_PER_EPOCH_FOR_TEST = deepSpeech_input.NUM_PER_EPOCH_FOR_TEST
def inputs(eval_data, data_dir, batch_size, use_fp16, shuffle):
    """Construct input for LibriSpeech model evaluation using the Reader ops.
    Args:
      eval_data: 'train', 'test' or 'eval'
      data_dir: folder containing the pre-processed data
      batch_size: int,size of mini-batch
      use_fp16: bool, if True use fp16 else fp32
      shuffle: bool, to shuffle the tfrecords or not.
    Returns:
      feats: MFCC. 4D tensor of [batch_size, T, F, 1] size.
      labels: Labels. 1D tensor of [batch_size] size.
      seq_lens: SeqLens. 1D tensor of [batch_size] size.
    Raises:
      ValueError: If no data_dir
    """
    if not data_dir:
        raise ValueError('Please supply a data_dir')
    # Delegate the actual tfrecord reading/batching to the input module.
    feats, labels, seq_lens = deepSpeech_input.inputs(eval_data=eval_data,
                                                      data_dir=data_dir,
                                                      batch_size=batch_size,
                                                      shuffle=shuffle)
    # Only the features are cast to fp16; labels/seq_lens stay integer-typed.
    if use_fp16:
        feats = tf.cast(feats, tf.float16)
    return feats, labels, seq_lens
def inference(feats, seq_lens, params):
    """Build the deepSpeech model.
    Graph: 1 strided conv layer -> dropout -> (bi)directional RNN stack
    -> linear projection to NUM_CLASSES (softmax applied later by CTC).
    Args:
      feats: MFCC features returned from distorted_inputs() or inputs().
      seq_lens: Input sequence length per utterance.
      params: parameters of the model.
    Returns:
      Logits.
    """
    # We instantiate all variables using tf.get_variable() instead of
    # tf.Variable() in order to share variables across multiple GPU
    # training runs. If we only ran this model on a single GPU,
    # we could simplify this function
    # by replacing all instances of tf.get_variable() with tf.Variable().

    if params.use_fp16:
        dtype = tf.float16
    else:
        dtype = tf.float32

    feat_len = feats.get_shape().as_list()[-1]
    # convolutional layers
    with tf.variable_scope('conv1') as scope:
        # 11-frame temporal kernel spanning the full feature axis.
        kernel = _variable_with_weight_decay(
            'weights',
            shape=[11, feat_len, 1, params.num_filters],
            wd_value=None, use_fp16=params.use_fp16)

        # Add a trailing channel dim: [batch, T, F] -> [batch, T, F, 1].
        feats = tf.expand_dims(feats, dim=-1)
        # Stride only along time (params.temporal_stride).
        conv = tf.nn.conv2d(feats, kernel,
                            [1, params.temporal_stride, 1, 1],
                            padding='SAME')
        # conv = tf.nn.atrous_conv2d(feats, kernel, rate=2, padding='SAME')
        biases = _variable_on_cpu('biases', [params.num_filters],
                                  tf.constant_initializer(-0.05),
                                  params.use_fp16)
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv1)

        # dropout
        conv1_drop = tf.nn.dropout(conv1, params.keep_prob)

    # recurrent layers
    with tf.variable_scope('rnn') as scope:

        # Reshape conv output to fit rnn input
        rnn_input = tf.reshape(conv1_drop, [params.batch_size, -1,
                                            feat_len*params.num_filters])
        # Permute into time major order for rnn
        rnn_input = tf.transpose(rnn_input, perm=[1, 0, 2])
        # Make one instance of cell on a fixed device,
        # and use copies of the weights on other devices.
        cell = rnn_cell.CustomRNNCell(
            params.num_hidden, activation=tf.nn.relu6,
            use_fp16=params.use_fp16)
        drop_cell = tf.contrib.rnn.DropoutWrapper(
            cell, output_keep_prob=params.keep_prob)
        # NOTE(review): the SAME wrapped cell instance is repeated for every
        # layer; in newer TF versions this shares weights across layers —
        # confirm that is intended for this model.
        multi_cell = tf.contrib.rnn.MultiRNNCell(
            [drop_cell] * params.num_rnn_layers)

        # Conv stride shortened the time axis; scale the lengths to match.
        seq_lens = tf.div(seq_lens, params.temporal_stride)
        if params.rnn_type == 'uni-dir':
            rnn_outputs, _ = tf.nn.dynamic_rnn(multi_cell, rnn_input,
                                               sequence_length=seq_lens,
                                               dtype=dtype, time_major=True,
                                               scope='rnn',
                                               swap_memory=True)
        else:
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(
                multi_cell, multi_cell, rnn_input,
                sequence_length=seq_lens, dtype=dtype,
                time_major=True, scope='rnn',
                swap_memory=True)
            outputs_fw, outputs_bw = outputs
            # Sum (not concat) the two directions, keeping width num_hidden.
            rnn_outputs = outputs_fw + outputs_bw
        _activation_summary(rnn_outputs)

    # Linear layer(WX + b) - softmax is applied by CTC cost function.
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay(
            'weights', [params.num_hidden, NUM_CLASSES],
            wd_value=None,
            use_fp16=params.use_fp16)
        biases = _variable_on_cpu('biases', [NUM_CLASSES],
                                  tf.constant_initializer(0.0),
                                  params.use_fp16)
        # Flatten [T, batch, hidden] -> [T*batch, hidden] for one matmul,
        # then restore the time-major 3-D layout expected by ctc_loss.
        logit_inputs = tf.reshape(rnn_outputs, [-1, cell.output_size])
        logits = tf.add(tf.matmul(logit_inputs, weights),
                        biases, name=scope.name)
        logits = tf.reshape(logits, [-1, params.batch_size, NUM_CLASSES])
        _activation_summary(logits)

    return logits
def loss(logits, labels, seq_lens):
    """Compute mean CTC Loss.
    Add summary for "Loss" and "Loss/avg".
    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]
      seq_lens: Length of each utterance for ctc cost computation.
    Returns:
      Loss tensor of type float.
    """
    # Calculate the average ctc loss across the batch.
    # Logits are cast to float32: ctc_loss does not accept fp16 inputs.
    ctc_loss = tf.nn.ctc_loss(inputs=tf.cast(logits, tf.float32),
                              labels=labels, sequence_length=seq_lens)
    ctc_loss_mean = tf.reduce_mean(ctc_loss, name='ctc_loss')
    tf.add_to_collection('losses', ctc_loss_mean)

    # The total loss is defined as the cross entropy loss plus all
    # of the weight decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    """Add summaries for losses in deepSpeech model.
    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.
    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss;
    # do the same for the averaged version of the losses.
    for each_loss in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average
        # version of the loss as the original loss name.
        # FIX: tf.scalar_summary was removed in TF 1.0; this file already
        # depends on TF >= 1.0 APIs (tf.contrib.rnn.DropoutWrapper above),
        # so use the tf.summary.scalar replacement.
        tf.summary.scalar(each_loss.op.name + ' (raw)', each_loss)
        tf.summary.scalar(each_loss.op.name, loss_averages.average(each_loss))

    return loss_averages_op
|
987,292 | 0ee8c0d834fe133fbb36ceece601436f24b4117f | import pygame
from Settings import Settings
from function import *
from pygame.sprite import Group
from Alien import *
def run_game():
    """Initialize pygame, build the ship/bullets/alien fleet, and run the
    event/draw loop forever (exit is handled inside check_event)."""
    pygame.init()
    settings=Settings()
    screen=pygame.display.set_mode((settings.screenWidth,settings.screenHeight))
    pygame.display.set_caption('Alien Invasion')
    ship=Ship(screen,settings)
    bullets=Group()
    aliens=[]
    create_fleet(settings,screen,aliens)
    #print(len(aliens))
    while True:
        check_event(settings,ship,screen,bullets)
        update_screen(screen,ship,settings,bullets,aliens=aliens)
        is_forward=True  # NOTE(review): assigned but never read in this loop
run_game()
|
987,293 | cffdde6a558cb29c15f5b43c27ae1ff42a1b8bf1 | # 39. Escrever um algoritmo que leia uma variável n e calcule
# a tabuada de 1 até n. Mostre a tabuada na forma:
# 1 x n = n
# 2 x n = 2n
# 3 x n = 3n
# ...............
# n x n = n2
# my version — NOTE(review): reading n as float prints a float times table,
# and the while condition never runs for n <= 0.
n = float(input("Digite um número, para sua tabuada: "))
contador = 0
while n * n > contador * n:
    contador = contador + 1
    print(f"{contador} x {n} = {contador * n}")
# Marco's version — counts 1..numero with an explicit counter.
numero = int(input("Digite um numero: "))
contador = 1
while contador <= numero:
    print (f"{contador} x {numero} = {contador * numero}")
    contador = contador + 1
|
987,294 | fda1bfa852936d35a3ff063e18353de011b13b91 | '''클라이언트 소켓을 담당하는 서버 클래스'''
class client:
    """Server-side wrapper around one connected client socket."""

    def __init__(self, cs, ip):
        # Keep the accepted socket and the peer address for later use.
        self.cs = cs
        self.ip = ip

    def send_msg(self, message):
        """Encode *message* as UTF-8 and send it over the client socket."""
        payload = message.encode('utf-8')
        self.cs.send(payload)
|
987,295 | 48fb18d2c94b1f0967e5f156975aeb53ac6b6271 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-08 21:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the boolean 'pagamento' flag with a 3-state 'estado' field
    (1=Aberto, 2=Desmarcado, 3=Pago); existing rows default to Aberto."""

    dependencies = [
        ('lavajato_agenda', '0004_auto_20181220_1839'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='agenda',
            name='pagamento',
        ),
        migrations.AddField(
            model_name='agenda',
            name='estado',
            field=models.CharField(choices=[(b'1', b'Aberto'), (b'2', b'Desmarcado'), (b'3', b'Pago')], default=1, max_length=1),
            preserve_default=False,
        ),
    ]
|
def mi():
    """Read one whitespace-separated line from stdin as a lazy map of ints."""
    return map(int, input().split())
def main():
    """AtCoder RPS-style greedy: beat each machine hand for points, but the
    same hand may not be reused K rounds later (within each residue class
    mod K, no two consecutive identical choices)."""
    N, K = mi()
    R, S, P = mi()
    T = input()
    pt = 0
    # my_choices[pos] records the hand played at round pos ('' = free/no-score).
    my_choices = ['']*N
    # Rounds i, i+K, i+2K, ... only constrain each other, so solve each
    # residue class independently.
    for i in range(K):
        tmp = T[i::K]
        for j in range(len(tmp)):
            if j == 0:
                # First round in the class: always play the winning hand.
                if tmp[j] == 'r':
                    my_choices[i] = 'p'
                    pt += P
                if tmp[j] == 's':
                    my_choices[i] = 'r'
                    pt += R
                if tmp[j] == 'p':
                    my_choices[i] = 's'
                    pt += S
            else:
                # Score only if the winning hand differs from the one played
                # K rounds earlier; otherwise leave '' (free choice, 0 pts).
                if tmp[j] == 'r' and my_choices[i+(j-1)*K] != 'p':
                    my_choices[i+j*K] = 'p'
                    pt += P
                if tmp[j] == 's' and my_choices[i+(j-1)*K] != 'r':
                    my_choices[i+j*K] = 'r'
                    pt += R
                if tmp[j] == 'p' and my_choices[i+(j-1)*K] != 's':
                    my_choices[i+j*K] = 's'
                    pt += S
    print(pt)
# Script entry point.
if __name__ == '__main__':
    main()
|
987,297 | f709aaa56f2a1514dd04b8744f18233842390ef9 | # ----------------------------------------------------------
# -------- CS50 Final Project --------
# ----------------------------------------------------------
# ----------------------------------------------------------
#Name:Prince Michael Agbo
#Title: Snakes and Ladders
#Number of Players: 2
#IDE: IDLE
#Location: International student from Nigeria studying in the USA.
#
#
#
#
# ----------------------------------------------------------
import turtle
import random
import time
# Board dimensions: squares per row and number of rows (squares 1..100).
WIDTH = 10
HEIGHT = 10
# Each snake/ladder is a 6-square path; positions1/positions2 move a player
# who lands anywhere on the path to index 5 (snake tail / ladder top).
snake1 = [97,85,75,67,53,49]
snake2 = [63,57,45,35,27,13]
ladder1 = [39,42,59,62,79,82]
ladder2 = [4,16,26,34,48,52]
def valid_input(player_num):
    """
    This function ensures that the players enter the valid input of "r" to roll the die
    """
    player_input = input("Player "+str(player_num)+ " enter r to roll the die: ")
    player_input = player_input.lower()
    # Re-prompt until the (case-insensitive) answer is exactly "r".
    while player_input != "r":
        print("Invalid input")
        player_input = input("Player "+str(player_num)+" enter r to roll the die: ")
        player_input = player_input.lower()
def winner(position, player):
    """
    Report whether *player* has reached the 100th square and thus won,
    printing a congratulation message on a win.
    """
    if position != 100:
        return False
    print("Congratulations,",player + ", you have won.")
    return True
def create_dictionary():
    """
    Map each board square number (1..WIDTH*HEIGHT) to an (x, y) grid
    coordinate, following the boustrophedon numbering of a snakes-and-
    ladders board: even rows run left-to-right, odd rows right-to-left.
    """
    square_to_xy = {}
    for row in range(HEIGHT):
        odd_row = (row % 2) != 0
        # Odd rows start at their highest square number and count down.
        number = (10*row)+10 if odd_row else ((10*row)-9)+10
        step = -1 if odd_row else 1
        for col in range(WIDTH):
            square_to_xy[number] = (col, row)
            number += step
    return square_to_xy
def write_num(x,y,t,pos):
    """
    Write the square number *pos* at the centre of the square whose
    lower-left corner is (x, y), then return the turtle to that corner.
    """
    centre_x = x + 0.5
    centre_y = y + 0.5
    moveturtle(centre_x, centre_y, t)
    t.write(pos)
    moveturtle(x, y, t)
def moveturtle(x,y,t):
    """
    Teleport turtle *t* to (x, y) without drawing a trail:
    pen up, jump, pen back down.
    """
    t.penup()
    t.goto(x, y)
    t.pendown()
def filldraw_rectangle(x,y,width,height,t,color):
    """Draw a width x height rectangle with lower-left corner at (x, y)
    and fill it with *color*, using turtle *t*."""
    moveturtle(x, y, t)
    t.fillcolor(color)
    t.begin_fill()
    # Two half-perimeters (width, turn, height, turn) close the rectangle.
    for _ in range(2):
        for side in (width, height):
            t.forward(side)
            t.left(90)
    t.end_fill()
def positions1(player1_pos,player2_pos,d,t,roll_num1):
    """Redraw the board after player 1 moves and apply snake/ladder jumps.

    player1_pos: player 1's square after adding the die roll roll_num1.
    player2_pos: player 2's current square (shared squares become orange).
    d: square-number -> (x, y) mapping from create_dictionary().
    t: the drawing turtle.
    Returns player 1's final square (after any snake slide / ladder climb).
    """
    #if snake positions change and its tail is in first row, will need to include
    #if (player1_pos - roll_num1) != 0: before any code that involves (player1_pos - roll_num1)
    #to recolor last position
    if (player1_pos - roll_num1) not in snake1 and (player1_pos - roll_num1) not in snake2 and (player1_pos - roll_num1) not in ladder1 and (player1_pos - roll_num1) not in ladder2:
        if (player1_pos - roll_num1) != 0:
            filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"red")#recolor last position red if it was not a snake and not a ladder
            write_num(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],t,player1_pos-roll_num1)
    else:
        if (player1_pos - roll_num1) in snake1 or (player1_pos - roll_num1) in snake2:
            if (player1_pos - roll_num1) != 0:
                filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"green")#recolor last position green if it was a snake
                write_num(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],t,player1_pos-roll_num1)
        elif (player1_pos - roll_num1) in ladder1 or (player1_pos - roll_num1) in ladder2:
            if (player1_pos - roll_num1) != 0:
                filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"yellow")#recolor last position yellow if it was a ladder
                write_num(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],t,player1_pos-roll_num1)
    if player1_pos not in snake1 and player1_pos not in snake2 and player1_pos not in ladder1 and player1_pos not in ladder2:
        #if current position is not a snake body and not a ladder body, color current position white
        filldraw_rectangle(d[player1_pos][0],d[player1_pos][1],1,1,t,"white")
        write_num(d[player1_pos][0],d[player1_pos][1],t,player1_pos)
    else:
        if player1_pos in snake1 or player1_pos in snake2: #if current position is snake body
            filldraw_rectangle(d[player1_pos][0],d[player1_pos][1],1,1,t,"green") #make current position green
            write_num(d[player1_pos][0],d[player1_pos][1],t,player1_pos)
            if (player1_pos - roll_num1) not in ladder1 and (player1_pos - roll_num1) not in ladder2 and (player1_pos - roll_num1) not in snake1 and (player1_pos - roll_num1) not in snake2:
                #if last position is not a ladder or snake body
                if (player1_pos - roll_num1) != player2_pos:#if last position is also not player 2 position
                    filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"red") #color last position red
                    write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
                else:#if last position is also player 2 position
                    filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"orange") #color last position orange
                    write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
            elif (player1_pos - roll_num1) in ladder1 or (player1_pos - roll_num1) in ladder2:
                #if last position is a ladder body
                if (player1_pos - roll_num1) != player2_pos:#if last position is also not player 2 position
                    filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"yellow") #color last position yellow
                    write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
                else:#if last position is also player 2 position
                    filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"orange") #color last position orange
                    write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
            # BUG FIX: was `... or (player1_pos - roll_num1) not in snake2`,
            # which is true for almost any square; positions2 uses `in snake2`
            # for the same branch, and the comment below expects a snake body.
            elif (player1_pos - roll_num1) in snake1 or (player1_pos - roll_num1) in snake2:
                #if last position is a snake body
                if (player1_pos - roll_num1) != player2_pos:#if last position is also not player 2 position
                    filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"green") #color last position green
                    write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
                else:#if last position is also player 2 position
                    filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"orange") #color last position orange
                    write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
            if player1_pos in snake1: #if current position in snake1 body
                if player1_pos != snake1[5]:
                    print ("Player 1 has hit a snake and will slide down to", snake1[5])
                player1_pos = snake1[5] #slide down by updating current position to snake 1 tail
                filldraw_rectangle(d[player1_pos][0],d[player1_pos][1],1,1,t,"white")#color snake tail white
                write_num(d[player1_pos][0],d[player1_pos][1],t,player1_pos)
            elif player1_pos in snake2: #if current position in snake2 body
                if player1_pos != snake2[5]:
                    print ("Player 1 has hit a snake and will slide down to", snake2[5])
                player1_pos = snake2[5] #slide down by updating current position to snake 1 tail
                filldraw_rectangle(d[player1_pos][0],d[player1_pos][1],1,1,t,"white")#color snake tail white
                write_num(d[player1_pos][0],d[player1_pos][1],t,player1_pos)
        elif player1_pos in ladder1 or player1_pos in ladder2:#if current position is ladder body
            filldraw_rectangle(d[player1_pos][0],d[player1_pos][1],1,1,t,"yellow") #make current position yellow
            write_num(d[player1_pos][0],d[player1_pos][1],t,player1_pos)
            if (player1_pos - roll_num1) not in snake1 and (player1_pos - roll_num1) not in snake2 and (player1_pos - roll_num1) not in ladder1 and (player1_pos - roll_num1) not in ladder2:
                #if last position is not a snake or ladder body
                if (player1_pos - roll_num1) != player2_pos:#if last position is also not player 2 position
                    if (player1_pos - roll_num1) != 0:
                        filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"red") #color last position red
                        write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
                else:#if last position is also player 2 position
                    if (player1_pos - roll_num1) != 0:
                        filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"orange") #color last position orange
                        write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
            elif (player1_pos - roll_num1) in snake1 or (player1_pos - roll_num1) in snake2:
                #if last position is a snake body
                if (player1_pos - roll_num1) != player2_pos:#if last position is also not player 2 position
                    if (player1_pos - roll_num1) != 0:
                        filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"green") #color last position green
                        write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
                else:#if last position is also player 2 position
                    if (player1_pos - roll_num1) != 0:
                        filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"orange") #color last position orange
                        write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
            elif (player1_pos - roll_num1) in ladder1 or (player1_pos - roll_num1) in ladder2:
                #if last position is a ladder body
                if (player1_pos - roll_num1) != player2_pos:#if last position is also not player 2 position
                    if (player1_pos - roll_num1) != 0:
                        filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"yellow") #color last position yellow
                        write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
                else:#if last position is also player 2 position
                    if (player1_pos - roll_num1) != 0:
                        filldraw_rectangle(d[player1_pos - roll_num1][0],d[player1_pos - roll_num1][1],1,1,t,"orange") #color last position orange
                        write_num(d[player1_pos-roll_num1][0],d[player1_pos-roll_num1][1],t,player1_pos-roll_num1)
            if player1_pos in ladder1: #if current position in ladder1 body
                if player1_pos != ladder1[5]:
                    print ("Player 1 has hit a ladder and will climb up to", ladder1[5])
                player1_pos = ladder1[5] #climb up by updating current position to ladder 1 top
                filldraw_rectangle(d[player1_pos][0],d[player1_pos][1],1,1,t,"white")#color ladder 1 head white
                write_num(d[player1_pos][0],d[player1_pos][1],t,player1_pos)
            elif player1_pos in ladder2: #if current position in ladder2 body
                if player1_pos != ladder2[5]:
                    print ("Player 1 has hit a ladder and will climb up to", ladder2[5])
                player1_pos = ladder2[5] #climb up by updating current position to ladder 2 top
                filldraw_rectangle(d[player1_pos][0],d[player1_pos][1],1,1,t,"white")#color ladder 2 head white
                write_num(d[player1_pos][0],d[player1_pos][1],t,player1_pos)
    if player2_pos == player1_pos or player2_pos == (player1_pos - roll_num1): #if player 2 is or was in the same position as player 1, make the current or past position orange
        if (player1_pos - roll_num1) != 0:
            filldraw_rectangle(d[player2_pos][0],d[player2_pos][1],1,1,t,"orange")
            write_num(d[player2_pos][0],d[player2_pos][1],t,player2_pos)
    return player1_pos
def positions2(player2_pos,player1_pos,d,t, roll_num2):
    """
    Redraw the board squares affected by player 2's latest move and apply
    any snake slide or ladder climb.

    Parameters:
        player2_pos: player 2's square after adding the die roll.
        player1_pos: player 1's current square (its white marker must be
            preserved when the two players share a square).
        d: dict mapping square number -> (x, y) board cell coordinates.
        t: the drawing turtle.
        roll_num2: the die roll just taken, so the square player 2 left is
            player2_pos - roll_num2.

    Returns player 2's final position (updated if a snake or ladder was hit).

    Colour key: red = plain square, green = snake body, yellow = ladder body,
    orange = player 2's marker, white = player 1's marker.

    NOTE(review): the original file's indentation was lost; the nesting below
    was reconstructed from the inline comments and the parallel structure of
    the snake/ladder branches - verify against a running copy.
    """
    # --- Restore the original colour of the square player 2 just left. ---
    #to recolor last position
    if (player2_pos - roll_num2) not in snake1 and (player2_pos - roll_num2) not in snake2 and (player2_pos - roll_num2) not in ladder1 and (player2_pos - roll_num2) not in ladder2:
        if (player2_pos - roll_num2) != 0:  # square 0 is the off-board start position; never drawn
            filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"red")#recolor last position red if it was not a snake and not a ladder
            write_num(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],t,player2_pos-roll_num2)
    else:
        if (player2_pos - roll_num2) in snake1 or (player2_pos - roll_num2) in snake2:
            if (player2_pos - roll_num2) != 0:
                filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"green")#recolor last position green if it was a snake
                write_num(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],t,player2_pos-roll_num2)
        elif (player2_pos - roll_num2) in ladder1 or (player2_pos - roll_num2) in ladder2:
            if (player2_pos - roll_num2) != 0:
                filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"yellow")#recolor last position yellow if it was a ladder
                write_num(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],t,player2_pos-roll_num2)
    # --- Draw player 2 on its new square, handling snakes and ladders. ---
    if player2_pos not in snake1 and player2_pos not in snake2 and player2_pos not in ladder1 and player2_pos not in ladder2:
        #if current position is not a snake body and not a ladder body, color current position orange
        filldraw_rectangle(d[player2_pos][0],d[player2_pos][1],1,1,t,"orange")
        write_num(d[player2_pos][0],d[player2_pos][1],t,player2_pos)
    else:
        if player2_pos in snake1 or player2_pos in snake2: #if current position is snake body
            filldraw_rectangle(d[player2_pos][0],d[player2_pos][1],1,1,t,"green") #make current position green
            write_num(d[player2_pos][0],d[player2_pos][1],t,player2_pos)
            if (player2_pos - roll_num2) not in ladder1 and (player2_pos - roll_num2) not in ladder2 and (player2_pos - roll_num2) not in snake1 and (player2_pos - roll_num2) not in snake2:
                #if last position is not a ladder or snake body
                if (player2_pos - roll_num2) != player1_pos:#if last position is also not player 1 position
                    filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"red") #color last position red
                    write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
                else:#if last position is also player 1 position
                    filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"white") #color last position white
                    write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
            elif (player2_pos - roll_num2) in ladder1 or (player2_pos - roll_num2) in ladder2:
                #if last position is a ladder body
                if (player2_pos - roll_num2) != player1_pos:#if last position is also not player 1 position
                    filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"yellow") #color last position yellow
                    write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
                else:#if last position is also player 1 position
                    filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"white") #color last position white
                    write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
            elif (player2_pos - roll_num2) in snake1 or (player2_pos - roll_num2) in snake2:
                #if last position is a snake body
                if (player2_pos - roll_num2) != player1_pos:#if last position is also not player 1 position
                    filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"green") #color last position green
                    write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
                else:#if last position is also player 1 position
                    filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"white") #color last position white
                    write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
        # Apply the snake slide: index 5 of each snake list is presumably the
        # tail square - TODO confirm against the snake1/snake2 definitions.
        if player2_pos in snake1: #if current position in snake1 body
            if player2_pos != snake1[5]:
                print ("Player 2 has hit a snake and will slide down to", snake1[5])
                player2_pos = snake1[5] #slide down by updating current position to snake 1 tail
                filldraw_rectangle(d[player2_pos][0],d[player2_pos][1],1,1,t,"orange")#color snake tail orange
                write_num(d[player2_pos][0],d[player2_pos][1],t,player2_pos)
        elif player2_pos in snake2: #if current position in snake2 body
            if player2_pos != snake2[5]:
                print ("Player 2 has hit a snake and will slide down to", snake2[5])
                player2_pos = snake2[5] #slide down by updating current position to snake 1 tail
                filldraw_rectangle(d[player2_pos][0],d[player2_pos][1],1,1,t,"orange")#color snake tail orange
                write_num(d[player2_pos][0],d[player2_pos][1],t,player2_pos)
        elif player2_pos in ladder1 or player2_pos in ladder2:#if current position is ladder body
            filldraw_rectangle(d[player2_pos][0],d[player2_pos][1],1,1,t,"yellow") #make current position yellow
            write_num(d[player2_pos][0],d[player2_pos][1],t,player2_pos)
            if (player2_pos - roll_num2) not in ladder1 and (player2_pos - roll_num2) not in ladder2 and (player2_pos - roll_num2) not in snake1 and (player2_pos - roll_num2) not in snake2:
                #if last position is not a snake or ladder body
                if (player2_pos - roll_num2) != player1_pos:#if last position is also not player 1 position
                    if (player2_pos - roll_num2) != 0:
                        filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"red") #color last position red
                        write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
                else:#if last position is also player 1 position
                    if (player2_pos - roll_num2) != 0:
                        filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"white") #color last position white
                        write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
            elif (player2_pos - roll_num2) in snake1 or (player2_pos - roll_num2) in snake2:
                #if last position is a snake body
                if (player2_pos - roll_num2) != player1_pos:#if last position is also not player 1 position
                    if (player2_pos - roll_num2) != 0:
                        filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"green") #color last position green
                        write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
                else:#if last position is also player 1 position
                    if (player2_pos - roll_num2) != 0:
                        filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"white") #color last position white
                        write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
            elif (player2_pos - roll_num2) in ladder1 or (player2_pos - roll_num2) in ladder2:
                #if last position is a ladder body
                if (player2_pos - roll_num2) != player1_pos:#if last position is also not player 1 position
                    if (player2_pos - roll_num2) != 0:
                        filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"yellow") #color last position yellow
                        write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
                else:#if last position is also player 1 position
                    if (player2_pos - roll_num2) != 0:
                        filldraw_rectangle(d[player2_pos - roll_num2][0],d[player2_pos - roll_num2][1],1,1,t,"white") #color last position white
                        write_num(d[player2_pos-roll_num2][0],d[player2_pos-roll_num2][1],t,player2_pos-roll_num2)
        # Apply the ladder climb (index 5 is presumably the top square -
        # TODO confirm against the ladder1/ladder2 definitions).
        if player2_pos in ladder1: #if current position in ladder1 body
            if player2_pos != ladder1[5]:
                print ("Player 2 has hit a ladder and will climb up to", ladder1[5])
                player2_pos = ladder1[5] #climb up by updating current position to ladder 1 top
                filldraw_rectangle(d[player2_pos][0],d[player2_pos][1],1,1,t,"orange")#color ladder 1 head orange
                write_num(d[player2_pos][0],d[player2_pos][1],t,player2_pos)
        elif player2_pos in ladder2: #if current position in ladder2 body
            if player2_pos != ladder2[5]:
                print ("Player 2 has hit a ladder and will climb up to", ladder2[5])
                player2_pos = ladder2[5] #climb up by updating current position to ladder 2 top
                filldraw_rectangle(d[player2_pos][0],d[player2_pos][1],1,1,t,"orange")#color ladder 2 head orange
                write_num(d[player2_pos][0],d[player2_pos][1],t,player2_pos)
    # --- Re-draw player 1's white marker if either affected square is hers. ---
    if player1_pos == player2_pos or player1_pos == (player2_pos - roll_num2):#if player 1 is or was in the same position as player 2, make the current or past position white
        if (player2_pos - roll_num2) != 0:
            filldraw_rectangle(d[player1_pos][0],d[player1_pos][1],1,1,t,"white")
            write_num(d[player1_pos][0],d[player1_pos][1],t,player1_pos)
    return player2_pos
def track_player1(player1_pos, d, t, player2_pos):
    """
    Advance player 1 by one die roll, applying the bounce-back rule.

    Parameters:
        player1_pos: player 1's square before this turn.
        d: dict mapping square number -> (x, y) board cell coordinates.
        t: the drawing turtle.
        player2_pos: player 2's current square (used to pick the recolor
            when both players sit on the same square during a bounce).

    Returns a (new_position, roll) tuple.
    """
    valid_input(1)
    roll = die_roll()
    landing = player1_pos + roll
    print("Player 1 rolls", roll, end="")
    if landing <= 100:
        print(" and moves to box", landing)
        return (landing, roll)
    # The roll overshoots square 100: stay put, recolor the current square,
    # then bounce back by the amount of the overshoot.
    print(", which makes the next position greater than 100")
    # Keep player 2's orange marker if it shares this square; otherwise
    # the square reverts to plain red.
    color = "orange" if player2_pos == player1_pos else "red"
    filldraw_rectangle(d[player1_pos][0], d[player1_pos][1], 1, 1, t, color)
    write_num(d[player1_pos][0], d[player1_pos][1], t, player1_pos)
    bounced = 100 - (landing - 100)
    print("Player 1 bounces back to", bounced)
    return (bounced, roll)
def track_player2(player2_pos, d, t, player1_pos):
    """
    Advance player 2 by one die roll, applying the bounce-back rule.

    Parameters:
        player2_pos: player 2's square before this turn.
        d: dict mapping square number -> (x, y) board cell coordinates.
        t: the drawing turtle.
        player1_pos: player 1's current square (its white marker must not
            be clobbered when both players share a square).

    Returns a (new_position, roll) tuple; new_position is reduced by the
    bounce-back rule when the roll would pass square 100.
    """
    valid_input(2)
    roll_num = die_roll()
    player2_pos = player2_pos + roll_num
    print("Player 2 rolls", roll_num, end="")
    if player2_pos <= 100:
        print(" and moves to box", player2_pos)
    else:
        print(", which makes the next position greater than 100")
        # Undo the overshoot: player 2 is back on its pre-roll square.
        player2_pos = player2_pos - roll_num
        # BUG FIX: the original painted this square red unconditionally,
        # clobbering player 1's white marker when both players shared it;
        # the mirror function track_player1 preserves the other player's
        # colour, so do the same here.
        if player1_pos == player2_pos:  # accounting for bounceback overlap with player 1
            filldraw_rectangle(d[player1_pos][0], d[player1_pos][1], 1, 1, t, "white")
        else:
            filldraw_rectangle(d[player2_pos][0], d[player2_pos][1], 1, 1, t, "red")
        write_num(d[player2_pos][0], d[player2_pos][1], t, player2_pos)
        player2_pos = 100 - ((player2_pos + roll_num) - 100)
        print("Player 2 bounces back to", player2_pos)
    return (player2_pos, roll_num)
def die_roll():
    """Simulate one roll of a fair six-sided die; returns an int in [1, 6]."""
    return random.randint(1, 6)
def setup(x, y, w, h, t):
    """
    Draw the initial 10x10 snakes-and-ladders board.

    Parameters:
        x, y: lower-left corner of the board background rectangle. NOTE:
            both names are shadowed by the loop variables below after the
            first filldraw_rectangle call.
        w, h: width and height of the board background in cells.
        t: the drawing turtle.

    Squares are numbered boustrophedon-style: even rows (0-based) run
    left-to-right, odd rows right-to-left. Snake squares are painted green,
    ladder squares yellow, all other squares red.
    """
    # Paint the whole board background red first.
    filldraw_rectangle(x,y,w,h,t,"red")
    for y in range(10):  # shadows the y parameter from here on
        if (y % 2) != 0:
            # Odd row: start at the row's highest square number and count down.
            pos = (10*y)+10
        else:
            # Even row: start at the row's lowest square number and count up
            # (for y=0 this yields pos = 1).
            pos =((10*y)-9)+10
        for x in range(10):  # shadows the x parameter
            filldraw_rectangle(x,y,1,1,t,"red")
            if pos in snake1 or pos in snake2:
                filldraw_rectangle(x,y,1,1,t,"green")
            if pos in ladder1 or pos in ladder2:
                filldraw_rectangle(x,y,1,1,t,"yellow")
            write_num(x,y,t,pos)
            # Advance the square number in this row's numbering direction.
            if (y % 2) != 0:
                pos = pos - 1
            else:
                pos = pos + 1
def the_game(t):
    """
    Run the match loop: players alternate turns until one reaches 100.

    Parameters:
        t: the drawing turtle used for all board updates.

    Both players start off-board at position 0. Each turn rolls the die,
    checks for a win, then repaints the board via positions1/positions2
    (which also apply snake slides and ladder climbs).
    """
    d = create_dictionary()
    game_over = False
    player1_pos = 0
    player2_pos = 0
    while not game_over:
        # Player 1's turn.
        player1_pos, roll1 = track_player1(player1_pos, d, t, player2_pos)
        game_over = winner(player1_pos, "player 1")
        player1_pos = positions1(player1_pos, player2_pos, d, t, roll1)
        # Player 2 only moves if player 1 has not just won.
        if not game_over:
            player2_pos, roll2 = track_player2(player2_pos, d, t, player1_pos)
            game_over = winner(player2_pos, "player 2")
            player2_pos = positions2(player2_pos, player1_pos, d, t, roll2)
    print("That's the end of the game.")
def intro_instructions():
    """Print the how-to-play instructions to the console, one line each."""
    instructions = (
        "The board will be updated after each move.",
        "Watch both the board and the python prompt after each move.",
        "Player 1 is white and player 2 is orange",
        "Green boxes are snakes and yellow boxes are ladders.",
        "If you hit any part of the snake(not just the head), you will slide down to the snakes tail",
        "If you hit any part of the ladder(not just the bottom), you will climb to the ladder's top",
        "May the luckiest player win",
    )
    for line in instructions:
        print(line)
def create_board_window():
    """
    Create the turtle graphics window and a pre-configured drawing turtle.

    The world coordinates are set so one board cell maps to one unit,
    with a one-unit margin (WIDTH/HEIGHT are module-level constants).

    Returns a (screen, turtle) tuple.
    """
    screen = turtle.Screen()
    screen.setworldcoordinates(0, 0, WIDTH + 1, HEIGHT + 1)
    pen = turtle.Turtle()
    pen.pensize(1)
    pen.speed(0)  # fastest animation speed
    pen.hideturtle()
    return (screen, pen)
def main():
    """Entry point: show instructions, build the board, then play the game."""
    intro_instructions()
    wn, t = create_board_window()
    setup(0, 0, WIDTH, HEIGHT, t)
    the_game(t)
    # Keep the window open until the user clicks it.
    wn.exitonclick()
wn.exitonclick()
# Run the game only when this file is executed as a script, not on import.
if __name__ == '__main__':
    main()
|
987,298 | 981fec62fc686d49073c364517e78f7f8388179f | # ## Getting Perspective Transform
# Script: warp a quadrilateral region of a photo into a flat, rectangular
# top-down view using OpenCV's perspective transform.

# Import necessary libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load/read the input image (path is relative to the working directory).
input_image2 = cv2.imread('ba.jpg')

# Show the original image (any key press advances past each waitKey).
cv2.imshow('Original', input_image2)
cv2.waitKey(0)

# Coordinates of the 4 corners of the source region in the original image,
# picked manually (e.g. with the Paint application). Order:
# top-left, top-right, bottom-left, bottom-right.
orig_img_coor = np.float32([[89, 1625], [2825, 1649], [49, 3489], [2817, 3497]])

# Mark each corner on the image.
# BUG FIX: cv2.circle requires an integer center point; passing the raw
# np.float32 values raises a TypeError on current OpenCV versions, so the
# coordinates are cast to int here.
for px, py in orig_img_coor:
    cv2.circle(input_image2, (int(px), int(py)), 5, (255, 0, 0), -1)

# Show the image with the corners marked.
cv2.imshow('Coordinates Marked', input_image2)
cv2.waitKey(0)

height, width = 450, 350
# Coordinates of the 4 corners of the target output, same ordering as above.
new_img_coor = np.float32([[0, 0], [width, 0], [0, height], [width, height]])

# Use the two sets of four points to compute the 3x3 perspective
# transformation matrix P, then warp the image into the target rectangle.
P = cv2.getPerspectiveTransform(orig_img_coor, new_img_coor)
perspective = cv2.warpPerspective(input_image2, P, (width, height))

cv2.imshow('warpPerspective', perspective)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Element locators for the store home page, keyed by "<element>By<Strategy>".
home_page = {
    "searchFieldByID": "search_query_top",
    "searchButtonByName": "submit_search",
    "cartButtonCSSSelector": "a b",
    "dressesButtonByXpath": "(//a[@class='sf-with-ul'])[4]",
    "dressesButtonActivateByXpath": "(//ul[@class='submenu-container clearfix first-in-line-xs'])[2]",
    "dressesButtonCassualDressesByXpath": "(//a[@title='Summer Dresses'])[2]",
    "womenButtonByXpath": "//a[@title='Women']",
}
# Element locators for a single product page.
product_page = {
    "selectProductByClassName": "replace-2x",
    "hoverOverProductBoxByClassName": "right-block",
    "productAddButtonByXpath": "//span[contains(text(), 'Add to cart')]",
    "modalWindowByXpath": "//i[@class='icon-chevron-left left']",
}
# Element locators for items expected in the shopping cart.
shopping_cart = {
    "blouseItemInTheCartByLinkText": "Blouse",
    "pinnedSummerDressByLinkText": "Printed Summer Dress",
    "pinnedChiffonDress": "Printed Chiffon Dress",
}
# Element locators for the dresses category page. Values containing "{0}"
# are str.format templates that take a 1-based element index.
dresses_page = {
    "summerDressesImageButtonByXpath": "(//a[@title='Summer Dresses'])[3]",
    "locateAllProductsByXpath": "//div[@class='right-block']",
    "addProductsToCartByXpath": "//a[@title='Add to cart']",
    "scrollIntoViewByXpath": "//ul[@class='product_list grid row']",
    "hoverAllElementsByXpath": "(//div[@class='right-block'])",
    "locateSelectedElementsByXpath": "(//div[@class='right-block'])[{0}]",
    "productAddButtonByXpath": "(//span[contains(text(), 'Add to cart')])[{0}]",
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.