text stringlengths 38 1.54M |
|---|
#!/bin/env python
# -*- coding: utf-8 -*-.
import os
import asyncio
import pyocr as pyocr
from config import FOLDER_PATH, TEXT
import docx
import pytesseract
from PIL import Image
async def list_sorted_pic_folder(FOLDER_PATH) -> list[str]:
    """Return the file names in *FOLDER_PATH* sorted alphabetically.

    Parameters
    ----------
    FOLDER_PATH : str
        Directory whose entries are listed.

    Returns
    -------
    list[str]
        File names sorted by name, giving a stable page order for OCR.
    """
    # os.listdir() returns entries in arbitrary order; sort once instead of
    # building an intermediate list and sorting it afterwards.
    return sorted(os.listdir(FOLDER_PATH))
async def gen_read_pic():
    """Async generator yielding OCR'd text for each image in FOLDER_PATH.

    Yields
    ------
    str
        TEXT prefix plus the stripped Tesseract output for one image.
    """
    filenames = await list_sorted_pic_folder(FOLDER_PATH)
    for file in filenames:
        # BUG FIX: the original condition `'.jpg' or '.png' in filename` is
        # always truthy ('.jpg' is a non-empty string), so every file was
        # processed. Test the actual extension of the current file instead.
        if file.lower().endswith(('.jpg', '.png')):
            img_path = os.path.join(FOLDER_PATH, file)
            img = Image.open(img_path)
            # BUG FIX: `builder=` belongs to the pyocr API; pytesseract's
            # image_to_string() has no such parameter and raised TypeError.
            text = TEXT + pytesseract.image_to_string(
                img,
                lang="rus",
                config='--oem 1',
            ).strip()
            yield text
async def write_file_to_doc():
    """Print every OCR'd page and append it to ``output.txt``.

    Consumes the :func:`gen_read_pic` async generator; each yielded page is
    echoed to stdout with a ``page ###...`` separator and appended to the
    output file.
    """
    # Open the output file once for the whole run instead of re-opening it
    # (append mode) for every single page.
    with open('output.txt', mode="a+", encoding="utf-8") as file_write_name:
        async for return_value in gen_read_pic():
            print(return_value)
            print(f'page', 20 * '#')
            file_write_name.write(return_value)
async def main() -> None:
    """Entry point: OCR every image in FOLDER_PATH and write the combined text."""
    await write_file_to_doc()
if __name__ == '__main__':
    asyncio.run(main())
import requests
import json
import tkinter
from tkinter import Menu
import tkinter as tk
import tkinter.ttk as ttk
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
from tkinter.ttk import Combobox
import datetime
from matplotlib.ticker import FixedLocator, FixedFormatter
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import MaxNLocator
import matplotlib.ticker as ticker
import numpy as np
from collections import Counter
# Base URL of the IGWAtlas records API.
RECORDS_URL = 'https://lmnad.nntu.ru/api/v1/records/'
# NOTE(review): API key committed to source control — consider moving it to an
# environment variable or a config file.
API_KEY = 'd837d31970deb03ee35c416c5a66be1bba9f56d3'
# Axis labels for observation-type bars (map, plot, satellite image, record, table).
x_label = ['Карта', 'График', 'Спутниковый снимок', 'Запись', 'Таблица']
labels = [1, 2, 3, 4, 5]
# Month names used as tick labels on the per-month histogram.
label = ['Январь', 'Февраль', 'Март', 'Апрель', 'Май', 'Июнь', 'Июль', 'Август', 'Сентябрь', 'Октябрь', 'Ноябрь',
         'Декабрь']
# Season tick labels: winter, spring, summer, autumn.
x_label_season = ['Зима', 'Весна', 'Лето', 'Осень']
def fetch_records(limit=3000):
    """Download up to *limit* records from the IGWAtlas API and cache them.

    Parameters
    ----------
    limit : int
        Maximum number of records to request (default 3000).

    BUG FIX: the original ignored the ``limit`` argument and always sent a
    hard-coded 3000 in the query string.
    """
    resp = requests.get(RECORDS_URL, params=dict(api_key=API_KEY, is_yandex_map=0, limit=limit))
    records = resp.json()
    save_records(records)
def load_records():
    """Load the cached API response from ``records.json`` in the working dir."""
    with open('records.json', 'r') as f:
        return json.load(f)
def save_records(result):
    """Persist *result* (a JSON-serialisable object) to ``records.json``."""
    serialised = json.dumps(result)
    with open('records.json', 'w+') as f:
        f.write(serialised)
def is_inside_sea(p_lat, p_lon, lon, lon1, lat, lat1):
    """Return True when point (p_lat, p_lon) lies inside the bounding box
    [lon, lon1] x [lat, lat1] (inclusive on all edges)."""
    inside_lon = lon <= p_lon <= lon1
    inside_lat = lat <= p_lat <= lat1
    return inside_lon and inside_lat
def increment_type_to_sea(def_sea_records, sea_types):
    """Tally each record's ``'value'`` field into *sea_types* (mutated in place).

    Every element of *def_sea_records* is expected to be a mapping with a
    ``'value'`` key; its count in *sea_types* is incremented by one.
    """
    for record in def_sea_records:
        key = record['value']
        sea_types[key] = sea_types.get(key, 0) + 1
def _quit():
    # Stop the Tk mainloop and tear down the root window (module-level `window`).
    window.quit()
    window.destroy()
def init_menu_bar():
    """Attach a File menu with an Exit entry to the root window."""
    menu = Menu(window)
    # Add button 'Exit'
    new_item = Menu(menu, tearoff=0)
    new_item.add_command(label='Exit', command=_quit)
    menu.add_cascade(label='File', menu=new_item)
    window.config(menu=menu)
def init_plot_bar():
    """Embed a two-panel matplotlib figure (shared y-axis) in the Tk window.

    Returns
    -------
    tuple
        (canvas, left_axes, right_axes) for later redrawing.
    """
    fig = Figure(figsize=(5, 4), dpi=100)
    img_area_1, img_area_2 = fig.subplots(1, 2, sharey=True)
    canvas = FigureCanvasTkAgg(fig, master=window)  # A tk.DrawingArea.
    canvas.draw()
    canvas.get_tk_widget().pack(side=tkinter.RIGHT, fill=tkinter.BOTH, expand=1)
    # Standard matplotlib navigation toolbar (zoom/pan/save).
    toolbar = NavigationToolbar2Tk(canvas, window)
    toolbar.update()
    canvas.get_tk_widget().pack(side=tkinter.BOTTOM, fill=tkinter.BOTH, expand=1)
    return canvas, img_area_1, img_area_2
def plot_season():
    # Redraw both panels with per-season histograms for the seas currently
    # selected in the two combo boxes.
    plot_bars_season(img_area_1, ComboBox1.get())
    plot_bars_season(img_area_2, ComboBox2.get())
    canvas.draw()
def plot_bars_season(img_area, combobox):
    """Draw a bar chart of observation counts per season for the selected sea.

    Parameters
    ----------
    img_area : matplotlib axes to draw on (cleared first).
    combobox : str, the sea name selected in the combo box.

    BUG FIX: the original iterated the keys of ``sea_dic[name]['month']`` and
    appended 1 per key, counting *distinct months* instead of observations
    (the month dict maps month -> observation count, as plot_bars_month shows).
    It also indexed sea_dic before its membership guard, so an unknown sea
    raised KeyError; the guard now runs first.
    """
    img_area.cla()
    sea_name_def = combobox
    sea_season_x = [0, 1, 2, 3]
    # counts[i] = observations in winter, spring, summer, autumn respectively.
    counts = [0, 0, 0, 0]
    if sea_name_def in sea_dic:
        for month, n_obs in sea_dic[sea_name_def]['month'].items():
            if month in (12, 1, 2):
                counts[0] += n_obs
            elif month in (3, 4, 5):
                counts[1] += n_obs
            elif month in (6, 7, 8):
                counts[2] += n_obs
            else:  # 9, 10, 11
                counts[3] += n_obs
    img_area.bar(sea_season_x, counts, tick_label=x_label_season)
    img_area.set_xlabel('Сезоны')
    img_area.set_ylabel('Количество наблюдений')
    img_area.set_title(sea_name_def, fontsize=16)
def plot_month():
    # Redraw both panels with per-month histograms for the selected seas.
    plot_bars_month(img_area_1, ComboBox1.get())
    plot_bars_month(img_area_2, ComboBox2.get())
    canvas.draw()
def plot_bars_month(img_area, combobox):
    """Draw a bar chart of observation counts per calendar month (1-12)
    for the sea named by *combobox*; unknown seas yield all-zero bars."""
    img_area.cla()
    sea_name_def = combobox
    months = list(range(1, 13))
    # Month -> count mapping for the chosen sea; empty when the sea is unknown.
    per_month = sea_dic[sea_name_def]['month'] if sea_name_def in sea_dic else {}
    counts = [per_month.get(m, 0) for m in months]
    img_area.bar(months, counts, tick_label=label)
    img_area.tick_params(axis="x", labelrotation=50)
    img_area.set_xlabel('Месяца')
    img_area.set_ylabel('Количество наблюдений')
    img_area.set_title(sea_name_def, fontsize=16)
def plot_bars_year():
    # Redraw both panels with per-year histograms for the selected seas.
    plot_bars_year1(img_area_1, ComboBox1.get())
    plot_bars_year1(img_area_2, ComboBox2.get())
    canvas.draw()
def plot_bars_year1(img_area, combobox):
    """Draw a bar chart of observation counts per year for the selected sea.

    Parameters
    ----------
    img_area : matplotlib axes to draw on (cleared first).
    combobox : str, the sea name selected in the combo box.

    BUG FIX: ``range(min_year, max_year)`` excluded the most recent year, so
    its bar was never drawn even though the tick locator was sized for
    ``max_year - min_year + 1`` years; the range is now inclusive.
    """
    img_area.cla()
    sea_name_def = combobox
    sea_years_y = []
    sea_years_x = []
    range_x = range(min_year, max_year + 1)
    for i in range_x:
        y = 0
        if sea_name_def in sea_dic:
            if i in sea_dic[sea_name_def]['year']:
                y = sea_dic[sea_name_def]['year'][i]
        sea_years_y.append(y)
        sea_years_x.append(i)
    img_area.bar(sea_years_x, sea_years_y, tick_label=sea_years_x)
    img_area.xaxis.set_major_locator(MaxNLocator(max_year - min_year + 1))
    img_area.tick_params(axis="x", labelrotation=90, labelsize=6.5)
    img_area.set_xlabel('Года')
    img_area.set_ylabel('Количество наблюдений')
    img_area.set_title(sea_name_def, fontsize=16)
def plot_bars():
    # Redraw both panels with observation-type histograms for the selected seas.
    print("Plot or update bar!")
    plot_bars_type(img_area_1, ComboBox1.get())
    plot_bars_type(img_area_2, ComboBox2.get())
    canvas.draw()
def plot_bars_type(img_area, combobox):
    """Draw a bar chart of observation counts per observation type (0-4)
    for the sea named by *combobox*; unknown seas yield all-zero bars."""
    img_area.cla()
    sea_name_def = combobox
    type_ids = [0, 1, 2, 3, 4]
    # Type-id -> count mapping for the chosen sea; empty when the sea is unknown.
    per_type = sea_dic[sea_name_def]['new_types'] if sea_name_def in sea_dic else {}
    counts = [per_type.get(t, 0) for t in type_ids]
    img_area.bar(type_ids, counts, tick_label=x_label)
    img_area.tick_params(labelsize=7)
    img_area.set_xlabel('Типы наблюдений')
    img_area.set_ylabel('Количество наблюдений')
    img_area.set_title(sea_name_def, fontsize=16)
def get_sea_name(point_get):
    """Map an observation point to a sea name by bounding-box lookup.

    Parameters
    ----------
    point_get : mapping with 'lat' and 'lon' keys (decimal degrees).

    Returns
    -------
    str
        The matching sea display name, or ``mir_okean_name`` ("World Ocean")
        when no box matches. The sea-name globals are defined in the
        ``__main__`` block below.

    Each call passes (lat, lon, lon_min, lon_max, lat_min, lat_max) —
    note the box arguments are longitude bounds first, latitude bounds second.

    NOTE(review): the Bering Sea box (156.64 .. -156.89) crosses the
    antimeridian, so the ``lon <= p_lon <= lon1`` test can never succeed and
    Bering points fall through to the World Ocean — confirm and fix upstream.
    """
    if is_inside_sea(point_get['lat'], point_get['lon'], 27.166665, 41.895011, 41.196970, 48.015644):
        return black_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 47.706201, 54.112157, 35.831338, 46.817235):
        return kasp_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 11.033512, 22.344357, 53.925258, 60.364091):
        return balt_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 34.689990, 38.185911, 45.225182, 47.007205):
        return azov_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 27.082836, 67.464416, 71.276162, 79.814267):
        return barenz_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 143.306583, 179.883581, 66.459554, 76.588341):
        return ist_sib_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 55.108884, 95.543544, 68.865791, 81.424224):
        return kar_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 102.730621, 138.622760, 72.848152, 79.230808):
        return sea_lapt_name
    if is_inside_sea(point_get['lat'], point_get['lon'], -179.929840, -161.653438, 66.645032, 71.446011):
        return chuk_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 33.518473, 40.385833, 63.654762, 66.678821):
        return white_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 156.640431, -156.888830, 50.764496, 66.304490):
        return bering_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 135.007656, 156.231867, 51.571616, 61.230127):
        return okhotsk_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 127.344542, 142.468475, 33.217811, 51.962581):
        return japan_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], -89.078428, -66.502074, 5.203730, 23.157922):
        return karib_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 45.734804, 76.933129, 0.989934, 25.688408):
        return arav_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 107.235153, 121.277466, 2.012984, 22.613627):
        return south_kit_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 120.021048, 129.481133, 25.781484, 33.172170):
        return ist_kit_sea_name
    if is_inside_sea(point_get['lat'], point_get['lon'], 151.004796, 173.163069, -47.869489, -34.236905):
        return tas_sea_name
    # Fallback: point is not inside any known sea box.
    return mir_okean_name
# Main script: load cached records, aggregate per-sea statistics and start the
# Tk UI.  The two consecutive `if __name__ == '__main__':` guards of the
# original are merged into one.
if __name__ == '__main__':
    # fetch_records()
    records = load_records()

    # Display names for every sea region recognised by get_sea_name().
    black_sea_name = "Чёрное море"
    kasp_sea_name = "Каспийское море"
    balt_sea_name = "Балтийское море"
    azov_sea_name = "Азовское море"
    barenz_sea_name = "Баренцево море"
    ist_sib_sea_name = "Восточно-Сибирское море"
    kar_sea_name = "Карское море"
    sea_lapt_name = "Море Лаптевых"
    chuk_sea_name = "Чукотское море"
    white_sea_name = "Белое море"
    bering_sea_name = "Берингово море"
    okhotsk_sea_name = "Охотское море"
    japan_sea_name = "Японское море"
    karib_sea_name = "Карибское море"
    arav_sea_name = "Аравийское море"
    south_kit_sea_name = "Южно-Китайское море"
    ist_kit_sea_name = "Восточно-Китайсоке море"
    tas_sea_name = "Тасманово море"
    mir_okean_name = "Мировой океан"

    window = tkinter.Tk()
    window.wm_title("IGWAtlas_статистика")

    count_records = records['count']
    results_records = records['results']

    # Aggregate per-sea statistics: observation counts by year, month and type.
    sea_dic = dict()
    max_year = -1
    min_year = 5000
    for point in results_records:
        sea_name = get_sea_name(point)
        if sea_name is not None:
            if sea_name not in sea_dic:
                sea_dic[sea_name] = dict()
                sea_dic[sea_name]['year'] = dict()
                sea_dic[sea_name]['new_types'] = dict()
                sea_dic[sea_name]['month'] = dict()
                sea_dic[sea_name]['season'] = dict()
            increment_type_to_sea(point['new_types'], sea_dic[sea_name]['new_types'])
            if point['date'] is not None:
                # Dates come back in ISO-8601 'Z' form, e.g. 2019-03-22T10:40:00Z.
                date_time_obj = datetime.datetime.strptime(point['date'], '%Y-%m-%dT%H:%M:%SZ')
                if date_time_obj.year not in sea_dic[sea_name]['year']:
                    sea_dic[sea_name]['year'][date_time_obj.year] = 0
                sea_dic[sea_name]['year'][date_time_obj.year] += 1
                max_year = max(date_time_obj.year, max_year)
                min_year = min(date_time_obj.year, min_year)
                if date_time_obj.month not in sea_dic[sea_name]['month']:
                    sea_dic[sea_name]['month'][date_time_obj.month] = 0
                sea_dic[sea_name]['month'][date_time_obj.month] += 1

    canvas, img_area_1, img_area_2 = init_plot_bar()

    button_type = tkinter.Button(master=window, text='По типам наблюдений', command=plot_bars)
    button_type.pack(side=tkinter.LEFT)
    button_year = tkinter.Button(master=window, text='По годам', command=plot_bars_year)
    button_year.pack(side=tkinter.LEFT)
    button_season = tkinter.Button(master=window, text='По сезонам', command=plot_season)
    button_season.pack(side=tkinter.LEFT)
    button_month = tkinter.Button(master=window, text='По месяцам', command=plot_month)
    button_month.pack(side=tkinter.LEFT)

    ComboBox2 = Combobox(master=window)
    ComboBox2['values'] = (
        black_sea_name, kasp_sea_name, balt_sea_name, barenz_sea_name, kar_sea_name,
        sea_lapt_name, bering_sea_name, okhotsk_sea_name, japan_sea_name, karib_sea_name, arav_sea_name,
        south_kit_sea_name, ist_kit_sea_name, tas_sea_name, mir_okean_name)
    ComboBox2.current(0)
    ComboBox2.pack(side=tkinter.RIGHT)
    ComboBox1 = Combobox(master=window)
    ComboBox1['values'] = (
        black_sea_name, kasp_sea_name, balt_sea_name, barenz_sea_name, kar_sea_name,
        sea_lapt_name, bering_sea_name, okhotsk_sea_name, japan_sea_name, karib_sea_name, arav_sea_name,
        south_kit_sea_name, ist_kit_sea_name, tas_sea_name, mir_okean_name)
    ComboBox1.current(0)
    ComboBox1.pack()

    tkinter.mainloop()
    # BUG FIX: removed `print(map_types)` — `map_types` was never defined
    # anywhere in the file, so it raised NameError as soon as the main loop
    # exited.
|
# Generated by Django 2.1.7 on 2019-03-22 10:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Link Restaurant to its owning user and tighten the cuisine field.

    Adds a nullable ``user`` FK (CASCADE on delete) to ``Restaurant`` and
    alters ``cuisine`` to a plain CharField(max_length=30).
    """
    dependencies = [
        ('api', '0006_auto_20190322_0922'),
    ]
    operations = [
        migrations.AddField(
            model_name='restaurant',
            name='user',
            # default=None + null=True keeps existing rows valid without a user.
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='cuisine',
            field=models.CharField(max_length=30),
        ),
    ]
|
# Import relevant modules
import pymc
import numpy
import pandas as pd
class LogDModel(object):
    """Hierarchical Bayesian model of logD from two-phase concentration data.

    Uses the pymc 2.x API. For each compound the log10 concentration in each
    solvent phase gets a uniform prior; each independent repeat gets a
    lognormal "true" concentration around it, and each replicate measurement
    is a lognormal observation around its repeat. ``self.logd[compound]`` is
    the derived cyclohexane-minus-buffer log10 difference.
    """
    def __init__(self, df):
        """
        Parameters
        ----------
        df - pandas dataframe
            Expected columns (from the groupby/column accesses below):
            "Sample Name", "Solvent", "Area/Volume", "Set", "Repeat",
            "Replicate".
        Returns
        -------
        """
        assert type(df) == pd.DataFrame
        self.logd = dict()
        # Shared noise magnitudes (log scale): experimental spread per phase
        # and mass-spec measurement spread per phase.
        sigma_guess = 0.2
        logsigma_chx = pymc.Uniform("Sigma cyclohexane", -4., 4., numpy.log(sigma_guess))
        logsigma_pbs = pymc.Uniform("Sigma buffer", -4., 4., numpy.log(sigma_guess))
        logsigma_ms_chx = pymc.Uniform("Sigma MS cyclohexane", -4., 4., numpy.log(sigma_guess))
        logsigma_ms_pbs = pymc.Uniform("Sigma MS buffer", -4., 4., numpy.log(sigma_guess))
        self.model = dict(logsigma_chx=logsigma_chx, logsigma_pbs=logsigma_pbs, logsigma_ms_chx=logsigma_ms_chx, logsigma_ms_pbs=logsigma_ms_pbs)
        # Every compound
        for compound, compound_group in df.groupby("Sample Name"):
            # Concentration in each solvent phase
            for phase, phase_group in compound_group.groupby("Solvent"):
                # NOTE(review): lowercased phase is used to build keys like
                # "logsigma_chx"/"logsigma_pbs" — assumes Solvent values map to
                # exactly 'chx'/'pbs'; confirm against the input data.
                phase = phase.lower()
                parameter_name = 'log10_{0}_{1}'.format(compound, phase)
                mean_concentration = phase_group["Area/Volume"].mean()
                # logsig = numpy.log(phase_group["Area/Volume"].std())
                min_concentration = 1/2.0
                max_concentration = 1.e8
                # The log10 of the concentration is modelled with a uniform prior
                self.model[parameter_name] = pymc.Uniform(parameter_name, lower=numpy.log10(min_concentration), upper=numpy.log10(max_concentration), value=numpy.log10(mean_concentration))
                # Corresponds to independent repeats
                for (batch, repeat), repeat_group in phase_group.groupby(["Set", "Repeat"]):
                    repeat_parameter_name = '{0}_{1}_{2}-{3}'.format(compound, phase, batch, repeat)
                    # Lambda default arguments bind the pymc expressions at
                    # definition time (avoids the late-binding closure pitfall).
                    mu = pymc.Lambda(repeat_parameter_name + "-MU", lambda mu=pow(10.0, self.model[parameter_name]), ls=pow(10.0,self.model[parameter_name])*pymc.exp(self.model["logsigma_{}".format(phase)]): self._mu_lognorm(mu, ls))
                    tau = pymc.Lambda(repeat_parameter_name + "-TAU", lambda mu=pow(10.0,self.model[parameter_name]), ls=pow(10.0,self.model[parameter_name])*pymc.exp(self.model["logsigma_{}".format(phase)]): self._tau_lognorm(mu, ls))
                    # True concentration of independent repeats
                    self.model[repeat_parameter_name] = pymc.Lognormal(repeat_parameter_name, mu=mu, tau=tau, value=mean_concentration)
                    # likelihood of each observation
                    for replicate, repl_group in repeat_group.groupby("Replicate"):
                        replicate_parameter_name = '{0}_{1}_{2}-{3}_{4}'.format(compound, phase, batch, repeat, replicate)
                        # Extract the observed concentration
                        assert len(repl_group) == 1  # failsafe
                        value = repl_group["Area/Volume"]
                        mu = pymc.Lambda(replicate_parameter_name + "-MU", lambda mu=self.model[repeat_parameter_name], ls=self.model[repeat_parameter_name]*pymc.exp(self.model["logsigma_ms_{}".format(phase)]): self._mu_lognorm(mu, ls))
                        tau = pymc.Lambda(replicate_parameter_name + "-TAU", lambda mu=self.model[repeat_parameter_name], ls=self.model[repeat_parameter_name]*pymc.exp(self.model["logsigma_ms_{}".format(phase)]): self._tau_lognorm(mu, ls))
                        # Observed concentration from replicate experiment
                        self.model[replicate_parameter_name] = pymc.Lognormal(replicate_parameter_name, mu=mu, tau=tau, value=value, observed=True) #, value=1.0)
            # Derived quantity: logD = log10(C_chx) - log10(C_pbs).
            self.logd[compound] = pymc.Lambda("LogD_{}".format(compound), lambda c=self.model["log10_{}_chx".format(compound)], p=self.model["log10_{}_pbs".format(compound)]: c-p)
    def _mu_lognorm(self, mu, sigma):
        """
        Transform a gaussian mu to the lognormal mu
        Parameters
        ----------
        mu - float
            the mean of a gaussian variable
        sigma - float
            sigma of a gaussian variable
        Returns
        -------
        float mu
        """
        # sigma = numpy.exp(logsigma)
        return numpy.log(mu**2 / numpy.sqrt(sigma**2 + mu**2))
    def _tau_lognorm(self, mu, sigma):
        """
        Get lognormal tau from gaussian parameters
        Parameters
        ----------
        mu - float
            the mean of a gaussian variable
        sigma - float
            sigma of a gaussian variable
        Returns
        -------
        """
        # sigma = numpy.exp(logsigma)
        # tau = 1 / sigma_lognormal^2
        return numpy.sqrt(numpy.log(1.0 + (sigma/mu)**2))**(-2)
|
from sqlalchemy import Sequence
from sqlalchemy import Column, Integer, BigInteger, Date
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref
from Base import Base
from SessionFactory import SessionFactory
from Match import Match
class Game(Base):
    """ORM model for a single game belonging to a match."""
    __tablename__ = 'games'
    # Surrogate key backed by the games_id_seq sequence.
    id = Column(Integer, Sequence('games_id_seq'), primary_key=True)
    match_id = Column(Integer, ForeignKey('matches.id'))
    # Back-reference: Match.games lists the match's games ordered by id.
    match = relationship("Match", backref=backref('games', order_by=id))
    #scores = relationship("Score", backref="game")
    def __init__(self, match):
        self.match = match
|
# Demo of SNAP node/edge attributes on a complete graph (Python 2 syntax).
import snap
nodes = 10
# Complete graph on `nodes` vertices; PNEANet supports node/edge attributes.
G = snap.GenFull(snap.PNEANet,nodes)
# define int, float and str attributes on nodes
G.AddIntAttrN("NValInt", 0)
G.AddFltAttrN("NValFlt", 0.0)
G.AddStrAttrN("NValStr", "0")
# define an int attribute on edges
G.AddIntAttrE("EValInt", 0)
# add attribute values, node ID for nodes, edge ID for edges
for NI in G.Nodes():
    nid = NI.GetId()
    val = nid
    G.AddIntAttrDatN(nid, val, "NValInt")
    G.AddFltAttrDatN(nid, float(val), "NValFlt")
    G.AddStrAttrDatN(nid, str(val), "NValStr")
    for nid1 in NI.GetOutEdges():
        eid = G.GetEId(nid,nid1)
        val = eid
        G.AddIntAttrDatE(eid, val, "EValInt")
# print out attribute values
for NI in G.Nodes():
    nid = NI.GetId()
    ival = G.GetIntAttrDatN(nid, "NValInt")
    fval = G.GetFltAttrDatN(nid, "NValFlt")
    sval = G.GetStrAttrDatN(nid, "NValStr")
    print "node %d, NValInt %d, NValFlt %.2f, NValStr %s" % (nid, ival, fval, sval)
    for nid1 in NI.GetOutEdges():
        eid = G.GetEId(nid, nid1)
        val = G.GetIntAttrDatE(eid, "EValInt")
        print "edge %d (%d,%d), EValInt %d" % (eid, nid, nid1, val)
|
# @author: Manish Bhattarai
import glob
import os
import h5py
import pandas as pd
from scipy.io import loadmat
from .utils import *
class data_read():
    r"""Class for reading data.

    Parameters
    ----------
    args : class
        Class which comprises following attributes
        fpath : str
            Directory path of file to be read
        pgrid : tuple
            Cartesian grid configuration
        ftype : str
            Type of data to read(mat/npy/csv/folder)
        fname : str
            Name of the file to read
        comm (object): comm object for distributed read
    """

    @comm_timing()
    def __init__(self, args):
        self.fpath = args.fpath
        # Prefer an explicit grid when given; else derive it from row/col counts.
        if "grid" in vars(args) and args.grid:
            self.pgrid = args.grid
        else:
            self.pgrid = [args.p_r, args.p_c]
        self.ftype = args.ftype
        self.fname = args.fname
        self.comm = args.comm1
        self.rank = self.comm.rank
        self.precision = args.precision if args.precision else 'float32'
        self.data = 0
        # 'folder' means data was pre-split into one .npy chunk per rank;
        # any other ftype is a single shared file that gets partitioned here.
        if self.ftype == 'folder':
            self.file_path = self.fpath + self.fname + str(self.comm.rank) + '.npy'
        else:
            self.file_path = self.fpath + self.fname + '.' + self.ftype

    @comm_timing()
    def read(self):
        r"""Data read function"""
        return self.read_dat()

    @comm_timing()
    def read_file_npy(self):
        r"""Numpy data read function"""
        self.data = np.load(self.file_path)

    @comm_timing()
    def read_file_csv(self):
        r"""CSV data read function"""
        self.data = pd.read_csv(self.file_path, header=None).values

    @comm_timing()
    def read_file_mat(self):
        r"""mat file read function"""
        # NOTE(review): assumes the matrix is stored under key 'X' — confirm.
        self.data = loadmat(self.file_path)['X']

    @comm_timing()
    def data_partition(self):
        r"""
        This function divides the input matrix into chunks as specified by grid configuration.

        Return n array of shape (nrows_i, ncols_i) where i is the index of each chunk.
        \Sum_i^n ( nrows_i * ncols_i ) = arr.size

        If arr is a 2D array, the returned array should look like n subblocks with
        each subblock preserving the "physical" layout of arr.
        """
        dtr_blk_shp = determine_block_params(self.rank, self.pgrid, self.data.shape)
        blk_indices = dtr_blk_shp.determine_block_index_range_asymm()
        # Keep only this rank's sub-block (index ranges are inclusive).
        self.data = self.data[blk_indices[0][0]:blk_indices[1][0] + 1, blk_indices[0][1]:blk_indices[1][1] + 1]

    @comm_timing()
    def save_data_to_file(self, fpath):
        r"""This function saves the splitted data to numpy array indexed with chunk number"""
        # BUG FIX: self.comm.rank is an int; the original concatenated it to a
        # str directly ('A_' + rank), which raises TypeError.
        fname = fpath + 'A_' + str(self.comm.rank) + '.npy'
        np.save(fname, self.data)

    @comm_timing()
    def read_dat(self):
        r"""Function for reading the data and split into chunks to be reach by each MPI rank"""
        if self.ftype == 'npy':
            self.read_file_npy()
            self.data_partition()
        elif self.ftype == 'csv' or self.ftype == 'txt':
            self.read_file_csv()
            self.data_partition()
        elif self.ftype == 'mat':
            self.read_file_mat()
            self.data_partition()
        # 'folder' chunks are already per-rank, so no partition step is needed.
        if self.ftype == 'folder':
            self.read_file_npy()
        return self.data.astype(self.precision)
class split_files_save():
    r"""Rank 0 based data read, split and save.

    Parameters
    ----------
    data : array-like
        Full matrix to be split.
    pgrid : tuple
        Cartesian grid configuration (rows, cols).
    fpath : str
        Output directory for the chunk files (created if missing).
    """

    @comm_timing()
    def __init__(self, data, pgrid, fpath):
        self.data = data
        self.pgrid = pgrid
        self.p_r = pgrid[0]
        self.p_c = pgrid[1]
        self.fpath = fpath
        try:
            os.makedirs(self.fpath, exist_ok=True)
        except:
            pass

    @comm_timing()
    def split_files(self):
        r"""Compute the index range for each block and partition the data as per the chunk"""
        dtr_blk_idx = [determine_block_params(rank, self.pgrid, self.data.shape).determine_block_index_range_asymm() for
                       rank in range(np.product(self.pgrid))]
        # Index ranges are inclusive, hence the +1 on the upper bounds.
        self.split = [self.data[i[0][0]:i[1][0] + 1, i[0][1]:i[1][1] + 1] for i in dtr_blk_idx]
        return self.split

    @comm_timing()
    def save_data_to_file(self):
        r"""Function to save the chunks into numpy files"""
        self.split = self.split_files()
        for i in range(self.p_r * self.p_c):
            name = 'A_' + str(i) + '.npy'
            fname = self.fpath + name
            arr = self.split[i]
            # BUG FIX: the original saved self.data (the whole matrix) for
            # every chunk file instead of the chunk `arr`.
            np.save(fname, arr)
class data_write():
    r"""Class for writing data/results.

    Parameters
    ----------
    args (class): class which comprises following attributes
    results_path (str): Directory path of file to write
    pgrid (tuple): Cartesian grid configuration
    ftype (str): Type of data to read(mat/npy/csv/folder)
    comm (object): comm object for distributed read
    """

    @comm_timing()
    def __init__(self, args):
        self.p_r, self.p_c = args.p_r, args.p_c
        self.pgrid = [self.p_r, self.p_c]
        self.ftype = args.ftype
        self.comm = args.comm1
        self.params = args
        # NOTE(review): reads `results_paths` (plural) while the class docstring
        # says `results_path` — confirm the attribute name on the args object.
        self.fpath = self.params.results_paths
        self.rank = self.comm.rank

    @comm_timing()
    def create_folder_dir(self, fpath):
        r"""Create directory if not present"""
        try:
            os.mkdir(fpath)
        except:
            pass

    @comm_timing()
    def save_factors(self, factors, reg=False):
        r"""Save the W and H factors for each MPI process"""
        self.create_folder_dir(self.fpath)
        # Regularised factors go to separate *_reg_factors directories.
        if reg == True:
            W_factors_pth = self.fpath + 'W_reg_factors/'
            H_factors_pth = self.fpath + 'H_reg_factors/'
        else:
            W_factors_pth = self.fpath + 'W_factors/'
            H_factors_pth = self.fpath + 'H_factors/'
        self.create_folder_dir(W_factors_pth)
        self.create_folder_dir(H_factors_pth)
        # Column-only grid: W is replicated (rank 0 saves it once), H is split.
        if self.p_r == 1 and self.p_c != 1:
            if self.rank == 0:
                np.save(W_factors_pth + 'W.npy', factors[0])
            np.save(H_factors_pth + 'H_' + str(self.rank) + '.npy', factors[1])
        # Row-only grid: H is replicated, W is split per rank.
        elif self.p_c == 1 and self.p_r != 1:  # Saving results for each K
            if self.rank == 0:
                np.save(H_factors_pth + 'H.npy', factors[1])
            np.save(W_factors_pth + 'W_' + str(self.rank) + '.npy', factors[0])
        # Full 2D grid: both factors are split per rank.
        else:
            np.save(H_factors_pth + 'H_' + str(self.rank) + '.npy', factors[1])
            np.save(W_factors_pth + 'W_' + str(self.rank) + '.npy', factors[0])

    @comm_timing()
    def save_cluster_results(self, params):
        r"""Save cluster results to a h5 file with rank 0"""
        if self.rank == 0:
            with h5py.File(self.fpath + 'results.h5', 'w') as hf:
                hf.create_dataset('clusterSilhouetteCoefficients', data=params['clusterSilhouetteCoefficients'])
                hf.create_dataset('avgSilhouetteCoefficients', data=params['avgSilhouetteCoefficients'])
                hf.create_dataset('L_err', data=params['L_err'])
                hf.create_dataset('L_errDist', data=params['L_errDist'])
                hf.create_dataset('avgErr', data=params['avgErr'])
                hf.create_dataset('ErrTol', data=params['recon_err'])
                hf.create_dataset('AIC', data=params['AIC'])
class read_factors():
    r"""Class for reading saved factors.

    Parameters
    ----------
    factors_path : str
        Directory path of factors to read from
    pgrid : tuple
        Cartesian grid configuration
    """

    @comm_timing()
    def __init__(self, factors_path, pgrid):
        self.factors_path = factors_path
        self.W_path = self.factors_path + 'W_reg_factors/*'
        self.H_path = self.factors_path + 'H_reg_factors/*'
        self.p_grid = pgrid
        # BUG FIX: the original called load_factors() and discarded its return
        # value; keep the stacked factors on the instance so callers can use
        # them after construction.
        self.W_data, self.H_data = self.load_factors()

    @comm_timing()
    def custom_read_npy(self, fpath):
        r"""Read numpy files"""
        data = np.load(fpath)
        return data

    @comm_timing()
    def read_factor(self, fpath):
        """Read factors as chunks and stack them"""
        files = glob.glob(fpath)
        data = []
        if len(files) == 1:
            data = self.custom_read_npy(files[0])
        else:
            # Sort so chunk order matches rank order (A_0, A_1, ...).
            for file in np.sort(files):
                data.append(self.custom_read_npy(file))
        return data, len(files)

    @comm_timing()
    def load_factors(self):
        r"""Load the final stacked factors for visualization"""
        W_data, ct_W = self.read_factor(self.W_path)
        H_data, ct_H = self.read_factor(self.H_path)
        # W chunks stack vertically (row-split); H chunks stack horizontally.
        if ct_W > 1: W_data = np.vstack((W_data))
        if ct_H > 1:
            if ct_W > 1:
                # On a 2D grid, H chunk files are not in column order; remap
                # rank indices to block indices first.
                H_idxs = transform_H_index(self.p_grid).rankidx2blkidx()
                H_data = np.hstack(([H_data[i] for i in H_idxs]))
            else:
                H_data = np.hstack((H_data))
        return W_data, H_data
|
# Demonstrate that a single character is already a str in Python.
charA = 'A'
# str() on a one-character string is a no-op; kept to mirror the original.
stringA = str(charA)
charStr = "Character " + charA
for line in (stringA, charStr):
    print(line)
# Holds information about the e-mail server.
smtpservername = "smtp.gmail.com"
smtpusername = "bunke309@gmail.com"
myaddress = "bunke309@gmail.com"
myname = "Hunke"
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import VGG
class PerceptualLoss(nn.Module):
    """
    PyTorch module for perceptual loss.

    Parameters
    ---
    model_type : str
        select from [`vgg11`, `vgg11bn`, `vgg13`, `vgg13bn`,
                     `vgg16`, `vgg16bn`, `vgg19`, `vgg19bn`, ].
    target_layer : str
        the layer name you want to compare.
    norm_type : str
        the type of norm, select from ['mse', 'fro']
    """
    # NOTE: docstring fixed — the parameter is `target_layer` (singular),
    # matching __init__'s signature.

    def __init__(self,
                 model_type: str = 'vgg19',
                 target_layer: str = 'relu5_1',
                 norm_type: str = 'fro'):
        super(PerceptualLoss, self).__init__()
        assert norm_type in ['mse', 'fro']
        self.model = VGG(model_type=model_type)
        self.target_layer = target_layer
        self.norm_type = norm_type

    def forward(self, x, y):
        """Compare VGG features of x and y at the target layer."""
        x_feat, *_ = self.model(x, [self.target_layer]).values()
        y_feat, *_ = self.model(y, [self.target_layer]).values()
        # frobenius norm in the paper, but mse loss is actually used in
        # https://github.com/ZZUTK/SRNTT/blob/master/SRNTT/model.py#L376.
        if self.norm_type == 'mse':
            loss = F.mse_loss(x_feat, y_feat)
        elif self.norm_type == 'fro':
            loss = torch.norm(x_feat - y_feat, p='fro')
        else:
            # BUG FIX: without this branch `loss` would be unbound if
            # norm_type were ever invalid (e.g. attribute mutated after init).
            raise ValueError(f"unsupported norm_type: {self.norm_type}")
        return loss
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 21:56:24 2018

@author: raja

Build a word cloud from the third ':'-separated field of each line in a
tab-delimited text file.
"""
import pandas as pd
import re
# BUG FIX: `plt` was used below but matplotlib.pyplot was never imported,
# raising NameError before the figure could be shown.
import matplotlib.pyplot as plt
from wordcloud import WordCloud

df = pd.read_csv('/home/raja/Desktop/test.txt', header=None, delimiter="\t")
# Split every line on ':' so each piece becomes a column.
final = []
for one in df[0]:
    new = one.split(':')
    final.append(new)
f1 = pd.DataFrame(final)
f1 = f1.fillna(value=" ")
# Third field of each line is the message text used for the cloud.
f2 = f1[2]
f2_list = list(f2)
ham_words = ' '.join(f2_list)
ham_wc = WordCloud(width = 512, height = 512).generate(ham_words)
plt.figure(figsize = (10, 8), facecolor = 'k')
plt.imshow(ham_wc)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.show()
|
#Prompt: To save James Bond's reputation as a gambler, you must implement a prediction of whether the Casino Royale is cheating on every observed roll. You are given a spreadsheet of dice rolls and their probabilities under two models: F, "fair dice" where all rolls have equal probability (1/6); and L, "loaded dice" which rolls a 6 half the time, and any of the other rolls (1, 2, 3, 4, 5) with 10% probability each. The casino switches from F to L with 5% probability on each roll, and from L to F with 10% probability on each roll.
#Emission Probabilities
#P(1|F) = 1/6, P(2|F) = 1/6, ..., P(6|F) = 1/6
#P(1|L) = 1/10, P(2|L) = 1/10, ..., P(5|L) = 1/10, P(6|L) = 1/2
#Transition Probabilities
#P(F|L) = 0.10, P(L|L) = 0.90
#P(L|F) = 0.05, P(F|F) = 0.95
#Given the likelihood, a list of dice rolls, calculate the forward probability under two models– F (fair dice), and L (loaded dice)
#Input:
# iks: a list of (p(roll|F), p(roll|L)) tuples, for each successive dice roll
# ptrans: the transition matrix in the form ((p(F|F), p(L|F)), (p(F|L), p(L|L))
# prior: a tuple of the prior probabilities (p(F), p(L))
#Sample Input:
# params,0.95,0.9,0.6666666666666666,0.3333333333333333
# 6,0.16666666666666666,0.5
# 1,0.16666666666666666,0.1
# 1,0.16666666666666666,0.1
# 2,0.16666666666666666,0.1
# 6,0.16666666666666666,0.5
#Output:
#forward probability f_F and f_L
#Sample Output:
# 0.1111111111111111,0.16666666666666666
# 0.02037037037037037,0.015555555555555555
# 0.003484567901234567,0.001501851851851852
# 0.0005767541152263372,0.00015258950617283955
# 9.386256001371738e-05,8.308413065843624e-05
def calc_forward(liks, ptrans, prior): # don't change this line
    '''liks is list of (p(roll|F), p(roll|L)); ptrans is p[from][to];
       prior is (p(F), p(L))'''
    # Initialise with the priors weighted by the first roll's emission probs.
    f_prev = (liks[0][0] * prior[0], liks[0][1] * prior[1])
    forward = [f_prev]
    # Standard HMM forward recursion over the remaining rolls.
    for lik_F, lik_L in liks[1:]:
        into_F = f_prev[0] * ptrans[0][0] + f_prev[1] * ptrans[1][0]
        into_L = f_prev[0] * ptrans[0][1] + f_prev[1] * ptrans[1][1]
        f_prev = (into_F * lik_F, into_L * lik_L)
        forward.append(f_prev)
    return forward
#Calculate backward probability b_F and b_L
# Sample Input:
# params,0.95,0.9,0.6666666666666666,0.3333333333333333
# 2,0.16666666666666666,0.1
# 6,0.16666666666666666,0.5
# 2,0.16666666666666666,0.1
# 1,0.16666666666666666,0.1
# 6,0.16666666666666666,0.5
# Sample Output:
# 0.1111111111111111,0.03333333333333333,0.0009363167438271605,0.002146471450617284
# 0.018148148148148146,0.01777777777777778,0.005190787037037037,0.0045776851851851855
# 0.0031697530864197524,0.0016907407407407414,0.03136111111111111,0.04505555555555556
# 0.0005300565843621397,0.00016801543209876552,0.18333333333333335,0.4666666666666667
# 8.672588305898487e-05,8.8858359053498e-05,1.0,1.0
def calc_fb(liks, ptrans, prior): # don't change this line
    '''liks is list of (p(roll|F), p(roll|L)); ptrans is p[from][to];
       prior is (p(F), p(L))'''
    n = len(liks)
    # --- forward pass: f_t(s) = p(x_1..x_t, state_t = s) ---
    forward = [(liks[0][0] * prior[0], liks[0][1] * prior[1])]
    for t in range(1, n):
        prev_F, prev_L = forward[t - 1]
        into_F = prev_F * ptrans[0][0] + prev_L * ptrans[1][0]
        into_L = prev_F * ptrans[0][1] + prev_L * ptrans[1][1]
        forward.append((into_F * liks[t][0], into_L * liks[t][1]))
    # --- backward pass: b_t(s) = p(x_{t+1}..x_n | state_t = s) ---
    backward = [[0, 0] for _ in range(n)]
    backward[n - 1][0] = 1.
    backward[n - 1][1] = 1.
    for t in range(n - 2, -1, -1):
        nxt_F, nxt_L = backward[t + 1]
        em_F, em_L = liks[t + 1]
        backward[t][0] = nxt_F * ptrans[0][0] * em_F + nxt_L * ptrans[0][1] * em_L
        backward[t][1] = nxt_F * ptrans[1][0] * em_F + nxt_L * ptrans[1][1] * em_L
    return forward, backward
#Calculate p(obs), the likelihood by f_F * b_F + f_L * b_L
#p(obs) should be the same for each successive dice roll!
# Sample Input:
# params,0.95,0.9,0.6666666666666666,0.3333333333333333
# 2,0.16666666666666666,0.1
# 4,0.16666666666666666,0.1
# 1,0.16666666666666666,0.1
# 4,0.16666666666666666,0.1
# 5,0.16666666666666666,0.1
#Sample Output:
# 0.1111111111111111,0.03333333333333333,0.0006791936882716048,0.000210080524691358,8.246864951989023e-05
# 0.018148148148148146,0.003555555555555556,0.004240731481481481,0.0015489074074074073,8.246864951989025e-05
# 0.0029327160493827155,0.0004107407407407409,0.026394444444444443,0.012322222222222222,8.246864951989023e-05
# 0.00047119238683127556,5.163024691358028e-05,0.16333333333333333,0.10666666666666667,8.246864951989023e-05
# 7.546596536351163e-05,7.0026841563786055e-06,1.0,1.0,8.246864951989023e-05
def calc_fb(liks, ptrans, prior): # don't change this line
    '''liks is list of (p(roll|F), p(roll|L)); ptrans is p[from][to];
       prior is (p(F), p(L))'''
    n = len(liks)
    # --- forward pass ---
    forward = [(liks[0][0] * prior[0], liks[0][1] * prior[1])]
    for t in range(1, n):
        prev_F, prev_L = forward[t - 1]
        into_F = prev_F * ptrans[0][0] + prev_L * ptrans[1][0]
        into_L = prev_F * ptrans[0][1] + prev_L * ptrans[1][1]
        forward.append((into_F * liks[t][0], into_L * liks[t][1]))
    # --- backward pass ---
    backward = [[0, 0] for _ in range(n)]
    backward[n - 1][0] = 1.
    backward[n - 1][1] = 1.
    for t in range(n - 2, -1, -1):
        nxt_F, nxt_L = backward[t + 1]
        em_F, em_L = liks[t + 1]
        backward[t][0] = nxt_F * ptrans[0][0] * em_F + nxt_L * ptrans[0][1] * em_L
        backward[t][1] = nxt_F * ptrans[1][0] * em_F + nxt_L * ptrans[1][1] * em_L
    # p(obs) = f_t(F)b_t(F) + f_t(L)b_t(L); identical at every t.
    pobs = [f[0] * b[0] + f[1] * b[1] for f, b in zip(forward, backward)]
    return forward, backward, pobs
#get ppost: a list of (p(F|obs), p(L|obs)) posterior probability tuples for the F vs. L hidden state at each successive dice roll.
# Sample Input:
# params,0.95,0.9,0.6666666666666666,0.3333333333333333
# 5,0.16666666666666666,0.1
# 5,0.16666666666666666,0.1
# 3,0.16666666666666666,0.1
# 2,0.16666666666666666,0.1
# 6,0.16666666666666666,0.5
# Sample Output:
# 0.1111111111111111,0.03333333333333333,0.0008447630401234568,0.0004985047839506174,0.00011047938614540466,0.8495934245161588,0.15040657548384123
# 0.018148148148148146,0.003555555555555556,0.005190787037037037,0.0045776851851851855,0.00011047938614540466,0.8526764624637937,0.14732353753620628
# 0.0029327160493827155,0.0004107407407407409,0.03136111111111111,0.04505555555555556,0.00011047938614540466,0.8324922602392261,0.1675077397607739
# 0.00047119238683127556,5.163024691358028e-05,0.18333333333333335,0.4666666666666667,0.00011047938614540466,0.781913024076486,0.21808697592351392
# 7.546596536351163e-05,3.5013420781893024e-05,1.0,1.0,0.00011047938614540466,0.6830773413620257,0.31692265863797425
def calc_fb(liks, ptrans, prior): # don't change this line
    '''Forward-backward algorithm for a two-state (F=fair, L=loaded) HMM,
    including state posteriors.

    liks is list of (p(roll|F), p(roll|L)); ptrans is p[from][to];
    prior is (p(F), p(L)).

    Returns (forward, backward, pobs, ppost) where ppost[t] =
    (p(F|obs), p(L|obs)) is the posterior over the hidden state at roll t.

    NOTE: this redefinition shadows an earlier calc_fb in this file.
    Fixed in review: removed dead `pass` statements and comments that
    hard-coded index 4 (only correct for exactly five rolls).
    '''
    # Forward pass: f_t(s) = [sum_s' f_{t-1}(s') * ptrans[s'][s]] * lik_t(s)
    forward = [(liks[0][0] * prior[0], liks[0][1] * prior[1])]
    for i in range(1, len(liks)):
        f_F = (forward[i-1][0]*ptrans[0][0] + forward[i-1][1]*ptrans[1][0])*liks[i][0]
        f_L = (forward[i-1][0]*ptrans[0][1] + forward[i-1][1]*ptrans[1][1])*liks[i][1]
        forward.append((f_F, f_L))
    # Backward pass: b_T(s) = 1; b_t(s) = sum_s' ptrans[s][s'] * lik_{t+1}(s') * b_{t+1}(s')
    num_rolls = len(liks)
    backward = [[0.0, 0.0] for _ in range(num_rolls)]
    backward[num_rolls-1][0] = 1.
    backward[num_rolls-1][1] = 1.
    for i in range(num_rolls-2, -1, -1):
        backward[i][0] = (backward[i+1][0]*ptrans[0][0]*liks[i+1][0]) + (backward[i+1][1]*ptrans[0][1]*liks[i+1][1])
        backward[i][1] = (backward[i+1][0]*ptrans[1][0]*liks[i+1][0]) + (backward[i+1][1]*ptrans[1][1]*liks[i+1][1])
    # p(obs) — the same value at every position t.
    pobs = [forward[i][0]*backward[i][0] + forward[i][1]*backward[i][1]
            for i in range(num_rolls)]
    # Posterior: p(state|obs) at t = f_t(state) * b_t(state) / p(obs).
    p_all = pobs[0]
    ppost = [(forward[i][0]*backward[i][0]/p_all, forward[i][1]*backward[i][1]/p_all)
             for i in range(num_rolls)]
    return forward, backward, pobs, ppost
|
#
# api_microformat.py
#
# David Janes
# 2008.12.28
#
# Copyright 2008 David Janes
#
import os
import os
import sys
import urllib
import types
import pprint
import types
import bm_extract
import bm_uri
import bm_api
from bm_log import Log
import hatom
import hcalendar
import hcard
import hdocument
class Microformat(bm_api.APIBase):
    """Base class for microformat scrapers exposed through the bm_api API.

    Subclasses implement Fetch() to populate _meta (page-level metadata)
    and _items (parsed entries).
    """

    # NOTE(review): _meta and _items are mutable class-level defaults shared
    # by every instance until Fetch() rebinds them on the instance — confirm
    # no subclass mutates them in place.
    _parser = None
    _meta = {}
    _items = []
    _required_properties = [ "uri", ]

    def __init__(self, **ad):
        bm_api.APIBase.__init__(self, **ad)

    def GetMeta(self):
        """Return page-level metadata, fetching/parsing on first use."""
        self.Fetch()
        return self._meta

    def IterItems(self):
        """Yield parsed items, adapted to Atom-like dicts when AtomLike()."""
        self.Fetch()
        for item in self._items:
            if self.AtomLike():
                yield self.CustomizeAtomItem(item)
            else:
                yield item

    def Fetch(self):
        """Populate _meta/_items; must be provided by subclasses."""
        raise NotImplementedError

    def CustomizeReset(self):
        # Drop the cached parser so the next Fetch() re-parses the page.
        self._parser = None

    def ExtractCategories(self, itemd):
        """Move microformat 'tag' entries into Atom-style 'category' dicts."""
        try:
            tags = itemd.pop("tag")
            if tags:
                cats = []
                for tagd in tags:
                    cats.append({
                        "term" : tagd["@@title"],
                    })
                itemd["category"] = cats
        except KeyError:
            # Item has no tags — nothing to do.
            pass
class SimpleMicroformat(Microformat):
    """Microformat whose Fetch() runs a single parser class over the page."""

    _parser_class = None   # subclasses set this to the parser class to instantiate
    _parserd = {}          # extra keyword arguments passed to the parser

    def __init__(self, **ad):
        Microformat.__init__(self, **ad)

    def Fetch(self):
        """Parse self.uri once, caching the parser, items and metadata."""
        if self._parser:
            return  # already fetched
        self.CustomizeValidate()
        self._parser = self._parser_class(page_uri = self.uri, at_prefix = '@@', **self._parserd)
        self._parser.PragmaCLI()
        self._items = list(self._parser.Iterate())
        self._meta = {
            "link" : self.uri,
            "title" : self._parser.document_title,
        }
        # Only record an 'updated' stamp when the page declares a date.
        if self._parser.document_date:
            self._meta['updated'] = bm_extract.coerce_datetime(self._parser.document_date).isoformat()
class HAtom(SimpleMicroformat):
    """hAtom microformat scraper (weblog entries)."""

    _parser_class = hatom.MicroformatHAtom
    # Mapping of Atom field name -> hAtom class name.
    _atom_item = {
        "content" : "entry-content",
        "summary" : "entry-summary",
        "title" : "entry-title",
        "link" : "bookmark",
        "published" : "published",
    }

    def CustomizeAtomItem(self, itemd):
        """Flatten the parsed 'author' hCard(s) into Atom-ish fields."""
        try:
            author = itemd.pop("author")
            if author:
                itemd["author"] = bm_extract.as_string(author, "@@title")
                if bm_extract.is_list(author) or bm_extract.is_list_like(author):
                    # NOTE(review): this file dates from 2008 / Python 2, where
                    # map() returns a list; under Python 3 this would store a
                    # lazy map object — confirm before porting.
                    itemd["hcard:author"] = map(lambda a: hcard.decompose(a, "hcard"), author)
                elif bm_extract.is_dict(author):
                    itemd["hcard:author"] = hcard.decompose(author, "hcard")
        except KeyError:
            # Entry has no author.
            pass
        self.ExtractCategories(itemd)
        return bm_api.APIBase.CustomizeAtomItem(self, itemd)
class HCalendar(SimpleMicroformat):
    """hCalendar microformat scraper (events)."""

    _parser_class = hcalendar.MicroformatHCalendar

    def CustomizeAtomItem(self, itemd):
        """Map a parsed hCalendar item onto Atom-like dictionary keys."""
        title = bm_extract.as_string(itemd, "@@title")
        content = bm_extract.as_string(itemd, "@@html")
        link = itemd.find('url') or bm_extract.as_string(itemd, "@@uri")
        return {
            "title" : title,
            "content" : content,
            "link" : link,
            "hcalendar:hcalendar" : hcalendar.decompose(itemd, "hcalendar"),
        }
class HCard(SimpleMicroformat):
    """hCard microformat scraper (contact cards)."""

    _parser_class = hcard.MicroformatHCard

    def CustomizeAtomItem(self, itemd):
        """Map a parsed hCard item onto Atom-like dictionary keys."""
        title = bm_extract.as_string(itemd, "@@title")
        content = bm_extract.as_string(itemd, "@@html")
        link = itemd.find('url') or bm_extract.as_string(itemd, "@@uri")
        return {
            "title" : title,
            "content" : content,
            "link" : link,
            "hcard:hcard" : hcard.decompose(itemd, "hcard"),
        }
class Document(Microformat):
    """Generic page scraper: exposes one parsed field (e.g. "a" or "image")."""

    field = "a"   # which field of the parsed document to expose as items
    _properties = [ "uri", "field" ]
    _required_properties = [ "uri", "field" ]

    def __init__(self, **ad):
        Microformat.__init__(self, **ad)

    def Fetch(self):
        """Parse self.uri and keep only the requested field of the first result."""
        if self._parser:
            return  # already fetched
        self.CustomizeValidate()
        self._parser = hdocument.MicroformatDocument(page_uri = self.uri, at_prefix = '@@')
        self._parser.PragmaCLI()
        for resultd in self._parser.Iterate():
            self._items = resultd.get(self.field) or []
            break  # only the first parsed result is used
class Feeds(SimpleMicroformat):
    """Scraper that lists the feed URIs advertised by a page."""

    _parser_class = hdocument.MicroformatDocument

    def IterItems(self):
        """Yield a single summary dict holding the page's feed 'src' URIs."""
        self.Fetch()
        yield {
            "link" : self.uri,
            "title" : self._parser.document_title,
            "feeds" : [ item["src"] for item in self._items[0]["feed"] ],
        }
if __name__ == '__main__':
    # Ad-hoc smoke tests — set to_test to pick which scraper to exercise.
    # These hit live web pages, so they require network access.
    to_test = 'hcard'
    if to_test == 'hatom':
        api = HAtom(_atom_like = True)
        api.request = {
            "uri" : "http://tantek.com/",
        }
        count = -1
        for item in api.items:
            count += 1
            pprint.pprint(item)
##			Log(count = count, item = item)
            break  # only inspect the first entry
        pprint.pprint(api.meta)
    if to_test == 'hcalendar':
        api = HCalendar()
        api.request = {
            "uri" : "http://tantek.com/",
        }
        count = -1
        for item in api.items:
            count += 1
            pprint.pprint(item, width = 1)
    if to_test == 'hcard':
        api = HCard()
        api.request = {
            "uri" : "http://tantek.com/",
        }
        count = -1
        for item in api.items:
            count += 1
            pprint.pprint(item, width = 1)
    if to_test == 'feeds':
        api = Feeds()
        api.request = {
            "uri" : "http://code.davidjanes.com/blog/"
        }
        count = -1
        for item in api.items:
            count += 1
            pprint.pprint(item, width = 1)
    if to_test == 'document':
        # First pull all images, then all anchors, from the same page.
        api = Document()
        api.request = {
            "uri" : "http://twitter.com/dpjanes",
            "field" : "image",
        }
        count = -1
        for item in api.items:
            count += 1
            pprint.pprint(item, width = 1)
        api.request = {
            "uri" : "http://twitter.com/dpjanes",
            "field" : "a",
        }
        count = -1
        for item in api.items:
            count += 1
            pprint.pprint(item, width = 1)
|
from __future__ import annotations
import datetime
import json
import logging.config
import os
from pathlib import Path
from miranda.scripting import LOGGING_CONFIG
from miranda.storage import report_file_size
# Configure logging once at import time using the shared project config.
logging.config.dictConfig(LOGGING_CONFIG)

# Public API of this module.
__all__ = [
    "era5_variables",
    "eccc_rdrs_variables",
    "gather_agcfsr",
    "gather_agmerra",
    "gather_ecmwf",
    "gather_grnch",
    "gather_nex",
    "gather_nrcan_gridded_obs",
    "gather_raw_rdrs_by_years",
    "gather_rdrs",
    "gather_sc_earth",
    "gather_wfdei_gem_capa",
    "gather_emdna",
    "nasa_ag_variables",
    "nrcan_variables",
    "project_institutes",
    "sc_earth_variables",
    "wfdei_gem_capa_variables",
    "xarray_frequencies_to_cmip6like",
]
_data_folder = Path(__file__).parent / "data"


def _variables_attrs(filename: str) -> dict:
    """Return the "variables" mapping from a CF-attributes JSON file.

    Uses a context manager so file handles are closed promptly; the previous
    ``json.load(open(...))`` pattern leaked handles (and read the ECCC file
    twice).
    """
    with open(_data_folder / filename) as fh:
        return json.load(fh)["variables"]


# ECCC RDRS: expose both the raw (source) and CF-translated variable names.
_eccc_rdrs_attrs = _variables_attrs("eccc_rdrs_cf_attrs.json")
eccc_rdrs_variables = {
    "raw": list(_eccc_rdrs_attrs.keys()),
    "cf": [attrs["_cf_variable_name"] for attrs in _eccc_rdrs_attrs.values()],
}
era5_variables = _variables_attrs("ecmwf_cf_attrs.json").keys()
grnch_variables = ["T", "Tmin", "Tmax", "P"]
nrcan_variables = ["tasmin", "tasmax", "pr"]
nasa_ag_variables = _variables_attrs("nasa_cf_attrs.json").keys()
sc_earth_variables = ["prcp", "tdew", "tmean", "trange", "wind"]
wfdei_gem_capa_variables = _variables_attrs("usask_cf_attrs.json").keys()

# Institute responsible for each supported project/dataset.
project_institutes = {
    "cfsr": "ncar",
    "era5": "ecmwf",
    "era5-land": "ecmwf",
    "era5-land-monthly-means": "ecmwf",
    "era5-monthly": "ecmwf",
    "era5-pressure-levels": "ecmwf",
    "era5-pressure-levels-preliminary-back-extension": "ecmwf",
    "era5-pressure-monthly-means-levels-preliminary-back-extension": "ecmwf",
    "era5-single-levels": "ecmwf",
    "era5-single-levels-monthly-means": "ecmwf",
    "era5-single-levels-monthly-means-preliminary-back-extension": "ecmwf",
    "era5-single-levels-preliminary-back-extension": "ecmwf",
    "merra2": "nasa",
    "nrcan-gridded-10km": "nrcan",
    "wfdei-gem-capa": "usask",
    "rdrs-v21": "eccc",
    "NEX-GDDP-CMIP6": "nasa",
}

# Manually map xarray frequencies to CMIP6/CMIP5 controlled vocabulary.
# see: https://github.com/ES-DOC/pyessv-archive
xarray_frequencies_to_cmip6like = {
    "H": "hr",
    "D": "day",
    "W": "sem",
    "M": "mon",
    "Q": "qtr",  # TODO does this make sense? does not exist in cmip6 CV
    "A": "yr",
    "Y": "yr",
}
def _gather(
    name: str,
    variables: list[str],
    source: str | os.PathLike,
    glob_pattern: str,
    suffix: str | None = None,
    recursive: bool | None = False,
) -> dict[str, list[Path]]:
    """Collect files for each variable under *source* matching *glob_pattern*.

    Parameters
    ----------
    name : str
        Dataset name; also substituted into the pattern when *suffix* is given.
    variables : list[str]
        Variable names substituted into the pattern one at a time.
    source : str or os.PathLike
        Root folder to search (``~`` is expanded).
    glob_pattern : str
        Pattern with a ``{variable}`` placeholder (and ``{name}``/``{suffix}``
        when *suffix* is supplied).
    suffix : str, optional
        File suffix; when set, ``{name}`` and ``{suffix}`` are also formatted.
    recursive : bool, optional
        Search subfolders with ``rglob`` instead of ``glob``.

    Returns
    -------
    dict[str, list[pathlib.Path]]
        Mapping from *name* to the sorted matches (sorted per variable).
    """
    source = Path(source).expanduser()
    logging.info(f"Gathering {name} files from: {source.as_posix()}")
    in_files: list[Path] = []
    for variable in variables:
        if suffix:
            pattern = glob_pattern.format(variable=variable, name=name, suffix=suffix)
        else:
            pattern = glob_pattern.format(variable=variable)
        # sorted() already returns a list; the previous list(sorted(...)) was redundant.
        finder = source.rglob if recursive else source.glob
        in_files.extend(sorted(finder(pattern)))
    logging.info(
        f"Found {len(in_files)} files, totalling {report_file_size(in_files)}."
    )
    return {name: in_files}
def gather_ecmwf(
    project: str,
    path: str | os.PathLike,
    back_extension: bool = False,
    monthly_means: bool = False,
) -> dict[str, list[Path]]:
    """Gather ECMWF (ERA5-family) source data.

    Parameters
    ----------
    project : {"era5-single-levels", "era5-pressure-levels", "era5-land"}
    path : str or os.PathLike
    back_extension : bool
        Search the preliminary back-extension variant of the dataset.
    monthly_means : bool
        Search the monthly-means variant of the dataset.

    Returns
    -------
    dict[str, list[pathlib.Path]]
    """
    parts = [project]
    if monthly_means:
        parts.append("-monthly-means")
    if back_extension:
        parts.append("-preliminary-back-extension")
    name = "".join(parts)
    glob_pattern = "{variable}" + f"_*_{name}_*.nc"
    return _gather(name, era5_variables, source=path, glob_pattern=glob_pattern)
def gather_agmerra(path: str | os.PathLike) -> dict[str, list[Path]]:
    """Gather agMERRA source data.

    Parameters
    ----------
    path : str or os.PathLike
        Root directory holding the AgMERRA NetCDF files.

    Returns
    -------
    dict[str, list[pathlib.Path]]
        Mapping from the dataset name ("merra") to the matched files.
    """
    pattern = "AgMERRA_*_{variable}.nc4"
    return _gather("merra", nasa_ag_variables, source=path, glob_pattern=pattern)
def gather_agcfsr(path: str | os.PathLike) -> dict[str, list[Path]]:
    """Gather agCFSR source data.

    Parameters
    ----------
    path : str or os.PathLike
        Root directory holding the AgCFSR NetCDF files.

    Returns
    -------
    dict[str, list[pathlib.Path]]
        Mapping from the dataset name ("cfsr") to the matched files.
    """
    pattern = "AgCFSR_*_{variable}.nc4"
    return _gather("cfsr", nasa_ag_variables, source=path, glob_pattern=pattern)
def gather_nrcan_gridded_obs(path: str | os.PathLike) -> dict[str, list[Path]]:
    """Gather NRCan Gridded Observations source data.

    Parameters
    ----------
    path : str or os.PathLike
        Root directory holding the NRCan NetCDF files.

    Returns
    -------
    dict[str, list[pathlib.Path]]
        Mapping from the dataset name ("nrcan") to the matched files.
    """
    pattern = "*{variable}_*.nc"
    return _gather("nrcan", nrcan_variables, source=path, glob_pattern=pattern)
def gather_wfdei_gem_capa(path: str | os.PathLike) -> dict[str, list[Path]]:
    """Gather WFDEI-GEM-CaPa source data.

    Parameters
    ----------
    path : str or os.PathLike
        Root directory holding the WFDEI-GEM-CaPa NetCDF files.

    Returns
    -------
    dict[str, list[pathlib.Path]]
        Mapping from the dataset name ("wfdei-gem-capa") to the matched files.
    """
    pattern = "{variable}_*.nc"
    return _gather(
        "wfdei-gem-capa", wfdei_gem_capa_variables, source=path, glob_pattern=pattern
    )
def gather_sc_earth(path: str | os.PathLike) -> dict[str, list[Path]]:
    """Gather SC-Earth source data.

    Parameters
    ----------
    path : str or os.PathLike
        Root directory holding the SC-Earth NetCDF files.

    Returns
    -------
    dict[str, list[pathlib.Path]]
        Mapping from the dataset name ("sc-earth") to the matched files.
    """
    pattern = "SC-Earth_{variable}_*.nc"
    return _gather("sc-earth", sc_earth_variables, source=path, glob_pattern=pattern)
def gather_rdrs(
    name: str, path: str | os.PathLike, suffix: str, key: str
) -> dict[str, dict[str, list[Path]]]:
    """Gather RDRS processed source data, one file list per variable.

    Parameters
    ----------
    name : str
        Dataset name substituted into the glob pattern.
    path : str or os.PathLike
        Root folder; each variable lives in a subfolder of the same name.
    suffix : str
        File suffix to match (e.g. "nc").
    key : {"raw", "cf"}
        Indicating which variable name dictionary to search for.

    Returns
    -------
    dict[str, dict[str, list[pathlib.Path]]]
    """
    # Normalize unconditionally: previously only str inputs were expanded,
    # so a Path argument beginning with "~" was silently left unexpanded.
    path = Path(path).expanduser()
    files: dict[str, dict[str, list[Path]]] = {name: {}}
    for vv in eccc_rdrs_variables[key]:
        found = _gather(
            name,
            [vv],
            source=path.joinpath(vv),
            glob_pattern="{variable}_*_{name}_*.{suffix}",
            suffix=suffix,
            recursive=True,
        )
        files[name][vv] = found[name]
    return files
def gather_raw_rdrs_by_years(
    path: str | os.PathLike,
) -> dict[str, dict[str, list[Path]]]:
    """Gather raw RDRS files for preprocessing, grouped by year.

    RDRS time stamps start at noon and flow into subsequent months, so each
    year's file set also includes the last file of the previous December
    (when present) in order to produce complete hourly monthly files.

    Parameters
    ----------
    path : str or os.PathLike

    Returns
    -------
    dict[str, dict[str, list[pathlib.Path]]]
    """
    root = Path(path)
    current_year = datetime.datetime.now().year
    year_sets: dict[str, list[Path]] = {}
    for year in range(1950, current_year + 1):
        previous_december = sorted(root.glob(f"{year - 1}12*.nc"))
        collected = previous_december[-1:]  # keep only the last December file
        collected.extend(sorted(root.glob(f"{year}*.nc")))
        year_sets[str(year)] = collected
    return {"rdrs-v21": year_sets}
def gather_grnch(path: str | os.PathLike) -> dict[str, list[Path]]:
    """Gather raw ETS-GRNCH files for preprocessing.

    Parameters
    ----------
    path : str or os.PathLike

    Returns
    -------
    dict[str, list[pathlib.Path]]
    """
    # GRNCH-ETS source data
    root = Path(path)
    logging.info(f"Gathering GRNCH from: {root.as_posix()}")
    found: list[Path] = []
    for variable in grnch_variables:
        for year in range(1970, 2020):
            found.extend(root.rglob(f"{variable}_{year}.nc"))
    logging.info(
        f"Found {len(found)} files, totalling {report_file_size(found)}."
    )
    # NOTE(review): the returned key is "cfsr", which looks copy-pasted from
    # gather_agcfsr — confirm whether it should be "grnch".
    return dict(cfsr=sorted(found))
def gather_nex(
    path: str | os.PathLike,
) -> dict[str, list[Path]]:
    """Gather raw NEX files for preprocessing.

    All files that belong to one dataset (one leaf directory, nine levels
    below *path*) are grouped under a single dictionary entry.

    Parameters
    ----------
    path : str or os.PathLike

    Returns
    -------
    dict[str, list[pathlib.Path]]
    """
    root = Path(path)
    grouped: dict[str, list[Path]] = {}
    # Each nine-level-deep directory is one dataset.
    for dataset in root.glob("*/*/*/*/*/*/*/*/*/"):
        grouped[str(dataset)] = sorted(dataset.glob("*.nc"))
    return grouped
def gather_emdna(
    path: str | os.PathLike,
) -> dict[str, list[Path]]:
    """Gather raw EMDNA files for preprocessing.

    Files sharing an ensemble member ("001" .. "100") are grouped together,
    plus one entry for the optimal-interpolation ("OI") estimates.

    Parameters
    ----------
    path : str or os.PathLike

    Returns
    -------
    dict[str, list[pathlib.Path]]
    """
    root = Path(path)
    # 100 ensemble members, zero-padded to three digits.
    grouped = {
        f"{member:03d}": sorted(
            root.glob(f"EMDNA_estimate/*/EMDNA_*.{member:03d}.nc4")
        )
        for member in range(1, 101)
    }
    # Optimal-interpolation estimates live in their own folder.
    grouped["OI"] = sorted(root.glob("OI_estimate/*.nc4"))
    return grouped
|
import numpy as np
import flask
from flask import Flask, request
import json
from nanonet.features import events_to_features
from nanonet.segment import segment
from nanonet import decoding, nn
from nanonet.util import kmers_to_sequence
import tempfile
import pkg_resources
import subprocess
import timeit
import os
app = Flask(__name__)  # module-level WSGI application object
def events_numpy_to_dict(events):
    """Convert a structured numpy event array into a JSON-serializable dict.

    The spread column is named "stdv" in some event tables and "variance"
    in others; either one is exposed under the "stdv" key. (The "variance"
    values are passed through unchanged — not square-rooted — matching the
    original behaviour.)
    """
    # The three always-present columns need no fallback; previously they
    # were assigned twice (once in the try block, again in the handler).
    d = {
        "start": events["start"].tolist(),
        "length": events["length"].tolist(),
        "mean": events["mean"].tolist(),
    }
    try:
        d["stdv"] = events["stdv"].tolist()
    except (ValueError, KeyError):
        # BUG FIX: older numpy raised ValueError for a missing field name,
        # but modern numpy raises KeyError — catch both so the "variance"
        # fallback still works.
        d["stdv"] = events["variance"].tolist()
    return d
def events_dict_to_numpy(d):
    """Build a structured numpy event array from a dict of per-field lists."""
    fields = ("start", "length", "mean", "stdv")
    events = np.empty(len(d["start"]), dtype=[(name, float) for name in fields])
    for name in fields:
        events[name] = np.array(d[name], dtype=float)
    return events
__ETA__ = 1e-300  # tiny floor added to transition probabilities before log() to avoid log(0)
def _basecall(events,_id, min_prob=1e-5, trans = None, trim=10):
    """Run the nanonet neural network and Viterbi decoding over *events*.

    events: structured event array; _id: read identifier (unused here);
    min_prob: probability floor mixed into the posterior; trans: optional
    transition estimate to refine; trim: number of feature rows dropped
    from each end. Returns the basecalled sequence, or None when no events
    survive the bad-kmer filter.
    """
    modelfile = os.path.abspath(pkg_resources.resource_filename('nanonet', 'data/default_template.npy'))
    # .item() unwraps the 0-d object array holding the pickled network.
    network = np.load(modelfile).item()
    features = events_to_features(events, window=[-1, 0, 1])
    features = features[trim:-trim]
    post = network.run(features.astype(nn.dtype))
    kmers = network.meta['kmers']
    # Do we have an XXX kmer? Strip out events where XXX most likely,
    # and XXX states entirely
    if kmers[-1] == 'X'*len(kmers[-1]):
        bad_kmer = post.shape[1] - 1
        max_call = np.argmax(post, axis=1)
        good_events = (max_call != bad_kmer)
        post = post[good_events]
        post = post[:, :-1]
    if len(post) == 0:
        return None
    # Renormalize rows, then floor with min_prob so no state has zero probability.
    weights = np.sum(post, axis=1).reshape((-1,1))
    post /= weights
    post = min_prob + (1.0 - min_prob) * post
    trans = decoding.estimate_transitions(post, trans=trans)
    # __ETA__ guards against log(0) for impossible transitions.
    score, states = decoding.decode_profile(post, trans=np.log(__ETA__ + trans), log=False)
    # Form basecall
    kmer_path = [kmers[i] for i in states]
    seq = kmers_to_sequence(kmer_path)
    return seq
def run_bwa_mem(f):
    """Align FASTA file *f* against the bundled M. tuberculosis reference.

    Runs `bwa mem` with ONT settings and returns its SAM output; raises
    subprocess.CalledProcessError if bwa exits non-zero.
    """
    ref = os.path.join(os.path.dirname(__file__), 'data/NC_000962.3.fasta')
    cmd = ['bwa', 'mem', '-v', '1', '-x', 'ont2d', ref, f]
    return subprocess.check_output(cmd)
def is_tb(sam):
    """Return truthy when the third SAM line exists and its FLAG field is not 4 (unmapped)."""
    lines = sam.split("\n")
    record = lines[2]
    return record and int(record.split('\t')[1]) != 4
@app.route('/', methods=['GET','POST'])
def process_events():
    """POST: basecall the posted event data, align it with bwa, and report
    whether the read maps to the TB reference.

    GET: return a usage hint containing sample event data.
    """
    if request.method == 'POST':
        t0 = timeit.default_timer()
        # The payload may arrive as a dict, a JSON string, or even
        # doubly-encoded JSON — unwrap until we hold a dict.
        if isinstance(request.json,dict):
            data = request.json
        else:
            data = json.loads(request.json)
            while not isinstance(data,dict):
                data = json.loads(data)
        _id= data.get("id", "")
        events = events_dict_to_numpy(data)
        events, _ = segment(events, section='template')
        t1 = timeit.default_timer()
        # print "time to convert events", t1-t0
        seq = _basecall(events, _id)
        # print (_id, seq)
        t1a = timeit.default_timer()
        # print "time to basecall", t1a-t1
        # Write the basecall to a temporary FASTA file for alignment.
        # NOTE(review): os.write expects bytes on Python 3; these %-formatted
        # str arguments suggest this service targets Python 2 — confirm.
        channel,tmpf = tempfile.mkstemp()
        os.write(channel,">%s\n" % _id)
        os.write(channel,"%s\n" % seq)
        # Append every basecall to a shared log file as well.
        ff=open("/tmp/BCALLS","a")
        ff.write(">{0}\n{1}\n".format (_id,seq))
        ff.close()
        os.close(channel)
        t1b = timeit.default_timer()
        # print "time to mktmp", t1b-t1a
        outsam = run_bwa_mem(tmpf)
        os.unlink(tmpf)
        t2 = timeit.default_timer()
        return flask.jsonify({"id" : _id, "is_tb" : is_tb(outsam), "response_time" : t2-t0})
    else:
        # GET request: show an example POST using the bundled sample events.
        sdata = os.path.join(os.path.dirname(__file__), 'data/sample_events.json')
        with open(sdata,"r") as infile:
            data = json.load(infile)
        return """<p>Please POST event data. e.g. </p><p>
curl -H "Content-Type: application/json" -X POST -d '%s' http://localhost:5000/
</p>
""" % (str(data))
if __name__ == "__main__":
    # Development server; binds all interfaces. Use a WSGI server in production.
    app.run(host='0.0.0.0')
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_private_endpoint_connections_adt_api import *
from .get_private_endpoint_connections_comp import *
from .get_private_endpoint_connections_for_edm import *
from .get_private_endpoint_connections_for_mip_policy_sync import *
from .get_private_endpoint_connections_for_scc_powershell import *
from .get_private_endpoint_connections_sec import *
from .getprivate_link_services_for_edm_upload import *
from .getprivate_link_services_for_m365_compliance_center import *
from .getprivate_link_services_for_m365_security_center import *
from .getprivate_link_services_for_mip_policy_sync import *
from .getprivate_link_services_for_o365_management_activity_api import *
from .getprivate_link_services_for_scc_powershell import *
from .private_endpoint_connections_adt_api import *
from .private_endpoint_connections_comp import *
from .private_endpoint_connections_for_edm import *
from .private_endpoint_connections_for_mip_policy_sync import *
from .private_endpoint_connections_for_scc_powershell import *
from .private_endpoint_connections_sec import *
from .private_link_services_for_edm_upload import *
from .private_link_services_for_m365_compliance_center import *
from .private_link_services_for_m365_security_center import *
from .private_link_services_for_mip_policy_sync import *
from .private_link_services_for_o365_management_activity_api import *
from .private_link_services_for_scc_powershell import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
    # Static type checkers see a real import with full type information.
    import pulumi_azure_native.m365securityandcompliance.v20210325preview as __v20210325preview
    v20210325preview = __v20210325preview
else:
    # At runtime the subpackage is only imported on first attribute access.
    v20210325preview = _utilities.lazy_import('pulumi_azure_native.m365securityandcompliance.v20210325preview')
|
# Do, or do not. There is no try. --Yoda
###############
# CHAPTER 2: Py Ingredients: Numbers, Strings, and Variables
##############
# booleans: which have the values True and False
# integers: whole numbers such as 42 and 10000000
# floats: numbers with decimal points such as 3.12, or sometimes exponents like 10.0e8
# strings: sequences of text characters
# In a way, these are like atoms.
# variables: names that refer to values in the computer's memory, which we can define and use
x= 7
print(x)
# variables are just names
# variables are just names
b=x
b
#
## use "type()" to find the type of anything — similar to "class()" in R
type(b)
#Finally, don’t use any of these for variable names, because they are Python’s reserved
#words:
#False class finally is return
#None continue for lambda try
#True def from nonlocal while
#and del global not with
#as elif if or yield
#assert else import pass
#break except in raise
##################
# "%" modulus (remainder), 7%3 which will give 1
5
# / carries out floating-point (decimal) division
# // performs integer (truncating) division
9/5
9//5
5/0  # NOTE: raises ZeroDivisionError
a=95
a= a -3
a
# or: You can combine the arithmetic operators with assignment by putting the operator before the =.
a -=3
a *= 2
a +=8
a /= 3
a= 13
a //=4
a
# to get the remainder of a division use "%"
9 % 5
# use // to get the truncated quotient:
9 // 5
# to get both the quotient and the remainder at once:
divmod(9,5)
# To change other Python data types to an integer, use the int() function:
int(True)
int(8.3)
int('8.4')  # NOTE: raises ValueError — the string is not an integer literal
int('99')
## The boolean value False is treated as 0 or 0.0 when mixed with integers or floats, and
# True is treated as 1 or 1.0:
True+1
False+1
########
# Floats:
########
# to convert other types into a float use the "float()" function:
float(True)
float(8)
float('33')
float('1.0e4')
float('-22')
###########
## Strings:
###########
# Unlike other languages, strings in Python are immutable
# You make a Python string by enclosing characters in either single quotes or double
# quotes, as demonstrated in the following:
'haha'
"haha"
'"hah"'
# if we have multiple lines then use '''triple-quoted strings'''
poem2 = '''I do not like thee, Doctor Fell.
... The reason why, I cannot tell.
... But this I know, and know full well:
... I do not like thee, Doctor Fell.
... '''
poem2
print(poem2) # using print() interprets the escape sequences instead of showing them
#
bottles = 99
base = ''
base += 'current inventory: '
base += str(bottles)
base
# use "str()" to convert Python data types to strings
str(8)
####
# Escape with \
#
# using "\n" means to begin a new line:
palindrome= 'A man,\nA plan,\nA Canal: \nPanama:'
print(palindrome)
# use "\t" as an escape sequence for a tab:
print('\tabd')
print('a\tbc')
print('abc\t')
# to have " and ' inside a string, should use \' and \" and \\ for \.
testimony= "\"I did nothing!\" he \""
print(testimony)
fact= "The word 54' "
print(fact)
test2= 'Todya is first day \\'
print(test2)
# Combine with + : to combine two strings:
' majid ' + ' mahsa'
# Duplicate with *
start= 'na ' *4 + '\n'
end='goodby.'
print(start+end)
# Extract a Character with []
letters = 'abcdefghijklmnopqrstuvwxyz'
print(letters)
letters[0]
letters[-1]
#
name= 'majid'
name[0]='M' # NOTE: raises TypeError — strings are immutable; use these instead:
name.replace('m','M') # or :
'M' + name[1:]
# Slice with [ start : end : step ]
letters = 'abcdefghijklmnopqrstuvwxyz'
letters[:]
letters[:5]
letters[2:4]
letters[-3:]
letters[-6:-2]
letters[::2]
letters[4:20:3]
letters[:21:5] # From the start to offset 20 by 5: the end needs to be one more than the actual offset
letters[-1::-1] # backward
letters[::-1]
letters[-51:-50]
letters[:2]
letters[:70]
# Get Length with "len()" — the number of characters; works for strings
len(letters)
# Split with split()
todos = 'get gloves,get mask,give cat vitamins,call ambulance'
todos.split(',')
todos.split(' ')
todos.split()
# Combine with join() for strings
crypto_list = ['Yeti', 'Bigfoot', 'Loch Ness Monster']
crypto_string = ', '.join(crypto_list)
tst= 'test'
tst2=','.join(tst)
# Playing with Strings
poem = '''All that doth flow we cannot liquid name
Or else would fire and water be the same;
But that is liquid which is moist and wet
Fire that property can never get.
Then 'tis not cold that doth the fire put out
But 'tis the wet that makes it die, no doubt.'''
poem[:6]
len(poem)
poem.startswith('All')
poem.startswith('majid')
poem.endswith('test')
poem.find('the')
poem.rfind('the') # find the last "the" in the sentence
poem.count("the")
poem.isalnum() # are all of the characters in the sentence either letters or numbers?
######## Case and Alignment
steup ='a duck goes into a bar ...'
steup.strip('.') # will remove the trailing ...
steup.capitalize()
steup.title() # capitalize the first letter of each word
steup.upper() # capitalize all letters
steup.lower()
steup.swapcase() # Swap upper- and lowercase
len(steup)
steup.center(30) # center the sentence within 30 characters
steup.ljust(30)
steup.ljust(len(steup))
steup
steup.rjust(40)
# Substitute with "replace()" to replace in strings
steup.replace('duck','marmoset')
steup.replace('a ','a famous ', 100) # change up to 100 of them.
##############
# Things to Do
##############
#1- how many seconds in 1 hour
60 * 60
# 2- Assign the result from the previous task (seconds in an hour) to a variable called seconds_per_hour.
seonds_per_hour=3600
# 3: How many seconds are in a day? Use your seconds_per_hour variable.
seconds_per_day=24* seonds_per_hour
# 5: Divide seconds_per_day by seconds_per_hour. Use floating-point (/) division.
seconds_per_day/seonds_per_hour
# 6: Divide seconds_per_day by seconds_per_hour, using integer (//) division.
seconds_per_day//seonds_per_hour
|
import sys
import os
sys.path.append(os.getcwd())
import torch
from datasets.avmnist.get_data import get_dataloader
from unimodals.common_models import LeNet,MLP,Constant
from torch import nn
from training_structures.cca_onestage import train, test
from fusions.common_fusions import Concat
from unimodals.common_models import MLP, VGG16, Linear_inited, MaxOut_MLP
from utils.helper_modules import Sequential2
# Hard-coded dataset location — adjust for your environment.
traindata, validdata, testdata = get_dataloader('/home/pliang/yiwei/avmnist/_MFAS/avmnist',batch_size=800)
channels=6
# Two unimodal encoders: an image LeNet and an audio LeNet followed by an
# initialized linear projection to the shared 48-d space.
encoders=[LeNet(1,channels,3).cuda(),Sequential2(LeNet(1,channels,5),Linear_inited(192,48)).cuda()]
#encoders=[MLP(300,512,outdim), MLP(4096,1024,outdim)]
#encoders=[MLP(300, 512, 512), VGG16(512)]
#encoders=[Linear(300, 512), Linear(4096,512)]
#head=MLP(2*outdim,2*outdim,23).cuda()
head=Linear_inited(96, 10).cuda()
fusion=Concat().cuda()
# Train with the one-stage CCA objective; checkpoints the best model to best_cca.pt.
train(encoders,fusion,head,traindata,validdata,25,outdim=48,\
    save="best_cca.pt", optimtype=torch.optim.AdamW,lr=1e-2)
#,weight_decay=0.01)
print("Testing:")
# NOTE(review): torch.load of a whole pickled model requires the defining
# classes to be importable at load time.
model=torch.load('best_cca.pt').cuda()
test(model,testdata)
|
'''
Filemaker
Input:
- Path to a markdown file with sections delimited
Output:
- A directory of discrete markdown files.
- A CSV with the path to each file and a summary of each section.
Description:
This script will break up a monolithic markdown file into parts.
v0.2 2021.2.15
'''
from csv import reader
INFILE = r""
OUTPATH = ""
HEADERS = []
def get_text_from_file(path):
    '''Return the full text of the file at *path*, decoded as UTF-8.

    Uses a context manager so the handle is closed even if reading fails
    (the previous open/loop/close version leaked the handle on error), and
    reads in one call instead of accumulating line by line.
    '''
    with open(path, "r", encoding="utf8") as fh:
        return fh.read()
def open_csv(filename):
    '''Read a CSV file and return its rows as a list of lists of strings.'''
    with open(filename, 'r') as source:
        return list(reader(source))
def save_md(filename, outbody):
    '''Export the content of the current item as a markdown file.

    Best-effort: write errors are printed rather than raised, preserving
    the batch behaviour of main(). Errors from opening the file still
    propagate, as before.
    '''
    # BUG FIX: write as UTF-8 to match get_text_from_file(); previously the
    # platform default encoding was used, which corrupts non-ASCII markdown
    # on platforms where the default is not UTF-8.
    with open(filename, 'w', encoding="utf8") as f:
        try:
            f.write(outbody)
        except Exception as e:
            print(e)
def main():
    '''Read the CSV at INFILE and write one markdown file per data row.

    The first CSV row supplies the field headers. Each subsequent row
    becomes "<OUTPATH><row-index>.md", with the first field rendered as an
    H1 title and every remaining non-empty field as a "**Header**: value"
    paragraph.
    '''
    global HEADERS
    parts = open_csv(INFILE)
    for indx, part in enumerate(parts):
        if indx == 0:
            # Header row: remember the column names for later rows.
            HEADERS = parts[0]
        if indx > 0:
            filebody = ""
            for slot, field in enumerate(part):
                if slot == 0:
                    # First column becomes the document title.
                    filebody = filebody + "# {}: {}\n\n".format(HEADERS[slot], field)
                else:
                    if field:
                        filebody = filebody + "**{}**: {}\n\n".format(HEADERS[slot], field)
            out_path = OUTPATH + str(indx) + ".md"
            print("Saving... {}".format(out_path))
            save_md(out_path, filebody)


if __name__ == "__main__":
    main()
|
"""
first_name = input("Enter your first name: ")
last_name = input("Enter your last name: ")
print("Hello {} {}".format(first_name, last_name))
# The value type from Input is always a String
operand_1 = int(input("Enter 1st operand: "))
operand_2 = int(input("Enter 2nd operand: "))
print("sum is " + str(operand_1 + operand_2))
# Conversions
int = int("1001")
print(int)
floating = float("3.17")
print(floating)
strings = str("1")
print(strings)
# Multiple inputs in one go
name, age = input("Enter name and then age separated by ,").split(",")
print("Hello {}, You are {} years old!!!".format(name, age))
print(f"Hello {name}, You are just {age}!!!")
"""
# Exercise
x,y,z = input("Enter 3 space separated integer values: ").split(" ")
print(f"Average of {x}, {y} and {z} is {(int(x)+int(y)+int(z))/3}") |
# pylint: disable-all
import unittest
"""
Description:
Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
Note:
The number of elements initialized in nums1 and nums2 are m and n respectively.
You may assume that nums1 has enough space (size that is greater or equal to m + n) to hold additional elements from nums2.
Example:
Input:
nums1 = [1,2,3,0,0,0], m = 3
nums2 = [2,5,6], n = 3
Output: [1,2,2,3,5,6]
"""
# Example fixture from the problem statement (m1/n1 are the counts of
# initialized elements in nums11/nums21); nums11 is mutated in place below.
nums11 = [1, 2, 3, 0, 0, 0]
nums21 = [2, 5, 6]
m1 = 3
n1 = 3
class Solution:
    """In-place merge of two sorted integer arrays (LeetCode 88)."""

    def merge(self, nums1, m: int, nums2, n: int) -> None:
        """Merge the first n elements of nums2 into nums1, which holds m
        sorted elements followed by at least n slots of padding.

        Do not return anything, modify nums1 in-place instead.
        """
        # Fill nums1 from the back so nothing unread is overwritten.
        write = m + n - 1
        i, j = m - 1, n - 1
        while i >= 0 and j >= 0:
            if nums2[j] > nums1[i]:
                nums1[write] = nums2[j]
                j -= 1
            else:
                # Ties take the nums1 element, matching the original order.
                nums1[write] = nums1[i]
                i -= 1
            write -= 1
        # Leftover nums2 entries (if any) belong at the front; leftover
        # nums1 entries are already in place.
        if j >= 0:
            nums1[: j + 1] = nums2[: j + 1]
class MyTest(unittest.TestCase):
    """Unit test for Solution.merge using the module-level fixture."""

    def test_example1(self):
        solution = Solution()
        # BUG FIX: arguments were passed as (nums11, n1, nums21, m1) although
        # the signature is merge(nums1, m, nums2, n). Harmless here only
        # because m1 == n1 == 3; wrong for any asymmetric fixture.
        solution.merge(nums11, m1, nums21, n1)
        self.assertEqual(nums11, [1, 2, 2, 3, 5, 6])
if __name__ == '__main__':
unittest.main()
|
import os

from constants import MODEL_ID, PATH_TO_WEIGHTS
def get_model_versions():
previous_runs = os.listdir(PATH_TO_WEIGHTS)
version_numbers = [int(r.split("_")[-1].split(".")[0]) for r in previous_runs if
r[0] != "." and MODEL_ID in r]
return version_numbers
def load_recent_weights(model):
    """Load the highest-numbered saved weights for MODEL_ID into `model`.

    Raises ValueError (from max) if no weight files exist yet.
    """
    latest = max(get_model_versions())
    weights_path = os.path.join(PATH_TO_WEIGHTS, f"{MODEL_ID}_{latest}.h5")
    model.load_weights(weights_path)
    return model
def save_model(config, model):
    """Persist `model` under the next unused version number for MODEL_ID.

    `config` is currently unused; kept for interface compatibility.
    """
    existing = get_model_versions()
    next_version = 0 if not existing else max(existing) + 1
    model.save(os.path.join(PATH_TO_WEIGHTS, f"{MODEL_ID}_{next_version}.h5"))
class MemoryNetworkConfig:
    """Hyper-parameter container for the memory-network models.

    Fields left as None are placeholders the caller must fill in before
    building a model.
    """

    def __init__(self):  # TODO: set defaults
        # Architecture / training values with concrete defaults.
        self.n_decoder_lstm = 3
        self.n_encoder_lstm = 3
        self.model_id = "memory_networks"
        self.momentum = 0.7
        # Data- and vocabulary-dependent values (set once the dataset is known).
        self.vocab_size = None
        self.embedding_size = None
        self.dropout_rate = None
        self.n_lstm_nodes = None
        self.story_max_length = None
        self.query_max_length = None
        self.answer_max_length = None  # seq2seq models
        self.dataset_name = None
|
# author: Arun Ponnusamy
# object detection with yolo custom trained weights
# usage: python3 yolo_custom_weights_inference.py <yolov3.weights> <yolov3.config> <labels.names>
# import necessary packages
import cvlib as cv
from cvlib.object_detection import YOLO
import cv2
import sys
# Command-line arguments: path to weights file, model config, class-name file.
weights = sys.argv[1]
config = sys.argv[2]
labels = sys.argv[3]
# open webcam (device 0)
webcam = cv2.VideoCapture(0)
if not webcam.isOpened():
    print("Could not open webcam")
    exit()
yolo = YOLO(weights, config, labels)
# loop through frames until the webcam closes or the user quits
while webcam.isOpened():
    # read frame from webcam
    status, frame = webcam.read()
    if not status:
        print("Could not read frame")
        exit()
    # apply object detection
    bbox, label, conf = yolo.detect_objects(frame)
    print(bbox, label, conf)
    # draw bounding box over detected objects (frame is annotated in place)
    yolo.draw_bbox(frame, bbox, label, conf, write_conf=True)
    # display output
    cv2.imshow("Real-time object detection", frame)
    # press "Q" to stop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# release resources
webcam.release()
cv2.destroyAllWindows()
|
from google.appengine.ext import db
from google.appengine.api import memcache
import json
import logging
import random
from imageM import *
from userInfo import *
import datetime
import jinja2
import os
import util
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
#google app engine database model that stores chats for a game. model name is same as the game name.
class Gchats(db.Model):
    """Python 2 / App Engine datastore model holding the chat log for one game.

    Entities are keyed by game name. The working copy lives in memcache and is
    mutated through safemod()'s gets/cas retry loop; the datastore entity is
    the backing store that memcache misses fall back to (see getGchats).
    """
    name = db.StringProperty(default = '') #model name (same as the game name)
    chats= db.TextProperty(default = '##br####br####br####br####br####br####br##')#chat text, with ##..## layout tokens
    rendered =db.TextProperty(default = '')#rendered version of chats (rendered by first player session)
    ver = db.IntegerProperty(default = 0)#version flag so users can check if they need to re-render
    newchats = db.BooleanProperty(default = False)#queue for chats before they are added
    shownames = db.IntegerProperty(default = 1)#0 or 1 for anon or names shown
    ucolors = db.TextProperty(default = '{}')#JSON map: user name -> color index
    lastuser = db.StringProperty(default = '')#last person to type (no need to rewrite the user name)
    def safemod(self,command): #way to modify chat in memory without race condition
        """Run `command` (a Python snippet mutating local `s`) against the
        memcached copy under gets/cas, retrying up to 1000 times.

        NOTE(review): `command` is executed with `exec` and callers build it
        by concatenating user-supplied chat text into source code — an
        injection risk unless that text is sanitized upstream.
        """
        if command not in "": # true for any non-empty command string
            client = memcache.Client()
            errcount = 1000
            while True and (errcount>0): # Retry loop
                errcount -=1
                s = client.gets("%gchat%"+self.name) #get the chats
                exec command #do the operation
                if client.cas("%gchat%"+self.name,s,3600): #write back the chat only if the access time is still current
                    break
            if (errcount==0):
                logging.error('gchat safemod fail')
            return s
        else:
            # Empty command: nothing to do, return the entity unchanged.
            return self
    def mput(self): #put chat in memory (original entity is in datastore)
        if not memcache.Client().set("%gchat%"+self.name,self,3600):
            if not memcache.Client().add("%gchat%"+self.name,self,3600):
                logging.error('gchat mput fail')
    def uflare(self,uname): #add fake html for color to username
        """Wrap `uname` in #..# color markup; the system user '##GOD##' is
        passed through untouched."""
        if uname != '##GOD##':
            colors = ['00CC00','00CC99','006699','0000FF','6600FF','6600FF','FF0066','CC0000','FF6600','FFCC00','E6E600','669900']
            ucolors = json.loads(self.ucolors)
            # Color is assigned by the user's join index, wrapping around.
            color = colors[ucolors[uname]%len(colors)]
            return '#n0##s0#'+color+'#s1#'+uname+'#se#:#n1# '
        else:
            return '##GOD##'
    def addChat(self,uname,uchat): #append a chat to the chat text
        """Append one chat line via safemod. System ('##GOD##') lines are
        italicized; consecutive lines from the same user skip the name."""
        command = """
uname = \'"""+self.uflare(uname)+"""\'
uchat = \'"""+uchat+"""\'
if uname not in "":
    if (s.lastuser != uname) and (uname!='##GOD##'):
        s.chats += ('##p1####p0##'+uname+uchat)
    elif uname=='##GOD##':
        s.chats += ('##p0####i0##'+uchat+'##i1####p1##')
    else:
        s.chats += ('##br##'+uchat)
    s.newchats=True
    s.lastuser = uname"""
        s=self.safemod(command)
        return s
    def addRender(self,render): #put the rendered chat
        """Store a pre-rendered chat HTML blob and bump the version."""
        command = """
render = \"\"\""""+render+"""\"\"\"
s.rendered = render
s.newchats=False
s.ver+=1"""
        s=self.safemod(command)
        return s
    def addUser(self,uname): #give the user a color assignment if they dont have one
        command = """
uname = \'"""+uname+"""\'
ucolors = json.loads(s.ucolors)
try:
    nc=ucolors[uname]
except:
    ucolors[uname]=len(ucolors)
    s.ucolors = json.dumps(ucolors)"""
        s=self.safemod(command)
        return s
    def newRender(self,shownames): #increment version and render the chats
        """Re-render the chat stream through the chat.html template."""
        command = """
s.shownames = """+str(shownames)+"""
chatstream = s.chats
template_values = {
    'chatstream': chatstream,
}
template = jinja_environment.get_template('chat.html')
s.rendered=template.render(template_values)
s.newchats=False
s.ver+=1"""
        s=self.safemod(command)
        #util.unescape(chatstream),
        return s
def getGchats(gid):
    """Fetch the chat entity for game `gid`: memcache first, then datastore.

    Returns '' when the entity cannot be found at all.
    """
    client = memcache.Client()
    cache_key = "%gchat%" + gid
    try:
        cached = client.get(cache_key)
        cached.name  # raises on a cache miss (None has no .name)
        return cached
    except:
        try:
            entity = Gchats.get_by_key_name(gid)
            # Re-prime the cache so the next lookup is a hit.
            if not client.add(cache_key, entity, 3600):
                logging.error('getGchats fail')
        except:
            entity = ""
        return entity
#google app engine database model that stores a game. includes everything about the game, state, user status, scores, etc.
class Game(db.Model):
    """Python 2 / App Engine datastore model for one game session.

    Holds players, hands, scores and the turn state machine. As with Gchats,
    the live copy sits in memcache and is mutated via safemod()'s exec'd
    command strings under gets/cas. Most per-player collections are stored as
    JSON-encoded strings in string/text properties.
    """
    members = db.TextProperty(default = '[]')       #JSON list of player names
    memhands = db.TextProperty(default = '[]')      #JSON list of per-player hands (each itself JSON)
    memims = db.StringProperty(default = '[]')      #JSON list of submitted images this turn
    oldmemims = db.StringProperty(default = '[]')   #previous turn's submissions
    readys = db.StringProperty(default = '[]')      #JSON list of per-player ready flags
    active = db.StringProperty(default = '[]')      #JSON list: 0=gone, 1=joining next turn, 2=active
    stage = db.IntegerProperty(default = 0)         #state machine stage (0=lobby,1=hand,2=choose,3=vote)
    turn = db.IntegerProperty(default = 0)
    handsize = db.IntegerProperty(default = 7)
    maxturn =db.IntegerProperty(default = 50)
    whosturn = db.StringProperty(default = '')      #current first player / clue giver
    scores = db.StringProperty(default = '[]')
    dscores = db.StringProperty(default = '[]')     #per-turn score deltas [primary, secondary]
    clue = db.TextProperty(default = '')
    stats = db.TextProperty(default = '')
    name = db.StringProperty(default = '')
    kicktime = db.IntegerProperty(default = 100)
    time = db.DateTimeProperty(auto_now_add=True)   #last activity timestamp
    wasfirst = db.StringProperty(default = '')
    chats = db.TextProperty(default = """""")
    autoclue = db.IntegerProperty(default = 0)      #nonzero: auto-generated clue id
    picpope = db.BooleanProperty(default = False)   #alternate "picture pope" scoring mode
    imchosenby = db.StringProperty(default = '[]')
    isrand = db.StringProperty(default = '[]')
    #0=default,1=nofirstplayer,2=picturepope
    def safemod(self,command): #way to modify game in memory without race condition
        """Run `command` (a snippet mutating `s`) on the memcached game under
        gets/cas with up to 1000 retries; logs loudly on total failure."""
        if command not in "":
            client = memcache.Client()
            errcount = 1000
            while True and (errcount>0): # Retry loop
                errcount -=1
                s = client.gets("%game%"+self.name)
                exec command
                if client.cas("%game%"+self.name,s,3600):
                    break
            if (errcount==0):
                # Debug dump: apply the command once more outside CAS and log.
                print "FUCKK, GAMESAFEMOD FAILED, FUCKKKK!OH SHIT OH SHIT!"
                s = client.get("%game%"+self.name)
                print s.stage
                print s.scores
                exec command
                print command
                print s.stage
                print s.scores
                print random.randint(0,500)
                logging.error('game safemod fail')
            return s
        else:
            return self
    def putUser(self,uname): #put the user in a game
        """Add `uname` to the game (or reactivate them), create/attach the
        game's chat entity, announce the join, and return the member index."""
        #if self.stage==0 or self.stage==1 or self.stage==2: #in hand or lobby
        hand = json.dumps(makehand(self.handsize))
        command = """
uname = \'"""+uname+"""\'
members = json.loads(s.members)
active = json.loads(s.active)
try: #in the game
    ind = members.index(uname)
    if not (s.stage==3):
        active[ind] = 2
    else:
        active[ind] = 1
    s.active = json.dumps(active)
except: #not in list
    scores = json.loads(s.scores)
    memims = json.loads(s.memims)
    memhands = json.loads(s.memhands)
    readys = json.loads(s.readys)
    dscores = json.loads(s.dscores)
    dscores.append([0,0])
    members.append(uname)
    scores.append(0)
    memims.append(-1)
    readys.append(False)
    memhands.append(\'"""+hand+"""\')
    if s.stage==0 or s.stage==1 or s.stage==2:
        active.append(2)
    else:
        active.append(1)
    s.members = json.dumps(members)
    s.scores = json.dumps(scores)
    s.memims = json.dumps(memims)
    s.memhands = json.dumps(memhands)
    s.readys = json.dumps(readys)
    s.dscores = json.dumps(dscores)
    s.active = json.dumps(active)
s.time = datetime.datetime.now()"""
        s = self.safemod(command)
        s.mput()
        s.put()
        members = json.loads(s.members)
        # Ensure the companion chat entity exists, then announce the join.
        try:
            gchats = getGchats(self.name)
            nc=gchats.name
        except:
            gchats = Gchats(key_name=self.name)
            gchats.name = self.name
            gchats.put()
            gchats.mput()
        gchats=gchats.addUser(uname)
        gchats=gchats.addChat('##GOD##',uname+' has joined the game!')
        gchats.put()
        return members.index(uname)
        #else: #voting, wait till next turn
        #return False
    def removeUser(self,uname): #marks user as inactive and figures out who is new first player if player was first
        """Mark `uname` inactive; if they were the first player, promote the
        next active member (wrapping) and announce the departure."""
        command = """
uname = \'"""+uname+"""\'
members = json.loads(s.members)
active = json.loads(s.active)
ind = members.index(uname)
active[ind] = 0
s.active = json.dumps(active)
if uname==s.whosturn:
    if s.stage <700:
        #new first player
        s.wasfirst=uname
        newInd = ind+1
        while newInd < (len(members)):
            if active[newInd]==2:
                break
            newInd +=1
        if newInd==len(members):
            newInd = 0
            while newInd < (len(members)):
                if active[newInd]==2:
                    break
                newInd +=1
        try:
            s.whosturn = members[newInd]
        except:
            nc=1; #dont care, there was only one member in game
"""
        s=self.safemod(command)
        # If the departing player was clue giver early in the turn, reset the
        # new first player's session stages.
        if s.wasfirst==uname:
            if s.stage<2:
                mem = getUserInfo(s.whosturn)
                mem.safemod("""s.stage1=-2;s.stage2=-2;""")
        gchats = getGchats(self.name)
        gchats.addChat('##GOD##',uname+' has ditched you fuckers.')
        gchats.put()
        return True
    def mput(self):#put game in memory
        if not memcache.Client().set("%game%"+self.name,self,3600):
            if not memcache.Client().add("%game%"+self.name,self,3600):
                logging.error('g mput fail')
    def advance(self,options): #game state machine shenanigans. moves to next state
        """Advance the state machine one stage, building the appropriate
        safemod command for the current stage:

        stage 0 -> 1: leave lobby; apply option cards and top up hands.
        stage 1 -> 2: capture the first player's clue; discard/submit cards.
        stage 2 -> 3: collect image choices, pad with random images, mark
                      which submissions are random.
        stage 3 -> 1: score the votes, pick the next first player, reset
                      per-turn state.
        """
        if self.stage == 0: #in lobby. on transition, apply options and fill hands of users
            command = """
s.stage = s.stage+1;s.time = datetime.datetime.now();"""
            if not (options==''):
                if 'op1.jpg' in options: # bigger hands option
                    self.handsize=9
                    memhands = json.loads(self.memhands)
                    imcount = 0
                    # Pre-draw enough random images to top up every hand.
                    for i in range(0,len(memhands)):
                        imcount+=(self.handsize-len(json.loads(memhands[i])))
                    randi = []
                    for i in range(0,imcount):
                        randi.append(getRand())
                    command = command+"""
randi = json.loads(\'"""+json.dumps(randi)+"""\')
numhands = """+str(self.handsize)+"""
memhands = json.loads(s.memhands)
count = 0
for i in range(0,len(memhands)):
    hand = json.loads(memhands[i])
    while len(hand)<numhands:
        hand.append(randi[count])
        count += 1
    memhands[i]=(json.dumps(hand))
s.memhands = json.dumps(memhands)
s.handsize = numhands"""
                if 'op2.jpg' in options: # auto-generated clues option
                    command = command+"""
s.autoclue = random.randint(0,1083)+1
"""
                if 'op3.jpg' in options: # fast kick option
                    command = command+"""
s.kicktime = 20
"""
                if 'op4.jpg' in options: # picture pope scoring option
                    command = command+"""
s.picpope = True
"""
        elif self.stage == 1: #in hand but no clue. on transition, take first players clue and discard the cards selected by others
            randi = [];
            active = json.loads(self.active)
            # One replacement card per active player.
            for i in range(0,len(active)):
                if active[i] == 2:
                    randi.append(getRand())
            command = """
randi = json.loads(\'"""+json.dumps(randi)+"""\')
s.clue = getUserInfo(s.whosturn).text #get clue
s.stage = s.stage+1
readys = json.loads(s.readys)
active = json.loads(s.active)
members = json.loads(s.members)
memhands = json.loads(s.memhands)
memims = json.loads(s.memims)
s.time = datetime.datetime.now()
count = 0
for i in range(0,len(readys)):
    readys[i] = False
    if active[i]==2:
        if (not members[i]==s.whosturn) or s.autoclue:
            memi = getUserInfo(members[i]).choice
            if memi not in 'oskip.jpg oproceed.jpg':
                memhand = json.loads(memhands[i])
                j = memhand.index(memi)
                playImage(memi)
                memhand[j] = randi[count]
                memhands[i]= json.dumps(memhand)
            else:
                playImage(randi[count])
            count += 1
        else:
            memi= getUserInfo(members[i]).choice
            if memi not in 'oskip.jpg oproceed.jpg':
                memims[i] = memi
                playImage(memi)
                memhand = json.loads(memhands[i])
                j = memhand.index(memi)
                memhand[j] = randi[count]
                memhands[i]= json.dumps(memhand)
                count+=1
s.memhands = json.dumps(memhands)
s.memims = json.dumps(memims)
s.readys = json.dumps(readys)"""
        elif self.stage == 2: #get image choices and discard or submit accordingly (first player is discarding, others are submittingto match clue)
            randi = []
            active = json.loads(self.active)
            acount = 5;
            if self.autoclue:
                acount-=1
            # Draw one card per active player plus filler up to the table size.
            for i in range(0,len(active)):
                if active[i] == 2:
                    randi.append(getRand())
                    acount -=1
            while acount >0:
                acount -=1
                randi.append(getRand())
            command = """
randi = json.loads(\'"""+json.dumps(randi)+"""\')
memhands = json.loads(s.memhands)
members = json.loads(s.members)
memims = json.loads(s.memims)
active = json.loads(s.active)
s.time = datetime.datetime.now()
isrand = []
count = 0
for i in range(0,len(members)):
    isrand.append(1)
    if active[i] == 2:
        member = members[i]
        memi= getUserInfo(member).choice
        if (not members[i]==s.whosturn) or (s.autoclue and not s.picpope):
            memims[i] = memi
            isrand[i]=0
            playImage(memi)
            memhand = json.loads(memhands[i])
            j = memhand.index(memi)
            memhand[j] = randi[count]
            memhands[i]= json.dumps(memhand)
            count += 1
        elif memi not in "":
            if memi not in 'oskip.jpg oproceed.jpg':
                memhand = json.loads(memhands[i])
                j = memhand.index(memi)
                playImage(memi)
                memhand[j] = randi[count]
                memhands[i]= json.dumps(memhand)
            else:
                playImage(randi[count])
            count += 1
while count < 4:
    memims.append(randi[count])
    isrand.append(1)
    playImage(randi[count])
    count += 1
readys = json.loads(s.readys)
for i in range(0,len(readys)):
    readys[i] = False
s.readys = json.dumps(readys)
s.memims = json.dumps(memims)
s.memhands = json.dumps(memhands)
s.isrand = json.dumps(isrand)
s.stage = s.stage+1"""
        elif self.stage == 3: #get voting selections and award points. choose new first player and return game to first stage.
            # score points
            command = """
members = json.loads(s.members)
memims = json.loads(s.memims)
active = json.loads(s.active)
scores = json.loads(s.scores)
isrand = json.loads(s.isrand)
s.time = datetime.datetime.now()
firstPlayerInd = members.index(s.whosturn)
correctVotes = 0;
tempScores = [0]*len(members)
whoTheyChose = []
activeCount=0;dscores = [];imchosenby=[]
for i in range(0,len(members)):
    imchosenby.append(json.dumps([]))
for i in range(0,len(members)): #mark choice of each member
    dscores.append([0,0])
    if active[i]==2:
        member = members[i]
        whoTheyChose.append(memims.index(getUserInfo(member).choice))
    else:
        whoTheyChose.append(-1)
s.imchosenby = json.dumps(imchosenby); count = -1;
for i in range(0,len(members)): #give points for choices
    if active[i]==2:
        activeCount+=1
        member = members[i]
        if whoTheyChose[i]>=0:
            try:
                temp = json.loads(imchosenby[whoTheyChose[i]])
                temp.append(members[i])
                imchosenby[whoTheyChose[i]]=json.dumps(temp)
            except:
                whatever =1
            if (whoTheyChose[i]==firstPlayerInd) and not (s.autoclue and not s.picpope):
                if (not s.picpope) and (not s.autoclue):
                    correctVotes += 1
                    tempScores[i]+=2
                    dscores[i][0]+=2
            else:
                if s.picpope and (member==s.whosturn):
                    if not isrand[whoTheyChose[i]]:
                        tempScores[whoTheyChose[i]] += 5
                        dscores[whoTheyChose[i]][0] += 5
                    else:
                        tempScores[i] -= 4
                        dscores[i][0] -= 4
                elif s.picpope:
                    if not isrand[whoTheyChose[i]]:
                        dscores[whoTheyChose[i]][1] +=1
                    else:
                        dscores[i][1] -=1
                else:
                    if not isrand[whoTheyChose[i]]:
                        tempScores[whoTheyChose[i]] += 1
                        dscores[whoTheyChose[i]][1] +=1
                    else:
                        tempScores[i] -= 1
                        dscores[i][0] -=1
        elif s.autoclue:
            if not s.picpope:
                tempScores[i]-=1
                dscores[i][0]-=1
    elif active[i]==1:
        active[i]=2
s.active = json.dumps(active)
s.imchosenby = json.dumps(imchosenby)
if correctVotes > 0 and correctVotes <(activeCount-1): #bonus for clue writer
    tempScores[firstPlayerInd] += 3
    dscores[firstPlayerInd][0] +=3
s.dscores = json.dumps(dscores)
for i in range(0,len(members)): #update scores
    scores[i] += tempScores[i]
s.scores = json.dumps(scores)
#new first player
s.wasfirst = s.whosturn
newInd = firstPlayerInd+1
while newInd < (len(members)):
    if active[newInd]==2:
        break
    newInd +=1
if newInd==len(members):
    newInd = 0
    while newInd < (len(members)):
        if active[newInd]==2:
            break
        newInd +=1
s.whosturn = members[newInd]
#clear things
readys = json.loads(s.readys)
for i in range(0,len(readys)):
    readys[i] = False
s.readys = json.dumps(readys)
s.oldmemims = s.memims
s.memims = json.dumps([-1]*len(members))
s.clue = ""
s.turn += 1
if s.autoclue!=0:
    s.autoclue=random.randint(0,1083)+1
#back to hand
s.stage = 1"""
        # Flag the chat so clients re-render, then apply the stage command.
        gchat = getGchats(self.name)
        gchat.safemod('s.newchats = True;')
        return self.safemod(command)
    def readyCheck(self): #first player session checks that all players are ready before advancing
        """True when every active player is ready (the clue giver is exempt
        under picpope+autoclue during stage 2)."""
        readys = json.loads(self.readys)
        active = json.loads(self.active)
        members = json.loads(self.members)
        for i in range(0,len(readys)):
            if active[i]==2:
                if not readys[i]:
                    if not (self.picpope and (self.whosturn==members[i]) and self.autoclue and (self.stage==2)):
                        return False
        return True
def getGame(gid):
    """Fetch a Game by id: memcache first, falling back to the datastore.

    Returns '' when the game cannot be found anywhere.
    """
    client = memcache.Client()
    cache_key = "%game%" + gid
    try:
        cached = client.get(cache_key)
        cached.stage  # raises on a cache miss (None has no .stage)
        return cached
    except:
        try:
            entity = Game.get_by_key_name(gid)
            # Re-prime the cache so the next lookup is a hit.
            if not client.add(cache_key, entity, 3600):
                logging.error('getGame fail')
        except:
            entity = ""
        return entity
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import datetime
from web_app.models.models import RuleType
from web_app.serializers.rule_type import RuleTypeSerializer
from rest_framework import serializers
def test_validate_mail_address_ok_single():
    """validate_mail_address accepts a single well-formed address and
    returns it unchanged."""
    address = "test@example.com"
    assert RuleTypeSerializer().validate_mail_address(address) == address
def test_validate_mail_address_ok_multiple():
    """validate_mail_address accepts several ';'-separated addresses and
    returns them unchanged."""
    address = "test1@example.com;test2@example.com"
    assert RuleTypeSerializer().validate_mail_address(address) == address
def test_validate_mail_address_ng_length():
    """validate_mail_address: an over-long address must raise ValidationError.

    BUG FIX: the original wrapped pytest.raises in a try/except that executed
    `assert False` on every path (after the with-block on success, and in the
    bare except on failure), so the test failed unconditionally regardless of
    the serializer's behavior.
    """
    serializer = RuleTypeSerializer()
    mail_address = "abcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefghijalmnopqrstuvwxyzabcdefg@example.com"
    with pytest.raises(serializers.ValidationError):
        serializer.validate_mail_address(mail_address)
def test_validate_mail_address_ng_format():
    """validate_mail_address: a malformed address must raise ValidationError.

    BUG FIX: same unconditional-failure pattern as the length test — the
    try/except around pytest.raises hit `assert False` on every path. The
    bare pytest.raises context is all that is needed.
    """
    serializer = RuleTypeSerializer()
    mail_address = "test1@testcom"
    with pytest.raises(serializers.ValidationError):
        serializer.validate_mail_address(mail_address)
from discord.ext import commands
import lib.dbman as db
import random
class Dice(commands.Cog):
    """Dice-rolling commands for a World-of-Darkness-style success system.

    Both commands roll d10s against a difficulty, report successes/botches,
    and — when used in the guild's feeding channel — convert net successes
    into blood points and ping the ST alert channel on failures/botches.
    """
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def r(self, ctx, pool: int = 1, diff: int = 6, wp: str = "0", *reason):
        """
        Rolls and checks successes.\n\
        Syntax: $r [Dice Pool] [Difficulty] [modifier]\n\
        \n\
        Example: $r 5 7 => [5, 2, 8, 4, 3] Results: 1 Successes!
        """
        # alert_st: 0 = no alert, 1 = failure, 2 = botch.
        alert_st = 0
        reason_string = ""
        if pool < 1:
            pass  # nonsensical inputs are silently ignored
        elif diff < 1:
            pass
        else:
            reason = list(reason)
            # The third positional arg is the willpower modifier if numeric;
            # otherwise it is treated as the first word of the reason.
            try:
                wp = int(wp)
            except ValueError:
                reason.insert(0, wp)
                wp = 0
            if not reason:
                reason = ["No reason provided."]
            for word in reason:
                reason_string += word + " "
            reason_string = reason_string[:-1]  # drop trailing space
            ss = 0
            fail = 0
            random_raw = []
            for i in range(pool):
                random_raw.append(random.randint(1, 10))
            await ctx.send(random_raw)
            # Count successes (>= difficulty) and botch dice (1s). Note a 1
            # can never meet diff >= 1... only when diff <= 1; both branches
            # are independent ifs here (unlike $rs below).
            for roll in random_raw:
                if roll >= diff:
                    ss += 1
                if roll == 1:
                    fail += 1
            if ss <= 0 and wp <= 0:
                if fail > 0:
                    result = "Botch! | Reason: " + str(reason_string)
                    alert_st = 2
                else:
                    result = "Failure! | Reason: " + str(reason_string)
                    alert_st = 1
            elif ss - fail <= 0 < ss and wp <= 0:
                # Successes were all cancelled by 1s.
                result = "Failure! | Reason: " + str(reason_string)
                alert_st = 1
            else:
                ss += wp
                if ss - fail > 0:
                    result = (
                        str(ss - fail) + " successes! | Reason: " + str(reason_string)
                    )
                else:
                    # Willpower floor: report at least the wp successes.
                    result = (
                        "{} successes!".format(str(wp))
                        + " | Reason: "
                        + str(reason_string)
                    )
            await ctx.send("{} - Results: ".format(ctx.author.mention) + result)
            # Feeding channel: net successes restore blood points, capped at max.
            guild = db.get_guild_info(ctx.guild.id)
            if ctx.channel.id == guild.get("feeding_chan"):
                net_ss = ss - fail
                if net_ss < 0:
                    net_ss = wp
                player = db.get_player_info(ctx.guild.id, ctx.author.id)
                current_bp = player.get("bp")
                bp_max = player.get("bp_max")
                new_bp = current_bp + net_ss
                if new_bp > bp_max:
                    new_bp = bp_max
                db.execute(
                    "UPDATE Characters SET bp = %s WHERE id = %s",
                    (new_bp, player.get("id")),
                )
                if alert_st == 1:
                    await self.bot.get_channel(guild.get("st_alerts_chan")).send(
                        "{} failed a feeding roll!".format(ctx.author.mention)
                    )
                elif alert_st == 2:
                    await self.bot.get_channel(guild.get("st_alerts_chan")).send(
                        "{} botched a feeding roll!".format(ctx.author.mention)
                    )

    @commands.command()
    async def rs(self, ctx, pool: int = 1, diff: int = 6, wp: str = "0", *reason):
        """
        Same as $r except this also applies explosions to the dice.\n\
        Syntax: $rs [Dice Pool] [Difficulty] [wpifier]\n\
        \n\
        Example: $rs 5 7 => [10, 2, 8, 4, 3] [9] Results: 3 Successes!
        """
        st_alert = 0
        reason_string = ""
        if pool < 1:
            pass
        elif diff < 1:
            pass
        else:
            reason = list(reason)
            try:
                wp = int(wp)
            except ValueError:
                reason.insert(0, wp)
                wp = 0
            if not reason:
                reason = ["No reason provided."]
            for word in reason:
                reason_string += word + " "
            reason_string = reason_string[:-1]
            ss = 0
            fail = 0
            tens = 0
            random_raw = []
            for i in range(pool):
                random_raw.append(random.randint(1, 10))
            await ctx.send(random_raw)
            # NOTE(review): unlike $r, a 1 here only counts as a botch die when
            # it is not also a success (elif) — confirm the asymmetry is intended.
            for roll in random_raw:
                if roll >= diff:
                    ss += 1
                elif roll == 1:
                    fail += 1
                if roll == 10:
                    tens += 1
            guild = db.get_guild_info(ctx.guild.id)
            if guild.get("exploding_toggle") == 1:
                if ss <= 0 and wp <= 0:
                    if fail > 0:
                        result = "Botch! | Reason: " + str(reason_string)
                        st_alert = 2
                    else:
                        result = "Failure! | Reason: " + str(reason_string)
                        st_alert = 1
                else:
                    ss -= fail
                    tens -= fail
                    # Explode: reroll each 10, repeating while new 10s appear.
                    while tens > 0:
                        explosion = []
                        for i in range(tens):
                            explosion.append(random.randint(1, 10))
                        await ctx.send(explosion)
                        ten = 0
                        for roll in explosion:
                            if roll == 10:
                                ten += 1
                                ss += 1
                            elif roll >= diff:
                                ss += 1
                        tens = ten
                    if ss <= 0:
                        ss = wp  # willpower floor
                    else:
                        ss += wp
                    if ss <= 0:
                        result = "Failure! | Reason: " + str(reason_string)
                        st_alert = 1
                    else:
                        result = str(ss) + " Successes! | Reason: " + str(reason_string)
            elif guild.get("exploding_toggle") == 0:
                # Explosions disabled: each 10 is simply one extra success.
                ss += tens
                ss -= fail
                if ss <= 0:
                    ss = wp
                else:
                    ss += wp
                if ss <= 0:
                    result = "Failure! | Reason: " + str(reason_string)
                    st_alert = 1
                else:
                    result = str(ss) + " Successes! | Reason: " + str(reason_string)
            # NOTE(review): if exploding_toggle is neither 0 nor 1, `result`
            # is unbound here and this send raises NameError.
            await ctx.send("{} - Results: ".format(ctx.author.mention) + result)
            if ctx.channel.id == guild.get("feeding_chan"):
                net_ss = ss - fail
                if net_ss < 0:
                    net_ss = wp
                player = db.get_player_info(ctx.guild.id, ctx.author.id)
                current_bp = player.get("bp")
                bp_max = player.get("bp_max")
                new_bp = current_bp + net_ss
                if new_bp > bp_max:
                    new_bp = bp_max
                db.execute(
                    "UPDATE Characters SET bp = %s WHERE id = %s",
                    (new_bp, player.get("id")),
                )
                if st_alert == 1:
                    await self.bot.get_channel(guild.get("st_alerts_chan")).send(
                        "{} failed a feeding roll!".format(ctx.author.mention)
                    )
                elif st_alert == 2:
                    await self.bot.get_channel(guild.get("st_alerts_chan")).send(
                        "{} botched a feeding roll!".format(ctx.author.mention)
                    )
def setup(bot):
    """discord.py extension hook: register the Dice cog when the bot loads."""
    bot.add_cog(Dice(bot))
|
from typing import List
from datetime import datetime
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from ninja import Schema, ModelSchema, Field, Router
from ninja.pagination import paginate
from .models import Post, Category, Tag
router = Router()
class PostOut(ModelSchema):
    """
    generate a schema from models.
    use alias/resolver to override field. (https://django-ninja.rest-framework.com/guides/response/)
    """
    category: str = Field(None, alias="category.name")  # flatten FK to its name
    tag: List[str]
    owner: str = Field(None, alias="owner.username")    # flatten FK to its username

    @staticmethod
    def resolve_tag(obj):
        # M2M relation: serialize tags as a plain list of names.
        return [t.name for t in obj.tag.all()]

    class Config:
        model = Post
        model_fields = ["id", "title", "category", "desc", "content", "tag", "owner", "created_time"]
@router.get("/posts", response=List[PostOut])
@paginate  # pagination; consider adding previous/next page URL fields later
def get_posts(request):
    """
    Get post list.
    """
    # prefetch_related avoids N+1 queries for the related fields PostOut flattens.
    qs = Post.objects.all().prefetch_related("category", "tag", "owner").order_by("created_time")
    return qs
@router.get("/posts/{post_id}", response=PostOut)
def get_post(request, post_id: int):
    """
    get single post.
    """
    # Returns HTTP 404 when the post does not exist.
    post = get_object_or_404(Post, pk=post_id)
    return post
|
#Russel Tagaca
#udpServer.py
import sys
import time
import socket
import struct
import random
serverIp = "127.0.0.1"
serverPort = 12000
dataLen = 10000000  # max datagram size passed to recvfrom
responseCount = 0;  # number of ping requests received so far
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#takes IPaddress and port # to socket
serverSocket.bind((serverIp, serverPort))
print('The server is ready to receive on port: ' + str(serverPort))
#loop forever to listen to datagram messages
while True:
    #receive and print client data from "data" socket
    data, address = serverSocket.recvfrom(dataLen)
    if (data):
        # Big-endian pair of ints: (message type, sequence number).
        dataUnpack = struct.unpack('>ii', data)
        responseCount += 1
        # Simulate packet loss: drop roughly 3 in 10 replies.
        if (random.randint(1,11) < 4):
            print("Message with sequence number " + str(dataUnpack[1]) + " dropped")
            continue;
        #send back to client
        else:
            print("Responding to ping request with sequence number " + str(dataUnpack[1]))
            msgResponse = struct.pack('>ii', 1, dataUnpack[1])
            serverSocket.sendto(msgResponse,address)
|
# Read the first three lines of a text file.
with open('file_name', 'r') as inf:
    s1 = inf.readline() # read the first line of the file
    s2 = inf.readline() # read the second line of the file
    s = inf.readline().strip() # strip whitespace/control characters from the line
#import os
# os.path.join('.', 'dirname', 'filename') # build the full path to the file: ./dirname/filename
'''
CTCI 1.8
- don't change values until finding all the cells with zeros
- O(N*M), quadratic solution inevitably.
'''
def findZero(mtrx):
    """Collect the coordinates of every zero cell in a matrix.

    Returns a pair of parallel lists (row_indices, col_indices): the k-th
    zero found (row-major order) is at mtrx[row[k]][col[k]].

    Cleanup: the original pre-initialized the loop indices (`i = 0`, `j = 0`)
    before each `for`, which is dead code in Python; enumerate replaces the
    range-over-len pattern.
    """
    row = []
    col = []
    for r, cells in enumerate(mtrx):        # r indexes rows
        for c, value in enumerate(cells):   # c indexes columns
            if value == 0:
                row.append(r)
                col.append(c)
    return row, col
def setToZero(row, col, mtrx):
    """Zero out, in place, every row listed in `row` and every column listed
    in `col`; returns the mutated matrix.

    BUG FIX: the original sized the inner row-clearing loop with
    `len(mtrx[i])` where `i` was a manually reset index variable — it always
    measured row 0, so it only worked for rectangular matrices. Each row is
    now cleared using its own length, which also handles ragged input.
    """
    for r in row:
        for c in range(len(mtrx[r])):
            mtrx[r][c] = 0
    for c in col:
        for r in range(len(mtrx)):
            mtrx[r][c] = 0
    return mtrx
def main():
    """Demo: zero out the rows/columns containing a 0 in a sample matrix."""
    matrix = [[1, 4, 5, 12], [-5, 8, 9, 0], [-6, 0, 11, 19]]
    rows, cols = findZero(matrix)
    print(setToZero(rows, cols, matrix))
main()
|
from selenium import webdriver
# Launch Chrome with the bundled driver and check whether an element named
# 'ultimo' exists on the test page.
driver = webdriver.Chrome('Recursos\\chromedriver.exe')
driver.maximize_window()
driver.get('http://www.goodstartbooks.com/pruebas/index.html')
# NOTE(review): find_element_by_name raises NoSuchElementException on a miss,
# so the else branch below is effectively unreachable.
elemento = driver.find_element_by_name('ultimo')
if elemento is not None:
    print('El elemento fue encontrado')
else:
    print('El elemento no fue encontrado')
driver.quit()
# SQLite 접속하기.
import sqlite3
con = sqlite3.connect('c:/temp/userDB') # open (or create) the database file
cur = con.cursor() # cursor used to issue queries
sql = "SELECT * FROM userTable"
cur.execute(sql)
print(' 사용자아이디 사용자이름 사용자나이')
print(' --------------------------------')
# Fetch and print rows one at a time until the result set is exhausted.
while True :
    row = cur.fetchone()
    if row == None :
        break
    userID = row[0]; userName = row[1]; userAge = row[2]
    print("%10s %10s %10d" % (userID, userName, userAge))
cur.close()
con.close() # close the database connection
print('Ok!')
from django.shortcuts import render,redirect,reverse
from django.http import HttpResponse,JsonResponse
from .models import User
from .forms import LoginForm,RegisterForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
def regist_view(request):
    """Register a new user.

    POST with valid data creates the user and redirects to the confirmation
    page; invalid data re-renders the form with a de-duplicated error list.
    GET simply renders the empty form.
    """
    if request.method=='POST':
        form=RegisterForm(request.POST)
        if form.is_valid():
            telephone = form.cleaned_data.get('telephone')
            name = form.cleaned_data.get('name')
            password = form.cleaned_data.get('password')
            sex=form.cleaned_data.get('sex')
            id_card=form.cleaned_data.get('id_card')
            user = User.objects.create_user(name=name, telephone=telephone, sex=sex, id_card=id_card,password=password)
            user.save()
            return redirect('user:regist_re')
        else:
            # Take the first message per field, then drop duplicates.
            errors=[value[0] for value in form.get_errors().values()]
            errors=set(errors)
            return render(request, 'user/regist.html',{'errors':list(errors)})
    return render(request, 'user/regist.html')
def regist_view_re(request):
    """Render the registration-success confirmation page."""
    return render(request,'user/regist_re.html')
def login_view(request):
    """Log a user in by telephone/password.

    Already-authenticated sessions are redirected straight to the index.
    The 'remember' flag controls session lifetime: default expiry when set,
    expire-on-browser-close otherwise.
    """
    if request.session.get('_auth_user_id'):  # already logged in
        return redirect(reverse('detail:index'))
    else:
        if request.method == 'POST':
            form = LoginForm(request.POST)
            if form.is_valid():
                telephone = form.cleaned_data.get('telephone')
                password = form.cleaned_data.get('password')
                remember = form.cleaned_data.get('remember')
                user = authenticate(request, username=telephone, password=password)
                if user:
                    if user.is_active:
                        login(request, user)
                        if remember:
                            request.session.set_expiry(None)  # use the default session age
                        else:
                            request.session.set_expiry(0)  # expire when the browser closes
                        return redirect(reverse('detail:index'))
                    else:
                        # Account disabled ("your account has been frozen").
                        return render(request, 'user/login.html', {'errors': '您的账号被冻结了'})
                else:
                    # Bad credentials ("wrong phone number or password").
                    return render(request, 'user/login.html', {'errors': '手机号或密码错误'})
            else:
                # Form-level validation failed ("password must be at least 6 characters").
                print(form.get_errors())
                return render(request,'user/login.html',{'errors': '密码不能少于6个长度!'})
        else:
            return render(request, 'user/login.html')
def forget_view(request):
    """Render the forgot-password page."""
    return render(request, 'user/forget.html')
def logout_view(request):
    """Log the current user out and render the logout page."""
    logout(request)
    return render(request,'user/logout.html')
@login_required(login_url='/user/login/')
def login_index(request):
    """Render the index page for an authenticated user."""
    return render(request,'detail/index.html',{'user':request.user})
from random import seed
from random import gauss
from random import random
from cv2 import cv2 as cv2
import math
import jsonpickle
import json
import numpy
import shutil
import os
count = 0;  # module-global: incremented each time an LSH is rebuilt from a dump (see LSH.fromDumplistArr)
def sift(img):
    """Detect SIFT keypoints and descriptors on a BGR image.

    Returns (keypoints, descriptors) where every descriptor has been scaled
    down by a factor of 2, matching the original normalization.

    NOTE(review): the original also built a float32 grayscale copy of the
    image but never used it — detectAndCompute was called on the color image.
    That dead conversion is removed here; if grayscale input was intended,
    pass the converted image to detectAndCompute instead.
    """
    detector = cv2.xfeatures2d.SIFT_create()  # renamed: was shadowing this function's name
    kp1, des1 = detector.detectAndCompute(img, None)
    # Halve all descriptors in one vectorized step (was a per-row loop).
    des1 /= 2.0
    return kp1, des1
def keypointToFloatArray(kp):
    """Return the (x, y) coordinate pair of a keypoint."""
    return kp.pt
class LSH:
    """One p-stable LSH function: h(v) = floor((a . v + b) / w).

    The projection vector `a` and offset `b` are drawn at random in init();
    getDumpListArr()/fromDumplistArr() round-trip them as a single flat
    array of length dimension+1 for serialization.
    """

    listA = None     # projection vector a
    B = None         # random offset b in [0, w)
    w = 4.0          # bucket width
    dumplist = None  # flat [a..., b] array used for serialization

    def init(self, dimension):
        """Draw a fresh random projection of the given dimension."""
        self.listA = numpy.zeros(dimension)
        self.dumplist = numpy.zeros(dimension + 1)
        # Offset first, then the gaussian components (RNG call order matters
        # for reproducibility under a fixed seed).
        self.B = random() * self.w
        self.dumplist[dimension] = self.B
        for idx in range(dimension):
            sample = gauss(-1, 1)
            self.listA[idx] = sample
            self.dumplist[idx] = sample

    def getHashValue(self, array):
        """Map a feature vector to its integer bucket index."""
        projection = numpy.dot(array, self.listA)
        return math.floor((projection + self.B) / self.w)

    def getDumpListArr(self):
        """Return the flat serialized representation [a..., b]."""
        return self.dumplist

    def fromDumplistArr(self, dimension, dumplist):
        """Rebuild the function from a serialized flat array."""
        self.dumplist = dumplist
        self.listA = dumplist[:dimension]
        self.B = dumplist[dimension]
        global count
        count += 1
#this is the basic g function
class HashingFunc:
    """A compound hash g(v): the concatenation of 3 independent LSH projections."""

    # Class-level defaults kept for API compatibility; init()/fromFile()
    # rebind per-instance state.
    listLSH = []
    hash_string = ""

    def init(self):
        """Create 3 fresh LSH projections over 128-dim SIFT descriptors.

        BUGFIX: listLSH used to be appended to the *class* attribute, so every
        HashingFunc instance shared one growing list and hash() always used the
        same first three projections for all 15 tables. Reset per instance.
        """
        self.listLSH = []
        for j in range(0, 3):
            lsh = LSH()
            lsh.init(128)
            self.listLSH.append(lsh)

    def hash(self, array):
        """Hash one descriptor; returns (concatenated string, per-projection values)."""
        result = numpy.empty(3)
        self.hash_string = ""
        for j in range(0, 3):
            hashVal = self.listLSH[j].getHashValue(array)
            result[j] = hashVal
            self.hash_string += str(hashVal)
        return self.hash_string, result

    def toFile(self, filename):
        """Dump the 3 projections as a (3, 129) array via numpy.save."""
        dumplist = numpy.zeros([3, 129])
        for i in range(0, 3):
            dumplist[i] = self.listLSH[i].getDumpListArr()
        numpy.save(filename, dumplist)

    def fromFile(self, filename):
        """Rebuild the 3 projections from a numpy.save dump."""
        self.listLSH = []
        dumplist = numpy.load(filename)
        for i in range(0, 3):
            lsh = LSH()
            lsh.fromDumplistArr(128, dumplist[i])
            self.listLSH.append(lsh)
#indeed, we use L HashFunc
class HashTable:
    """L=15 LSH tables mapping descriptor hash strings to image labels."""

    # Class-level defaults; init()/fromFiles() rebind per-instance copies.
    listHashFunc = []
    mapHashStringToBucketID = []
    bucketIDCount = []
    mapBucketIDToImageID = []

    def init(self):
        """Create 15 fresh hash functions and empty bucket maps.

        BUGFIX: state is now bound to the instance instead of appended to the
        shared class attributes, so a second init() (or a second instance) no
        longer doubles up the tables.
        """
        self.listHashFunc = []
        self.mapHashStringToBucketID = []
        self.bucketIDCount = []
        self.mapBucketIDToImageID = []
        for i in range(0, 15):
            func = HashingFunc()
            func.init()
            self.listHashFunc.append(func)
            self.bucketIDCount.append(-1)
            self.mapHashStringToBucketID.append({})
            self.mapBucketIDToImageID.append({})

    def putValue(self, array, imageID):
        """Insert one descriptor under imageID into all 15 tables."""
        for i in range(0, 15):
            hashString, hashVal = self.listHashFunc[i].hash(array)
            if hashString not in self.mapHashStringToBucketID[i]:
                self.bucketIDCount[i] += 1
                self.mapHashStringToBucketID[i][hashString] = self.bucketIDCount[i]
            # Bucket IDs are stored as strings so the maps survive a JSON round-trip.
            bucketID = str(self.mapHashStringToBucketID[i][hashString])
            if bucketID not in self.mapBucketIDToImageID[i]:
                self.mapBucketIDToImageID[i][bucketID] = []
            self.mapBucketIDToImageID[i][bucketID].append(imageID)

    def getValue(self, array):
        """Return the de-duplicated image IDs colliding with `array` in any table."""
        ret = []
        mark = {}
        for i in range(0, 15):
            hashString, hashret = self.listHashFunc[i].hash(array)
            if hashString in self.mapHashStringToBucketID[i]:
                bucketID = str(self.mapHashStringToBucketID[i][hashString])
                for imageID in self.mapBucketIDToImageID[i][bucketID]:
                    if imageID not in mark:
                        ret.append(imageID)
                        mark[imageID] = True
        return ret

    def toFiles(self, folderName):
        """Persist every table (hash functions + maps) under folderName/<i>/."""
        for i in range(0, 15):
            path = folderName + '/' + str(i)
            if not os.path.exists(path):
                os.makedirs(path)
            # numpy.save appends '.npy'; fromFiles() reads 'hash_func.npy'.
            self.listHashFunc[i].toFile(path + '/' + 'hash_func')
            with open(path + '/mapHashStringToBucketID' + '.map', 'w') as f_map:
                f_map.write(json.dumps(self.mapHashStringToBucketID[i]))
            with open(path + '/bucketIDCount.map', 'w') as f_count:
                f_count.write(json.dumps(self.bucketIDCount[i]))
            with open(path + '/imageID.list', 'w') as f_imgid:
                json.dump(self.mapBucketIDToImageID[i], f_imgid)

    def fromFiles(self, folderName):
        """Reload every table previously written by toFiles()."""
        self.listHashFunc = []
        self.mapHashStringToBucketID = []
        self.bucketIDCount = []
        self.mapBucketIDToImageID = []
        for i in range(0, 15):
            path = folderName + '/' + str(i)
            if not os.path.exists(path):
                os.makedirs(path)
            h = HashingFunc()
            h.fromFile(path + '/' + 'hash_func.npy')
            self.listHashFunc.append(h)
            with open(path + '/mapHashStringToBucketID' + '.map', 'r') as f_map:
                self.mapHashStringToBucketID.append(json.load(f_map))
            with open(path + '/bucketIDCount.map', 'r') as f_count:
                self.bucketIDCount.append(json.load(f_count))
            with open(path + '/imageID.list', 'r') as f_imgid:
                self.mapBucketIDToImageID.append(json.load(f_imgid))
def dumper(obj):
    """JSON `default` helper: prefer obj.toJSON(), fall back to its __dict__.

    BUGFIX: the bare `except:` (which also swallowed KeyboardInterrupt and
    genuine toJSON() failures) is narrowed to the missing-attribute case.
    """
    try:
        return obj.toJSON()
    except AttributeError:
        return obj.__dict__
def train():
    """Build the LSH index from ./data/train/<label>/<image> files.

    Each subfolder name is used as the image's label; up to ~145 SIFT
    descriptors per image are inserted into the global hashtable.
    """
    path = './data/train/'
    global hashtable
    for folder in os.listdir(path):
        for f in os.listdir(path + folder):
            print("training " + path+folder+"/"+f)
            img = cv2.imread(path+folder+"/"+f);
            kps,desss = sift(img);
            des_count = 0
            for i in desss:
                des_count +=1
                hashtable.putValue(i,folder);
                # Cap the number of descriptors per image to bound index size.
                if (des_count > 144):
                    break
def query(filename):
    """Look up an image file and return the candidate labels it collides with.

    Mirrors train(): at most ~145 descriptors per image are considered.
    """
    img = cv2.imread(filename)
    kps, desss = sift(img)
    ret = {}
    des_count = 0
    for des in desss:
        # BUGFIX: the counter was never incremented, so the 144-descriptor
        # cap (cf. train()) could never trigger.
        des_count += 1
        tag = hashtable.getValue(des)
        for t in tag:
            ret[t] = True
        if (des_count > 144):
            break
    return ret.keys()
hashtable = HashTable()
def main():
    """Train-or-load the index, query every test image, and intersect results.

    Writes LSH candidate labels for 270 test images to result1.txt, then
    intersects them with the precomputed CEDD candidates from result2.txt
    into result.txt. Files are now managed with context managers so they are
    closed even if a query raises.
    """
    global hashtable
    if os.path.exists("trained_model"):
        hashtable.fromFiles("trained_model")
    else:
        hashtable.init()
        train()
        hashtable.toFiles("trained_model")
    # Query the 270 test images and record the LSH candidate labels.
    with open("result1.txt", "w") as out:
        for x in range(0, 270):
            testFile = "./data/test/" + str(x+1) + ".jpg"
            ret = query(testFile)
            for i in ret:
                out.write(i + " ")
            out.write("\n")
    with open("result1.txt", "r") as f:
        result1 = [line.split() for line in f]
    # Read result from CEDD
    with open("result2.txt", "r") as f:
        result2 = [line.split() for line in f]
    # Join 2 result files to 1 result: keep only labels both methods agree on.
    with open("result.txt", "w") as out:
        for i in range(0, 270):
            for j in range(0, len(result1[i])):
                if result1[i][j] in result2[i]:
                    out.write(result1[i][j] + " ")
            out.write("\n")
main() |
def find_contact(file_name, find_str):
    """Search the contacts file for lines containing `find_str`.

    Each line of `file_name` is one contact. Every matching contact is
    printed via print_contact() together with its 1-based line number; if
    nothing matches, a "no matches" banner is printed instead.
    """
    found_any = False
    # `with` guarantees the file is closed even if print_contact() raises.
    with open(file_name, 'r') as contacts_file:
        for contact_number, raw_line in enumerate(contacts_file, start=1):
            contact = raw_line.rstrip('\n')  # strip the trailing newline only
            if contact.find(find_str) != -1:
                found_any = True
                print_contact(contact, contact_number)
    if not found_any:
        print(' ------------------')
        print(' | СОВПАДЕНИЙ НЕТ |')
        print(' ------------------')
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
import openerp.addons.decimal_precision as dp
from decimal import *
class purchase_order(osv.osv):
    """purchase.order extension adding an importation workflow.

    Importation orders carry the customs-sheet exchange rate plus total
    peso (DOP) and dollar (USD) costs; stock moves are then priced from the
    peso unit price converted at that rate. (OpenERP 7/8, Python 2 code.)
    """
    _inherit = "purchase.order"
    def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
        """Return stock.move value dicts for one purchase order line.

        One move is built per linked procurement (capped at the remaining
        line quantity) plus a final move for any leftover quantity.
        """
        product_uom = self.pool.get('product.uom')
        # Importations are priced from the pesos unit price converted with
        # the customs-sheet rate instead of the line's own price_unit.
        if not order.importation:
            price_unit = order_line.price_unit
        else:
            price_unit = order_line.price_unit_pesos * order.custom_rate
        # Rescale the price when the line UoM differs from the product base UoM.
        if order_line.product_uom.id != order_line.product_id.uom_id.id:
            price_unit *= order_line.product_uom.factor / order_line.product_id.uom_id.factor
        if order.currency_id.id != order.company_id.currency_id.id and not order.importation:
            #we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
            price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
        res = []
        move_template = {
            'name': order_line.name or '',
            'product_id': order_line.product_id.id,
            'product_uom': order_line.product_uom.id,
            'product_uos': order_line.product_uom.id,
            'date': order.date_order,
            'date_expected': fields.date.date_to_datetime(self, cr, uid, order_line.date_planned, context),
            'location_id': order.partner_id.property_stock_supplier.id,
            'location_dest_id': order.location_id.id,
            'picking_id': picking_id,
            'partner_id': order.dest_address_id.id or order.partner_id.id,
            'move_dest_id': False,
            'state': 'draft',
            'purchase_line_id': order_line.id,
            'company_id': order.company_id.id,
            'price_unit': price_unit,
            'picking_type_id': order.picking_type_id.id,
            'group_id': group_id,
            'procurement_id': False,
            'origin': order.name,
            'route_ids': order.picking_type_id.warehouse_id and [(6, 0, [x.id for x in order.picking_type_id.warehouse_id.route_ids])] or [],
            'warehouse_id':order.picking_type_id.warehouse_id.id,
            'invoice_state': order.invoice_method == 'picking' and '2binvoiced' or 'none',
        }
        diff_quantity = order_line.product_qty
        for procurement in order_line.procurement_ids:
            procurement_qty = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, to_uom_id=order_line.product_uom.id)
            tmp = move_template.copy()
            tmp.update({
                'product_uom_qty': min(procurement_qty, diff_quantity),
                'product_uos_qty': min(procurement_qty, diff_quantity),
                'move_dest_id': procurement.move_dest_id.id, #move destination is same as procurement destination
                'group_id': procurement.group_id.id or group_id, #move group is same as group of procurements if it exists, otherwise take another group
                'procurement_id': procurement.id,
                'invoice_state': procurement.rule_id.invoice_state or (procurement.location_id and procurement.location_id.usage == 'customer' and procurement.invoice_state=='picking' and '2binvoiced') or (order.invoice_method == 'picking' and '2binvoiced') or 'none', #dropship case takes from sale
                'propagate': procurement.rule_id.propagate,
            })
            diff_quantity -= min(procurement_qty, diff_quantity)
            res.append(tmp)
        #if the order line has a bigger quantity than the procurement it was for (manually changed or minimal quantity), then
        #split the future stock move in two because the route followed may be different.
        if diff_quantity > 0:
            move_template['product_uom_qty'] = diff_quantity
            move_template['product_uos_qty'] = diff_quantity
            res.append(move_template)
        return res
    # Extra columns driving the importation workflow (labels/help in Spanish).
    _columns = {
        "custom_rate": fields.float("Taza de la planilla de aduanas",
                                    help=u"El valor de la tasa de cambio en la planilla de liquidación de aduanas"),
        "dop_total_cost": fields.float(u"Total de costos en pesos (DOP)",
                                       help="La suma de todas las facturas y gastos en pesos antes de impuestos"),
        "usd_total_cost": fields.float(u"Total de costos en dólares (USD)",
                                       help=u"La suma de todas las facturas y gastos en dólares"),
        "importation": fields.boolean(u"Es una Importación")
    }
    def action_invoice_create(self, cr, uid, ids, context=None):
        """Suppress automatic invoice creation for importation orders.

        NOTE(review): for importations this returns None instead of an
        invoice id -- confirm callers tolerate that.
        """
        order = self.browse(cr, uid, ids, context=context)
        if order.importation:
            pass
        else:
            return super(purchase_order, self).action_invoice_create(cr, uid, ids, context=context)
class purchase_order_line(osv.osv):
    """purchase.order.line extension computing per-unit landed cost in USD."""
    _inherit = 'purchase.order.line'
    def compute_list(self, total_paid, quantities, prices, ids):
        """Spread `total_paid` import expenses over the lines pro-rata.

        Each line's share is proportional to its subtotal (price * qty).
        Returns [{'id': ..., 'price': ...}] where price is the per-unit
        landed cost rounded to 2 decimals; Decimal avoids float drift.
        """
        new_prices = [Decimal(str(p)) for p in prices]
        new_quantities = [Decimal(str(qty)) for qty in quantities]
        # NOTE(review): the loop names are swapped (p iterates quantities,
        # qty iterates prices), but multiplication is commutative so the
        # subtotals are still correct.
        totals_ary = [p*qty for p, qty in zip(new_quantities, new_prices)]
        total = sum(totals_ary)
        total_paid = Decimal(str(total_paid))
        # totals + importation expenses
        total_importation = [ n + total_paid*(n/total) for n in totals_ary]
        # prices + importantion expenses
        expenses = [imp/qty for qty, imp in zip(new_quantities, total_importation)]
        ret = [round(float(expend), 2) for expend in expenses]
        return [{'id': t[0], 'price': t[1]} for t in zip(ids, ret)]
    def extract_lists(self, products):
        """Split [{'id', 'price', 'qty'}, ...] dicts into parallel lists."""
        ids = []
        prices = []
        quantities = []
        for product_info in products:
            ids.append(product_info['id'])
            prices.append(product_info['price'])
            quantities.append(product_info['qty'])
        return {'ids': ids, 'prices': prices, 'quantities': quantities}
    def _amount_line_liquidation(self, cr, uid, ids, prop, arg, context=None):
        """fields.function getter for price_unit_pesos.

        For importation orders returns {line_id: per-unit landed cost} via
        compute_list(); {} for non-importation orders. Raises except_osv
        when a line has a zero subtotal or the order's rate/cost fields are
        not filled in.
        """
        order = self.browse(cr, uid, ids, context=context)[0]
        response = {}
        if order.order_id.importation:
            res_list = []
            rate = order.order_id.custom_rate
            lines = self.browse(cr, uid, ids, context=context)
            for line in lines:
                if line.price_subtotal <= 0:
                    raise osv.except_osv('Alerta!', "No puede agragar productos con valor cero")
                elif rate < 0:
                    raise osv.except_osv('Alerta!', "Debe de expecificar la taza de la planilla de importacion")
                elif order.order_id.dop_total_cost <= 0:
                    raise osv.except_osv('Alerta!', "Debe de expecificar el total de los gastos en pesos para esta importacion")
                elif order.order_id.usd_total_cost <= 0:
                    raise osv.except_osv('Alerta!', "Debe de expecificar el total de los gastos en dolares para esta importacion")
                res = {}
                res["id"] = line.id
                res["qty"] = line.product_qty
                res["price"] = line.price_subtotal / line.product_qty
                res_list.append(res)
            lista = self.extract_lists(res_list)
            # Total import expenses in USD: pesos converted at the customs
            # rate plus the dollar costs.
            total_importacion = (order.order_id.dop_total_cost/rate) + order.order_id.usd_total_cost
            rets = self.compute_list(total_importacion, lista['quantities'], lista['prices'], lista['ids'])
            for ret in rets:
                response[ret["id"]] = ret["price"]
            return response
        return {}
    _columns = {
        'price_unit_pesos': fields.function(_amount_line_liquidation, string='Cost unit (USD)',
                                            digits_compute=dp.get_precision('Account'))
    }
class product_product(osv.osv):
    """product.product override: display names with variants and seller codes."""
    _inherit = "product.product"
    def name_get(self, cr, user, ids, context=None):
        """Return (id, display name) pairs including variant values.

        When context['partner_id'] is set, supplier-specific product names
        and codes are preferred (one entry per matching seller). Records are
        read as SUPERUSER after an explicit access check so users without
        seller/partner access still get names. (Python 2 code: uses `long`
        and a list-returning `filter`.)
        """
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not len(ids):
            return []
        def _name_get(d):
            # Prepend "[default_code]" when display_default_code is requested.
            name = d.get('name', '')
            code = context.get('display_default_code', False) and d.get('default_code', False) or False
            if code:
                name = '[%s] %s' % (code, name)
            return (d['id'], name)
        partner_id = context.get('partner_id', False)
        if partner_id:
            # Match sellers against the partner itself and its commercial entity.
            partner_ids = [partner_id, self.pool['res.partner'].browse(cr, user, partner_id,
                                                                       context=context).commercial_partner_id.id]
        else:
            partner_ids = []
        # all user don't have access to seller and partner
        # check access and use superuser
        self.check_access_rights(cr, user, "read")
        self.check_access_rule(cr, user, ids, "read", context=context)
        result = []
        products = self.browse(cr, SUPERUSER_ID, ids, context=context)
        for product in products:
            variant = ", ".join([v.name for v in product.attribute_value_ids])
            name = variant and "%s (%s)" % (product.name, variant) or product.name
            sellers = []
            if partner_ids:
                sellers = filter(lambda x: x.name.id in partner_ids, product.seller_ids)
            if sellers:
                for s in sellers:
                    seller_variant = s.product_name and "%s (%s)" % (s.product_name, variant) or False
                    mydict = {
                        'id': product.id,
                        'name': seller_variant or name,
                        'default_code': s.product_code or product.default_code,
                    }
                    result.append(_name_get(mydict))
            else:
                mydict = {
                    'id': product.id,
                    'name': name,
                    'default_code': product.default_code,
                }
                result.append(_name_get(mydict))
        return result
import unittest
import sys
from selenium import webdriver
from utils import LogUtil
from utils import common
from utils.TestCaseInfo import TestCaseInfo
from utils.TestReport import TestReport
# sys.path.append("..")
from page import page_lu as page
class TestLu(unittest.TestCase):
    """UI smoke tests for www.lu.com: login and account-balance flows."""

    def setUp(self):
        """Start a Chrome session and initialise per-case reporting/logging."""
        self.driver = webdriver.Chrome()
        self.base_url = 'http://www.lu.com'
        self.testCaseInfo = TestCaseInfo(id='2', name=self.__str__(), owner='Oliver')
        self.testReport = TestReport()
        LogUtil.create_logger_file(__name__)
        self.testCaseInfo.starttime = common.get_current_time()
        LogUtil.log('Open base url: %s' % self.base_url)

    # @unittest.skip("skip test_login")
    def test_login(self):
        """Log in through the main page and verify the session."""
        driver = self.driver
        try:
            main_page = page.MainPage(driver)
            main_page.open(self.base_url)
            main_page.goto_login_page()
            # BUGFIX: LoginPage was constructed with the `page` module
            # instead of the WebDriver instance.
            login_page = page.LoginPage(driver)
            login_page.authenticate()
            main_page.verify_login()
        except Exception as e:
            self.testCaseInfo.errorinfo = str(e)
        else:
            self.testCaseInfo.result = 'Pass'

    # @unittest.skip("skip test_my_account")
    def test_my_account(self):
        """Log in, open the account page and check the balance."""
        driver = self.driver
        self.test_login()
        main_page = page.MainPage(driver)
        main_page.goto_account_page()
        account_page = page.AccountPage(driver)
        account_page.skip_guide()
        account_page.check_balance()

    def tearDown(self):
        """Record timing and write the HTML report for this case."""
        self.testCaseInfo.endtime = common.get_current_time()
        self.testCaseInfo.secondsDuration = common.get_time_diff(self.testCaseInfo.starttime, self.testCaseInfo.endtime)
        self.testReport.WriteHTML(self.testCaseInfo)
if __name__ == '__main__':
    # Run all TestLu cases when executed directly.
    unittest.main()
|
########
#Important parameters
########
# Alternate setup kept for reference (smaller monitor):
# viewing_distance = 60.0 #units can be anything so long as they match those used in screen_width below
# screen_width = 30.0 #units can be anything so long as they match those used in viewing_distance above
# screen_res = (1366,768) #pixel resolution of the screen
viewing_distance = 80 #units can be anything so long as they match those used in screen_width below
screen_width = 50 #units can be anything so long as they match those used in viewing_distance above
screen_res = (1600,900) #pixel resolution of the screen
response_modality = 'trigger' # 'key' or 'button' or 'trigger'
# Response mappings per modality; presumably index 0 = left response and
# index 1 = right response -- confirm against the response-collection code.
response_keys = ['z','/']
response_buttons = [8,9]
response_triggers = ['left','right']
trigger_criterion_value = -.5 #specify the trigger criterion
# Trial factors: target location x target color x flanker condition.
target_location_list = ['left','right','up','down']
target_list = ['black','white']
flankers_list = ['congruent','incongruent','neutral','neutral'] #'neutral' listed twice -- presumably to balance its frequency; confirm
fixation_interval = 1.000
response_timeout = 1.000
ITI = 1.000 #inter-trial interval
reps_per_block = 1
number_of_blocks = 30 #specify the number of blocks
instruction_size_in_degrees = 1 #specify the size of the instruction text
response_feedback_text_size_in_degrees = .5 #specify the size of the feedback text (if used)
target_size_in_degrees = .5 #specify the width of the target
flanker_separation_in_degrees = .25
offset_in_degrees = 3 #specify the vertical offset of the target from fixation
text_width = .9 #specify the proportion of the screen to use when drawing instructions
########
# Import libraries
########
import pygame
import Image
import aggdraw
import math
import sys
import os
import random
import time
import shutil
import hashlib
import multiprocessing
# import cv
########
# Start the random seed
########
seed = time.time() #grab the current time
random.seed(seed) #use the time to set the random seed
########
# Initialize pygame
########
pygame.init() #initialize pygame
pygame.mouse.set_visible(False) #make the mouse invisible
pygame.mixer.init()
# Error feedback sound (played elsewhere; usage not in this excerpt).
error_sound = pygame.mixer.Sound('./_Stimuli/error.wav')
########
# Initialize the gamepad if necessary
########
# Both 'button' and 'trigger' modalities read from the first joystick device.
if response_modality!='key':
    gamepad = pygame.joystick.Joystick(0)
    gamepad.init()
########
# set up the secondary process for writing data
########
if response_modality=='trigger':
    #create a multiprocessing Queue object
    writer_queue = multiprocessing.Queue()
    #create a class for messages to the trigger queue
    class writer_message(object):
        # Plain message record passed through writer_queue; message_type is
        # one of 'initialize', 'write' or 'done'.
        def __init__(self, message_type, sub = '', trial_info = '', trigger_file_name = '', left_values = '', left_times = '', right_values = '', right_times = '' ):
            self.message_type = message_type
            self.sub = sub
            self.trial_info = trial_info
            self.trigger_file_name = trigger_file_name
            self.left_values = left_values
            self.left_times = left_times
            self.right_values = right_values
            self.right_times = right_times
    #define a function to run continuously in a seperate process that monitors the writer queue for data to write and writes what it finds
    def writer(queue):
        # NOTE(review): a 'write' message arriving before 'initialize' would
        # reference an unbound trigger_file; presumably the main process
        # always sends 'initialize' first -- confirm.
        initialized = False
        done = False
        while not done:
            if not queue.empty():
                from_queue = queue.get()
                if from_queue.message_type == 'done':
                    done = True
                    if initialized:
                        trigger_file.close()
                elif from_queue.message_type == 'initialize':
                    initialized = True
                    trigger_file = open(from_queue.trigger_file_name,'a')
                elif from_queue.message_type == 'write':
                    # Index 0 of each values/times list is skipped ([1:]);
                    # rows are tab-separated: sub, trial, side, time, value.
                    if len(from_queue.left_values)>1:
                        for i in range(len(from_queue.left_values))[1:]:
                            for j in range(len(from_queue.left_values[i])):
                                to_write = from_queue.sub+'\t'+from_queue.trial_info+'\tleft\t'+str(from_queue.left_times[i][j])+'\t'+str(from_queue.left_values[i][j])+'\n'
                                trigger_file.write(to_write)
                    if len(from_queue.right_values)>1:
                        for i in range(len(from_queue.right_values))[1:]:
                            for j in range(len(from_queue.right_values[i])):
                                to_write = from_queue.sub+'\t'+from_queue.trial_info+'\tright\t'+str(from_queue.right_times[i][j])+'\t'+str(from_queue.right_values[i][j])+'\n'
                                trigger_file.write(to_write)
    #start up the separate process
    writer_process = multiprocessing.Process(target=writer, args=(writer_queue,))
    writer_process.start()
########
# Initialize the screen
########
screen = pygame.display.set_mode(screen_res, pygame.FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF) #initialize a screen
screen_x_center = screen_res[0]/2 #store the location of the screen's x center
screen_y_center = screen_res[1]/2 #store the location of the screen's y center
########
#Perform some calculations to convert stimulus measurements in degrees to pixels
########
# Visual angle subtended by the full screen width, from viewing geometry.
screen_width_in_degrees = math.degrees(math.atan((screen_width/2.0)/viewing_distance)*2)
PPD = screen_res[0]/screen_width_in_degrees #compute the pixels per degree (PPD)
# Convert all degree-specified sizes to integer pixel sizes.
instruction_size = int(instruction_size_in_degrees*PPD)
response_feedback_text_size = int(response_feedback_text_size_in_degrees*PPD)
target_size = int(target_size_in_degrees*PPD)
flanker_separation = int(flanker_separation_in_degrees*PPD)
offset = int(offset_in_degrees*PPD)
########
#Define some useful colors
########
black = (0,0,0)
white = (255,255,255)
grey = (119,119,119)
# Desaturated red/green (RGB tuples).
red = (173,96,113)
green = (81,131,59)
########
#Initialize the fonts
########
def _largest_font_below(target_height):
    """Size a DejaVuSans font by growing until 'XXX' reaches target_height,
    then stepping back one point (replicates the original grow-then-step-back
    sizing loops, which were duplicated for the two text styles).

    Returns (point size, Font object, rendered 'XXX' height in pixels).
    """
    font_size = 2
    font = pygame.font.Font('_Stimuli/DejaVuSans.ttf', font_size)
    while font.size('XXX')[1] < target_height:
        font_size = font_size + 1
        font = pygame.font.Font('_Stimuli/DejaVuSans.ttf', font_size)
    font_size = font_size - 1
    font = pygame.font.Font('_Stimuli/DejaVuSans.ttf', font_size)
    return font_size, font, font.size('XXX')[1]

response_feedback_text_font_size, response_feedback_text_font, response_feedback_text_height = _largest_font_below(response_feedback_text_size)
instruction_font_size, instruction_font, instruction_height = _largest_font_below(instruction_size)
########
# Create sprites for visual stimuli
########
#define a function to turn PIL/aggdraw images to pygame surfaces
def image2surf(image):
    # NOTE(review): Image.tostring() is the legacy PIL API; modern Pillow
    # renamed it to tobytes() -- confirm which imaging library is installed.
    mode = image.mode
    size = image.size
    data = image.tostring()
    return pygame.image.fromstring(data, size, mode)
def _segmented_circle(odd_sector_color, even_sector_color):
    """Build a 12-sector 'beachball' circle sprite on a grey background.

    Sector i (30 degrees wide) is filled with odd_sector_color when i is odd
    and even_sector_color when i is even; each sector is drawn as 30
    one-degree polygon slivers. Factors out the three near-identical loops
    the script previously repeated for white/black/neutral circles.
    """
    drawing = aggdraw.Draw('RGBA', [target_size, target_size], grey)
    half = target_size / 2.0
    for i in range(12):
        this_degree = i * 30
        if i % 2 == 1:
            brush = aggdraw.Brush(odd_sector_color)
        else:
            brush = aggdraw.Brush(even_sector_color)
        for j in range(30):
            this_degree = this_degree + 1
            drawing.polygon(
                (
                    int(round(half))
                    , int(round(half))
                    , int(round(half + math.sin(this_degree*math.pi/180)*half))
                    , int(round(half + math.cos(this_degree*math.pi/180)*half))
                    , int(round(half + math.sin((this_degree+1)*math.pi/180)*half))
                    , int(round(half + math.cos((this_degree+1)*math.pi/180)*half))
                )
                , brush
            )
    return image2surf(drawing)

# Same odd/even sector color pairs as the original three hand-written loops.
white_circle = _segmented_circle(white, grey)
black_circle = _segmented_circle(grey, black)
neutral_circle = _segmented_circle(white, black)

def _target_with_flankers(center_sprite, flanker_sprite):
    """Blit a center target surrounded by 4 flankers (left/right/above/below)
    onto a transparent surface sized 3 targets + 2 separations square."""
    span = target_size*3 + flanker_separation*2
    near = target_size + flanker_separation
    far = target_size*2 + flanker_separation*2
    surf = pygame.Surface((span, span), pygame.SRCALPHA)
    surf.blit(center_sprite, (near, near))
    for position in ((0, near), (far, near), (near, 0), (near, far)):
        surf.blit(flanker_sprite, position)
    return surf

black_congruent = _target_with_flankers(black_circle, black_circle)
black_incongruent = _target_with_flankers(black_circle, white_circle)
black_neutral = _target_with_flankers(black_circle, neutral_circle)
white_congruent = _target_with_flankers(white_circle, white_circle)
white_incongruent = _target_with_flankers(white_circle, black_circle)
white_neutral = _target_with_flankers(white_circle, neutral_circle)

# A plain grey patch used to erase a previously drawn target array.
eraser = image2surf(aggdraw.Draw('RGBA', [target_size*3+flanker_separation*2, target_size*3+flanker_separation*2], grey))
########
# Drawing and helper functions
########
#draw a pygame surface centered on the screen, shifted by pixel offsets
def blit_to_screen(surf,x_offset=0,y_offset=0):
    """Blit `surf` so its center lands at screen center + (x_offset, y_offset)."""
    top_left = (
        screen_x_center + x_offset - surf.get_width() / 2.0,
        screen_y_center + y_offset - surf.get_height() / 2.0,
    )
    screen.blit(surf, top_left)
#define a function that draws a target on the screen
def draw_target(target_location,target,flankers):
    """Blit the flanker array for the given target color and flanker
    condition at the requested location relative to fixation."""
    if target == 'black':
        sprite = {'congruent': black_congruent,
                  'incongruent': black_incongruent}.get(flankers, black_neutral)
    else:
        sprite = {'congruent': white_congruent,
                  'incongruent': white_incongruent}.get(flankers, white_neutral)
    if target_location == 'left':
        blit_to_screen(sprite, x_offset=-offset)
    elif target_location == 'right':
        blit_to_screen(sprite, x_offset=offset)
    elif target_location == 'up':
        blit_to_screen(sprite, y_offset=-offset)
    else:
        blit_to_screen(sprite, y_offset=offset)
#define a function that erases a previously drawn target
def erase_target(target_location):
    """Cover the target position with the grey eraser patch."""
    shift = {'left': {'x_offset': -offset},
             'right': {'x_offset': offset},
             'up': {'y_offset': -offset}}.get(target_location, {'y_offset': offset})
    blit_to_screen(eraser, **shift)
#define a function that busy-waits for a given duration
def simple_wait(duration):
    """Busy-wait for `duration` seconds (spinning, for timing precision)."""
    deadline = time.time() + duration
    while time.time() < deadline:
        pass
#define a function that formats text for the screen
def draw_text(my_text, instruction_font, text_color, my_surface, text_width):
    """Word-wrap and render multi-line text, centered on `my_surface`.

    Newlines in `my_text` separate paragraphs; `text_width` is the fraction
    of the surface width usable per line. Lines are wrapped greedily using
    the font's pixel metrics, and the whole block is vertically centered.
    """
    my_surface_rect = my_surface.get_rect()
    # maximum line width in pixels
    text_width_max = int(my_surface_rect.size[0]*text_width)
    paragraphs = my_text.split('\n')
    render_list = []
    text_height = 0
    for this_paragraph in paragraphs:
        words = this_paragraph.split(' ')
        if len(words)==1:
            # single-word paragraph: no wrapping needed
            render_list.append(words[0])
            if (this_paragraph!=paragraphs[len(paragraphs)-1]):
                # blank spacer line between paragraphs
                render_list.append(' ')
            text_height = text_height + instruction_font.get_linesize()
        else:
            this_word_index = 0
            while this_word_index < (len(words)-1):
                line_start = this_word_index
                line_width = 0
                # greedily take words until the line overflows or the paragraph ends
                while (this_word_index < (len(words)-1)) and (line_width <= text_width_max):
                    this_word_index = this_word_index + 1
                    line_width = instruction_font.size(' '.join(words[line_start:(this_word_index+1)]))[0]
                if this_word_index < (len(words)-1):
                    #last word went over, paragraph continues
                    render_list.append(' '.join(words[line_start:(this_word_index-1)]))
                    text_height = text_height + instruction_font.get_linesize()
                    this_word_index = this_word_index-1
                else:
                    if line_width <= text_width_max:
                        #short final line
                        render_list.append(' '.join(words[line_start:(this_word_index+1)]))
                        text_height = text_height + instruction_font.get_linesize()
                    else:
                        #full line then 1 word final line
                        render_list.append(' '.join(words[line_start:this_word_index]))
                        text_height = text_height + instruction_font.get_linesize()
                        render_list.append(words[this_word_index])
                        text_height = text_height + instruction_font.get_linesize()
            #at end of paragraph, check whether an inter-paragraph space should be added
            if (this_paragraph!=paragraphs[len(paragraphs)-1]):
                render_list.append(' ')
                text_height = text_height + instruction_font.get_linesize()
    num_lines = len(render_list)*1.0
    # vertically center the whole block, rendering one line at a time
    for this_line in range(len(render_list)):
        this_render = instruction_font.render(render_list[this_line], True, text_color)
        this_render_rect = this_render.get_rect()
        this_render_rect.centerx = my_surface_rect.centerx
        this_render_rect.centery = int(my_surface_rect.centery - text_height/2.0 + 1.0*this_line/num_lines*text_height)
        my_surface.blit(this_render, this_render_rect)
#define a function that waits for a response
def wait_for_response():
    """Block until a response is received, then return it.

    Keyboard: any key returns its unicode character; escape performs a clean
    shutdown (flushing the trigger-writer process in trigger mode).
    Joystick: in 'button' mode a JOYBUTTONDOWN returns the button number;
    otherwise axis 4/5 values above trigger_criterion_value return
    'left'/'right'. Relies on module-level `response_modality`,
    `writer_queue`, `writer_process`, and `data_file`.
    """
    pygame.event.clear()
    done = False
    while not done:
        pygame.event.pump()
        for event in pygame.event.get() :
            if event.type == pygame.KEYDOWN :
                response = event.unicode
                if response == '\x1b':
                    # escape: clean shutdown of writer process and data file
                    pygame.quit()
                    if response_modality=='trigger':
                        writer_queue.put(writer_message('done'))
                        writer_process.join()
                        simple_wait(1)
                        if writer_process.is_alive():
                            writer_process.terminate()
                    try:
                        data_file.close()
                    except:
                        pass
                    sys.exit()
                else:
                    done = True
            else:
                if response_modality=='button':
                    if event.type == pygame.JOYBUTTONDOWN:
                        response = event.button
                        done = True
                else:
                    if event.type == pygame.JOYAXISMOTION:
                        if (event.axis==4):
                            if event.value>trigger_criterion_value:
                                response = 'left'
                                done = True
                        elif (event.axis==5):
                            if event.value>trigger_criterion_value:
                                response = 'right'
                                done = True
    pygame.event.clear()
    return response
#define a function that shows a message until the user responds; returns seconds waited
def show_message(my_text):
    """Display `my_text`, wait for any response, and return the elapsed time."""
    t_start = time.time()
    pygame.event.pump()
    pygame.event.clear()
    # blank the display, then render the message on the back buffer
    screen.fill(black)
    pygame.display.flip()
    screen.fill(black)
    draw_text(my_text, instruction_font, grey, screen, text_width)
    simple_wait(.5)
    # reveal the message and hold it until a response arrives
    pygame.display.flip()
    screen.fill(black)
    wait_for_response()
    # blank again before returning
    pygame.display.flip()
    screen.fill(black)
    simple_wait(.5)
    return time.time() - t_start
#define a function that requests user input
def get_input(get_what):
    """Prompt with `get_what` and collect typed text until Return.

    Echoes the text as it is typed; backspace ('\\x7f') deletes the last
    character, Return ('\\r') accepts, and escape performs a clean program
    shutdown. Returns the entered string.
    """
    get_what = get_what+'\n'
    text_input = ''
    screen.fill(black)
    pygame.display.flip()
    simple_wait(.5)
    my_text = get_what+text_input
    screen.fill(black)
    draw_text(my_text, instruction_font, grey, screen, text_width)
    pygame.display.flip()
    screen.fill(black)
    done = False
    while not done:
        pygame.event.pump()
        for event in pygame.event.get() :
            if event.type == pygame.KEYDOWN :
                key_down = event.unicode
                if key_down == '\x1b':
                    # escape: clean shutdown of writer process and data file
                    pygame.quit()
                    if response_modality=='trigger':
                        writer_queue.put(writer_message('done'))
                        writer_process.join()
                        simple_wait(1)
                        if writer_process.is_alive():
                            writer_process.terminate()
                    try:
                        data_file.close()
                    except:
                        pass
                    sys.exit()
                elif key_down == '\x7f':
                    # backspace: drop last character and redraw the prompt
                    if text_input!='':
                        text_input = text_input[0:(len(text_input)-1)]
                    my_text = get_what+text_input
                    screen.fill(black)
                    draw_text(my_text, instruction_font, grey, screen, text_width)
                    pygame.display.flip()
                elif key_down == '\r':
                    # return: accept the input
                    done = True
                else:
                    text_input = text_input + key_down
                    my_text = get_what+text_input
                    screen.fill(black)
                    draw_text(my_text, instruction_font, grey, screen, text_width)
                    pygame.display.flip()
    screen.fill(black)
    pygame.display.flip()
    return text_input
#define a function that obtains subject info via user input
def get_sub_info():
    """Collect subject/demographic info (or 'test' placeholders) via on-screen prompts.

    Returns [sid, year, month, day, hour, minute, gender, age, handedness,
    languages, music, gaming, password_hash]; the password is SHA-512 hashed.
    """
    year = time.strftime('%Y')
    month = time.strftime('%m')
    day = time.strftime('%d')
    hour = time.strftime('%H')
    minute = time.strftime('%M')
    sid = get_input('SID (\'test\' to demo):')
    if sid != 'test':
        gender = get_input('Gender (m or f):')
        age = get_input('Age (2-digit number):')
        handedness = get_input('Handedness (r or l):')
        languages = get_input('Number of fluent languages:')
        music = get_input('Number of years playing a musical instrument:')
        gaming = get_input('Hours of gaming per week, averaged over the past 5 years:')
        password = get_input('Please enter a password:')
    else:
        gender='test'
        age='test'
        handedness='test'
        languages = 'test'
        music = 'test'
        gaming = 'test'
        password = 'test'
    # NOTE(review): hashlib.sha512 requires bytes under Python 3; passing a
    # str only works on Python 2 -- confirm the target interpreter, or
    # encode the password first.
    password = hashlib.sha512(password).hexdigest()
    sub_info = [ sid , year , month , day , hour , minute , gender , age , handedness , languages , music , gaming , password ]
    return sub_info
#define a function that initializes the data file
def initialize_data_files(password):
    """Create the per-subject data directory and files; return the open data file.

    Uses the module-level `sub_info` to name the directory, snapshots the
    experiment code next to the data, writes the hashed `password` as the
    first line followed by a tab-separated header, and (in trigger mode)
    creates the trigger file and notifies the writer process. The returned
    handle stays open; the caller closes it at shutdown.
    """
    if not os.path.exists('_Data'):
        os.mkdir('_Data')
    if sub_info[0]=='test':
        filebase = 'test'
    else:
        # e.g. sid_year_month_day_hour_minute
        filebase = '_'.join(sub_info[0:6])
    if not os.path.exists('_Data/'+filebase):
        os.mkdir('_Data/'+filebase)
    # keep a copy of the code that generated this data
    shutil.copy('main.py', '_Data/'+filebase+'/'+filebase+'_code.py')
    data_file_name = '_Data/'+filebase+'/'+filebase+'_data.txt'
    data_file = open(data_file_name,'w')
    data_file.write(password+'\n')
    header ='\t'.join(['id' , 'year' , 'month' , 'day' , 'hour' , 'minute' , 'gender' , 'age' , 'handedness' , 'languages' , 'music' , 'gaming' , 'wait' , 'block' , 'trial' , 'target_location' , 'target' , 'flankers' , 'rt' , 'response' , 'error' , 'pre_target_response' , 'ITI_response' ])
    data_file.write(header+'\n')
    to_return = data_file
    if response_modality=='trigger':
        trigger_file_name = '_Data/'+filebase+'/'+filebase+'_trigger.txt'
        trigger_file = open(trigger_file_name,'w')
        header ='\t'.join(['id' , 'block' , 'trial' , 'target_location' , 'target' , 'flankers' , 'trigger' , 'time' , 'value' ])
        trigger_file.write(header+'\n')
        trigger_file.close()
        writer_queue.put(writer_message('initialize',trigger_file_name=trigger_file_name))
    return to_return
#define a function that generates a randomized list of trial-by-trial stimulus information representing a factorial combination of the independent variables.
def get_trials():
    """Build one block's shuffled trial list (location x target x flankers x reps).

    Bug fix: the original assigned the random 'up'/'down' replacement for a
    'neutral' location back into the outer loop variable, so after the first
    such trial every remaining repetition (and flanker combination) reused
    that single random draw instead of re-randomizing per trial.
    """
    trials = []
    for target_location in target_location_list:
        for target in target_list:
            for flankers in flankers_list:
                for i in range(reps_per_block):
                    # resolve 'neutral' to a fresh random vertical location per trial
                    if target_location == 'neutral':
                        this_location = random.choice(['up', 'down'])
                    else:
                        this_location = target_location
                    trials.append([this_location, target, flankers])
    random.shuffle(trials)
    return trials
#define a function to check for user input during a trial loop
def check_for_input(now):
    """Poll pygame's event queue once and classify pending input.

    `now` is the timestamp (seconds relative to target onset) recorded for
    any trigger-axis samples collected on this poll. Returns
    [responses, left_values, left_times, right_values, right_times].
    Escape performs a clean program shutdown.
    """
    # Bug fix: the original read `pygame.event.pump` without calling it, so
    # the statement was a no-op and the event queue was never pumped here.
    pygame.event.pump()
    responses = []
    these_left_values = []
    these_left_times = []
    these_right_values = []
    these_right_times = []
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN :
            if event.key == pygame.K_ESCAPE:
                # escape: clean shutdown of writer process and data file
                pygame.quit()
                if response_modality=='trigger':
                    writer_queue.put(writer_message('done'))
                    writer_process.join()
                    simple_wait(1)
                    if writer_process.is_alive():
                        writer_process.terminate()
                try:
                    data_file.close()
                except Exception:
                    # best-effort close during emergency exit
                    pass
                sys.exit()
            else:
                responses.append(event.unicode)
        if response_modality=='button':
            if event.type == pygame.JOYBUTTONDOWN:
                responses.append(event.button)
        elif response_modality=='trigger':
            if event.type == pygame.JOYAXISMOTION:
                if event.axis==4:
                    these_left_values.append(event.value)
                    these_left_times.append(now)
                    if event.value>=trigger_criterion_value:
                        responses.append('left')
                    if event.value==1:
                        # warn when the trigger is pressed all the way down
                        error_sound.play()
                elif event.axis==5:
                    these_right_values.append(event.value)
                    these_right_times.append(now)
                    if event.value>=trigger_criterion_value:
                        responses.append('right')
                    if event.value==1:
                        error_sound.play()
    return [responses,these_left_values,these_left_times,these_right_values,these_right_times]
#define a function that runs a block of trials
def run_block(block,message_viewing_time):
    """Run one block of flanker trials, writing one data row per trial.

    `block` is a label ('practice' or a block number); `message_viewing_time`
    is logged with every trial. Relies on module-level stimuli, timing
    constants, response mappings, and the open `data_file`.
    """
    #get a trial list
    trial_list = get_trials()
    #prep some variables
    trial_num = 0
    #start running trials
    for this_trial_info in trial_list:
        #bump the trial number
        trial_num = trial_num + 1
        #parse the trial info
        target_location,target,flankers = this_trial_info
        #prep the fixation screen
        screen.fill(grey)
        blit_to_screen(neutral_circle)
        #start the trial by showing the fixation screen
        pygame.display.flip()
        #get the trial start time
        trial_start_time = time.time()
        #prep the target screen (drawn to the back buffer, flipped later)
        screen.fill(grey)
        blit_to_screen(neutral_circle)
        draw_target(target_location,target,flankers)
        #prep some variables
        pre_target_response = 'FALSE'
        ITI_response = 'FALSE'
        response = 'NA'
        rt = 'NA'
        error = 'NA'
        #prep some data info to write later
        this_trial_info = '\t'.join(map(str,this_trial_info))
        this_trial_info = '\t'.join(map(str,[block,trial_num,this_trial_info]))
        target_done = False
        target_on = False
        target_on_time = trial_start_time+fixation_interval
        # NOTE: variable name contains a typo ("reseponse"); used consistently below
        reseponse_timeout_time = target_on_time + response_timeout
        ITI_done = False
        # sentinel samples so the [-1][-1] lookups below are always valid
        left_values = [[-1]]
        left_times = [[0]]
        right_values = [[-1]]
        right_times = [[0]]
        response = False
        #start the trial loop
        loop_done = False
        while not loop_done:
            now = time.time()
            #check if the stimulus display needs updating
            if not target_done:
                if not target_on:
                    if now>=target_on_time:
                        target_on = True
                        pygame.display.flip() #show the target
                else: #target is done
                    if now>=reseponse_timeout_time:
                        ITI_done_time = reseponse_timeout_time + ITI
                        loop_done = True
                        break
            #check for user input
            responses,these_left_values,these_left_times,these_right_values,these_right_times = check_for_input(now-target_on_time)
            if response_modality=='trigger':
                if len(these_left_values)>0:
                    left_values.append(these_left_values)
                    left_times.append(these_left_times)
                if len(these_right_values)>0:
                    right_values.append(these_right_values)
                    right_times.append(these_right_times)
            del these_left_values,these_left_times,these_right_values,these_right_times
            #process responses (if any)
            if len(responses)>0:
                if not target_on:
                    pre_target_response = 'TRUE'
                else:
                    rt = now - target_on_time
                    ITI_done_time = now + ITI
                    response = responses[0]
                    loop_done = True
        #done the main loop
        #if necessary keep checking for trigger data to finish the response
        if response_modality=='trigger':
            if response!='NA':
                left_done = False
                right_done = False
                # print [left_values[-1][-1],right_values[-1][-1]]
                response_done = False
                while not response_done:
                    responses,these_left_values,these_left_times,these_right_values,these_right_times = check_for_input(time.time()-target_on_time)
                    # print [responses,these_left_values,these_right_values]
                    if len(these_left_values)>0:
                        left_values.append(these_left_values)
                        left_times.append(these_left_times)
                        # print ['left', left_values[-1][-1]]
                    if len(these_right_values)>0:
                        right_values.append(these_right_values)
                        right_times.append(these_right_times)
                        # print ['right', right_values[-1][-1]]
                    del these_left_values,these_left_times,these_right_values,these_right_times
                    if left_values[-1][-1]<=-1:
                        left_done = True
                    if right_values[-1][-1]<=-1:
                        right_done = True
                    # bitwise & on two bools behaves like `and` here
                    if (left_done) & (right_done):
                        response_done = True
        #process the respose info
        # NOTE(review): joystick button 0 is falsy, so `not response` would
        # misclassify a button-0 press as "no response" -- confirm the button
        # indices actually used.
        if not response:
            response = 'NA'
            error = 'NA'
            screen.fill(grey)
            pygame.display.flip()
        else:
            if response_modality=='key':
                if response==black_key:
                    response = 'black'
                elif response==white_key:
                    response = 'white'
            elif response_modality=='button':
                if response==black_button:
                    response = 'black'
                elif response==white_button:
                    response = 'white'
            else:
                if response==black_trigger:
                    response = 'black'
                elif response==white_trigger:
                    response = 'white'
            if response=='black':
                response_feedback_color = black
            elif response=='white':
                response_feedback_color = white
            if response == target:
                error = 'FALSE'
            else:
                error = 'TRUE'
            # feedback: the RT in ms, rendered in the color of the chosen response
            response_feedback_text = response_feedback_text_font.render(str(int(round(rt*1000))),True,response_feedback_color,grey)
            #prep and draw the ITI screen
            screen.fill(grey)
            blit_to_screen(response_feedback_text)
            pygame.display.flip()
        #wait for the ITI to elapse
        ITI_done = False
        while not ITI_done:
            now = time.time()
            if now>=ITI_done_time:
                ITI_done = True
            #check for user input
            responses,these_left_values,these_left_times,these_right_values,these_right_times = check_for_input(now-target_on_time)
            if response_modality=='trigger':
                if len(these_left_values)>0:
                    left_values.append(these_left_values)
                    left_times.append(these_left_times)
                if len(these_right_values)>0:
                    right_values.append(these_right_values)
                    right_times.append(these_right_times)
            del these_left_values,these_left_times,these_right_values,these_right_times
            #process responses (if any)
            if len(responses)>0:
                ITI_response = 'TRUE'
        #send analog trigger data to a seperate process to be written out to file
        if response_modality=='trigger':
            message = writer_message(message_type='write',sub=sub_info[0],trial_info=this_trial_info,left_values=left_values,left_times=left_times,right_values=right_values,right_times=right_times)
            writer_queue.put(message)
        #write out data
        trial_info = '\t'.join(map(str, [ sub_info_for_file , message_viewing_time , block , trial_num , target_location , target , flankers , rt , response , error , pre_target_response , ITI_response]))
        data_file.write(trial_info+'\n')
def do_demo():
    """Step through demonstration screens before the experiment starts.

    '2' advances, '1' goes back, 'q' exits the demo, and escape performs a
    clean program shutdown. `screen_num` selects which demo display is
    shown; note it is not bounds-checked, so numbers outside 1-9 simply
    show a blank grey screen.
    """
    screen.fill(grey)
    response_feedback_text = instruction_font.render('White = '+white_trigger+'; black = '+black_trigger,True,white,grey)
    blit_to_screen(response_feedback_text)
    pygame.display.flip()
    screen_num = 0
    done = False
    while not done:
        update = False
        pygame.event.pump()
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN :
                response = event.unicode
                if response == '\x1b':
                    # escape: clean shutdown of writer process and data file
                    pygame.quit()
                    if response_modality=='trigger':
                        writer_queue.put(writer_message('done'))
                        writer_process.join()
                        simple_wait(1)
                        if writer_process.is_alive():
                            writer_process.terminate()
                    try:
                        data_file.close()
                    except:
                        pass
                    sys.exit()
                elif response=='2':
                    screen_num = screen_num + 1
                    update = True
                elif response=='1':
                    screen_num = screen_num - 1
                    update = True
                elif response=='q':
                    done = True
        if update:
            screen.fill(grey)
            if screen_num==1:
                blit_to_screen(neutral_circle)
            elif screen_num==2:
                blit_to_screen(neutral_circle)
                blit_to_screen(white_neutral,x_offset=-offset)
            elif screen_num==3:
                response_feedback_text = response_feedback_text_font.render('549',True,white,grey)
                blit_to_screen(response_feedback_text)
            elif screen_num==4:
                blit_to_screen(neutral_circle)
            elif screen_num==5:
                blit_to_screen(neutral_circle)
                blit_to_screen(black_neutral,x_offset=offset)
            elif screen_num==6:
                response_feedback_text = response_feedback_text_font.render('483',True,black,grey)
                blit_to_screen(response_feedback_text)
            elif screen_num==7:
                blit_to_screen(neutral_circle)
            elif screen_num==8:
                blit_to_screen(neutral_circle)
                blit_to_screen(black_neutral,y_offset=-offset)
            elif screen_num==9:
                response_feedback_text = response_feedback_text_font.render('595',True,white,grey)
                blit_to_screen(response_feedback_text)
            pygame.display.flip()
########
# Start the experiment
########
#get subject info
sub_info = get_sub_info()
sub_info_for_file = '\t'.join(map(str,sub_info[0:-1]))
password = sub_info[-1]
#counter-balance stimulus-response mapping by odd/even subject id
mapping = [0,1]
if sub_info[0]!='test':
    if (int(sub_info[0])%2)==1:
        mapping = [1,0]
white_key = response_keys[mapping[0]]
black_key = response_keys[mapping[1]]
white_button = response_buttons[mapping[0]]
black_button = response_buttons[mapping[1]]
white_trigger = response_triggers[mapping[0]]
black_trigger = response_triggers[mapping[1]]
#initialize the data file
data_file = initialize_data_files(password)
#show some demo screens, then run practice followed by the real blocks
do_demo()
message_viewing_time = show_message('Press any trigger to begin practice.')
run_block('practice',message_viewing_time)
message_viewing_time = show_message('Practice is complete.\nPress any trigger to begin the experiment.')
block = 0
for i in range(number_of_blocks):
    block = i+1
    run_block(block,message_viewing_time)
    # Bug fix: the original tested `i < number_of_blocks`, which is always
    # true inside this loop, so the "Take a break!" screen also appeared
    # after the final block, immediately before the completion message.
    if i < (number_of_blocks - 1):
        message_viewing_time = show_message('Take a break!\nYou\'re about '+str(block)+'/'+str(number_of_blocks)+' done.\nWhen you are ready, press any trigger to continue the experiment.')
message_viewing_time = show_message('You\'re all done!\nPlease alert the person conducting this experiment that you have finished.')
pygame.quit()
#shut down the trigger-writer process cleanly, then close the data file
if response_modality=='trigger':
    writer_queue.put(writer_message('done'))
    writer_process.join()
    simple_wait(1)
    if writer_process.is_alive():
        writer_process.terminate()
data_file.close()
sys.exit()
|
# Mailgun HTTP API endpoint for the sandbox domain.
URL = "https://api.mailgun.net/v3/sandboxc2e75bed92b1485286fc02a3480847f8.mailgun.org/messages"
# SECURITY NOTE(review): a live-looking API key is hard-coded in source.
# Move it to an environment variable / secrets store and rotate the key.
API_KEY = "key-7a869a6056a3b8bbc14d06cc2b585392"
# Default sender address for outgoing mail.
FROM = "Mailgun Sandbox <postmaster@sandboxc2e75bed92b1485286fc02a3480847f8.mailgun.org>"
# Alert timeout (units presumed seconds -- confirm at the call site).
ALERT_TIMEOUT = 10
# Name of the datastore collection where alerts are kept (backend not
# visible here -- confirm).
COLLECTION = "alerts"
#!/bin/python3
import sys
def __is_palindrome(s):
    """Return True when `s` reads the same forwards and backwards."""
    return all(a == b for a, b in zip(s, reversed(s)))
def theLoveLetterMystery(s):
    """Minimum number of unit letter-decrements needed to make `s` a palindrome.

    Each mismatched pair (s[i], s[-1-i]) costs the absolute difference of
    the two character codes. Zipping `s` with its reverse visits every
    pair twice, so the total is halved.
    """
    if __is_palindrome(s):
        return 0
    return sum(abs(ord(a) - ord(b)) for a, b in zip(s, reversed(s))) // 2
# Read the number of queries, then one string per query; print the cost
# to make each string a palindrome.
q = int(input().strip())
for a0 in range(q):
    s = input().strip()
    result = theLoveLetterMystery(s)
    print(result)
|
class BinarySearchTree:
    """Simple unbalanced binary search tree; duplicates are stored to the right."""

    def __init__(self, value):
        # value stored at this node
        self.value = value
        # left/right child subtrees (BinarySearchTree or None)
        self.left = None
        self.right = None

    def depth_first_for_each(self, cb):
        """Pre-order depth-first traversal: this node, left subtree, right subtree."""
        cb(self.value)
        if self.left is not None:
            self.left.depth_first_for_each(cb)
        if self.right is not None:
            self.right.depth_first_for_each(cb)

    def breadth_first_for_each(self, cb):
        """Level-order (breadth-first) traversal, invoking `cb` once per node value.

        Bug fix: the original visited the root and then recursed down
        left/right pairs only when a node had exactly two children, so
        subtrees hanging off single-child nodes were silently skipped, and
        the visit order was not level order. A FIFO queue gives a true
        O(n) BFS.
        """
        from collections import deque  # local import; module has no import block
        queue = deque([self])
        while queue:
            node = queue.popleft()
            cb(node.value)
            if node.left is not None:
                queue.append(node.left)
            if node.right is not None:
                queue.append(node.right)

    def insert(self, value):
        """Insert `value`: smaller values go left, equal or larger go right."""
        if value < self.value:
            if self.left is None:
                self.left = BinarySearchTree(value)
            else:
                self.left.insert(value)
        else:
            if self.right is None:
                self.right = BinarySearchTree(value)
            else:
                self.right.insert(value)

    def contains(self, target):
        """Return True if `target` exists anywhere in the tree.

        Searches both subtrees (does not assume BST ordering), preserving
        the original behavior for trees not built via insert().
        """
        if self.value == target:
            return True
        if self.left:
            if self.left.contains(target):
                return True
        if self.right:
            if self.right.contains(target):
                return True
        return False

    def get_max(self):
        """Return the largest value along the rightmost path (the max for a BST)."""
        max_value = self.value
        current = self
        while current:
            if current.value > max_value:
                max_value = current.value
            current = current.right
        return max_value
|
import datetime
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_call_later
import logging
_LOGGER = logging.getLogger(__name__)
# Home Assistant integration domain: the service registers as actuator.actuate.
DOMAIN = 'actuator'
# Schema for the `actuator.actuate` service call data.
ACTUATE_SCHEMA = vol.Schema({
    vol.Required('sensor_id'): cv.string,       # sensor entity to read
    vol.Optional('sensor_attr'): cv.string,     # read this attribute instead of the state
    vol.Required('sensor_values'): list,        # thresholds, lowest first (scanned high-to-low)
    vol.Optional('alt_sensor_values'): list,    # thresholds used during alt_time_range
    vol.Optional('alt_time_range'): list,       # [start_hour, end_hour); may wrap midnight
    vol.Required('entity_id'): cv.string,       # entity to control
    vol.Optional('entity_attr'): cv.string,     # attribute compared/set on the entity
    vol.Optional('service'): cv.string,         # service to call (default 'set_<service_attr>')
    vol.Optional('service_attr'): cv.string,    # service data key (defaults to entity_attr)
    vol.Required('entity_values'): list,        # target values matching sensor_values
    vol.Optional('condition_attr'): cv.string,  # optional guard attribute ('STATE' = entity state)
    vol.Optional('condition_values'): list,     # allowed guard values
    vol.Optional('delay'): int,                 # debounce delay in seconds (default 120)
})
# Module-global Home Assistant handle, set in setup().
_hass = None
# Pending DelayExecutor instances keyed by 'entity_id~attr'.
_executors = {}
def execute(params):
    """Read a sensor, compare it against thresholds, and drive an entity.

    `params` follows ACTUATE_SCHEMA: the numeric sensor reading is matched
    against `sensor_values` (or `alt_sensor_values` during the
    `alt_time_range` hours) from the highest threshold down, and the entity
    attribute is set to the corresponding entry of `entity_values`; below
    the lowest threshold the entity is turned off. An optional condition on
    the entity's own state/attributes can veto the whole action.
    """
    # Get entity state
    entity_id = params.get('entity_id')
    domain = entity_id[:entity_id.find('.')]
    state = _hass.states.get(entity_id)
    if state is None:
        # Bug fix: the original logged `sensor_id`, which is not defined
        # until later in this function, so this path raised a NameError.
        _LOGGER.error("Entity %s error", entity_id)
        return
    state_value = state.state
    state_attributes = state.attributes
    # Check optional guard condition on the entity itself
    condition_attr = params.get('condition_attr')
    if condition_attr is not None:
        condition_value = state_value if condition_attr == 'STATE' else state_attributes.get(
            condition_attr)
        if condition_value is None:
            #_LOGGER.debug('Check condition: condition_value is None')
            return
        condition_values = params.get('condition_values')
        if condition_values is not None and condition_value not in condition_values:
            #_LOGGER.debug('Check condition: %s not in %s', condition_value, condition_values)
            return
    # Get sensor state; certain hours may select the alternative threshold list
    sensor_id = params.get('sensor_id')
    sensor_attr = params.get('sensor_attr')
    alt_time_range = params.get('alt_time_range') or [20, 8]
    if 'alt_sensor_values' in params:
        hour = datetime.datetime.now().hour
        if alt_time_range[1] > alt_time_range[0]:
            alt_time = hour >= alt_time_range[0] and hour < alt_time_range[1]
        else:
            # range wraps past midnight, e.g. [20, 8)
            alt_time = hour >= alt_time_range[0] or hour < alt_time_range[1]
    else:
        alt_time = False
    sensor_values = params.get(
        'alt_sensor_values' if alt_time else 'sensor_values')
    sensor_state = _hass.states.get(sensor_id)
    try:
        sensor_attributes = sensor_state.attributes
        sensor_value = sensor_state.state if sensor_attr is None else sensor_attributes.get(
            sensor_attr)
        sensor_number = float(sensor_value)
    except (AttributeError, TypeError, ValueError):
        # missing sensor (None state) or a non-numeric reading
        _LOGGER.error("Sensor %s %s error", sensor_id, sensor_attr or '')
        return
    # Log prefix
    sensor_log = sensor_attributes.get('friendly_name')
    if sensor_attr:
        sensor_log += '.' + sensor_attr
    sensor_log += '=' + str(sensor_value)
    # Action params
    entity_attr = params.get('entity_attr')
    service_attr = params.get('service_attr') or entity_attr
    service = params.get('service') or 'set_' + service_attr
    entity_values = params.get('entity_values')
    entity_log = state_attributes.get('friendly_name')
    # Walk thresholds from highest to lowest and act on the first match
    i = len(sensor_values) - 1
    while i >= 0:
        if sensor_number >= sensor_values[i]:
            sensor_log += '≥' + str(sensor_values[i])
            from_value = state_value if entity_attr is None else state_attributes.get(
                entity_attr)
            to_value = entity_values[i]
            if entity_attr:
                entity_log += '.' + entity_attr
            entity_log += '=' + str(from_value)
            if state_value == 'off':
                # entity must be on before its attribute can be set
                entity_log += ', ⇒on'
                _hass.services.call(domain, 'turn_on', {
                    'entity_id': entity_id}, True)
            if from_value == to_value:
                # already at the target value; nothing to do
                _LOGGER.debug('%s; %s', sensor_log, entity_log)
                return
            # service may be qualified as 'other_domain.service_name'
            pos = service.find('.')
            if pos != -1:
                domain = service[:pos]
                service = service[pos + 1:]
            data = {'entity_id': entity_id,
                    service_attr or entity_attr: to_value}
            # Logger.warn is a deprecated alias; use warning()
            _LOGGER.warning('%s; %s, %s⇒%s', sensor_log,
                            entity_log, service, to_value)
            _hass.services.call(domain, service, data, True)
            return
        else:
            i = i - 1
    # Below the lowest threshold: turn the entity off if it is currently on
    sensor_log += '<' + str(sensor_values[0])
    if state_value == 'off':
        _LOGGER.debug('%s, %s=off', sensor_log, entity_log)
        return
    # Log
    _LOGGER.warning('%s, %s=%s, ⇒off', sensor_log, entity_log, state_value)
    _hass.services.call(domain, 'turn_off', {'entity_id': entity_id}, True)
class DelayExecutor:
    """Defers `execute(params)` by `delay` seconds via Home Assistant's scheduler.

    Instances live in the module-level `_executors` map under `key` while the
    delayed call is pending; `actuate` uses that map to ignore duplicate
    requests for the same key in the meantime.
    """
    def __init__(self, key, delay, params):
        self.key = key
        self.params = params
        # schedule call() to run once, `delay` seconds from now
        async_call_later(_hass, delay, self.call)
    def call(self, *_):
        # remove ourselves from the pending map, then run the deferred action
        del _executors[self.key]
        execute(self.params)
def actuate(call):
    """Service handler: run `execute` immediately, or debounce it via DelayExecutor."""
    params = call.data
    delay = params.get('delay')
    delay = 120 if delay is None else delay
    if delay <= 0:
        # no debounce requested; act right away
        execute(params)
        return
    key = params['entity_id'] + '~' + \
        (params.get('service_attr') or params.get('entity_attr'))
    # only one pending execution per entity/attribute pair
    if key not in _executors:
        _executors[key] = DelayExecutor(key, delay, params)
def setup(hass, config):
    """Home Assistant entry point: register the `actuator.actuate` service."""
    global _hass
    # keep the hass handle for execute()/DelayExecutor, which run later
    _hass = hass
    hass.services.register(DOMAIN, 'actuate', actuate, schema=ACTUATE_SCHEMA)
    return True
|
import os
import gym
import numpy as np
import logging
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
from torch.utils.tensorboard import SummaryWriter
from noise import OUNoise
from model import Actor,Critic,DDPG_AC
from utils import get_args,dir_maker
from replay import ReplayOneDeque
from ddpg_agent import one_trajectory,train,eval_trajectory
def main():
    """Train a DDPG agent on `args.env_name`.

    Logs rewards/losses to TensorBoard and a log file, saves the final
    model, plots reward and loss curves, then evaluates the final policy
    for 10 episodes.
    """
    args = get_args()
    # directory define
    FILENAME = os.path.splitext(os.path.basename(__file__))[0]
    DIR,comment = dir_maker(args,FILENAME)
    # NOTE(review): BEST_MODEL_FILE is never written below; only the last
    # model is saved.
    BEST_MODEL_FILE = f"{DIR}/Best_Model.pt"
    LAST_MODEL_FILE = f"{DIR}/Last_Model.pt"
    writer = SummaryWriter(f"{DIR}/{comment}")
    logging.basicConfig(filename=f'{DIR}/{comment}.log',level=logging.DEBUG)
    # Model define
    if args.DEVICE is None:
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    else:
        device = args.DEVICE
    env = gym.make(args.env_name)
    N_OB = env.observation_space.shape[0]
    N_ACT = env.action_space.shape[0]
    ac = DDPG_AC(N_OB,N_ACT,fc_n=args.fc_n,device=device).to(device)
    ac_target = DDPG_AC(N_OB,N_ACT,fc_n=args.fc_n,device=device).to(device)
    ac_target.load_state_dict(ac.state_dict())
    actor_optim = torch.optim.Adam(ac.actor.parameters(),lr= args.lr_actor)
    # NOTE(review): the critic optimizer reuses args.lr_actor; a separate
    # critic learning rate was probably intended -- confirm.
    critic_optim = torch.optim.Adam(ac.critic.parameters(),lr= args.lr_actor)
    # Ornstein-Uhlenbeck exploration noise and replay buffer
    ou = OUNoise(mu=np.zeros(N_ACT))
    memos = ReplayOneDeque(MEMORY_SIZE=args.MEMORY_SIZE,BATCH_SIZE = args.BATCH_SIZE)
    # visulize data
    reward_list,eval_reward_list = [],[]
    loss_dict = {"critic":[],"actor":[]}
    best_reward = None
    # start train
    pbar = tqdm(range(args.EPOCHS))
    for ep in pbar:
        # collect one exploratory episode into the replay buffer
        reward_sum = one_trajectory(env,ac,ou,memos)
        writer.add_scalar("Reward",reward_sum,ep)
        reward_list.append(reward_sum)
        pbar_string = (f"Epoch: {ep} "
                       f"\t reward:{reward_sum:.2f}")
        loss_actor, loss_critic = train(memos,ac,ac_target,actor_optim,critic_optim,args)
        #breakpoint()
        if loss_actor is not None:
            loss_dict["critic"].append(loss_critic)
            loss_dict["actor"].append(loss_actor)
            writer.add_scalar("Loss_critic",loss_dict["critic"][-1],ep)
            writer.add_scalar("Loss_actor",loss_dict["actor"][-1],ep)
            pbar_string += (f"\tloss actor:{loss_actor:.5f}"
                            f"\tloss critic:{loss_critic:.5f}")
        if ep>0 and ep % args.EVAL_FREQ == 0:
            # periodic evaluation episode (no exploration noise)
            eval_reward_sum = eval_trajectory(env,ac)
            eval_reward_list.append(eval_reward_sum)
            writer.add_scalar("Eval_reward",eval_reward_sum,ep)
            pbar_string += (f"\teval reward: {eval_reward_sum:.2f}"
                            f"\tmean reward: {sum(eval_reward_list[-100:])/min(len(eval_reward_list),100):.2f}"
                            f"\tbest reward: {max(eval_reward_list):.2f}")
            # NOTE(review): best_reward tracks the max of *training* rewards
            # but is only refreshed on eval epochs -- confirm intended.
            if best_reward is None or max(reward_list) > best_reward:
                best_reward = max(reward_list)
            pbar.write(pbar_string)
            logging.info(pbar_string)
            writer.flush()
    torch.save(ac.state_dict(),LAST_MODEL_FILE)
    writer.close()
    # reward and loss curves
    plt.figure()
    plt.plot(reward_list)
    plt.title("Reward")
    plt.savefig(f"{DIR}/reward_list_and_max_{max(reward_list):.2f}.png")
    plt.figure()
    plt.plot(loss_dict['critic'],label= 'critic')
    plt.plot(loss_dict['actor'],label='actor')
    plt.legend(loc=2)
    plt.title("Loss")
    plt.savefig(f"{DIR}/loss_list_and_sum.png")
    print("Test Final Model:")
    # reward_list is reused here as a scalar accumulator for the final eval
    reward_list = 0
    for i in tqdm(range(10)):
        reward_sum = eval_trajectory(env,ac)
        tqdm.write(f"\t {i} with reward: {reward_sum:.2f}")
        logging.info(f"\t {i} with reward: {reward_sum:.2f}")
        reward_list +=reward_sum
    print(f"Final Model Mean Reward:{reward_list/10:.2f}")
    logging.info(f"Final Model Mean Reward:{reward_list/10:.2f}")


if __name__ == "__main__":
    main()
from django.contrib import admin
from .models import Tweet, Relationship, Like
# Register your models here.
class TweetAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Tweet."""
    # columns shown on the change list
    list_display = ('id', 'user', 'text',)
    # only the id column links to the edit page
    list_display_links = ('id',)
admin.site.register(Tweet, TweetAdmin)
class RelationshipAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Relationship (follower links)."""
    # columns shown on the change list; all of them link to the edit page
    list_display = ('id', 'from_user', 'target_user',)
    list_display_links = ('id', 'from_user', 'target_user',)
admin.site.register(Relationship, RelationshipAdmin)
class LikeAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Like."""
    # columns shown on the change list; all of them link to the edit page
    list_display = ('id', 'user', 'tweet',)
    list_display_links = ('id', 'user', 'tweet',)
admin.site.register(Like, LikeAdmin)
"""
Diffuse Self-Shading
====================
Modeling the reduction in diffuse irradiance caused by row-to-row diffuse
shading.
"""
# %%
# The term "self-shading" usually refers to adjacent rows blocking direct
# irradiance and casting shadows on each other. However, the concept also
# applies to diffuse irradiance because rows block a portion of the sky
# dome even when the sun is high in the sky. The irradiance loss fraction
# depends on how tightly the rows are packed and where on the module the
# loss is evaluated -- a point near the top edge of a module will see
# more of the sky than a point near the bottom edge.
#
# This example uses the approach presented by Passias and Källbäck in [1]_
# and recreates two figures from that paper using
# :py:func:`pvlib.shading.masking_angle_passias` and
# :py:func:`pvlib.shading.sky_diffuse_passias`.
#
# References
# ----------
# .. [1] D. Passias and B. Källbäck, "Shading effects in rows of solar cell
# panels", Solar Cells, Volume 11, Pages 281-291. 1984.
# DOI: 10.1016/0379-6787(84)90017-6
from pvlib import shading, irradiance
import matplotlib.pyplot as plt
import numpy as np
# %%
# First we'll recreate Figure 4, showing how the average masking angle varies
# with array tilt and array packing. The masking angle of a given point on a
# module is the angle from horizontal to the next row's top edge and represents
# the portion of the sky dome blocked by the next row. Because it changes
# from the bottom to the top of a module, the average across the module is
# calculated. In [1]_, ``k`` refers to the ratio of row pitch to row slant
# height (i.e. 1 / GCR).
surface_tilt = np.arange(0, 90, 0.5)
plt.figure()
# k is row pitch / slant height (i.e. 1/GCR); larger k means wider row spacing.
for pitch_ratio in [1, 1.5, 2, 2.5, 3, 4, 5, 7, 10]:
    ground_cover = 1 / pitch_ratio
    masking = shading.masking_angle_passias(surface_tilt, ground_cover)
    plt.plot(surface_tilt, masking, label=f'k={pitch_ratio}')
plt.xlabel('Inclination angle [degrees]')
plt.ylabel('Average masking angle [degrees]')
plt.legend()
plt.show()
# %%
# So as the array is packed tighter (decreasing ``k``), the average masking
# angle increases.
#
# Next we'll recreate Figure 5. Note that the y-axis here is the ratio of
# diffuse plane of array irradiance (after accounting for shading) to diffuse
# horizontal irradiance. This means that the deviation from 100% is due to the
# combination of self-shading and the fact that being at a tilt blocks off
# the portion of the sky behind the row. The first effect is modeled with
# :py:func:`pvlib.shading.sky_diffuse_passias` and the second with
# :py:func:`pvlib.irradiance.isotropic`.
plt.figure()
# Combine the isotropic transposition factor with the Passias shading loss
# for a subset of row-spacing ratios k (= 1/GCR).
for pitch_ratio in [1, 1.5, 2, 10]:
    ground_cover = 1 / pitch_ratio
    masking = shading.masking_angle_passias(surface_tilt, ground_cover)
    loss = shading.sky_diffuse_passias(masking)
    transposition = irradiance.isotropic(surface_tilt, dhi=1.0)
    # Express the combined effect as a percent of DHI.
    plt.plot(surface_tilt, transposition * (1 - loss) * 100, label=f'k={pitch_ratio}')
plt.xlabel('Inclination angle [degrees]')
plt.ylabel('Relative diffuse irradiance [%]')
plt.ylim(0, 105)
plt.legend()
plt.show()
# %%
# As ``k`` decreases, GCR increases, so self-shading loss increases and
# collected diffuse irradiance decreases.
|
# Print every even number in 1..9 and report how many there were.
count = 0
for candidate in range(1, 10):
    if candidate % 2 != 0:
        continue
    print(candidate)
    count += 1
print(f"The total number of even numbers are: {count}")
|
from IPython.display import clear_output
board = ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
def printboard():
    # Print the 3x3 board, one row per line.
    # Bug fix: the original had a bare `print` followed by a tuple expression
    # on its own line, which printed a blank line and discarded the values.
    # print(single_string) behaves identically on Python 2 and Python 3.
    print('%s %s %s' % (board[0], board[1], board[2]))
    print('%s %s %s' % (board[3], board[4], board[5]))
    print('%s %s %s' % (board[6], board[7], board[8]))
# NOTE(review): Python 2 syntax throughout (print statements, eval-style
# input()) -- confirm the target interpreter before running.
marker = 'x'
# index toggles between -1 and 1 each turn; 1 means it's 'o's turn next.
index = -1
while True:
    # Check every winning line for the *previous* player's marker.
    if board[0] == board[1] == board[2] == marker:
        print 'Congrats %s' % marker
        break
    elif board[3] == board[4] == board[5] == marker:
        print 'Congrats %s' % marker
        break
    elif board[6] == board[7] == board[8] == marker:
        print 'Congrats %s' % marker
        break
    elif board[0] == board[3] == board[6] == marker:
        print 'Congrats %s' % marker
        break
    elif board[1] == board[4] == board[7] == marker:
        print 'Congrats %s' % marker
        break
    elif board[2] == board[8] == board[5] == marker:
        print 'Congrats %s' % marker
        break
    elif board[0] == board[4] == board[8] == marker:
        print 'Congrats %s' % marker
        break
    elif board[2] == board[4] == board[6] == marker:
        print 'Congrats %s' % marker
        break
    else:
        # Alternate markers based on the toggling index.
        if index == 1:
            marker = 'o'
        else:
            marker = 'x'
        print 'enter your location as [x,y]'
        # NOTE(review): Py2 input() evaluates the typed text; x and y are
        # expected to be 1-based coordinates -- no bounds checking is done.
        x = input("what is x")
        y = input("what is y")
        n = (x - 1) + (y - 1) * 3
        # 'a' marks an empty cell; choosing an occupied cell aborts the game.
        if board[n] != 'a':
            print 'error'
            break
        board[n] = marker
        index = index * -1
        printboard()
        print 'good'
|
import pygame
from pygame.locals import *
import sys
import random
WINDOWWIDTH = 300
WINDOWHEIGHT = 400
PADDLEWIDTH = 60
PADDLEHEIGHT = 20
FRUITSIZE = 10
FPS = 60
PADDLECOLOR = (255, 0, 0)
FRUITCOLOR = (0, 0, 255)
class Paddle(pygame.sprite.Sprite):
    """Player-controlled paddle anchored near the bottom of the window."""

    def __init__(self, window_width, window_height, width, height, color):
        pygame.sprite.Sprite.__init__(self)
        self.window_width = window_width
        self.window_height = window_height
        # Solid-colour rectangle sprite.
        self.image = pygame.Surface((width, height))
        self.image.fill(color)
        self.rect = self.image.get_rect()
        self.rect.center = (window_width // 2, window_height - 2 * height)
        self.speed = 5
        self.moving = False
        self.direction = 0  # -1 = left, 1 = right, 0 = idle

    def start_moving(self):
        self.moving = True

    def stop_moving(self):
        self.moving = False

    def set_direction(self, direction):
        # Unknown strings leave the current direction unchanged.
        self.direction = {'left': -1, 'right': 1}.get(direction, self.direction)

    def reset(self):
        self.rect.centerx = self.window_width // 2
        self.moving = False
        self.direction = 0

    def update(self):
        if not self.moving:
            return
        # Advance and clamp the centre x to [0, window_width].
        proposed = self.rect.centerx + self.direction * self.speed
        self.rect.centerx = max(0, min(proposed, self.window_width))

    def draw(self, surface):
        surface.blit(self.image, self.rect)
class Fruit(pygame.sprite.Sprite):
    """A falling fruit that respawns at the top with a random x and speed."""

    def __init__(self, window_width, window_height, size, color):
        pygame.sprite.Sprite.__init__(self)
        self.window_width = window_width
        self.window_height = window_height
        # Square solid-colour sprite.
        self.image = pygame.Surface((size, size))
        self.image.fill(color)
        self.rect = self.image.get_rect()
        self.speed = random.randint(1, 5)
        # reset() positions the fruit and re-rolls the speed.
        self.reset()

    def reset(self):
        self.rect.center = (random.randint(0, self.window_width), 0)
        self.speed = random.randint(1, 5)

    def update(self):
        # Fall straight down by the per-frame speed.
        self.rect.centery += self.speed
class Catcher:
    """Game controller: owns the paddle, the falling fruit, and the score."""
    def __init__(self, screen, window_width, window_height, width, height, color):
        self.screen = screen
        self.paddle = Paddle(window_width, window_height, width, height, color)
        self.fruits_group = pygame.sprite.Group()
        self.score = 0   # fruits caught by the paddle
        self.missed = 0  # fruits that fell past the bottom edge
    def reset(self):
        # Restore a fresh game state without recreating the objects.
        self.paddle.reset()
        self.fruits_group.empty()
        self.score = 0
        self.missed = 0
    def _handle_event(self):
        # Poll the pygame event queue; 'a'/'d' move left/right while held.
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN and event.key == K_d:
                self.paddle.start_moving()
                self.paddle.set_direction('right')
            elif event.type == KEYUP and event.key == K_d:
                self.paddle.stop_moving()
            elif event.type == KEYDOWN and event.key == K_a:
                self.paddle.start_moving()
                self.paddle.set_direction('left')
            elif event.type == KEYUP and event.key == K_a:
                self.paddle.stop_moving()
    def add_fruit(self):
        # Cap the number of simultaneously falling fruit at 10.
        if len(self.fruits_group.sprites()) < 10:
            self.fruits_group.add(Fruit(WINDOWWIDTH, WINDOWHEIGHT, FRUITSIZE, FRUITCOLOR))
    def step(self):
        """Advance the game one frame: input, movement, collisions, drawing."""
        self._handle_event()
        # move the paddle and fruits
        self.fruits_group.update()
        self.paddle.update()
        # determine if paddle is collided with fruits
        # (the True flag removes caught fruit from the group)
        collide_list = pygame.sprite.spritecollide(self.paddle, self.fruits_group, True)
        if collide_list:
            self.score += len(collide_list)
        # determine if fruits fall down
        for fruit in self.fruits_group.sprites():
            if fruit.rect.centery > WINDOWHEIGHT:
                fruit.reset()
                self.missed += 1
        self.screen.fill((0, 0, 0))
        self.fruits_group.draw(self.screen)
        self.paddle.draw(self.screen)
    def show_scores(self):
        # Render catch/miss counters in the top corners of the window.
        font = pygame.font.SysFont('Arial', 16)
        score_surf = font.render("CATCH: "+str(self.score), True, (0, 255, 0))
        score_rect = score_surf.get_rect()
        score_rect.topleft = (40, 40)
        self.screen.blit(score_surf, score_rect)
        miss_surf = font.render("MISS: "+str(self.missed), True, (0, 255, 0))
        miss_rect = miss_surf.get_rect()
        miss_rect.topright = (WINDOWWIDTH-40, 40)
        self.screen.blit(miss_surf, miss_rect)
if __name__ == "__main__":
pygame.init()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
FPSCLOCK = pygame.time.Clock()
pygame.display.set_caption('Catcher')
game = Catcher(DISPLAYSURF, WINDOWWIDTH, WINDOWHEIGHT, PADDLEWIDTH, PADDLEHEIGHT, PADDLECOLOR)
generator = 0
while True:
if generator == 100:
game.add_fruit()
generator = 0
else:
generator += 1
game.step()
game.show_scores()
pygame.display.update()
FPSCLOCK.tick(FPS)
|
def find_duplicate(arr):
    """Return a duplicated value in ``arr`` using in-place cyclic sort.

    Assumes values lie in 1..len(arr) -- TODO confirm with callers.
    Mutates ``arr`` (sorts it in place). Returns -1 when no duplicate
    exists.

    Fix: removed the leftover debug ``print(arr)`` that made this library
    function write the mutated array to stdout on every call.
    """
    i = 0
    while i < len(arr):
        j = arr[i] - 1                        # home index for value arr[i]
        if arr[i] != arr[j]:
            arr[i], arr[j] = arr[j], arr[i]   # move the value to its home slot
        else:
            i += 1                            # already home, or blocked by a duplicate
    # The first slot not holding its own index exposes the duplicate.
    for i, value in enumerate(arr):
        if value != i + 1:
            return value
    return -1
def main():
    """Demo driver: print the duplicate found in each sample list."""
    samples = ([1, 4, 4, 3, 2], [2, 1, 3, 3, 5, 4], [2, 4, 1, 4, 4])
    for sample in samples:
        print(find_duplicate(sample))
main()
# encoding: utf-8
# module cmath
# from /home/pyy2/.virtualenvs/pyy3.5/lib/python3.5/lib-dynload/cmath.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
"""
This module is always available. It provides access to mathematical
functions for complex numbers.
"""
# no imports
# Variables with simple values
# NOTE(review): IDE-generated stub file for the C-implemented cmath module;
# bodies are placeholders (`pass`) and signatures are unknown to the generator.
# The real functions take a complex argument z.
e = 2.718281828459045
pi = 3.141592653589793
# functions
def acos(*args, **kwargs): # real signature unknown
    """ Return the arc cosine of z. """
    pass
def acosh(*args, **kwargs): # real signature unknown
    """ Return the inverse hyperbolic cosine of z. """
    pass
def asin(*args, **kwargs): # real signature unknown
    """ Return the arc sine of z. """
    pass
def asinh(*args, **kwargs): # real signature unknown
    """ Return the inverse hyperbolic sine of z. """
    pass
def atan(*args, **kwargs): # real signature unknown
    """ Return the arc tangent of z. """
    pass
def atanh(*args, **kwargs): # real signature unknown
    """ Return the inverse hyperbolic tangent of z. """
    pass
def cos(*args, **kwargs): # real signature unknown
    """ Return the cosine of z. """
    pass
def cosh(*args, **kwargs): # real signature unknown
    """ Return the hyperbolic cosine of z. """
    pass
def exp(*args, **kwargs): # real signature unknown
    """ Return the exponential value e**z. """
    pass
def isclose(*args, **kwargs): # real signature unknown
    """
    Determine whether two complex numbers are close in value.
    rel_tol
    maximum difference for being considered "close", relative to the
    magnitude of the input values
    abs_tol
    maximum difference for being considered "close", regardless of the
    magnitude of the input values
    Return True if a is close in value to b, and False otherwise.
    For the values to be considered close, the difference between them must be
    smaller than at least one of the tolerances.
    -inf, inf and NaN behave similarly to the IEEE 754 Standard. That is, NaN is
    not close to anything, even itself. inf and -inf are only close to themselves.
    """
    pass
def isfinite(*args, **kwargs): # real signature unknown
    """ Return True if both the real and imaginary parts of z are finite, else False. """
    pass
def isinf(*args, **kwargs): # real signature unknown
    """ Checks if the real or imaginary part of z is infinite. """
    pass
def isnan(*args, **kwargs): # real signature unknown
    """ Checks if the real or imaginary part of z not a number (NaN). """
    pass
def log(*args, **kwargs): # real signature unknown
    """
    The logarithm of z to the given base.
    If the base not specified, returns the natural logarithm (base e) of z.
    """
    pass
def log10(*args, **kwargs): # real signature unknown
    """ Return the base-10 logarithm of z. """
    pass
def phase(*args, **kwargs): # real signature unknown
    """ Return argument, also known as the phase angle, of a complex. """
    pass
def polar(*args, **kwargs): # real signature unknown
    """
    Convert a complex from rectangular coordinates to polar coordinates.
    r is the distance from 0 and phi the phase angle.
    """
    pass
def rect(*args, **kwargs): # real signature unknown
    """ Convert from polar coordinates to rectangular coordinates. """
    pass
def sin(*args, **kwargs): # real signature unknown
    """ Return the sine of z. """
    pass
def sinh(*args, **kwargs): # real signature unknown
    """ Return the hyperbolic sine of z. """
    pass
def sqrt(*args, **kwargs): # real signature unknown
    """ Return the square root of z. """
    pass
def tan(*args, **kwargs): # real signature unknown
    """ Return the tangent of z. """
    pass
def tanh(*args, **kwargs): # real signature unknown
    """ Return the hyperbolic tangent of z. """
    pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
|
""" Models and database functions """
from flask_sqlalchemy import SQLAlchemy
import bcrypt
db = SQLAlchemy()
class User(db.Model):
    """ User information """
    __tablename__ = "users"
    user_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    email = db.Column(db.String(30), unique=True, nullable=False)
    # Stored as a bcrypt hash (see example_data) -- never plain text.
    password = db.Column(db.String(250), nullable=False)
    first_name = db.Column(db.Unicode(30), nullable=False)
    last_name = db.Column(db.Unicode(30), nullable=False)
    # One-to-many links; each Address/Route gets a `.user` backref.
    addresses = db.relationship('Address', backref=db.backref('user'))
    routes = db.relationship('Route', backref=db.backref('user'))
    def __repr__(self):
        return "<User id=%s email=%s>" % (self.user_id, self.email)
class Address(db.Model):
    """ Saved addresses for a user """
    __tablename__ = "addresses"
    address_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'), nullable=False)
    # Short user-chosen name, e.g. "Home" or "Work".
    label = db.Column(db.String(20), nullable=False)
    # Full human-readable address string (as returned by geocoding).
    address_str = db.Column(db.String(100), nullable=False)
    latitude = db.Column(db.Float, nullable=False)
    longitude = db.Column(db.Float, nullable=False)
    # Whether this is the user's default starting address.
    is_default = db.Column(db.Boolean, nullable=False)
    def __repr__(self):
        return "<Address id=%s name=%s user=%s>" % \
            (self.address_id, self.label, self.user.user_id)
class Route(db.Model):
    """ Routes created for a user """
    __tablename__ = "routes"
    route_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'), nullable=False)
    # Elevation gain/loss over the whole route (units not stated here --
    # presumably feet or meters; confirm against the route generator).
    total_ascent = db.Column(db.Float, nullable=False)
    total_descent = db.Column(db.Float, nullable=False)
    # Whether the user accepted this generated route.
    is_accepted = db.Column(db.Boolean, nullable=False)
    score = db.Column(db.Integer, nullable=True)
    # Optional free-text reason when a route is rejected.
    issue = db.Column(db.String(50), nullable=True)
    total_miles = db.Column(db.Float, nullable=False)
    total_minutes = db.Column(db.Float, nullable=False)
    # One-to-many links; each Waypoint/Ride gets a `.route` backref.
    waypoints = db.relationship('Waypoint', backref=db.backref('route'))
    rides = db.relationship('Ride', backref=db.backref('route'))
    def __repr__(self):
        return "<Route id=%s accepted=%s>" % \
            (self.route_id, self.is_accepted)
class Ride(db.Model):
    """ All rides a user does for specific routes """
    __tablename__ = "rides"
    ride_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    route_id = db.Column(db.Integer, db.ForeignKey('routes.route_id'), nullable=False)
    time_of_ride = db.Column(db.DateTime, nullable=False)
    # Optional free-text notes from the rider.
    comments = db.Column(db.UnicodeText, nullable=True)
    def __repr__(self):
        return "<Ride id=%s time_of_ride=%s route_id:%s>" % \
            (self.ride_id, self.time_of_ride, self.route.route_id)
class Waypoint(db.Model):
    """ Waypoints for each route """
    __tablename__ = "waypoints"
    waypoint_id = db.Column(db.Integer, autoincrement=True, primary_key=True)
    route_id = db.Column(db.Integer, db.ForeignKey('routes.route_id'), nullable=False)
    latitude = db.Column(db.Float, nullable=False)
    longitude = db.Column(db.Float, nullable=False)
    def __repr__(self):
        return "<Waypoint id=%s route=%s>" % (self.waypoint_id, self.route.route_id)
def _commit_or_rollback():
    """Commit the current session; roll back and re-raise on any failure."""
    try:
        db.session.commit()
    except:  # noqa: E722 -- deliberately broad: always roll back, then re-raise
        db.session.rollback()
        raise
def example_data():
    """ Test data for test.py

    Creates one user, one address, three routes and nine waypoints.
    Commits after each batch so generated primary keys (user_id, route_id)
    are available for the foreign keys of the next batch.
    """
    password = "test123"
    # NOTE(review): bcrypt.hashpw on a str is Python-2 style; Python 3
    # requires bytes -- confirm target interpreter.
    password_given = bcrypt.hashpw(password, bcrypt.gensalt())
    test_user = User(email="test@test.com", password=password_given, last_name="Cohen", first_name="Leonard")
    db.session.add(test_user)
    _commit_or_rollback()
    address = Address(user_id=test_user.user_id, label="Avenue Cyclery",
                      address_str="756 Stanyan Street, San Francisco, CA, United States",
                      latitude=37.7680873, longitude=-122.452986, is_default=True)
    route1 = Route(user_id=test_user.user_id, total_ascent=1000, total_descent=1000,
                   is_accepted=True, score=4, total_miles=10, total_minutes=60)
    route2 = Route(user_id=test_user.user_id, total_ascent=1500, total_descent=1500,
                   is_accepted=True, score=3, total_miles=15, total_minutes=90)
    route3 = Route(user_id=test_user.user_id, total_ascent=3500, total_descent=3500,
                   is_accepted=False, score=0, total_miles=20, total_minutes=120)
    db.session.add_all([address, route1, route2, route3])
    _commit_or_rollback()
    waypoint1_1 = Waypoint(route_id=route1.route_id, latitude=37.7472843749906, longitude=-122.448249748807)
    waypoint1_2 = Waypoint(route_id=route1.route_id, latitude=37.7694811467305, longitude=-122.399497857258)
    waypoint1_3 = Waypoint(route_id=route1.route_id, latitude=37.7255684531721, longitude=-122.467528922137)
    waypoint2_1 = Waypoint(route_id=route2.route_id, latitude=37.7472843749906, longitude=-122.448249748807)
    waypoint2_2 = Waypoint(route_id=route2.route_id, latitude=37.7363830024061, longitude=-122.468924663359)
    waypoint2_3 = Waypoint(route_id=route2.route_id, latitude=37.7444489169474, longitude=-122.396923835657)
    waypoint3_1 = Waypoint(route_id=route3.route_id, latitude=37.7472843749906, longitude=-122.448249748807)
    waypoint3_2 = Waypoint(route_id=route3.route_id, latitude=37.7894328096981, longitude=-122.402355494602)
    waypoint3_3 = Waypoint(route_id=route3.route_id, latitude=37.8462084474116, longitude=-122.462122562782)
    db.session.add_all([waypoint1_1, waypoint1_2, waypoint1_3, waypoint2_1,
                        waypoint2_2, waypoint2_3, waypoint3_1, waypoint3_2,
                        waypoint3_3])
    _commit_or_rollback()
def connect_to_db(app, db_uri=None):
    """Connect the database to our Flask app.

    Args:
        app: the Flask application to bind.
        db_uri: optional SQLAlchemy database URI; falls back to the local
            postgres database when omitted or falsy.

    Fix: db_uri now defaults to None -- the ``__main__`` block calls
    ``connect_to_db(app)`` with a single argument, which previously raised
    TypeError. Existing two-argument callers are unaffected.
    """
    if not db_uri:
        db_uri = 'postgresql:///bike_routes'
    app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    app.config['SQLALCHEMY_ECHO'] = True
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.app = app
    db.init_app(app)
if __name__ == "__main__":
from server import app
connect_to_db(app)
db.create_all()
print "Connected to DB."
|
from credential.telegram import key as telegramKey
from credential.mongoDb import key as mongoDbKey
from aiogram import Bot, Dispatcher, executor, types
from aiogram.dispatcher.filters import Text
from pymongo import MongoClient
import json
import logging
import datetime
def bot():
    """Run the Telegram bot that records requested video themes in MongoDB."""
    # Mongo collection where one document per theme is upserted.
    client = MongoClient(mongoDbKey)
    mydb = client['videmaker']
    db = mydb['themes']
    logging.basicConfig(level=logging.INFO)
    bot = Bot(token=telegramKey)
    dp = Dispatcher(bot)
    # Template document; mutated and re-used for every upsert below.
    content={
        'theme': '',
        'date': '',
        'ready': 0
    }
    @dp.message_handler(commands=['help'])
    async def sendHelp(message: types.Message):
        # Usage instructions (Portuguese): send "Tema <topic>".
        await message.reply('Me envie um tema para um video')
        await message.reply('Exemplo: Tema god of war')
    @dp.message_handler(Text(contains='Tema', ignore_case=True), state='*')
    async def getTheme(message: types.Message):
        try:
            # Drop the leading "Tema" keyword; the rest is the theme text.
            removeTheme = message.text.split()[1:]
            theme = ' '.join(removeTheme)
            content['theme'] = theme
            content['date'] = datetime.datetime.utcnow()
            # Upsert keyed by the theme string itself.
            db.update_one({'_id': theme}, {'$set': content}, upsert=True)
            theme = f'Seu tema é {theme}'
        except TypeError:
            theme = "Erro"
        print(theme)
        await message.reply(theme)
    # Blocks here, polling Telegram until interrupted.
    executor.start_polling(dp, skip_updates=True)
if __name__ == "__main__":
    bot()
# Josephus-style elimination: N people in a circle, remove every K-th,
# and print the removal order as "<a, b, ...>".
N, K = map(int, input().split())
people = list(range(1, N + 1))
order = []
idx = 0
while people:
    idx = (idx + K - 1) % len(people)
    order.append(str(people.pop(idx)))
print('<' + ', '.join(order) + '>')
##
# -*- coding: utf-8 -*-
import os
import cv2
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='flip the images')
parser.add_argument('--flip_dir', dest='flip_dir', required=True, help='directory to flip images')
args = parser.parse_args()
# Source images live in <flip_dir>/result/.
path_dir = os.path.join(args.flip_dir, 'result')
file_list = os.listdir(path_dir)
# Fix: create the output directory -- cv2.imwrite fails silently (returns
# False) when the target directory does not exist.
out_dir = './flipped_result'
os.makedirs(out_dir, exist_ok=True)
for item in file_list:
    print(item)
    img = cv2.imread(os.path.join(path_dir, item))
    # Fix: cv2.imread returns None for unreadable/non-image files, which
    # previously crashed cvtColor; skip such entries instead.
    if img is None:
        continue
    # NOTE(review): converting BGR->RGB before imwrite (which expects BGR)
    # swaps red/blue channels in the saved files -- confirm this is intended.
    _img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    flipped_img = cv2.flip(_img, 0)  # 0 = vertical (top-bottom) flip
    cv2.imwrite(os.path.join(out_dir, item), flipped_img)
|
import numpy as np
from .rk4 import rk4
def rka(x, t, tau, err, derivsRK, param):
    """Adaptive Runge-Kutta routine (step doubling).

    Inputs
      x          Current value of the dependent variable (array)
      t          Independent variable (usually time)
      tau        Step size (usually time step)
      err        Desired fractional local truncation error
      derivsRK   Right hand side of the ODE; called as derivsRK(x, t, param)
      param      Extra parameters passed to derivsRK
    Outputs (as a tuple; unpacks exactly like the previous np.array return)
      xSmall     New value of the dependent variable
      t          New value of the independent variable
      tau        Suggested step size for the next call to rka

    Fix: the original returned ``np.array([xSmall, t, tau])``, which builds a
    ragged object array (array + two scalars) and raises ValueError on
    NumPy >= 1.24. A plain tuple is returned instead; ``x, t, tau = rka(...)``
    and ``result[i]`` both keep working.
    """
    # Save the starting point so each retry begins from the same state.
    tSave, xSave = t, x
    safe1, safe2 = 0.9, 4.0   # safety factors limiting step-size changes
    eps = 1.e-15              # guards against division by zero in the error ratio
    maxTry = 100
    for iTry in range(maxTry):
        # Two half steps...
        half_tau = 0.5 * tau
        xTemp = rk4(xSave, tSave, half_tau, derivsRK, param)
        t = tSave + half_tau
        xSmall = rk4(xTemp, t, half_tau, derivsRK, param)
        # ...versus one full step; their difference estimates the truncation error.
        t = tSave + tau
        xBig = rk4(xSave, tSave, tau, derivsRK, param)
        scale = err * (abs(xSmall) + abs(xBig)) / 2.
        xDiff = xSmall - xBig
        errorRatio = np.max(np.absolute(xDiff) / (scale + eps))
        # Propose a new step size, clamped to [tau/safe2, safe2*tau].
        tau_old = tau
        tau = safe1 * tau_old * errorRatio**(-0.20)
        tau = max(tau, tau_old / safe2)
        tau = min(tau, safe2 * tau_old)
        # Accept the more accurate two-half-step result once within tolerance.
        if errorRatio < 1:
            return xSmall, t, tau
    print('ERROR: Adaptive Runge-Kutta routine failed')
    return xSmall, t, tau
"""
Server to guess what ... serving ludos model.
This service is running a zmq client/server interface
to run the inference.
To access it, just connect to the server using
```
socket = context.socket(zmq.REQ)
socket.connect("tcp://IP_OF_THE_SERVER:PORT_OF_THE_SERVER")
```
The server expected request format and serialized in a specifc way.
The request should be a dict with three keys
1. model_id which reference the model to use
2. predict_method which store the name of the method you want to run
3. kwargs which store the argument of the method
Then this request should be pickled/compressed using
```
req = pickle.dumps(request, protocol)
req = zlib.compress(req)
```
before being sent to the
"""
import hashlib
import json
import logging
import pickle
import traceback
import zlib
import box
from box import Box
import zmq
from ludos.models import common
def get_logger():
    """Return a DEBUG-level logger for this module with one stream handler.

    Fix: the original attached a fresh StreamHandler on every call, so
    repeated calls duplicated every log line. The handler is now added only
    if the logger does not already have one.
    """
    log = logging.getLogger(__name__)
    if not log.handlers:
        ch = logging.StreamHandler()
        formatter = logging.Formatter(
            '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
        ch.setFormatter(formatter)
        ch.setLevel(logging.DEBUG)
        log.addHandler(ch)
    log.setLevel(logging.DEBUG)
    return log
class RequestFormatError(ValueError):
    """Raised when a client request dict is missing a required key."""
    pass
# Status codes returned to clients: 200 on success; per-exception codes
# (HTTP-inspired, not actual HTTP) for the failure modes handled in start().
CODES = {
    'success': 200,
    'exceptions': {
        common.ModelLoadingError: 401,
        ValueError: 404,
        AttributeError: 404,
        TypeError: 404,
        RuntimeError: 404,
        box.exceptions.BoxKeyError: 401,
        RequestFormatError: 402
    }
}
class LudosServer(object):
    """
    Simple server exposing models inference via a client/server zeroMQ interface.
    The server expected request format and serialized in a specifc way.
    The request should be a dict with three keys
    1. model_id: Name of the model in the registry
    2. predict_method: which store the name of the method you want to run
    3. predict_kwargs: which store the argument of the method
    Then this request should be pickled/compressed before being sent to the
    server. Inversely, the response should also be decrompress/unserialized
    using pickle
    Full client side workflow below:
    ```
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://IP_OF_THE_SERVER:PORT_OF_THE_SERVER")
    req = zlib.compress(pickle.dumps(request, protocol))
    socket.send(req,flags = 0)
    response = socket.recv(flags = 0)
    response = pickle.loads(zlib.decompress(response))
    ```
    """
    def __init__(self, host: str = '*', port: int = 5555):
        """
        Args:
            host (str): IP address of the host
            port (int): Port to access the server
        """
        # REP socket: strictly alternates recv/send with one client at a time.
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REP)
        self.socket.bind("tcp://{}:{}".format(host, port))
        # Registry of loaded models, keyed by model_id.
        self.models = Box()
        self.host = host
        self.port = port
        self.logger = get_logger()
    def send_response(self,
                      payload,
                      status_code: int,
                      protocol: int = 2,
                      flags: int = 0):
        """
        Send the response to the client
        Args:
            payload (obj): output of the model
            status_code (int): exit status
            protocol (int): Protocol to pickle the msg. Use 2
                to talk to python2
        """
        response = dict()
        response['payload'] = payload
        response['status_code'] = status_code
        # Mirror of the client side: pickle then zlib-compress.
        p = pickle.dumps(response, protocol)
        z = zlib.compress(p)
        return self.socket.send(z, flags=flags)
    def receive_request(self, flags: int = 0):
        """
        Receive request.
        A request is made of three attributes
            request = dict(model_id='cctv_expert',
                           predict_method='predict_prob',
                           predict_kwargs=...)
        Returns:
            Deserialized request sent by the client.
        """
        z = self.socket.recv(flags)
        p = zlib.decompress(z)
        # SECURITY NOTE(review): pickle.loads on network data executes
        # arbitrary code if the peer is untrusted -- confirm this service is
        # only reachable from trusted clients.
        request = pickle.loads(p, encoding='latin1')
        # Validate the request shape before dispatching.
        for key in ['model_id', 'predict_method', 'predict_kwargs']:
            if key not in request.keys():
                self.logger.error('Missing key {}'.format(key))
                raise RequestFormatError()
        return request
    def load_model(self, model_id: str, model_task: str, model_name: str,
                   expname: str):
        """
        Args:
            model_id (str): model key
            model_task (str): Task for the model
            model_name (str): Name of the model
            expname (str): Name of the experiment
        Returns:
            model
        """
        self.logger.info('Loading model {}/{}/{}'.format(
            model_task, model_name, expname))
        self.models[model_id] = common.load_model(model_task=model_task,
                                                  model_name=model_name,
                                                  expname=expname)
        self.logger.info('Succesfully load model {}/{}/{}'.format(
            model_task, model_name, expname))
    def start(self):
        """
        Start the server loop
        """
        self.logger.info('Server started on http://{}:{}'.format(
            self.host, self.port))
        while True:
            try:
                self.logger.info('Waiting new request')
                request = self.receive_request()
                self.logger.info('Running inference')
                # Dispatch: look up the model, then call the named method
                # with the client-supplied kwargs.
                out = getattr(
                    self.models[request['model_id']],
                    request['predict_method'])(**request['predict_kwargs'])
            except (common.ModelLoadingError, RequestFormatError, ValueError,
                    TypeError, RuntimeError, AttributeError,
                    box.exceptions.BoxKeyError) as e:
                # Known failure modes: map to a status code and keep serving.
                # A response must still be sent -- REP sockets require a reply
                # for every request.
                trace = traceback.format_exc()
                code_status = CODES['exceptions'][e.__class__]
                self.logger.error('Error with status: {}'.format(code_status))
                self.logger.error('Traceback: {}'.format(trace))
                self.send_response(payload='', status_code=code_status)
                continue
            except Exception as e:
                # Catch-all so one bad request cannot kill the server.
                trace = traceback.format_exc()
                self.logger.error('Traceback: {}'.format(trace))
                code_status = 404
                self.send_response(payload='', status_code=code_status)
                continue
            self.send_response(payload=out, status_code=CODES['success'])
            self.logger.info('Succesfully run inference')
|
"""
A Joule is the international unit of energy.
A watt is the international unit of power.
A watt is a measure of energy *flow*. That is a watt is a flow of energy
(lets say out of the wall socket, into your lamp) of 1 Joule per second.
One joule is approximately 1% of a peanut. This is a tiny amount. Honestly. A more useful and commonly used amount of energy is the kilowatt-hour.
A kilowatt is a thousand Joules per second.
So a Kilowatt hour, is a thousand Joules per second flowing out of a wall socket over the course of an hour. Into our big bag of energy would come
1000 x 60 x 60
or
3,600,000 Joules
Now a Joule = Calorie. Yes that's right, the magazine cover diet style of calorie. Except that every time you hear the word calorie in a diet-related sense, it is used wrong - 1,000 times wrong. The calorie you need to cut out in a diet is actually a kilocalorie. I have no idea why it got confused but there you are, it did.
So my wife's glossy tells me I need to eat only 2,500 calories a day or so. Which really means 2,500,000 Joules.
Now I can easily scoff more than that, and with (a lot less ease) burn that up. A days hard hiking, or a job on a building site will probably use 3,600 calories a day.
So we have a useful, human-sized, measure of a "bag" of energy - a kWh. It's easy to calculate (it's about the energy used
leaving a bar fire or a kettle on for an hour; it's also the energy used by working hard for a day - perhaps by a servant)
So a kilowatt-hour is *not* a thousand watts per hour, or any other measure of flow. A kilowatt-hour is a bag of energy that is about the amount of work you could do in a day, and feel knackered at the end.
"""
|
import imutils
import cv2

# OpenCV basics demo: load an image, inspect its shape, read one pixel,
# and resize while preserving the aspect ratio.

# Images are NumPy arrays of shape (height, width, channels); OpenCV stores
# channels in BGR order, not RGB.
image = cv2.imread("static/images/boca2000.jpg")
if image is None:
    # cv2.imread returns None (it does NOT raise) when the path is wrong or
    # the file cannot be decoded; fail fast with a clear message instead of
    # an AttributeError on .shape below.
    raise FileNotFoundError("could not load static/images/boca2000.jpg")
(h, w, d) = image.shape

# Display the image; waitKey(0) blocks until a key is pressed in the window.
cv2.imshow("Title", image)
cv2.waitKey(0)

# Access the pixel at x=50, y=100 -- indexing is image[y, x], BGR order.
(B, G, R) = image[100, 50]
print(B)
print(G)
print(R)

# imutils.resize keeps the aspect ratio: only the target width is given and
# the height is computed from the original proportions.
resized = imutils.resize(image, width=300)
cv2.imshow("Imutils Resize", resized)
cv2.waitKey(0)
# # let's rotate an image 45 degrees clockwise using OpenCV by first
# # computing the image center, then constructing the rotation matrix,
# # and then finally applying the affine warp
# center = (w // 2, h // 2)
# # use // to perform integer math
# M = cv2.getRotationMatrix2D(center, -45, 1.0)
# # -45 == 315
# rotated = cv2.warpAffine(image, M, (w, h))
# cv2.imshow("OpenCV Rotation", rotated)
# cv2.waitKey(0)
#
# # rotation can also be easily accomplished via imutils with less code
# rotated = imutils.rotate(image, -45)
# cv2.imshow("Imutils Rotation", rotated)
# cv2.waitKey(0)
#
# # OpenCV doesn't "care" if our rotated image is clipped after rotation
# # so we can instead use another imutils convenience function to help
# # us out
# rotated = imutils.rotate_bound(image, 45)
# cv2.imshow("Imutils Bound Rotation", rotated)
# cv2.waitKey(0)
#
# # apply a Gaussian blur with a 11x11 kernel to the image to smooth it,
# # useful when reducing high frequency noise
# blurred = cv2.GaussianBlur(image, (11, 11), 0)
# cv2.imshow("Blurred", blurred)
# cv2.waitKey(0)
#
# blurred_2 = cv2.GaussianBlur(image, (5, 5), 0)
# cv2.imshow("Blurred2", blurred_2)
# cv2.waitKey(0)
#
# # drawing operations on images are performed in-place.
# # Therefore at the beginning of each code block,
# # we make a copy of the original image storing the copy as output.
# # We then proceed to draw on the image called output in-place
# # so we do not destroy our original image.
#
# # draw a 2px thick red rectangle surrounding the face
# output = image.copy()
# # cv2.rectangle(img, pt1, pt2, color, thickness)
# cv2.rectangle(output, (350, 100), (450, 200), (0, 0, 255), 2)
# cv2.imshow("Rectangle", output)
# cv2.waitKey(0)
#
# cv2.rectangle(output, (350, 100), (450, 200), (0, 0, 255), 8)
# cv2.imshow("Rectangle2", output)
# cv2.waitKey(0)
#
# output = image.copy()
# # cv2.line(img, pt1, pt2, color, thickness)
# cv2.line(output, (60, 20), (400, 200), (0, 0, 255), 5)
# cv2.imshow("Line", output)
# cv2.waitKey(0)
#
# output = image.copy()
# # cv2.circle(img, center, radius, color, thickness)
# cv2.circle(output, (400, 150), 50, (0, 255, 255), 2)
# cv2.imshow("Circle", output)
# cv2.waitKey(0)
#
# output = image.copy()
# # cv2.putText(img, text, pt, font, scale, color, thickness)
# cv2.putText(output, "OpenCV + Vision Artificial", (350, 500),
# cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
# cv2.imshow("Text", output)
# cv2.waitKey(0)
# # USAGE
# # python opencv_object_tracking.py
# # python opencv_object_tracking.py --video dashcam_boston.mp4 --tracker csrt
#
# # import the necessary packages
# from imutils.video import VideoStream
# from imutils.video import FPS
# import argparse
# import imutils
# import time
# import cv2
#
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-v", "--video", type=str,
# help="path to input video file")
# ap.add_argument("-t", "--tracker", type=str, default="kcf",
# help="OpenCV object tracker type")
# args = vars(ap.parse_args())
#
# # extract the OpenCV version info
# (major, minor) = cv2.__version__.split(".")[:2]
#
# # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
# # function to create our object tracker
# if int(major) == 3 and int(minor) < 3:
# tracker = cv2.Tracker_create(args["tracker"].upper())
#
# # otherwise, for OpenCV 3.3 OR NEWER, we need to explicity call the
# # approrpiate object tracker constructor:
# else:
# # initialize a dictionary that maps strings to their corresponding
# # OpenCV object tracker implementations
# OPENCV_OBJECT_TRACKERS = {
# "csrt": cv2.TrackerCSRT_create,
# "kcf": cv2.TrackerKCF_create,
# "boosting": cv2.TrackerBoosting_create,
# "mil": cv2.TrackerMIL_create,
# "tld": cv2.TrackerTLD_create,
# "medianflow": cv2.TrackerMedianFlow_create,
# "mosse": cv2.TrackerMOSSE_create
# }
#
# # grab the appropriate object tracker using our dictionary of
# # OpenCV object tracker objects
# tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
#
# # initialize the bounding box coordinates of the object we are going
# # to track
# initBB = None
#
# # if a video path was not supplied, grab the reference to the web cam
# if not args.get("video", False):
# print("[INFO] starting video stream...")
# vs = VideoStream(src=0).start()
# time.sleep(1.0)
#
# # otherwise, grab a reference to the video file
# else:
# vs = cv2.VideoCapture(args["video"])
#
# # initialize the FPS throughput estimator
# fps = None
#
# # loop over frames from the video stream
# while True:
# # grab the current frame, then handle if we are using a
# # VideoStream or VideoCapture object
# frame = vs.read()
# frame = frame[1] if args.get("video", False) else frame
#
# # check to see if we have reached the end of the stream
# if frame is None:
# break
#
# # resize the frame (so we can process it faster) and grab the
# # frame dimensions
# frame = imutils.resize(frame, width=500)
# (H, W) = frame.shape[:2]
#
# # check to see if we are currently tracking an object
# if initBB is not None:
# # grab the new bounding box coordinates of the object
# (success, box) = tracker.update(frame)
#
# # check to see if the tracking was a success
# if success:
# (x, y, w, h) = [int(v) for v in box]
# cv2.rectangle(frame, (x, y), (x + w, y + h),
# (0, 255, 0), 2)
#
# # update the FPS counter
# fps.update()
# fps.stop()
#
# # initialize the set of information we'll be displaying on
# # the frame
# info = [
# ("Tracker", args["tracker"]),
# ("Success", "Yes" if success else "No"),
# ("FPS", "{:.2f}".format(fps.fps())),
# ]
#
# # loop over the info tuples and draw them on our frame
# for (i, (k, v)) in enumerate(info):
# text = "{}: {}".format(k, v)
# cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
# cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
#
# # show the output frame
# cv2.imshow("Frame", frame)
# key = cv2.waitKey(1) & 0xFF
#
# # if the 's' key is selected, we are going to "select" a bounding
# # box to track
# if key == ord("s"):
# # select the bounding box of the object we want to track (make
# # sure you press ENTER or SPACE after selecting the ROI)
# initBB = cv2.selectROI("Frame", frame, fromCenter=False,
# showCrosshair=True)
#
# # start OpenCV object tracker using the supplied bounding box
# # coordinates, then start the FPS throughput estimator as well
# tracker.init(frame, initBB)
# fps = FPS().start()
#
# # if the `q` key was pressed, break from the loop
# elif key == ord("q"):
# break
#
# # if we are using a webcam, release the pointer
# if not args.get("video", False):
# vs.stop()
#
# # otherwise, release the file pointer
# else:
# vs.release()
#
# # close all windows
# cv2.destroyAllWindows()
|
# Python exercise 44: compute the amount payable for a product, given its
# list price and the payment method:
#  - cash/cheque: 10% discount
#  - card, single payment: 5% discount
#  - card, up to 2 installments: list price
#  - card, 3+ installments: 20% surcharge
print('='*15, '\033[1;33mLOJAS GUANABARA\033[m', '='*15, )
preco = float(input('Preço das compras: R$'))
print('\033[1;33mFORMAS DE PAGAMENTO\033[m')
print('[ \033[1;33m1\033[m ] à vista dinheiro/cheque')
print('[ \033[1;33m2\033[m ] à vista cartão')
print('[ \033[1;33m3\033[m ] 2x no cartão')
print('[ \033[1;33m4\033[m ] 3x ou mais no cartão')
opcao = int(input('Qual é a opção? '))
if opcao == 1:
    total = preco * 0.90  # 10% discount
    print('Sua compra \033[1;34mà vista dinheiro/cheque\033[m de \033[1;32mR${:.2f}\033[m vai custar \033[1;32mR${:.2f}\033[m no final'.format(preco, total))
elif opcao == 2:
    total = preco * 0.95  # 5% discount
    print('Sua compra \033[1;34mà vista no cartão\033[m de \033[1;32mR${:.2f}\033[m vai custar \033[1;32mR${:.2f}\033[m no final'.format(preco, total))
elif opcao == 3:
    # Fixed: 2 installments use the list price ("preço formal"); the original
    # copy-pasted the 5%-discount line (the value was unused, but wrong).
    total = preco
    print('Sua compra \033[1;34mde 2x no cartão\033[m de \033[1;32mR${:.2f}\033[m vai custar \033[1;32mR${:.2f}\033[m cada parcela'.format(preco, preco / 2))
elif opcao == 4:
    parcelas = int(input('Quantas parcelas? '))
    total = preco * 1.20  # 20% surcharge
    print('Sua compra \033[1;34mde {}x no cartão\033[m de \033[1;32mR${:.2f}\033[m vai custar \033[1;32mR${:.2f}\033[m no total, sendo cada parcela no valor de \033[1;32mR${:.2f}\033[m'.format(parcelas, preco, total, total / parcelas))
else:
    # Fixed: the escape sequence was missing its terminating 'm'
    # ('\033[1;31opsss'), which printed garbage instead of red text.
    print('\033[1;31mopsss! Algo de inesperado aconteceu!\033[m')
|
import os
import sys
import pymysql
# Register PyMySQL as a drop-in replacement for MySQLdb, which Django's
# MySQL backend imports.
pymysql.install_as_MySQLdb()
# Make the project root importable and point Django at its settings module.
sys.path.append(os.getcwd())
os.environ['DJANGO_SETTINGS_MODULE'] = "main.settings"
### new django versions
from django.core.wsgi import get_wsgi_application
wsgi_application = get_wsgi_application()
# WSGI entry point wrapping the Django application.
# NOTE(review): this redirects HTTPS requests *to* plain HTTP with a 301 --
# the opposite of the usual http->https upgrade. Confirm this inversion is
# intentional before deploying.
def application(environ, start_response):
    if environ['wsgi.url_scheme'] == 'https':
        url = 'http://' + environ['HTTP_HOST'] + environ['REQUEST_URI']
        start_response('301 Moved Permanently', [('Location', url)])
        return []
    return wsgi_application(environ, start_response)
|
from django.urls import path
from loginregist import views
# URL configuration for the login/registration app.
app_name = 'loginregist'
urlpatterns = [
    path('login', views.login, name='login'),
    path('loginlogic', views.loginlogic,name='loginlogic'),
    path('regist', views.regist, name='regist'),
    # NOTE(review): route name 'registlo' looks truncated ('registlogic'?);
    # renaming would break existing reverse()/{% url %} lookups, so left as-is.
    path('registlogic', views.registlogic, name='registlo'),
    # NOTE(review): 'getcapthca' is a typo for 'getcaptcha' in both the URL
    # and the name; clients depend on the current spelling, so left as-is.
    path('getcapthca', views.getcapthca, name='getcapthca'),
    path('checkname',views.checkname,name='checkname'),
    path('checkcode',views.checkcode,name='checkcode'),
]
from typing import Dict, Any, Callable
import logging
import traceback
import boto3
logger = logging.getLogger()
sqs_client = boto3.client('sqs')
# give a callback that return True on successful processing
def process_messages(
    queue_url: str,
    message_callback: Callable[[str], bool]) -> None:
    """Pull up to 10 messages from an SQS queue and process each via the callback.

    The callback receives the raw message body (a string -- the annotation was
    previously ``Dict`` but SQS 'Body' is a string) and must return True on
    success. Successful messages are deleted; failed ones are made visible
    again immediately so another consumer can retry them.
    """
    logger.info("Trying to receive messages for queue %s", queue_url)
    response = sqs_client.receive_message(
        QueueUrl=queue_url,
        MaxNumberOfMessages=10,  # SQS maximum per receive call
    )
    messages = response.get('Messages', [])
    logger.info("Got %d message(s)", len(messages))
    # Process every message even if some fail: one bad message must not
    # block the rest of the batch.
    for message in messages:
        try:
            body = message.get('Body')
            if body is None:
                raise Exception("Got a message with no body")
            if message_callback(body) is True:
                logger.info("Deleting message from queue")
                sqs_client.delete_message(
                    QueueUrl=queue_url,
                    ReceiptHandle=message.get('ReceiptHandle'),
                )
            else:
                raise Exception("message callback did not return True")
        except Exception as e:
            traceback.print_exc()
            # Fixed: the original concatenated the sentence and the exception
            # text with no separator; use lazy %-style logging instead.
            logger.error(
                "Failed to process message. Continuing with remaining "
                "messages. Error: %s", e)
            make_message_visible(queue_url, message)
# Set a message's VisibilityTimeout to 0 so it's available again to other consumers
def make_message_visible(queue_url: str, message: Dict[str, Any]) -> None:
    """Reset the message's visibility timeout to zero.

    This releases the message back to the queue immediately, letting another
    consumer pick it up without waiting for the original timeout to expire.
    """
    receipt_handle = message.get('ReceiptHandle')
    sqs_client.change_message_visibility(
        QueueUrl=queue_url,
        ReceiptHandle=receipt_handle,
        VisibilityTimeout=0,
    )
|
def sum_up_to_even(lista):
    """Return the sum of the leading elements of *lista* that precede the
    first even number. If the list holds no even number, return -1."""
    total = 0
    for valor in lista:
        if valor % 2 == 0:
            # Stop at the first even value; it is not included in the sum.
            return total
        total += valor
    # No even number found.
    return -1
print(sum_up_to_even([1,3,8, 9, 10]))
from django.test import TestCase
from numbers_converter.exceptions import TooBigNumberException
from numbers_converter.services import ConverterService
class ConverterServiceTest(TestCase):
    """Tests for ConverterService.number_to_text (Polish number words)."""

    def _check(self, cases):
        # Convert every number first, then compare against the expectations,
        # preserving the original call-then-assert ordering.
        results = [ConverterService.number_to_text(number) for number, _ in cases]
        for result, (_, expected) in zip(results, cases):
            self.assertEqual(result, expected)

    def test_converter_return_zero(self):
        self._check([(0, 'zero')])

    def test_converter_units_or_teens(self):
        self._check([
            (1, 'jeden'), (2, 'dwa'), (3, 'trzy'), (4, 'cztery'),
            (5, 'pięć'), (6, 'sześć'), (7, 'siedem'), (8, 'osiem'),
            (9, 'dziewięć'),
            (11, 'jedenaście'), (12, 'dwanaście'), (13, 'trzynaście'),
            (14, 'czternaście'), (15, 'piętnaście'), (16, 'szesnaście'),
            (17, 'siedemnaście'), (18, 'osiemnaście'), (19, 'dziewiętnaście'),
        ])

    def test_convert_two_digit_number(self):
        self._check([(48, 'czterdzieści osiem'),
                     (93, 'dziewięćdziesiąt trzy')])

    def test_convert_three_digit_number(self):
        self._check([(148, 'sto czterdzieści osiem'),
                     (993, 'dziewięćset dziewięćdziesiąt trzy')])

    def test_convert_four_digit_number(self):
        self._check([(1000, 'jeden tysiąc'),
                     (2361, 'dwa tysiące trzysta sześćdziesiąt jeden')])

    def test_convert_five_digit_number(self):
        self._check([(11111, 'jedenaście tysięcy sto jedenaście')])

    def test_convert_six_and_more_digit_number(self):
        self._check([
            (222211, 'dwieście dwadzieścia dwa tysiące dwieście jedenaście'),
            (13333111321,
             'trzynaście miliardów trzysta trzydzieści trzy miliony sto jedenaście tysięcy trzysta dwadzieścia jeden'),
            (111111111111111111111,
             'sto jedenaście trylionów sto jedenaście biliarów sto jedenaście bilionów sto jedenaście miliardów sto jedenaście milionów sto jedenaście tysięcy sto jedenaście'),
        ])

    def test_number_too_big_exception(self):
        # Numbers beyond the supported magnitude must raise.
        with self.assertRaises(TooBigNumberException):
            ConverterService.number_to_text(2123454353453453454351)

    def test_convert_minus_numbers(self):
        self._check([
            (-1, 'minus jeden'),
            (-12, 'minus dwanaście'),
            (-34, 'minus trzydzieści cztery'),
            (-431, 'minus czterysta trzydzieści jeden'),
            (-4131, 'minus cztery tysiące sto trzydzieści jeden'),
            (-4165656, 'minus cztery miliony sto sześćdziesiąt pięć '
                       'tysięcy sześćset pięćdziesiąt sześć'),
        ])
|
# -*- coding: utf-8 -*-
"""
celery cli handlers logtool module.
"""
from pyrin.task_queues.celery.cli.decorators import celery_cli_handler
from pyrin.task_queues.celery.cli.enumerations import CeleryCLIHandlersEnum
from pyrin.task_queues.celery.cli.interface import CeleryCLIHandlerBase
from pyrin.task_queues.celery.cli.handlers.params import ActionParam, FilesParam
@celery_cli_handler()
class LogToolCLIHandler(CeleryCLIHandlerBase):
    """
    logtool cli handler class.
    """

    def __init__(self):
        """
        initializes an instance of LogToolCLIHandler.
        """
        super().__init__(CeleryCLIHandlersEnum.LOGTOOL)

    def _inject_params(self, params):
        """
        injects all the params of current handler into given list.

        :param list[CLIParamBase] params: list of all params.
        """
        own_params = [ActionParam(index=0), FilesParam(index=1)]
        params.extend(own_params)
        return super()._inject_params(params)
|
import numpy as np
import cv2
import subprocess
from VITA_PRINTER import VITA_PRINTER
from camera_v4 import VITA_PRINTER_CONTROLLER
import serial
import pygame
from pi2uno_v2 import ARDUINO
from picamera import PiCamera
from time import sleep
from fractions import Fraction
refpt = []
isClick = False
def mainLoop(controller, printer):
    """Continuously capture and display processed camera frames; on a mouse
    click, move the printer head so the clicked point ends up under the
    frame center. Runs until KeyboardInterrupt, then shuts down.
    """
    global isClick
    global refpt
    sensor = ARDUINO()
    camera = controller.camera
    camera.start_preview(fullscreen = False, window = (10, 100, 400, 300))
    counter = 0
    filetype = (".jpeg")
    # Cap on temp frames kept on disk before they are wiped.
    num_im_limit = 60
    serialConnection = '/dev/ttyUSB1'
    while True:
        try:
            if counter > num_im_limit:
                # Delete accumulated temp images and restart the numbering.
                subprocess.run("rm -f temp*", shell = True)
                counter = 0
            filename = "temp" + str(counter) + filetype
            counter = counter + 1
            #camera.capture("temp.png", use_video_port = True, resize = (400,300))
            camera.capture(filename, use_video_port = False, resize = (400,300))
            # Grayscale load, contrast-limited histogram equalization, blur,
            # then mark the frame center (200, 150) with a small circle.
            img = cv2.imread(filename,0)
            clahe = cv2.createCLAHE(clipLimit=5.0, tileGridSize=(8,8))
            cl1 = clahe.apply(img)
            #edges = cv2.Canny(cl1, 30,150)
            blur = cv2.GaussianBlur(cl1, (3,5),0)
            cv2.circle(blur, (200, 150), 2, (255, 0, 0))
            #ret, th1 = cv2.threshold(blur, 60, 255, cv2.THRESH_BINARY_INV)
            cv2.imshow("image", blur)
            cv2.setMouseCallback("image", getMouseCoords)
            if isClick:
                isClick = False
                (x, y) = refpt[0]
                print("("+ str(x) + " + " + str(y) + ")")
                # Distance sensor gives the camera height; convert the pixel
                # offset from the frame center into millimetres. Note the
                # x/y axes are swapped between image and printer coordinates.
                height = sensor.getDistance(serialConnection)
                pix2mm = -0.1987*height+50.2399 #formula determined experimentally
                x_dist = (y - 150)/pix2mm
                y_dist = (x - 200)/pix2mm
                print("moving " + str(x_dist) + " " + str(y_dist))
                printer.printerMove(controller, x_dist, y_dist, 0, 2500)
            cv2.waitKey(100)
        except KeyboardInterrupt:
            controller.terminate()
            exit()
def getMouseCoords(event, x, y, flags, param):
    """OpenCV mouse callback: on a left-button press, store the click
    position in the module-level ``refpt`` list and raise ``isClick``."""
    global isClick
    global refpt
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    isClick = True
    refpt = [(x, y)]
#sets up serial connection with Velleman k8200 3d printer
def setupSerial():
    """Open /dev/ttyUSB0 at 250000 baud and return the Serial object."""
    connection = serial.Serial('/dev/ttyUSB0')
    connection.baudrate = 250000
    return connection
#sets up camera settings
def setupCamera():
    """Create and configure a PiCamera with fixed, manual exposure settings.

    Auto exposure and auto white balance are disabled so successive frames
    are directly comparable for image processing.
    """
    camera = PiCamera(
        #resolution = (3280, 2464),
        resolution = (1280, 960),
        #resolution = (400,300),
        sensor_mode = 4
    )
    camera.iso = 100
    camera.raw_format = 'rgb'
    #default = 50
    camera.brightness = 55
    # Long fixed exposure (0.5 s) for consistent lighting between frames.
    camera.shutter_speed = 500000
    #see modes
    #off, auto, night, nightpreview, backlight
    #spotlight, sports, snow, beach, verylong, fixedfps
    #antishake, fireworks
    camera.exposure_mode = 'off'
    camera.annotate_text_size = 160
    # NOTE(review): awb_gains normally takes a (red, blue) pair or Fraction;
    # a scalar 0 is set here before awb is disabled -- confirm intended.
    camera.awb_gains = 0
    camera.awb_mode = 'off'
    #between -100 and 100 default = 0
    camera.contrast = 100
    #camera.color_effects = (120, 120)
    #denoise = true
    camera.image_denoise = False
    #see metering modes 'average', 'spot', backlit', 'matrix'
    camera.meter_mode = 'average'
    #-100 to 100 default = 0
    camera.saturation = 60
    #-100 to 100 default = 0
    camera.sharpness = 100
    return camera
def main():
    """Home the printer, move to the starting position, and enter the
    click-to-move camera loop."""
    ser = setupSerial()
    printer = VITA_PRINTER()
    camera = setupCamera()
    controller = VITA_PRINTER_CONTROLLER(camera, ser)
    # G-code G28: home the X and Y axes before issuing absolute moves.
    ser.write(b"G28 X Y\r\n")
    sleep(5)
    printer.printerCheckOk(controller)
    printer.printerMove(controller, 110, 60, 0, 2500)
    mainLoop(controller, printer)
    #if main loop terminates, exit program
    controller.terminate()
    exit()
if __name__ == "__main__":
    main()
|
import unittest
from zxopt.data_structures.circuit import Circuit, MeasurementComponent, GateComponent, PauliXGateType
from zxopt.data_structures.circuit.register.classical_register import ClassicalRegister
from zxopt.data_structures.circuit.register.quantum_register import QuantumRegister
class CircuitTest(unittest.TestCase):
    """Unit tests for the Circuit data structure: register management,
    component insertion, affected-bit lookup, and step assignment."""

    def __init__(self, *args, **kwargs):
        super(CircuitTest, self).__init__(*args, **kwargs)

    def test_add_remove_quantum_register(self):
        c = Circuit()
        register = QuantumRegister(1)
        c.add_register(register)
        self.assertIn(register, c.quantum_registers)
        self.assertEqual(c, register.circuit)
        c.remove_register(register)
        self.assertNotIn(register, c.quantum_registers)

    def test_add_remove_classical_register(self):
        c = Circuit()
        register = ClassicalRegister(1)
        c.add_register(register)
        self.assertIn(register, c.classical_registers)
        self.assertEqual(c, register.circuit)
        c.remove_register(register)
        self.assertNotIn(register, c.classical_registers)

    def test_add_component(self):
        c = Circuit()
        qreg = QuantumRegister(1)
        creg = ClassicalRegister(1)
        measurement = MeasurementComponent(qreg[0], creg[0])
        # The measurement must report both its quantum and classical bit.
        for bit in (qreg[0], creg[0]):
            self.assertIn(bit, measurement.affected_bits)
        c.add_register(qreg)
        c.add_register(creg)
        c.add_component(measurement)
        self.assertIn(measurement, c.components)
        self.assertEqual(c, measurement.circuit)
        self.assertEqual(0, measurement.step)

    def test_get_components_affecting_bits(self):
        c = Circuit()
        qreg = QuantumRegister(2)
        creg = ClassicalRegister(2)
        # Measurements always affect everything, so plain gates are used here.
        gate0 = GateComponent(qreg[0], PauliXGateType(), {creg[0]})
        gate1 = GateComponent(qreg[1], PauliXGateType(), {creg[1]})
        c.add_register(qreg)
        c.add_register(creg)
        c.add_component(gate0)
        c.add_component(gate1)
        for bits in ({qreg[0]}, {creg[0]}):
            self.assertIn(gate0, c.get_components_affecting_bits(bits))
        for bits in ({qreg[1]}, {creg[1]}):
            self.assertIn(gate1, c.get_components_affecting_bits(bits))
        for bits in ({qreg[1]}, {creg[1]}):
            self.assertNotIn(gate0, c.get_components_affecting_bits(bits))
        for bits in ({qreg[0]}, {creg[0]}):
            self.assertNotIn(gate1, c.get_components_affecting_bits(bits))
        both = {qreg[0], qreg[1]}
        self.assertIn(gate0, c.get_components_affecting_bits(both))
        self.assertIn(gate1, c.get_components_affecting_bits(both))

    def test_add_components_step(self):
        c = Circuit()
        qreg = QuantumRegister(3)
        creg = ClassicalRegister(3)
        c.add_register(qreg)
        c.add_register(creg)
        # Components sharing bits must be placed on consecutive steps.
        for step in range(100):
            gate = GateComponent(qreg[step % 2], PauliXGateType(), {creg[0]})
            c.add_component(gate)
            self.assertEqual(step, gate.step)
        # A component on disjoint bits can still be scheduled at step 0.
        independent = GateComponent(qreg[2], PauliXGateType(), {creg[1]})
        c.add_component(independent)
        self.assertEqual(0, independent.step)
import gym
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
def get_action(observation,W1,W2,b1,b2):
    """Forward-pass the two-layer tanh network and return an action.

    Uses the module-level globals ``ninputs`` and ``env``. For a continuous
    (Box) action space the raw output vector is returned; otherwise the
    index of the largest output is the discrete action.
    """
    # convert the observation array into a matrix with 1 column and ninputs rows
    # NOTE(review): ndarray.resize mutates `observation` in place -- the
    # caller's array is reshaped as a side effect.
    observation.resize(ninputs,1)
    Z1 = np.dot(W1, observation) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = np.tanh(Z2)
    if (isinstance(env.action_space, gym.spaces.box.Box)):
        action = A2
    else:
        action = np.argmax(A2)
    return action
def initialize_weights(ninputs, nhiddens, noutputs, pvariance):
    """Create random weights and zero biases for a two-layer network.

    Returns (W1, W2, b1, b2): W1 is (nhiddens, ninputs), W2 is
    (noutputs, nhiddens), both drawn from N(0, 1) scaled by pvariance;
    the bias column vectors start at zero.
    """
    W1 = pvariance * np.random.randn(nhiddens, ninputs)
    W2 = pvariance * np.random.randn(noutputs, nhiddens)
    b1 = np.zeros((nhiddens, 1))
    b2 = np.zeros((noutputs, 1))
    return W1, W2, b1, b2
def update_weights(best_in_pop,ninputs,nhiddens,noutputs,ppvariance):
    """Produce the next generation: two mutated offspring per survivor.

    Each parent in ``best_in_pop`` (the surviving half of the population)
    spawns two children perturbed by Gaussian noise of variance
    ``ppvariance``, restoring the full population size.

    Bug fixed: the second mutation/append was outside the loop, so only one
    extra individual (mutated from the *last* parent) was added. The
    population shrank from pop_size to pop_size/2 + 1, and the subsequent
    ``find_best_in_pop`` call, which indexes all ``pop_size`` entries,
    raised IndexError.
    """
    next_pop = []
    for W1, W2, b1, b2 in best_in_pop:
        for _ in range(2):  # two offspring per parent
            dW1, dW2, db1, db2 = initialize_weights(ninputs, nhiddens, noutputs, ppvariance)
            next_pop.append([W1 + dW1, W2 + dW2, b1 + db1, b2 + db2])
    return next_pop
def run_steps(env,W1,W2,b1,b2,steps,vis_flag=0,t=10):
    """Run one episode of at most ``steps`` steps; return the summed reward.

    With vis_flag=1 each frame is rendered and the loop sleeps so the whole
    episode takes roughly ``t`` seconds.

    NOTE(review): the first env.step is always taken with action=0 before
    the network is consulted; for Box action spaces that scalar may not
    match the expected action shape -- confirm.
    """
    env.reset()
    eval=0
    action=0
    for _ in range(steps):
        observation, reward, done, info = env.step(action)
        eval+=reward
        action=get_action(observation,W1,W2,b1,b2)
        if vis_flag==1:
            env.render()
            sleep(t/steps)
    return eval
def find_best_in_pop(pop_size,l_pop,env,W1,W2,b1,b2,steps):
    """Evaluate every individual and return (better half, mean score).

    The returned list holds the top half of ``l_pop`` ordered by episode
    reward ascending (its last element is the best individual); the score
    is the average per-step reward across that surviving half.

    NOTE(review): the W1/W2/b1/b2 parameters are immediately overwritten in
    the loop and carry no input value -- they act only as local names.
    """
    rewards=np.zeros(pop_size)
    for i in range(pop_size):
        W1, W2, b1, b2=l_pop[i]
        rewards[i]=run_steps(env,W1,W2,b1,b2,steps)
    # Indices of individuals sorted by reward, ascending.
    best_l_pop=np.argsort(rewards)
    best_in_pop=[]
    for i in best_l_pop[int(pop_size/2):]:
        best_in_pop.append(l_pop[i])
    # Average reward per step over the surviving half.
    eval=sum(rewards[best_l_pop[int(pop_size/2):]])/(steps*pop_size/2)
    return best_in_pop, eval
# --- Evolutionary training driver for CartPole ---
env=gym.make('CartPole-v0')
steps=200
epochs=50
pop_size=10
pvariance=0.1  # variance of the initial weights
ppvariance=0.005  # variance of the mutation noise
nhiddens=5
ninputs = env.observation_space.shape[0]
# Continuous (Box) action spaces need one output per action dimension;
# discrete spaces need one output per possible action.
if (isinstance(env.action_space, gym.spaces.box.Box)):
    noutputs = env.action_space.shape[0]
else:
    noutputs = env.action_space.n
# Build the initial random population and keep a history of generations.
l_pop=[]
evolution=[]
for i in range(pop_size):
    W1, W2, b1, b2=initialize_weights(ninputs,nhiddens,noutputs,pvariance)
    l_pop.append([W1,W2,b1,b2])
evolution.append(l_pop)
results=np.zeros((epochs,2))
# Evolve: select the better half, mutate it back to full population size.
for i in range(epochs):
    best_in_pop, eval=find_best_in_pop(pop_size,l_pop,env,W1,W2,b1,b2,steps)
    l_pop=update_weights(best_in_pop,ninputs,nhiddens,noutputs,ppvariance)
    evolution.append(l_pop)
    results[i,:]=[i+1,eval]
    print(i+1,eval)
# Replay the best individual of the final generation with rendering on.
W1, W2, b1, b2=best_in_pop[-1]
run_steps(env,W1,W2,b1,b2,steps,vis_flag=1,t=10)
# Plot mean per-step reward per epoch.
plt.plot(results[:,0],results[:,1])
plt.title('Evaluation neural network by summary reward')
plt.xlabel('Epochs')
plt.ylabel('Summary reward')
plt.xlim([0,epochs+1])
plt.ylim([0,1.1])
plt.grid('on')
plt.show()
def check_memory_loop(memory, current_memory):
    """Return True when the two memory-bank configurations are identical."""
    return memory == current_memory
def memory_distribution(file_name):
    """Memory-bank reallocation: repeatedly pick the fullest bank and hand
    its blocks out one at a time to the following banks (wrapping around),
    counting redistribution steps until the configuration repeats.
    """
    with open(file_name) as fp:
        line = [int(x) for x in fp.read().strip().split()]
    # NOTE(review): `tmp = line` aliases the SAME list -- the author's own
    # comment here ("zmienia sie" = "it changes") flags this. As a result
    # check_memory_loop(tmp, line) compares the list with itself and is
    # always True, stopping at the first comparison. A copy (line[:]) plus
    # a set of previously-seen states is likely the intent; left unchanged
    # pending confirmation.
    tmp = line
    print(line)
    maximum = max(line)
    print(maximum)
    index = line.index(maximum)
    print(index)
    length = len(line)
    i = index + 1
    count = 0
    while i <= length:
        count += 1
        # Wrap the cursor past the end of the bank list.
        if i == length:
            i = 0
        if line[index] != 0:
            if i != index:
                # Move one block from the source bank to bank i.
                line[index] -= 1
                line[i] += 1
                if check_memory_loop(tmp, line):
                    break
                i += 1
            else:
                # Skip the source bank itself; restart from the new maximum.
                maximum = max(line)
                index = line.index(maximum)
                i = index + 1
        else:
            # Source bank exhausted; pick the next fullest bank.
            maximum = max(line)
            index = line.index(maximum)
            i = index + 1
    print(count)
if __name__ == '__main__':
    memory_distribution('input')
# Time complexity: O(n), space complexity: O(1).
# Approach: advance two pointers at different speeds; if they ever meet,
# the list contains a cycle (Floyd's tortoise-and-hare).
class Solution(object):
    def hasCycle(self, head):
        """Return True when the linked list starting at `head` has a cycle."""
        slow = head
        fast = head
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                return True
        return False
# PyTorch imports
import torch
import torch.nn as nn
import torch.optim as optim
# Other libraries
import numpy as np
from collections import OrderedDict
import math
class heuristic:
    """
    Heuristic win-probability model: the sigmoid of the scaled sum of the
    gold and experience features.
    """

    def __init__(self, xp_scale_factor=1.0, total_scale_factor=2.0):
        """
        Keywords:
            xp_scale_factor: Float. Relative weight of experience vs. gold.
            total_scale_factor: Float. Divisor applied to the summed features
                before the sigmoid.
        """
        self.xp_scale_factor = xp_scale_factor
        self.total_scale_factor = total_scale_factor
        # Per-feature scale: ones for the 10 gold features followed by the
        # xp scale for the 10 experience features.
        # NOTE(review): fit() sums the raw inputs and never applies this
        # vector -- confirm whether the scaling was meant to be used there.
        self.gold_xp_scale = torch.cat(
            (torch.ones(10), self.xp_scale_factor * torch.ones(10)))

    def fit(self, inputs):
        """
        Inputs:
            inputs: torch.Tensor. Input features (batch size 1 supported).
        Returns:
            torch.Tensor. Probability of radiant win.
        """
        combined = inputs.sum(dim=1)
        return self.sigmoid(combined / self.total_scale_factor)

    def sigmoid(self, inputs):
        # Logistic function 1 / (1 + e^-x).
        return torch.reciprocal(1 + torch.exp(-inputs))
class LSTM_baseline(nn.Module):
    """
    Baseline LSTM for win-probability prediction.

    Bases: torch.nn.Module
    """

    def __init__(self, input_dim, hidden_dim, output_dim=1, batch_size=10, device=torch.device('cpu')):
        """
        Inputs:
            input_dim: int. Dimension of the input features.
            hidden_dim: int. Dimension of the LSTM hidden state.
        Keywords:
            output_dim: int. Output dimension (1: a single prediction value).
            batch_size: int. Batch size used for the carried hidden state.
            device: torch.device. Device to hold the hidden state on.
        """
        super(LSTM_baseline, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        # Fall back to CPU when CUDA was requested but is unavailable.
        if device == torch.device('cuda') and torch.cuda.is_available():
            print('GPU is used.')
            self.device = device
        elif device == torch.device('cuda'):
            self.device = torch.device('cpu')
        else:
            self.device = device
        self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, batch_first=True)
        self.linear = nn.Linear(self.hidden_dim, self.output_dim)
        self.hidden_cell = self.init_hidden(batch_size)

    def init_hidden(self, batch_size):
        """Return zeroed (h, c) states of shape (1, batch_size, hidden_dim)."""
        h0 = torch.zeros(1, batch_size, self.hidden_dim).to(self.device)
        c0 = torch.zeros(1, batch_size, self.hidden_dim).to(self.device)
        return (h0, c0)

    def forward(self, inputs):
        """
        Inputs:
            inputs: torch.Tensor, size=(batch_size, L, input_dim), where L is
                the sequence length (batch_first LSTM layout).
        Returns:
            torch.Tensor, size=(batch_size, L, output_dim), squashed by tanh.
        """
        lstm_out, hidden_cell = self.lstm(inputs, self.hidden_cell)
        # Detach the carried state so gradients don't flow across calls.
        self.hidden_cell = tuple(h.detach_() for h in hidden_cell)
        return torch.tanh(self.linear(lstm_out))
class LSTMWithH2vSubnet(nn.Module):
    """
    LSTM model plus a feed forward subnet for the team embeddings.
    Bases: Torch.nn.Module
    """
    # NOTE(review): h2v_layer_dim uses a mutable list as a default argument;
    # it is never mutated here so this is harmless, but a tuple would be safer.
    def __init__(self, input_dim, hidden_dim, h2v_dim=20, h2v_layer_dim=[50,30,1],
                 output_dim=1, batch_size=10, device=torch.device('cpu')):
        """
        Inputs:
            input_dim: int. Dimension of the per-step input features.
            hidden_dim: int. Dimension of the LSTM hidden state.
        Keywords:
            h2v_dim: int. Dimension of the team-embedding input vector.
            h2v_layer_dim: list[int]. Output widths of the feed-forward layers
                applied to the embedding; the last entry is the subnet width.
            output_dim: int. Final output dimension.
            batch_size: int. Batch size used to initialize the hidden state.
            device: torch.device. Device for the hidden state tensors.
        """
        super(LSTMWithH2vSubnet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.h2v_dim = h2v_dim
        self.h2v_layer_dim = h2v_layer_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        # Fall back to CPU when CUDA was requested but is unavailable.
        if device == torch.device('cuda'):
            if torch.cuda.is_available():
                print('GPU is used.')
                self.device = device
            else:
                self.device = torch.device('cpu')
        else:
            self.device = device
        self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, batch_first=True)
        # Final layer consumes the LSTM output concatenated with the subnet
        # output, hence the summed input width.
        self.linear = nn.Linear(self.hidden_dim+self.h2v_layer_dim[-1], self.output_dim)
        self.hidden_cell = self.init_hidden(batch_size)
        # Build the embedding subnet as a stack of linear layers.
        # NOTE(review): no activations between the linear layers, so the
        # subnet reduces to a single affine map -- confirm that is intended.
        h2v_layers = OrderedDict()
        h2v_layers['Linear0'] = nn.Linear(h2v_dim, h2v_layer_dim[0])
        for i in range(len(h2v_layer_dim)-1):
            h2v_layers['Linear'+str(i+1)] = nn.Linear(h2v_layer_dim[i], h2v_layer_dim[i+1])
        self.h2v_linear = nn.Sequential(h2v_layers)
    def init_hidden(self, batch_size):
        """
        The function to initialize the hidden layer.
        """
        return (torch.zeros(1,batch_size,self.hidden_dim).to(self.device),
                torch.zeros(1,batch_size,self.hidden_dim).to(self.device))
    def forward(self, inputs, embeddings):
        """Run the sequence through the LSTM and the embeddings through the
        subnet, concatenate them per time step, and map to the output."""
        lstm_out, hidden_cell = self.lstm(inputs, self.hidden_cell)
        # Detach so gradients do not propagate across forward calls.
        self.hidden_cell = tuple([hidden_.detach_() for hidden_ in hidden_cell])
        h2v_out = self.h2v_linear(embeddings)
        # Broadcast the subnet output across the sequence dimension.
        # NOTE(review): the view(-1, L, 1) assumes h2v_layer_dim[-1] == 1;
        # other widths would reshape incorrectly -- confirm.
        concat_out = torch.cat((lstm_out, h2v_out.repeat(1,lstm_out.shape[1],1).view(-1,lstm_out.shape[1],1)), dim=2)
        concat_out = self.linear(concat_out)
        return torch.tanh(concat_out)
|
def count_sheep( input_ ):
    """Counting Sheep (GCJ 2016 qual A): return the smallest multiple of
    input_ after which every decimal digit 0-9 has appeared in the sequence
    of multiples, or "INSOMNIA" when input_ is 0 (the digits never complete)."""
    if input_ == 0:
        return "INSOMNIA"
    seen = set()
    multiple = input_
    while True:
        seen.update(str(multiple))
        if len(seen) == 10:
            return multiple
        multiple += input_
if __name__ == '__main__':
    # NOTE: Python 2 entry point (raw_input / xrange / print statement).
    # Reads T test cases, one integer N per case, GCJ output format.
    t = int(raw_input())
    for i in xrange( 1, t+1 ):
        n = int(raw_input())
        print "Case #{}: {}".format(i, count_sheep(n))
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from authentication.models import Account
class AccountTests(APITestCase):
    def test_create_account(self):
        """
        Ensure we can create a new account object.
        """
        # Known issue: URL reversal for 'accounts' does not resolve correctly.
        url = reverse(r'accounts')
        payload = {'username': 'John.Doe', 'email': 'John.Doe@nowhere.com'}
        response = self.client.post(url, payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Account.objects.count(), 1)
        self.assertEqual(Account.objects.get().username, 'John.Doe')
|
from data_types import *
import cv2
import numpy as np
import math
# Number of per-octave sigma entries to precompute in init_sigma_levels().
kNumLevelsInitSigma = 40
# Base keypoint sigma at pyramid level 0.
kSigmaLevel0 = 1.0
# Default cap on the number of detected features to keep.
kNumFeatures = 2000
# FAST keypoints have tiny sizes; rescale so descriptors get a usable patch.
kFASTKeyPointSizeRescaleFactor = 4
# best features to keep
def sat_num_features(keypoints, des=None, num_features=kNumFeatures):
    """Keep at most num_features keypoints, preferring the highest responses.

    :param keypoints: sequence of keypoints (each exposes a .response score).
    :param des: optional descriptor array aligned with keypoints.
    :param num_features: maximum number of keypoints to keep.
    :return: (keypoints, descriptors) truncated to num_features.
    BUGFIX: the original only assigned the return values inside the truncation
    branch, so calls with len(keypoints) <= num_features raised UnboundLocalError.
    """
    if len(keypoints) <= num_features:
        return keypoints, des
    if des is None:
        # keep the features with the best response
        kps = sorted(keypoints, key=lambda x: x.response, reverse=True)[:num_features]
    else:
        # sort by score to keep the highest-scoring features, reordering des in lockstep
        order = np.argsort([kp.response for kp in keypoints])[::-1][:num_features]  # [::-1] is for reverse order
        kps = np.array(keypoints)[order]
        des = np.array(des)[order]
    return kps, des
def hamming_distance(a, b):
    """Hamming distance between two descriptors: count of differing entries."""
    mismatches = (a != b)
    return np.count_nonzero(mismatches)
def hamming_distances(a, b):
    """Row-wise Hamming distances between two stacks of descriptors."""
    mismatches = (a != b)
    return np.count_nonzero(mismatches, axis=1)
def l2_distance(a, b):
    """Euclidean (L2) distance between two descriptors, compared flattened."""
    diff = a.ravel() - b.ravel()
    return np.linalg.norm(diff)
def l2_distances(a, b):
    """Row-wise L2 distances; the reduced axis is kept (trailing size-1 dim)."""
    diff = a - b
    return np.linalg.norm(diff, axis=-1, keepdims=True)
def import_f(f_module, f_name, method=None):
    """Dynamically import f_name (or f_name.method) from module f_module.

    :param f_module: module path to import from.
    :param f_name: attribute name to fetch from the module.
    :param method: optional attribute of f_name to fetch instead.
    :return: the imported object, or None (with a warning) when unavailable.
    """
    try:
        im_module = __import__(f_module, fromlist=[f_name])
        im_name = getattr(im_module, f_name)
        if method is None:
            return im_name
        return getattr(im_name, method)
    except (ImportError, AttributeError):
        # Narrowed from a bare except: only import/lookup failures are expected here.
        if method is not None:
            f_name = f_name + '.' + method
        print('WARNING: cannot import ' + f_name + ' from ' + f_module + ', check the file TROUBLESHOOTING.md')
        return None
# manager to manage the points features and descriptors
class points_manager:
    """Configure a keypoint detector (FAST only) plus keypoint filtering, and
    expose detect / compute / detectAndCompute helpers."""
    def __init__(self, number_features=2000, number_levels=4, scale=1.2,
                 detector_type=DetectorTypes.FAST, descriptor_type=DescriptorTypes.FAST):
        """
        :param number_features: maximum number of keypoints kept after filtering.
        :param number_levels: number of pyramid levels (NMS is enabled when > 1).
        :param scale: scale factor between consecutive pyramid levels.
        :param detector_type: keypoint detector type (only DetectorTypes.FAST is handled).
        :param descriptor_type: descriptor type; selects the distance functions.
        :raises ValueError: for an unknown detector type or an unmanaged norm type.
        """
        self.detector_type = detector_type
        self.feature_type = None
        self.descriptor_type = descriptor_type
        self.number_features = number_features
        self.number_levels = number_levels
        self.scale = scale
        self.norm_type = None
        self.do_keypoints_size_rescaling = False # managed below depending on selected features
        self.keypoint_filter_type = KeyPointFilterTypes.SAT # keypoint-filter type
        self.need_nms = False # non-maximum suppression needed
        self.keypoint_nms_filter_type = KeyPointFilterTypes.KDT_NMS # default keypoint-filter type if NMS is needed
        self.sigma_level0 = kSigmaLevel0
        self.keypoint_filter_type = KeyPointFilterTypes.SAT
        # sigmas for keypoint levels
        self.init_sigma_levels()
        self.need_color_image = False
        # detector
        self.FAST_create = import_f('cv2', 'FastFeatureDetector_create')
        self.need_nms = False
        if self.detector_type == DetectorTypes.FAST:
            self._detector = self.FAST_create(threshold=20, nonmaxSuppression=True)
            if self.descriptor_type != DescriptorTypes.NONE:
                # self.use_bock_adaptor = True # override a block adaptor?
                self.use_pyramid_adaptor = self.number_levels > 1 # override a pyramid adaptor?
                self.need_nms = self.number_levels > 1
                self.keypoint_nms_filter_type = KeyPointFilterTypes.OCTREE_NMS
                self.do_keypoints_size_rescaling = True
        else:
            raise ValueError("Unknown feature detector %s" % self.detector_type)
        # BUGFIX: compute()/detectAndCompute() referenced attributes that were never
        # initialized (self._feature_detector, self._feature_descriptor,
        # self.is_detector_equal_to_descriptor) and crashed with AttributeError;
        # alias both roles to the single configured detector.
        # NOTE(review): OpenCV's FAST provides no descriptor computation — confirm
        # the intended descriptor object before relying on compute().
        self._feature_detector = self._detector
        self._feature_descriptor = self._detector
        self.is_detector_equal_to_descriptor = self.detector_type == self.descriptor_type
        if self.need_nms:
            self.keypoint_filter_type = self.keypoint_nms_filter_type
        try:
            self.norm_type = FInfo.norm_type[self.descriptor_type]
        except (KeyError, AttributeError):
            # narrowed from a bare except: only a missing table entry is expected here
            print('You did not set the norm type for: ', self.descriptor_type.name)
            raise ValueError("Unmanaged norm type for feature descriptor %s" % self.descriptor_type.name)
        # descriptor distance functions
        if self.norm_type == cv2.NORM_HAMMING:
            self.descriptor_distance = hamming_distance
            self.descriptor_distances = hamming_distances
        if self.norm_type == cv2.NORM_L2:
            self.descriptor_distance = l2_distance
            self.descriptor_distances = l2_distances
        print('points manager is created')  # BUGFIX: typo "manger" in the log message
    # initialize scale factors, sigmas for each octave level
    def init_sigma_levels(self):
        """Precompute per-level scale factors and (inverse) sigma^2 tables."""
        print('num_levels: ', self.number_levels)
        num_levels = max(kNumLevelsInitSigma, self.number_levels)
        self.inv_scale_factor = 1. / self.scale
        self.scale_factors = np.zeros(num_levels)
        self.level_sigmas2 = np.zeros(num_levels)
        self.level_sigmas = np.zeros(num_levels)
        self.inv_scale_factors = np.zeros(num_levels)
        self.inv_level_sigmas2 = np.zeros(num_levels)
        self.log_scale_factor = math.log(self.scale)
        self.scale_factors[0] = 1.0
        self.level_sigmas2[0] = self.sigma_level0 * self.sigma_level0
        self.level_sigmas[0] = math.sqrt(self.level_sigmas2[0])
        for i in range(1, num_levels):
            self.scale_factors[i] = self.scale_factors[i - 1] * self.scale
            self.level_sigmas2[i] = self.scale_factors[i] * self.scale_factors[i] * self.level_sigmas2[0]
            self.level_sigmas[i] = math.sqrt(self.level_sigmas2[i])
        for i in range(num_levels):
            self.inv_scale_factors[i] = 1.0 / self.scale_factors[i]
            self.inv_level_sigmas2[i] = 1.0 / self.level_sigmas2[i]
    # filter matches by using
    # or SAT (get features with best responses)
    def filter_keypoints(self, type, frame, kps, des=None):
        """Apply the requested keypoint filter; return (keypoints, des, filter_name).

        BUGFIX: the original left the return variable unbound for the NONE filter
        and for SAT when no truncation was needed.
        """
        _name = type.name
        keypoints = kps  # default: pass keypoints through unchanged
        if type == KeyPointFilterTypes.NONE:
            pass
        elif type == KeyPointFilterTypes.SAT:
            if len(kps) > self.number_features:
                keypoints, des = sat_num_features(kps, des, self.number_features)
        else:
            raise ValueError("Unknown match-filter type")
        return keypoints, des, _name
    def keypoints_rescale(self, keypoints):
        # if keypoints are FAST, etc. then rescale their small sizes
        # in order to let descriptors compute an encoded representation with a decent patch size
        scale = 1
        doit = False
        if self.detector_type == DetectorTypes.FAST:
            scale = kFASTKeyPointSizeRescaleFactor
            doit = True
        if doit:
            for keypoint in keypoints:
                keypoint.size *= scale
    # out: kps (array of cv2.KeyPoint)
    def detect(self, frame, mask=None, filter=True):
        """Detect keypoints on frame (converted to gray if needed), then filter."""
        if not self.need_color_image and frame.ndim > 2: # check if we have to convert to gray image
            # BUGFIX: the original converted to gray and then SKIPPED detection
            # for color frames (kps was left unbound); detection now always runs.
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        # standard detection
        kps = self._detector.detect(frame, mask)
        # filter keypoints
        filter_name = 'NONE'
        if filter:
            kps, _, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps)
        # if keypoints are FAST, etc. give them a decent size in order to properly compute the descriptors
        if self.do_keypoints_size_rescaling:
            # BUGFIX: the method is named keypoints_rescale (rescale_keypoint_size did not exist)
            self.keypoints_rescale(kps)
        return kps
    # compute the descriptors once given the keypoints
    def compute(self, frame, kps, filter=True):
        """Compute descriptors for the given keypoints; optionally filter afterwards."""
        if not self.need_color_image and frame.ndim > 2: # check if we have to convert to gray image
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        kps, des = self._feature_descriptor.compute(frame, kps) # then, compute descriptors
        # filter keypoints
        filter_name = 'NONE'
        if filter:
            kps, des, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps, des)
        return kps, des
    # detect keypoints and their descriptors
    # out: kps, des
    def detectAndCompute(self, frame, mask=None, filter=True):
        """Detect keypoints and compute their descriptors in a single call."""
        if not self.need_color_image and frame.ndim > 2: # check if we have to convert to gray image
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        # standard detectAndCompute
        if self.is_detector_equal_to_descriptor:
            # detector = descriptor => call them together with detectAndCompute() method
            kps, des = self._feature_detector.detectAndCompute(frame, mask)
        else:
            # detector and descriptor are different => call them separately
            # 1. first, detect keypoint locations
            kps = self.detect(frame, mask, filter=False)
            # 2. then, compute descriptors
            kps, des = self._feature_descriptor.compute(frame, kps)
        filter_name = 'NONE'
        if filter:
            kps, des, filter_name = self.filter_keypoints(self.keypoint_filter_type, frame, kps, des)
        return kps, des
def points_manager_factory(number_features=2000, number_levels=4, scale=1.2,
                           detector_type=DetectorTypes.FAST, descriptor_type=DescriptorTypes.FAST):
    """Build a points_manager with the given configuration."""
    return points_manager(
        number_features=number_features,
        number_levels=number_levels,
        scale=scale,
        detector_type=detector_type,
        descriptor_type=descriptor_type,
    )
|
from itertools import permutations
# Google Code Jam-style solver (Python 2: raw_input / print statements).
# Input per case: k c s (core count, concatenations, allowed sleep spots).
t = int( raw_input() )
for i in range( 1, t+1 ):
    inps = raw_input().split(" ")
    k = int(inps[0])
    c = int(inps[1])
    s = int(inps[2])
    #print k
    #print c
    #print s
    if k == 1:
        print 'Case #{}: {}'.format(i, 1)
        continue
    # NOTE: this print + unconditional continue always answers 1..k, so all of
    # the brute-force code below is DEAD (kept from an earlier attempt).
    print 'Case #{}:'.format(i), " ".join([ str(i) for i in range(1,k+1) ])
    continue
    # --- dead code below: enumerate all k-bit patterns and their c-fold
    # --- self-substitution expansions, then search for a covering mask.
    jstr = []
    j=0
    while j < 2**k :
        j +=1
        st = "{0:b}".format(j).zfill(k)
        #print st
        stemp = st
        s0 = ''.join(['0' for l in range(len(st))])
        if c==1:
            jstr.append(st)
        for p in range(1,c):
            final_str = ""
            for n in range(0,len(stemp)):
                if stemp[n] == '0':
                    final_str += s0
                else:
                    final_str += st
            stemp = final_str
            if p == c-1:
                jstr.append( final_str )
            #print final_str
    rem = 0
    dif = 0
    for j in jstr:
        rem = int(j,2) & rem
        dif = int(j,2) | dif
        length = len(j)
    # print "{0:b}".format(rem).zfill(length)
    # print "{0:b}".format(dif).zfill(length)
    ind = "{0:b}".format(dif).zfill(length).find('0')
    if ind != -1:
        print 'Case #{}: {}'.format(i, ind )
        continue
    else:
        masks = []
        for row in permutations("{0:b}".format( 2**( s ) -1).zfill(length)):
            masks.append( "".join(list(row)) )
        #print masks
        #print jstr
        peaceout = False
        for m in masks:
            #print m
            dif = 0
            nope =False
            for j in jstr:
                if ( int(j,2) & int(m,2) ) == int(m,2):
                    nope = True
                    break
            if nope:
                continue
            polo = []
            for lp in range( 0, len(m) ):
                if m[lp]=='1':
                    polo.append(str(lp+1))
            print 'Case #{}:'.format(i), " ".join(polo)
            peaceout = True
            break
        if not peaceout:
            print 'Case #{}: IMPOSSIBLE'.format(i)
    # print 'Case #{}: {}'.format(i, jstr)
# ------
# GG: GG GG: GG GG GG GG
# GL: GG GL: GG GG GG GL
# LG: LG GG: LG GG GG GG
# LL: LL LL: LL LL LL LL
# ------
# GGG: GGG GGG GGG
# GGL: GGG GGG GGL
# GLG: GGG GLG GGG
# GLL: GGG GLL GLL
# LGG: LGG GGG GGG
# LGL: LGL GGG LGL
# LLG: LLG LLG GGG
# LLL: LLL LLL LLL
# Generated by Django 2.2.3 on 2019-11-15 21:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add link-job bookkeeping fields (job id, started/ended timestamps)
    to VocabularyListEntry. Auto-generated by Django 2.2.3."""
    dependencies = [
        ('vocab_list', '0007_auto_20190404_2141'),
    ]
    operations = [
        migrations.AddField(
            model_name='vocabularylistentry',
            name='link_job_ended',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='vocabularylistentry',
            name='link_job_id',
            field=models.CharField(blank=True, max_length=250),
        ),
        migrations.AddField(
            model_name='vocabularylistentry',
            name='link_job_started',
            field=models.DateTimeField(null=True),
        ),
    ]
|
from selenium.webdriver.common.by import By
from testcase.common.allBasePageClass import AllBasePage
from utils.config import get_appPackage
class ZXFillAnswerPage1(AllBasePage):
    """Page object for the real-exam (zhenti) writing answer page."""
    appPackage = get_appPackage()
    '''
    Real-exam writing answer page.
    '''
    # Resource-id locators, parameterized by the app package.
    start_to_answer_btn_id = (By.ID, "{}:id/fragment_error_find_question_start_tv".format(appPackage))
    # com.langlib.ncee: id / fragment_error_find_question_start_tv
    zhenti_xiezuo_time_id = (By.ID, "{}:id/fragment_writing_detail_time".format(appPackage))
    zhenti_xiezuo_words_count_id = (By.ID, "{}:id/fragment_writing_detail_count".format(appPackage))
    zhenti_xiezuo_edit_id = (By.ID, "{}:id/fragment_writing_detail_edittext".format(appPackage))
    zhenti_xiezuo_zancun_id = (By.ID, "{}:id/frame_writing_bottom_btn_cache_tv".format(appPackage))
    zhenti_xiezuo_submit_id = (By.ID, "{}:id/frame_writing_bottom_btn_handin_tv".format(appPackage))
    def click_zhenti_xiezuo_start_to_answer_btn(self):
        """Tap the 'start answering' button."""
        self.find_element(*self.start_to_answer_btn_id).click()
    def get_zhenti_xiezuo_time(self):
        """Return the displayed writing-time text."""
        return self.getText(self.find_element(*self.zhenti_xiezuo_time_id))
    def get_zhenti_xiezuo_words_count(self):
        """Return the displayed word-count text."""
        return self.getText(self.find_element(*self.zhenti_xiezuo_words_count_id))
    def fill_zhenti_xiezuo_answer(self, answer):
        """Type the answer into the writing edit box, then hide the keyboard."""
        ele = self.find_element(*self.zhenti_xiezuo_edit_id)
        ele.send_keys(str(answer))
        self.hideKeyboard()
    def click_zhenti_xiezuo_zancun_btn(self):
        """Tap the 'save draft' (zancun) button."""
        self.find_element(*self.zhenti_xiezuo_zancun_id).click()
    def click_zhenti_xiezuo_submit_id(self):
        """Tap the 'submit' (handin) button."""
        self.find_element(*self.zhenti_xiezuo_submit_id).click()
if __name__ == "__main__":
pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time: 00:14 2020/12/25
# @Author: Sijie Shen
# @File: seq2seq_inference
# @Project: Seq2seqModel
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time: 01:38 2020/8/11
# @Author: Sijie Shen
# @File: seq2seq_train
# @Project: Seq2seqModel
import torch
import torch.nn as nn
import torchtext
import random
import os
import json
import pickle
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm
from nltk.translate.bleu_score import corpus_bleu
from utils.logger import get_logger
from utils.trainer import Trainer
from models.seq2seq import Seq2seqModel
def sub_tokens_to_tokens(sub_tokens):
    """Merge '$$'-suffixed sub-tokens back into whole tokens.

    A sub-token ending in '$$' is a prefix continued by the next sub-token;
    an unterminated trailing fragment is emitted as its own token.
    """
    tokens = []
    buffer = ''
    for piece in sub_tokens:
        if piece.endswith('$$'):
            buffer += piece[:-2]
        else:
            tokens.append(buffer + piece)
            buffer = ''
    if buffer:
        tokens.append(buffer)
    return tokens
class MyTrainer(Trainer):
    """Trainer specialized for greedy seq2seq inference; training hooks are no-ops."""
    def __init__(self, model, optimizer, lr_scheduler, loss_function, logger, writer, train_params):
        super().__init__(model, optimizer, lr_scheduler, loss_function, logger, writer, train_params)
    def train_batch(self, batch_data):
        # Not used: this subclass only performs inference.
        pass
    def evaluate_batch(self, batch_data):
        # Not used: this subclass only performs inference.
        pass
    def inference(self, dataset, name, src_vocab, tgt_vocab, max_decode_length=64):
        """Decode every example in dataset, dump hypotheses to a JSON-lines file,
        and log exact-match accuracy and corpus BLEU.

        :param dataset: iterable of example dicts (JSON-decoded lines).
        :param name: split name used in logs and the output file name.
        :param src_vocab: source-side torchtext vocab (stoi used).
        :param tgt_vocab: target-side torchtext vocab (itos used).
        :param max_decode_length: decoding length cap passed to the model.
        """
        self.model.eval()
        # NOTE(review): hard-codes 'cuda:0'; assumes the model was moved there — confirm.
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        output_file = (self.output_dir / ('%s_inference_result.json' % name)).open('w', encoding='utf-8')
        total = 0
        correct = 0
        refs = list()
        hyps = list()
        with torch.no_grad():
            for i, example in tqdm(enumerate(dataset)):
                total += 1
                # Wrap the source tokens in <sos>/<eos> before numericalizing.
                input_seq = ['<sos>'] + example[self.train_params['dataset']['input_key']] + ['<eos>']
                input_length = torch.tensor([len(input_seq)], dtype=torch.int64).to(device)
                input_ids = [src_vocab.stoi[token] for token in input_seq]
                input_ids = torch.tensor(input_ids, dtype=torch.int64).unsqueeze(1).to(device)
                model_output = self.model.inference(input_ids, input_length, max_decode_length)
                output_seq, output_length = model_output
                output_tokens = list()
                for idx in output_seq:
                    output_tokens.append(tgt_vocab.itos[idx])
                # Merge '$$'-joined sub-tokens back into full tokens before scoring.
                output_tokens = sub_tokens_to_tokens(output_tokens)
                refs.append([example['tgt_tokens']])
                hyps.append(output_tokens)
                if output_tokens == example['tgt_tokens']:
                    correct += 1
                example['hyp'] = output_tokens
                print(json.dumps(example), file=output_file)
        self.logger.info('%s accuracy: %.6f' % (name.capitalize(), correct / total))
        self.logger.info('%s BLEU: %.6f' % (name.capitalize(), corpus_bleu(refs, hyps) * 100))
        output_file.close()
def main(args):
    """Load config, vocabularies, datasets and a checkpoint, then run greedy
    inference on the validation and test splits.

    :param args: argparse namespace with .params (config JSON path) and
        .load (checkpoint path; required by the __main__ guard).
    """
    # Read params configuration
    params = json.load(open(args.params))
    model_params = params['model_params']
    train_params = params['train_params']
    output_dir = Path(train_params['output_dir'])
    if not output_dir.exists():
        output_dir.mkdir()
    # Set up logger and TensorBoard writer
    logger = get_logger(output_dir / 'inference.log')
    logger.debug('PID: %d', os.getpid())
    logger.info('Using params file: %s' % args.params)
    logger.info(json.dumps(params))
    writer = None
    # Set random seed
    seed = 1911
    random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.benchmark = True
    logger.debug('Set random seed to %d', seed)
    # Load vocabulary and dataset
    logger.debug('Loading vocabulary...')
    src_field = torchtext.data.field.Field(init_token='<sos>', eos_token='<eos>', include_lengths=True)
    src_field.vocab = pickle.load(Path(train_params['dataset']['src_vocab_path']).open('rb'))
    # Vocab sizes must match the embedding sizes the model was built with.
    assert len(src_field.vocab) == model_params['src_vocab_size']
    tgt_field = torchtext.data.field.Field(init_token='<sos>', eos_token='<eos>', is_target=True)
    tgt_field.vocab = pickle.load(Path(train_params['dataset']['tgt_vocab_path']).open('rb'))
    assert len(tgt_field.vocab) == model_params['tgt_vocab_size']
    logger.info('Vocab loaded, src vocab size: %d, tgt vocab size: %d' % (len(src_field.vocab), len(tgt_field.vocab)))
    logger.debug('Loading dataset...')
    datasets = dict()
    dataset_names = ['valid', 'test']
    for name in dataset_names:
        # Each split is a JSON-lines file; one example dict per line.
        dataset = [json.loads(line.strip()) for line in Path(train_params['dataset'][name]).open(encoding='utf-8')]
        datasets[name] = dataset
        logger.debug('%s size: %d' % (name.capitalize(), len(dataset)))
    # Build model
    logger.debug('Building model...')
    model = Seq2seqModel(model_params)
    # Optimizer/scheduler are unused during inference.
    optimizer = None
    lr_scheduler = None
    loss_function = getattr(nn, train_params['loss_function'])(**train_params['loss_function_args'])
    if torch.cuda.is_available():
        model.cuda()
    logger.debug('Model built')
    # Train model
    trainer = MyTrainer(model, optimizer, lr_scheduler, loss_function, logger, writer, train_params)
    logger.info('Loading model from %s', args.load)
    trainer.load_model(args.load)
    logger.info('Model loaded')
    logger.info('Inference begins...')
    logger.info('Evaluating %s' % train_params['dataset']['valid'])
    trainer.inference(datasets['valid'], 'valid', src_field.vocab, tgt_field.vocab, max_decode_length=2048)
    logger.info('Evaluating %s' % train_params['dataset']['test'])
    trainer.inference(datasets['test'], 'test', src_field.vocab, tgt_field.vocab, max_decode_length=2048)
if __name__ == '__main__':
    # NOTE(review): the description mentions a different experiment — likely
    # copied from a sibling script; confirm.
    parser = ArgumentParser('Tree positional encoding experiment main function.')
    parser.add_argument('-p', '--params', action='store',
                        help='Path of configuration file, should be a .json file')
    parser.add_argument('-l', '--load', action='store', default=None,
                        help='Load a model from given path')
    args = parser.parse_args()
    # Inference requires a trained checkpoint.
    if not args.load:
        print('Should specify a checkpoint for inference')
        exit(-1)
    main(args)
|
import os
from celery import Celery
from celery.schedules import crontab
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BPPRC.settings")
celery_app = Celery("BPPRC")
celery_app.config_from_object("django.conf.settings", namespace="CELERY")
# here is the beat schedule dictionary defined
celery_app.conf.beat_schedule = {
"print-every-minute": {
"task": "namingalgorithm.tasks.run",
# "schedule": crontab(hour="*/1")
"schedule": crontab(hour="23", minute="58")
# 'args': ('Its Thursday!',)
},
}
celery_app.conf.timezone = "GMT"
celery_app.autodiscover_tasks()
|
from __future__ import annotations
from abc import ABC
from typing import Any
from .profile import EDCProfile, WindowReport
from .smart_grid import EnergyDemand
class ProcessingUnitReport:
    """Status report emitted by a single processing unit (PU) of an EDC."""
    def __init__(self, edc_id: str, pu_id: str, pu_type_id: str, status: bool, service_id: str | None,
                 n_sessions: int | None, max_sessions: int | None, queue_time: float, power: float, temperature: float):
        """
        Processing Unit report message.
        :param edc_id: ID of the EDC that contains the PU.
        :param pu_id: ID of the PU.
        :param pu_type_id: ID of the PU model type.
        :param status: status of the Processing Unit (true if switched on).
        :param service_id: ID of the service reserved by the PU. If None, the PU accepts requests from any service.
        :param n_sessions: number of opened sessions of the reserved service. If service_id is None, n_sessions is None.
        :param max_sessions: maximum number of sessions allowed If service_id is None, max_sessions is None.
        :param queue_time: approximate delay overhead
        :param power: power consumption of PU (in Watts)
        :param temperature: temperature of PU (in Kelvin)
        """
        self.edc_id: str = edc_id
        self.pu_id: str = pu_id
        self.pu_type_id: str = pu_type_id
        self.status: bool = status
        self.service_id: str | None = service_id
        self.n_sessions: int | None = n_sessions
        self.max_sessions: int | None = max_sessions
        self.queue_time: float = queue_time
        self.power: float = power
        self.temperature: float = temperature
    @property
    def utilization(self) -> float | None:
        """Share of used sessions (n_sessions / max_sessions); None when the PU is unreserved."""
        return None if self.service_id is None else self.n_sessions / self.max_sessions
class CoolerReport:
    """Cooling subsystem report of an edge data center."""
    def __init__(self, edc_id: str, cooler_type_id: str, temp: float, it_power: float, cooling_power: float):
        """
        Cooler report message.
        :param edc_id: ID of the EDC that contains the cooler.
        :param cooler_type_id: ID of the cooler type.
        :param temp: EDC temperature (in Kelvin).
        :param it_power: Power consumption (in Watts) of all the PUs of the EDC.
        :param cooling_power: power (in Watts) required by the EDC for refrigerating the PUs.
        """
        self.edc_id: str = edc_id
        self.cooler_type_id: str = cooler_type_id
        self.temp: float = temp
        self.it_power: float = it_power
        self.cooling_power: float = cooling_power
    @property
    def total_power(self) -> float:
        """Total power consumption: IT plus cooling (in Watts)."""
        return self.it_power + self.cooling_power
class SrvSlicingReport:
    """Per-service resource-slicing report of an edge data center."""
    def __init__(self, expected_size: int, slice_size: int, slize_available: int, free_size: int, free_available: int):
        """
        Service slicing report.
        :param expected_size: expected number of tasks by the edge data center.
        :param slice_size: number of tasks that the edge data center can actually handle. It depends on sliced PUs.
        :param slize_available: number of additional tasks that can be admitted by sliced PUs.
        :param free_size: number of tasks that can be processed using PUs that do not belong to any slice.
        :param free_available: number of tasks currently that can be admitted by unassigned PUs.
        """
        # NOTE(review): 'slize_available' (sic) is the established attribute name;
        # renaming it would break callers such as EdgeDataCenterReport.
        self.expected_size: int = expected_size
        self.slice_size: int = slice_size
        self.slize_available: int = slize_available
        self.free_size: int = free_size
        self.free_available: int = free_available
    @property
    def congested(self) -> bool:  # TODO rename? tells whether the slice is smaller than it should be
        return self.slice_size < self.expected_size
    @property
    def slice_u(self) -> float:
        # TODO slice resource utilization. A PU holding "incompatible" tasks counts as full
        return self.utilization(self.slice_size, self.slize_available)
    @property
    def free_u(self) -> float:
        # TODO free (unassigned) resource utilization. A PU holding "incompatible" tasks counts as full
        return self.utilization(self.free_size, self.free_available)
    @property
    def total_u(self) -> float | None:
        # TODO overall resource utilization. A PU holding "incompatible" tasks counts as full
        return self.utilization(self.slice_size + self.free_size, self.slize_available + self.free_available)
    @staticmethod
    def utilization(r_size: int, r_available: int) -> float:
        """Utilization in [0, 1]; a non-positive size counts as fully used."""
        return 1 if r_size < 1 else 1 - r_available / r_size # TODO
class EDCProfileReport:
    """Profiling-window report of an EDC for a given service."""
    def __init__(self, edc_id: str, srv_id: str, n_clients: int, req_type: str, result: str, window: WindowReport):
        """
        :param edc_id: ID of the reporting edge data center.
        :param srv_id: ID of the profiled service.
        :param n_clients: number of clients observed in the window.
        :param req_type: type of the profiled request.
        :param result: outcome label of the request.
        :param window: profiling-window report.
        """
        self.edc_id: str = edc_id
        self.srv_id: str = srv_id
        self.n_clients: int = n_clients
        self.req_type: str = req_type
        self.result: str = result
        self.window: WindowReport = window
    def __ne__(self, other):
        # BUGFIX: the original compared self.n_clients with itself (always False),
        # so inequality depended only on the window.
        return self.n_clients != other.n_clients or self.window != other.window
class EdgeDataCenterReport(EnergyDemand):
    """Aggregate report of an edge data center: per-service slicing plus power figures."""
    def __init__(self, edc_id: str, slicing: dict[str, SrvSlicingReport], it_power: float, cooling_power: float):
        """
        Edge data center report message.
        :param edc_id: ID of the EDC.
        :param slicing: report of the resources allocated to the EDC for each service
        :param it_power: IT power consumption (in Watts).
        :param cooling_power: Cooling power consumption (in Watts).
        """
        super().__init__()
        self.edc_id: str = edc_id
        self.slicing: dict[str, SrvSlicingReport] = slicing
        self.it_power: float = it_power
        self.cooling_power: float = cooling_power
        # Optional profile, attached after construction (None until assigned).
        self.edc_profile: EDCProfile | None = None
    @property
    def consumer_id(self) -> str:
        """Energy-consumer ID of this demand (the EDC ID)."""
        return self.edc_id
    @property
    def power_demand(self) -> float:
        """Total power demand: IT plus cooling (in Watts)."""
        return self.it_power + self.cooling_power
    @property
    def pue(self) -> float:
        """Power usage effectiveness (total / IT); 0 when IT power is not positive."""
        return self.power_demand / self.it_power if self.it_power > 0 else 0
    @property
    def congested(self) -> bool: # TODO rename?
        """True when any service slice is smaller than expected."""
        return any(srv_slice.congested for srv_slice in self.slicing.values())
    # Per-service accessors: each returns the slice metric when the service has a
    # slice, and a safe default (False/0 for sizes, 1 for utilizations) otherwise.
    def srv_congested(self, service_id: str) -> bool: # TODO rename?
        return service_id in self.slicing and self.slicing[service_id].congested
    def srv_expected_size(self, service_id: str) -> int:
        return self.slicing[service_id].expected_size if service_id in self.slicing else 0
    def srv_slice_size(self, service_id: str) -> int:
        return self.slicing[service_id].slice_size if service_id in self.slicing else 0
    def srv_slice_available(self, service_id: str) -> int:
        return self.slicing[service_id].slize_available if service_id in self.slicing else 0
    def srv_slice_u(self, service_id: str) -> float:
        return self.slicing[service_id].slice_u if service_id in self.slicing else 1
    def srv_free_size(self, service_id: str) -> int:
        return self.slicing[service_id].free_size if service_id in self.slicing else 0
    def srv_free_available(self, service_id: str) -> int:
        return self.slicing[service_id].free_available if service_id in self.slicing else 0
    def srv_free_u(self, service_id: str) -> float:
        return self.slicing[service_id].free_u if service_id in self.slicing else 1
    def srv_total_u(self, service_id: str) -> float:
        return self.slicing[service_id].total_u if service_id in self.slicing else 1
class SrvDemandEstimationReport:
    """Per-EDC service demand estimation, optionally bundled with the EDC report."""
    def __init__(self, edc_id: str, edc_report: EdgeDataCenterReport | None, demand_estimation: dict[str, int]):
        """
        Service demand estimation message.
        :param edc_id: Edge Data Center ID.
        :param edc_report: Edge Data Center report.
        :param demand_estimation: number of clients that are expected to send requests within a given time window.
        """
        self.edc_id: str = edc_id
        self.edc_report: EdgeDataCenterReport | None = edc_report
        self.demand_estimation: dict[str, int] = demand_estimation
class NewEDCConfig(ABC):
    """Abstract base for EDC reconfiguration messages."""
    def __init__(self, edc_id: str):
        # ID of the edge data center to reconfigure.
        self.edc_id: str = edc_id
class NewEDCMapping(NewEDCConfig):
    """Request to change the request-mapping strategy of an EDC."""
    def __init__(self, edc_id: str, mapping_id: str, mapping_config: dict[str, Any] | None = None):
        super().__init__(edc_id)
        self.mapping_id: str = mapping_id
        if mapping_config is None:
            mapping_config = dict()
        self.mapping_config: dict[str, Any] = mapping_config
class NewEDCSlicing(NewEDCConfig):
    """Request to change the per-service slice sizes of an EDC."""
    def __init__(self, edc_id: str, slicing: dict[str, int]):
        super().__init__(edc_id)
        # Mapping from service ID to the requested slice size.
        self.slicing: dict[str, int] = slicing
|
__author__ = 'amir'
import glob
import random
import csv
import os
import math
import sqlite3
import shutil
import results
import subprocess
import wekaMethods.wekaAccuracy
import utilsConf
def optimize(inds_bef, mat, priors):
    """Drop matrix columns (components) that never appear in a failing test.

    :param inds_bef: component indices aligned with the columns of mat.
    :param mat: list of rows; each row is a list of "0"/"1" strings whose last
        entry is the test outcome ("1" = fail).
    :param priors: per-component priors aligned with the columns of mat.
    :return: (kept indices, reduced matrix rows, kept priors).
    """
    inds=[]
    newPriors = []
    newMat = [[] for t in mat]
    len_mat0_ = len(mat[0]) - 1
    # Keep a column only if some row covers it ("1") AND that row's outcome is "1".
    for i in range(len_mat0_):
        need = False
        for elem in mat:
            if elem[i] == "1":
                if elem[len(elem) - 1] == "1":
                    need = True
                    break
        if need == True:
            newPriors = newPriors + [priors[i]]
            inds = inds + [inds_bef[i]]
            for ind in range(len(newMat)):
                newMat[ind] = newMat[ind] + [mat[ind][i]]
    # Always carry over the outcome column.
    i = len(mat[0]) - 1
    for ind in range(len(newMat)):
        newMat[ind] = newMat[ind] + [mat[ind][i]]
    newTm=[]
    last=len(newMat[0])-1
    if(last==-1):
        newTm=[]
    else:
        # Keep only rows that still cover at least one surviving component.
        for i in range(len(newMat)):
            if ("1" in newMat[i][:last]):
                newTm=newTm+[newMat[i]]
    return inds, newTm, newPriors
def optimizeAll(allFiles, allTests,outcomes, allBugs,priors,testsChoosedNames,FileNames):
    """Reduce the spectrum: drop files that only appear in passing tests,
    re-index tests onto the kept files, and drop empty or duplicate tests.

    :return: (files, tests, outcomes, bugs, priors, test names, file names),
        all re-indexed onto the kept file set (files become 0..len-1).
    """
    newFiles=[]
    newFileNames=[]
    newBugs = []
    newTests = []
    newOutcomes=[]
    testsChoosedNamesNew=[]
    # remove irrelevant files: files that participate just in pass tests
    for index in range(len(allFiles)):
        i=allFiles[index]
        need = False
        ind=0
        for elem in allTests:
            if elem[i] == "1":
                if outcomes[ind] == "1":
                    need = True
                    break
            ind=ind+1
        if need == True:
            newFiles.append(i)
            newFileNames.append(FileNames[index])
            if i in allBugs:
                newBugs.append(i)
    # update tests
    for t in allTests:
        test={}
        for i in newFiles:
            if (t[i]=="1"):
                test[newFiles.index(i)]="1"
            else:
                test[newFiles.index(i)]="0"
        newTests.append(test)
        ind=ind+1
    ind=0
    testsRet=[]
    # remove tests without files
    for t in newTests:
        need = False
        for i in t:
            if(t[i]=="1"):
                need=True
                break
        if (need==True):
            # check if there is a test with same components and result
            if not (t in testsRet and outcomes[ind] == outcomes[testsRet.index(t)]):
                testsRet.append(t)
                newOutcomes.append(outcomes[ind])
                testsChoosedNamesNew.append(testsChoosedNames[ind])
        ind=ind+1
    # Re-index bugs and priors onto the compacted file numbering.
    newBugs=[newFiles.index(i) for i in newBugs ]
    if(priors!=[]):
        priors=[priors[newFiles.index(i)] for i in newFiles]
    newFiles=range(len(newFiles))
    return newFiles, testsRet,newOutcomes,newBugs,priors,testsChoosedNamesNew,newFileNames
def conesOptimize2(allFiles, allTests,outcomes, allBugs,priors,testsChoosedNames,FileNames):
    """Collapse "cones": deduplicate components that behave identically in every
    test, processing the known-buggy components first so a bug representative
    survives, then re-index tests onto the surviving components.

    :return: (files, tests, outcomes, bugs, priors, test names, file names),
        all re-indexed onto the deduplicated component set.
    """
    to_check=[]
    to_check_valids=[]
    newBugs = []
    newTests = []
    newOutcomes=[]
    distinctFiles=[]
    testsChoosedNamesNew=[]
    # cones on bugs
    for f in allFiles:
        if( f not in allBugs):
            to_check_valids.append(f)
    to_check=list(allBugs)
    while(len(to_check)!=0):
        comp=to_check[0]
        to_check=to_check[1:]
        distinctFiles.append(comp)
        new_check=[]
        new_check_valids=[]
        # Keep only components that differ from comp in at least one test.
        for c in to_check:
            dup=True
            for t in allTests:
                if (t[comp] !=t[c]):
                    dup=False
                    break
            if(not dup):
                new_check.append(c)
        to_check=list(new_check)
        for c in to_check_valids:
            dup=True
            for t in allTests:
                if (t[comp] !=t[c]):
                    dup=False
                    break
            if(not dup):
                new_check_valids.append(c)
        to_check_valids=list(new_check_valids)
    # cones on others
    while(len(to_check_valids)!=0):
        comp=to_check_valids[0]
        to_check_valids=to_check_valids[1:]
        distinctFiles.append(comp)
        new_check=[]
        for c in to_check_valids:
            dup=True
            for t in allTests:
                if (t[comp] !=t[c]):
                    dup=False
                    break
            if(not dup):
                new_check.append(c)
        to_check_valids=list(new_check)
    ind=0
    # Re-index each test onto the surviving components.
    for t in allTests:
        test={}
        for i in distinctFiles:
            if (t[i]=="1"):
                test[distinctFiles.index(i)]="1"
            else:
                test[distinctFiles.index(i)]="0"
        newTests.append(test)
        ind=ind+1
    ind=0
    testsRet=[]
    # Drop tests that cover no surviving component.
    for t in newTests:
        need = False
        for i in t:
            if(t[i]=="1"):
                need=True
                break
        if (need==True):
            testsRet.append(t)
            newOutcomes.append(outcomes[ind])
            testsChoosedNamesNew.append(testsChoosedNames[ind])
        ind=ind+1
    newBugs=[distinctFiles.index(i) for i in allBugs if i in distinctFiles]
    newFileNames=[FileNames[allFiles.index(f)] for f in distinctFiles]
    # NOTE: dead code below (condition is always False) — kept for reference.
    if(False):
        for index in range(len(allFiles)):
            if allFiles[index] in distinctFiles:
                newFileNames.append(FileNames[index])
    if(priors!=[]):
        priors=[priors[i] for i in distinctFiles]
    newFiles=range(len(distinctFiles))
    return newFiles, testsRet,newOutcomes,newBugs,priors,testsChoosedNamesNew,newFileNames
def conesOptimize(allFiles, allTests,outcomes, allBugs,priors,testsChoosedNames):
    """Collapse duplicate component columns and drop tests that hit nothing.

    Duplicates are merged in two passes: first among bug components, then
    among non-bug components.  NOTE(review): unlike conesOptimize2, a
    non-bug component identical to a bug column is NOT merged into it
    here — the two passes never compare across groups.
    Returns (files, tests, outcomes, bugs, priors, testNames) re-indexed
    over range(len(surviving components)).
    """
    to_check=[]
    newBugs = []
    newTests = []
    newOutcomes=[]
    distinctFiles=[]
    testsChoosedNamesNew=[]
    # cones on bugs
    to_check=list(allBugs)
    while(len(to_check)!=0):
        comp=to_check[0]
        to_check=to_check[1:]
        distinctFiles.append(comp)
        new_check=[]
        # keep only candidates that differ from comp in at least one test
        for c in to_check:
            dup=True
            for t in allTests:
                if (t[comp] !=t[c]):
                    dup=False
                    break
            if(not dup):
                new_check.append(c)
        to_check=list(new_check)
    # cones on others
    for f in allFiles:
        if( f not in allBugs):
            to_check.append(f)
    while(len(to_check)!=0):
        comp=to_check[0]
        to_check=to_check[1:]
        distinctFiles.append(comp)
        new_check=[]
        for c in to_check:
            dup=True
            for t in allTests:
                if (t[comp] !=t[c]):
                    dup=False
                    break
            if(not dup):
                new_check.append(c)
        to_check=list(new_check)
    # re-express every test over the surviving representative columns
    ind=0
    for t in allTests:
        test={}
        for i in distinctFiles:
            if (t[i]=="1"):
                test[distinctFiles.index(i)]="1"
            else:
                test[distinctFiles.index(i)]="0"
        newTests.append(test)
        ind=ind+1
    # drop tests that hit no surviving component, keeping outcomes aligned
    ind=0
    testsRet=[]
    for t in newTests:
        need = False
        for i in t:
            if(t[i]=="1"):
                need=True
                break
        if (need==True):
            testsRet.append(t)
            newOutcomes.append(outcomes[ind])
            testsChoosedNamesNew.append(testsChoosedNames[ind])
        ind=ind+1
    newBugs=[distinctFiles.index(i) for i in allBugs if i in distinctFiles]
    if(priors!=[]):
        # priors are indexed by component id (assumes allFiles == range(n))
        priors=[priors[i] for i in distinctFiles]
    newFiles=range(len(distinctFiles))
    return newFiles, testsRet,newOutcomes,newBugs,priors,testsChoosedNamesNew
def optimizeVeyCommonFiles(allFiles, allTests,outcomes, allBugs,priors,testsChoosedNames,FileNames):
    """Drop components hit by more than 75% of the tests (too common to be
    informative), then drop tests that no longer hit anything.

    Returns (files, tests, outcomes, bugs, priors, testNames, fileNames),
    re-indexed over range(len(surviving components)).

    Bug fix: this used to return the UNFILTERED `testsChoosedNames`, so the
    returned test names no longer lined up with the filtered tests and
    outcomes; it now returns `testsChoosedNamesNew` like the sibling
    conesOptimize* reducers.
    """
    threshold = 0.75 * len(allTests)
    newFiles = []
    newFileNames = []
    newBugs = []
    for index, comp in enumerate(allFiles):
        # number of tests hitting this component
        hits = sum(1 for t in allTests if t[comp] == "1")
        if hits <= threshold:
            newFiles.append(comp)
            newFileNames.append(FileNames[index])
            if comp in allBugs:
                newBugs.append(comp)
    # re-express every test over the surviving columns
    colIndex = {f: i for i, f in enumerate(newFiles)}
    newTests = [{colIndex[f]: ("1" if t[f] == "1" else "0") for f in newFiles}
                for t in allTests]
    # drop tests that hit no surviving component, keeping outcomes and
    # test names aligned
    testsRet = []
    newOutcomes = []
    testsChoosedNamesNew = []
    for ind, t in enumerate(newTests):
        if any(v == "1" for v in t.values()):
            testsRet.append(t)
            newOutcomes.append(outcomes[ind])
            testsChoosedNamesNew.append(testsChoosedNames[ind])
    newBugs = [colIndex[b] for b in newBugs]
    if priors != []:
        # priors are indexed by component id (assumes allFiles == range(n))
        priors = [priors[i] for i in newFiles]
    newFiles = range(len(newFiles))
    return newFiles, testsRet, newOutcomes, newBugs, priors, testsChoosedNamesNew, newFileNames
def barinelAppend(bugsFiles,barinelFiles):
    """Merge several (bugs-file, barinel-matrix) CSV pairs into one instance.

    `bugsFiles[i]` row 0 lists the component names of instance i and row 1
    its bugged component names; `barinelFiles[i]` holds a priors row
    followed by one row per test (hit values then outcome).  Tests are
    deduplicated across instances, and components a test never saw are
    filled with "0".

    Returns (allBugged, allFiles, AllTests) where the first two are sets
    of names and AllTests is a list of {name: "0"/"1"} dicts.

    Fixes: files are now closed via `with` even on error, and the unused
    `l=len(AllTests[0])` statement (which raised IndexError when no tests
    were collected) was removed.
    """
    allFiles=set()
    allBugged=set()
    Tests=[]
    for bugF,barF in zip(bugsFiles,barinelFiles):
        with open(bugF,"r") as f:
            rows=list(csv.reader(f))
        bfiles=rows[0]          # component names of this instance
        bBugs=rows[1]           # bugged component names
        allFiles=allFiles | set(bfiles)
        allBugged=allBugged | set(bBugs)
        with open(barF,"r") as f:
            rows=list(csv.reader(f))
        for line in rows[1:]:   # first row is the priors row
            test={}
            length=len(line)
            # last column is the outcome; pair the rest with component names
            for comp,bf in zip(line[:length-1],bfiles):
                test[bf]=comp
            if(test not in Tests):
                Tests.append(test)
    AllTests=[]
    for test in Tests:
        for name in allFiles:
            if name not in test:
                test[name]="0"  # component unseen by this instance: not hit
        AllTests.append(test)
    return allBugged,allFiles,AllTests
def getBuggedFilesTestsByBugsIDs(dbPath,bugsIDS,package,times,priorsByFiles, buggedTestsChooser,notRand,buggedTable):
    """Build a file-level fault-localization instance from the trace DB.

    Ranks the tests most relevant to `package`, draws up to `times` of
    them (interleaving bug-revealing and valid tests according to
    `buggedTestsChooser`; -1 means no interleaving), and returns
    (allBugged, allFiles, allTests, Newpriors, testsChoosedNames,
    FileNames) with allFiles re-indexed to range(n) and allBugged holding
    indices into it.

    Bug fix: the "no matching tests" early exit used to return only FIVE
    empty lists while the success path returns SIX values, making callers
    that unpack six crash with ValueError.

    NOTE(review): SQL is built by string concatenation from `package` and
    table names — assumes trusted inputs.
    NOTE(review): `testsDict.values().index(None)` near the end requires
    Python 2 (list-valued .values()) and relies on the int keys'
    insertion order; it breaks on Python 3.
    """
    allFiles=[]
    FileNames=[]
    allBugged=[]
    allTests=[]
    testsNames=[]
    Newpriors=[]
    # 'amir' is a sentinel entry: len(testsSorted)==1 below means "no tests"
    testsSorted=['amir']
    testsChoosedNames=[]
    conn = sqlite3.connect(dbPath)
    conn.text_factory = str
    c = conn.cursor()
    indexA="CREATE INDEX IF NOT EXISTS FilesNames ON testsFiles (fileName)"
    c.execute(indexA)
    conn.commit()
    indexA="CREATE INDEX IF NOT EXISTS FilesTests ON testsFiles (Test)"
    c.execute(indexA)
    conn.commit()
    indexA="CREATE INDEX IF NOT EXISTS FilesBugged ON buggedFiles (BugId)"
    c.execute(indexA)
    conn.commit()
    # a list of packages becomes an OR-chain of LIKE patterns
    if( type(package) is list):
        package="or testsFiles.fileName like ".join(["\"%"+ pack+"%\" " for pack in package])
    else:
        package="\"%"+ package+"%\" "
    s="select distinct Test from testsFiles where testsFiles.fileName like "+package
    for r in c.execute(s):
        testsNames.append(r[0])
    # the first query below is immediately overwritten (kept for history);
    # tests are ranked by the FRACTION of touched files inside the package
    s="select Test,c from (select Test,count(fileName) as c from testsFiles where testsFiles.fileName like" +package+" group by Test) where c>=2 order by c DESC"
    s="select Test,c/(a+0.0) as d,c ,a from (select Test,count(fileName) as a, count(case When fileName like "+package+" Then 1 Else Null End) as c from testsFiles group by Test) where c>0 order by d DESC"
    for r in c.execute(s):
        if(r[0] in testsNames):
            testsSorted.append(r[0])
    testsTup = str(tuple(testsSorted))
    if (len(testsSorted)==1 ):
        # no test touches the package: SIX empties to match the success arity
        return [],[],[],[],[],[]
    s="select distinct fileID,fileName from testsFiles where Test in "+ testsTup
    for r in c.execute(s):
        allFiles.append(r[0])
        FileNames.append(r[1])
    bugsTup = str(tuple(bugsIDS))
    buggedNames=[]
    s="select distinct fileID,fileName from "+buggedTable+" where BugId in "+ bugsTup
    for r in c.execute(s):
        if(r[0] in allFiles):
            allBugged.append(r[0])
            buggedNames.append(r[1])
    # template trace: every file column starts at "0"
    testZerosToCopy={}
    for ind in range(len(allFiles)):
        testZerosToCopy[ind]="0"
    testsIndex=[]
    testsDict={}
    buggesTests=[]
    validTests=[]
    # pre-assign which slots receive bug-revealing vs. valid tests
    for i in range(times):
        if(buggedTestsChooser!=-1):
            if(i % buggedTestsChooser ==0):
                buggesTests.append(i)
            else:
                validTests.append(i)
        testsIndex.append(i)
        testsDict[i]=None
    testsDict[times]=None
    if(buggedTestsChooser==-1):
        validTests=testsIndex
        buggesTests=testsIndex
    for ind in range(len(testsSorted)):
        if len(testsSorted)==0:
            break
        tn=[]
        if notRand:
            tn=testsSorted[0]
        else:
            tn=random.choice(testsSorted)
        testsSorted.remove(tn)
        testsChoosedNames.append(tn)
        isbugged=False
        if len(testsDict)==times:
            break
        s2="select testsFiles.Test as Test ,testsFiles.fileID as fileID from testsFiles where testsFiles.Test=\""+tn +"\""
        testTrace=testZerosToCopy.copy()
        for r in c.execute(s2):
            testNa=r[0]
            fileID=r[1]
            testTrace[allFiles.index(fileID)]="1"
            if fileID in allBugged:
                isbugged=True
        if(isbugged):
            if len(buggesTests)==0:
                continue
            testsDict[buggesTests[0]]=testTrace
            buggesTests.remove(buggesTests[0])
        else:
            if len(validTests)==0:
                continue
            testsDict[validTests[0]]=testTrace
            validTests.remove(validTests[0])
    vals=testsDict.values()
    # Python 2 only: .values() is a list; truncate at the first unfilled slot
    min1 = min(vals.index(None), times)
    allTests=vals[:min1]
    if(priorsByFiles!=None):
        for f in allFiles:
            if( f in priorsByFiles):
                Newpriors.append(priorsByFiles[f])
            else:
                Newpriors.append(0.01)
    allBugged=[allFiles.index(x) for x in allBugged]
    allFiles=range(len(allFiles))
    return allBugged,allFiles,allTests,Newpriors,testsChoosedNames,FileNames
def getBuggedFilesTestsByBugsIDsMethods(dbPath,bugsIDS,package,times,priorsByFiles, buggedTestsChooser,notRand,buggedTable,testTable):
    """Method-level variant of getBuggedFilesTestsByBugsIDs.

    Ranks the tests of `testTable` most relevant to `package`, draws up to
    `times` of them (interleaving bug-revealing and valid tests according
    to `buggedTestsChooser`; -1 means no interleaving), and returns
    (allBugged, allFiles, allTests, Newpriors, testsChoosedNames,
    FileNames) with allFiles re-indexed to range(n).

    NOTE(review): SQL is built by string concatenation from `package` and
    table names — assumes trusted inputs.
    NOTE(review): `testsDict.values().index(None)` near the end requires
    Python 2 (list-valued .values()) and relies on the int keys'
    insertion order; it breaks on Python 3.
    """
    allFiles=[]
    FileNames=[]
    allBugged=[]
    allTests=[]
    testsNames=[]
    Newpriors=[]
    # 'amir' is a sentinel entry: len(testsSorted)==1 below means "no tests"
    testsSorted=['amir']
    testsChoosedNames=[]
    conn = sqlite3.connect(dbPath)
    conn.text_factory = str
    c = conn.cursor()
    indexA="CREATE INDEX IF NOT EXISTS FilesNames ON testsMethods (methodName)"
    c.execute(indexA)
    conn.commit()
    indexA="CREATE INDEX IF NOT EXISTS FilesTests ON testsMethods (Test)"
    c.execute(indexA)
    conn.commit()
    indexA="CREATE INDEX IF NOT EXISTS FilesBugged ON buggedFiles (BugId)"
    c.execute(indexA)
    conn.commit()
    # a list of packages becomes an OR-chain of LIKE patterns
    if( type(package) is list):
        package=("or "+testTable+".name like ").join(["\"%"+ pack+"%\" " for pack in package])
    else:
        package="\"%"+ package+"%\" "
    s="select distinct Test from "+testTable+" where "+testTable+".name like "+package
    for r in c.execute(s):
        testsNames.append(r[0])
    # the first query below is immediately overwritten (kept for history);
    # tests are ranked by the FRACTION of touched methods inside the package
    s="select Test,c from (select Test,count(name) as c from "+testTable+" where "+testTable+".name like" +package+" group by Test) where c>=2 order by c DESC"
    s="select Test,c/(a+0.0) as d,c ,a from (select Test,count(name) as a, count(case When name like "+package+" Then 1 Else Null End) as c from "+testTable+" group by Test) where c>0 order by d DESC"
    for r in c.execute(s):
        if(r[0] in testsNames):
            testsSorted.append(r[0])
    testsTup = str(tuple(testsSorted))
    if (len(testsSorted)==1 ):
        # no test touches the package: empty instance (matches success arity)
        return [],[],[],[],[],[]
    # method name doubles as its id, hence "name,name"
    s="select distinct name,name from "+testTable+" where Test in "+ testsTup
    for r in c.execute(s):
        allFiles.append(r[0])
        FileNames.append(r[1])
    bugsTup = str(tuple(bugsIDS))
    buggedNames=[]
    s="select distinct name,BugId from "+buggedTable+" where BugId in "+ bugsTup
    for r in c.execute(s):
        if(r[0] in allFiles):
            allBugged.append(r[0])
            buggedNames.append(r[1])
    # template trace: every method column starts at "0"
    testZerosToCopy={}
    for ind in range(len(allFiles)):
        testZerosToCopy[ind]="0"
    testsIndex=[]
    testsDict={}
    buggesTests=[]
    validTests=[]
    # pre-assign which slots receive bug-revealing vs. valid tests
    for i in range(times):
        if(buggedTestsChooser!=-1):
            if(i % buggedTestsChooser ==0):
                buggesTests.append(i)
            else:
                validTests.append(i)
        testsIndex.append(i)
        testsDict[i]=None
    testsDict[times]=None
    if(buggedTestsChooser==-1):
        validTests=testsIndex
        buggesTests=testsIndex
    for ind in range(len(testsSorted)):
        if len(testsSorted)==0:
            break
        tn=[]
        if notRand:
            tn=testsSorted[0]
        else:
            tn=random.choice(testsSorted)
        testsSorted.remove(tn)
        testsChoosedNames.append(tn)
        isbugged=False
        if len(testsDict)==times:
            break
        s2="select "+testTable+".Test as Test ,"+testTable+".name as name from "+testTable+" where "+testTable+".Test=\""+tn +"\""
        testTrace=testZerosToCopy.copy()
        for r in c.execute(s2):
            testNa=r[0]
            fileID=r[1]
            testTrace[allFiles.index(fileID)]="1"
            if fileID in allBugged:
                isbugged=True
        if(isbugged):
            if len(buggesTests)==0:
                continue
            testsDict[buggesTests[0]]=testTrace
            buggesTests.remove(buggesTests[0])
        else:
            if len(validTests)==0:
                continue
            testsDict[validTests[0]]=testTrace
            validTests.remove(validTests[0])
    vals=testsDict.values()
    # Python 2 only: .values() is a list; truncate at the first unfilled slot
    min1 = min(vals.index(None), times)
    allTests=vals[:min1]
    if(priorsByFiles!=None):
        for f in allFiles:
            if( f in priorsByFiles):
                Newpriors.append(priorsByFiles[f])
            else:
                Newpriors.append(0.01)
    allBugged=[allFiles.index(x) for x in allBugged]
    allFiles=range(len(allFiles))
    return allBugged,allFiles,allTests,Newpriors,testsChoosedNames,FileNames
def exportBarinel(out,priors,allBugs,allFiles,allTests,outcomes):
    """Write a barinel CSV: first row priors, then one row per test with the
    component hit values (sorted by component index) followed by the outcome.

    NOTE(review): opening in 'wb' for csv.writer is the Python 2 idiom;
    Python 3 needs mode 'w' with newline=''.
    """
    lines=[]
    ind=0
    mat=[]
    for t in allTests:
        srt=sorted(t.items(),key=lambda tup: tup[0])
        srt=[s[1] for s in srt]+[outcomes[ind]]
        ind=ind+1
        mat.append(srt)
    inds=range(len(allFiles))
    # a further reduction step is currently disabled — pass-through
    #inds, newMat, newPriors = optimize( inds, mat, priors)
    inds, newMat, newPriors = inds, mat, priors
    lines=[newPriors]+newMat
    with open(out, 'wb') as f:
        writer = csv.writer(f)
        writer.writerows(lines)
def plannerTestsRows(allFiles,allTests,outcomes):
    """Build the planner's test rows.

    For test number i: names gets [i] and dets gets
    [i, trace, trace, outcome], where trace is the ordered list of
    component indices the test hits.
    """
    names = []
    dets = []
    for ind, t in enumerate(allTests):
        ordered = sorted(t.items(), key=lambda item: item[0])
        trace = [allFiles.index(comp) for comp, hit in ordered if hit == "1"]
        dets.append([ind, trace, trace, outcomes[ind]])
        names.append([ind])
    return names, dets
def priorsByPbugPvalid(allBugs, allFiles, pBug, pValid):
    """Per-component priors: pBug for bugged components, pValid otherwise."""
    return [pBug if comp in allBugs else pValid for comp in allFiles]
def exportPlanner(out,priors,allBugs,allFiles,allTests,outcomes,initials, failsProb ):
    """Write a planner instance file with [Priors], [Bugs], [InitialTests]
    and [TestDetails] sections (semicolon-separated).

    `initials` caps how many tests are pre-revealed to the planner and
    `failsProb` is the fraction of them that should be failing tests.

    NOTE(review): opening in 'wb' for csv.writer is the Python 2 idiom;
    Python 3 needs mode 'w' with newline=''.
    """
    names,dets=plannerTestsRows(allFiles,allTests,outcomes)
    lines=[["[Priors]"]]+ [[priors]]
    lines=lines+[ ["[Bugs]"]]+ [[allFiles.index(bug)] for bug in allBugs]
    initFailed=[]
    initpassed=[]
    # split the initial-test budget between failing ("1") and passing tests
    for i in range(len(outcomes)):
        if outcomes[i]=="1":
            if( len(initFailed) <= failsProb *initials):
                initFailed.append(names[i])
        else:
            if( len(initpassed) <=(1-failsProb)*initials):
                initpassed.append(names[i])
    lines=lines + [["[InitialTests]"]]+initFailed+initpassed
    #lines=lines + [["[InitialTests]"]]+names
    lines=lines+ [["[TestDetails]"]]+dets
    with open(out, 'wb') as f:
        writer = csv.writer(f,delimiter=";")
        writer.writerows(lines)
def generateOutcomes(allBugs,allFiles,allTests,const):
    """Simulate test outcomes: a test passes ("0") with probability
    const ** (number of bugged components it hits), otherwise fails ("1").
    A test hitting no bug always passes (probability 1).
    """
    setBugs = set(allBugs)
    outcomes = []
    for t in allTests:
        hit = [comp for comp, val in t.items() if val == "1"]
        passProb = math.pow(const, len(setBugs & set(hit)))
        draw = random.uniform(0, 1)
        outcomes.append("0" if passProb >= draw else "1")
    return outcomes
def exportBugs_Files(outbugs_Files,allBugs,allFiles,bugsIDS,TestsCount,pack,testsChoosedNames,addition,FileNames):
    """Write the experiment metadata file consumed by readBugFile: pairs of
    label rows and value rows (Files, Bugged, BugsIds, TestsCount, pack,
    testsChoosedNames, FileNames).

    NOTE(review): 'wb' + csv.writer and `addition+range(...)` (list
    concatenation with a range) are Python 2 idioms; both break on Python 3.
    """
    bugsInds=[]
    for b in allBugs:
        if (b in allFiles):
            bugsInds.append(allFiles.index(b))
    with open(outbugs_Files, 'wb') as f:
        writer = csv.writer(f)
        writer.writerows([["Files"],addition+range(len(allFiles)),["Bugged"],addition+bugsInds,["BugsIds"],bugsIDS,["TestsCount"],[TestsCount],["pack"],[pack],["testsChoosedNames"],testsChoosedNames,["FileNames"],FileNames] )
def allBugsFromDB(dbPath,package,weka, table="buggedFiles"):
    """Return (as strings) the distinct bug ids recorded in `table` for
    files whose name contains `package`.  With weka=True, restrict to bug
    ids that also appear in the file-level buggedFiles table.
    """
    conn = sqlite3.connect(dbPath)
    conn.text_factory = str
    cur = conn.cursor()
    query = "select distinct BugId from "+table+" where "+table+".fileName like \"%" + package + "%\" "
    if (weka):
        # keep only bugs known at the file level too
        query=query+"and BugId in (select BugId from buggedFiles)"
    return [str(row[0]) for row in cur.execute(query)]
def allBugsFromDBMethods(dbPath,package,weka, table):
    """Return (as strings) the distinct bug ids in the method-level `table`
    for methods whose directory contains `package`.  With weka=True,
    restrict to bug ids present in the same table.
    """
    conn = sqlite3.connect(dbPath)
    conn.text_factory = str
    cur = conn.cursor()
    query = "select distinct BugId from "+table+" where "+table+".methodDir like \"%" + package + "%\" "
    if (weka):
        query=query+"and BugId in (select BugId from "+table+")"
    return [str(row[0]) for row in cur.execute(query)]
def dirStruct(outPath):
    """Create the experiment output directory skeleton under outPath
    (Windows-style separators, matching the rest of this module)."""
    for sub in ["", "\\barinel\\", "\\planner\\", "\\plannerRecords\\",
                "\\out\\", "\\bugs_Files\\"]:
        target = outPath + sub
        if not os.path.isdir(target):
            os.mkdir(target)
def getAllpacks(packsPath):
    """Read the package list file: one package name per line.

    Fix: the file is now closed via `with` even if an exception occurs
    while reading.  split("\\n")[0] (rather than strip) is kept on purpose
    to preserve the original handling of any stray characters.
    """
    with open(packsPath, "r") as f:
        return [line.split("\n")[0] for line in f.readlines()]
def priorsFromWeka(dbPathTests,wekaAns,FileNames,allFiles):
    """Turn weka classifier confidences into per-component priors.

    Reads the weka answers CSV (column 0: component name, column 5:
    confidence, possibly '*'-prefixed).  If FileNames is empty the
    component universe is taken from the testsMethods table of
    dbPathTests; otherwise FileNames/allFiles give the name -> id
    mapping.  Components without a weka entry get the floor prior 0.01.
    """
    wekaPriors = {}
    with open(wekaAns, "r") as f:
        rows = list(csv.reader(f))
    for row in rows[1:]:  # first row is the weka header
        raw = row[5]
        if "*" in raw:
            # weka marks the chosen class value with a leading '*'
            raw = raw[1:]
        confidence = float(raw)
        # NOTE: the confidence used to be inverted for the "2:no"/"2:valid"
        # class; that inversion is currently disabled (kept as-is).
        wekaPriors[row[0]] = confidence + 0.01
    filesPriors = {}
    if FileNames == []:
        conn = sqlite3.connect(dbPathTests)
        conn.text_factory = str
        cur = conn.cursor()
        for x in cur.execute(' select distinct methodName,methodName from testsMethods order by methodName'):
            filesPriors[x[1]] = wekaPriors.get(x[0], 0.01)
        conn.close()
    else:
        for name, ind in zip(FileNames, allFiles):
            filesPriors[ind] = wekaPriors.get(name, 0.01)
    return filesPriors
def allPackBugs(dbPath, numOfBugs, packsPath,numOfExperiments,weka,table="buggedFiles"):
    """For every package in the packs file, collect its bug ids (file- or
    method-level depending on `table`) and return the packages that have
    at least one bug, as (bugIds, package) pairs sorted by bug count
    (descending).

    Note: the `numOfBugs` threshold is deliberately NOT applied here —
    a previous `>= numOfBugs` filter was immediately overwritten by the
    `>= 1` filter (dead code, now removed); callers sample numOfBugs bugs
    later via choosePackBug.
    """
    packsList = getAllpacks(packsPath)
    bugspathsAndpaths = []
    for p in packsList:
        lst1 = []
        if "Files" in table:
            lst1 = allBugsFromDB(dbPath, p,weka,table)
        if "Methods" in table:
            lst1 = allBugsFromDBMethods(dbPath, p,weka,table)
        bugspathsAndpaths.append((lst1, p))
    bugspathsAndpaths=sorted(bugspathsAndpaths,key=lambda r: len(r[0]),reverse=True)
    # keep only packages with at least one recorded bug
    bugs = [x for x in bugspathsAndpaths if len(x[0]) >= 1]
    return bugs
def allPackBugsMethods(dbPath, numOfBugs, packsPath,numOfExperiments,weka,table):
    """Method-level package/bug collection: return (bugIds, package) pairs
    for packages with at least numOfBugs bugs, sorted by bug count
    (descending)."""
    perPack = [(allBugsFromDBMethods(dbPath, p, weka, table), p)
               for p in getAllpacks(packsPath)]
    perPack.sort(key=lambda entry: len(entry[0]), reverse=True)
    return [entry for entry in perPack if len(entry[0]) >= numOfBugs]
def choosePackBug(bugs, numOfBugs,order,numOfPacks,chooseThis):
    """Pick packages — randomly (numOfPacks of them) or exactly the indices
    listed in chooseThis — then draw up to numOfBugs bug ids from their
    pooled bug lists without replacement.

    Returns (bugsIDS, pack); bugsIDS always starts with the sentinel '-1'.
    """
    choosed = []
    if chooseThis == []:
        pool = list(bugs)
        for _ in range(numOfPacks):
            if not pool:
                break
            pick = random.choice(pool)
            choosed.append(pick)
            pool.remove(pick)
    else:
        for i in chooseThis:
            choosed.append(bugs[i])
    pack = [entry[1] for entry in choosed]
    # pool all bug ids of the chosen packages into one flat list
    candidates = [bug for entry in choosed for bug in entry[0]]
    bugsIDS = ['-1']
    for _ in range(numOfBugs):
        if not candidates:
            break
        b = random.choice(candidates)
        bugsIDS.append(b)
        candidates.remove(b)
    return bugsIDS, pack
def readBarinel(path):
    """Read a barinel CSV written by exportBarinel: first row priors, then
    one row per test (component hit values followed by the outcome).

    Components whose prior equals -0.01 are flagged as bugs.
    Returns (files, tests, outcomes, bugs, priors) where files is
    range(#components) and each test is a {index: "0"/"1"} dict.

    Fix: the file handle was never closed; it is now managed with `with`.
    """
    with open(path, "r") as f:
        rows = [r for r in csv.reader(f)]
    priors = rows[0]
    files = range(len(priors))
    # the -0.01 sentinel prior marks a bugged component
    bugs = [i for i, p in enumerate(priors) if p == str(-0.01)]
    tests = []
    outs = []
    for row in rows[1:]:
        outs.append(row[-1])  # last column is the outcome
        tests.append({i: row[i] for i in range(len(row) - 1)})
    return files, tests, outs, bugs, priors
def readBugFile(bug_f):
    """Parse the metadata file written by exportBugs_Files.

    The file alternates label lines and comma-separated value lines:
    Files(1), Bugged(3), BugsIds(5), TestsCount(7), pack(9),
    testsChoosedNames(11), FileNames(13).
    Returns (BugsIds, bugged, pack, testsCount, used, testsChoosedNames,
    FileNames).
    """
    with open(bug_f, "r") as fh:
        raw = [ln.split("\n")[0] for ln in fh.readlines()]

    def row(i):
        # value lines are comma-separated
        return raw[i].split(",")

    BugsIds = row(5)
    bugged = row(3)
    pack = row(9)[0]
    testsCount = int(row(7)[0])
    used = row(1)
    testsChoosedNames = row(11)
    FileNames = row(13)
    return BugsIds, bugged, pack, testsCount, used, testsChoosedNames, FileNames
def optimizations(FileNames, allBugged, allFiles, allTests, outcomes, priors, testsChoosedNames):
    """Shrink an instance by chaining the three reducers: optimizeAll,
    duplicate-column merging (conesOptimize2) and dropping near-ubiquitous
    components (optimizeVeyCommonFiles).

    Note the argument/return tuple order of this wrapper differs from the
    reducers it calls; here it is
    (FileNames, allBugged, allFiles, allTests, outcomes, priors, testsChoosedNames).
    """
    allFiles, allTests, outcomes, allBugged, priors, testsChoosedNames, FileNames = optimizeAll(allFiles, allTests, outcomes, allBugged, priors,testsChoosedNames,FileNames)
    allFiles, allTests, outcomes, allBugged, priors, testsChoosedNames, FileNames = conesOptimize2(allFiles, allTests, outcomes, allBugged, priors, testsChoosedNames, FileNames)
    allFiles, allTests, outcomes, allBugged, priors, testsChoosedNames, FileNames = optimizeVeyCommonFiles(allFiles, allTests, outcomes,allBugged,priors, testsChoosedNames, FileNames)
    return FileNames, allBugged, allFiles, allTests, outcomes, priors, testsChoosedNames
def buildInstanceAndOptimize(bugsIDS, const, dbPath, pack, times,priorsByFiles,buggedTestsChooser,notRand,buggedTable):
    """Build a file-level instance from the DB, simulate test outcomes and
    shrink it with the optimization chain.

    Returns (allBugged, allFiles, allTests, outcomes, priors,
    testsChoosedNames, FileNames); all seven entries are empty when no
    usable instance could be built.

    Bug fix: the failure exit used to return only SIX empty lists while
    the success path returns SEVEN values, so callers unpacking seven
    crashed with ValueError.
    """
    allBugged, allFiles, allTests,priors,testsChoosedNames,FileNames = getBuggedFilesTestsByBugsIDs(dbPath, bugsIDS, pack, times,priorsByFiles,buggedTestsChooser,notRand,buggedTable)
    if(len(allTests)==0 or len(allBugged)==0):
        # empty instance: keep the same arity as the success return
        return [],[],[],[],[],[],[]
    outcomes = generateOutcomes(allBugged, allFiles, allTests, const)
    FileNames, allBugged, allFiles, allTests, outcomes, priors, testsChoosedNames = optimizations(FileNames, allBugged,allFiles, allTests,outcomes, priors,testsChoosedNames)
    return allBugged, allFiles, allTests, outcomes,priors,testsChoosedNames,FileNames
def buildInstanceAndOptimizeMethods(bugsIDS, const, dbPath, pack, times,priorsByFiles,buggedTestsChooser,notRand,buggedTable,testTable):
    """Method-level variant of buildInstanceAndOptimize: build an instance
    from the DB, simulate outcomes and shrink it.

    Returns (allBugged, allFiles, allTests, outcomes, priors,
    testsChoosedNames, FileNames); all seven are empty when no usable
    instance could be built (also when exactly one bugged method exists).
    """
    allBugged, allFiles, allTests,priors,testsChoosedNames,FileNames = getBuggedFilesTestsByBugsIDsMethods(dbPath, bugsIDS, pack, times,priorsByFiles,buggedTestsChooser,notRand,buggedTable,testTable)
    if(len(allTests)==0 or len(allBugged)==0 or len(allBugged)==1):
        return [],[],[],[],[],[],[]
    outcomes = generateOutcomes(allBugged, allFiles, allTests, const)
    FileNames, allBugged, allFiles, allTests, outcomes, priors, testsChoosedNames = optimizations(FileNames, allBugged,allFiles, allTests,outcomes, priors,testsChoosedNames)
    return allBugged, allFiles, allTests, outcomes,priors,testsChoosedNames,FileNames
def readInstance( exp,times,copyPath):
    """Re-load a previously exported experiment instance number `exp`
    (uniform-priors barinel CSV plus its bugs_Files metadata) from
    copyPath, converting the bugged/file indices back to ints.

    Returns (allBugged, files, allTests, outcomes, priors,
    testsChoosedNames, FileNames).
    """
    prefix = str(times) + "_"
    bugsFile = copyPath + "\\bugs_Files\\" + prefix + str(exp) + ".txt"
    barinelFile = copyPath + "\\barinel\\" + prefix + "uniform_" + str(exp) + ".csv"
    allFiles, allTests, outcomes, bugs, priors = readBarinel(barinelFile)
    BugsIds, allBugged, pack, testsCount, files, testsChoosedNames, FileNames = readBugFile(bugsFile)
    allBugged = [int(x) for x in allBugged]
    files = [int(f) for f in files]
    return allBugged, files, allTests, outcomes, priors, testsChoosedNames, FileNames
def wekaProbs(outPath,dbPath,packsPath,numOfExperiments,numOfBugsARR,timesArr,const,minimalTests,maximalTests,wekaAns,initials,order,numOfPacks,buggedTestsChooser,initialsChooser,notRand):
    """Experiment driver: for each bug-count setting, build instances and
    export them twice — with weka-derived priors and with uniform priors.
    Returns the number of experiment instances written.

    NOTE(review): this driver looks stale — `exportConf` is called with 13
    arguments but its definition takes 17 (TypeError), and
    `buildInstanceAndOptimize` is called without its `buggedTable`
    argument.  Confirm before use; MultyWekaAndSanity appears to be the
    maintained driver.
    """
    priorsByFiles=priorsFromWeka(dbPath,wekaAns,[],[])
    exp=-1
    conf_file = outPath+"conf.txt"
    exportConf(conf_file,packsPath,numOfExperiments,numOfBugsARR,timesArr,const,minimalTests,maximalTests,initials,order,numOfPacks,buggedTestsChooser,notRand)
    expIND=0
    for numOfBugs in numOfBugsARR:
        bugs = allPackBugs(dbPath, 50 , packsPath,numOfExperiments,True)
        start = min([20, len(bugs)-1])
        for choo in [-1]:
            # choo == -1 picks the last (smallest) package entry
            bugsIDS, pack=choosePackBug(bugs, numOfBugs,order,numOfPacks,[choo])
            for t in range(len(timesArr)):
                times=timesArr[t]
                exp=-1
                cont=0
                while exp <numOfExperiments:
                    #bugsIDS, pack = choosePackBug(bugs, numOfBugs,order,numOfPacks,[])
                    if(len(bugsIDS)==1): # (-1,)
                        break
                    allBugged, allFiles, allTests, outcomes, priors,testsChoosedNames,FileNames = buildInstanceAndOptimize(bugsIDS, const, dbPath, pack, times,priorsByFiles,buggedTestsChooser,notRand)
                    # reject instances outside the size bounds; give up after
                    # the first rejection (cont==1)
                    if(len(allTests)<=minimalTests or len(allTests)>maximalTests or len(allBugged)==0):
                        cont=cont+1
                        if( cont==1):
                            cont=0
                            break
                        else:
                            continue
                    exp=exp+1
                    expIND=expIND+1
                    outbugs_Files = outPath+"\\bugs_Files\\" + str(expIND) + ".txt"
                    exportBugs_Files(outbugs_Files,allBugged,allFiles,bugsIDS,len(allTests),pack,testsChoosedNames,[],FileNames)
                    file = str(expIND)
                    # weka-prior exports
                    outBarinel = outPath+"\\barinel\\weka_" + file + ".csv"
                    outPlanner = outPath+"\\planner\\weka_" + file + ".txt"
                    exportBarinel(outBarinel,priors,allBugged,allFiles,allTests,outcomes)
                    exportPlanner(outPlanner,priors,allBugged,allFiles,allTests,outcomes,initials,initialsChooser)
                    # uniform-prior exports of the same instance
                    outBarinel = outPath+"\\barinel\\uniform_" + file + ".csv"
                    outPlanner = outPath+"\\planner\\uniform_" + file + ".txt"
                    priors=[0.1 for p in priors]
                    exportBarinel(outBarinel,priors,allBugged,allFiles,allTests,outcomes)
                    exportPlanner(outPlanner,priors,allBugged,allFiles,allTests,outcomes,initials,initialsChooser)
    return expIND
def allBuggedFilesDB(dbPath,FileNames,buggedTable):
    """Indices (into FileNames) of the files that appear in the bug table.

    NOTE(review): the IN clause is built with str(tuple(...)), which yields
    invalid SQL for a single-element list (trailing comma) — callers are
    expected to pass at least two names.
    """
    conn = sqlite3.connect(dbPath)
    conn.text_factory = str
    cur = conn.cursor()
    query = "select distinct fileName from "+buggedTable+" where fileName in "+ str(tuple(FileNames))
    buggedNames = [row[0] for row in cur.execute(query)]
    return [FileNames.index(name) for name in buggedNames]
def MultyWekaAndSanity(outPath,dbPath,packsPath,numOfExperiments,numOfBugsARR,timesArr,const,minimalTests,maximalTests,wekaAnsArr,initialsFactor,order,numOfPacks,buggedTestsChooser,initialsChooser,notRand,copybool,copyPath,buggedTable,pureSanity, bugsPacks=[]):
    """File-level experiment driver: build (or copy) one instance per
    experiment, then for each test-count in timesArr export it with
    uniform priors, with each weka-prior variant in wekaAnsArr, and with
    sanity (pBug/pValid) prior sweeps.  Returns the last instance index.

    NOTE(review): Python 2 module (print statement below).
    NOTE(review): exportConf is called with 16 arguments but its
    definition takes 17 (missing testTable) — TypeError at runtime.
    NOTE(review): exp starts at -1, so bugsPacks[exp] reads the LAST
    element on the first iteration — looks like an off-by-one; confirm.
    NOTE(review): priorsByFiles.values() relies on dict ordering matching
    allFiles — verify.
    """
    #priorsByFiles=priorsFromWeka(dbPath,wekaAns)
    exp=-1
    conf_file = outPath+"conf.txt"
    exportConf(conf_file,packsPath,numOfExperiments,numOfBugsARR,timesArr,const,minimalTests,maximalTests,initialsFactor,order,numOfPacks,buggedTestsChooser,notRand,initialsChooser,buggedTable,pureSanity)
    exp=-1
    expIND=-1
    timesMax=max(timesArr)
    bugs = allPackBugs(dbPath, 20 , packsPath,numOfExperiments,True)
    if bugsPacks==[]:
        bugsPacks=[choosePackBug(bugs, 20,order,numOfPacks,[]) for x in range(numOfExperiments)]
    print "start Experiment"
    while exp <numOfExperiments:
        bugsIDS, pack=bugsPacks[exp]
        if(len(bugsIDS)==1): # (-1,)
            break
        # build the largest instance once, then truncate per times value
        if copybool:
            allBuggedExp, allFilesExp, allTestsExp, outcomesExp, priorsExp,testsChoosedNamesExp,FileNamesExp = readInstance(exp+1,timesMax,copyPath)
        else:
            allBuggedExp, allFilesExp, allTestsExp, outcomesExp, priorsExp,testsChoosedNamesExp,FileNamesExp = buildInstanceAndOptimize(bugsIDS, const, dbPath, pack, timesMax,[],buggedTestsChooser,notRand,buggedTable)
        if(len(allTestsExp)<=minimalTests or len(allTestsExp)>maximalTests or len(allBuggedExp)==0):
            continue
        exp=exp+1
        expIND=expIND+1
        for t in range(len(timesArr)):
            times=timesArr[t]
            filePre=str(times)+"_"
            outbugs_Files =outPath+"\\bugs_Files\\"+filePre + str(expIND) + ".txt"
            # truncate to the first times+1 tests and re-shrink
            allTests=allTestsExp[:times+1]
            outcomes=outcomesExp[:times+1]
            FileNames, allBugged, allFiles, allTests, outcomes, priors, testsChoosedNames = optimizations(FileNamesExp, allBuggedExp,allFilesExp, allTests,outcomes, priorsExp,testsChoosedNamesExp)
            exportBugs_Files(outbugs_Files,allBugged,allFiles,bugsIDS,len(allTests),pack,testsChoosedNames,[],FileNames)
            # uniform-prior exports
            outBarinel = outPath+"\\barinel\\"+filePre +"uniform_" + str(expIND) + ".csv"
            outPlanner = outPath+"\\planner\\"+filePre +"uniform_" + str(expIND) + ".txt"
            priors=[0.1 for p in FileNames]
            exportBarinel(outBarinel,priors,allBugged,allFiles,allTests,outcomes)
            initials=int(initialsFactor*times)
            exportPlanner(outPlanner,priors,allBugged,allFiles,allTests,outcomes,initials,initialsChooser)
            # one export per weka classifier answer file
            for wekaAns,name in wekaAnsArr:
                priorsByFiles=priorsFromWeka(dbPath,wekaAns,FileNames,allFiles)
                outBarinel = outPath+"\\barinel\\"+filePre +"weka_" +name+ str(expIND) + ".csv"
                outPlanner = outPath+"\\planner\\"+filePre +"weka_" +name+ str(expIND) + ".txt"
                exportBarinel(outBarinel,priorsByFiles.values(),allBugged,allFiles,allTests,outcomes)
                exportPlanner(outPlanner,priorsByFiles.values(),allBugged,allFiles,allTests,outcomes,initials,initialsChooser)
            # sanity sweep: fixed pBug, pValid in {0.0, 0.1, 0.2}
            pBug = 0.6
            if(pureSanity):
                allBugged=allBuggedFilesDB(dbPath,FileNames,buggedTable)
            for j in range(2+1): # pValid < pBug
                pValid = j / 10.0
                file = filePre +str(pBug) + "_" + str(pValid) + "_" + str(expIND)
                outBarinel = outPath+"\\barinel\\" + file + ".csv"
                outPlanner = outPath+"\\planner\\" + file + ".txt"
                priors=priorsByPbugPvalid(allBugged, allFiles, pBug+0.01, pValid+0.01)
                exportBarinel(outBarinel,priors,allBugged,allFiles,allTests,outcomes)
                exportPlanner(outPlanner,priors,allBugged,allFiles,allTests,outcomes,initials,initialsChooser)
    return expIND
def MultyWekaAndSanityMethods(outPath,dbPath,packsPath,numOfExperiments,numOfBugsARR,timesArr,const,minimalTests,maximalTests,wekaAnsArr,initialsFactor,order,numOfPacks,buggedTestsChooser,initialsChooser,notRand,copybool,copyPath,buggedTable,pureSanity, bugsPacks,testTable):
    """Method-level experiment driver (see MultyWekaAndSanity): build or
    copy one instance per experiment and export uniform, weka and sanity
    prior variants per test-count.  Returns the last instance index.

    NOTE(review): Python 2 module (print statement below).
    NOTE(review): exp starts at -1, so bugsPacks[exp] reads the LAST
    element on the first iteration — looks like an off-by-one; confirm.
    NOTE(review): priorsByFiles.values() relies on dict ordering matching
    allFiles — verify.
    """
    #priorsByFiles=priorsFromWeka(dbPath,wekaAns)
    exp=-1
    conf_file = outPath+"conf.txt"
    exportConf(conf_file,packsPath,numOfExperiments,numOfBugsARR,timesArr,const,minimalTests,maximalTests,initialsFactor,order,numOfPacks,buggedTestsChooser,notRand,initialsChooser,buggedTable,pureSanity,testTable)
    exp=-1
    expIND=-1
    timesMax=max(timesArr)
    bugs = allPackBugs(dbPath, 20 , packsPath,numOfExperiments,True)
    if bugsPacks==[]:
        bugsPacks=[choosePackBug(bugs, 20,order,numOfPacks,[]) for x in range(numOfExperiments)]
    print "start Experiment"
    while exp <numOfExperiments:
        bugsIDS, pack=bugsPacks[exp]
        if(len(bugsIDS)==1): # (-1,)
            break
        # build the largest instance once, then truncate per times value
        if copybool:
            allBuggedExp, allFilesExp, allTestsExp, outcomesExp, priorsExp,testsChoosedNamesExp,FileNamesExp = readInstance(exp+1,timesMax,copyPath)
        else:
            allBuggedExp, allFilesExp, allTestsExp, outcomesExp, priorsExp,testsChoosedNamesExp,FileNamesExp = buildInstanceAndOptimizeMethods(bugsIDS, const, dbPath, pack, timesMax,[],buggedTestsChooser,notRand,buggedTable,testTable)
        # unlike the Files variant, a rejected instance still advances exp
        if(len(allTestsExp)<=minimalTests or len(allTestsExp)>maximalTests or len(allBuggedExp)==0):
            exp=exp+1
            continue
        exp=exp+1
        expIND=expIND+1
        for t in range(len(timesArr)):
            times=timesArr[t]
            filePre=str(times)+"_"
            outbugs_Files =outPath+"\\bugs_Files\\"+filePre + str(expIND) + ".txt"
            # truncate to the first times+1 tests and re-shrink
            allTests=allTestsExp[:times+1]
            outcomes=outcomesExp[:times+1]
            FileNames, allBugged, allFiles, allTests, outcomes, priors, testsChoosedNames = optimizations(FileNamesExp, allBuggedExp,allFilesExp, allTests,outcomes, priorsExp,testsChoosedNamesExp)
            exportBugs_Files(outbugs_Files,allBugged,allFiles,bugsIDS,len(allTests),pack,testsChoosedNames,[],FileNames)
            # uniform-prior exports
            outBarinel = outPath+"\\barinel\\"+filePre +"uniform_" + str(expIND) + ".csv"
            outPlanner = outPath+"\\planner\\"+filePre +"uniform_" + str(expIND) + ".txt"
            priors=[0.1 for p in FileNames]
            exportBarinel(outBarinel,priors,allBugged,allFiles,allTests,outcomes)
            initials=int(initialsFactor*times)
            exportPlanner(outPlanner,priors,allBugged,allFiles,allTests,outcomes,initials,initialsChooser)
            # one export per weka classifier answer file
            for wekaAns,name in wekaAnsArr:
                priorsByFiles=priorsFromWeka(dbPath,wekaAns,FileNames,allFiles)
                outBarinel = outPath+"\\barinel\\"+filePre +"weka_" +name+ str(expIND) + ".csv"
                outPlanner = outPath+"\\planner\\"+filePre +"weka_" +name+ str(expIND) + ".txt"
                exportBarinel(outBarinel,priorsByFiles.values(),allBugged,allFiles,allTests,outcomes)
                exportPlanner(outPlanner,priorsByFiles.values(),allBugged,allFiles,allTests,outcomes,initials,initialsChooser)
            # sanity sweep: fixed pBug, pValid in {0.0, 0.1, 0.2}
            pBug = 0.6
            if(pureSanity):
                allBugged=allBuggedFilesDB(dbPath,FileNames,buggedTable)
            for j in range(2+1): # pValid < pBug
                pValid = j / 10.0
                file = filePre +str(pBug) + "_" + str(pValid) + "_" + str(expIND)
                outBarinel = outPath+"\\barinel\\" + file + ".csv"
                outPlanner = outPath+"\\planner\\" + file + ".txt"
                priors=priorsByPbugPvalid(allBugged, allFiles, pBug+0.01, pValid+0.01)
                exportBarinel(outBarinel,priors,allBugged,allFiles,allTests,outcomes)
                exportPlanner(outPlanner,priors,allBugged,allFiles,allTests,outcomes,initials,initialsChooser)
    return expIND
def exportConf(conf_file,packsPath,numOfExperiments,numOfBugs,times,const,minimalTests,maximalTests,initials,order,numOfPacks,buggedTestsChooser,notRand,initialsChooser,buggedTable,pureSanity,testTable):
    """Write the experiment configuration as label/value row pairs.

    NOTE(review): no parameter defaults — callers passing fewer arguments
    (wekaProbs, sanityProbs, MultyWekaAndSanity) raise TypeError.
    NOTE(review): 'wb' + csv.writer is the Python 2 idiom; Python 3 needs
    mode 'w' with newline=''.
    """
    with open(conf_file, 'wb') as f:
        writer = csv.writer(f)
        writer.writerows([["packsPath"],[packsPath],["buggedTable"],[buggedTable],["testTable"],[testTable],
                          ["numOfExperiments"],[numOfExperiments],
                          ["numOfBugs"],[numOfBugs],
                          ["times"],[times],
                          ["const"],[const],
                          ["minimalTests"],[minimalTests],
                          ["maximalTests"],[maximalTests],
                          ["initials"],[initials],
                          ["order"],[order],
                          ["buggedTestsChooser"],[buggedTestsChooser],
                          ["notRand"],[notRand],
                          ["pureSanity"],[pureSanity],
                          ["initialsChooser"],[initialsChooser],
                          ["numOfPacks"],[numOfPacks]] )
def sanityProbs(outPath,dbPath,packsPath,numOfExperiments,numOfBugsARR,timesArr,const,minimalTests,maximalTests,initials,order,numOfPacks,buggedTestsChooser,initialsChooser,notRand):
    """Experiment driver sweeping the full (pBug, pValid) prior grid
    (pBug in 0.0..1.0, pValid <= pBug) per instance.  Returns the number
    of instances written.

    NOTE(review): this driver looks stale — exportConf is called with 13
    of 17 required arguments, buildInstanceAndOptimize without
    `buggedTable`, and exportBugs_Files with 7 of 9 arguments; each is a
    TypeError at runtime.  `1/initialsChooser` below is integer division
    under Python 2 when initialsChooser is an int.
    """
    #bugs = allPackBugs(dbPath, numOfBugs, packsPath,numOfExperiments,False)
    conf_file = outPath+"conf.txt"
    exportConf(conf_file,packsPath,numOfExperiments,numOfBugsARR,timesArr,const,minimalTests,maximalTests,initials,order,numOfPacks,buggedTestsChooser,notRand)
    expIND=0
    for numOfBugs in numOfBugsARR:
        bugs = allPackBugs(dbPath, 30 , packsPath,numOfExperiments,True)
        start = min([20, len(bugs)-1])
        for choo in [-1]:
            # choo == -1 picks the last (smallest) package entry
            bugsIDS, pack=choosePackBug(bugs, numOfBugs,order,numOfPacks,[choo])
            for t in range(len(timesArr)):
                times=timesArr[t]
                exp=-1
                cont=0
                while exp <numOfExperiments:
                    if(len(bugsIDS)==1): # (-1,)
                        break
                    allBugged, allFiles, allTests, outcomes, priors,testsChoosedNames = buildInstanceAndOptimize(bugsIDS, const, dbPath, pack, times,None,buggedTestsChooser,notRand)
                    # reject out-of-bounds instances; give up after 5 rejections
                    if(len(allTests)<=minimalTests or len(allTests)>=maximalTests or len(allBugged)==0):
                        cont=cont+1
                        if( cont==5):
                            cont=0
                            break
                        else:
                            continue
                    exp=exp+1
                    expIND=expIND+1
                    outbugs_Files = outPath+"\\bugs_Files\\" + str(expIND) + ".txt"
                    exportBugs_Files(outbugs_Files,allBugged,allFiles,bugsIDS,len(allTests),pack,testsChoosedNames)
                    # sweep the whole prior grid for this instance
                    for i in range(11):
                        pBug = i / 10.0
                        for j in range(i+1): # pValid < pBug
                            pValid = j / 10.0
                            file = str(pBug) + "_" + str(pValid) + "_" + str(expIND)
                            outBarinel = outPath+"\\barinel\\" + file + ".csv"
                            outPlanner = outPath+"\\planner\\" + file + ".txt"
                            priors=priorsByPbugPvalid(allBugged, allFiles, pBug+0.01, pValid+0.01)
                            exportBarinel(outBarinel,priors,allBugged,allFiles,allTests,outcomes)
                            exportPlanner(outPlanner,priors,allBugged,allFiles,allTests,outcomes,initials,1/initialsChooser)
    return expIND
def statisticalInfo(dbPath,packsPath):
    """Per-package summary rows: [package, #bugs, #tests, #files],
    counting distinct tests and files whose name starts with the package
    prefix."""
    packsBugs=allPackBugs(dbPath, 0, packsPath,0,False)
    conn = sqlite3.connect(dbPath)
    conn.text_factory = str
    cur = conn.cursor()
    ans=[]
    for bugsList, package in packsBugs:
        testQuery="select distinct Test from testsFiles where testsFiles.fileName like \""+package+".%\" "
        fileQuery="select distinct fileName from testsFiles where testsFiles.fileName like \""+package+".%\" "
        testNames=[row[0] for row in cur.execute(testQuery)]
        fileNames=[row[0] for row in cur.execute(fileQuery)]
        ans.append([package,len(bugsList),len(testNames),len(fileNames)])
    return ans
def statisticalInfoMethods(dbPath,packsPath):
    """Summarise, per package, counts from the method-level tests database.

    Returns a list of [package, numBugs, numDistinctTests, numDistinctMethods].
    Mirrors statisticalInfo but queries the testsMethods table with a
    substring LIKE pattern instead of a prefix match.
    """
    packsBugs = allPackBugsMethods(dbPath, 0, packsPath, 0, False, "buggedMethods")
    conn = sqlite3.connect(dbPath)
    conn.text_factory = str
    c = conn.cursor()
    ans = []
    for bugs, package in packsBugs:
        # NOTE(review): string-built SQL; see statisticalInfo.
        like = "\"%" + package + "%\" "
        testsNames = [r[0] for r in c.execute(
            "select distinct Test from testsMethods where testsMethods.methodName like " + like)]
        testsFiles = [r[0] for r in c.execute(
            "select distinct methodName from testsMethods where testsMethods.methodName like " + like)]
        ans.append([package, len(bugs), len(testsNames), len(testsFiles)])
    return ans
def copySTMS(outPath,utilsPath):
    """Copy the planner/barinel tool files from *utilsPath* into *outPath*.

    *outPath* is treated as a Windows-style directory prefix (a trailing
    backslash is appended); the component table goes under the planner
    sub-directory, the jars and batch runner at the root.

    NOTE(review): the original built a large unused ``lines`` list of 9001
    rows; that dead code has been removed. Some callers in this file invoke
    copySTMS with a single argument — confirm which signature is intended.
    """
    outPath = outPath + "\\"
    shutil.copyfile(os.path.join(utilsPath, "conv_comp_table.csv"), outPath + "planner\\conv_comp_table.csv")
    shutil.copyfile(os.path.join(utilsPath, "barinel.jar"), outPath + "barinel.jar")
    shutil.copyfile(os.path.join(utilsPath, "planner150.jar"), outPath + "planner150.jar")
    shutil.copyfile(os.path.join(utilsPath, "barinelRun.bat"), outPath + "barinelRun.bat")
def transposeBugs():
    """Build d: bug id -> list of packages containing that bug.

    Reads the module-level dbPath/numOfBugs/packsPath and publishes every
    intermediate through module globals (original design preserved).
    """
    global bugs, d, x, lst, p, b
    bugs = allPackBugs(dbPath, numOfBugs, packsPath, 0, False)
    d = {}
    for x in bugs:
        lst, p = x
        for b in lst:
            if b in d:
                # Bug fix: list.append returns None, so the original
                # `d[b] = (d[b]).append(p)` replaced the list with None on
                # a bug's second occurrence. Append in place instead.
                d[b].append(p)
            else:
                d[b] = [p]
def Most_All_Mkdirs(outPath,experimentsInstances):
    """Create one "<training>-<testing>" output directory per experiment
    instance, populate its sub-structure via dirStruct, and return the list
    of created directory paths (Windows-style, trailing backslash)."""
    created = []
    for training, testing, _weka, _table in experimentsInstances:
        target = "%s\\%s-%s\\" % (outPath, training, testing)
        if not os.path.isdir(target):
            os.mkdir(target)
        dirStruct(target)
        created.append(target)
    return created
def RunAndResults(buggedTestsChooser, bugsPacks, const, copy, copyPath, outpath, dbPath, initialsChooser, initialsFactor,
                  maximalTests, minimalTests, numOfBugs, numOfExperiments, numOfPacks, packsPath, pureSanity, table,
                  times, wekaAnsArr,testTable):
    """Generate experiment instances, run the external planner and barinel
    tools over them, then aggregate their CSV results.

    Writes all artifacts under *outpath*. Relies on planner150.jar and
    barinelRun.bat having been copied there beforehand (see copySTMS).
    """
    # Build the optimization instances; may shrink numOfExperiments.
    numOfExperiments = MultyWekaAndSanityMethods(outpath, dbPath, packsPath, numOfExperiments, numOfBugs, times, const, minimalTests,
                                                 maximalTests, wekaAnsArr, initialsFactor, False, numOfPacks, buggedTestsChooser,
                                                 initialsChooser, False, copy, copyPath, table, pureSanity, bugsPacks,testTable)
    weka = True
    # Run the planner over every exported instance (threshold 0.7).
    run_commands = ["java", "-jar", "planner150.jar","1", outpath + "\\planner\\", outpath + "\\plannerRecords\\", str(0.7)]
    proc = subprocess.Popen(run_commands, stdout=subprocess.PIPE, shell=True,cwd=outpath)
    (out, err) = proc.communicate()
    # Run barinel via its batch wrapper (Windows-only).
    run_commands = ["barinelRun.bat"]
    proc = subprocess.Popen(run_commands, stdout=subprocess.PIPE, shell=True,cwd=outpath)
    (out, err) = proc.communicate()
    # Aggregate planner results per termination type.
    types = ["all", "normal", "can't advance"]
    a = 0
    for t in types:
        a = a + 1
        results.planner_resultsMultyWekaAndSanity(outpath + "\\plannerRes" + t + ".csv", outpath + "\\plannerMEDRes" + t + ".csv", outpath,
                                                  numOfExperiments, t, weka, times, wekaAnsArr)
    # results.planner_recordes(outPath+"plannerRes"+t+".csv",outPath+"plannerMEDRes"+t+".csv",outPath+"\\plannerRecords\\",numOfExperiments,t,weka,exps)
    # results.resultsAllBarinel("%s\\barinelOptA.csv" % outPath,"%s\\barinelOptA2.csv" % outPath, "%s\\" % outPath,1,weka,numOfExperiments)
    results.resultsMultyWekaAndSanity("%s\\barinelOptA.csv" % outpath, "%s\\barinelOptA2.csv" % outpath, "%s\\" % outpath, 1,
                                      numOfExperiments, times, wekaAnsArr)
def RunAndResultsMethods(buggedTestsChooser, bugsPacks, const, copy, copyPath, d, dbPath, initialsChooser, initialsFactor,
                         maximalTests, minimalTests, numOfBugs, numOfExperiments, numOfPacks, packsPath, pureSanity, table,
                         times, wekaAnsArr):
    """Method-level variant of RunAndResults: builds instances (no testTable
    argument), then drives planner150.jar and barinelRun.bat through
    `cmd /c` shell strings instead of subprocess lists. Windows-only.
    """
    numOfExperiments = MultyWekaAndSanityMethods(d, dbPath, packsPath, numOfExperiments, numOfBugs, times, const, minimalTests,
                                                 maximalTests, wekaAnsArr, initialsFactor, False, numOfPacks, buggedTestsChooser,
                                                 initialsChooser, False, copy, copyPath, table, pureSanity, bugsPacks)
    weka = True
    # The % formatting applies only to the last string literal, which holds
    # all four %s placeholders, so the concatenation is well-formed.
    plannerRunSTMT = "cmd /x /c \" cd /d " + d + " & java -jar planner150.jar %s %s %s %s \"" % (
        str(1), d + "\\planner\\", d + "plannerRecords\\", str(0.7))
    os.system(plannerRunSTMT)
    bat_ = "cmd.exe /X /C \" cd /d " + d + " & " + d + "barinelRun.bat\""
    os.system(bat_)
    # Aggregate planner results per termination type.
    types = ["all", "normal", "can't advance"]
    a = 0
    for t in types:
        a = a + 1
        results.planner_resultsMultyWekaAndSanity(d + "plannerRes" + t + ".csv", d + "plannerMEDRes" + t + ".csv", d,
                                                  numOfExperiments, t, weka, times, wekaAnsArr)
        # results.planner_recordes(outPath+"plannerRes"+t+".csv",outPath+"plannerMEDRes"+t+".csv",outPath+"\\plannerRecords\\",numOfExperiments,t,weka,exps)
        # results.resultsAllBarinel("%s\\barinelOptA.csv" % outPath,"%s\\barinelOptA2.csv" % outPath, "%s\\" % outPath,1,weka,numOfExperiments)
    results.resultsMultyWekaAndSanity("%s\\barinelOptA.csv" % d, "%s\\barinelOptA2.csv" % d, "%s\\" % d, 1,
                                      numOfExperiments, times, wekaAnsArr)
def Most_All_Real(outPath,dbPath,packsPath,wekaBase,numOfExperiments,numOfBugs,times,const,minimalTests,maximalTests,initialsFactor,numOfPacks,buggedTestsChooser,initialsChooser,copy,copyPath,pureSanity):
    """Run the all/most training-vs-testing experiment matrix and merge the
    per-instance barinel and planner CSVs into two summary files.

    NOTE(review): copySTMS below is called with one argument although its
    definition takes (outPath, utilsPath), and RunAndResults is called
    without its trailing testTable parameter — both look like call sites
    left behind after a signature change; confirm before running.
    """
    experimentsInstances=[] # tuple of training,testing, weka, table
    wekaAlgs=["weka.classifiers.trees.J48" ,"weka.classifiers.bayes.NaiveBayes" , "weka.classifiers.trees.RandomForest" ]
    # The re-assignment narrows the run to RandomForest only.
    wekaAlgs=[ "weka.classifiers.trees.RandomForest" ]
    wekaAnsArr=[(wekaBase+"CDT_8_1_2_AllFiles_1_Only.csv",w) for w in wekaAlgs]#+[(wekaBase+"weka.classifiers.trees.RandomForest_Style2.csv","prev")] #all
    experimentsInstances.append(["all","all",wekaAnsArr, "buggedFiles" ] )
    # `if 0==1` blocks are disabled experiment variants kept for reference.
    if 0==1:
        experimentsInstances.append(["all","real",wekaAnsArr, "buggedFilesChecked" ] )
    wekaAnsArr=[(wekaBase+w+"_Most_blameP.csv",w) for w in wekaAlgs] #most
    experimentsInstances.append(["most","most",wekaAnsArr, "buggedFilesOne" ] )
    experimentsInstances.append(["most","real",wekaAnsArr, "buggedFilesChecked" ] )
    if 0==1:
        wekaAnsArr=[(wekaBase+w+"_CleanAll.csv",w) for w in wekaAlgs] #all+
        experimentsInstances.append(["allP","allP",wekaAnsArr, "buggedFiles" ] )
        experimentsInstances.append(["allP","real",wekaAnsArr, "buggedFilesChecked" ] )
        wekaAnsArr=[(wekaBase+w+"_Most_Clean.csv",w) for w in wekaAlgs] #most+
        experimentsInstances.append(["mostP","mostP",wekaAnsArr, "buggedFilesOne" ] )
        experimentsInstances.append(["mostP","real",wekaAnsArr, "buggedFilesChecked" ] )
    # One shared pool of bug packs for all instances.
    bugs = allPackBugs(dbPath, 2 , packsPath,numOfExperiments,True,"buggedFilesChecked")
    bugsPacks=[choosePackBug(bugs, 7,False,1,[])for x in range(numOfExperiments)]
    dirs=Most_All_Mkdirs(outPath,experimentsInstances)
    for ei,d in zip(experimentsInstances,dirs):
        copySTMS(d)
    for ei,d in zip(experimentsInstances,dirs):
        training,testing, weka, table=ei
        RunAndResults(buggedTestsChooser, bugsPacks, const, copy, copyPath, d, dbPath, initialsChooser, initialsFactor,
                      maximalTests, minimalTests, numOfBugs, numOfExperiments, numOfPacks, packsPath, pureSanity, table,
                      times, wekaAnsArr)
    # Merge the per-instance barinel summaries, skipping each file's header.
    linesWrite=[]
    linesWrite.append(["training","testing", "algorithm","pBug","pValid","times","precision_avg","recall_avg","auc_avg","tests_avg","files_avg","bugged_avg" ])
    for ei,d in zip(experimentsInstances,dirs):
        training,testing, weka, table=ei
        bar= d+"\\barinelOptA.csv"
        reader= csv.reader(open(bar,"r"))
        i=0
        for r in reader:
            i=i+1
            if i==1:
                continue
            linesWrite.append([training,testing]+r)
    # 'wb' with csv.writer is the Python 2 convention used throughout.
    with open(outPath+"\\barinelOptA.csv", 'wb') as f:
        writer = csv.writer(f)
        writer.writerows(linesWrite)
    # Same merge for the planner "all" results.
    linesWrite=[]
    linesWrite.append(["training","testing", "algorithm","pBug","pValid","times","precision_avg","recall_avg","steps","tests_avg","files_avg","bugged_avg","initials","tests" ])
    for ei,d in zip(experimentsInstances,dirs):
        training,testing, weka, table=ei
        t="all"
        bar= d+"plannerRes"+t+".csv"
        reader= csv.reader(open(bar,"r"))
        i=0
        for r in reader:
            i=i+1
            if i==1:
                continue
            linesWrite.append([training,testing]+r)
    with open(outPath+"\\plannerRes.csv", 'wb') as f:
        writer = csv.writer(f)
        writer.writerows(linesWrite)
def Old_main():
    """Legacy single-configuration driver kept for reference.

    Hard-codes all paths/parameters, builds instances via MultyWekaAndSanity,
    then runs planner150.jar and barinelRun.bat through `cmd` (Windows-only)
    and aggregates results. Successive re-assignments below mean only the
    LAST value of each variable takes effect.

    NOTE(review): copySTMS(outPath) passes one argument although copySTMS is
    defined with (outPath, utilsPath) — confirm before running.
    """
    dbPath="C:\\GitHub\\agent\\testsBugs.db"
    outPath="C:\\GitHub\\experiments\\check4\\"
    #copyPath="C:\\GitHub\\experiments\\WekaNet\\"
    copyPath="C:\\GitHub\\experiments\\ManyPacks6\\"
    copy=False
    packsPath="C:\\GitHub\\agent\\PacksFiltered.txt"
    wekaBase="C:\\GitHub\\experiments\\"
    wekaAlgs=["weka.classifiers.trees.J48" ,"weka.classifiers.bayes.NaiveBayes" , "weka.classifiers.trees.RandomForest" ]
    wekaAlgs=[ "weka.classifiers.trees.RandomForest" ]
    wekaAnsArr=[(wekaBase+w+"_Style2.csv",w) for w in wekaAlgs] #all
    wekaAnsArr=[(wekaBase+w+"_OneBug2.csv",w) for w in wekaAlgs] #most
    wekaAnsArr=[(wekaBase+w+"_CleanAll.csv",w) for w in wekaAlgs] #all+
    wekaAnsArr=[(wekaBase+w+"_Most_Clean.csv",w) for w in wekaAlgs] #most+
    wekaAnsArr=[(wekaBase+"CDT_8_1_2_AllFiles_1_Only.csv",w) for w in wekaAlgs] #most+
    numOfExperiments=10
    buggedTable="buggedFiles"
    #buggedTable="buggedFilesOne"
    #buggedTable="buggedFilesChecked"
    numOfPacks=1
    numOfrepeats=1
    numOfBugs=[2]
    times=[25,40,70,100,130]
    const=0.2
    minimalTests=100
    maximalTests=220
    buggedTestsChooser=10
    initialsFactor=0.1
    initialsChooser=0.5
    tresh=0.7
    pureSanity=True
    tresh=0.7
    outPath="C:\\GitHub\\experiments\\"+str("Ext")+"\\"
    o=outPath
    if not (os.path.isdir(o)):
        os.mkdir(o)
    dirStruct(outPath)
    #outPath,dbPath,packsPath,wekaBase,numOfExperiments,numOfBugs,times,const,minimalTests,maximalTests,initialsFactor,numOfPacks,buggedTestsChooser,initialsChooser,copy,copyPath,pureSanity
    #Most_All_Real(outPath,dbPath,packsPath,numOfExperiments,numOfBugs,times,const,minimalTests,maximalTests,wekaAnsArr,initialsFactor,numOfPacks,buggedTestsChooser,initialsChooser,copy,copyPath,pureSanity)
    numOfExperiments=MultyWekaAndSanity(outPath,dbPath,packsPath,numOfExperiments,numOfBugs,times,const,minimalTests,maximalTests,wekaAnsArr,initialsFactor,False,numOfPacks,buggedTestsChooser,initialsChooser,False,copy,copyPath,buggedTable,pureSanity)
    weka=True
    copySTMS(outPath)
    plannerRunSTMT="cmd /x /c \"c: & cd C:\\GitHub\\agent & java -jar planner150.jar %s %s %s %s \"" %(str(numOfrepeats),outPath+"\\planner\\",outPath+"plannerRecords\\",str(tresh))
    os.system(plannerRunSTMT)
    bat_ = "cmd.exe /X /C \"c: & cd " + outPath + " & " + outPath + "barinelRun.bat\""
    os.system(bat_)
    # Python 2 idiom: range(...) returns a list, so slicing works directly.
    exps=range(numOfExperiments+1)[1:]
    types=["all","normal","can't advance"]
    a=0
    for t in types:
        a=a+1
        results.planner_resultsMultyWekaAndSanity(outPath+"plannerRes"+t+".csv" , outPath+"plannerMEDRes"+t+".csv",outPath,numOfExperiments,t,weka,times,wekaAnsArr)
        #results.planner_recordes(outPath+"plannerRes"+t+".csv",outPath+"plannerMEDRes"+t+".csv",outPath+"\\plannerRecords\\",numOfExperiments,t,weka,exps)
        #results.resultsAllBarinel("%s\\barinelOptA.csv" % outPath,"%s\\barinelOptA2.csv" % outPath, "%s\\" % outPath,1,weka,numOfExperiments)
    results.resultsMultyWekaAndSanity("%s\\barinelOptA.csv" % outPath,"%s\\barinelOptA2.csv" % outPath, "%s\\" % outPath,1,numOfExperiments,times,wekaAnsArr)
def eclipse():
    """Drive the Eclipse-project experiment suite with its canned settings."""
    # Input/output locations.
    dbPath = "C:\\GitHub\\agent\\testsBugs.db"
    outPath = "C:\\GitHub\\experiments\\E8\\"
    packsPath = "C:\\GitHub\\agent\\PacksFiltered.txt"
    wekaBase = "C:\\GitHub\\experiments\\wekOut\\"
    # Experiment knobs (numOfrepeats and tresh are kept for parity with the
    # sibling drivers even though this one does not use them).
    numOfExperiments = 10
    numOfPacks = 1
    numOfrepeats = 1
    numOfBugs = [2]
    times = [25, 40, 70, 100, 130]
    const = 0.2
    minimalTests = 25
    maximalTests = 220
    buggedTestsChooser = 10
    initialsFactor = 0.1
    initialsChooser = 0.5
    tresh = 0.7
    pureSanity = False
    if not os.path.isdir(outPath):
        os.mkdir(outPath)
    Most_All_Real(outPath, dbPath, packsPath, wekaBase, numOfExperiments, numOfBugs,
                  times, const, minimalTests, maximalTests, initialsFactor, numOfPacks,
                  buggedTestsChooser, initialsChooser, False, "", pureSanity)
def POI():
    """Drive the POI-project (file-level) experiment suite.

    NOTE(review): copySTMS(outPath) passes one argument to a two-parameter
    function, and RunAndResults is called without its trailing testTable
    parameter — confirm these call sites before running.
    """
    dbPath="C:\projs\poi2Working\\testsBugs.db"
    outPath="C:\\GitHub\\experiments\\POI\\"
    packsPath="C:\projs\poiWorking\\POIpacks.txt"
    numOfExperiments=10
    numOfPacks=1
    numOfrepeats=1
    numOfBugs=[2]
    times=[25,40,70,100,130]
    #times=[10,20,30,40]
    const=0.2
    minimalTests=25
    maximalTests=220
    buggedTestsChooser=10
    initialsFactor=0.1
    initialsChooser=0.5
    tresh=0.7
    pureSanity=False
    copyPath="C:\\GitHub\\experiments\\POI50E2\\"
    # Single iteration kept in loop form so the range can be widened easily.
    for i in range(1):
        outPath="C:\\GitHub\\experiments\\PO8"+str(i)+"\\"
        copy=False
        o=outPath
        if not (os.path.isdir(o)):
            os.mkdir(o)
        bugs = allPackBugs(dbPath, 2 , packsPath,numOfExperiments,True,"buggedFiles")
        bugsPacks=[choosePackBug(bugs, 6,False,5,[])for x in range(numOfExperiments)]
        dirStruct(outPath)
        copySTMS(outPath)
        wekaAnsArr=[("C:\GitHub\weka\poi\\poiRF.csv","randomForest")]#+[(wekaBase+"weka.classifiers.trees.RandomForest_Style2.csv","prev")] #all
        RunAndResults(buggedTestsChooser, bugsPacks, const, copy, copyPath, outPath, dbPath, initialsChooser, initialsFactor,
                      maximalTests, minimalTests, numOfBugs, numOfExperiments, numOfPacks, packsPath, pureSanity, "buggedFiles" ,
                      times, wekaAnsArr)
def POIMethods():
    """Drive the POI-project method-level experiment suite (5 repeats).

    Creates the packs file on first use via packFileCreate.
    NOTE(review): copySTMS(outPath) passes one argument to a two-parameter
    function, and RunAndResults is called without its trailing testTable
    parameter — confirm these call sites before running.
    """
    dbPath="C:\projs\poi2Working\\testsBugsMethods.db"
    outPath="C:\\GitHub\\experiments\\POIMethods4\\"
    packsPath="C:\projs\poi2Working\\POIpacks14.txt"
    #statisticalInfoMethods(dbPath,packsPath)
    if not os.path.isfile(packsPath):
        packFileCreate(dbPath,1,-1,packsPath)
    numOfExperiments=10
    numOfPacks=1
    numOfrepeats=1
    numOfBugs=[2]
    times=[25,40,70,100,130]
    #times=[10,20,30,40]
    const=0.2
    minimalTests=25
    maximalTests=220
    buggedTestsChooser=10
    initialsFactor=0.1
    initialsChooser=0.5
    tresh=0.7
    pureSanity=False
    copyPath="C:\\GitHub\\experiments\\POI50E2\\"
    for i in range(5):
        copy=False
        outPath="C:\\GitHub\\experiments\\POIMethods12"+str(i)+"\\"
        o=outPath
        if not (os.path.isdir(o)):
            os.mkdir(o)
        #bugs = allPackBugs(dbPath, 2 , packsPath,numOfExperiments,True,"buggedFiles")
        bugs = allPackBugsMethods(dbPath, 1 , packsPath,numOfExperiments,True,"buggedMethods")
        bugsPacks=[choosePackBug(bugs, 20,False,15,[])for x in range(numOfExperiments)]
        dirStruct(outPath)
        copySTMS(outPath)
        wekaAnsArr=[("C:\projs\poi2Working\weka\selected\\weka.classifiers.bayes.NaiveBayes_AllFiles_3.csv","randomForest")]#+[(wekaBase+"weka.classifiers.trees.RandomForest_Style2.csv","prev")] #all
        RunAndResults(buggedTestsChooser, bugsPacks, const, copy, copyPath, outPath, dbPath, initialsChooser, initialsFactor,
                      maximalTests, minimalTests, numOfBugs, numOfExperiments, numOfPacks, packsPath, pureSanity, "buggedMethods" ,
                      times, wekaAnsArr)
def RunExperiments(dbPath,outPath,packsPath,wekaPath,Unit,buggedType,utilsPath):
    """Entry point: run the experiment pipeline for one project.

    Unit selects the granularity ("File" or "method" — note the inconsistent
    capitalization, which callers must match exactly) and buggedType
    ("All"/"Most") selects which bugged-components table to use.
    """
    print "RunExperiments"
    numOfExperiments=20
    numOfPacks=1
    times=[25,40,70,100,130, 180]
    const=0.05
    minimalTests=25
    maximalTests=250
    buggedTestsChooser=10
    initialsFactor=0.1
    initialsChooser=0.5
    tresh=0.7
    pureSanity=False
    # Map (Unit, buggedType) onto the table names used by the database.
    table=""
    testTable=""
    if (Unit=="File"):
        testTable="testsFiles"
        if (buggedType=="All"):
            table="buggedFiles"
        if (buggedType=="Most"):
            table="buggedFilesMostModified"
    if (Unit=="method"):
        testTable="testsMethods"
        if (buggedType=="All"):
            table="buggedMethods"
        if (buggedType=="Most"):
            table="buggedMethodsMostModified"
    dirStruct(outPath)
    copySTMS(outPath,utilsPath)
    bugs = allPackBugs(dbPath, 1 , packsPath,numOfExperiments,True,table)
    bugsPacks=[choosePackBug(bugs, 2,False,10,[])for x in range(numOfExperiments)]
    wekaAnsArr=[(wekaPath,"randomForest")]#+[(wekaBase+"weka.classifiers.trees.RandomForest_Style2.csv","prev")] #all
    RunAndResults(buggedTestsChooser, bugsPacks, const, False, "", outPath, dbPath, initialsChooser, initialsFactor,
                  maximalTests, minimalTests, [2], numOfExperiments, numOfPacks, packsPath, pureSanity, table ,
                  times, wekaAnsArr,testTable)
@utilsConf.marker_decorator(utilsConf.PACKS_FILE_MARKER)
def packFileCreate(dbpath, startInd, endInd,outPath):
    """Write to *outPath* the list of package-path prefixes that have bugs,
    then rewrite it keeping only packages with non-zero bug/test/method
    counts (per statisticalInfoMethods).

    NOTE(review): startInd/endInd are currently unused — the slicing they
    controlled is commented out below.
    """
    conn = sqlite3.connect(dbpath)
    conn.text_factory = str
    c = conn.cursor()
    lines=set()
    #wanted_files='select distinct name from haelsTfiles order by name'
    wanted_files='select distinct fileName from buggedFiles order by fileName'
    for row in c.execute(wanted_files):
        r=row[0]
        r=r.split("\\")
        #r=r[startInd:endInd]
        # Drop the file name, keep every directory-prefix of the path.
        r=r[:-1]
        concat=[]
        for elem in r:
            concat.append(elem)
            lines.add("\\".join(concat))
        #r=r
        #lines.add("\\".join(r))
    # First pass: all prefixes ('wb' is the Python 2 csv convention).
    f=open(outPath,"wb")
    writer=csv.writer(f)
    writer.writerows([[x] for x in list(lines)])
    f.close()
    # Second pass reads the file just written and filters empty packages.
    ans=statisticalInfoMethods(dbpath,outPath)
    newLines=[]
    for row in ans:
        # Note: this unpack rebinds `c` (previously the cursor); the cursor
        # is no longer needed at this point.
        pack,a,b,c=row
        if a>0 and b>0 and c>0:
            newLines.append(pack)
    f=open(outPath,"wb")
    writer=csv.writer(f)
    writer.writerows([[x] for x in list(newLines)])
    f.close()
if __name__ == "__main__":
    # NOTE(review): RunExperiments is defined with 7 parameters
    # (..., buggedType, utilsPath) but is called here with 6 arguments —
    # this raises TypeError as written; confirm the intended utilsPath.
    RunExperiments(os.path.join(workingDir,"testsBugsMethods.db"), outPath,packsPath,outCsv,"method",buggedType)
    #RunExperiments("C:\projs\\antWorking\\testsBugs.db","C:\\GitHub\\experiments\\ant1\\","C:\projs\\antWorking\\antPacks.txt","C:\GitHub\weka\\ant\\antRF.csv")
    #packFileCreate("C:\projs\\antWorking\\dbAdd\\ANT_182.db",3,-2,"C:\projs\\antWorking\\antPacks.txt")
    exit()
    # Everything below exit() is unreachable legacy configuration, kept
    # for reference.
    dbPath="C:\\GitHub\\agent\\testsBugs.db"
    outPath="C:\\GitHub\\experiments\\POI\\"
    packsPath="C:\\GitHub\\agent\\PacksFiltered.txt"
    wekaBase="C:\\GitHub\\experiments\\wekOut\\"
    dbPath="C:\projs\poiWorking\\testsBugs3.db"
    outPath="C:\\GitHub\\experiments\\POI\\"
    packsPath="C:\projs\poiWorking\\POIpacks.txt"
    numOfExperiments=50
    numOfPacks=1
    numOfrepeats=1
    numOfBugs=[2]
    times=[25,40,70,100,130]
    const=0.2
    minimalTests=25
    maximalTests=220
    buggedTestsChooser=10
    initialsFactor=0.1
    initialsChooser=0.5
    tresh=0.7
    pureSanity=True
    copyPath="C:\\GitHub\\experiments\\TenPure1\\"
    for i in range(5):
        outPath="C:\\GitHub\\experiments\\POI50E"+str(i)+"\\"
        copy=False
        o=outPath
        if not (os.path.isdir(o)):
            os.mkdir(o)
        #Old_main()
        #Most_All_Real(outPath,dbPath,packsPath,wekaBase,numOfExperiments,numOfBugs,times,const,minimalTests,maximalTests,initialsFactor,numOfPacks,buggedTestsChooser,initialsChooser,copy,copyPath,pureSanity)
        #statisticalInfo(dbPath,packsPath)
        #exit()
        bugs = allPackBugs(dbPath, 4 , packsPath,numOfExperiments,True,"buggedFiles")
        bugsPacks=[choosePackBug(bugs, 15,False,8,[])for x in range(numOfExperiments)]
        dirStruct(outPath)
        copySTMS(outPath)
        wekaAnsArr=[(wekaBase+"CDT_8_1_2_AllFiles_1_Only.csv","f")]#+[(wekaBase+"weka.classifiers.trees.RandomForest_Style2.csv","prev")] #all
        wekaAnsArr=[("C:\GitHub\weka\m29\\poiRF.csv","randomForest")]#+[(wekaBase+"weka.classifiers.trees.RandomForest_Style2.csv","prev")] #all
        RunAndResults(buggedTestsChooser, bugsPacks, const, copy, copyPath, outPath, dbPath, initialsChooser, initialsFactor,
                      maximalTests, minimalTests, numOfBugs, numOfExperiments, numOfPacks, packsPath, pureSanity, "buggedFiles" ,
                      times, wekaAnsArr)
import speech_recognition as sr # type: ignore
class Listen:
    """Capture one spoken utterance and transcribe it via Google (Polish)."""

    def listen_for_speech(self, text: str = "Powiedz cość") -> str:
        """Prompt with *text*, record from the microphone and return the
        transcript lower-cased; '' when nothing was understood or the
        recognition service failed.
        """
        recognizer = sr.Recognizer()
        with sr.Microphone() as source:
            # NOTE(review): prompt looks like a typo for "Powiedz coś" —
            # confirm before changing the default.
            print(text)
            audio = recognizer.listen(source)
        try:
            transcript = recognizer.recognize_google(audio, language="pl-PL")
            if transcript != "":
                return transcript.lower()
            return ""
        except sr.UnknownValueError:
            print("Nie zrozumiałem")
        except sr.RequestError as e:
            print(f"Error: {e}")
        return ''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 Sean Kirmani <sean@kirmani.io>
#
# Distributed under terms of the MIT license.
"""TODO(Sean Kirmani): DO NOT SUBMIT without one-line documentation for test
TODO(Sean Kirmani): DO NOT SUBMIT without a detailed description of test.
"""
# Token and delimiter constants used by the toy Java lexer below.
SEMICOLON = ';'
OPEN_BRACE = '{'
CLOSE_BRACE = '}'
# '{}' as one string: top-level blocks whose bodies were hoisted out by
# _GetTree end with this pair.
BRACES_SET = OPEN_BRACE + CLOSE_BRACE
OPEN_PAREN = '('
CLOSE_PAREN = ')'
COMMA = ','
COMMENT_BLOCK_BEGIN = '/**'
COMMENT_BLOCK_END = '*/'
CONDITIONALS = ['if', 'else', 'for', 'do', 'while']
PACKAGE_START_STRING = 'package '
IMPORT_START_STRING = 'import '
# Java Object Types
ROOT = 'root'
PACKAGE = 'package'
IMPORT = 'import'
CLASS = 'class'
METHOD = 'method'
VARIABLE = 'variable'
PARAMETERS = 'parameters'
CONDITIONAL = 'conditional'
class JavaLexer(object):
    """Root of the parse: reads a Java source file object and builds a tree
    of Java* nodes from its comment-stripped, whitespace-flattened text."""

    def __init__(self, f):
        self._file = f
        self._file_lines = f.readlines()
        self.thing = ROOT
        self.children = _CreateTree(self._RemoveComments(self._RawText()))

    def _RawText(self):
        """Collapse all lines into one string, stripping edge whitespace."""
        return ''.join(line.strip() for line in self._file_lines)

    def _RemoveComments(self, text):
        """Return *text* with every /** ... */ block removed."""
        kept = []
        in_comment = False
        begin_width = len(COMMENT_BLOCK_BEGIN)
        end_width = len(COMMENT_BLOCK_END)
        for index in range(len(text)):
            if text[index:index + begin_width] == COMMENT_BLOCK_BEGIN:
                in_comment = True
            if not in_comment:
                kept.append(text[index])
            if text[index + 1 - end_width:index + 1] == COMMENT_BLOCK_END:
                in_comment = False
        return ''.join(kept)

    def Printable(self):
        return self.thing
class JavaPackage(object):
    """Node for a `package a.b.c;` declaration."""

    def __init__(self, block):
        self.thing = PACKAGE
        # Strip the leading keyword, then the trailing semicolon.
        trimmed = block[len(PACKAGE_START_STRING):]
        self.package = trimmed[:-len(SEMICOLON)]

    def Printable(self):
        return '{0}: {1}'.format(self.thing, self.package)
class JavaImport(object):
    """Node for an `import [static] a.b.C;` declaration."""

    def __init__(self, block):
        self.thing = IMPORT
        body = block[len(IMPORT_START_STRING):][:-len(SEMICOLON)]
        self.is_static = 'static' in body
        if self.is_static:
            # Drop everything up to and including the 'static' keyword.
            body = body[body.find('static') + len('static'):]
        parts = body.split('.')
        self.package = parts[:-1]
        self.name = parts[-1]
        self.has_children = False

    def Printable(self):
        static_marker = 'static ' if self.is_static else ''
        return '%s: %s%s %s' % (self.thing, static_marker, self.package, self.name)
class JavaClass(object):
    """Node for a class declaration; children are parsed from the hoisted
    body text (*leftover*)."""

    def __init__(self, block, leftover):
        header = block[:-len(BRACES_SET)]
        self.qualifiers = self._GetQualifiers(header)
        self.name = self._GetName(header)
        self.children = _CreateTree(leftover)
        self.thing = CLASS

    def _GetQualifiers(self, block):
        """Words preceding the 'class' keyword (None if it never appears)."""
        qualifiers = []
        for word in block.split():
            if word == 'class':
                return qualifiers
            qualifiers.append(word)

    def _GetName(self, block):
        """Word following the 'class' keyword (None if it never appears)."""
        expect_name = False
        for word in block.split():
            if expect_name:
                return word
            if word == 'class':
                expect_name = True

    def Printable(self):
        return '%s: %s %s' % (self.thing, self.qualifiers, self.name)
class JavaMethod(object):
    """Node for a method declaration; its parameter list becomes a child
    JavaParameters node when non-empty."""

    def __init__(self, block, leftover):
        open_at = block.find(OPEN_PAREN)
        close_at = block.find(CLOSE_PAREN)
        declaration_words = block[:open_at].rstrip().split()
        parameter_text = block[open_at + len(OPEN_PAREN):close_at]
        # Last word before '(' is the method name; the rest qualify it.
        self.qualifiers = declaration_words[:len(declaration_words) - 1]
        self.name = declaration_words[len(declaration_words) - 1]
        parameters = JavaParameters(parameter_text)
        self.children = [parameters] if parameters.children else []
        self.children += _CreateTree(leftover)
        self.thing = METHOD

    def Printable(self):
        return '%s: %s %s' % (self.thing, self.qualifiers, self.name)
class JavaVariable(object):
    """Node for a variable: the trailing two words of *words* are taken as
    type and name, anything before them as qualifiers."""

    def __init__(self, words):
        self.thing = VARIABLE
        last = len(words)
        self.qualifiers = words[:last - 2] if last > 2 else []
        # Keep the explicit arithmetic (not words[-2]) so a single-word
        # input resolves both fields to that word, as before.
        self.object_type = words[last - 2]
        self.name = words[last - 1]

    def Printable(self):
        return '%s: %s %s %s' % (self.thing, self.qualifiers, self.object_type,
                                 self.name)
# TODO: Finish Implementation of this
class JavaConditional(object):
    """Node for a conditional/loop construct (if/else/for/do/while).

    NOTE(review): `cond in block` is a substring test, so e.g. 'do' also
    matches inside identifiers like 'double'; when several keywords occur
    the LAST match wins; and if none match, self.conditional is never set
    and Printable() raises AttributeError.
    """
    def __init__(self, block):
        self.thing = CONDITIONAL
        for cond in CONDITIONALS:
            if cond in block:
                self.conditional = cond
        # Parse the text between the parentheses as this node's children.
        leftover = block[block.find(OPEN_PAREN) + 1: block.find(CLOSE_PAREN)]
        self.children = _CreateTree(leftover)
    def Printable(self):
        return '%s: %s' % (self.thing, self.conditional)
class JavaParameters(object):
    """Container node for a method's parameter list; each parameter with at
    least a type and a name becomes a JavaVariable child."""

    def __init__(self, block):
        self.thing = PARAMETERS
        word_lists = (parameter.split() for parameter in block.split(COMMA))
        self.children = [JavaVariable(words) for words in word_lists
                         if len(words) >= 2]

    def Printable(self):
        return self.thing
def _GetTree(text):
    """Split *text* into (top-level text, text nested inside braces).

    Braces themselves stay in the top-level part; only their contents are
    hoisted into the second string. Note the original `index - 1 > 0` test
    is kept, so a brace at position 0 never opens a nesting level.
    """
    kept = []
    removed = []
    stack = []
    for position in range(len(text)):
        if position - 1 > 0 and text[position - 1] == OPEN_BRACE:
            stack.append(OPEN_BRACE)
        (removed if stack else kept).append(text[position])
        if position + 1 < len(text) and text[position + 1] == CLOSE_BRACE and stack:
            stack.pop()
    return ''.join(kept), ''.join(removed)
def _GetBlocks(text):
    """Cut the top-level text into statement blocks.

    A block ends at a ';' or where an emptied body ('{}') closes. Returns
    (blocks, leftover) where leftover is the brace-nested text hoisted by
    _GetTree.
    """
    flattened, leftover = _GetTree(text)
    blocks = []
    pending = ''
    for index, character in enumerate(flattened):
        pending += character
        ends_statement = character == ';'
        ends_body = flattened[index + 1 - len(BRACES_SET):index + 1] == BRACES_SET
        if ends_statement or ends_body:
            blocks.append(pending)
            pending = ''
    return blocks, leftover
def _CreateTree(text):
    """Build a list of Java* nodes from the blocks of *text*.

    NOTE(review): every class/method node receives the SAME *leftover*
    string (all hoisted brace contents concatenated), so sibling bodies are
    not separated. Also, only class-vs-method is an if/elif; the package,
    import and variable checks are independent ifs, so one block could in
    principle yield multiple nodes.
    """
    blocks, leftover = _GetBlocks(text)
    root = []
    for block in blocks:
        if 'class' in block and block.endswith(BRACES_SET):
            root.append(JavaClass(block, leftover))
        elif block.endswith(BRACES_SET):
            root.append(JavaMethod(block, leftover))
        if block.startswith(PACKAGE_START_STRING) and block.endswith(SEMICOLON):
            root.append(JavaPackage(block))
        if block.startswith(IMPORT_START_STRING) and block.endswith(SEMICOLON):
            root.append(JavaImport(block))
        if '=' in block and block.endswith(SEMICOLON):
            leftside = block[:block.find('=')].rstrip()
            words = leftside.split()
            if len(words) > 1:
                root.append(JavaVariable(words))
    return root
def _PrintTree(root, spaces=''):
    """Recursively print a Java* node tree, indenting two spaces per level."""
    print(spaces + root.Printable())
    for child in getattr(root, 'children', []):
        _PrintTree(child, spaces + '  ')
"""All code below this line is an attempted revision to the generation of the
Java object tree
TODO (kirmani): COME BACK TO THIS"""
# TODO: Potentially use this
class JavaNode(object):
    """Plain tree node for the experimental parser: raw text + children."""

    def __init__(self, text, children):
        self.text = text
        self.children = children
def _GetTreeNew(text, spaces=''):
    """Experimental replacement tree builder (see TODO above).

    NOTE(review): this is unfinished — it appends the result of _GetTree
    (a tuple, not a JavaNode) as a child, and str.replace below removes
    ALL occurrences of each extracted substring, not just the span it was
    taken from.
    """
    indenters = [(OPEN_PAREN, CLOSE_PAREN), (OPEN_BRACE, CLOSE_BRACE)]
    result = text
    remaining = []
    last_end_brace = 0
    for index in range(len(result)):
        if index > last_end_brace:
            for indenter in indenters:
                if result[index] == indenter[0]:
                    end_brace = index + 1 + \
                        _GetEndBrace(result[index + 1:], indenter[0], indenter[1])
                    subtext = result[index + 1 : end_brace]
                    remaining.append(subtext)
                    last_end_brace = end_brace
    for r in remaining:
        result = result.replace(r, '')
    root = JavaNode(result, [])
    for r in remaining:
        root.children.append(_GetTree(r))
    return root
def _GetEndBrace(text, start, end):
count = 0
for index in range(len(text)):
if text[index] == start:
count += 1
if text[index] == end:
if count > 0:
count -= 1
else:
return index
return None
def _PrintTextTree(node, spaces=''):
    """Print a JavaNode tree, adding two spaces of indent per level."""
    print(spaces + node.text)
    for subtree in node.children:
        _PrintTextTree(subtree, spaces + '  ')
|
import threading
class ZeroEvenOdd(object):
    """Coordinate three threads so that zero(), odd() and even(), run
    concurrently, together print 0 1 0 2 0 3 ... 0 n.

    zero() owns the initially-unlocked zero_lock and hands control to the
    parity-matching worker, which hands control back after printing.
    (The original class-level `odd_lock, even_lock, ... = threading.Lock, ...`
    line assigned the Lock *class* objects and was dead code; removed.)
    """

    def __init__(self, n):
        self.odd_lock = threading.Lock()
        self.even_lock = threading.Lock()
        self.zero_lock = threading.Lock()
        # Only zero_lock starts released, so zero() runs first.
        self.odd_lock.acquire()
        self.even_lock.acquire()
        self.n = n

    # printNumber(x) outputs "x", where x is an integer.
    def zero(self, printNumber):
        """Print 0 before each of the n numbers.

        :type printNumber: method
        :rtype: void
        """
        for i in range(1, self.n + 1):
            self.zero_lock.acquire()
            printNumber(0)
            # Wake whichever worker owns the next value i.
            if i % 2 == 0:
                self.even_lock.release()
            else:
                self.odd_lock.release()

    def even(self, printNumber):
        """Print the even numbers 2..n in order.

        :type printNumber: method
        :rtype: void
        """
        for i in range(2, self.n + 1, 2):
            self.even_lock.acquire()
            printNumber(i)
            self.zero_lock.release()

    def odd(self, printNumber):
        """Print the odd numbers 1..n in order.

        :type printNumber: method
        :rtype: void
        """
        for i in range(1, self.n + 1, 2):
            self.odd_lock.acquire()
            printNumber(i)
            self.zero_lock.release()
def printNumber(n):
    """Print one number per line. Uses print(n), which is valid in both
    Python 2 and Python 3 (the original `print n` is a SyntaxError on 3)."""
    print(n)


# Demo: the three coordinated threads print 0 1 0 2 0 3 0 4, one per line.
# The interpreter waits for the non-daemon threads before exiting.
a = ZeroEvenOdd(4)
t1 = threading.Thread(target=a.zero, args=[printNumber])
t2 = threading.Thread(target=a.odd, args=[printNumber])
t3 = threading.Thread(target=a.even, args=[printNumber])
t1.start()
t2.start()
t3.start()
from botocore.exceptions import ClientError
import boto3
import os
import logging
import json
import time
import uuid
# Module-level plumbing shared across Lambda invocations (kept warm).
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Raises KeyError at import time if AWS_DEFAULT_REGION is not set.
region = os.environ['AWS_DEFAULT_REGION']
sts_client = boto3.client('sts')
def lambda_handler(event, context):
    """Provision the log-pipeline CloudFormation stack for a new account.

    Assumes a role in the logging account, checks whether the stack already
    exists, otherwise creates it and polls until creation finishes. The
    incoming *event* dict is mutated and returned as the step result.

    NOTE(review): the result keys/messages below contain typos
    ('instrcution', 'pipleine', 'sessison') that are preserved because
    downstream steps may read them verbatim. Also, if the stack reaches
    CREATE_COMPLETE without an 'Outputs' entry ever being observed,
    `outputs` is referenced before assignment — confirm this cannot happen.
    """
    logger.debug("Received event: %s for provisioning logs bucket", json.dumps(event))
    result = event
    new_aws_account = event['accountId']
    devops_artifact_bucket = get_mandatory_evar("DEVOPS_ARTIFACT_BUCKET")
    log_pipeline_provisioning_template_location = get_mandatory_evar("LOG_PIPELINE_CREATION_CFT_LOCATION")
    log_pipeline_provisioning_template_version = get_mandatory_evar("LOG_PIPELINE_CREATION_CFT_VERSION")
    logging_account_trust_role = get_mandatory_evar("LOGGING_ACCOUNT_TRUST_ROLE")
    # Work in the logging account via a short-lived assumed-role session.
    response = sts_client.assume_role(
        RoleArn=logging_account_trust_role,
        DurationSeconds=1800,
        RoleSessionName="devops-account-sessison-"+str(uuid.uuid4())
    )
    session = boto3.Session(
        aws_access_key_id=response['Credentials']['AccessKeyId'],
        aws_secret_access_key=response['Credentials']['SecretAccessKey'],
        aws_session_token=response['Credentials']['SessionToken']
    )
    cft_client = session.client('cloudformation')
    stack_name = "pac-{}-log-pipeline-provisioning".format(new_aws_account)
    try:
        # If the stack exists (and is not deleted), report its state and stop.
        describe_stack_response = cft_client.describe_stacks(StackName=stack_name)
        logger.info("describe_stack_response is {}".format(str(describe_stack_response)))
        stack = describe_stack_response['Stacks'][0]
        if stack['StackStatus'] !='DELETE_COMPLETE':
            result['logPipelineStackName'] = stack_name
            result['logPipelineAlreadyExists']= True
            if stack['StackStatus'] in ['CREATE_COMPLETE','UPDATE_COMPLETE']:
                result['provisioningLogBucketStatus'] = 'COMPLETED'
            else:
                result['provisioningLogBucketStatus'] = stack['StackStatus']
                result['instrcution'] = "Please cleanup the log pipleine stack and rerun the step."
            return result
    except Exception as e:
        # describe_stacks raises when the stack does not exist; fall through
        # to creation. Other errors are also swallowed here (logged only).
        logger.error(str(e))
    result['provisioningLogPipelineCFTVersion'] = log_pipeline_provisioning_template_version
    create_stack_response = cft_client.create_stack(
        StackName= stack_name,
        TemplateURL=log_pipeline_provisioning_template_location,
        Parameters=[
            {
                'ParameterKey': 'AccountId',
                'ParameterValue': new_aws_account
            },
            {
                'ParameterKey': 'DevopsArtifactsBucket',
                'ParameterValue': devops_artifact_bucket
            },
            {
                'ParameterKey': 'LogPipelineCFTVersion',
                'ParameterValue': log_pipeline_provisioning_template_version
            }],
        Capabilities=['CAPABILITY_NAMED_IAM'],
        EnableTerminationProtection=True)
    logger.info("Create stack response is {} ".format(create_stack_response))
    time.sleep(20)
    stack_status =cft_client.describe_stacks(StackName= stack_name)
    logger.info("log bucket provisioning status is {} ".format(str(stack_status)))
    if len(stack_status['Stacks'])==0:
        raise Exception("Unable to find the log bucket stack")
    result['logPipelineStackName'] = stack_name
    status = stack_status['Stacks'][0]['StackStatus']
    if 'Outputs' in stack_status['Stacks'][0]:
        outputs = stack_status['Stacks'][0]['Outputs']
    # Poll every 5s until creation leaves the in-progress state.
    while status == 'CREATE_IN_PROGRESS':
        time.sleep(5)
        stack_status = cft_client.describe_stacks(StackName=stack_name)
        status = stack_status['Stacks'][0]['StackStatus']
        if 'Outputs' in stack_status['Stacks'][0]:
            outputs = stack_status['Stacks'][0]['Outputs']
        logger.info("log bucket provisioning status is {} ".format(str(stack_status)))
    if status == 'CREATE_COMPLETE':
        for output in outputs:
            if output['OutputKey'] == 'BucketName':
                result['logBucket'] = output['OutputValue']
        result['provisioningLogBucketStatus'] = 'COMPLETED'
        return result
    else:
        logger.info("Creation of log bucket has some issues. Please check")
        result['provisioningLogBucketStatus'] = 'FAILED'
        return result
def get_mandatory_evar(evar_name):
    """Return the value of a required environment variable.

    Raises:
        RuntimeError: if *evar_name* is absent from os.environ.
    """
    if evar_name in os.environ:
        return os.environ[evar_name]
    raise RuntimeError("Missing environment variable: {}".format(evar_name))
|
class Pound:
    """Model of a British one-pound coin.

    Attributes: rare (collector variant), value (GBP), color, diameter and
    thickness in millimetres, num_edges, heads (which face is up).
    """

    def __init__(self, rare=False):
        self.rare = rare
        # Rare variants are valued above face value.
        if self.rare:
            self.value = 1.25
        else:
            self.value = 1.00
        self.color = "gold"
        self.diameter = 22.5  # mm
        self.num_edges = 1
        self.thickness = 3.15  # mm
        self.heads = True

    def rust(self):
        self.color = "greenish"

    def clean(self):
        self.color = "gold"

    def flip(self):
        # Local import: the original module never imported random, so
        # flip() raised NameError.
        import random
        head_option = [True, False]
        # Bug fix: the original assigned to self.head, leaving the
        # self.heads attribute set in __init__ permanently stale.
        self.heads = random.choice(head_option)

    def __del__(self):
        print("Coin spent!")
|
# Replace sofa-leg regions found by template matching with a substitute
# leg image, trying the mirrored template first, then the original.
import cv2
import numpy as np
img = cv2.imread('SOfaOutput_img.png')
LegInImg = cv2.imread('sofawithleg.jpg')
LegInImgRight = cv2.flip(LegInImg, 1)
# NOTE(review): `list` shadows the built-in list type.
list = [LegInImgRight,LegInImg]
Repleg = cv2.imread('leg.jpg')
for i in list:
    # NOTE(review): the inner loop below rebinds `i` from the template
    # image to an integer index, so the outer loop variable is lost.
    res = cv2.matchTemplate(img, i, cv2.TM_CCOEFF_NORMED)
    loc = np.where (res >= 0.98)
    for i in range(len(loc[0])):
        posToDelete = (loc[0][i], loc[1][i])
        posToAdd = (loc[0][i] -1 , loc[1][i]) # -1 pixels up +1 pixles down (if need)
        # Clamp the paste position so the replacement stays inside img.
        posToAdd = (max(0, min(posToAdd[0],img.shape[0]-1 -Repleg.shape[0] )) , max(0, min(posToAdd[1],img.shape[1]-1-Repleg.shape[1])))
        #img[posToDelete[0]:posToDelete[0] + LegInImg.shape[0],posToDelete[1]:posToDelete[1] + LegInImg.shape[1]] = (255,255,255)
        img[posToAdd[0]:posToAdd[0] + Repleg.shape[0], posToAdd[1]:posToAdd[1] + Repleg.shape[1]] = Repleg
    # NOTE(review): this compares the LAST inner match index against 1 —
    # presumably intended to flip Repleg after the first template pass;
    # confirm the intended condition.
    if i != 1:
        Repleg = cv2.flip(Repleg, 1)
    pass
cv2.imshow("Frame", img)
#cv2.imwrite("NIce_output.png",img)
cv2.waitKey(0)
|
class Settings():
    """Static configuration for the Kulami game: window, artwork and AI."""

    def __init__(self):
        # Window geometry and background.
        self.screen_width = 900
        self.screen_height = 900
        self.bg_color = (243, 222, 187)
        self.initial_background = "image/kulami.jpg"
        # Board artwork.
        self.hole_size = 50
        self.tile_edge_color = (150, 127, 103)
        # Text rendering.
        self.font_color = (154, 202, 64)
        self.font = 'image/Phosphate-Solid.ttf'
        # 0 means robot first, 1 means player first.
        self.robot_turn = 1
        # Robot "IQ": depth of the game-tree search.
        self.robot_IQ = 6
        self.trainset_address = "trainset"
##################################################
# cpdb_services_types.py
# generated by ZSI.generate.wsdl2python
##################################################
import ZSI
import ZSI.TCcompound
from ZSI.schema import LocalElementDeclaration, ElementDeclaration, TypeDefinition, GTD, GED
##############################
# targetNamespace
# cpdbns
##############################
class ns0:
    """Generated ZSI typecode container for targetNamespace "cpdbns".

    Each nested *_Dec class is a ZSI ComplexType element declaration for one
    SOAP request or response message of the ConsensusPathDB web service; its
    nested Holder class is the pyclass that carries the (de)serialized field
    values.  This is wsdl2python output — regenerate from the WSDL rather
    than editing by hand.
    """
    targetNamespace = "cpdbns"
    # --- getCpdbVersion request (no fields) / response (version string) ---
    class getCpdbVersion_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getCpdbVersion"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getCpdbVersion_Dec.schema
            TClist = []
            kw["pname"] = ("cpdbns","getCpdbVersion")
            kw["aname"] = "_getCpdbVersion"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    return
            Holder.__name__ = "getCpdbVersion_Holder"
            self.pyclass = Holder
    class getCpdbVersionResponse_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getCpdbVersionResponse"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getCpdbVersionResponse_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"cpdbVersion"), aname="_cpdbVersion", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getCpdbVersionResponse")
            kw["aname"] = "_getCpdbVersionResponse"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._cpdbVersion = None
                    return
            Holder.__name__ = "getCpdbVersionResponse_Holder"
            self.pyclass = Holder
    # --- getAvailableAccessionTypes request/response ---
    class getAvailableAccessionTypes_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getAvailableAccessionTypes"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getAvailableAccessionTypes_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"entityType"), aname="_entityType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getAvailableAccessionTypes")
            kw["aname"] = "_getAvailableAccessionTypes"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._entityType = None
                    return
            Holder.__name__ = "getAvailableAccessionTypes_Holder"
            self.pyclass = Holder
    class getAvailableAccessionTypesResponse_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getAvailableAccessionTypesResponse"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getAvailableAccessionTypesResponse_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"accType"), aname="_accType", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getAvailableAccessionTypesResponse")
            kw["aname"] = "_getAvailableAccessionTypesResponse"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._accType = []
                    return
            Holder.__name__ = "getAvailableAccessionTypesResponse_Holder"
            self.pyclass = Holder
    # --- mapAccessionNumbers request/response ---
    class mapAccessionNumbers_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "mapAccessionNumbers"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.mapAccessionNumbers_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"accType"), aname="_accType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"accNumbers"), aname="_accNumbers", minOccurs=1, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","mapAccessionNumbers")
            kw["aname"] = "_mapAccessionNumbers"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._accType = None
                    self._accNumbers = []
                    return
            Holder.__name__ = "mapAccessionNumbers_Holder"
            self.pyclass = Holder
    class mapAccessionNumbersResponse_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "mapAccessionNumbersResponse"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.mapAccessionNumbersResponse_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"accNumber"), aname="_accNumber", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"cpdbId"), aname="_cpdbId", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","mapAccessionNumbersResponse")
            kw["aname"] = "_mapAccessionNumbersResponse"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._accNumber = []
                    self._cpdbId = []
                    return
            Holder.__name__ = "mapAccessionNumbersResponse_Holder"
            self.pyclass = Holder
    # --- getAvailableFsetTypes request/response ---
    class getAvailableFsetTypes_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getAvailableFsetTypes"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getAvailableFsetTypes_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"entityType"), aname="_entityType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getAvailableFsetTypes")
            kw["aname"] = "_getAvailableFsetTypes"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._entityType = None
                    return
            Holder.__name__ = "getAvailableFsetTypes_Holder"
            self.pyclass = Holder
    class getAvailableFsetTypesResponse_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getAvailableFsetTypesResponse"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getAvailableFsetTypesResponse_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"fsetType"), aname="_fsetType", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"description"), aname="_description", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getAvailableFsetTypesResponse")
            kw["aname"] = "_getAvailableFsetTypesResponse"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._fsetType = []
                    self._description = []
                    return
            Holder.__name__ = "getAvailableFsetTypesResponse_Holder"
            self.pyclass = Holder
    # --- getDefaultBackgroundSize request/response ---
    class getDefaultBackgroundSize_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getDefaultBackgroundSize"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getDefaultBackgroundSize_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"fsetType"), aname="_fsetType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"accType"), aname="_accType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getDefaultBackgroundSize")
            kw["aname"] = "_getDefaultBackgroundSize"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._fsetType = None
                    self._accType = None
                    return
            Holder.__name__ = "getDefaultBackgroundSize_Holder"
            self.pyclass = Holder
    class getDefaultBackgroundSizeResponse_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getDefaultBackgroundSizeResponse"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getDefaultBackgroundSizeResponse_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"bgSize"), aname="_bgSize", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getDefaultBackgroundSizeResponse")
            kw["aname"] = "_getDefaultBackgroundSizeResponse"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._bgSize = None
                    return
            Holder.__name__ = "getDefaultBackgroundSizeResponse_Holder"
            self.pyclass = Holder
    # --- overRepresentationAnalysis request/response ---
    class overRepresentationAnalysis_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "overRepresentationAnalysis"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.overRepresentationAnalysis_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"entityType"), aname="_entityType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"fsetType"), aname="_fsetType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"cpdbIdsFg"), aname="_cpdbIdsFg", minOccurs=1, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"cpdbIdsBg"), aname="_cpdbIdsBg", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"accType"), aname="_accType", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TCnumbers.FPfloat(pname=(ns,"pThreshold"), aname="_pThreshold", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","overRepresentationAnalysis")
            kw["aname"] = "_overRepresentationAnalysis"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._entityType = None
                    self._fsetType = None
                    self._cpdbIdsFg = []
                    self._cpdbIdsBg = []
                    self._accType = None
                    self._pThreshold = None
                    return
            Holder.__name__ = "overRepresentationAnalysis_Holder"
            self.pyclass = Holder
    class overRepresentationAnalysisResponse_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "overRepresentationAnalysisResponse"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.overRepresentationAnalysisResponse_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"name"), aname="_name", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"details"), aname="_details", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"overlappingEntitiesNum"), aname="_overlappingEntitiesNum", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"allEntitiesNum"), aname="_allEntitiesNum", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"pValue"), aname="_pValue", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"qValue"), aname="_qValue", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","overRepresentationAnalysisResponse")
            kw["aname"] = "_overRepresentationAnalysisResponse"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._name = []
                    self._details = []
                    self._overlappingEntitiesNum = []
                    self._allEntitiesNum = []
                    self._pValue = []
                    self._qValue = []
                    return
            Holder.__name__ = "overRepresentationAnalysisResponse_Holder"
            self.pyclass = Holder
    # --- enrichmentAnalysis request/response ---
    class enrichmentAnalysis_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "enrichmentAnalysis"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.enrichmentAnalysis_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"entityType"), aname="_entityType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"fsetType"), aname="_fsetType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"cpdbIdsMeasurements"), aname="_cpdbIdsMeasurements", minOccurs=1, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TCnumbers.FPfloat(pname=(ns,"pThreshold"), aname="_pThreshold", minOccurs=0, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","enrichmentAnalysis")
            kw["aname"] = "_enrichmentAnalysis"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._entityType = None
                    self._fsetType = None
                    self._cpdbIdsMeasurements = []
                    self._pThreshold = None
                    return
            Holder.__name__ = "enrichmentAnalysis_Holder"
            self.pyclass = Holder
    class enrichmentAnalysisResponse_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "enrichmentAnalysisResponse"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.enrichmentAnalysisResponse_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"name"), aname="_name", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"details"), aname="_details", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"measuredEntitiesNum"), aname="_measuredEntitiesNum", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"allEntitiesNum"), aname="_allEntitiesNum", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"pValue"), aname="_pValue", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"qValue"), aname="_qValue", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","enrichmentAnalysisResponse")
            kw["aname"] = "_enrichmentAnalysisResponse"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._name = []
                    self._details = []
                    self._measuredEntitiesNum = []
                    self._allEntitiesNum = []
                    self._pValue = []
                    self._qValue = []
                    return
            Holder.__name__ = "enrichmentAnalysisResponse_Holder"
            self.pyclass = Holder
    # --- getCpdbIdsInFset request/response ---
    class getCpdbIdsInFset_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getCpdbIdsInFset"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getCpdbIdsInFset_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"fsetId"), aname="_fsetId", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"fsetType"), aname="_fsetType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded")), ZSI.TC.String(pname=(ns,"entsetType"), aname="_entsetType", minOccurs=1, maxOccurs=1, nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getCpdbIdsInFset")
            kw["aname"] = "_getCpdbIdsInFset"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._fsetId = None
                    self._fsetType = None
                    self._entsetType = None
                    return
            Holder.__name__ = "getCpdbIdsInFset_Holder"
            self.pyclass = Holder
    class getCpdbIdsInFsetResponse_Dec(ZSI.TCcompound.ComplexType, ElementDeclaration):
        literal = "getCpdbIdsInFsetResponse"
        schema = "cpdbns"
        def __init__(self, **kw):
            ns = ns0.getCpdbIdsInFsetResponse_Dec.schema
            TClist = [ZSI.TC.String(pname=(ns,"cpdbIds"), aname="_cpdbIds", minOccurs=0, maxOccurs="unbounded", nillable=False, typed=False, encoded=kw.get("encoded"))]
            kw["pname"] = ("cpdbns","getCpdbIdsInFsetResponse")
            kw["aname"] = "_getCpdbIdsInFsetResponse"
            self.attribute_typecode_dict = {}
            ZSI.TCcompound.ComplexType.__init__(self,None,TClist,inorder=0,**kw)
            class Holder:
                typecode = self
                def __init__(self):
                    # pyclass
                    self._cpdbIds = []
                    return
            Holder.__name__ = "getCpdbIdsInFsetResponse_Holder"
            self.pyclass = Holder
# end class ns0 (tns: cpdbns)
|
from ereuse_devicehub.resources.device.models import Device
from teal.resource import View
class DeviceView(View):
    """REST view exposing Device resources via the resource schema."""
    def one(self, id: int):
        """Gets one device."""
        device_row = Device.query.filter_by(id=id).one()
        return self.schema.jsonify(device_row)
    def find(self, args: dict):
        """Gets many devices"""
        all_devices = Device.query.all()
        return self.schema.jsonify(all_devices, many=True)
|
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
import pymysql
from sqlalchemy import or_
# Register PyMySQL as the MySQLdb driver expected by SQLAlchemy.
pymysql.install_as_MySQLdb()
# Create a Flask application for frontend interaction and database access.
app = Flask(__name__)
# Database connection URL.
app.config['SQLALCHEMY_DATABASE_URI']="mysql://root:120913@localhost:3306/flask"
# Auto-commit after each request teardown (equivalent to db.session.commit()).
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']=True
db = SQLAlchemy(app)
#根据现有的表结构构建模型类
class Users(db.Model):
    """ORM model mapped to the existing 'users' table."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80),unique=True)
    age = db.Column(db.Integer)
    email = db.Column(db.String(120), unique=True)
    def __init__(self, username, age, email):
        self.username = username
        self.age = age
        self.email = email
    def __repr__(self):
        return '<Users:%r>' % self.username
class Course(db.Model):
    """ORM model mapped to the 'course' table; one course has many teachers."""
    __tablename__="course"
    id=db.Column(db.Integer,primary_key=True)
    cname=db.Column(db.String(30))
    # Reverse reference: returns the list of teachers related to this course.
    # backref: defines the reverse relation. It effectively adds a 'course_id'
    # attribute to the Teacher entity that can replace the raw foreign-key value
    # when accessing the Course model — yielding a model object, not a key.
    # Two birds with one stone: Teacher gains a 'course_id' attribute and Course
    # gains a 'teachers' attribute, enabling queries in both directions.
    teachers=db.relationship('Teacher',backref='course_id',lazy='dynamic')
    def __init__(self,cname):
        self.cname=cname
class Teacher(db.Model):
    """ORM model mapped to the 'teacher' table; each teacher belongs to a course."""
    __tablename__ = 'teacher'
    id = db.Column(db.Integer, primary_key=True)
    tname = db.Column(db.String(30))
    tage = db.Column(db.Integer)
    # Extra column: course_id, a foreign key referencing the primary key (id)
    # of the parent table (course).
    course_id = db.Column(db.Integer, db.ForeignKey('course.id'))
    def __init__(self, tname, tage):
        self.tname = tname
        self.tage = tage
    def __repr__(self):
        return "<Teacher %r>" % self.tname
# Map the model classes defined above back to the database schema.
# WARNING: drop_all() destroys every existing table (and its data) first.
db.drop_all()
db.create_all()
@app.route('/insert_user')
def insert():
    """Insert one demo user; persistence relies on commit-on-teardown."""
    demo_user = Users('王hah', 40, 'wanghah@163.com')
    db.session.add(demo_user)
    return "Insert OK"
@app.route('/query')
def query_views():
    """Query demo: print and acknowledge all Users rows with id > 3.

    The dozens of commented-out experiments that previously lived here
    (session-based queries, filter/or_/like/in_ conditions, limit/offset,
    order_by and group_by examples) were dead code and have been removed.
    """
    # Model-based query: every user whose id is greater than 3.
    users = Users.query.filter(Users.id>3).all()
    print(users)
    return "Query OK"
#Exercise
#Approach 1: passing the id in the URL path is a Flask-specific style; <a href="/query_by_id/{{ u.id }}">
@app.route('/query_all')
def query_all():
    """Render every Users row on 01_users.html.

    NOTE: the template reads the 'users' variable out of locals() — do not
    rename the local.
    """
    # Fetch every row in the Users table.
    users=db.session.query(Users).all()
    return render_template('01_users.html',params=locals())
@app.route('/query_by_id/<int:id>')
def query_by(id):
    """Render one user looked up by the id embedded in the URL path.

    NOTE: the template reads 'user' out of locals() — do not rename it.
    """
    user = db.session.query(Users).filter_by(id=id).first()
    return render_template('02_user.html',params=locals())
#Approach 2: more conventional — pass the id as an HTTP query-string parameter: <a href="/query_by_id_2?id={{ u.id }}">
@app.route('/query_all_2')
def query_all_2():
    """Render all users (links pass the id via the query string).

    NOTE: the template reads 'users' out of locals() — do not rename it.
    """
    # Fetch every row in the Users table.
    users=db.session.query(Users).all()
    return render_template('01_users_2.html',params=locals())
@app.route('/query_by_id_2')
def query_by_2():
    """Render one user, with the id passed as a query-string parameter."""
    # Receive the query string sent by the frontend via the address bar.
    id=request.args.get('id')
    # Fetch the user's record by id.
    user = db.session.query(Users).filter_by(id=id).first()
    # Send the user object to the 02-user.html template for display.
    return render_template('02_user.html',params=locals())
@app.route('/delete_user')
def delete_user():
    """Delete the user row with the fixed id 3."""
    target = db.session.query(Users).filter_by(id=3).first()
    db.session.delete(target)
    return 'Delete OK'
@app.route('/update_user')
def update_user():
    """Fetch the user with id 5, change name and age, stage for commit."""
    # 1) fetch
    target = Users.query.filter_by(id=5).first()
    # 2) modify
    target.username = 'Wang Wc'
    target.age = 40
    # 3) stage; the commit happens automatically on request teardown
    db.session.add(target)
    return 'Update OK'
#Delete feature
@app.route('/delete')
def delete_views():
    """Delete the user whose id arrives in the query string, then redirect back."""
    # Receive the id value from the incoming request.
    id=request.args.get('id')
    # Look up the matching model object by id.
    user=Users.query.filter_by(id=id).first()
    # Delete the model object.
    db.session.delete(user)
    # request.headers: access the request headers.
    # 'referer' header: the source address the request came from.
    # referer = request.headers.get('referer', '')
    url=request.headers.get('referer','/query_all')
    print(url)
    return redirect(url)
#Update feature
@app.route('/update',methods=['GET','POST'])
def update_views():
    """GET: render the edit form for a user; POST: apply the submitted edits.

    NOTE: in the GET branch the template reads 'user' out of locals().
    """
    if request.method=='GET':
        # Receive the id passed by the frontend.
        id=request.args.get('id')
        # Look up the corresponding entity object by id.
        user=Users.query.filter_by(id=id).first()
        # Show the entity object on the 03-update.html template.
        return render_template('03_update.html',params=locals())
    else:
        # Receive the four parameters submitted by the frontend form.
        id=request.form['id'] #id=request.form.get('id')
        username=request.form['username']
        age=request.form['age']
        email=request.form['email']
        # Fetch
        user=Users.query.filter_by(id=id).first()
        # Modify
        user.username=username
        user.age=age
        user.email=email
        # Stage for the auto-commit on teardown
        db.session.add(user)
        return redirect('/query_all_2')
#Insert feature
@app.route('/insert',methods=['GET','POST'])
def insert_views():
    """GET: show the insert form; POST: create a user from the form fields."""
    if request.method!='GET':
        # Form fields are read in the original order: username, age, email.
        new_user = Users(request.form['username'],
                         request.form['age'],
                         request.form['email'])
        db.session.add(new_user)
        return redirect('/query_all_2')
    return render_template('04_insert.html')
if __name__=="__main__":
    # Run the Flask development server with the interactive debugger enabled.
    app.run(debug=True)
import pytest
from pytestqt.qt_compat import qt_api
from src.dock import PangoDockWidget
from PyQt5.QtWidgets import QFileSystemModel, QListView
def test_basic_dock(app):
    """Every dock widget starts visible and carries its expected title."""
    # Assert truthiness directly instead of comparing with `== True`.
    assert app.label_widget.isVisible()
    assert app.label_widget.windowTitle() == "Labels"
    assert app.undo_widget.isVisible()
    assert app.undo_widget.windowTitle() == "History"
    assert app.file_widget.isVisible()
    assert app.file_widget.windowTitle() == "Files"
def test_label_dock(app):
    """The label dock hosts its tree view as the dock's widget."""
    hosted = app.label_widget.widget()
    assert hosted == app.label_widget.tree_view
def test_undo_dock(app):
    """The undo dock hosts its undo view as the dock's widget."""
    hosted = app.undo_widget.widget()
    assert hosted == app.undo_widget.undo_view
    # TODO: Add this when project saves undo stack too
    # app.undo_widget.redo()
    # assert app.undo_widget.undo_view.currentIndex().row() == 1
    # app.undo_widget.undo()
    # assert app.undo_widget.undo_view.currentIndex().row() == 0
def test_file_dock(app):
    """File dock wiring plus prev/next navigation at the list edges."""
    dock = app.file_widget
    assert dock.widget() == dock.file_view
    assert dock.file_model.iconProvider() == dock.th_provider
    current_row = lambda: dock.file_view.currentIndex().row()
    assert current_row() == 0
    dock.select_prev_image()  # already at the first entry: stays put
    assert current_row() == 0
    dock.select_next_image()
    assert current_row() == 1
    dock.select_prev_image()
    assert current_row() == 0
|
import sys
# The DFS below recurses once per vertex; raise CPython's default limit.
sys.setrecursionlimit(10**6)
def dfs(v, seen, group):
    """Label every vertex reachable from `v` with `group`.

    Iterative (explicit-stack) replacement for the recursive original, so a
    huge component cannot blow the interpreter recursion limit that the
    module raises as a workaround.  Marks `seen` and writes labels into the
    module-level `groups`, walking the module-level adjacency list `g`.
    The final contents of `seen` and `groups` are identical to the
    recursive version; only the visit order differs.
    """
    stack = [v]
    seen[v] = True
    groups[v] = group
    while stack:
        cur = stack.pop()
        for nv in g[cur]:
            if seen[nv]:
                continue
            seen[nv] = True
            groups[nv] = group
            stack.append(nv)
# Read an undirected graph (n vertices, m edges) from stdin and label its
# connected components.
n, m = map(int, input().split())
g = [[] for _ in range(n)]  # adjacency list, 0-indexed vertices
for _ in range(m):
    a, b = map(int, input().split())
    a -= 1
    b -= 1
    g[a].append(b)
    g[b].append(a)
# print(g)
groups = [-1] * n  # component label per vertex; -1 means unvisited
group = 0
for st in range(n):
    if groups[st] != -1: continue
    # NOTE(review): allocating a fresh `seen` per start vertex is redundant
    # with the groups[st] != -1 check and costs O(n) per component.
    seen = [False] * n
    dfs(st, seen, group)
    group += 1
# NOTE(review): this prints the highest 0-based component label, i.e. the
# component count minus one — confirm that matches the intended output.
print(max(groups))
|
import re
class GIZAReader(object):
    """Reader for GIZA++ word-alignment output.

    Each alignment record in the file occupies three lines: an info line
    (containing the sentence number in parentheses), the plain target
    sentence, and the source sentence annotated with `word ({ indices })`
    alignment groups.  Parsed records are collected in `aligned_lines`,
    sorted by sentence number.
    """
    def __init__(self, filename):
        """Parse `filename` and populate self.aligned_lines.

        Each entry is a dict with keys:
          'num'       - 0-based sentence number
          'sys'       - tokens of the plain (target) sentence
          'src'       - source tokens, NULL excluded
          'alignment' - list of (src_index, sys_index) pairs; either side
                        may be None (unaligned word / NULL-aligned word)
        """
        self.aligned_lines = list()
        with open(filename, 'r') as giza_file:
            while True:
                # Records come in triplets; an empty read means EOF.
                line_info = giza_file.readline()
                if not line_info:
                    break
                line_plain = giza_file.readline()
                line_aligned = giza_file.readline()
                # Sentence number is the first parenthesised token on the info line.
                line_num = int(re.findall(r'\(([^\)]*)\)', line_info)[0])
                # Parse line with alignments into tuples (word, alignment)
                line_aligned = re.findall(
                    r'([^\(]+) \(\{([^\}]*)\}\)', line_aligned)
                # Alignment is represented as tuples of indices (src, sys)
                alignment = list()
                for (i, w) in enumerate(line_aligned):
                    indices = list(map(int, w[1].split()))
                    if not indices:
                        # Word aligned to nothing; i-1 compensates for the
                        # leading NULL token occupying position 0.
                        alignment.append((i-1, None))
                    else:
                        alignment.extend(
                            [(None, j-1) if w[0] == 'NULL' else (i-1, j-1) for j in indices])
                self.aligned_lines.append({'num': line_num - 1,
                                           'sys': line_plain.split(),
                                           'src': [w[0] for w in line_aligned if w[0] != 'NULL'],
                                           'alignment': alignment})
        self.aligned_lines = sorted(self.aligned_lines, key=lambda x: x['num'])
|
#coding=utf-8
import itertools
import math
import os
import random
import sys
import numpy as np
import cv2
import codecs
from img_utils import *
from jittering_methods import *
from parse_args import parse_args
# Command-line driven configuration for the fake licence-plate generator.
args = parse_args()
fake_resource_dir = sys.path[0] + "/fake_resource/"
output_dir = args.img_dir
# Augmentation parameter ranges taken from the command line.
resample_range = args.resample
gaussian_range = args.gaussian
noise_range = args.noise
rank_blur = args.rank_blur
brightness = args.brightness
motion_blur = args.motion_blur
# NOTE(review): duplicate assignment — fake_resource_dir was already set above.
fake_resource_dir = sys.path[0] + "/fake_resource/"
#output_dir = sys.path[0] + "/test_plate/"
# Per-font character art folders and per-colour plate/screw art folders.
number_dir = [fake_resource_dir + "/numbers/",fake_resource_dir + "/numbers1/", fake_resource_dir + "/numbers2/", fake_resource_dir + "/numbers3/",fake_resource_dir + "/numbers4/"]
letter_dir = [fake_resource_dir + "/letters/" ,fake_resource_dir + "/letters1/", fake_resource_dir + "letters2/", fake_resource_dir + "/letters3/", fake_resource_dir + "/letters4/"]
plate_dir = [fake_resource_dir + "/plate_background_use/", fake_resource_dir + "/plate_background_use1/"]
screw_dir = [fake_resource_dir + "/screw/", fake_resource_dir + "/screw1/"]
# Target pixel heights for character art and plate backgrounds.
# character_y_size = 113
character_y_size = 140
plate_y_size = 328
class FakePlateGenerator():
    """Compose synthetic licence-plate images from character/plate/screw art.

    A random font and plate colour are chosen per instance; plates are built
    by alpha-masking character images onto a plate background, then adding
    screw images and resizing to the requested output size.
    """
    def __init__(self, plate_size):
        # plate_size: (width, height) of the final output image.
        font = random.randint(0,4)
        color = random.randint(0,1)
        self.dst_size = plate_size
        self.numbers = self.load_image(number_dir[font], character_y_size)
        self.letters = self.load_image(letter_dir[font], character_y_size)
        # self.numbers = self.load_imageInv(number_dir, character_y_size)
        # self.letters = self.load_imageInv(letter_dir, character_y_size)
        self.numbers_and_letters = dict(self.numbers, **self.letters)
        #we only use blue plate here
        self.plates = self.load_image(plate_dir[color], plate_y_size)
        self.screws = self.load_screws(screw_dir[color],plate_y_size)
        for i in self.plates.keys():
            self.plates[i] = cv2.cvtColor(self.plates[i], cv2.COLOR_BGR2BGRA)
        # Fixed x positions (pixels) for the two top letters and the bottom digits.
        self.character_position_x_listTop = [270,420]
        self.character_position_x_listBotStart = [130,200,270,340]
        self.character_position_x_listBotRest = []
    def get_radom_sample(self, data):
        """Return a (key, image) pair drawn uniformly at random from `data`."""
        keys = list(data.keys())
        i = random.randint(0, len(data) - 1)
        key = keys[i]
        value = data[key]
        # NOTE: return a deep copy of the matrix so callers can paste onto it.
        return key, value.copy()
    def load_image(self, path, dst_y_size):
        """Load every image under `path`, scaled to height dst_y_size; key = filename stem."""
        img_list = {}
        current_path = sys.path[0]
        listfile = os.listdir(path)
        for filename in listfile:
            img = cv2.imread(path + filename, -1)
            height, width = img.shape[:2]
            # Preserve the aspect ratio, plus a fixed 50px horizontal padding.
            x_size = int(width*(dst_y_size/float(height)))+50
            img_scaled = cv2.resize(img, (x_size, dst_y_size), interpolation = cv2.INTER_CUBIC)
            img_list[filename[:-4]] = img_scaled
        return img_list
    def load_screws(self, path, dst_y_size):
        """Load screw images unscaled; key = filename stem."""
        img_list = {}
        current_path = sys.path[0]
        listfile = os.listdir(path)
        for filename in listfile:
            img = cv2.imread(path + filename, -1)
            img_list[filename[:-4]] = img
        return img_list
    def add_character_to_plateBottom(self, character, plate, x):
        """Alpha-paste `character` onto the bottom row of `plate`, centred at x."""
        h_plate, w_plate = plate.shape[:2]
        h_character, w_character = character.shape[:2]
        start_x = x - int(w_character/2)
        # start_y = int((h_plate - h_character)/2)
        start_y = h_plate//2 + 10
        # Build a binary mask from the alpha channel for the overlay.
        a_channel = cv2.split(character)[3]
        ret, mask = cv2.threshold(a_channel, 100, 255, cv2.THRESH_BINARY)
        overlay_img(character, plate, mask, start_x, start_y)
    def add_screws_to_plate(self, character, plate, x):
        """Alpha-paste a screw image near the top of `plate`, centred at x."""
        h_plate, w_plate = plate.shape[:2]
        h_character, w_character = character.shape[:2]
        start_x = x - int(w_character/2)
        start_y = 50
        a_channel = cv2.split(character)[3]
        ret, mask = cv2.threshold(a_channel, 100, 255, cv2.THRESH_BINARY)
        overlay_img(character, plate, mask, start_x, start_y)
    def add_character_to_plateTop(self, character, plate, x):
        """Alpha-paste `character` onto the top row of `plate`, centred at x."""
        h_plate, w_plate = plate.shape[:2]
        h_character, w_character = character.shape[:2]
        start_x = x - int(w_character/2)
        # start_y = int((h_plate - h_character)/2)
        start_y = 20
        a_channel = cv2.split(character)[3]
        ret, mask = cv2.threshold(a_channel, 100, 255, cv2.THRESH_BINARY)
        overlay_img(character, plate, mask, start_x, start_y)
    def generate_one_plate(self):
        """Build one plate image.

        Returns (plate_img, plate_name, plate_chars): the BGR image, the
        formatted name string and the raw character string.  Two letters go
        on the top row; 3-6 digits (first digit never 0) on the bottom row.
        """
        #self.character_position_x_list = self.character_position_x_listOG
        plate_chars = ""
        _, plate_img = self.get_radom_sample(self.plates)
        plate_name = ""
        num = random.randint(3, 102)#6
        # Clamp to at most 6 digits (values >= 6 are by far the most likely).
        num = 6 if num >= 6 else num
        #i = (len(self.character_position_x_list) - num)//2 - 1
        i = 6 - num
        character, img = self.get_radom_sample(self.letters)
        self.add_character_to_plateTop(img, plate_img, self.character_position_x_listTop[0])
        plate_name += "%s"%(character,)
        plate_chars += character
        character, img = self.get_radom_sample(self.letters)
        self.add_character_to_plateTop(img, plate_img, self.character_position_x_listTop[1])
        plate_name += "%s"%(character,)
        plate_chars += character
        #self.character_position_x_list = [x.__sub__(10) for x in self.character_position_x_list]
        #makes sure first digit does not start with a 0
        #spacing = random.randint(145,155)#150
        self.character_position_x_listBotRest = []#clear()
        for j in range(1,4):
            self.character_position_x_listBotRest.append(self.character_position_x_listBotStart[i] + j*150)
        while True:
            character, img = self.get_radom_sample(self.numbers)
            if int(character) != 0:
                self.add_character_to_plateBottom(img, plate_img, self.character_position_x_listBotStart[i])
                plate_name += character
                plate_chars += character
                break
        for j in range(4,num+1):
            character, img = self.get_radom_sample(self.numbers)
            self.add_character_to_plateBottom(img, plate_img, self.character_position_x_listBotRest[j-4])
            plate_name += character
            plate_chars += character
        screw, img = self.get_radom_sample(self.screws)
        self.add_screws_to_plate(img, plate_img, 120)
        self.add_screws_to_plate(img, plate_img, 560)
        # Convert down to a 3-channel BGR image.
        plate_img = cv2.cvtColor(plate_img, cv2.COLOR_BGRA2BGR)
        # Resize to the requested output size.
        plate_img = cv2.resize(plate_img, self.dst_size, interpolation = cv2.INTER_AREA)
        return plate_img, plate_name, plate_chars
def write_to_txt(fo, img_name, plate_characters):
    """Append one label line 'IMG_NAME;|C|H|A|R|S|' (uppercased) to `fo`.

    The plate characters are joined with '|' separators and wrapped in
    leading/trailing '|' to match the training-label format.
    """
    plate_label = '|' + '|'.join(plate_characters) + '|'
    line = img_name + ';' + plate_label.upper() + '\n'
    # The original called `line.encode('utf8')` and discarded the result —
    # a no-op; `fo` is already opened as a codecs UTF-8 writer.
    fo.write("%s" % line)
if __name__ == "__main__":
# fake_resource_dir = sys.path[0] + "/fake_resource/"
# output_dir = sys.path[0] + "/test_plate/"
img_size = (240, 180)#80, 60
reset_folder(output_dir)
numImgs = args.num_imgs
fo = codecs.open(output_dir + 'labels.txt', "w", encoding='utf-8')
for i in range(0, numImgs):
if i%100==0:
print(i)
fake_plate_generator = FakePlateGenerator( img_size)
plate, plate_name, plate_chars = fake_plate_generator.generate_one_plate()
plate = underline(plate)
plate = jittering_color(plate)
plate = add_noise(plate,noise_range)
plate = jittering_blur(plate,gaussian_range)
plate = resample(plate, resample_range)
plate = jittering_scale(plate)
# plate = perspectiveTransform(plate)
plate = random_rank_blur(plate,rank_blur)
plate = random_motion_blur(plate,motion_blur)
plate = random_brightness(plate, brightness)
file_name = save_random_img(output_dir,plate_chars.upper(), plate)
write_to_txt(fo,file_name,plate_chars)
|
import scrapy
from scrapy import Selector
import re
from lxml import etree
from bs4 import BeautifulSoup as BS
from ljbj.items import LjbjItem
class ljbj(scrapy.Spider):
    """Crawl Lianjia (lianjia.com) closed-deal housing records for Beijing
    districts and the two linked Hebei-area sites.

    Pipeline: district index (parse1) -> sub-area pages (parse2 /
    parse2_extra) -> paginated listing pages (parse3 -> parse4) ->
    per-listing detail pages when needed (parse5).
    """
    name = 'ljbj'

    def __init__(self):
        self.allow_domains = ['lianjia.com']
        self.start_urls = ['https://bj.lianjia.com/chengjiao/']
        self.urlhead = 'https://bj.lianjia.com'

    def start_requests(self):
        yield scrapy.Request(url=self.start_urls[0], callback=self.parse1)

    def parse1(self, response):
        """District index page: fan out one request per district link."""
        info = Selector(response)
        url_firsts = info.xpath('//div[@data-role="ershoufang"]/div/a/@href').extract()
        # All but the last two links are Beijing districts with relative URLs.
        for url_first in url_firsts[0:-2]:
            format_first = self.urlhead + url_first
            yield scrapy.Request(url=format_first, callback=self.parse2)
        # The final two links point at Hebei-area sites and are already absolute.
        for url_first in url_firsts[-2:]:
            yield scrapy.Request(url=url_first, callback=self.parse2_extra)

    def _area_requests(self, response, base_url):
        """Yield one request per distinct sub-area listed on a district page.

        Shared by parse2/parse2_extra, which previously duplicated this code
        and shadowed the builtin ``list`` to hold already-seen area names
        (with O(n) membership tests); a set is used instead.
        """
        info = Selector(response)
        second_urls = info.xpath('//div[@data-role="ershoufang"]/div[2]/a/@href').extract()
        area_names = info.xpath('//div[@data-role="ershoufang"]/div[2]/a/text()').extract()
        seen = set()
        for second_url, area_name in zip(second_urls, area_names):
            if area_name not in seen:
                seen.add(area_name)
                yield scrapy.Request(url=base_url + second_url,
                                     meta={'item': area_name}, callback=self.parse3)

    def parse2(self, response):
        """Beijing district page: request every distinct sub-area."""
        for request in self._area_requests(response, self.urlhead):
            yield request

    def parse2_extra(self, response):
        """Hebei-area district page: same as parse2 but on the lf. host."""
        for request in self._area_requests(response, 'https://lf.lianjia.com'):
            yield request

    def parse3(self, response):
        """Read the deal count and request each listing page (30 rows/page, capped at 100 pages)."""
        info = Selector(response)
        area_name = response.meta['item']
        nums = int(info.xpath('//div[@class="total fl"]/span/text()').extract()[0].strip())
        pages = nums // 30 + 2  # ceiling of nums/30, +1 because range() excludes the stop
        for i in range(1, min(101, pages)):
            url = response.url + 'pg%s/' % str(i)
            yield scrapy.Request(url=url, meta={'item': area_name}, callback=self.parse4)

    def _base_item(self, response, index, house_titles, house_infos):
        """Build an item from the fields available on the listing page itself.

        Extracted from parse4, where this block was duplicated in both
        branches.
        """
        item = LjbjItem()
        item['area_name'] = response.meta['item']
        # Title text looks like "<location> <layout> <area>" — split on whitespace.
        house_title = house_titles[index].split()
        item['location'] = house_title[0]
        item['house_type'] = house_title[1]
        item['floor_space'] = house_title[2]
        # houseInfo is pipe-separated: orientation | decoration [| more]
        house_info = house_infos[index].split('|')
        item['toward'] = house_info[0]
        item['decorate_type'] = house_info[1]
        return item

    def parse4(self, response):
        """Listing page: emit items directly when the deal date is visible,
        otherwise follow each listing's detail page for date and prices."""
        info = Selector(response)
        house_title_xpath = '//div[@class="info"]/div[@class="title"]/a/text()'
        url_xpath = '//div[@class="info"]/div[@class="title"]/a/@href'
        house_info_xpath = '//div[@class="address"]/div[@class="houseInfo"]/text()'
        dealDate_xpath = '//div[@class="address"]/div[@class="dealDate"]/text()'
        totalPrice_xpath = '//div[@class="address"]/div[@class="totalPrice"]//text()'
        unitPrice_xpath = '//div[@class="unitPrice"]//text()'
        urls = info.xpath(url_xpath).extract()
        house_titles = info.xpath(house_title_xpath).extract()
        house_infos = info.xpath(house_info_xpath).extract()
        dealDates = info.xpath(dealDate_xpath).extract()
        # Drop the unit-label text nodes so prices align with listings.
        totalPrices = [i for i in info.xpath(totalPrice_xpath).extract() if i != u'万']
        unitPrices = [i for i in info.xpath(unitPrice_xpath).extract() if i != u'元/平']
        if u'成交' in dealDates[0]:
            # Date hidden on the listing page; get date/prices from the detail page.
            for index in range(len(urls)):
                item = self._base_item(response, index, house_titles, house_infos)
                yield scrapy.Request(url=urls[index], meta={'item': item}, callback=self.parse5)
        elif int(dealDates[0].split('.')[0]) >= 2017:
            # Keep the crawl window bounded: only deals closed in 2017 or later.
            for index in range(len(urls)):
                item = self._base_item(response, index, house_titles, house_infos)
                item['dealDate'] = dealDates[index]
                item['totalPrice'] = totalPrices[index]
                item['unitPrice'] = unitPrices[index]
                yield item
        else:
            pass

    def parse5(self, response):
        """Detail page: fill in the deal date and prices, blank if absent."""
        info = Selector(response)
        item = response.meta['item']
        item['dealDate'] = info.xpath('//div[@class="wrapper"]/span/text()').extract()[0]
        try:
            item['totalPrice'] = info.xpath('//span[@class="dealTotalPrice"]/i/text()').extract()[0]
            item['unitPrice'] = info.xpath('//div[@class="price"]/b/text()').extract()[0]
        except IndexError:
            # Narrowed from a bare except: the only expected failure here is
            # extract() returning an empty list when the price nodes are missing.
            item['totalPrice'] = ''
            item['unitPrice'] = ''
        yield item
|
import argparse
import random
import parsl
from parsl.app.app import App
from parsl.tests.configs.local_threads import config
@App('python')
def map_one(x, dur):
    """Sleep for *dur* seconds, then return the input doubled."""
    import time
    time.sleep(dur)
    return 2 * x
@App('python')
def map_two(x, dur):
    """Sleep for *dur* seconds, then return the input multiplied by five."""
    import time
    time.sleep(dur)
    return 5 * x
@App('python')
def add_two(x, y, dur):
    """Sleep for *dur* seconds, then return the sum of the two inputs."""
    import time
    time.sleep(dur)
    return y + x
def test_func_1(width=2):
    """Two-stage fan-out pipeline: map_one doubles each of 1..width (with a
    small random delay), then map_two multiplies each result by five.

    Asserts the summed results equal ``10 * sum(1..width)`` and returns the
    second-stage futures. The original built both stages with the
    ``fu_1.extend([fu])`` anti-idiom; comprehensions replace the loops.
    """
    stage_one = [map_one(i, random.randint(0, 5) / 10) for i in range(1, width + 1)]
    stage_two = [map_two(fu, 0) for fu in stage_one]
    assert sum(f.result() for f in stage_two) == sum(
        range(1, width + 1)) * 10, "Sums do not match"
    return stage_two
def test_func_2(width=2):
    """Map-then-reduce pipeline: map_one doubles 1..width, then add_two sums
    adjacent pairs (0,1), (2,3), ...

    The assertion ``sum == 2 * sum(1..width)`` only holds for even *width*
    (an odd trailing element is dropped) — unchanged from the original.
    ``range(0, width - 1, 2)`` replaces the original's sliced range object
    ``range(0, width + 1, 2)[0:-1]``, which yields the same indices.
    """
    stage_one = [map_one(i, random.randint(0, 5)) for i in range(1, width + 1)]
    stage_two = [add_two(stage_one[i], stage_one[i + 1], 0)
                 for i in range(0, width - 1, 2)]
    assert sum(f.result() for f in stage_two) == sum(
        range(1, width + 1)) * 2, "Sums do not match"
    return stage_two
if __name__ == '__main__':
    parsl.clear()
    parsl.load(config)
    parser = argparse.ArgumentParser()
    parser.add_argument("-w", "--width", default="10",
                        help="width of the pipeline")
    # Fixed help text: the original said "Count of apps to launch", copy-pasted
    # from another flag; --debug actually enables parsl's stream logger below.
    parser.add_argument("-d", "--debug", action='store_true',
                        help="enable parsl debug logging to stdout")
    args = parser.parse_args()
    if args.debug:
        parsl.set_stream_logger()
    # Run each pipeline test; an AssertionError means the futures' summed
    # results did not match the expected closed-form value.
    tests = [test_func_1, test_func_2]
    for test in tests:
        print("*" * 50)
        try:
            test(width=int(args.width))
        except AssertionError as e:
            print("[TEST] %s [FAILED]" % test.__name__)
            print(e)
        else:
            print("[TEST] %s type [SUCCESS]" % test.__name__)
        print("*" * 50)
|
"""
Nipype-pipeline: anatomical preprocessing
Anatomical data preprocessing steps:
1. Transform slices (from oblique to axial orientation and) to FSL std orientation
2. Skull strip (Can be done with BET before entering the pipeline. In that case, leave out.)
3. Tissue segmentation
4. Register to MNI152 standard template with linear and nonlinear registration
Running in terminal:
ipython
(in python terminal):
%run preprocess_anatomical_data_Silja.py
Hanna Halme // 2015
Questions and comments: hanna.halme@hus.fi
Updated version: Riikka Ruuth // 2019
"""
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.afni as afni # afni
import copy
from nipype.interfaces.base import File, traits
import nipype.pipeline.engine as pe # pypeline engine
import os
import numpy as np
import multiprocessing
import traceback
# Location of the raw anatomical image data and of the preprocessing outputs.
data_path = '/opt1/MR_data/Silja_Raty/New_rs/image_data/'
results_path = '/opt1/MR_data/Silja_Raty/New_rs/results/Patient_data_test2/'
# Subject directories (under data_path) to process.
subj_dirs = ['Revis_0002_rs']
# Reference brain images: FSL 5.0 MNI152 2 mm templates and FNIRT config.
ref_brain = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz'
ref_mask = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain_mask.nii.gz'
reference_skull = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm.nii.gz'
fnirt_config = '/usr/share/fsl/5.0/etc/flirtsch/T1_2_MNI152_2mm.cnf'
owd = os.getcwd() # original working directory, added by Riikka Ruuth
def prepro_anat(k):
    """Run the anatomical preprocessing workflow for one subject directory.

    Nipype workflow: deoblique -> reorient to FSL standard -> AFNI skull
    strip -> FSL FAST segmentation -> FLIRT registration to the session-1
    high-resolution anatomical and FLIRT/FNIRT registration to the MNI152
    2 mm template. Outputs go under results_path/<subj>/<session>/anat.
    All exceptions are caught and printed so a multiprocessing pool worker
    never raises.
    """
    try:
        subj = k
        # Only session2 is processed in this script.
        for s in (['session2']):
            # Skip subjects without data for this session.
            if (not os.path.isdir(data_path +subj+'/'+s)):
                continue
            # Skip subjects whose final FNIRT output already exists.
            if (os.path.isfile(results_path +subj +'/'+s+'/anat/nonlinear_reg/anat_HR_reoriented_warped.nii.gz')):
                print "Skipping "+ subj +'/' + s
                continue
            '''
            if (os.path.isfile(pilot_path +subj +'/anat/nonlinear_reg/anat_reoriented_skullstrip_warped.nii.gz')):
                print "Skipping "+ subj +'/' + s
                continue
            '''
            # Create each level of the results directory tree if missing.
            try:
                os.stat(results_path)
            except:
                os.mkdir(results_path)
            try:
                os.stat(results_path+subj)
            except:
                os.mkdir(results_path+subj)
            try:
                os.stat(results_path+subj+'/'+s)
            except:
                os.mkdir(results_path+subj+'/'+s)
            # Nipype writes its working/output files relative to the cwd.
            os.chdir(results_path+subj+'/'+s)
            print "Currently processing subject: ", subj+'/'+s
            anat = data_path + subj +'/'+ s + '/anat_HR.nii.gz'
            # Initialize workflow
            workflow = pe.Workflow(name='anat')
            workflow.base_dir = '.'
            # Step 1: transform oblique slices and reorient to FSL standard orientation
            deoblique = pe.Node(interface=afni.Warp(in_file=anat, deoblique=True, outputtype='NIFTI_GZ'), name='deoblique') #leave out if you don't need this
            reorient = pe.Node(interface=fsl.Reorient2Std(output_type='NIFTI_GZ'), name='reorient')
            workflow.connect(deoblique, 'out_file', reorient, 'in_file')
            # Step 2: AFNI skullstrip
            skullstrip = pe.Node(interface=afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip')
            workflow.connect(reorient, 'out_file', skullstrip, 'in_file')
            # Step 3: tissue segmentation with FSL FAST (3 classes, T1 input)
            segmentation = pe.Node(interface=fsl.FAST(number_classes=3, use_priors=True, img_type=1), name='segmentation')
            # NOTE(review): these set attributes on the Node object, not on
            # the FAST interface inputs (segmentation.inputs.segments) —
            # confirm they actually take effect.
            segmentation.segments = True
            segmentation.probability_maps = True
            workflow.connect(skullstrip, 'out_file', segmentation, 'in_files')
            # Rigid (6 dof) registration to this subject's session-1 HR anatomical
            hranat = results_path + subj+'/session1/anat/reorient/anat_HR_brain_reoriented.nii.gz'
            #anat2hr = pe.Node(interface=fsl.FLIRT(no_search=True, reference=hranat), name='anat2hr')
            anat2hr = pe.Node(interface=fsl.FLIRT(dof=6, reference=hranat), name='anat2hr')
            workflow.connect(reorient, 'out_file', anat2hr, 'in_file')
            # Step 4: register to standard MNI template
            #1. linear
            linear_reg = pe.Node(interface=fsl.FLIRT(cost='corratio', reference=ref_brain), name='linear_reg')
            #2.nonlinear (seeded with the linear affine)
            nonlinear_reg = pe.Node(interface=fsl.FNIRT(fieldcoeff_file=True, jacobian_file=True, ref_file=ref_brain, refmask_file=ref_mask), name='nonlinear_reg')
            inv_flirt_xfm = pe.Node(interface=fsl.utils.ConvertXFM(invert_xfm=True), name='inv_linear_xfm')
            workflow.connect(skullstrip, 'out_file', linear_reg, 'in_file')
            #workflow.connect(anat2hr, 'out_matrix_file', linear_reg, 'in_matrix_file')
            workflow.connect(linear_reg, 'out_matrix_file', nonlinear_reg, 'affine_file')
            workflow.connect(skullstrip, 'out_file', nonlinear_reg, 'in_file')
            workflow.connect(linear_reg, 'out_matrix_file', inv_flirt_xfm, 'in_file')
            # Run workflow
            workflow.write_graph()
            workflow.run()
            print "ANATOMICAL PREPROCESSING DONE! Results in ", results_path+subj+'/'+s
    except:
        # Catch-all so one failing subject does not kill the pool worker.
        print "Error with patient: ", subj
        traceback.print_exc()
    os.chdir(owd) # back to owd, added by Riikka Ruuth
if __name__ == '__main__':
    # Fan the subject list out across worker processes; the try/finally
    # guarantees the pool is shut down and drained even if mapping raises.
    worker_count = 10
    pool = multiprocessing.Pool(processes=worker_count)
    try:
        pool.map(prepro_anat, subj_dirs)
    finally:
        pool.close()
        pool.join()
|
import os
import logging
import collections
from logging import handlers
from .contracts import FutureBlockCall
class cached_property(object):
    """
    Descriptor that turns a single-``self`` method into a lazily computed,
    per-instance cached attribute.

    The optional ``name`` argument lets methods with other names be exposed
    as cached properties, e.g. ``url = cached_property(get_absolute_url,
    name='url')``.
    """
    def __init__(self, func, name=None):
        self.func = func
        self.__doc__ = getattr(func, '__doc__')
        self.name = name or func.__name__

    def __get__(self, instance, type=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        # Storing the value under the same name shadows this (non-data)
        # descriptor, so every later read hits the instance dict directly.
        value = self.func(instance)
        instance.__dict__[self.name] = value
        return value
class empty(object):
    """Sentinel type standing in for "no value" (distinct from None)."""
class _cache_once(object):
    """
    Similar to cached property except that it doesn't cache the value until it
    differs from the default value.

    Intended to be used via the ``cache_once(default_value)`` factory below,
    which creates a subclass carrying the ``default_value`` class attribute
    that ``__get__`` compares against; this base class does not define it.
    """
    # NOTE(review): appears vestigial — __get__ reads ``self.default_value``,
    # never ``_cache_value``. Confirm nothing else uses it before removing.
    _cache_value = empty

    def __init__(self, func):
        self.func = func
        self.__doc__ = getattr(func, '__doc__')
        self.name = func.__name__

    def __get__(self, instance, type=None):
        # Recompute on every access until the result differs from the
        # default; then store it on the instance, which shadows this
        # (non-data) descriptor for all subsequent lookups.
        value = self.func(instance)
        if value != self.default_value:
            # assumes the owning instance has a ``logger`` attribute — TODO confirm
            instance.logger.debug("Caching return value: %s for function: %s", value, self.name)
            instance.__dict__[self.name] = value
        return value
def cache_once(default_value):
    """Build a ``_cache_once`` descriptor subclass bound to *default_value*
    (results equal to it are recomputed on each access, never cached)."""
    namespace = {'default_value': default_value}
    return type('cache_once', (_cache_once,), namespace)
# Map LOG_LEVEL environment-variable strings to stdlib logging levels.
# Unknown (or missing) keys fall back to INFO via the defaultdict factory.
LEVELS = collections.defaultdict(lambda: logging.INFO)
LEVELS.update({
    'CRITICAL': logging.CRITICAL,
    'ERROR': logging.ERROR,
    'WARNING': logging.WARNING,
    'INFO': logging.INFO,
    'DEBUG': logging.DEBUG,
})
def get_logger(name, level=None):
    """Return a logger *name* with a stream handler at *level* and a DEBUG
    rotating file handler writing to logs/<name>.log.

    If *level* is None it is looked up from the LOG_LEVEL environment
    variable via LEVELS, defaulting to INFO. The env-lookup default is now
    the string 'INFO' (the original passed the int ``logging.INFO`` as a
    LEVELS key, which only worked via the defaultdict fallback).
    """
    if level is None:
        level = LEVELS[os.environ.get('LOG_LEVEL', 'INFO')]
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    if logger.handlers:
        # logging.getLogger returns the same object for a given name; the
        # original re-added both handlers on every call, duplicating every
        # log line. Reuse the already-configured logger instead.
        return logger
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(
        logging.Formatter(name.upper() + ': %(levelname)s: %(asctime)s %(message)s')
    )
    logger.addHandler(stream_handler)
    # NOTE(review): assumes a ./logs directory already exists — confirm.
    file_handler = handlers.RotatingFileHandler('logs/{0}.log'.format(name), maxBytes=10000000)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter('%(levelname)s: %(asctime)s %(message)s'))
    logger.addHandler(file_handler)
    return logger
# 20-byte zero address, used below as the "no call" sentinel returned by the
# alarm contract's getNextCall / getNextCallSibling lookups.
EMPTY_ADDRESS = '0x0000000000000000000000000000000000000000'
def enumerate_upcoming_calls(alarm, anchor_block):
    """Collect addresses of scheduled calls targeting the next 40 blocks.

    Walks the alarm contract's call index starting at *anchor_block*,
    following sibling links so that calls sharing the same target block are
    all included. Returns a tuple of call contract addresses in the order
    encountered.
    """
    block_cutoff = anchor_block + 40
    blockchain_client = alarm._meta.blockchain_client
    calls = []
    while anchor_block > 0 and anchor_block < block_cutoff:
        call_address = alarm.getNextCall(anchor_block)
        if call_address == EMPTY_ADDRESS:
            # No further scheduled calls at or after anchor_block.
            break
        call = FutureBlockCall(call_address, blockchain_client)
        try:
            target_block = call.targetBlock()
        except ValueError:
            # Code of length <= 2 ("0x") presumably means the call contract
            # self-destructed; skip it.
            # NOTE(review): `continue` re-queries the same anchor_block — if
            # getNextCall keeps returning this dead address the loop never
            # terminates. Confirm the contract skips destroyed calls.
            if len(blockchain_client.get_code(call_address)) <= 2:
                continue
            raise
        if target_block > block_cutoff:
            break
        calls.append(call_address)
        # Walk sibling calls scheduled for the same target block.
        sibling_call_address = call_address
        while sibling_call_address != EMPTY_ADDRESS:
            sibling_call_address = alarm.getNextCallSibling(sibling_call_address)
            if sibling_call_address != EMPTY_ADDRESS:
                call = FutureBlockCall(sibling_call_address, alarm._meta.blockchain_client)
                try:
                    sibling_target_block = call.targetBlock()
                except ValueError:
                    # Dead sibling contract; advance to the next sibling.
                    if len(blockchain_client.get_code(sibling_call_address)) <= 2:
                        continue
                    raise
                if sibling_target_block == target_block:
                    calls.append(sibling_call_address)
                else:
                    # Siblings are past the current target block; stop.
                    break
        # Resume the outer scan just past the block we finished.
        anchor_block = target_block + 1
    return tuple(calls)
|
"""
Usage:
ProcessingFilteredData.py <year> (--SP500 | --SP1500 | --all)
"""
import pandas as pd
from glob import glob
from docopt import docopt
import json
if __name__ == "__main__":
    opt = docopt(__doc__)
    print(opt)
    year = opt["<year>"]
    # cikAccumsDict[prev][cur] counts how many times a viewer at one IP
    # moved from filing `prev` to filing `cur` (a browsing "chain").
    cikAccumsDict = {}
    for dailyFile in sorted(glob("./MyFilteredData/" + year + "/*.dat")):
        print("Extracting filtered data {}".format(dailyFile))
        # Read in file and filter rows by S&P500 / S&P1500 membership.
        df = pd.read_csv(dailyFile, delimiter=',', header=0, usecols=[1, 2, 3, 4]).astype({'cik': 'int64'})
        if opt["--SP500"]:
            df.drop(df[df['S&P500'] == 0].index, inplace=True)
        if opt["--SP1500"]:
            df.drop(df[df['S&P1500'] == 0].index, inplace=True)
        df.drop(columns='S&P500', inplace=True)
        df.drop(columns='S&P1500', inplace=True)
        # Filter step 4A: collapse immediate repeats of the same (ip, cik)
        # row — A->A becomes A.
        prevCik = 0
        prevIp = ""
        indexToRemove = []
        for row in df.itertuples():
            if getattr(row, "cik") == prevCik and getattr(row, "ip") == prevIp:
                indexToRemove.append(getattr(row, "Index"))
            prevCik = getattr(row, "cik")
            prevIp = getattr(row, "ip")
        df.drop(index=indexToRemove, inplace=True)
        # Filter step 4B: within one IP session keep only the first
        # occurrence of each A->B transition.
        prevCik = 0
        prevIp = ""
        cikChains = set()
        indexToRemove = []
        for row in df.itertuples():
            cik = getattr(row, "cik")
            ip = getattr(row, "ip")
            if prevIp != ip:
                # New IP session: reset the seen-transition set.
                cikChains = set()
            else:
                if (prevCik, cik) in cikChains:
                    indexToRemove.append(getattr(row, "Index"))
                else:
                    cikChains.add((prevCik, cik))
            prevCik = cik
            prevIp = ip
        df.drop(index=indexToRemove, inplace=True)
        # Accumulate transition counts across all daily files.
        prevCik = 0
        prevIp = ""
        for row in df.itertuples():
            cik = getattr(row, "cik")
            ip = getattr(row, "ip")
            if prevIp != ip:
                # Session boundary: no transition to count.
                prevCik = cik
                prevIp = ip
                continue
            if prevCik in cikAccumsDict:
                if cik in cikAccumsDict[prevCik]:
                    cikAccumsDict[prevCik][cik] += 1
                else:
                    cikAccumsDict[prevCik][cik] = 1
            else:
                cikAccumsDict[prevCik] = {}
                cikAccumsDict[prevCik][cik] = 1
            prevCik = cik
            prevIp = ip
    # Convert dict-of-dicts to a dict of (cik, share) lists sorted by
    # descending frequency, normalized so each list's shares sum to 1.
    cikAccumList = {}
    for cikDict in cikAccumsDict:
        cikAccumList[cikDict] = sorted(cikAccumsDict[cikDict].items(), key=lambda kv: kv[1], reverse=True)
        sumOver = sum(p[1] for p in cikAccumList[cikDict])
        cikAccumList[cikDict] = [(x[0], x[1] / sumOver) for x in cikAccumList[cikDict]]
    # Serialize without rebinding the `json` module name (the original's
    # `json = json.dumps(...)` shadowed the module), and close the output
    # file deterministically via a context manager.
    payload = json.dumps(cikAccumList)
    if opt["--SP500"]:
        outName = "./Result/dict" + year + "SP500.json"
    if opt["--SP1500"]:
        outName = "./Result/dict" + year + "SP1500.json"
    if opt["--all"]:
        outName = "./Result/dict" + year + "all.json"
    with open(outName, "w") as f:
        f.write(payload)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.