seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32425523875 | import random
import time
from typing import Set
import schedule
from timmy import core
from timmy.data.war_state import WarState
from timmy.data.word_war import WordWar
from timmy.db_access import word_war_db
class WarTicker:
    """Coordinates timed "word wars": loads persisted wars, hands them to the
    once-per-second update loop, announces countdowns over IRC, and advances
    chained (multi-round) wars from round to round."""
    def __init__(self):
        # Wars restored from the DB whose channel has not been joined yet.
        self.loaded_wars: Set[WordWar] = set()
        # Wars currently being ticked by war_update_loop.
        self.active_wars: Set[WordWar] = set()
    def reset_timer(self) -> None:
        """Cancel the scheduled tick job, drop all war state, and reload."""
        schedule.clear('warticker')
        # Re-running __init__ simply re-creates the two empty sets above.
        self.__init__()
        self.load_wars()
    def load_wars(self) -> None:
        """Fetch persisted wars and (re)schedule the 1 s update loop."""
        self.loaded_wars = word_war_db.load_wars()
        schedule.every(1).seconds.do(war_update_loop).tag('warticker')
    def begin_war(self, war: WordWar) -> None:
        """Announce the start of ``war`` in its channel and to its members."""
        current_epoch = time.time()
        late_start = int(current_epoch - war.start_epoch)
        message = "{}: Starting now!".format(war.get_name())
        # The ticker only fires once per second, so small delays are normal;
        # apologise only when we are noticeably (>= 5 s) late.
        if late_start >= 5:
            message += " ({:d} seconds late. Sorry!)".format(late_start)
        core.bot_instance.connection.privmsg(war.channel, message)
        self.notify_war_members(war, message)
        war.begin_war()
    def end_war(self, war: WordWar) -> None:
        """Announce the end of ``war``; retire it or start the next chain round."""
        current_epoch = time.time()
        late_end = int(current_epoch - war.end_epoch)
        message = "{}: Ending now!".format(war.get_name())
        if late_end >= 5:
            message += " ({:d} seconds late. Sorry!)".format(late_end)
        core.bot_instance.connection.privmsg(war.channel, message)
        self.notify_war_members(war, message)
        if war.channel in core.bot_instance.channels:
            core.bot_instance.channels[war.channel].last_war_id = war.get_id()
        if war.current_chain >= war.total_chains:
            # Last round of the chain: retire the war completely.
            war.end_war()
            self.active_wars.remove(war)
            if war.channel in core.bot_instance.channels \
                    and core.bot_instance.channels[war.channel].newest_war_id == war.get_id():
                core.bot_instance.channels[war.channel].newest_war_id = ""
        else:
            # More rounds to go: schedule the next round after a break.
            war.current_chain += 1
            if war.randomness:
                # NOTE(review): randrange(20) - 10 yields -10..+9 percent, a
                # slightly asymmetric jitter — confirm whether +-10% was meant
                # (randrange(21) - 10 would be symmetric).
                war.start_epoch = war.end_epoch + war.base_break + (war.base_break * (random.randrange(20) - 10)) / 100
                war.end_epoch = war.start_epoch + war.base_duration + (
                        war.base_duration * (random.randrange(20) - 10)) / 100
            else:
                war.start_epoch = war.end_epoch + war.base_break
                war.end_epoch = war.start_epoch + war.base_duration
            war.start_break()
            self.war_start_count(war)
            if war.channel in core.bot_instance.channels:
                core.bot_instance.channels[war.channel].newest_war_id = war.get_id()
    @staticmethod
    def war_start_count(war: WordWar) -> None:
        """Announce how long until ``war`` starts (seconds or minutes)."""
        time_to_start = int(war.start_epoch - time.time())
        if time_to_start < 60:
            message = "{}: Starting in {:d} {}.".format(
                war.get_name(), time_to_start, "seconds" if time_to_start > 1 else "second"
            )
        else:
            minutes = time_to_start / 60
            if time_to_start % 60 == 0:
                message = "{}: Starting in {:d} {}.".format(
                    war.get_name(include_duration = True), int(minutes), "minutes" if minutes > 1 else "minute"
                )
            else:
                # Not a whole number of minutes: show one decimal place.
                message = "{}: Starting in {:.1f} minutes.".format(war.get_name(include_duration = True), minutes)
        core.bot_instance.connection.privmsg(war.channel, message)
    @staticmethod
    def war_end_count(war: WordWar) -> None:
        """Announce how long until ``war`` ends (seconds or whole minutes)."""
        time_to_end = int(war.end_epoch - time.time())
        if time_to_end < 60:
            message = "{}: {:d} {} remaining!".format(
                war.get_name(), time_to_end, "seconds" if time_to_end > 1 else "second"
            )
        else:
            minutes = time_to_end // 60
            message = "{}: {:d} {} remaining.".format(
                war.get_name(), minutes, "minutes" if minutes > 1 else "minute"
            )
        core.bot_instance.connection.privmsg(war.channel, message)
    @staticmethod
    def notify_war_members(war: WordWar, message: str) -> None:
        """Privately send ``message`` (tagged with the war's DB id) to members."""
        db_id = war.get_id()
        message += f" [ID {db_id}]"
        for nick in war.war_members:
            core.bot_instance.connection.privmsg(nick, message)
def war_update_loop() -> None:
    """One scheduler tick: activate newly-joinable wars, then emit countdown
    reminders and start/end transitions for every active war.

    Runs once per second (scheduled in WarTicker.load_wars).  Any exception
    is caught and logged so that a bad tick never kills the scheduler.
    """
    try:
        from timmy.core import bot_instance
        # Iterate over copies: wars are moved between / removed from the live
        # sets while we loop.
        loaded_wars = core.war_ticker.loaded_wars.copy()
        for war in loaded_wars:
            # A loaded war becomes active once the bot has joined its channel.
            if war.channel in bot_instance.channels.keys():
                core.war_ticker.active_wars.add(war)
                core.war_ticker.loaded_wars.remove(war)
                bot_instance.channels[war.channel].newest_war_id = war.get_id()
        wars = core.war_ticker.active_wars.copy()
        if wars is None or len(wars) <= 0:
            return
        current_epoch = time.time()
        for war in wars:
            if war.start_epoch >= current_epoch:
                # War has not started yet: fixed reminders near the start,
                # hourly / half-hourly reminders further out.
                time_difference = int(war.start_epoch - current_epoch)
                if time_difference in [600, 300, 60, 30, 5, 4, 3, 2, 1]:
                    core.war_ticker.war_start_count(war)
                elif time_difference == 0:
                    core.war_ticker.begin_war(war)
                elif time_difference >= 3600:
                    if time_difference % 3600 == 0:
                        core.war_ticker.war_start_count(war)
                elif time_difference >= 1800:
                    if time_difference % 1800 == 0:
                        core.war_ticker.war_start_count(war)
            else:
                if war.end_epoch >= current_epoch:
                    # War should be running: make sure it was actually begun
                    # (e.g. the start tick was missed), then count down to end.
                    if war.state == WarState.PENDING:
                        core.war_ticker.begin_war(war)
                    else:
                        time_difference = int(war.end_epoch - current_epoch)
                        if time_difference in [600, 300, 60, 5, 4, 3, 2, 1]:
                            core.war_ticker.war_end_count(war)
                        elif time_difference == 0:
                            core.war_ticker.end_war(war)
                        elif time_difference >= 3600:
                            if time_difference % 3600 == 0:
                                core.war_ticker.war_end_count(war)
                        elif time_difference >= 1800:
                            if time_difference % 1800 == 0:
                                core.war_ticker.war_end_count(war)
                else:
                    # End time already passed (missed tick): close it now.
                    core.war_ticker.end_war(war)
    except Exception:
        from timmy.utilities import irc_logger
        irc_logger.log_traceback()
| utoxin/TimTheWordWarBot | timmy/core/warticker.py | warticker.py | py | 6,662 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "typing.Set",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "timmy.data.word_war.WordWar",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "timmy.data.word_war... |
19963060538 | import os
import sys
import numpy as np
from collections import OrderedDict
def groupSinglets(comp_name):
    """Translate an EFT component name (e.g. "lin_cW") into its legend
    label (e.g. "L cW"); unknown component types are returned unchanged."""
    labels = {
        "sm": "SM",
        "lin": "L ",
        "quad": "Q ",
        "lin_mixed": "M ",
        "sm_lin_quad": "SM+L+Q ",
        "quad_mixed": "Q+Q+M ",
        "sm_lin_quad_mixed": "SM+L+L+Q+Q+M ",
        "DATA": "DATA"
    }
    # Everything before the first operator ("_c...") is the component type.
    comp_type = comp_name.split("_c")[0]
    if comp_type not in labels:
        return comp_name
    label = labels[comp_type]
    # SM and DATA carry no operator suffix; for everything else append the
    # operator name(s) that follow the component type.
    if comp_type != "sm" and comp_type != "DATA":
        operators = comp_name.split(comp_type + "_")[1]
        pieces = operators.split("_")
        if len(pieces) == 2:
            label += pieces[0] + " " + pieces[1]
        else:
            label += operators
    return label
def isBSM(sample):
    """Return True if *sample* names an EFT (BSM) component, i.e. contains
    an "sm_", "quad_" or "lin_" piece, rather than pure SM or data."""
    # any() already yields a bool — no need for the explicit if/else that
    # the original used.
    return any(tag in sample for tag in ("sm_", "quad_", "lin_"))
def makeStructure(h_dict, model, outdir, isMkDC = True):
    """Write a Latinos ``structure_<sample>_<model>.py`` file per sample.

    ``h_dict`` maps sample -> {variable -> {component -> histo}} when
    ``isMkDC`` is True, or sample -> iterable of component names otherwise.
    Every component is written as non-signal; only "DATA" is flagged as data.
    """
    for sample in h_dict:
        if isMkDC:
            # next(iter(...)) instead of .keys()[0]: dict views are not
            # subscriptable on Python 3.  Components are identical across
            # variables, so any variable will do.
            first_var = next(iter(h_dict[sample]))
            structure = h_dict[sample][first_var].keys()
        else:
            structure = h_dict[sample]
        file_name = outdir + "/structure_" + sample + "_" + model + ".py"
        # "with" guarantees the file is closed even if a write fails.
        with open(file_name, 'w') as f:
            f.write("#-----------------------------------\n")
            f.write("# Automatically generated # \n")
            f.write("# by mkDCInputs.py # \n")
            f.write("#-----------------------------------\n")
            f.write("\n\n\n")
            for key in structure:
                isData = 0
                if key == "DATA": isData = 1
                f.write('structure["{}"] = {} \n'.format(key, "{"))
                f.write(" 'isSignal' : 0, \n")
                f.write(" 'isData' : {}, \n".format(isData))
                f.write("{}\n".format("}"))
                f.write("\n\n")
# THIS FUNCTION IS TERRIBLE, HOPEFULLY SOMEONE STRONG OF HEART WILL FIX IT
# anyway we do not need the Latinos plot card redineed in order to generate the datacards
def makePlot(h_dict, model, config, outdir, isMkDC = True):
    """Write a Latinos ``plot_<sample>_<model>.py`` card per sample.

    Reads colours, signal flags and optional grouping from the [d_plot]
    config section, builds a per-component colour map (``cd``), optionally
    emits ``groupPlot`` entries ("BSM" groups all EFT components, "all"
    groups each non-data component by itself, anything else is an explicit
    component list), then emits one ``plot`` entry per component.
    """
    # colours arrive as "type:v1:v2:..." tokens -> {type: [v1, v2, ...]}
    colors = config.getlist("d_plot", "colors")
    c_types = [i.split(":")[0] for i in colors]
    c_vals = [[int(j) for j in i.split(":")[1:]] for i in colors]
    user_colors = {}
    for i,j in zip(c_types, c_vals):
        user_colors[i] = j
    group = []
    g_colors = []
    if config.has_option("d_plot", "isSignal"):
        # "comp:flag" tokens -> {comp: flag} (flag kept as a string).
        isSignal = config.getlist("d_plot", "isSignal")
        comp = [i.split(":")[0] for i in isSignal]
        val = [i.split(":")[1] for i in isSignal]
        isSignal = dict((c,v) for c,v in zip(comp, val))
    else:
        isSignal = {}
    if config.has_option("d_plot", "group"): group = config.getlist("d_plot", "group")
    if config.has_option("d_plot", "g_colors"): g_colors = [int(col) for col in config.getlist("d_plot", "g_colors")]
    for sample in h_dict:
        if isMkDC:
            # NOTE(review): .keys()[0] is Python 2 only — dict views are not
            # subscriptable on Python 3.
            first_var = h_dict[sample].keys()[0]
            structure = h_dict[sample][first_var].keys()
        else:
            structure = h_dict[sample]
        # Sample names are "<prefix>_<op1>_<op2>..."; everything after the
        # first underscore is treated as the operator list.
        ops = sample.split("_")[1:]
        cd = {}
        # Build the component -> colour map, pairing colour lists with the
        # operators of this sample.
        for key in user_colors.keys():
            if key != "sm" and key not in config.getlist("variables", "makeDummy"):
                if len(user_colors[key]) == len(ops):
                    for j,op in enumerate(ops):
                        cd[key + "_" + op] = user_colors[key][j]
                if len(user_colors[key]) < len(ops):
                    # Fewer colours than operators: one joint component,
                    # trying both operator orderings for the name.
                    if key + "_" + "_".join(op for op in ops) in structure:
                        cd[key + "_" + "_".join(op for op in ops)] = user_colors[key][0]
                    else:
                        cd[key + "_" + "_".join(op for op in ops[::-1])] = user_colors[key][0]
                if len(user_colors[key]) > len(ops):
                    for j,op in enumerate(ops):
                        cd[key + "_" + op] = user_colors[key][j]
            else:
                cd[key] = user_colors[key][0]
        # Dummy components without an explicit colour default to 1 (black).
        for key in config.getlist("variables", "makeDummy"):
            if key not in user_colors.keys():
                cd[key] = 1
        file_name = outdir + "/plot_" + sample + "_" + model + ".py"
        f = open(file_name, 'w')
        f.write("#-----------------------------------\n")
        f.write("# Automatically generated # \n")
        f.write("# by mkDCInputs.py # \n")
        f.write("#-----------------------------------\n")
        f.write("\n\n\n")
        # --- optional groupPlot section -------------------------------
        for idx,g_ in enumerate(group):
            group_these = {}
            if g_.split(":")[1] == "BSM":
                # One group collecting every EFT (BSM) component.
                g_name = g_.split(":")[0]
                if g_name == "model": g_name = model
                group_these[g_name] = {}
                if g_name in isSignal.keys(): sig_val = isSignal[g_name]
                else: sig_val = 2
                group_these[g_name]['nameHR'] = "'{}'".format(g_name)
                group_these[g_name]['isSignal'] = sig_val
                group_these[g_name]['color'] = g_colors[idx]
                group_these[g_name]['samples'] = []
                components = h_dict[sample][h_dict[sample].keys()[0]].keys() #they are equal forall variables
                for comp in components:
                    if isBSM(comp): group_these[g_name]['samples'].append(comp)
            elif g_.split(":")[1] == "all":
                # One singleton group per non-data component.
                for i,key in enumerate(structure):
                    if key != "DATA":
                        leg_name = groupSinglets(key)
                        group_these[key] = {}
                        if key in isSignal.keys(): sig_val = isSignal[key]
                        else: sig_val = 2
                        # thisif else are ridicolous now
                        if key not in cd.keys(): cd[key] = 1
                        group_these[key]['nameHR'] = "'{}'".format(leg_name)
                        group_these[key]['isSignal'] = sig_val
                        group_these[key]['color'] = cd[key]
                        group_these[key]['samples'] = [key]
            else:
                # Explicit group: "name:[comp1 comp2 ...]".
                g_name = g_.split(":")[0]
                g_list = [str(i) for i in (g_.split(":")[1])[1:-1].split(" ")]
                group_these[g_name] = {}
                if g_name in isSignal.keys(): sig_val = isSignal[g_name]
                else: sig_val = 2
                group_these[g_name]['nameHR'] = "'{}'".format(g_.split(":")[0])
                group_these[g_name]['isSignal'] = sig_val
                group_these[g_name]['color'] = g_colors[idx]
                group_these[g_name]['samples'] = []
                components = h_dict[sample][h_dict[sample].keys()[0]].keys() #they are equal forall variables
                for comp in g_list:
                    if comp not in components:
                        sys.exit("[ERROR] The sample {} specified for grouping into {} does not exists ...".format(comp, g_name))
                    group_these[g_name]['samples'].append(comp)
            if len(group_these.keys()) != 0:
                #sort the dict to allow right plotting
                group_these = OrderedDict(sorted(group_these.items(), key=lambda t: t[1]["isSignal"], reverse=True))
                for key in group_these.keys():
                    f.write('groupPlot["{}"] = {} \n'.format(key, "{"))
                    for subkey in group_these[key]:
                        if subkey != 'samples':
                            f.write(" '{}' : {}, \n".format(subkey, group_these[key][subkey]))
                        else:
                            write_list = "["
                            for s in group_these[key][subkey]:
                                write_list += "'{}'".format(s) + ","
                            write_list = write_list[:-1] + "]"
                            f.write(" '{}' : {}, \n".format(subkey, write_list))
                    f.write("{}\n".format("}"))
                    f.write("\n\n")
        # --- per-component plot entries -------------------------------
        for i,key in enumerate(structure):
            if key in isSignal.keys(): sig_val = isSignal[key]
            else: sig_val = 2
            isData = 0
            if key == "DATA":
                isData = 1
                color = 1
            if key in cd:
                color = cd[key]
            if i > len(cd.keys()): sys.exit("[ERROR]: Colors not sufficient, add more...")
            f.write('plot["{}"] = {} \n'.format(key, "{"))
            # NOTE(review): the local ``color`` fallback computed above is
            # never used — cd[key] is written directly and would raise
            # KeyError for a component missing from cd; confirm intent.
            f.write(" 'color' : {}, \n".format(cd[key]))
            f.write(" 'isSignal' : {}, \n".format(sig_val))
            f.write(" 'isData' : {}, \n".format(isData))
            f.write(" 'scale' : 1, \n")
            if key == "DATA":
                f.write(" 'isBlind' : 1, \n") #default blinding on data
            f.write("{}\n".format("}"))
            f.write("\n\n")
        f.close()
def makeVariables(h_dict, model, config, outdir):
    """Write a Latinos ``variables_<sample>_<model>.py`` card per sample.

    Each of xaxis / name / range / fold from [d_variables] may be "auto"
    (derived from the variable names or from [variables] bins/xrange) or an
    explicit per-variable list matching [variables] treenames.
    """
    xaxis_ = config.getlist("d_variables", "xaxis")
    name_ = config.getlist("d_variables", "name")
    range_ = config.getlist("d_variables", "range")
    fold_ = config.getlist("d_variables", "fold")
    # NOTE(review): the four lists above are converted to dicts inside this
    # loop; on a second sample the `x_[0] == "auto"` checks would then index
    # a dict with key 0 — only safe for a single-sample h_dict. Confirm.
    for sample in h_dict:
        vars_ = h_dict[sample].keys()
        bl = len(vars_)
        #if not all(len(lst) == bl for lst in [xaxis_, name_, range_, fold_]):
        if xaxis_[0] == "auto": xaxis_ = dict((i,j) for i,j in zip(vars_, vars_))
        elif len(xaxis_) == len(vars_):
            tn = config.getlist("variables", "treenames")
            xaxis_ = dict((i,j) for i,j in zip(tn, xaxis_))
        else:
            sys.exit("[ERROR] xaxis name do not match variables, check inputs in cfg ...")
        if name_[0] == "auto": name_ = dict((i,j) for i,j in zip(vars_, vars_))
        elif len(name_) == len(vars_):
            tn = config.getlist("variables", "treenames")
            name_ = dict((i,j) for i,j in zip(tn, name_))
        else:
            sys.exit("[ERROR] names do not match variables, check inputs in cfg ...")
        if fold_[0] == "auto": fold_ = dict((i,0) for i in vars_)
        elif len(fold_) == len(vars_):
            tn = config.getlist("variables", "treenames")
            fold_ = dict((i,j) for i,j in zip(tn, fold_))
        else:
            sys.exit("[ERROR] folds do not match variables, check inputs in cfg ...")
        if range_[0] == "auto":
            # "auto" ranges are rebuilt from [variables] bins and xrange
            # ("[lo:hi]" tokens) keyed by treename.
            tn = config.getlist("variables", "treenames")
            range_ = dict.fromkeys(tn)
            bins = [int(i) for i in config.getlist("variables", "bins")]
            ranges = [i[1:-1].split(":") for i in config.getlist("variables", "xrange")]
            ranges = [list(map(float, sublist)) for sublist in ranges]
            for k,b,r in zip(range_.keys(), bins, ranges):
                range_[k] = {'bins': b, 'range': [r[0], r[1]]}
        elif len(range_) == len(vars_):
            tn = config.getlist("variables", "treenames")
            range_ = dict((i,j) for i,j in zip(tn, range_))
        else:
            sys.exit("[ERROR] ranges do not match variables, check inputs in cfg ...")
        file_name = outdir + "/variables_" + sample + "_" + model + ".py"
        f = open(file_name, 'w')
        f.write("#-----------------------------------\n")
        f.write("# Automatically generated # \n")
        f.write("# by mkDCInputs.py # \n")
        f.write("#-----------------------------------\n")
        f.write("\n\n\n")
        # NOTE(review): only ``var`` is used below — the xa/name/ra/fold
        # loop variables are shadowed by dict lookups on var.
        for var, xa, name, ra, fold in zip(vars_, xaxis_, name_, range_, fold_):
            f.write('variables["{}"] = {} \n'.format(var, "{"))
            f.write(" 'name' : '{}', \n".format(name_[var]))
            f.write(" 'range' : ({},{},{}), \n".format(range_[var]['bins'], range_[var]['range'][0], range_[var]['range'][1]))
            f.write(" 'xaxis' : {}, \n".format(xaxis_[var]))
            f.write(" 'fold' : {}, \n".format(fold_[var]))
            f.write("{}\n".format("}"))
            f.write("\n\n")
        f.close()
def makeSamples(h_dict, model, config, outdir, isMkDC = True):
    """Write a Latinos ``samples_<sample>_<model>.py`` card per sample.

    [d_samples] name/weight/weights/filesperjob must each be either a single
    value (repeated for every component) or a list with exactly one entry per
    component.  Bug fixed: the original exited even when the lists correctly
    matched the number of components, because both the elif and the else
    branch called sys.exit.
    """
    for sample in h_dict:
        if isMkDC:
            # next(iter(...)) instead of .keys()[0] (Python 3 compatible).
            first_var = next(iter(h_dict[sample]))
            structure = h_dict[sample][first_var].keys()
        else:
            structure = h_dict[sample]
        names = config.getlist("d_samples", "name")
        w = config.getlist("d_samples", "weight")
        ws = config.getlist("d_samples", "weights")
        fxj = config.getlist("d_samples", "filesperjob")
        if len(names) == len(w) == len(ws) == len(fxj) == 1:
            # One value given: repeat it for every component.
            names = names*len(structure)
            w = w*len(structure)
            ws = ws*len(structure)
            fxj = fxj*len(structure)
        elif not (len(names) == len(w) == len(ws) == len(fxj) == len(structure)):
            sys.exit("[ERROR] While making sample, provide a list of parameters = to number of EFT component \
                or only one value (repeated). Nothing inbetween ...")
        # else: the lists already match the components one-to-one.
        file_name = outdir + "/samples_" + sample + "_" + model + ".py"
        with open(file_name, 'w') as f:
            f.write("#-----------------------------------\n")
            f.write("# Automatically generated # \n")
            f.write("# by mkDCInputs.py # \n")
            f.write("#-----------------------------------\n")
            f.write("\n\n\n")
            f.write("import os \n")
            f.write("import inspect \n")
            f.write("configurations = os.path.realpath(inspect.getfile(inspect.currentframe())) # this file \n")
            f.write("configurations = os.path.dirname(configurations) \n\n")
            f.write("from LatinoAnalysis.Tools.commonTools import getSampleFiles, getBaseW, addSampleWeight\n\n")
            #Samples declaration
            f.write("# samples\n\n")
            f.write("try:\n")
            f.write(" len(samples)\n")
            f.write("except NameError:\n")
            f.write(" import collections\n")
            f.write(" samples = collections.OrderedDict()")
            f.write("\n\n")
            for i,key in enumerate(structure):
                f.write('samples["{}"] = {} \n'.format(key, "{"))
                f.write(" 'name' : {}, \n".format(names[i]))
                f.write(" 'weight' : {}, \n".format(w[i]))
                f.write(" 'weights' : {}, \n".format(ws[i]))
                f.write(" 'isData' : 0, \n")
                f.write(" 'FilesPerJob' : {}, \n".format(fxj[i]))
                f.write("{}\n".format("}"))
                f.write("\n\n")
def makeConfiguration(h_dict, model, config, outdir):
    """Write a Latinos ``configuration_<sample>_<model>.py`` per sample.

    Every ``*File`` entry of [d_configuration] may be "auto", in which case
    it resolves to the companion card generated by this script for the same
    sample/model pair.  The 7 copy-pasted "auto" blocks of the original are
    collapsed into one data-driven loop (same keys, same output order).
    """
    # (config key, filename prefix substituted when the value is "auto")
    auto_files = [
        ("aliasesFile", "aliases"),
        ("variablesFile", "variables"),
        ("cutsFile", "cuts"),
        ("samplesFile", "samples"),
        ("plotFile", "plot"),
        ("structureFile", "structure"),
        ("nuisancesFile", "nuisances"),
    ]
    for sample in h_dict:
        file_name = outdir + "/configuration_" + sample + "_" + model + ".py"
        write_out = {}
        write_out["tag"] = config.get("d_configuration", "tag")
        for key, prefix in auto_files:
            value = config.get("d_configuration", key)
            if value == "auto":
                value = prefix + "_" + sample + "_" + model + ".py"
            write_out[key] = value
        write_out["lumi"] = config.get("d_configuration", "lumi")
        write_out["outputDirPlots"] = config.get("d_configuration", "outputDirPlots")
        write_out["outputDirDatacard"] = config.get("d_configuration", "outputDirDatacard")
        # "with" guarantees the file is closed even if a write fails.
        with open(file_name, 'w') as f:
            f.write("#-----------------------------------\n")
            f.write("# Automatically generated # \n")
            f.write("# by mkDCInputs.py # \n")
            f.write("#-----------------------------------\n")
            f.write("\n\n\n")
            for key, value in write_out.items():
                # Quote strings so the emitted file is valid Python.
                if type(value) == str:
                    f.write("{} = '{}' \n\n".format(key, value))
                else:
                    f.write("{} = {} \n\n".format(key, value))
def makeAliases(h_dict, model, outdir):
    """Write a Latinos ``aliases_<sample>_<model>.py`` per sample, defining a
    single always-true "inclusive" alias.

    Bug fixed: the original emitted ``'expr': 0 == 0'`` — an unbalanced quote
    (plus a no-op .format call) that made every generated file a SyntaxError.
    The expression must be a quoted cut string.
    """
    for sample in h_dict:
        file_name = outdir + "/aliases_" + sample + "_" + model + ".py"
        with open(file_name, 'w') as f:
            f.write("#-----------------------------------\n")
            f.write("# Automatically generated # \n")
            f.write("# by mkDCInputs.py # \n")
            f.write("#-----------------------------------\n")
            f.write("\n\n\n")
            f.write('aliases["inclusive"] = {} \n'.format("{"))
            f.write(" 'expr': '0 == 0'\n")
            f.write("{}\n".format("}"))
            f.write("\n\n")
def makeCuts(h_dict, model, outdir):
    """Write a Latinos ``cuts_<sample>_<model>.py`` per sample, declaring one
    cut per sample that simply selects the "inclusive" alias.

    Output is byte-identical to the original; the only change is using a
    ``with`` block so the file handle cannot leak on a failed write.
    """
    for sample in h_dict:
        file_name = outdir + "/cuts_" + sample + "_" + model + ".py"
        with open(file_name, 'w') as f:
            f.write("#-----------------------------------\n")
            f.write("# Automatically generated # \n")
            f.write("# by mkDCInputs.py # \n")
            f.write("#-----------------------------------\n")
            f.write("\n\n\n")
            f.write("cuts['{}'] = {} \n".format(sample, "{"))
            f.write(" 'expr': 'inclusive', \n")
            f.write("{}\n".format("}"))
            f.write("\n\n")
def whatNuis(comp):
    """Return the basic components ("sm", "<type>_<op>") whose nuisances
    contribute to the combined component *comp*."""
    known_types = np.array(["sm", "lin", "quad", "mixed"])
    pieces = np.array(comp.split("_"))
    # A single piece can only be the plain "sm" component.
    if pieces.size == 1:
        return pieces
    type_mask = np.isin(pieces, known_types)
    comp_types = pieces[type_mask]
    operators = np.setdiff1d(pieces, comp_types)
    # Every (non-SM type, operator) pair is a contributing component...
    contributions = [t + "_" + op for t in comp_types if t != "sm" for op in operators]
    # ...and the SM piece, when present, contributes on its own.
    if "sm" in comp_types:
        contributions.append("sm")
    return contributions
def propagateNuis(h_dict, nuis_dict):
    """Propagate lnN nuisance values from basic onto combined components.

    For each nuisance sample the new value is the yield-weighted sum of its
    contributing basic components (see whatNuis), normalised by the combined
    component's own integral.  Mutates and returns ``nuis_dict``.
    ``h_dict`` maps variable -> {component -> ROOT histogram}.
    """
    # NOTE(review): .keys()[0] is Python 2 only — dict views are not
    # subscriptable on Python 3.  Yields are assumed identical across
    # variables, so the first variable is used.
    var = h_dict.keys()[0]
    s_int = h_dict[var].keys()
    for key_name in nuis_dict.keys():
        samples_dict = nuis_dict[key_name]['samples']
        for sam in samples_dict.keys():
            sam_nuis_prop = 0
            c = whatNuis(sam)
            # NOTE(review): sample_nuis is computed but never used — confirm
            # whether the original "value - 1" propagation was intended.
            sample_nuis = samples_dict[sam] - 1
            #propagation
            if sam in s_int:
                #comp_yield = float('%.4f'%h_dict[var][sam].Integral())
                comp_yield = h_dict[var][sam].Integral()
            else: continue
            for basic_component in c:
                if basic_component in s_int and basic_component in samples_dict.keys():
                    #yield_ = float('%.4f'%h_dict[var][basic_component].Integral())
                    yield_ = h_dict[var][basic_component].Integral()
                    sam_nuis_prop += (yield_ * samples_dict[basic_component]) / comp_yield
            nuis_dict[key_name]['samples'][sam] = sam_nuis_prop
    return nuis_dict
# def switchNuis(comp_1, nuis_comp_1, comp_2):
# #this stands also if the component is not sm
# #print("sigma_{} = ({}-1) * {}/{} + 1 = {}".format("2", nuis_comp_1, comp_1, comp_2, (nuis_comp_1 - 1) * float(comp_1)/comp_2 + 1))
# return (nuis_comp_1 - 1) * float(comp_1)/comp_2 + 1
# def propagateNuis(h_dict, nuis_dict):
# #only lnN nuisances can be propagated
# #checks are made
# for nuis_name in nuis_dict.keys():
# if nuis_dict[nuis_name]['type'] == "lnN":
# if len(nuis_dict[nuis_name]['samples'].keys()) > 1:
# sys.exit("[ERROR] Cannot propagate more than one nuisance, there is \
# ambiguity... Please insert only one component for each nuisance and it will be propagated")
# for sample in nuis_dict[nuis_name]['samples'].keys():
# #cerco questo oggetto nella dict degli histo
# comp_yield = 0
# comp_nuis = nuis_dict[nuis_name]['samples'][sample]
# for var in h_dict.keys():
# compnames = h_dict[var].keys()
# for j in compnames:
# if j == sample:
# #We do this because mkDatacards saves only the first 4 decimal places
# #in the rate. Without this the models are distorted... Not nice but still..
# #card.write(''.join(('%-.4f' % yieldsSig[name]) line 240
# comp_yield = float('%.4f'%h_dict[var][j].Integral())
# # propagate to other components having
# # the nuis name in their name
# for cn in compnames:
# if (sample in cn) and sample != cn:
# comp2_yield = float('%.4f'%h_dict[var][cn].Integral())
# #print(cn, comp2_yield)
# comp2_nuis = switchNuis(comp_yield, comp_nuis, comp2_yield)
# #print(comp2_nuis)
# nuis_dict[nuis_name]['samples'][cn] = comp2_nuis
# #print(nuis_dict)
# return nuis_dict
def check_Nuisances(nuis_dict, h_dict):
    """Return True when every sample named in *nuis_dict* appears (as a
    substring) in at least one component of every variable of every sample
    in *h_dict*; False as soon as one is missing."""
    for nuisance in nuis_dict.values():
        for nuis_sample in nuisance['samples'].keys():
            for sample_name in h_dict.keys():
                for variable in h_dict[sample_name].keys():
                    components = h_dict[sample_name][variable].keys()
                    # The nuisance target must match at least one component
                    # name for this variable.
                    if not any(nuis_sample in comp for comp in components):
                        return False
    return True
def makeNuisDict(config, d_name_, name_, type_, samples_, components):
    """Build the nuisance dictionary {defname: {'name', 'type', 'samples'}}.

    A single "all:<value>" token applies <value> to every component except
    the dummy ones listed in [variables] makeDummy; otherwise every
    "comp:value" token is taken literally.
    """
    nuisances = {}
    if len(samples_) == 1 and samples_[0][0].split(":")[0] == "all":
        # One flat value shared by all (non-dummy) components.
        shared_val = float(samples_[0][0].split(":")[1])
        entry = {}
        entry['name'] = name_[0]
        entry['type'] = type_[0]
        entry['samples'] = {}
        for comp in components:
            if comp not in config.getlist("variables", "makeDummy"):
                entry['samples'][comp] = shared_val
        nuisances[d_name_[0]] = entry
        return nuisances
    for def_name, nuis_name, nuis_type, tokens in zip(d_name_, name_, type_, samples_):
        entry = {}
        entry['name'] = nuis_name
        entry['type'] = nuis_type
        entry['samples'] = {}
        for token in tokens:
            comp = token.split(":")[0]
            entry['samples'][comp] = float(token.split(":")[1])
        nuisances[def_name] = entry
    return nuisances
def makeNuisances(h_dict, model, config, outdir, isMkDC = True):
    """Write a Latinos ``nuisances_<sample>_<model>.py`` card per sample.

    Nuisance definitions come from the [d_nuisances] config section; when
    "propagate" is "True" (and isMkDC), values on basic components are first
    propagated onto combined components via propagateNuis.
    """
    #THIS PART IS NOT PERFECT
    #CAN WORK IF THE NUISANCE IS ONLY ON SM
    #DID NOT CHECK FOR OTHER SCENARIOS
    defname = config.getlist("d_nuisances", "defname") #the name in dict key
    name = config.getlist("d_nuisances", "name") # the 'name' field
    # samples tokens look like "[comp:val|comp:val]" -> list of "comp:val".
    samples = [i[1:-1].split("|") for i in config.getlist("d_nuisances", "samples")]
    samples = [list(map(str, sublist)) for sublist in samples]
    types = config.getlist("d_nuisances", "types")
    for sample in h_dict:
        # NOTE(review): .keys()[0] is Python 2 only — dict views are not
        # subscriptable on Python 3.
        if isMkDC: components = h_dict[sample][h_dict[sample].keys()[0]].keys()
        else: components = h_dict[sample]
        nd = makeNuisDict(config, defname, name, types, samples, components)
        if isMkDC:
            if not check_Nuisances(nd, h_dict):
                sys.exit("[ERROR] Nuisances specified in cfg file are not present in components dict ... Check inputs")
            if config.get("d_nuisances", "propagate") == "True":
                #
                # HORRIBLE FIX FOR 1D NUIS
                #
                nd = propagateNuis(h_dict[sample], nd)
        file_name = outdir + "/nuisances_" + sample + "_" + model + ".py"
        f = open(file_name, 'w')
        f.write("#-----------------------------------\n")
        f.write("# Automatically generated # \n")
        f.write("# by mkDCInputs.py # \n")
        f.write("#-----------------------------------\n")
        f.write("\n\n\n")
        for key in nd.keys():
            f.write("nuisances['{}'] = {} \n".format(key, "{"))
            f.write(" 'name' : '{}', \n".format(nd[key]['name']))
            f.write(" 'type' : '{}', \n".format(nd[key]['type']))
            f.write(" 'samples': {} \n".format("{"))
            # NOTE(review): this inner loop shadows the outer ``sample``
            # variable; harmless here (file_name is computed earlier and the
            # outer loop rebinds it), but worth renaming.
            for sample in nd[key]['samples']:
                f.write(" '{}' : '{}', \n".format(sample, nd[key]['samples'][sample]))
            f.write(" {} \n".format("}"))
            f.write("{}\n".format("}"))
            f.write("\n\n")
        f.close()
| GiacomoBoldrini/D6tomkDatacard | makeDummies.py | makeDummies.py | py | 25,816 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sys.exit",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_num... |
12078242829 | import jittor as jt
from jittor import init
import math
from os.path import join as pjoin
from collections import OrderedDict
from jittor import nn
def np2th(weights, conv=False):
    """Wrap a numpy weight array in a jittor float32 Var, transposing conv
    kernels from TF's HWIO layout to the OIHW layout used here when *conv*
    is True."""
    if conv:
        weights = weights.transpose([3, 2, 0, 1])
    return jt.float32(weights)
class StdConv2d(nn.Conv):
    """Conv2d with Weight Standardization: the kernel is normalized to zero
    mean and unit (biased) variance over its in-channel/spatial axes before
    each convolution."""
    def execute(self, x):
        w = self.weight
        # (v, m) = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        # jittor has no var_mean, so the biased variance is computed by hand.
        m = jt.mean(w, dims=(1, 2, 3), keepdims=True)
        v = jt.mean((w - m) ** 2, dims=(1,2,3), keepdims=True)
        # 1e-05 guards against division by zero for near-constant kernels.
        w = ((w - m) / jt.sqrt((v + 1e-05)))
        return nn.conv2d(x, w, self.bias, self.stride, self.padding, self.dilation, self.groups)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    """3x3 weight-standardized convolution with padding=1 ("same" at stride 1)."""
    return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups)
def conv1x1(cin, cout, stride=1, bias=False):
    """1x1 weight-standardized convolution (channel projection)."""
    return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias)
class PreActBottleneck(nn.Module):
    """Pre-activation (v2) bottleneck block.

    conv1x1 -> GN -> ReLU -> conv3x3 -> GN -> ReLU -> conv1x1 -> GN, added to
    a (possibly projected) residual and passed through a final ReLU.
    """
    def __init__(self, cin, cout=None, cmid=None, stride=1):
        super().__init__()
        # Defaults: output width = input width; bottleneck width = cout / 4.
        cout = (cout or cin)
        cmid = (cmid or (cout // 4))
        self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06, affine=None)
        self.conv1 = conv1x1(cin, cmid, bias=False)
        self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06, affine=None)
        self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
        self.gn3 = nn.GroupNorm(32, cout, eps=1e-06, affine=None)
        self.conv3 = conv1x1(cmid, cout, bias=False)
        self.relu = nn.ReLU()
        # Project the residual whenever the shape changes (stride or width).
        if ((stride != 1) or (cin != cout)):
            self.downsample = conv1x1(cin, cout, stride, bias=False)
            self.gn_proj = nn.GroupNorm(cout, cout, affine=None)
    def execute(self, x):
        residual = x
        if hasattr(self, 'downsample'):
            residual = self.downsample(x)
            residual = self.gn_proj(residual)
        y = nn.relu(self.gn1(self.conv1(x)))
        y = nn.relu(self.gn2(self.conv2(y)))
        y = self.gn3(self.conv3(y))
        y = nn.relu((residual + y))
        return y
    def load_from(self, weights, n_block, n_unit):
        """Copy pretrained weights (a flat dict of numpy arrays keyed by
        TF-style paths under n_block/n_unit) into this block's parameters."""
        conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')], conv=True)
        conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')], conv=True)
        conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')], conv=True)
        gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
        gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
        gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
        gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
        gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
        gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
        # Direct attribute assignment replaces the torch .copy_ calls below.
        # self.conv1.weight.copy_(conv1_weight)
        # self.conv2.weight.copy_(conv2_weight)
        # self.conv3.weight.copy_(conv3_weight)
        # self.gn1.weight.copy_(gn1_weight.view((- 1)))
        # self.gn1.bias.copy_(gn1_bias.view((- 1)))
        # self.gn2.weight.copy_(gn2_weight.view((- 1)))
        # self.gn2.bias.copy_(gn2_bias.view((- 1)))
        # self.gn3.weight.copy_(gn3_weight.view((- 1)))
        # self.gn3.bias.copy_(gn3_bias.view((- 1)))
        self.conv1.weight = (conv1_weight)
        self.conv2.weight = (conv2_weight)
        self.conv3.weight = (conv3_weight)
        self.gn1.weight = (gn1_weight.view((- 1)))
        self.gn1.bias = (gn1_bias.view((- 1)))
        self.gn2.weight = (gn2_weight.view((- 1)))
        self.gn2.bias = (gn2_bias.view((- 1)))
        self.gn3.weight = (gn3_weight.view((- 1)))
        self.gn3.bias = (gn3_bias.view((- 1)))
        if hasattr(self, 'downsample'):
            proj_conv_weight = np2th(weights[pjoin(n_block, n_unit, 'conv_proj/kernel')], conv=True)
            proj_gn_weight = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/scale')])
            proj_gn_bias = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/bias')])
            # self.downsample.weight.copy_(proj_conv_weight)
            # self.gn_proj.weight.copy_(proj_gn_weight.view((- 1)))
            # self.gn_proj.bias.copy_(proj_gn_bias.view((- 1)))
            self.downsample.weight = (proj_conv_weight)
            self.gn_proj.weight = (proj_gn_weight.view((- 1)))
            self.gn_proj.bias = (proj_gn_bias.view((- 1)))
class ResNetV2(nn.Module):
    """Pre-activation (v2) ResNet backbone that also collects per-stage skip
    features for a U-Net style decoder."""
    def __init__(self, block_units, width_factor):
        super().__init__()
        # Base channel width, scaled by the BiT width factor.
        width = int((64 * width_factor))
        self.width = width
        # Stem: 7x7 stride-2 weight-standardized conv + GroupNorm + ReLU.
        self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn.GroupNorm(32, width, eps=1e-06, affine=None)), ('relu', nn.ReLU())]))
        # Three stages of PreActBottleneck units; block_units gives the unit
        # count per stage, stages 2 and 3 downsample with stride 2.
        self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(OrderedDict(([('unit1', PreActBottleneck(cin=width, cout=(width * 4), cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=(width * 4), cout=(width * 4), cmid=width)) for i in range(2, (block_units[0] + 1))])))), ('block2', nn.Sequential(OrderedDict(([('unit1', PreActBottleneck(cin=(width * 4), cout=(width * 8), cmid=(width * 2), stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=(width * 8), cout=(width * 8), cmid=(width * 2))) for i in range(2, (block_units[1] + 1))])))), ('block3', nn.Sequential(OrderedDict(([('unit1', PreActBottleneck(cin=(width * 8), cout=(width * 16), cmid=(width * 4), stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=(width * 16), cout=(width * 16), cmid=(width * 4))) for i in range(2, (block_units[2] + 1))]))))]))
    def execute(self, x):
        # Collected skip features, one per stage (stem output first).
        features = []
        (b, c, in_size, _) = x.shape
        x = self.root(x)
        features.append(x)
        x = nn.Pool(3, stride=2, padding=0, op='maximum')(x)
        for i in range((len(self.body) - 1)):
            x = self.body[i](x)
            # Expected spatial size after stage i.  NOTE(review): the
            # (in_size/4)/(i+1) formula matches /4 and /8 only for the first
            # two stages — confirm before adding more stages.
            right_size = int(((in_size / 4) / (i + 1)))
            if (x.shape[2] != right_size):
                # Zero-pad bottom/right when the unpadded max-pool shrank the
                # map by a pixel or two.
                pad = (right_size - x.shape[2])
                assert ((pad < 3) and (pad > 0)), 'x {} should {}'.format(x.shape, right_size)
                # feat = jt.zeros((b, x.shape[1], right_size, right_size), device=x.device)
                feat = jt.zeros((b, x.shape[1], right_size, right_size))
                feat[:, :, 0:x.shape[2], 0:x.shape[3]] = x[:]
            else:
                feat = x
            features.append(feat)
        x = self.body[(- 1)](x)
return (x, features[::(- 1)]) | THU-CVlab/JMedSeg | model/TransUNet/vit_seg_modeling_resnet_skip.py | vit_seg_modeling_resnet_skip.py | py | 6,666 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "jittor.float32",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jittor.nn.Conv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "jittor.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "jittor.mean",
"line_... |
8662219899 | from django.core.management.base import BaseCommand
from django.utils.crypto import get_random_string
from django.utils import timezone
from user_paste.models import User, Post
import datetime
import string
class Command(BaseCommand):
help = 'Generates fake data for a local sqlite database'
def add_arguments(self, parser):
parser.add_argument('--num_users', type=int, required=True)
parser.add_argument('--num_user_posts', type=int, required=True)
def handle(self, *args, **options):
created_date = timezone.now() - datetime.timedelta(weeks=100)
for i in range(options['num_users']):
user_name = get_random_string(20, string.ascii_letters + string.digits)
user = User.objects.create(user_name=user_name)
for i in range(options['num_user_posts']):
created_date += datetime.timedelta(days=1)
post_content = '''Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'''
Post(post_title = user.user_name + f'post{i}',
post_author = user,
post_content = post_content,
post_description = 'Lorem ipsum dolor sit amet, consectetur adipiscing',
post_category = 'Plain Text',
post_type = 'Notes',
post_created_date = created_date).save()
| LoganHodgins/Pasta-Paste | user_paste/management/commands/gen_localdb.py | gen_localdb.py | py | 1,899 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 16,
"usage_type": "name"
},
{... |
6619883045 | import pytesseract
import os
import sys
from PIL import Image
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.pagesizes import A4
import fitz
import shutil
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QMessageBox
class Form(QMainWindow):
def __init__(self,parent=None):
super().__init__(parent)
getFileNameButton = QPushButton("Выбрать файл")
getFileNameButton.clicked.connect(self.getFileName)
getFileNameButton.setFixedSize(160,160)
layoutV = QVBoxLayout()
layoutV.addWidget(getFileNameButton)
layoutH = QHBoxLayout()
layoutH.addLayout(layoutV)
centerWidget = QWidget()
centerWidget.setLayout(layoutH)
self.setCentralWidget(centerWidget)
self.resize(200,200)
self.setWindowTitle("PdfReader")
def getFileName(self):
filename, filetype = QFileDialog.getOpenFileName(self,
"Выбрать файл",
".",
"PDF Files(*.pdf)")
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
pdf_document = fitz.open(filename)
try:
os.mkdir("imagepdf")
except OSError:
None
os.chdir("imagepdf")
for current_page in range(len(pdf_document)):
for image in pdf_document.getPageImageList(current_page):
xref = image[0]
pix = fitz.Pixmap(pdf_document, xref)
if pix.n < 5: # this is GRAY or RGB
pix.writePNG("%s.png" % (current_page))
else: # CMYK: convert to RGB first
pix1 = fitz.Pixmap(fitz.csRGB, pix)
pix1.writePNG("%s.png" % (current_page))
pix1 = None
pix = None
os.chdir('..')
c = canvas.Canvas("Результат.pdf", pagesize=A4)
for k in range(len(pdf_document)):
os.chdir("imagepdf")
img = Image.open(str(k) + '.png')
os.chdir('..')
custom_config = r'--oem 3 --psm 6'
text = pytesseract.image_to_string(img, lang='rus', config= custom_config)
with open('Text.txt', 'w', encoding="cp1251") as text_file:
text_file.write(text)
sumtext = sum(1 for line in open('Text.txt'))
l = []
pdfmetrics.registerFont(TTFont('FreeSans', 'FreeSans.ttf'))
c.setFont('FreeSans', 14)
i = 29.0
with open('Text.txt', 'r+') as sumtext:
for l in sumtext:
i = i - 0.5
c.drawString(0.5 * cm, i * cm, l.rstrip())
if os.path.isfile('Text.txt'):
os.remove('Text.txt')
else: print("File doesn't exists!")
c.showPage()
m = k + 1
if m == len(pdf_document):
QMessageBox.about(self, "Выполнено", "Выполнено")
c.save()
shutil.rmtree("imagepdf") #удалить директорию с фото
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Form()
ex.show()
sys.exit(app.exec_())
| bydmak/pdfconvertpdf | main.py | main.py | py | 3,708 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytesseract.pytesseract",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "fitz.open",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_... |
10785625199 | # coding:utf-8
"""
@file: .py
@author: dannyXSC
@ide: PyCharm
@createTime: 2022年05月04日 21点47分
@Function: 请描述这个py文件的作用
"""
from Modal.Affiliation import Affiliation
from py2neo import Graph, NodeMatcher, Node
class AffiliationRepo:
label = "Affiliation"
def __init__(self):
pass
@staticmethod
def create_affiliation_check(graph: Graph, affiliation: Affiliation):
node_matcher = NodeMatcher(graph)
node = node_matcher.match(AffiliationRepo.label, name=affiliation.name).first()
if node is None:
node = AffiliationRepo.create_affiliation(graph, affiliation)
return node
@staticmethod
def create_affiliation(graph: Graph, affiliation: Affiliation):
node = Node(AffiliationRepo.label, name=affiliation.name)
graph.create(node)
graph.push(node)
return node
@staticmethod
def get_affiliation_by_name(graph: Graph, name: str) -> Node:
node_matcher = NodeMatcher(graph)
return node_matcher.match(AffiliationRepo.label, name=name).first()
@staticmethod
def get_all_affiliation_dict(graph: Graph) -> dict:
cql = "match (n:Affiliation) return (n);"
nodes = [x["n"] for x in graph.run(cql).data()]
return {x["name"]: x for x in nodes}
@staticmethod
def get_all_affiliation_name(graph: Graph) -> set:
cql = "match (n:Affiliation) return (n.name);"
return set(x["(n.name)"] for x in graph.run(cql).data())
@staticmethod
def to_neo4j_node(affiliation: Affiliation):
return Node(AffiliationRepo.label, **affiliation.to_dict())
| dannyXSC/BusinessIntelligence | ETL/Repository/AffiliationRepo.py | AffiliationRepo.py | py | 1,653 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "py2neo.Graph",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "Modal.Affiliation.Affiliation",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "py2neo.NodeMatcher",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "py2neo.... |
20828422696 | from projects.models import *
from communities.models import *
from users.models import MossaicUser
from risk_models.models import *
from django import forms
from django.forms.models import inlineformset_factory
from django.forms.models import modelformset_factory
from django.forms.models import BaseInlineFormSet
from django.forms import ModelForm, Textarea
from django.forms.widgets import HiddenInput
class NewModelForm(forms.Form):
project_name = forms.CharField()
class NewMMLForm(forms.Form):
new_links = forms.CharField(widget=forms.TextInput(attrs={'class':'tokenized ajaxurl-ajax-metrics'}))
class MetricForm(ModelForm):
class Meta:
model = Metric
widgets = {
'project': HiddenInput,
# 'metricType': RadioSelect,
}
class MCScoreForm(ModelForm):
class Meta:
model = MCScore
widgets = {
'option': HiddenInput,
'modelMetricLink': HiddenInput,
}
MCScoreFormSet = inlineformset_factory(ModelMetricLink,MCScore,form=MCScoreForm,extra=0,can_order=False,can_delete=False)
class ModelElementForm(ModelForm):
class Meta:
model = ModelMetricLink
widgets = {
'metric': HiddenInput
}
def save(self, *args, **kwargs):
super(ModelElementForm, self).save(*args, **kwargs)
if hasattr(self,'nested'):
self.nested.save()
def is_valid(self,*args, **kwargs):
if hasattr(self,'nested'):
return super(ModelElementForm, self).is_valid(*args, **kwargs) and self.nested.is_valid()
else:
return super(ModelElementForm, self).is_valid(*args, **kwargs)
def has_changed(self, *args, **kwargs):
has_changed = super(ModelElementForm, self).has_changed(*args, **kwargs)
if hasattr(self,'nested'):
for form in self.nested.forms:
has_changed = has_changed or form.has_changed
return has_changed
def __init__(self, data=None, *args, **kwargs):
super(ModelElementForm, self).__init__(data=data, *args, **kwargs)
if self.instance.metric.metricType == 'M':
self.nested = MCScoreFormSet(instance=self.instance,prefix="C%s" % self.instance.pk, data=data)
ChoiceFormSet = inlineformset_factory(Metric,MCOption,can_order=True,can_delete=True)
RiskModelFormset = inlineformset_factory(RiskModel, ModelMetricLink, form=ModelElementForm, extra=0,can_delete=True) | parauchf/mossaic | risk_models/forms.py | forms.py | py | 2,232 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.forms.Form",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.for... |
3789596757 | from django.shortcuts import render, HttpResponse, redirect
# HttpResponse = texto / redirect = redirecciones
from miapp.models import Article # Para usar modelos
from django.db.models import Q # Para usar OR en consultas
from miapp.forms import FormArticle # Para usar la clase formulario
from django.contrib import messages # Para usar msj flash
# Create your views here.
# MVC = Modelo Vista Controlador -> Acciones (métodos)
# MVT = Modelo Template Vista -> Acciones (métodos)
# MVT = MVC, la Vista es Template y Controlador es Vista
# Menú con hipervínculos
layout = """
<h1>Sitio web con Django | Jesús Brito</h1>
<hr/>
<ul>
<li>
<a href="/inicio">Inicio</a>
</li>
<li>
<a href="/hola-mundo">Hola Mundo</a>
</li>
<li>
<a href="/pagina-pruebas">Página de pruebas</a>
</li>
<li>
<a href="/contacto-dos">Contacto</a>
</li>
</ul>
<hr/>
"""
# Estamos en MVT usando 3 Vistas
def index(request): # Inicio
"""
html = ""
<h1>Inicio</h1>
<p>Años hasta el 2050:</p>
<ul>
""
# Demostrando que se puede usar while y if en django
year = 2021
while year <= 2050:
if year % 2 == 0:
html += f"<li>{str(year)}</li>"
year += 1
html += "</ul>"
"""
# Ciclo for en la plantilla
year = 2021
hasta = range(year, 2051)
# Recomendable siempre crear las variables en la vista y no en los templates
nombre = 'Jesús Brito'
lenguajes = ['JavaScript', 'Python', 'PHP', 'C']
#return HttpResponse(layout+html)
#return render(request, 'index.html')
# Pasar datos desde la vista y mostrarlos en la plantilla
return render(request, 'index.html', {
'title': 'Inicio 2',
'mi_variable': 'Soy un dato que esta en la vista',
'nombre': nombre,
'lenguajes': lenguajes,
'years': hasta
})
def hola_mundo(request):# es un párametro que permite recibir datos de peticiones a esta url
#return HttpResponse(layout+"""
# <h1>Hola mundo con Django!!</h1>
# <h3>Soy Jesús Brito WEB</h3>
#""")
return render(request, 'hola_mundo.html')
def pagina(request, redirigir = 0): # pagina de pruebas
if redirigir == 1:
#return redirect('/inicio/') # Redirecciona
#return redirect('/contacto/Jesús/Brito/')
return redirect('contacto', nombre="Jesús", apellidos="Brito")
# Ventaja: Al usar "name" de urlpatterns, redirecciona a pesar de cambiar la url
# (En este caso la cambiamos de contacto/ a contacto-dos/)
#return HttpResponse(layout+"""
# <h1>Página de mi web</h1>
# <p>Creado por Jesús Brito</p>
#""")
#return render(request, 'pagina.html')
return render(request, 'pagina.html', {
'texto': 'Este es mi texto',
'lista': ['uno', 'dos', 'tres']
})
def contacto(request, nombre="", apellidos=""):
html = ""
if nombre and apellidos: # Parámetro opcional
html += "<p>El nombre completo es: </p>"
html += f"<h3>{nombre} {apellidos}</h3>"
return HttpResponse(layout+f"<h2>Contacto</h2>"+html)
def crear_articulo(request, title, content, public):
# Crear modelo Article
"""
articulo = Article(
title = 'Primer articulo!!',
content = 'Contenido del articulo',
public = True
)"""
# Crear modelo Article usando parámetros de la url (propiedadClase / parámetroURL)
articulo = Article(
title = title,
content = content,
public = public
)
# Guardar datos en la BD usando modelo
articulo.save()
return HttpResponse(f"Articulo creado: <strong>{articulo.title}</strong> - {articulo.content}")
def save_article(request): # Devuelve msj
# Comprobar si nos llegan datos por GET
#if request.method == 'GET':
# Comprobar si nos llegan datos por POST
# (Es más seguro xq no muestra el guardado en el url)
if request.method == 'POST':
# Crear variables para recibir datos
title = request.POST['title']
content = request.POST['content']
public = request.POST['public']
# Validar titulo
if len(title) <= 5:
return HttpResponse("El titulo es muy pequeño")
# Crear modelo Article usando parámetros de la url (propiedadClase / parámetroURL)
articulo = Article(
title = title,
content = content,
public = public
)
# Guardar datos en la BD usando modelo
articulo.save()
return HttpResponse(f"Articulo creado: <strong>{articulo.title}</strong> - {articulo.content}")
else:
return HttpResponse("<h2>No se ha podido crear el articulo</h2>")
def create_article(request): # Devuelve a pagina
return render(request, 'create_article.html')
def create_full_article(request): # Redirecciona a pagina 'articulos', sino devuelve formulario vacio
# Comprobar si nos llegan datos por POST
# (Es más seguro xq no muestra el guardado en el url)
if request.method == 'POST':
# request.POST limpia y valida para acceder de mejor manera a los datos
formulario = FormArticle(request.POST)
if formulario.is_valid(): # Si formulario es valido
data_form = formulario.cleaned_data # Llegan los datos limpios
# Recoger datos
title = data_form.get('title')
content = data_form['content']
public = data_form['public']
# Crear modelo Article usando parámetros de la url (propiedadClase / parámetroURL)
articulo = Article(
title = title,
content = content,
public = public
)
# Guardar datos en la BD usando modelo
articulo.save()
# Crear mensaje flash (Sesión que solo se muestra una vez)
# Msj de guardado correcto
messages.success(request, f'Has creado correctamente el articulo {articulo.id}')
# Redireccion a otra pagina
return redirect('articulos')
# Devuelve datos
#return HttpResponse(articulo.title + ' - ' + articulo.content + ' - ' + str(articulo.public))
else:
# Crea objeto de la clase "FormArticle"
formulario = FormArticle() # Genera formulario vacio
return render(request, 'create_full_article.html', {
'form': formulario
})
def articulo(request): # Sacar datos y elementos de la base de datos
# Accede/saca objeto del modelo
try:
#articulo = Article.objects.get(id=8)
#articulo = Article.objects.get(pk=8)
articulo = Article.objects.get(title="Superman", public=False) # Cumplirse los 2 parámetros
response = f"Articulo: <br/> {articulo.id}. {articulo.title}"
except:
response = "<h1>Articulo no encontrado<h1/>"
return HttpResponse(response)
def editar_articulo(request, id):
# Selecciona articulo con el id escogido por el usuario
articulo = Article.objects.get(pk=id)
# Actualiza registro
articulo.title = "Batman"
articulo.content = "Pelicula del 2017"
articulo.public = True
# Guarda edición del registro
articulo.save()
return HttpResponse(f"Articulo {articulo.id} editado: <strong>{articulo.title}</strong> - {articulo.content}")
def articulos(request):
# Selecciona todos los articulos
#articulos = Article.objects.all()
# Ordena por orden númerico
#articulos = Article.objects.order_by('id')
# Ordena todos los articulos publicados por orden númerico inverso
articulos = Article.objects.filter(public=True).order_by('-id')
# Ordena por orden alfabetico
#articulos = Article.objects.order_by('title')
# Ordena por orden alfabetico inverso
#articulos = Article.objects.order_by('-title')
# Limite de x elementos
#articulos = Article.objects.order_by('id')[:3]
# Limite de x hasta x elementos
#articulos = Article.objects.order_by('id')[3:8]
# Consultas con condiciones, filter y lookups
# Que cumpla 2 condiciones
#articulos = Article.objects.filter(title="Batman", id=8)
# Que contenga elemento
#articulos = Article.objects.filter(title__contains="articulo")
# Que elemento sea exacto (incluyendo mayusculas y minusculas)
#articulos = Article.objects.filter(title__exact="articulo")
# Que elemento sea exacto (excluyendo mayusculas y minusculas)
#articulos = Article.objects.filter(title__iexact="articulo")
# Que id sea mayor a... con greater than (__gt)
#articulos = Article.objects.filter(id__gt=11)
# Que id sea mayor o igual a... con greater than (__gte)
#articulos = Article.objects.filter(id__gte=11)
# Que id sea menor a... con lest than (__lt)
#articulos = Article.objects.filter(id__lt=12)
# Que id sea menor o igual a... con lest than (__lte)
#articulos = Article.objects.filter(id__lte=12)
# Que id sea menor o igual a... y contenga elemento
#articulos = Article.objects.filter(id__lte=12, title__contains="2")
# Consultas con exclude
"""
articulos = Article.objects.filter(
title="Articulo",
public=True
) # Tabulacion solo para demostrar q puede ser como uno quiera
"""
"""
articulos = Article.objects.filter(
title="Articulo"
).exclude(
public=False
)"""
# Consultas con OR
# Que contenga elemento "2" o sea "publico"
"""
articulos = Article.objects.filter(
Q(title__contains="2") | Q(public=True)
)"""
# Consultas con SQL (Sirve por si no sabe hacer consultas con django como arriba)
#articulos = Article.objects.raw("SELECT * FROM miapp_article WHERE title='Articulo 2' AND public=0")
"""Nota: Se puede seleccionar un atributo de la tabla (junto con id q es obligatorio)
Ejm. SELECT id, title para sacar solo titulo
(es necesario borrar los demas atributos que no estan en el select en articulos.html)
Recomendable usar las consultas de arriba que son con Django xq si llegas a cambiar
de base de datos la consulta siempre sera igual (Django se encarga de ejecutar el SQL)
"""
#return HttpResponse(articulos) # Comprueba que existe un listado de articulos
return render(request, 'articulos.html', {
'articulos': articulos
})
def borrar_articulo(request, id):
# Selecciona articulo con el id escogido por el usuario
articulo = Article.objects.get(pk=id)
# Elimina registro del articulo
articulo.delete()
return redirect('articulos') # parámetro "name" del fichero "urls.py" | jesusbritomolina/Master-Python | 22-django/AprendiendoDjango/miapp/views.py | views.py | py | 10,896 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 86,
"usage_type": "call"
},
{
"api_nam... |
38784164623 | from flask import Flask, render_template, request
import requests
from flask_fontawesome import FontAwesome
import folium
import csv
from folium.plugins import HeatMap
import datetime
from flask import Response
import statistics
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import io
#create venv for flask
#https://stackoverflow.com/questions/31252791/flask-importerror-no-module-named-flask
#running prgram after installation
#cd flask
#source bin/activate
#FLASK_APP=hello.py flask run
# open traffic collisions dataset
def loadList(fileName):
with open(fileName,newline='') as csv_file:
reader = csv.reader(csv_file)
dataList = list(reader)
return dataList
automobile = loadList('/Users/andrew.hua/Desktop/Y10 Coding/Automobile.csv')
tvolume = loadList("/Users/andrew.hua/Desktop/Y10 Coding/traffic-volumes-data.csv")
# initiating flask, creating webpages
plt.rcParams["figure.autolayout"] = True
app = Flask(__name__)
fa = FontAwesome(app)
@app.route("/directions", methods = ["POST", "GET"])
def get_directions():
if request.method == 'GET':
return f"The URL /data is accessed directly. Try going to '/form' to submit form"
if request.method == 'POST':
global starting
global ending
starting = request.form.get('startpoint')
ending = request.form.get("endpoint")
print(starting)
print(ending)
# working with directions api
start = starting
end = ending
rawstart = start
rawend = end
start = start.replace(" ", "+")
end = end.replace(" ", "+")
#print(start)
#print(end)
directionslist = []
endpoints = []
waypoints = []
requesturl = "https://maps.googleapis.com/maps/api/directions/json?origin=" + start + "&destination=" + end + "&key=AIzaSyAbskEvIMBcbppePATVCTLwVf31gxXXq9w"
#print(requesturl)
apirequest = requests.get(requesturl).json()
# get info on distance, duration, and directions
distance = apirequest['routes'][0]['legs'][0]["distance"]["text"]
duration = apirequest['routes'][0]['legs'][0]["duration"]["text"]
myList = []
myList.append(apirequest['routes'][0]['legs'][0]["start_location"]["lat"])
myList.append(apirequest['routes'][0]['legs'][0]["start_location"]["lng"])
waypoints.append(myList)
for i in range(0,len(apirequest['routes'][0]['legs'][0]['steps'])):
myList=[]
step = apirequest['routes'][0]['legs'][0]['steps'][i]['html_instructions']
endpoints.append(apirequest['routes'][0]['legs'][0]['steps'][i]['end_location'])
myList.append(apirequest['routes'][0]['legs'][0]['steps'][i]['end_location']["lat"])
myList.append(apirequest['routes'][0]['legs'][0]['steps'][i]['end_location']["lng"])
waypoints.append(myList)
nonocharlist = ["<b>", "</b>", """<div style="font-size:0.9em">""", "</div>", "<wbr/>"]
for element in nonocharlist:
if element == """<div style="font-size:0.9em">""":
step = step.replace(element," ")
else:
step = step.replace(element, "")
directionslist.append(step)
#print(directionslist)
#print(waypoints)
# working with traffic volume api
# traffic volume
# traffic volume
# traffic volume
# traffic volume
# traffic volume
# traffic volume
# traffic volume
# traffic volume
# traffic volume
color = []
for j in range(len(directionslist)):
trafficurl = "https://api.tomtom.com/traffic/services/4/flowSegmentData/absolute/10/json?key=1SjA5xJYjygfrzY76gBLnYwAKkNy8cHW&point=" + str(endpoints[j]['lat']) + "," + str(endpoints[j]['lng'])
#print(trafficurl)
trafficapi = requests.get(trafficurl).json()
#print(trafficapi['flowSegmentData']['currentTravelTime']/trafficapi['flowSegmentData']['freeFlowTravelTime'])
if trafficapi['flowSegmentData']['currentTravelTime']/trafficapi['flowSegmentData']['freeFlowTravelTime'] < 0.6:
color.append("red")
elif trafficapi['flowSegmentData']['currentTravelTime']/trafficapi['flowSegmentData']['freeFlowTravelTime'] < 0.9:
color.append("yellow")
else:
color.append("white")
#color = ['white', 'red', 'yellow', 'white', 'white']
#for testing purposes
print(color)
return render_template('directionswebsite.html', content=directionslist, volume=color, distancetravelled = distance, timetaken = duration, begin = rawstart, destination = rawend)
@app.route("/")
def form():
return render_template('form.html')
@app.route("/heatmap")
def fetch_heatmap():
basemap = folium.Map(location=[43.6532, -79.3832], control_scale = False, zoom_start=13)
heat = HeatMap(data=coords,radius=14)
heat.add_to(basemap)
return basemap._repr_html_()
@app.route("/averagetraffic")
def fetch_averagebargraph():
graph = Figure()
axis = graph.add_subplot(1, 1, 1)
axis.bar(days, averages)
# below is taken from tutorialspoint
# compatability with flask
output = io.BytesIO()
FigureCanvas(graph).print_png(output)
return Response(output.getvalue(), mimetype='image/png')
@app.route("/mediantraffic")
def fetch_medianbargraph():
graph = Figure()
axis = graph.add_subplot(1, 1, 1)
axis.bar(days, medians)
# below is taken from tutorialspoint
# compatability with flask
output = io.BytesIO()
FigureCanvas(graph).print_png(output)
return Response(output.getvalue(), mimetype='image/png')
# create readable data points for heatmap
coords = []
for i in range(1,len(automobile)):
a=[]
if automobile[i][4] == '2019' or automobile[i][4] == '2018' or automobile[i][4] == '2017':
a.append(automobile[i][15]) # lat
a.append(automobile[i][16]) # long
coords.append(a)
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
totals = [0,0,0,0,0,0,0]
count = [0,0,0,0,0,0,0]
averages = [0,0,0,0,0,0,0]
mediantotal = [[],[],[],[],[],[],[]]
medians = []
for i in range(1,len(tvolume)):
countdate = tvolume[i][8]
month = int(countdate[:2])
day = int(countdate[3:5])
year = int(countdate[6:])
countdate = datetime.date(year, month, day)
dayofweek = countdate.strftime("%a")
for element in days: # iterate through each day of the week
if dayofweek == element: # find a matching day of week
totals[days.index(element)] = totals[days.index(element)] + int(tvolume[i][9]) # add corresponding traffic volume count
count[days.index(element)] += 1
mediantotal[days.index(element)].append(int(tvolume[i][9]))
for i in range(len(averages)):
averages[i] = totals[i]/count[i]
for item in mediantotal:
myMedian = statistics.median(item)
medians.append(myMedian) | andrew-hua/Y10Coding | trafficprogramfiles/hello.py | hello.py | py | 7,155 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.reader",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "flask.Fl... |
33879492913 | import sys, time
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.backends.backend_pdf import PdfPages
import h5py
from combined_model import CombinedInterpolator
from spi.comparison_models import PiecewiseC3K
from spi.utils import dict_struct, within_bounds
from spi.plotting import get_stats, quality_map, bias_variance, specpages, write_results
from combined_params import bounds, features, pad_bounds
showlines = {'CO': (2.26, 2.35),
'CaT': (0.845, 0.870),
'Feh': (0.980, 1.0),
'NaD': (0.580, 0.596),
r'H$\beta$': (0.482, 0.492),
'NaI': (0.816, 0.824)}
def get_interpolator(mlib='', regime='', c3k_weight=1e-1, snr_max=1e3,
fake_weights=False, padding=True, mask_mann=True, **kwargs):
"""
"""
# --- The PSI Model ---
psi = CombinedInterpolator(training_data=mlib, c3k_weight=c3k_weight,
unweighted=False, snr_max=snr_max, logify_flux=True)
# renormalize by bolometric luminosity
psi.renormalize_library_spectra(bylabel='luminosity')
# Use fake, constant SNR for all the MILES spectra
if fake_weights:
g = psi.library_snr > 0
psi.library_snr[g] = 100
# mask the Mann mdwarf stars for now
if mask_mann:
mann = np.where(psi.library_labels['miles_id'] == 'mdwarf')[0]
psi.leave_out(mann)
#c3k = np.where(psi.library_labels['miles_id'] == 'c3k')[0]
# Choose parameter regime and features
if padding:
b = pad_bounds(bounds[regime], **kwargs)
else:
b = bounds[regime]
psi.restrict_sample(bounds=b)
psi.features = features[regime]
return psi
def leave_one_out(psi, loo_indices, retrain=True, **extras):
""" --- Leave-one-out ----
"""
# build output arrays
predicted = np.zeros([len(loo_indices), psi.n_wave])
inhull = np.zeros(len(loo_indices), dtype=bool)
if not retrain:
cinside = psi.remove_c3k_inside()
psi.train()
inhull = psi.inside_hull(psi.library_labels[loo_indices])
psi.library_mask[cinside] = True
# Loop over spectra to leave out and predict
for i, j in enumerate(loo_indices):
if (i % 10) == 0: print('{} of {}'.format(i, len(loo_indices)))
# Get full sample and the parameters of the star to leave out
spec = psi.library_spectra[j, :]
labels = dict_struct(psi.library_labels[j])
#labels = dict([(n, tlabels[n]) for n in psi.label_names])
# Leave one out and re-train
if retrain:
psi.library_mask[j] = False
c3k_inside = psi.remove_c3k_inside()
inhull[i] = psi.inside_hull(labels)
psi.train()
predicted[i, :] = psi.get_star_spectrum(**labels)
# now put it back
if retrain:
psi.library_mask[j] = True
psi.library_mask[c3k_inside] = True
return psi, predicted, inhull
def loo(regime='Warm Giants', outroot=None, nbox=-1, plotspec=True, **kwargs):
"""
"""
if outroot is None:
pdict= {'regime': regime.replace(' ','_'),
'unc': not kwargs['fake_weights']}
pdict.update(**kwargs)
outroot = '{regime}_unc={unc}_cwght={c3k_weight:04.3f}'.format(**pdict)
# --- Build models ----
psi = get_interpolator(regime=regime, **kwargs)
clibname = '/Users/bjohnson/Codes/SPS/ckc/ckc/lores/irtf/ckc14_irtf.flat.h5'
c3k_model = PiecewiseC3K(libname=clibname, use_params=['logt', 'logg', 'feh'],
verbose=False, n_neighbors=1, log_interp=True,
rescale_libparams=False, in_memory=True)
# --- Leave-one-out retraining ---
ts = time.time()
# These are the indices in the full library of the training spectra
loo_indices = psi.training_indices.copy()
# Only leave out MILES
miles = psi.training_labels['miles_id'] != 'c3k'
loo_indices = loo_indices[miles]
# Now do the leave out, with or without retraining
psi, predicted, inhull = leave_one_out(psi, loo_indices, **kwargs)
print('time to retrain {} models: {:.1f}s'.format(len(loo_indices), time.time()-ts))
# --- Useful arrays and Stats ---
labels = psi.library_labels[loo_indices]
# Keep track of whether MILES stars in padded region
inbounds = within_bounds(bounds[regime], labels)
wave = psi.wavelengths.copy()
observed = psi.library_spectra[loo_indices, :]
obs_unc = observed / psi.library_snr[loo_indices, :]
snr = observed / obs_unc
bias, variance, chisq = get_stats(wave, observed[inbounds,:],
predicted[inbounds,:], snr[inbounds,:], **kwargs)
sigma = np.sqrt(variance)
# --- Write output ---
psi.dump_coeffs_ascii('{}_coeffs.dat'.format(outroot))
write_results(outroot, psi, bounds[regime],
wave, predicted, observed, obs_unc, labels, **kwargs)
# --- Make Plots ---
# Plot the bias and variance spectrum
sfig, sax = bias_variance(wave, bias, sigma, qlabel='\chi')
sax.set_ylim(max(-100, min(-1, np.nanmin(sigma[100:-100]), np.nanmin(bias[100:-100]))),
min(1000, max(30, np.nanmax(bias[100:-100]), np.nanmax(sigma[100:-100]))))
sfig.savefig('{}_biasvar.pdf'.format(outroot))
# Plot a map of total variance as a function of label
quality, quality_label = np.log10(chisq), r'$log \, \chi^2$'
mapfig, mapaxes = quality_map(labels[inbounds], quality, quality_label=quality_label)
mapfig.savefig('{}_qmap.pdf'.format(outroot))
if plotspec:
# plot full SED
filename = '{}_sed.pdf'.format(outroot)
fstat = specpages(filename, wave, predicted, observed, obs_unc, labels,
c3k_model=c3k_model, inbounds=inbounds, inhull=inhull,
showlines={'Full SED': (0.37, 2.5)}, show_native=False)
# plot zoom-ins around individual lines
filename = '{}_lines.pdf'.format(outroot)
lstat = specpages(filename, wave, predicted, observed, obs_unc, labels,
c3k_model=c3k_model, inbounds=inbounds, inhull=inhull,
showlines=showlines, show_native=True)
print('finished training and plotting in {:.1f}'.format(time.time()-ts))
return psi, loo_indices, predicted
def run_matrix(**run_params):
from itertools import product
nmiles = [78, 15, 68, 6, 35]
regimes = ['Hot Stars', 'Warm Giants', 'Warm Dwarfs', 'Cool Giants', 'Cool Dwarfs']
fake_weights = [ False]
c3k_weight = [1e-9, 1e-3, 1e-2]
for regime, wght, fake_unc in product(regimes, c3k_weight, fake_weights):
outroot = 'results/figures_v5b/{}_unc={}_cwght={:04.3f}'.format(regime.replace(' ','_'),
not fake_unc, wght)
_ = loo(regime=regime, c3k_weight=wght, fake_weights=fake_unc, outroot=outroot, **run_params)
if __name__ == "__main__":
try:
test = sys.argv[1] == 'test'
except(IndexError):
test = False
run_params = {'retrain': False,
'padding': True,
'tpad': 500.0, 'gpad': 0.25, 'zpad': 0.1,
'snr_max': 300,
'mask_mann': False,
'mlib': '/Users/bjohnson/Projects/spi/data/combined/culled_libv5_w_mdwarfs_w_unc_w_allc3k.h5',
'snr_threshold': 1e-10,
'nbox': -1,
}
if test:
print('Test mode')
psi, inds, pred = loo(regime='Warm Dwarfs', c3k_weight=1e-3, fake_weights=False,
outroot='test', **run_params)
else:
run_matrix(**run_params)
| bd-j/spi | demo/miles_irtf_c3k/loo_combined.py | loo_combined.py | py | 7,722 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "combined_model.CombinedInterpolator",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "combined_params.pad_bounds",
"line_number": 44,
"usage_type": "call"
},
{
"api_na... |
1969927426 | import os
import json
import argparse
import time
import logging
from bs4 import BeautifulSoup
from typing import Optional, Dict
from doc2txt.grobid2json.grobid.grobid_client import GrobidClient
from doc2txt.grobid2json.tei_to_json import convert_tei_xml_file_to_s2orc_json, convert_tei_xml_soup_to_s2orc_json
from doc2txt.json2txt.json2txt import process_json
# Default working directories (overridable via the CLI flags below).
BASE_TEMP_DIR = 'temp'
BASE_OUTPUT_DIR = 'output'

# Module-level logger named after this module.
log = logging.getLogger(__name__)
def process_pdf_stream(input_file: str, sha: str, input_stream: bytes, grobid_config: Optional[Dict] = None) -> Dict:
    """
    Convert an in-memory PDF into its S2ORC JSON representation.

    :param input_file: original file name (used for paper metadata only)
    :param sha: hash string identifying the paper
    :param input_stream: raw PDF bytes
    :param grobid_config: optional configuration dict for the Grobid client
    :return: S2ORC paper dict, as produced by ``release_json('pdf')``
    """
    # process PDF through Grobid -> TEI.XML
    client = GrobidClient(grobid_config)
    tei_text = client.process_pdf_stream(input_file, input_stream, 'temp', "processFulltextDocument")

    # parse the TEI XML response
    soup = BeautifulSoup(tei_text, "xml")

    # convert the TEI soup into an S2ORC paper object
    paper = convert_tei_xml_soup_to_s2orc_json(soup, input_file, sha)
    return paper.release_json('pdf')
def process_pdf_file(
        input_file: str,
        input_filename :str,
        temp_dir: str,
        output_dir: str,
        grobid_config: Optional[Dict] = None
) -> "tuple[str, str, str]":
    """
    Process a PDF file end to end: PDF -> TEI XML -> S2ORC JSON -> plain text.

    :param input_file: path to the input PDF
    :param input_filename: base name (no extension) used to build output names
    :param temp_dir: directory for the intermediate TEI XML file
    :param output_dir: directory for the JSON and TXT outputs
    :param grobid_config: optional configuration dict for the Grobid client
    :raises FileNotFoundError: if ``input_file`` does not exist
    :return: (tei output file, json output file, txt output file)
    """
    os.makedirs(temp_dir, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)

    # filenames for tei and json outputs
    tei_file = os.path.join(temp_dir, f'{input_filename}.tei.xml')
    json_file = os.path.join(output_dir, f'{input_filename}.json')
    txt_file = os.path.join(output_dir, f'{input_filename}.txt')

    # check if input file exists and output file doesn't
    if not os.path.exists(input_file):
        raise FileNotFoundError(f"{input_file} doesn't exist")
    if os.path.exists(json_file):
        # existing output is only warned about; it will be overwritten below
        log.warning(f'{json_file} already exists!')

    # process PDF through Grobid -> TEI.XML
    client = GrobidClient(grobid_config)
    # TODO: compute PDF hash
    # TODO: add grobid version number to output
    client.process_pdf(input_file, input_filename, temp_dir, "processFulltextDocument")

    # process TEI.XML -> JSON
    assert os.path.exists(tei_file)
    paper = convert_tei_xml_file_to_s2orc_json(tei_file)

    # write to file
    with open(json_file, 'w') as outf:
        json.dump(paper.release_json(), outf, indent=4, sort_keys=False)

    # extract text field from json and write to file, one text chunk per line
    output_txt = process_json(json_file, "text")
    with open(txt_file, 'w') as outfile:
        for text in output_txt:
            outfile.write(f"{text}\n")

    return tei_file, json_file, txt_file
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Run S2ORC PDF2JSON")
    parser.add_argument("-i", "--input", default=None, help="path to the input PDF file")
    parser.add_argument("-t", "--temp", default=BASE_TEMP_DIR, help="path to the temp dir for putting tei xml files")
    parser.add_argument("-o", "--output", default=BASE_OUTPUT_DIR, help="path to the output dir for putting json and txt files")
    parser.add_argument("-k", "--keep", action='store_true')

    args = parser.parse_args()

    input_path = args.input
    temp_path = args.temp
    output_path = args.output
    # NOTE(review): keep_temp is parsed but never used below -- the temp TEI
    # files are always kept; confirm whether cleanup was intended.
    keep_temp = args.keep

    start_time = time.time()

    os.makedirs(temp_path, exist_ok=True)
    os.makedirs(output_path, exist_ok=True)

    # Output base name = input file name without its extension.
    input_filename = os.path.splitext(os.path.basename(input_path))[0]
    tei_file, json_file, txt_file = process_pdf_file(input_path, input_filename, temp_path, output_path)

    runtime = round(time.time() - start_time, 3)
    print("runtime: %s seconds " % (runtime))
    print('done.')
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "doc2txt.grobid2json.gro... |
26211091171 | import os
import unittest
import itertools
import traceback
from chesspy import players
from chesspy.game import Game
from chesspy.board import Board
from chesspy.color import Color
from multiprocessing import Pool
from chesspy.analyzers import is_in_check, is_in_mate, adjacent_kings
class PlayerTest:
    """Namespace wrapper so the base TestPlayer is not collected directly.

    Concrete player tests subclass PlayerTest.TestPlayer and assign
    ``self.player_w`` / ``self.player_b`` in their setUp().
    """

    class TestPlayer(unittest.TestCase):
        def setUp(self):
            # Disable the engine's own check/mate assertions so the players
            # under test can be exercised on arbitrary positions.
            self.game = Game()
            self.game.assert_check = False
            self.game.assert_mate = False
            self.avoids_adjacent_kings_test_count = 1000
            self.exit_check_test_count = 1000
            self.pool = Pool()

        def tearDown(self):
            self.pool.close()
            self.pool.join()

        def test_pvp(self):
            # Play a full game between the two players, logging each move.
            # NOTE(review): this local name shadows the module-level
            # `players` import; harmless here, but easy to trip over.
            players = ((self.player_w, 'white'), (self.player_b, 'black'))

            with open(f"logs/{str(self.player_w)}_v_{str(self.player_b)}.log", "w") as game_file:
                # Alternate white/black; cap runaway games at 300 plies.
                for move, (player, color) in enumerate(itertools.cycle(players)):
                    if self.game.over or move > 300:
                        break

                    game_file.write(f"{str(self.game.board)}\n")
                    game_file.write(f"|{repr(self.game.board)}|\n")

                    sanstr = player.suggest_move_san()

                    game_file.write(f"{move}: {color}: {sanstr}\n")
                    game_file.write("\n")
                    game_file.flush()

                    # A player returning None must mean it is mated.
                    if sanstr is None:
                        self.assertTrue(is_in_mate(self.game.board, self.game.turn))
                        break

                    try:
                        self.game.move_san(sanstr)
                    except (IndexError, AssertionError) as exc:
                        # Keep the failing position in the log for debugging.
                        traceback.print_exception(exc, file=game_file)
                        raise

        def test_exits_check(self):
            # Player gets out of check
            for _ in range(self.exit_check_test_count):
                # Fixed position where white is in check and black is not.
                self.game.board = Board("rnb k nrpp p pp qp p p    N    b P   P P  PPPP  RBQKBNR")
                self.assertTrue(is_in_check(self.game.board, Color.WHITE))
                self.assertFalse(is_in_check(self.game.board, Color.BLACK))
                self.game.turn = Color.WHITE
                sanstr = self.player_w.suggest_move_san()
                self.game.move_san(sanstr)
                self.assertFalse(is_in_check(self.game.board, Color.WHITE))

        def test_avoids_adjacent_kings(self):
            # Player doesn't move into adjacent kings
            for _ in range(self.avoids_adjacent_kings_test_count):
                self.game.board = Board(" rk      p  K                            N                  PP ")
                self.assertFalse(adjacent_kings(self.game.board))
                self.game.turn = Color.WHITE
                sanstr = self.player_w.suggest_move_san()
                self.game.move_san(sanstr)
                self.assertFalse(adjacent_kings(self.game.board))

        def test_checkmated(self):
            # Player suggests None when he's checkmated
            self.game.board = Board("P R    k   KR                                                 p")
            self.assertTrue(is_in_check(self.game.board, Color.BLACK))
            self.assertTrue(is_in_mate(self.game.board, Color.BLACK))
            self.game.turn = Color.BLACK
            self.assertIsNone(self.player_b.suggest_move_san())

        @unittest.skip
        def test_stalemated(self):
            # Player suggests None when he's stalemated
            # case A) only pieces left are kings
            # case B) king isn't in check but could only move to check
            self.assertFalse(True)

        @unittest.skip
        def test_castle(self):
            # Player Castles once in a while
            self.assertFalse(True)
class TestRandy(PlayerTest.TestPlayer):
    """Randy vs Randy (random-move player on both sides)."""

    def setUp(self):
        super().setUp()
        self.player_w = players.Randy(self.game, color=Color.WHITE)
        self.player_b = players.Randy(self.game, color=Color.BLACK)
class TestRicky(PlayerTest.TestPlayer):
    """Ricky vs Ricky."""

    def setUp(self):
        super().setUp()
        self.player_w = players.Ricky(self.game, color=Color.WHITE)
        self.player_b = players.Ricky(self.game, color=Color.BLACK)
class TestJulian(PlayerTest.TestPlayer):
    """Julian vs Julian; iteration counts reduced because Julian searches
    (and therefore runs) much slower than the other players."""

    def setUp(self):
        super().setUp()
        self.avoids_adjacent_kings_test_count = 1
        self.exit_check_test_count = 1
        self.player_w = players.Julian(self.game, color=Color.WHITE, pool=self.pool)
        self.player_b = players.Julian(self.game, color=Color.BLACK, pool=self.pool)
class TestRandyVsRicky(PlayerTest.TestPlayer):
    """Randy (white) vs Ricky (black)."""

    def setUp(self):
        super().setUp()
        self.player_w = players.Randy(self.game, color=Color.WHITE)
        self.player_b = players.Ricky(self.game, color=Color.BLACK)

    @unittest.skip
    def test_ricky_usually_wins(self):
        # Ricky is supposed to be smarter than Randy, so Ricky should win more often
        self.assertFalse(True)
class TestRandyVsJulian(PlayerTest.TestPlayer):
    """Randy (white) vs Julian (black); reduced iteration counts for speed."""

    def setUp(self):
        super().setUp()
        self.avoids_adjacent_kings_test_count = 1
        self.exit_check_test_count = 1
        self.player_w = players.Randy(self.game, color=Color.WHITE)
        self.player_b = players.Julian(self.game, color=Color.BLACK, pool=self.pool)

    @unittest.skip
    def test_julian_usually_wins(self):
        # Julian is supposed to be smarter than Randy, so Julian should win more often
        self.assertFalse(True)
class TestRickyVsJulian(PlayerTest.TestPlayer):
    """Ricky (white) vs Julian (black); reduced iteration counts for speed."""

    def setUp(self):
        super().setUp()
        self.avoids_adjacent_kings_test_count = 1
        self.exit_check_test_count = 1
        self.player_w = players.Ricky(self.game, color=Color.WHITE)
        self.player_b = players.Julian(self.game, color=Color.BLACK, pool=self.pool)

    @unittest.skip
    def test_julian_usually_wins(self):
        # Julian is supposed to be smarter than Ricky, so Julian should win more often
        self.assertFalse(True)
| mikepartelow/chesspy | app/tests/test_players.py | test_players.py | py | 6,059 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "chesspy.game.Game",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "chesspy... |
6831655892 | import numpy as np
import imageio
from skimage.transform import resize
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
import timeit
def mssim(
    x: np.ndarray,
    y: np.ndarray,
) -> float:
    """Mean structural similarity (MSSIM) between two 2-D images.

    Implements the SSIM map of Eq. (9) from the assignment sheet using
    Gaussian-weighted local statistics; boundary pixels contaminated by
    filter padding are cropped before averaging.

    :param x: first image (2-D array; dynamic range assumed to be 1)
    :param y: second image, same shape as ``x``
    :return: mean SSIM over the interior (valid) pixels
    """
    # Standard choice for the parameters
    K1 = 0.01
    K2 = 0.03
    sigma = 1.5
    truncate = 3.5
    m = 1  # dynamic range of the data
    C1 = (K1 * m) ** 2
    C2 = (K2 * m) ** 2

    x = x.astype(np.float64)
    y = y.astype(np.float64)

    # radius size of the local window (needed to know how many boundary
    # pixels are affected by padding)
    r = int(truncate * sigma + 0.5)
    win_size = 2 * r + 1

    filter_args = {
        'sigma': sigma,
        'truncate': truncate
    }

    # Local means, variances and covariance via Gaussian filtering.
    # (Removed an unused `filtered = gaussian_filter(x, ...)` and a dead
    # `S = np.ones_like(x)` that was immediately overwritten.)
    mu1 = gaussian_filter(x, **filter_args)
    mu2 = gaussian_filter(y, **filter_args)
    mu1_sq = mu1 ** 2
    mu2_sq = mu2 ** 2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = gaussian_filter(x ** 2, **filter_args) - mu1_sq
    sigma2_sq = gaussian_filter(y ** 2, **filter_args) - mu2_sq
    sigma12 = gaussian_filter(x * y, **filter_args) - mu1_mu2

    # Per-pixel SSIM map (Eq. (9)).
    S = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                     (sigma1_sq + sigma2_sq + C2))

    # crop to remove boundary artifacts, return MSSIM
    pad = (win_size - 1) // 2
    return S[pad:-pad, pad:-pad].mean()
def psnr(
    x: np.ndarray,
    y: np.ndarray,
) -> float:
    """Peak signal-to-noise ratio (in dB) between two images.

    Vectorized implementation of Eq. (2): mean squared error over all
    pixels, converted to decibels with a peak value of 1.
    """
    n_pixels = float(x.shape[0] * x.shape[1])
    mse = np.square(np.subtract(x, y)).sum() / n_pixels
    return 10 * np.log10(1 / mse)
def psnr_for(
    x: np.ndarray,
    y: np.ndarray,
) -> float:
    """Peak signal-to-noise ratio (in dB), loop-based reference version.

    Computes the same quantity as :func:`psnr`, but accumulates the MSE
    with explicit Python loops (kept deliberately slow for the timing
    comparison in ``interpolation_error``).
    """
    total = 0.
    rows = x.shape[0]
    cols = x.shape[1]
    for row in range(rows):
        for col in range(cols):
            delta = x[row][col] - y[row][col]
            total += delta * delta
    mse = total / float(rows * cols)
    return 10 * np.log10(1 / mse)
def interpolation_error():
    """Compare interpolation orders on girl.png: quality (MSSIM/PSNR) and
    runtime of the vectorized vs. loop-based PSNR implementations; the
    three rescaled images are saved to example12s.png."""
    # Load and normalize the image to [0, 1].
    x = imageio.imread('./girl.png') / 255.
    shape_lower = (x.shape[0] // 2, x.shape[1] // 2)

    # downsample image to half the resolution
    # and successively upsample to the original resolution
    # using no nearest neighbor, linear and cubic interpolation
    nearest, linear, cubic = [
        resize(resize(
            x, shape_lower, order=order, anti_aliasing=False
        ), x.shape, order=order, anti_aliasing=False)
        for order in [0, 1, 3]
    ]

    for label, rescaled in zip(
            ['nearest', 'linear', 'cubic'],
            [nearest, linear, cubic]
    ):
        print(label)
        print(mssim(x, rescaled))
        # NOTE(review): mstr/fstr are computed but never used; the metrics
        # are also evaluated twice (once for print, once for round).
        m = round(float(mssim(x, rescaled)), 2)
        mstr = str(m)
        # Time the vectorized PSNR ...
        start1 = timeit.default_timer()
        print(psnr(x, rescaled))
        stop1 = timeit.default_timer()
        f = round(float(psnr(x, rescaled)), 2)
        fstr = str(f)
        # ... versus the loop-based PSNR.
        start2 = timeit.default_timer()
        print(psnr_for(x, rescaled))
        stop2 = timeit.default_timer()
        print('psnr Time: ', stop1 - start1)
        print('psnr_forTime: ', stop2 - start2)

    #Plotting: one panel per interpolation order.
    fig, (axNear, axLin, axCub) = plt.subplots(1, 3)
    axNear.imshow(nearest)
    axNear.set_title('nearest')
    axLin.imshow(linear)
    axLin.set_title('linear')
    axCub.imshow(cubic)
    axCub.set_title('cubic')
    plt.savefig("example12s.png")
# Run the interpolation comparison when executed as a script.
if __name__ == '__main__':
    interpolation_error()
| 6-62x10-34Js/ImageProcessingAndPatternRecognition | interpolation_error_bluethner.py | interpolation_error_bluethner.py | py | 4,093 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.ndarray",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.float64",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.float6... |
72974484835 | from datetime import datetime
from models.models import User, App, Lumos
from api import db
def resolve_users(obj, info):
    """GraphQL resolver: return all users."""
    return User.query.all()
def resolve_user(obj, info, user_id):
    """GraphQL resolver: return one user by primary key (None if absent)."""
    return User.query.get(user_id)
def resolve_create_user(obj, info, username, email, password):
    """Create and persist a new user, returning it.

    NOTE(review): the password is stored exactly as received here --
    confirm that hashing happens inside the User model.
    """
    new_user = User(username=username, email=email, password=password)
    db.session.add(new_user)
    db.session.commit()
    return new_user
def resolve_update_user(obj, info, user_id, input):
    """Update arbitrary fields of a user from the ``input`` mapping.

    Returns the updated user, or None when the id does not exist.
    """
    user = User.query.get(user_id)
    if user:
        # Apply every key/value pair of the GraphQL input object as an
        # attribute of the model.
        for key, value in input.items():
            setattr(user, key, value)
        db.session.commit()
        return user
    return None
def resolve_delete_user(obj, info, user_id, input=None):
    """Delete a user by id; return True on success, False if not found.

    Fix: ``input`` was a required positional parameter although it is
    never used (the sibling resolve_delete_app takes no such argument),
    so calls without it failed. It now defaults to None, which stays
    backward compatible with callers that do pass it.
    """
    user = User.query.get(user_id)
    if user:
        db.session.delete(user)
        db.session.commit()
        return True
    return False
def resolve_apps(obj, info):
    """GraphQL resolver: return all apps."""
    return App.query.all()
def resolve_app(obj, info, app_id):
    """GraphQL resolver: return one app by primary key (None if absent)."""
    return App.query.get(app_id)
def resolve_create_app(obj, info, app_name, app_icon):
    """Create and persist a new app, returning it."""
    new_app = App(app_name=app_name, app_icon=app_icon)
    db.session.add(new_app)
    db.session.commit()
    return new_app
def resolve_update_app(obj, info, app_id, input):
    """Update arbitrary fields of an app from the ``input`` mapping.

    Returns the updated app, or None when the id does not exist.
    """
    app = App.query.get(app_id)
    if app:
        # Apply every key/value pair of the GraphQL input object.
        for key, value in input.items():
            setattr(app, key, value)
        db.session.commit()
        return app
    return None
def resolve_delete_app(obj, info, app_id):
    """Delete an app by id; return True on success, False if not found."""
    app = App.query.get(app_id)
    if app:
        db.session.delete(app)
        db.session.commit()
        return True
    return False
def resolve_lumos(obj, info, lumos_id):
    """GraphQL resolver: return one Lumos record by id (None if absent)."""
    return Lumos.query.get(lumos_id)
#possible resolvers to get Lumos information through app or user
#def resolve_lumos_for_user(obj, info, user_id):
# return User.query.get(user_id).app_lumos
#def resolve_lumos_for_app(obj, info, app_id):
# return App.query.get(app_id).user_lumos
def resolve_create_lumos(obj, info, user_id, app_id, permission_level):
    """Create a Lumos permission record linking a user to an app.

    Bug fix: the original passed the ``datetime`` *class* itself as the
    activation/expiration values; an actual timestamp is stored now.
    """
    # NOTE(review): both dates are set to "now", mirroring the original
    # (which set both to the same object); confirm the intended expiry
    # policy and whether UTC should be used.
    now = datetime.now()
    new_lumos = Lumos(
        user_id=user_id,
        app_id=app_id,
        permission_level=permission_level,
        activation_date=now,
        expiration_date=now,
        account_status='active'
    )
    db.session.add(new_lumos)
    db.session.commit()
    return new_lumos
# NOTE(review): capitalised "Lumos" in this name is inconsistent with the
# other resolvers; kept as-is because the name may be bound externally.
def resolve_update_Lumos(obj, info, lumos_id, input):
    """Update arbitrary fields of a Lumos record from ``input``.

    Returns the updated record, or None when the id does not exist.
    """
    lumos = Lumos.query.get(lumos_id)
    if lumos:
        for key, value in input.items():
            setattr(lumos, key, value)
        db.session.commit()
        return lumos
    return None
def resolve_delete_Lumos(obj, info, lumos_id):
    """Delete a Lumos record by id; True on success, False if not found."""
    lumos = Lumos.query.get(lumos_id)
    if lumos:
        db.session.delete(lumos)
        db.session.commit()
        return True
    return False
{
"api_name": "models.models.User.query.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.models.User.query",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.models.User",
"line_number": 7,
"usage_type": "name"
},
{
"api_na... |
31386333656 | from lingpy import *
from lingpy.evaluate.acd import *
from collections import defaultdict, OrderedDict
from lingpy.evaluate.acd import _get_bcubed_score
def get_rhymes(dataset):
    """Load ``<dataset>.tsv`` and return its rows as OrderedDicts.

    The first row provides the (lower-cased) column names; each following
    row becomes one OrderedDict keyed by those names.
    """
    csv = csv2list(dataset+'.tsv',
            strip_lines=False)
    header = [h.lower() for h in csv[0]]
    rest = csv[1:]
    out = []
    for line in rest:
        out += [OrderedDict(zip(header, line))]
    return out
def to_dict(csv):
    """Index rows by their (line, stanza, line_number) triple.

    If several rows share a key, the last one wins (as with the original
    assignment loop).
    """
    return {
        (row['line'], row['stanza'], row['line_number']): row
        for row in csv
    }
# Load both annotation datasets and index them by (line, stanza, line_number).
wang = get_rhymes('Wang1980')
baxt = get_rhymes('Baxter1992')
wand, baxd = to_dict(wang), to_dict(baxt)

# add rhyme_id to wang's data: each (stanza, rhyme-label) pair gets its own
# integer id; unannotated lines get id 0 and are treated as singletons.
idxs, cogid = {}, 0
for key, val in wand.items():
    if val['rhyme']:
        rhyme = key[1] + '.' + val['rhyme']
        if rhyme in idxs:
            wand[key]['rhymeid'] = idxs[rhyme]
        else:
            idxs[rhyme] = cogid
            cogid += 1
            wand[key]['rhymeid'] = idxs[rhyme]
    else:
        wand[key]['rhymeid'] = 0
        cogid += 1

# Normalize Baxter's rhyme ids to integers ('0' marks unannotated lines).
for key, val in baxd.items():
    if val['rhymeid'] == '0':
        val['rhymeid'] = 0
        cogid += 1
    else:
        val['rhymeid'] = int(val['rhymeid'])
def compare_stanza(rhymes1, rhymes2, stanza):
    """Compare two rhyme annotations of one stanza with B-cubed scores.

    :param rhymes1: first annotation, keyed by (line, stanza, line_number)
    :param rhymes2: second annotation with the same key scheme
    :param stanza: stanza identifier to compare
    :return: (precision, recall, f-score); identical patterns short-circuit
        to (1, 1, 1)
    """
    def get_rhymes(stanza, rhymes):
        # Collect the stanza's lines in original order and renumber its
        # rhyme ids locally (0 = unannotated).
        vals = sorted([x for x in rhymes.items() if stanza in x[0]],
                key=lambda x: int(x[1]['id']))
        patterns = [x[1]['rhymeid'] for x in vals]
        cogid, rem = 0, {}
        out = []
        for p in patterns:
            if p == 0:
                out += [0]
            elif p in rem:
                out += [rem[p]]
            else:
                rem[p] = cogid
                cogid += 1
                out += [rem[p]]
        return out

    rhymes1p, rhymes2p = get_rhymes(stanza, rhymes1), get_rhymes(
            stanza, rhymes2)
    if rhymes1p == rhymes2p:
        return 1, 1, 1
    else:
        # NOTE(review): rhymes1p_/rhymes2p_ drop positions where both
        # annotations are 0, but the scores below are computed from the
        # UNfiltered lists, so this filtering is dead code -- confirm
        # which behavior was intended.
        rhymes1p_, rhymes2p_ = [], []
        for a, b in zip(rhymes1p, rhymes2p):
            if not (a == 0 and b == 0):
                rhymes1p_ += [a]
                rhymes2p_ += [b]
        p = _get_bcubed_score(rhymes1p, rhymes2p)
        r = _get_bcubed_score(rhymes2p, rhymes1p)
        f = 2 * ((p*r) / (p+r))
        return p, r, f
diffs = []
missed = []
stanzas = defaultdict(list)
missed_stanzas = []

# Group Wang's lines by stanza; lines absent from Baxter go to `missed`.
for (l, s, n), d in wand.items():
    if (l, s, n) in baxd:
        stanzas[s] += [(l, s, n)]
    else:
        missed += [(l, s, n)]

# Copy Wang's rhyme label and reconstruction onto the matching Baxter
# rows (empty strings where Wang has no annotation or no matching line).
for l, s, n in baxd:
    if (l, s, n) in wand:
        if wand[l, s, n]['rhyme'].strip():
            rhyme = wand[l, s, n]['stanza'] + '.'+wand[l, s, n]['rhyme']
            reconstruction = wand[l, s, n]['reconstruction']
        else:
            rhyme = ''
            reconstruction = ''
    else:
        rhyme = ''
        reconstruction = ''
    baxd[l, s, n]['wangli_rhyme'] = rhyme
    baxd[l, s, n]['wangli_reconstruction'] = reconstruction

# Score every stanza fully present in both annotations.
missed_stanzas = [m[1] for m in missed]
for stanza in stanzas:
    if stanza not in missed_stanzas:
        a, b, c = compare_stanza(wand, baxd, stanza)
        diffs += [(stanza, a, b, c)]

print('Total different lines', sum([1 for d in diffs if d[3] != 1]), len(diffs))
print('Proportion per stanza', sum([d[3] for d in diffs]) / len(diffs))
| digling/network-in-hcp-paper | evaluation/rhymes.py | rhymes.py | py | 3,252 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "collections.OrderedDict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lingpy.evaluate.acd._get_bcubed_score",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "lingpy.evaluate.acd._get_bcubed_score",
"line_number": 80,
"usage_type": "c... |
44625569644 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 00:02:01 2018
@author: elenabg
"""
import sys
import time
import pickle
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import re
import csv
# Load the scraped 1990-2018 dataframe.
# NOTE(review): the file handle from open() is never closed; consider a
# `with` block.
df_all = pickle.load(open("df_all.p", "rb")) # cargar dataframe con scraping 1990-2018
df_all.columns=['Description', 'Date', 'Location', 'Victim', 'Alleged_Responsible', 'Type']
df_all.to_csv('data_nn_with_dups.csv')

"""
Observaciones acerca de la base:
1) El sitio tardar en cargar cada query
2) Hay anios donde el website no entrega los datos completos y eso imposibiliota el scraping -- Se intento la busqueda por otros
medios: csv, query bajo criterio regional o genero
3) Hay observaciones repetidas (misma victima, mismo crimen) -- Se limpio el dataframe de filas con misma victima: paso
de 43274 a 36131 obs.
4) Hay una entrada de fecha erronea en la base: agosto 2018
"""

# Strip stray "X:NN:NN" fragments from the crime-type strings.
regex_pat = re.compile(r'\w:\d+:\d+', flags=re.IGNORECASE)
df = df_all.drop_duplicates(subset= df_all.columns, keep='first', inplace = False) # no duplicate rows
df['Type'] = df.Type.str.replace(regex_pat, '')

# Keep a record of the duplicated rows that were dropped above.
df_all['Dup'] = df_all.duplicated(subset= df_all.columns, keep = False)
df_dup =df_all[df_all.Dup == True]
df_dup.to_csv('data_dup.csv') # all duplicated PAIRS (the first was kept in the dataframe, the second dropped)

# Parse dates and derive year / month / month-year helper columns.
df['Date'] = pd.to_datetime(df['Date'])
df = df.sort_values(by='Date') # order by date
df['year'] = pd.DatetimeIndex(df['Date']).year # year
df['month'] = df['Date'].apply(lambda x: x.strftime('%B')) # month
df['mnth_yr'] = df['Date'].apply(lambda x: x.strftime('%B-%Y')) # month-year
df.to_csv('data_nn.csv')
############### 1. DATA EXPLORATION ################################

# By Period
df_by_y = df['year'].value_counts().sort_index() # total count by year
df_by_y.plot(kind = 'bar')
plt.title("No. Cases by Year (1990-2018)")

# By Month: stacked bars of monthly counts per year.
colors = plt.cm.GnBu(np.linspace(0, 1, 12))
df_gmth = pd.DataFrame({'Count' : df.groupby(['year', 'month']).size()}).reset_index()
df_gmth = df_gmth[df_gmth.Count >=0]
df_gmth_piv = df_gmth.pivot(index='year', columns='month', values='Count')
df_gmth_piv.plot(kind='bar', stacked=True, color = colors)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title("No. Cases per Year by Month (1990-2018)")

## By Crime Type
# Total
df_by_type = df['Type'].value_counts()
top10_type = df_by_type[:10]
top10_type.plot(kind = 'bar')
plt.title("No. Cases by Crime Type (1990-2018)")

# Per Year (only type/year cells with at least 20 cases)
colors = plt.cm.GnBu(np.linspace(0, 1, 65))
df_gtp = pd.DataFrame({'Count' : df.groupby(['year', 'Type']).size()}).reset_index()
df_gtp = df_gtp[df_gtp.Count >=20]
df_gtp_piv = df_gtp.pivot(index='year', columns='Type', values='Count')
df_gtp_piv.plot(kind='bar', stacked=True, color = colors)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title("No. Cases per Year by Crime Type (1990-2018)")

## By Location
# Total
df_by_loc = df['Location'].value_counts()
# NOTE(review): sort_values() is not in-place; this statement has no effect.
df_by_loc.sort_values(ascending=False)
# NOTE(review): despite the name, this keeps the top 20 locations.
top10_loc = df_by_loc[:20]
top10_loc.plot(kind = 'bar')
plt.title("No. Cases by Location (1990-2018)")

# Per Year
colors = plt.cm.GnBu(np.linspace(0, 1, 90))
df_locy = pd.DataFrame({'Count' : df.groupby(['year', 'Location']).size()}).reset_index()
df_locy = df_locy[df_locy.Count >=20] # this slightly changes the distribution over time (lower bound by loc)
df_locy_piv = df_locy.pivot(index='year', columns='Location', values='Count')
df_locy_piv.plot(kind='bar', stacked=True, color=colors)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title("No. Cases per Year by Location (1990-2018)")

## By Responsible Group
# Total
df_by_resp = df['Alleged_Responsible'].value_counts()
# NOTE(review): sort_values() is not in-place; this statement has no effect.
df_by_resp.sort_values(ascending=False)
top10_resp = df_by_resp[:10]
top10_resp.plot(kind = 'bar')
plt.title("No. Cases by Responsible Group (1990-2018)")

# Per Year (only group/year cells with at least 25 cases)
colors = plt.cm.GnBu(np.linspace(0, 1, 22))
df_gy = pd.DataFrame({'Count' : df.groupby(['year', 'Alleged_Responsible']).size()}).reset_index()
df_gy = df_gy[df_gy.Count >=25]
df_gy_piv = df_gy.pivot(index='year', columns='Alleged_Responsible', values='Count')
df_gy_piv.plot(kind='bar', stacked=True, color=colors)
plt.legend(bbox_to_anchor=(1.05, 1), loc=0, borderaxespad=0.)
plt.title("No. Cases per Year by Responsible Group(1990-2018)")
# NOTE(review): `pob_nm` and `pob_cd` are not defined anywhere in this file,
# so this loop raises NameError as written. Presumably they are parallel
# lists of locality names and codes (7-digit codes get a leading zero to
# make 8 digits) -- confirm the intended data source before running.
d_pob ={}
for i, pob in enumerate(pob_nm):
    if pob not in d_pob:
        if len(str(pob_cd[i])) == 7:
            d_pob[pob] = '0' + str(pob_cd[i])
        else:
            d_pob[pob] = str(pob_cd[i])
| ElenaBadilloG/Noche-y-Niebla-Project | explore_all_data.py | explore_all_data.py | py | 4,608 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pickle.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pandas.to_datetime",
"l... |
41564702032 | from bs4 import BeautifulSoup
import xlsxwriter
# Output workbook: all parsed orders go into a single worksheet.
workbook = xlsxwriter.Workbook('aliexpress.xlsx')
worksheet = workbook.add_worksheet()

# Parsed orders accumulate here; fileN is the number of saved order pages
# (ali1.html .. ali<fileN>.html) to read.
orders = []
fileN = 14
def readDataHTML():
    """Parse the saved AliExpress order pages (ali1.html .. ali<fileN>.html).

    Each order is appended to the module-level ``orders`` list as
    ``[id, date, order_price, products_price, products]`` where ``products``
    is a list of ``[name, price, amount]`` entries and prices are integer
    cents. Orders whose id was already collected are skipped, so pages with
    overlapping content are safe to re-parse.

    Cleanups vs. the original: unused ``global`` declarations removed;
    the dead ``else`` branch (``open`` raises on failure, so ``f.mode``
    is always ``'r'``) which also referenced the never-imported ``os``
    module removed; the unused ``shippingcost`` local removed; the file
    is now closed via ``with``.
    """
    for page in range(1, fileN + 1):
        with open("ali" + str(page) + ".html", "r", encoding='utf8') as f:
            soup = BeautifulSoup(f.read(), "html.parser")
        content = soup.find_all('tbody', attrs={"class": "order-item-wraper"})
        tmp = []
        for order in content:
            info = order.find_all('span', attrs={'class': 'info-body'})
            id = info[0].contents[0]
            date = info[1].contents[0]
            # Order total: strip newlines/blank tokens, drop the thousands
            # separator, keep the amount as integer cents.
            orderprice = int([x for x in order.find('p', attrs={'class': 'amount-num'}).contents[0].translate({ord('\n'): None}).split(' ') if x != ' ' and x != ''][0].replace(',',''))
            items = order.find_all('tr', attrs={'class': 'order-body'})
            productsprice = 0
            products = []
            # Skip orders already parsed from another page.
            if any(o[0] == id for o in orders):
                continue
            for p in items:
                name = p.find('a', attrs={'class': 'baobei-name'}).contents[0]
                price = int(p.find('p', attrs={'class': 'product-amount'}).contents[1].contents[0].split(' ')[1].replace(',',''))
                amount = int(p.find('p', attrs={'class': 'product-amount'}).contents[3].contents[0][1:])
                productsprice = productsprice + price
                products.append([name, price, amount])
            tmp.append([id, date, orderprice, productsprice, products])
        # Pages list newest orders first; reverse to keep chronological order.
        for entry in reversed(tmp):
            orders.append(entry)
# Parse all saved order pages into `orders`.
readDataHTML()

n = 0  # scratch counter (re-initialised to 2 below, before data rows are written)
def sheetWrite(row, col, data):
    """Write `data` horizontally into the worksheet, starting at (col, row).

    Note the xlsxwriter convention: the first argument of
    ``worksheet.write`` is the row index, which this helper receives as
    ``col`` (the parameter names are swapped relative to xlsxwriter).
    """
    for offset, value in enumerate(data):
        worksheet.write(col, row + offset, value)
#worksheet.write(0, 0, 'Order')
#worksheet.write(0, 1, 'Product')
#worksheet.write(0, 2, 'Cost')
#worksheet.write(0, 3, 'Date')

# Header row (Excel row index 1).
sheetWrite(1,1,['Order','Product','Cost','Count','Date'])

# Count orders (debug printing kept commented out).
for i in orders:
    n = n + 1
    #for x in range(0,len(i) - 1):
        #if x == len(i) - 2:
            #print(i[x],end='')
        #else:
            #print(i[x],end=' - ')
    #print('\n')
    #for x in range(0,len(i[len(i)-1])):
        #print(' ',i[len(i)-1][x])
    #print('\n')

# Data rows start at Excel row index 2. Each order becomes one "shipping"
# row (order total minus product total, converted from cents to currency
# units) followed by one row per product.
n = 2
#[id,date,orderprice,productsprice,products]
for i in orders:
    sheetWrite(1, n, [i[0],'shipping',float((i[2]-i[3])/100),1,i[1]])
    n = n + 1
    for x in range(0,len(i[len(i)-1])):
        item = i[len(i)-1][x]
        #sheetWrite(1, n, [i[0],item[0],item[1],i[1]])
        sheetWrite(1, n, ['',item[0],float(item[1]/100),item[2],i[1]])
        n = n + 1

#sheetWrite(1,n + 1,['','','{=SUMPRODUCT(D3:D'+str(n)+';E3:E'+str(n)+')}'])
#worksheet.write_formula('D'+str(n+2),'=SUMPRODUCT(D3:D'+str(n)+';E3:E'+str(n)+')')
#print('\n\n\n',len(orders))

# Column widths for readability.
worksheet.set_column('B:B', 20)
worksheet.set_column('C:C', 122)
worksheet.set_column('D:D', 7)
worksheet.set_column('E:E', 5)
worksheet.set_column('F:F', 20)

# Retry loop: saving fails with FileCreateError while the workbook is open
# in Excel; prompt the user and retry unless they answer 'n'.
while True:
    try:
        workbook.close()
    except xlsxwriter.exceptions.FileCreateError as e:
        # For Python 3 use input() instead of raw_input().
        decision = input("Exception caught in workbook.close(): %s\n"
                         "Please close the file if it is open in Excel.\n"
                         "Try to write file again? [Y/n]: " % e)
        if decision != 'n':
            continue
    break
{
"api_name": "xlsxwriter.Workbook",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "xlsxwriter.exceptions",
"line_number": 121,
"usage_type": "attribute"
}
] |
20628891853 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 19:54:41 2020
@author: phalinp
"""
import cv2 as cv
import numpy as np
def draw_rectangle(img):
cv.rectangle(img,(384,0),(510,128),(0,255,255),3)
#For rectangle if have to give top left corner i.e. (384,0)
#and bottom right i.e (510,128) (0,255,255) is colour and 3 is
#thickness
cv.imshow("image",img)
cv.waitKey(0)
cv.destroyAllWindows()
def draw_circle(img):
cv.circle(img,(447,63),63,(0,0,255),-1)
#for circle, center(447,63), radius = 63, colour and thickness is needed
#thickness = -1 to fill colour in circle.
cv.imshow("image",img)
cv.waitKey(0)
cv.destroyAllWindows()
def draw_ellipse(img):
cv.ellipse(img,(200,200),(100,100),0,0,360,(0,0,255),-1)
#For ellipse we have to define the center, the length of major and minor
#axes, angle of rotation of ellipse in anticlock vise direction, startangle
# and endangle denotes start and end of ellipse arc
cv.imshow("image",img)
cv.waitKey(0)
cv.destroyAllWindows()
def main():
img = np.zeros((512,512,3), dtype = np.uint8)
main() | P-H-Pancholi/opencv-python-tutorials | GUI_Features/draw_shapes_on_image.py | draw_shapes_on_image.py | py | 1,190 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.rectangle",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"lin... |
16284369692 | """
Validate and manipulate GenBank files.
"""
from setuptools import find_packages, setup
# Single source of truth for runtime dependencies (the original declared
# this list but never used it, duplicating the names in install_requires).
dependencies = ['click', 'biopython']

setup(
    name='faketool',
    version='0.1.4',
    url='https://github.com/sgordon007/fake-tool',
    license='BSD',
    author='Sean Gordon',
    author_email='seangordon07@gmail.com',
    # typo fixed: "maniopulate" -> "manipulate"
    description='validate and manipulate genbank files.',
    long_description=__doc__,
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=dependencies,
    entry_points={
        'console_scripts': [
            'genbank_validate = genbank_validate.cli:main',
        ],
    },
    classifiers=[
        # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        # 'Development Status :: 1 - Planning',
        # 'Development Status :: 2 - Pre-Alpha',
        # 'Development Status :: 3 - Alpha',
        'Development Status :: 4 - Beta',
        # 'Development Status :: 5 - Production/Stable',
        # 'Development Status :: 6 - Mature',
        # 'Development Status :: 7 - Inactive',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| sgordon007/fake-tool | setup.py | setup.py | py | 1,617 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 17,
"usage_type": "call"
}
] |
42110400732 | import cv2
# open the default webcam (device index 0)
cam = cv2.VideoCapture(0)

# You can save your video according to the same size as your webcam stream or hardcode the size you like
# frame_width = int(cam.get(3))
# frame_height = int(cam.get(4))
# recorder = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M','J','P','G'),20, (frame_width,frame_height))

# Saving the video in 640x480 size, 20 FPS, Motion-JPEG codec
recorder = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 20, (640, 480))

while cam.isOpened():
    # read the frames from the webcam
    # NOTE(review): `ret` is never checked; if a grab fails, `frame` is
    # None and the write/imshow calls below will misbehave.
    ret, frame = cam.read()
    # save the frame and write to our output file
    recorder.write(frame)
    # show the frames in a window
    cv2.imshow('My Webcam', frame)
    k = cv2.waitKey(1) & 0xFF
    # if 'q' is pressed, the program exits
    if k == ord('q'):
        break

# release the webcam frames
cam.release()
# release the video recorder frames
recorder.release()
# destroy all windows
cv2.destroyAllWindows()
| abuelgasimsaadeldin/opencv-starter-pack | python/basic/video_writer.py | video_writer.py | py | 979 | python | en | code | null | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
... |
1887982137 | import datetime
def check_hour(hour):
    """
    Check whether an hour string is in the strict HH:MM format the bot expects.

    Valid examples: "05:30", "23:59", "00:00". Components must have exactly
    two digits each ("5:30" is invalid), be numeric, and lie in 00-23 /
    00-59.

    Fixes over the original: strings with extra components such as
    "05:30:00", and over-long components such as "005:30", were wrongly
    accepted; both are rejected now.

    :param hour: the string to validate (may be empty or None)
    :return: True if the format is valid, False otherwise.
    """
    if not hour:
        return False
    # Exactly one ":" separating hours and minutes.
    parts = hour.split(":")
    if len(parts) != 2:
        return False
    hours, minutes = parts
    # Both components must be numeric.
    if not hours.isdigit() or not minutes.isdigit():
        return False
    # Both components must have exactly two digits.
    if len(hours) != 2 or len(minutes) != 2:
        return False
    # Range checks: 00-23 for hours, 00-59 for minutes.
    if not 0 <= int(hours) <= 23:
        return False
    if not 0 <= int(minutes) <= 59:
        return False
    # Valid format for the bot.
    return True
def check_o_clock_hours(hour):
    """
    Normalise an o'clock hour ("7", "19") into "HH:00" format.

    Inputs longer than two characters are assumed to already carry
    minutes and are delegated to check_one_cifre_hours().

    :param hour: raw hour string typed by the user.
    :return: a normalised "HH:MM" string, or False when it cannot be fixed.
    """
    # Anything longer than two characters carries minutes; hand it to the
    # one-digit corrector and return whatever it decides.
    if len(hour) > 2:
        return check_one_cifre_hours(hour)
    if not hour.isdigit():
        return False
    value = int(hour)
    # A single digit 0-9 gets zero-padded.
    if len(hour) == 1 and -1 < value < 10:
        return "0{}:00".format(hour)
    # Two-digit hours are fine as long as they stay below 24.
    if value < 24:
        return "{}:00".format(hour)
    return False
def check_one_cifre_hours(hour):
    """
    Correct an "H:M"-style hour whose fields may have only one digit.

    Only hours below 10 are handled here (two-digit hours are the job of
    check_hour); one-digit minutes get zero-padded.

    NOTE(review): fields that are *already* zero-padded get padded again
    ("05:30" -> "005:30") — kept as-is to preserve behaviour.

    :param hour: candidate hour string such as "5:3" or "7:30".
    :return: a corrected hour string on success, False otherwise.
    """
    if ":" not in hour:
        return False
    # Split into hour / minute fields (any extra fields are ignored,
    # matching the original indexing).
    fields = hour.split(":")
    raw_hours, raw_minutes = fields[0], fields[1]
    if not (raw_hours.isdigit() and raw_minutes.isdigit()):
        return False
    # Only single-digit hour values (0-9) can be corrected here.
    if not -1 < int(raw_hours) < 10:
        return False
    padded_hours = "0{}".format(raw_hours)
    # Minutes outside 0-59 are invalid.
    if not -1 < int(raw_minutes) < 60:
        return False
    # Zero-pad one-digit minutes as well.
    if int(raw_minutes) < 10:
        raw_minutes = "0{}".format(raw_minutes)
    return "{}:{}".format(padded_hours, raw_minutes)
def wrong_hour_format_text():
    """Return the HTML-formatted error message shown for a bad hour format."""
    # Adjacent string literals are concatenated at compile time; the
    # resulting text is identical to the original message.
    return (
        "It seems that you have introduced a wrong hour format. Remember:\n\n"
        "-Format goes from 00 to 23 for hours, and from 00 to 59 for minutes.\n\n"
        "-It <b>MUST</b> contain two digits (not 5:30, but 05:30) for minutes and hours. \n\n"
        "-Hours and minutes are separated with <b>':'</b> without any space.\n\n"
        "Examples: 17:00, 15:15, 06:30, 23:12, 00:00, 09:56 etc\n\n"
        "Write the command and try again!"
    )
def calculate_total_day_payment(user: dict):
    """
    Compute the money earned in one day for the given user record.

    :param user: dict with "payment_per_hour", "arrival_time" and
                 "exit_time" keys.
    :return: the day's payment, rounded to 2 decimals.
    """
    hourly_rate = float(user["payment_per_hour"])
    # Hours elapsed between arrival and exit (handles overnight shifts).
    worked_hours = calculate_total_hours(user["arrival_time"], user["exit_time"])
    return round(worked_hours * hourly_rate, 2)
def calculate_total_hours(start_hour, exit_hour):
    """
    Return the hours elapsed between two "HH:MM" strings.

    Wraps around midnight: when the exit time is not later than the start
    time, the shift is assumed to end on the following day.

    :param start_hour: "HH:MM" string, 1-tuple of one, or None.
    :param exit_hour: "HH:MM" string, 1-tuple of one, or None.
    :return: elapsed hours rounded to 2 decimals (0 when data is missing).
    """
    # Database rows arrive wrapped in 1-tuples; unwrap them first.
    if type(start_hour) == tuple:
        start_hour = start_hour[0]
    if type(exit_hour) == tuple:
        exit_hour = exit_hour[0]
    # End-of-month bookkeeping may pass empty registers.
    if start_hour is None or exit_hour is None:
        return 0
    # Seconds elapsed since midnight for each timestamp.
    start_fields = start_hour.split(":")
    seconds_at_start = int(start_fields[0]) * 3600 + int(start_fields[1]) * 60
    exit_fields = exit_hour.split(":")
    seconds_at_exit = int(exit_fields[0]) * 3600 + int(exit_fields[1]) * 60
    if seconds_at_exit > seconds_at_start:
        # Same calendar day: plain difference.
        return round((seconds_at_exit - seconds_at_start) / 3600, 2)
    # Exit happened after midnight: rest of the first day plus the morning.
    return round((24 * 3600 - seconds_at_start + seconds_at_exit) / 3600, 2)
def calculate_current_day(user_data):
    """
    Map the stored weekday index (0-6, Monday first) to its English name.

    :param user_data: dict holding user_data["work_days"]["number_of_day"].
    :return: weekday name as a string.
    """
    weekday_names = ("Monday", "Tuesday", "Wednesday", "Thursday",
                     "Friday", "Saturday", "Sunday")
    return weekday_names[user_data["work_days"]["number_of_day"]]
def check_if_float_number(number: str):
    """
    Parse *number* as a float.

    :param number: string to parse.
    :return: the value rounded to 2 decimals, or False when not numeric.
    """
    try:
        return round(float(number), 2)
    except ValueError:
        # Not a parsable number.
        return False
def receive_current_day():
    """
    Return today's date as "dd-mm-YYYY", adjusted for the server timezone.

    IMPORTANT: the server clock runs one hour behind the users, so when
    the corrected hour is midnight the date has already rolled over and
    the next calendar day is reported instead.

    :return: date string in "dd-mm-YYYY" format.
    """
    now = datetime.datetime.now()
    corrected_hour = datetime.datetime.strptime(receive_current_hour(), "%H:%M").time()
    if corrected_hour.hour == 0:
        # Midnight on the corrected clock means tomorrow locally.
        return (now + datetime.timedelta(days=1)).strftime("%d-%m-%Y")
    return now.strftime("%d-%m-%Y")
def receive_current_hour():
    """
    Return the current time plus one hour as "HH:MM".

    IMPORTANT: compensates for the server living one hour behind the
    users' timezone.

    :return: hour string in "HH:MM" format.
    """
    shifted = datetime.datetime.now() + datetime.timedelta(hours=1)
    return shifted.strftime("%H:%M")
def check_if_lower_hour(start_hour: str, end_hour: str):
    """
    Return the earlier of two "HH:MM" hours (start wins on a tie).

    NOTE(review): minutes are compared as strings, which is only correct
    for zero-padded two-digit values — matching the bot's validated
    format.

    :param start_hour: "HH:MM" string or 1-tuple of one.
    :param end_hour: "HH:MM" string or 1-tuple of one.
    :return: the lower of the two hour strings.
    """
    # Database rows arrive wrapped in 1-tuples; unwrap them first.
    if type(start_hour) == tuple:
        start_hour = start_hour[0]
    if type(end_hour) == tuple:
        end_hour = end_hour[0]
    start_fields = start_hour.split(":")
    end_fields = end_hour.split(":")
    start_hours = int(start_fields[0])
    end_hours = int(end_fields[0])
    # Different hours: the smaller hour value decides.
    if start_hours != end_hours:
        return start_hour if start_hours < end_hours else end_hour
    # Same hour: fall back to (string) minute comparison; ties keep start.
    if end_fields[1] < start_fields[1]:
        return end_hour
    return start_hour
def get_previous_day():
    """
    Return yesterday's date formatted as "dd-mm-YYYY".

    :return: date string for the previous calendar day.
    """
    yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
    return yesterday.strftime("%d-%m-%Y")
def end_month_add_extra_hours_to_days(total_days, usual_hours, free_days_pattern):
    """
    Complete a month's registers and compute the extra hours per day.

    Registers don't have to be always complete. If the user didn't use
    the chat for a day (or more), it is supposed that everything went as
    in the normal work schedule. This function receives those incomplete
    registers from the database, completes them if needed, and then
    calculates the extra hours.

    :param total_days: list of day tuples (id, date, entry, exit, extra).
    :param usual_hours: pair (usual start hour, usual end hour).
    :param free_days_pattern: weekday-name -> bool mapping; per the
           original comments a truthy value marks a *working* day here,
           despite the parameter's name — TODO confirm against callers.
    :return: list of [id, date, entry, exit, extra_hours] rows with a
             final {"total_extra_hours": ...} summary appended.
    """
    # Accumulators: per-day rows and the month's running total.
    days_with_extra_hours = []
    total_extra_hours = 0
    # Length of the normal working day, used as the extra-hours baseline.
    usual_hours_difference = calculate_total_hours(usual_hours[0], usual_hours[1])
    # Produces a loop for every day in the given total days list.
    for day in total_days:
        # Weekday name ("Monday", ...) for this date.
        week_day = calculate_what_day_is(day[1])
        # If that day is a working day.
        if free_days_pattern[week_day]:
            # Extra hours = duration of that work day minus the normal shift.
            extra_hours = round(calculate_total_hours(day[2], day[3]) - usual_hours_difference, 1)
        # If it is a free day.
        else:
            # No registers at all: the user simply didn't work that day.
            if day[2] is None and day[3] is None:
                extra_hours = 0
            # Worked on a free day: every hour counts as extra.
            else:
                extra_hours = round(calculate_total_hours(day[2], day[3]), 1)
        # Fold this day into the monthly total.
        total_extra_hours += extra_hours
        # Row layout mirrors the DB tuple, with extra hours appended.
        days_with_extra_hours.append([day[0], day[1], day[2], day[3], extra_hours])
    # Round the total, ride it at the end of the list (callers pop() it).
    total_extra_hours = round(total_extra_hours, 1)
    days_with_extra_hours.append({"total_extra_hours": total_extra_hours})
    return days_with_extra_hours
def calculate_what_day_is(day):
    """
    Return the English weekday name for a "dd-mm-YYYY" date string.

    :param day: date formatted as "dd-mm-YYYY".
    :return: weekday name, e.g. "Monday".
    """
    parsed = datetime.datetime.strptime(day, '%d-%m-%Y')
    return parsed.strftime('%A')
def free_days_pattern(free_days):
    """
    Convert a free-days DB row (telegram id followed by seven 0/1 flags,
    Monday first) into a {weekday: bool} dictionary.

    :param free_days: sequence whose first item is the row tuple.
    :return: dict mapping weekday names to True/False.
    """
    row = free_days[0]
    # Drop the leading telegram id; the remaining seven values are the
    # per-weekday flags (equivalent to the old list-and-remove dance).
    flags = list(row[1:])
    # Default pattern: every day starts as False.
    pattern = {"Monday": False, "Tuesday": False, "Wednesday": False,
               "Thursday": False, "Friday": False, "Saturday": False,
               "Sunday": False}
    # Flip on the days the DB marked with a 1 (dict preserves Monday-first order).
    for index, weekday in enumerate(pattern):
        if flags[index] == 1:
            pattern[weekday] = True
    return pattern
def create_message_end_of_the_month(total_days, money_per_hour):
    """
    Build the detailed end-of-month HTML report for the user.

    NOTE: mutates *total_days* — the trailing summary dict is popped off.

    :param total_days: day rows plus a final {"total_extra_hours": ...}
           entry, as produced by end_month_add_extra_hours_to_days().
    :param money_per_hour: sequence whose first item is the hourly rate.
    :return: HTML-formatted message string.
    """
    # The monthly total rides at the end of the list.
    total_extra_hours = total_days[-1]
    total_extra_hours = total_extra_hours['total_extra_hours']
    # Remove the summary entry so the loop below only sees day rows.
    total_days.pop()
    message = ""
    # One line per day of the month.
    for day in total_days:
        # day[2] is the entry hour; None means the user didn't work.
        if day[2] is None:
            message += "-{}, {} was your day off.\n\n".format(day[1], calculate_what_day_is(day[1]))
        # Otherwise render the full schedule for that day.
        else:
            message += "-{}, {} you worked from {} to {}, making a total of <b>{} extra hours</b> \n\n".format(
                day[1], calculate_what_day_is(day[1]), day[2], day[3], day[4])
    # Money earned from the extra hours, appended at the bottom.
    total_money = round(total_extra_hours * money_per_hour[0], 2)
    message += "\n\n Total extra hours are {}. Making a total of {}€".format(total_extra_hours, total_money)
    return message
def change_days_to_number(day):
    """
    Extract the day-of-month from a "dd-mm-yyyy" date string.

    :param day: date string.
    :return: the day of the month as an int.
    """
    return int(day.split("-")[0])
def create_simplified_message(total_days, money_per_hour):
    """
    Build the condensed end-of-month report, rounding extra time to
    half-hour (0.5 h) steps for quick reading.

    NOTE: mutates *total_days* — the trailing summary entry is popped off.

    :param total_days: day rows plus a final summary entry, as produced
           by end_month_add_extra_hours_to_days().
    :param money_per_hour: sequence whose first item is the hourly rate.
    :return: HTML-formatted message string.
    """
    # Drop the {"total_extra_hours": ...} summary entry at the end.
    total_days.pop()
    # Running totals: counted hours and the leftover-minutes remainder.
    total_counter = 0
    half_hour_counter = 0
    message = ""
    for day in total_days:
        # Day of the month for this row's date.
        day_number = change_days_to_number(day[1])
        # Returns (line, carried remainder, half-hour-rounded hours).
        extra_hours = calculate_half_hours(half_hour_counter, day_number, day[4], day[2])
        # Append this day's line to the report.
        message += extra_hours[0]
        # Carry the unrounded remainder into the next day.
        half_hour_counter = extra_hours[1]
        # Accumulate the counted (rounded) hours.
        total_counter += extra_hours[2]
    # Footer: totals plus the minutes that never reached a half hour.
    message += "\nTotal = {} hours.\nTotal money = {}€.\n {} minutes not added to the extra hours."\
        .format(total_counter, money_per_hour[0] * total_counter, round(half_hour_counter * 60, 1))
    return message
def calculate_half_hours(counter, day, hours, start_hour):
    """
    Build the per-day line for the simplified monthly report.

    Extra time is counted in half-hour steps; leftover fractions
    accumulate in *counter* and are promoted to an extra half hour once
    they reach 0.5.

    :param counter: running remainder (fraction of an hour) carried over.
    :param day: day of the month (int).
    :param hours: extra hours worked that day.
    :param start_hour: the day's start hour, or None for a day off.
    :return: (message, updated counter, hours counted for this day).
    """
    # Day off: render a separator line and count nothing.
    if start_hour is None:
        return "<b>-Day {} -----------------\n\n</b>".format(day), counter, 0
    # Whole half-hours plus the fractional remainder.
    half_hours = hours // 0.5
    counter += hours % 0.5
    # Promote the accumulated remainder once it reaches half an hour.
    if counter >= 0.5:
        half_hours += 1
        counter -= 0.5
    complete_hours = half_hours / 2
    message = "<b>-Day {} - {} hours.</b>\n\n".format(day, complete_hours)
    return message, counter, complete_hours
def calculate_how_many_days(date_in_the_same_month):
    """
    Return the number of days in the month of the given date.

    :param date_in_the_same_month: any date of the month, "dd-mm-YYYY".
    :return: the month's length in days (28-31).
    """
    date = datetime.datetime.strptime(date_in_the_same_month, "%d-%m-%Y").date()
    # Fix: the old `replace(month=date.month + 1)` raised ValueError for
    # any December date (month 13); wrap to January of the next year.
    if date.month == 12:
        first_of_next_month = date.replace(year=date.year + 1, month=1, day=1)
    else:
        first_of_next_month = date.replace(month=date.month + 1, day=1)
    # The last day of this month is the day before the 1st of the next.
    last_day = first_of_next_month - datetime.timedelta(days=1)
    return last_day.day
def add_all_days(days, free_days, start_hour, finish_hour):
    """
    Expand a (possibly sparse) month of registers to one row per day.

    Days missing from *days* are synthesised: working days get the usual
    start/finish hours, free days get None hours. Existing rows are
    completed via complete_days().

    :param days: the month's rows in date order:
           (telegram_id, "dd-mm-yyyy", entry, exit, extra).
    :param free_days: weekday-name -> bool pattern; a truthy value is
           treated as a working day here (matching the original logic).
    :param start_hour: usual entry hour (1-tuple; [0] is used).
    :param finish_hour: usual exit hour (1-tuple; [0] is used).
    :return: list with one tuple per day of the month.
    """
    # First registered day fixes the month/year being expanded.
    first_day = days[0][1]
    month_duration = calculate_how_many_days(first_day)
    list_to_return = []
    # "03-12-1997" -> ["03", "12", "1997"]; keep the "mm-yyyy" suffix.
    day_pattern = first_day.split("-")
    month_and_year = day_pattern[1] + "-" + day_pattern[2]
    # Renamed from `id` to stop shadowing the builtin.
    telegram_id = days[0][0]
    # Index into the (sorted) registered rows.
    days_list_counter = 0
    for day_of_month in range(1, month_duration + 1):
        # No register for this calendar day: synthesise one.
        if days_list_counter >= len(days) or change_days_to_number(days[days_list_counter][1]) != day_of_month:
            # "{:02d}" zero-pads single-digit days, e.g. "03-12-1997".
            complete_day = "{:02d}-".format(day_of_month) + month_and_year
            if free_days[calculate_what_day_is(complete_day)]:
                # Working day: assume the normal schedule was followed.
                list_to_return.append((telegram_id, complete_day, start_hour[0], finish_hour[0], None))
            else:
                # Free day: recorded without hours.
                list_to_return.append((telegram_id, complete_day, None, None, None))
        else:
            # Register exists: fill any missing entry/exit hour and keep it.
            list_to_return.append(complete_days(days[days_list_counter], start_hour, finish_hour))
            days_list_counter += 1
    return list_to_return
def complete_days(day, start_hour, finish_hour):
    """
    Fill a half-complete day register with the usual schedule hours.

    :param day: tuple (id, date, entry, exit, extra).
    :param start_hour: default entry hour (string or 1-tuple).
    :param finish_hour: default exit hour (string or 1-tuple).
    :return: the completed day tuple (unchanged if nothing was missing).
    """
    # Database rows arrive wrapped in 1-tuples; unwrap them first.
    if type(start_hour) == tuple:
        start_hour = start_hour[0]
    if type(finish_hour) == tuple:
        finish_hour = finish_hour[0]
    entry_hour, exit_hour = day[2], day[3]
    # Only the entry hour is missing: assume the usual start.
    if entry_hour is None and exit_hour is not None:
        return (day[0], day[1], start_hour, exit_hour, day[4])
    # Only the exit hour is missing: assume the usual finish.
    if exit_hour is None and entry_hour is not None:
        return (day[0], day[1], entry_hour, finish_hour, day[4])
    # Fully present (or fully absent): return the row untouched.
    return day
| FernandooMarinn/Extra_hours_bot | Functionalities/Functionalities.py | Functionalities.py | py | 18,933 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 187,
"usage_type": "call"
},
{
"api_na... |
8139571648 | from functools import cmp_to_key
class Solution:
    def reconstructQueue(self, people: list) -> list:
        """Rebuild the queue described by (height, k) pairs.

        Sort tallest-first (ties by smaller k first), then greedily
        insert each person at index k — everyone already placed is at
        least as tall, so each insertion keeps every k invariant valid.
        """
        ordered = sorted(people, key=cmp_to_key(self.cmp))
        queue = []
        for person in ordered:
            queue.insert(person[1], person)
        return queue

    def cmp(self, a: list, b: list):
        """Comparator: taller first; equal heights ordered by ascending k."""
        if a[0] != b[0]:
            return -1 if a[0] > b[0] else 1
        return -1 if a[1] < b[1] else 1
{
"api_name": "functools.cmp_to_key",
"line_number": 6,
"usage_type": "call"
}
] |
72557088994 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# In[ ]:
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
"""
Created on Fri Oct 13 20:37:30 2023
@author: saimo
"""
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import altair as alt
from PIL import Image
from sklearn.feature_selection import SelectKBest, chi2
import plotly.express as px
import hiplot as hip
from sklearn.metrics import roc_curve
from sklearn.metrics import accuracy_score, roc_curve, auc
from sklearn.impute import SimpleImputer
#import pandas as pd
from sklearn.utils import resample
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, roc_curve, auc, f1_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier, ExtraTreesClassifier, BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier, XGBRFClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# Allow calling st.pyplot() without passing an explicit figure.
st.set_option('deprecation.showPyplotGlobalUse', False)
st.set_page_config(layout="wide")

# Load the Framingham dataset
@st.cache_data  # cache the CSV read across Streamlit reruns
def load_data():
    """Read the Framingham CSV into a DataFrame (cached by Streamlit)."""
    data = pd.read_csv('Framingham_app/framingham.csv')  # Replace with the actual path to your dataset
    return data

data = load_data()

# Top-level navigation: one tab per section of the app.
tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs(["About Data", "Basic information Plots", "UniVariant Analysis Plots", "MultiVariant Analysis Plots", "Machine Learning Models", "Inference on Inputs", "Bio"])
with tab1:
    # --- "About Data" tab: dataset description and quick facts ---
    st.title("Framingham Heart Study Data")
    # Header image, resized to a fixed 300px height.
    image = Image.open('Framingham_app/Heart_img.png')
    img = image.resize((image.height, 300))
    st.image(img, caption='Image')
    st.title("About the DATA")
    st.write("The Framingham Heart Study is a long-term, ongoing cardiovascular cohort study that began in 1948 in Framingham, Massachusetts, USA. It's one of the most well-known and influential epidemiological studies of heart disease. The study has provided valuable insights into the risk factors associated with cardiovascular disease and has helped shape our understanding of heart disease and its prevention. The Framingham dataset is a collection of data generated through this study, and it's widely used in epidemiological and public health research.")
    st.write("The dataset contains detailed information about a variety of cardiovascular risk factors and other health-related variables for thousands of individuals. Here's an overview of the key aspects of the Framingham dataset: 1. **Study Participants**: The dataset includes information about thousands of participants from the Framingham area. It has both original and offspring cohorts, meaning it has data from different generations.2. **Data Categories**: The Framingham dataset includes information on a wide range of variables, including:- Demographic information (age, gender, etc.).- Medical history (e.g., diabetes, hypertension).")
    st.title("About this WebAPP")
    st.write("The web app is designed to provide an interactive and visually engaging platform for exploring and visualizing data from the Framingham Heart Study dataset. Users can interact with the app to:")
    st.write("1. Visualize data relationships: Explore relationships between various attributes and Ten-Year Coronary Heart Disease (CHD) status through interactive scatter plots.")
    st.write("2. Understand CHD distribution: View the proportion of CHD cases versus no CHD cases using an interactive pie chart.")
    st.write("3. Interactive 3D visualization: Discover how age, cigarettes per day, and systolic blood pressure relate to Ten-Year CHD using an interactive 3D scatter plot.")
    st.write("4. Missing data analysis: Visualize missing data patterns using heatmaps and bar plots for a comprehensive data overview.")
    st.write("5. Customizable exploration: Users can customize their exploration by selecting attributes and visualizations through drop-down menus and select boxes.")
    st.write("This web app empowers users to gain insights and understand the Framingham dataset visually, enhancing the process of data exploration and analysis.")
    # Two-column quick facts: feature list (left) and summary stats (right).
    col1, col2 = st.columns(2)
    with col1:
        on = st.toggle('feature list of the dataset')
        if on:
            # Show the raw column names when the toggle is on.
            k = list(data.columns)
            st.write(k)
        st.write('Basic Framingham Dataset Information:')
        st.write(f'Total Number of Samples: {data.shape[0]}')
        st.write(f'Number of Features: {data.shape[1]}')
    with col2:
        on1 = st.toggle('summary statistics of the dataset')
        if on1:
            st.write("### Summary Statistics")
            st.write(data.describe())
with tab2:
    # --- "Basic information Plots" tab: target distribution, age filter,
    # missing-value overview and imputation ---
    chd_counts = data['TenYearCHD'].value_counts()
    chd_proportion = chd_counts / chd_counts.sum()
    st.title("Pie Chart: Proportion of CHD vs. No CHD")
    fig = px.pie(chd_proportion, values=chd_proportion, names=chd_proportion.index,
                 labels={'index': 'CHD Status'}, title="Proportion of CHD vs. No CHD")
    st.plotly_chart(fig)
    # Interactive age filter: keep rows at or below the chosen age.
    st.header('Data Filters')
    age_filter = st.slider('Filter by Age', min_value=int(data['age'].min()), max_value=int(data['age'].max()))
    filtered_data = data[data['age'] <= age_filter]
    # Show an Altair plot of the age distribution for the filtered rows.
    st.write("### Age Distribution")
    age_chart = alt.Chart(filtered_data).mark_bar().encode(
        x=alt.X('age:Q', bin=True),
        y='count()',
        tooltip=['age:Q', 'count()']
    ).interactive()
    st.altair_chart(age_chart)
    st.write("### Filtered Data")
    st.write(filtered_data)
    # Missing-value overview: count of NaNs per attribute.
    missing_values_count = data.isnull().sum()
    plt.figure(figsize=(10, 6))
    missing_values_count.plot(kind='bar', color='skyblue')
    plt.title("Missing Values by Attribute")
    plt.xlabel("Attributes")
    plt.ylabel("Count of Missing Values")
    plt.xticks(rotation=45)
    st.pyplot()
    numeric_columns = data.select_dtypes(include=['number']).columns
    categorical_columns = data.select_dtypes(exclude=['number']).columns
    # Impute missing values based on data type
    st.title ("Data Before Handling Missing Values")
    imputed_data = data.copy()
    col1,col2 = st.columns(2)
    with col1:
        on = st.toggle('feature list of the dataset',['features'])
        if on:
            st.write("Original Data:")
            st.write(data)
        else:
            col1.empty()
    with col2:
        on1 = st.toggle('summary statistics of the dataset',['summary'])
        if on1:
            st.write("### Summary Statistics")
            st.write(data.describe())
    # Mean imputation for numeric columns, mode for categorical ones.
    for col in numeric_columns:
        imputed_data[col].fillna(imputed_data[col].mean(), inplace=True)
    for col in categorical_columns:
        imputed_data[col].fillna(imputed_data[col].mode().iloc[0], inplace=True)
    # NOTE: `imputed_data` is reused later (HiPlot section of tab 4).
    st.title("Imputed Data (Mean for Numeric, Mode for Categorical):")
    st.write(imputed_data.describe())
with tab3:
    # --- "UniVariant Analysis Plots" tab: one feature at a time vs CHD ---
    data = load_data()
    # Basic EDA plots for categorical variables using interactive violin plots
    st.title("Violin Plots for Categorical Variables in Framingham Dataset")
    st.write("Univariate analysis : Here we focus on examining individual variables one at a time. In the context of the Framingham Heart Study dataset, univariate analysis involves studying the relationship between each individual feature (independent variable) and the target variable 'TenYearCHD' (Coronary Heart Disease) to understand how each feature influences the presence of CHD. Univariate analysis helps identify which features have a significant impact on CHD.")
    # Define the categorical variables to be visualized
    categorical_columns = ["currentSmoker", "BPMeds", "prevalentStroke", "prevalentHyp","male"]
    selected_variable = st.selectbox("Select a Categorical Variable", categorical_columns)
    fig = px.violin(data, x=selected_variable, y="age", box=True, points="all", title=f"Interactive Violin Plot for {selected_variable} vs Age")
    st.plotly_chart(fig)
    # Same variable list, but chosen via radio buttons for the box plot.
    selected_variable = st.radio("Select a Categorical Variable", categorical_columns)
    # Create box plots for the selected variable with respect to CHD
    fig = px.box(data, x=selected_variable, y="age", color="TenYearCHD",
                 labels={"age": "Age", selected_variable: selected_variable, "TenYearCHD": "CHD"})
    fig.update_layout(
        title=f"Interactive Box Plot for {selected_variable} with Respect to CHD",
        xaxis_title='',
        yaxis_title='Age',
        showlegend=True,
    )
    st.plotly_chart(fig)
    st.title("KDE Plots for the Attributes with Respect to CHD")
    # Define the numerical variables to be visualized
    numerical_columns = ["age", "cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]
    # Split the data by the target so the two distributions can be overlaid.
    chd_positive = data[data['TenYearCHD'] == 1]
    chd_negative = data[data['TenYearCHD'] == 0]
    # List of columns to plot (everything except the target itself).
    columns_to_plot = data.columns.drop('TenYearCHD')
    # Plot categorical features (bar plots)
    for column in columns_to_plot:
        if data[column].dtype == 'object':
            plt.figure(figsize=(8, 4))
            sns.countplot(x=column, data=data, hue='TenYearCHD')
            plt.title(f'{column} vs. Ten-Year CHD')
            plt.xticks(rotation=45)
            st.pyplot()
    # Plot continuous features (histograms with KDE, CHD vs no-CHD overlay)
    for column in columns_to_plot:
        if data[column].dtype != 'object':
            plt.figure(figsize=(8, 4))
            sns.histplot(chd_negative[column], kde=True, label='No CHD', color='blue', alpha=0.6)
            sns.histplot(chd_positive[column], kde=True, label='CHD', color='red', alpha=0.6)
            plt.title(f'{column} vs. Ten-Year CHD')
            plt.xlabel(column)
            plt.legend()
            st.pyplot()
    st.subheader('Conclusion')
    st.write('From the above plots we can see the relation that how each feature is being associate with the output and we also get know about the distribution of the data according to the target class')
with tab4:
    # --- "MultiVariant Analysis Plots" tab: several features at once ---
    st.title("Multivariant Analyis")
    st.write("Here we examing the relationships between multiple variables simultaneously. In the context of the Framingham Heart Study dataset, you can perform multivariate analysis to understand how combinations of features (independent variables) collectively influence the presence of Ten-Year Coronary Heart Disease (CHD) represented by the 'TenYearCHD' target variable. ")
    selected_variable = st.selectbox("Select a Numerical Variable", data.select_dtypes(include=['number']).columns)
    non_categorical_columns = data.select_dtypes(exclude=['object']).columns
    # Suggested continuous attributes for the scatter plot below.
    numerical_columns = ["cigsPerDay", "totChol", "sysBP", "diaBP", "BMI", "heartRate", "glucose"]
    st.write ("Suggested Attributes")
    st.write(numerical_columns)
    # 2D scatter: age vs. the chosen variable, colour-coded by CHD.
    st.title(f"Interactive Scatter Plot: Age vs. {selected_variable} vs. Ten-Year CHD")
    fig = px.scatter(data, x="age", y=selected_variable, color="TenYearCHD",
                     color_continuous_scale=["blue", "red"],
                     labels={"age": "Age", selected_variable: selected_variable, "TenYearCHD": "CHD"})
    fig.update_layout(
        title=f"Age vs. {selected_variable} vs. Ten-Year CHD",
        xaxis_title="Age",
        yaxis_title=selected_variable
    )
    st.plotly_chart(fig)
    # 3D scatter of three fixed risk factors, colour-coded by CHD.
    st.title("Interactive 3D Scatter Plot: Age, Cigarettes Per Day, Systolic BP vs. Ten-Year CHD")
    fig = px.scatter_3d(data, x="age", y="cigsPerDay", z="sysBP", color="TenYearCHD",
                        color_continuous_scale=["blue", "red"],
                        labels={"age": "Age", "cigsPerDay": "Cigarettes Per Day", "sysBP": "Systolic BP", "TenYearCHD": "CHD"})
    fig.update_layout(
        scene=dict(xaxis_title="Age", yaxis_title="Cigarettes Per Day", zaxis_title="Systolic BP"),
        title="Age, Cigarettes Per Day, Systolic BP vs. Ten-Year CHD"
    )
    st.plotly_chart(fig)
    # Visualization with HiPlot (parallel-coordinates style exploration).
    def save_hiplot_to_html(exp):
        # Persist the HiPlot experiment to a static HTML file for embedding.
        output_file = "hiplot_plot_1.html"
        exp.to_html(output_file)
        return output_file
    st.header("Visualization with HiPlot")
    # Uses `imputed_data` built in tab 2 (NaN-free copy of the dataset).
    selected_columns = st.multiselect("Select columns to visualize", imputed_data.columns,default = ['age', 'cigsPerDay', 'totChol', 'sysBP','heartRate','TenYearCHD'])
    selected_data = imputed_data[selected_columns]
    if not selected_data.empty:
        experiment = hip.Experiment.from_dataframe(selected_data)
        hiplot_html_file = save_hiplot_to_html(experiment)
        st.components.v1.html(open(hiplot_html_file, 'r').read(), height=1500, scrolling=True)
    else:
        st.write("No data selected. Please choose at least one column to visualize.")
    st.subheader('Conclusion')
    st.write('This tab was mainly used to understand how two features of the data played a role in understanding the target disease. From the Hi-plot we can interactively and select the features and data accordingly to see what effect the features have on Heart Disease.')
with tab5:
st.title("Machine Learning Classifier Performance")
#data, selected_columns = oad_data()
st.write('The Predictive Analysis feature within the application utilizes sophisticated machine learning models such as Logistic Regression and Random Forest to unravel the features of the subjects. With a diverse array of models, users can gain varied analytical perspectives on Framingham Data.')
st.write('This tab serves as a conduit for translating complex data into accessible and interactive insights, enabling users, from decision-makers to the general public, to experiment with data and witness immediate results. This approach not only facilitates the prediction and comprehension of this complex data but also empowers users to engage with and respond to these critical issues proactively. The customization feature allows users to tailor the analysis with respect to the selected feautures and also use top 10 features to dervide insights.')
st.write('The inclusion of a range of models is pivotal, as it enables users to apply diverse analytical perspectives to the same dataset. This diversity is critical because different models can spotlight distinct facets of the data.')
st.markdown("""<hr style="height:3px;border:none;color:#333;background-color:#333;" /> """, unsafe_allow_html=True)
def oad_data():
    """Load, clean, balance and feature-select the Framingham CHD dataset.

    Returns a DataFrame containing the 10 chi2-selected feature columns plus
    the TenYearCHD target column.

    NOTE(review): the name looks like a typo for load_data; kept as-is because
    the surrounding script calls oad_data().
    """
    data = pd.read_csv('Framingham_app/framingham.csv')
    data = data.sample(frac=0.6, random_state=42)  # Use a fixed random state for reproducibility
    # missing_values_* variables only feed the commented-out debug writes below.
    missing_values_before = data.isnull().sum()
    #st.write("Missing values before imputation:", missing_values_before)
    # Impute missing values for numerical columns with each column's median
    num_cols = data.select_dtypes(include=['int64', 'float64']).columns
    imputer = SimpleImputer(strategy='median')
    data[num_cols] = imputer.fit_transform(data[num_cols])
    # Check for missing values after imputation
    missing_values_after = data.isnull().sum()
    #st.write("Missing values after imputation:", missing_values_after)
    # Replace infinite values with NaN so the final dropna removes them
    data.replace([np.inf, -np.inf], np.nan, inplace=True)
    # Check for missing values after replacing infinite values
    missing_infinite = data.isnull().sum()
    #st.write("Missing/infinite values after replacement:", missing_infinite)
    # Drop any rows that still have NaNs (should be very few if any)
    data.dropna(inplace=True)
    # Splitting the dataset based on class
    target1 = data[data['TenYearCHD'] == 1]
    target0 = data[data['TenYearCHD'] == 0]
    # Resampling: upsample the positive class to match the negative-class count
    target1_resampled = resample(target1, replace=True, n_samples=len(target0), random_state=40)
    data_balanced = pd.concat([target0, target1_resampled])
    # Feature Selection: columns 0..14 are the candidate features; the last
    # column is the TenYearCHD target.
    X = data_balanced.iloc[:, 0:15]
    y = data_balanced.iloc[:, -1]
    # NOTE(review): chi2 requires non-negative features — assumes every column
    # in this CSV is non-negative; confirm against the data schema.
    best = SelectKBest(score_func=chi2, k=10)
    best.fit(X, y)
    # Select the top 10 features by chi2 score
    top_features = [X.columns[i] for i in best.get_support(indices=True)]
    data_selected = data_balanced[top_features + ['TenYearCHD']]
    return data_selected
data = oad_data()
def train_evaluate_model_cv(model, X, y, cv_folds):
    """Cross-validate `model` on (X, y), report and return the mean scores.

    Runs a single cross-validation pass scoring both accuracy and F1 — the
    original ran two separate full CV passes, doubling the training work.
    With an integer `cv_folds`, scikit-learn uses deterministic (Stratified)
    KFold splits, so the scores are identical to the two-pass version.

    Writes both means to the Streamlit page and returns
    (mean_f1, mean_accuracy), the same order callers already rely on.
    """
    from sklearn.model_selection import cross_validate  # one pass, both metrics
    cv_results = cross_validate(model, X, y, cv=cv_folds, scoring=('accuracy', 'f1'))
    mean_accuracy_score = np.mean(cv_results['test_accuracy'])
    mean_score = np.mean(cv_results['test_f1'])
    st.write(f"Mean Accuracy Score (Cross-Validation): {mean_accuracy_score:.4f}")
    st.write(f"Mean F1 Score (Cross-Validation): {mean_score:.4f}")
    return mean_score, mean_accuracy_score
# Function to plot top 10 important features
def plot_top_features(X, y):
    """Render a horizontal bar chart of the 10 features with the highest
    ANOVA F-scores (SelectKBest with f_classif) into the Streamlit page.

    X must be a DataFrame (column names are used as labels); y is the target.
    """
    selector = SelectKBest(f_classif, k=10)
    # Only the fitted scores_ are needed; the transformed matrix returned by
    # fit_transform was computed and discarded in the original version.
    selector.fit(X, y)
    feature_scores = pd.DataFrame({'Feature': X.columns, 'Score': selector.scores_})
    top_features = feature_scores.nlargest(10, 'Score')
    fig, ax = plt.subplots()
    top_features.plot(x='Feature', y='Score', kind='barh', ax=ax, color='skyblue')
    ax.set_title('Top 10 Important Features')
    st.pyplot(fig)
# Main app
#st.title("Machine Learning Classifier Performance")
#st.title("Machine Learning Classifier Performance")
# Load data
data = oad_data()
# Sidebar for feature selection
all_features = data.drop('TenYearCHD', axis=1).columns.tolist()
selected_features = st.multiselect("Select Features for Training", all_features, default=all_features[:10])
# Option to use top 10 features from SelectKBest
use_top_features = st.checkbox("Use Top 10 Features from SelectKBest", value=True)
st.markdown("""<hr style="height:3px;border:none;color:#333;background-color:#333;" /> """, unsafe_allow_html=True)
# Prepare data based on feature selection
if use_top_features:
X = data[selected_features]
else:
X = data[all_features]
y = data['TenYearCHD']
# Load data
data = oad_data()
# Sidebar for CV folds and feature selection
cv_folds = st.slider("Select Number of Cross-Validation Folds", min_value=2, max_value=10, value=5)
#selected_features = st.sidebar.multiselect("Select Features for Training",
#options=data.columns.drop('TenYearCHD').tolist(),
#default=data.columns.drop('TenYearCHD').tolist()[:10])
#selected_features = st.sidebar.multiselect("Select Features for Training", data.columns.drop('TenYearCHD'), default=data.columns.drop('TenYearCHD'))
st.markdown("""<hr style="height:3px;border:none;color:#333;background-color:#333;" /> """, unsafe_allow_html=True)
# Prepare data
#X = data[selected_features]
#y = data['TenYearCHD']
# Create tabs for each model
tab_lr, tab_rf, tab_nb, tab_gb = st.tabs(["Logistic Regression", "Random Forest", "Naive Bayes", "Gradient Boosting"])
with tab_lr:
C_lr = st.number_input("C (Regularization parameter)", min_value=0.01, max_value=10.0, step=0.01, value=1.0)
model_lr = LogisticRegression(C=C_lr, max_iter=1000)
if st.button('Run Logistic Regression'):
train_evaluate_model_cv(model_lr, X, y, cv_folds)
with tab_rf:
n_estimators_rf = st.slider("Number of trees in the forest", min_value=10, max_value=200, value=100)
model_rf = RandomForestClassifier(n_estimators=n_estimators_rf)
if st.button('Run Random Forest'):
train_evaluate_model_cv(model_rf, X, y, cv_folds)
# with tab_svm:
# C_svm = st.number_input("SVM - C (Regularization parameter)", 0.01, 10.0, step=0.01, value=1.0)
# kernel_svm = st.selectbox("SVM - Kernel", ("linear", "rbf", "poly"))
# model_svm = SVC(C=C_svm, kernel=kernel_svm, probability=True)
# if st.button('Run SVM'):
# train_evaluate_model_cv(model_svm, X, y, cv_folds)
with tab_gb:
n_estimators_gb = st.slider("Number of boosting stages", min_value=10, max_value=200, value=100)
learning_rate_gb = st.number_input("Gradient Boosting - Learning Rate", 0.01, 1.0, step=0.01, value=0.1)
model_gb = GradientBoostingClassifier(n_estimators=n_estimators_gb, learning_rate=learning_rate_gb)
if st.button('Run Gradient Boosting'):
train_evaluate_model_cv(model_gb, X, y, cv_folds)
with tab_nb:
#st.subheader("Naive Bayes Classifier")
model_nb = GaussianNB()
if st.button('Run Naive Bayes'):
train_evaluate_model_cv(model_nb, X, y, cv_folds)
st.subheader("How each model understands data ?")
st.write('Logistic Regression: Overview: Logistic Regression is a statistical method used for binary classification problems. It predicts the probability of an instance belonging to a particular class. Application: In the context of Framingham data, Logistic Regression can be used to predict the likelihood of a participant developing a cardiovascular condition based on various input features such as age, cholesterol levels, blood pressure, etc. However becaue of the complex features presnt in the dataset. This models fails to provide accurate predictions')
st.write('Random Forest: Random Forest is an ensemble learning method that builds multiple decision trees and merges their predictions. It is versatile and can be used for both classification and regression tasks. Application: In the Framingham dataset, Random Forest could be employed to identify important features contributing to cardiovascular risk and provide robust predictions by aggregating outputs from multiple decision trees. This models performs the best and it is able to provide predictions irrespective of the features')
st.write('Naive Bayes: Naive Bayes is a probabilistic classification algorithm based on Bayes theorem. Despite its simplicity, it often performs well, especially in text classification and simple datasets. Application: Naive Bayes can be applied to predict cardiovascular risk in the Framingham dataset by assuming independence between features, making it suitable for situations where this assumption is reasonable.')
st.write('Gradient Boosting:Gradient Boosting is an ensemble technique that builds a series of weak learners (usually decision trees) sequentially, with each one correcting errors of its predecessor. Application: In the context of the Framingham data, Gradient Boosting can effectively capture complex relationships between features and the target variable, providing accurate predictions by combining the strengths of multiple weak models.')
st.write('Experimenting with these models on the Framingham dataset allows for a nuanced understanding of their effectiveness in forecasting and dissecting cardiovascular risk. The choice of model depends on the specific characteristics of the data and the complexity of the scenarios being analyzed. By leveraging this diverse set of models, researchers and analysts can tailor their approach to gain comprehensive insights into the multifaceted nature of cardiovascular health and risk prediction.')
# Plot top features
plot_top_features(X, y)
#st.subheader("How each model understands data ?")
with tab6:
# Function to load and preprocess data
def oad_data():
    """Load, clean, balance and feature-select the Framingham dataset.

    Returns a tuple (data_selected, top_features): a DataFrame with the 10
    chi2-selected feature columns plus the TenYearCHD target, and the list of
    those 10 column names.

    NOTE(review): the name looks like a typo for load_data; kept as-is because
    the surrounding script calls oad_data().
    """
    data = pd.read_csv('Framingham_app/framingham.csv')
    data = data.sample(frac=0.6, random_state=42)  # fixed seed for reproducibility
    # Preprocessing: median-impute numeric columns, then drop any residual
    # NaN/inf rows.
    num_cols = data.select_dtypes(include=['int64', 'float64']).columns
    imputer = SimpleImputer(strategy='median')
    data[num_cols] = imputer.fit_transform(data[num_cols])
    data.replace([np.inf, -np.inf], np.nan, inplace=True)
    data.dropna(inplace=True)
    # Resampling: upsample the positive class to match the negative-class count
    target1 = data[data['TenYearCHD'] == 1]
    target0 = data[data['TenYearCHD'] == 0]
    target1_resampled = resample(target1, replace=True, n_samples=len(target0), random_state=40)
    data_balanced = pd.concat([target0, target1_resampled])
    # Feature Selection: keep the 10 features with the highest chi2 score.
    # NOTE(review): chi2 requires non-negative features — assumes all columns
    # are non-negative; confirm against the CSV schema.
    X = data_balanced.iloc[:, :-1]
    y = data_balanced['TenYearCHD']
    best = SelectKBest(score_func=chi2, k=10)
    best.fit(X, y)
    top_features = [X.columns[i] for i in best.get_support(indices=True)]
    data_selected = data_balanced[top_features + ['TenYearCHD']]
    return data_selected, top_features
data, top_features = oad_data()
# Function to train the models
def train_models(X_train, y_train):
    """Fit the four reference classifiers on the training split.

    Returns a dict mapping a display name to its fitted estimator.
    """
    classifiers = {
        'Logistic Regression': LogisticRegression(max_iter=1000),
        'Random Forest': RandomForestClassifier(n_estimators=100),
        'Naive Bayes': GaussianNB(),
        'Gradient Boosting': GradientBoostingClassifier(n_estimators=100),
    }
    for clf in classifiers.values():
        clf.fit(X_train, y_train)  # fit is in-place; no need to re-store the object
    return classifiers
# Main app
st.title("Machine Learning Classifier Performance")
# Split the data
X = data[top_features]
y = data['TenYearCHD']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# User input for prediction
with st.container():
st.subheader("Make Predictions")
columns = ['male','age','education','currentSmoker','cigsPerDay','BPMeds','prevalentStroke', 'prevalentHyp','diabetes','totChol','sysBP', 'diaBP', 'BMI', 'heartRate', 'glucose']
input_data = {}
for col in top_features:
if col in ['male', 'currentSmoker', 'prevalentStroke', 'prevalentHyp', 'diabetes','BPMeds']:
# Binary columns
input_data[col] = st.selectbox(f"{col.capitalize()}", [0, 1],key=col)
else:
# Numerical columns
# You might need to adjust min_value, max_value, and value based on the actual range of your data
input_data[col] = st.number_input(f"{col.capitalize()}", min_value=0, max_value=100, value=50,key=col)
if st.button("Predict"):
input_df = pd.DataFrame([input_data])
#input_data = {feature: st.number_input(f"{feature}", value=np.mean(X[feature])) for feature in top_features}
#if st.button("Predict"):
#input_df = pd.DataFrame([input_data])
models = train_models(X_train, y_train)
predictions = {name: model.predict(input_df)[0] for name, model in models.items()}
# Displaying the predictions
for model_name, prediction in predictions.items():
if model_name == 'Random Forest':
result = "Positive for CHD" if prediction == 1 else "Negative for CHD"
st.write(f"{model_name} Prediction: {result}")
#models = train_models(X_train, y_train)
#predictions = {name: model.predict(input_df)[0] for name, model in models.items()}
#for model_name, prediction in predictions.items():
#st.write(f"{model_name} Prediction: {prediction}")
st.markdown("""<hr style="height:3px;border:none;color:#333;background-color:#333;" /> """, unsafe_allow_html=True)
st.subheader("Conclusion")
st.write("Our innovative web application, built upon the rich Framingham dataset, offers users a powerful and insightful platform for comprehensive data analysis and prediction in cardiovascular health.")
st.write(" The univariate analysis component provides a meticulous examination of individual variables within the Framingham dataset. Users can delve into the distributions, central tendencies, and variations of key parameters, establishing a foundational understanding of each variable's characteristics. Advancing from univariate analysis, our app seamlessly integrates multivariate analysis capabilities, enabling users to uncover complex relationships and dependencies between various cardiovascular risk factors. This sophisticated exploration facilitates a holistic perspective, empowering users to discern intricate patterns and connections that contribute to a comprehensive understanding of cardiovascular health.")
st.write("The true value of our application lies in its predictive prowess, driven by machine learning classifiers trained on the Framingham dataset. These models offer users the ability to anticipate potential cardiovascular events, assess risk factors, and make informed decisions for proactive health management. The predictive insights gleaned from our models contribute to a more personalized and preventive approach to cardiovascular care. Our user-friendly interface ensures accessibility for a diverse audience, from healthcare professionals to individuals keen on monitoring their cardiovascular health. By seamlessly integrating analytical tools and machine learning models, our app becomes an indispensable resource for deriving actionable insights from the Framingham dataset, ultimately contributing to enhanced cardiovascular risk assessment and personalized health strategies.")
st.write("In summary, our web application on the Framingham dataset stands as a comprehensive solution, providing a deep dive into data analysis and predictive modeling specific to cardiovascular health. It is poised to make a meaningful impact on healthcare decision-making and individual well-being, aligning with the broader goals of proactive and personalized healthcare management.")
# Optional: Add model performance metrics or other analyses here
with tab7:
st.title("About the Developer ")
col1, col2 = st.columns(2)
col1.subheader("Sai Mohan Gajapaka")
col1.text("Master's in Data Science, MSU")
col1.write("As a dedicated Python programmer with a robust foundation in mathematical modeling and deep neural networks, I am currently advancing my journey in data science. My academic and research experiences have nurtured a strong proficiency in statistical analysis and machine learning, fueling my drive to tackle challenging problems with innovative solutions. My passion lies in applying my skills to real-world issues, particularly those that can make a positive social impact. I am constantly seeking opportunities that challenge me to grow and refine my abilities in this dynamic field. Beyond my academic pursuits, I have a keen interest in watching anime, which not only serves as a creative outlet but also often inspires my approach to complex problems. I'm deeply curious about the potential of deep learning and its applications, and I am committed to exploring its frontiers to contribute meaningfully to the field of data science.")
try :
col2.image("Framingham_app/profile.png")
except:
pass
| saimohan16/CMSE-830-Foundations-of-Data-Science | Framingham_app/app.py | app.py | py | 35,064 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.set_option",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "streamlit.set_page_config",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "stream... |
8865653662 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 09:10:52 2019
@author: Acer
"""
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data=pd.read_csv(r"D:\projects\zomato.csv")
data.describe()
data.columns
# Transforming rate column
data['rate_new'] = data['rate'].astype(str)
data['rate_new'] = data['rate_new'].apply(lambda x: x.split('/')[0])# Dealing with instanced with 'NEW'
data['rate_new'] = data['rate_new'].apply(lambda x: x.replace('NEW', str(np.nan)))
data['rate_new'] = data['rate_new'].apply(lambda x: x.replace('-', str(np.nan)))
# Changing data type
data['rate_new'] = data['rate_new'].astype(float)
data.drop(['rate'], axis=1, inplace=True)
print(f'{type(data["rate_new"][0])}')
data['approx_cost(for two people)'] = data['approx_cost(for two people)'].str.replace(',','').apply(lambda x:float(x))
#%%
#Dropping unnecessary data
data.drop(['url', 'address', 'dish_liked', 'phone', 'reviews_list', 'menu_item','location'], axis=1, inplace=True)
# Looking for null data
data.isnull().sum()
data = data.dropna(subset=['rate_new', 'approx_cost(for two people)'])
data = data.fillna('Not defined')
data.isnull().sum()
data.reset_index(drop=True)
data.describe()
data
#===========================EDA===========================
#%%
#1.Restaurant Rate Distribution
data['rate_new'].describe()
sns.set(style='darkgrid',palette='muted',color_codes=True)
fig, ax=plt.subplots(figsize=(12,5))
sns.distplot(data['rate_new'],bins=30,color='blue')
ax.set_title('Restaurant Rate Distribution',size=13)
ax.set_xlabel('Rate')
plt.show()
#%%
#2. Approx. cost of 2 people
data['approx_cost(for two people)']
sns.set(style='darkgrid',palette='muted',color_codes=True)
fig, ax=plt.subplots(figsize=(12,5))
sns.distplot(data['approx_cost(for two people)'],bins=10,color='blue')
ax.set_title('Approx cost for two people')
ax.set_xlabel('cost')
plt.show()
#%%
#3.Finding Outliers
#Online_Order
fig, ax=plt.subplots(figsize=(12,7))
sns.boxplot(x='online_order',y='rate_new',data=data)
#BookTable
fig, ax=plt.subplots(figsize=(12,7))
sns.boxplot(x='book_table',y='rate_new',data=data)
#%%
#4.Correlation between rating and cost
fig, ax = plt.subplots(figsize=(10 , 10))
sns.scatterplot(x='rate_new', y='approx_cost(for two people)', data=data, ax=ax)
ax.set_title('Correlation Between Rate and Approx Cost', size=14)
plt.show()
#%%
#5.Correlation between rating and Online Order or Booking Tables
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 10))
sns.scatterplot(x='rate_new', y='approx_cost(for two people)', hue='online_order',
data=data, ax=axs[0], palette=['navy', 'crimson'])
sns.scatterplot(x='rate_new', y='approx_cost(for two people)', hue='book_table',
data=data, ax=axs[1], palette=['navy', 'crimson'])
axs[0].set_title('Cost and Rate Distribution by Online Order', size=14)
axs[1].set_title('Cost and Rate Distribution by Book Table', size=14)
plt.show()
#%%
data.groupby(by='online_order').mean()
#%%
data.groupby(by='book_table').mean()
#%%
#6.Top Rated Restaurant
grouped_rate = data.groupby(by='name', as_index=False).mean()
top_rating = grouped_rate.sort_values(by='rate_new', ascending=False).iloc[:10, np.r_[0, -1]]
top_rating
top_rating.iloc[1, 0] = 'Santa Spa Cuisine'
# Plotting
fig, ax = plt.subplots(figsize=(13, 5))
ax = sns.barplot(y='name', x='rate_new', data=top_rating, palette='Blues_d')
ax.set_xlim([4.7, 4.95])
ax.set_xlabel('Mean Rate')
ax.set_ylabel('')
for p in ax.patches:
width = p.get_width()
ax.text(width+0.007, p.get_y() + p.get_height() / 2. + 0.2, '{:1.2f}'.format(width),
ha="center", color='grey')
ax.set_title('Top 10 Restaurants in Bengaluru by Rate', size=14)
plt.show()
#%%
#Label Encoding
from sklearn.preprocessing import LabelEncoder
lb_en=LabelEncoder()
data['online_order']=lb_en.fit_transform(data['online_order'])
data['online_order'].unique()
data['online_order']
#%%
data['book_table']=lb_en.fit_transform(data['book_table'])
data['book_table'].unique()
data['book_table']
#%%
data['listed_in(type)']=lb_en.fit_transform(data['listed_in(type)'])
data['listed_in(type)']
#%%
data['listed_in(city)'].unique()
data['listed_in(city)']=lb_en.fit_transform(data['listed_in(city)'])
data['listed_in(city)']
#%%
data=pd.read_excel(r"D:\projects\zomato2.xlsx")
data['rest_type'].unique()
data['rest_type']=lb_en.fit_transform(data['rest_type'])
data['rest_type'].unique()
data['rest_type']
data.drop(['cuisines'], axis=1, inplace=True)
data.columns
#data['rate_new'].unique()
data['listed_in(type)'].unique()
#%%
corr = data.corr(method='kendall')
plt.figure(figsize=(15,8))
sns.heatmap(corr, annot=True)
#%%
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score, StratifiedKFold
data.columns
x=data.iloc[:,1:8]
y=data['rate_new']
sc=StandardScaler()
sc.fit(x)
x=sc.transform(x)
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.10,random_state=20)
#kfold=StratifiedKFold(n_splits=10,random_state=48)
#%%
#LASSO
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
alpha=0.1
lasso=Lasso(alpha=alpha)
lasso.fit(x_train,y_train)
y_train_pred=lasso.predict(x_train)
y_test_pred=lasso.predict(x_test)
print(lasso.coef_)
print('MSE train: %.3f,test: %.3f' % (mean_squared_error(y_train,y_train_pred),mean_squared_error(y_test,y_test_pred)))
print('R^2 train: %.3f,test: %.3f' % (r2_score(y_train,y_train_pred),r2_score(y_test,y_test_pred)))
r2_score_lasso=r2_score(y_test,y_test_pred)
print(lasso)
print("r^2 on test data: %f" % r2_score_lasso)
predictors=data.columns.values[1:8]
coef=pd.Series(lasso.coef_,predictors).sort_values()
coef.plot(kind='bar', title='Modal Coefficients')
#%%
#RandomForest
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
# Select features whose Random-Forest importance exceeds the default threshold.
sel = SelectFromModel(RandomForestRegressor(n_estimators=100))
sel.fit(x_train, y_train)
# BUG FIX: x_train is a plain ndarray after StandardScaler.transform(), so it
# has no .columns attribute. Recover the names from the source DataFrame slice
# (x was data.iloc[:, 1:8]) and mask them with the selector's support mask.
feature_names = data.columns[1:8]
selected_feat = feature_names[sel.get_support()]
print(selected_feat)
#%%
from xgboost import XGBRegressor
from numpy import sort
model = XGBRegressor()
model.fit(x_train, y_train)
# Make predictions for the test data and evaluate this model.
y_pred = model.predict(x_test)
predictions = [round(value) for value in y_pred]
# BUG FIX: the metrics below were computed against y_test_pred (the earlier
# Lasso model's predictions), not this XGBoost model's output.
r2 = r2_score(y_test, y_pred)
print("R2: %.2f%%" % (r2 * 100.0))
mse = mean_squared_error(y_test, y_pred)
print("MSE: %.2f%%" % (mse * 100.0))
# Refit using each feature-importance value as a selection threshold.
thresholds = sort(model.feature_importances_)
for thresh in thresholds:
    # select features using this importance threshold
    selection = SelectFromModel(model, threshold=thresh, prefit=True)
    select_X_train = selection.transform(x_train)
    # train a fresh model on the reduced feature set
    selection_model = XGBRegressor()
    selection_model.fit(select_X_train, y_train)
    # evaluate on the same feature subset of the test split
    select_X_test = selection.transform(x_test)
    y_pred = selection_model.predict(select_X_test)
    predictions = [round(value) for value in y_pred]
    # BUG FIX: was scored against the stale Lasso predictions (y_test_pred),
    # which made every threshold report an identical r2.
    r2 = r2_score(y_test, y_pred)
    print("Thresh=%.3f, n=%d, r2: %.2f%%" % (thresh, select_X_train.shape[1], r2 * 100.0))
#%%
#Model Building
#RANDOM FOREST
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
RForest=RandomForestRegressor(n_estimators=5,random_state=329,min_samples_leaf=.0001)
RForest.fit(x_train,y_train)
y_predict=RForest.predict(x_test)
from sklearn.metrics import r2_score
print("Random forest:", r2_score(y_test,y_predict))
#results=cross_val_score(RForest,x_train,y_train,cv=kfold)
#print("CVS:",results)
#Linear Regression
lm=LinearRegression()
lm.fit(x_train,y_train)
y_pred=lm.predict(x_test)
from sklearn.metrics import r2_score
print("Linear Regression:",r2_score(y_test,y_pred))
#results=cross_val_score(lm,x_train,y_train,cv=kfold)
#print("CVS:",results)
#DecisionTree
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import export_graphviz
from os import system
DTree=DecisionTreeRegressor(min_samples_leaf=.0001)
DTree.fit(x_train,y_train)
y_predict=DTree.predict(x_test)
from sklearn.metrics import r2_score
print("Decision Tree:",r2_score(y_test,y_predict))
#results=cross_val_score(DTree,x_train,y_train,cv=kfold)
#print("CVS:",results)
#SVM regressor
from sklearn.svm import SVR
regressor = SVR(kernel = 'rbf')
regressor.fit(x_train,y_train)
y_predict=regressor.predict(x_test)
from sklearn.metrics import r2_score
print("SVM regressor:", r2_score(y_test,y_predict))
#XGBoost Regressor
import xgboost
xgb = xgboost.XGBRegressor(n_estimators=500, learning_rate=0.5, gamma=0, subsample=0.75,colsample_bytree=1, max_depth=7)
xgb.fit(x_train,y_train)
predictions = xgb.predict(x_test)
print("XGB:",r2_score(y_test,predictions))
#KNN
from sklearn import neighbors
r2_val = []
for K in range(20):
K = K+1
model = neighbors.KNeighborsRegressor(n_neighbors = K)
model.fit(x_train, y_train) #fit the model
pred=model.predict(x_test) #make prediction on test set
r2 = r2_score(y_test,pred) #calculate rmse
r2_val.append(r2) #store rmse values
print('R2 for k= ' , K , 'is:', r2)
| atharva246/Machine-Learning-and-Data-Science | Zomato's Restaurant Rating Prediction.py | Zomato's Restaurant Rating Prediction.py | py | 9,234 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.nan",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "seaborn.set",
"line... |
72921628835 | import urllib.request, json
from libbottles.utils import connection
from libbottles.exceptions import NoConnection
class Request:
    """Minimal JSON-over-HTTP client used by libbottles.

    Raises NoConnection at construction time when no network connection is
    available (checked via libbottles.utils.connection).
    """

    # Kept for backward compatibility with any external reads of the class
    # attribute; instances now carry their own header dict (see __init__).
    _headers = {}

    def __init__(self, headers: dict = None):
        # BUG FIX: the previous implementation mutated the class-level dict
        # (shared by every instance) and merged user headers into an unused
        # `_envs` attribute, so custom headers were silently ignored by get().
        self._headers = {"User-Agent": "libbottles client (usebottles.com)"}
        if headers is not None:
            self._headers.update(headers)

        if not connection.check():
            raise NoConnection()

    def get(self, url: str):
        """GET `url` and return the response body parsed as JSON."""
        req = urllib.request.Request(
            url,
            data=None,
            headers=self._headers
        )
        # Use `response` rather than re-binding the `url` parameter.
        with urllib.request.urlopen(req) as response:
            return json.loads(response.read().decode())
| bottlesdevs/libbottles | libbottles/utils/request.py | request.py | py | 730 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "libbottles.utils.connection.check",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "libbottles.utils.connection",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "libbottles.exceptions.NoConnection",
"line_number": 17,
"usage_type": "call... |
# Add a colour filter to a 16-entry BGR555 palette file.
import argparse


def _clamp_channel(value):
    """Clamp a 5-bit colour channel to the valid [0, 31] range."""
    return max(0, min(31, value))


parser = argparse.ArgumentParser()
parser.add_argument('-i', default="Palette.dmp", help='Input tileset palette file. default is Palette.dmp')
parser.add_argument('-o', default="Palette2.dmp", help='Output tileset palette file. default is Palette2.dmp')
parser.add_argument('-r', default=4, type=int, help='Red value modifier, [-31, 31]. Default is 4')
parser.add_argument('-g', default=-16, type=int, help='Green value modifier, [-31, 31]. Default is -16')
parser.add_argument('-b', default=-16, type=int, help='Blue value modifier, [-31, 31]. Default is -16')
args = parser.parse_args()

# Add these values to each colour's R, G and B channel.
red_modifier = args.r
green_modifier = args.g
blue_modifier = args.b

# Each palette entry is a little-endian 15-bit colour: 5 bits per channel,
# red in the low bits, then green, then blue. Process all 16 entries.
# (`with` guarantees both files are closed; local names avoid shadowing the
# builtins `input`.)
with open(args.i, "rb") as infile, open(args.o, "wb") as outfile:
    for _ in range(16):
        entry = ord(infile.read(1)) | (ord(infile.read(1)) << 8)
        red = _clamp_channel((entry & 31) + red_modifier)
        green = _clamp_channel(((entry >> 5) & 31) + green_modifier)
        blue = _clamp_channel(((entry >> 10) & 31) + blue_modifier)
        out_entry = red | (green << 5) | (blue << 10)
        outfile.write(out_entry.to_bytes(2, byteorder='little', signed=False))
{
"api_name": "argparse.ArgumentParser",
"line_number": 4,
"usage_type": "call"
}
] |
21578951425 | # -*- coding:utf-8 -*-
"""
time:2021/2/24
author:李辰旭
organization: BIT
contact: QQ:316469360
——————————————————————————————
description:
$ 处理二值合成轨迹图的一些函数。
主要包括:
滤波降噪
提取轮廓质心
拟合二次曲线
计算像素高度
——————————————————————————————
note:
python3.7以上版本才可运行
"""
import numpy as np
import cv2 as cv
from scipy.optimize import leastsq
def apply(img_BW, k, left=400, mid_left=650, mid_high=500):
    """Return a copy of the binary image with fixed regions painted out.

    The side margins (columns [0, left) and [width-left, width)) and the
    lower-centre rectangle (rows >= mid_high within columns
    [mid_left, width-mid_left)) are filled with black (k=0) or white (k=1).

    The masked regions are symmetric about the vertical centre line. The
    width was previously hard-coded to 1920; it is now derived from the
    image, so the defaults reproduce the original behaviour for 1920-wide
    frames while other sizes also work.
    """
    masked = img_BW.copy()
    width = img_BW.shape[1]
    fill = 255 * k
    right = width - left
    mid_right = width - mid_left
    masked[:, :left] = fill
    masked[:, right:] = fill
    masked[mid_high:, mid_left:mid_right] = fill
    return masked
def find_centroid(img_BW):
    """Return the (x, y) centroids of all external contours in a binary image.

    Contours with zero area (m00 == 0, e.g. degenerate one-pixel contours)
    are skipped instead of raising ZeroDivisionError.
    """
    cnts, _ = cv.findContours(img_BW, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    centroids = []
    for contour in cnts:
        # Image moments characterise the contour's shape; m00 is its area and
        # the centroid is (m10/m00, m01/m00).
        M = cv.moments(contour)
        if M["m00"] == 0:
            continue  # robustness: avoid division by zero on degenerate contours
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        centroids.append((cX, cY))
    return centroids
def func(params, x):
    """Quadratic model a*x^2 + b*x + c evaluated at x (scalar or ndarray)."""
    coef_a, coef_b, coef_c = params
    return coef_a * x * x + coef_b * x + coef_c
# Error function: the difference between the fitted curve's value and the observed value
def error(params, x, y):
    """Residual between the fitted quadratic and the observed y (for leastsq)."""
    residual = func(params, x) - y
    return residual
# Solve for the quadratic's parameters
def slovePara(X, Y):
    """Least-squares fit of a quadratic a*x^2 + b*x + c to the sample points.

    X and Y are equal-length sequences of coordinates; returns the fitted
    coefficients (a, b, c).
    """
    initial_guess = [10, 10, 10]  # starting point for the iterative solver
    xs = np.array(X)
    ys = np.array(Y)
    solution, _ier = leastsq(error, initial_guess, args=(xs, ys))
    a, b, c = solution
    return a, b, c
def track_progress(img_BW, img, grand=950, startline=1250):
    """Fit a parabola to the ball trajectory in a composited binary track image.

    Parameters
    ----------
    img_BW : single-channel binary trajectory image.
    img : 3-channel image; detected centroids, the fitted parabola, the ground
        line and the serve line are drawn onto it (mutated in place).
    grand : y coordinate (pixels) of the ground line.
    startline : x coordinate (pixels) of the serve line.

    Returns
    -------
    (height, frame): the absolute pixel height of the trajectory above the
    ground at the serve line and the denoised binary frame; height is -1 when
    the parabola could not be fitted.
    """
    # Paint out the fixed noise regions, then denoise:
    # median filter followed by dilation to merge the ball blobs.
    applied = apply(img_BW, 0)
    mid_filer = cv.medianBlur(applied, 3)
    kernel = np.ones((5, 5), np.uint8)
    frame = cv.dilate(mid_filer, kernel, iterations=2)
    # Contour centroids are the sample points for the parabola fit.
    X_Y = find_centroid(frame)
    X = []
    Y = []
    for i in X_Y:
        X.append(i[0])
        Y.append(i[1])
        cv.circle(img, (i[0], i[1]), 10, (0, 0, 255), -1)
    # Fit the parabola y = a*x^2 + b*x + c through the centroids.
    try:
        a, b, c = slovePara(X, Y)
        print('抛物线参数:a=', a, 'b=', b, 'c=', c)
        # Draw the fitted curve across the frame width.
        for w in range(1920):
            h = int(a * (w * w) + b * w + c)
            # BUG FIX: the original `h>0&h<1080` reduces to `h > 0` because
            # `&` binds tighter than comparisons; clamp to the image height.
            if 0 < h < 1080:
                cv.circle(img, (w, h), 3, (0, 255, 0), -1)
    except Exception:  # narrowed from bare except so Ctrl-C still works
        print('拟合抛物线时出现错误,好像是因为拟合点少于三个')
    # Draw the ground line and the serve (start) line.
    cv.line(img, (0, grand), (1919, grand), (255, 0, 0), 3)
    cv.line(img, (startline, 0), (startline, 1079), (255, 0, 0), 3)
    # Height of the fitted curve above the ground at the serve line.
    try:
        height = a * (startline * startline) + b * startline + c - grand
        if height < 0:
            height *= -1
        print('像素高度=', height)
        return height, frame
    except Exception:  # a/b/c are undefined when the fit above failed
        print('拟合抛物线时出现错误,abc没值')
        return -1, frame
if __name__ == '__main__':
    # Demo: denoise and fit a single composited trajectory image, showing the
    # annotated colour frame and the binary source side by side.
    import os  # NOTE(review): unused import
    # Two preview windows at one third of the 1920x1080 frame size.
    cv.namedWindow("obj", cv.WINDOW_NORMAL)
    cv.resizeWindow("obj", int(1920/3), int(1080/3))
    cv.moveWindow("obj", 0, 0)
    cv.namedWindow("applied", cv.WINDOW_NORMAL)
    cv.resizeWindow("applied", int(1920/3), int(1080/3))
    cv.moveWindow("applied", int(1920/3), 0)
    # NOTE(review): hard-coded local test image path.
    img = cv.imread("D:\\lalala.png")
    img_BW = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    track_progress(img_BW, img)
    cv.imshow("obj", img)
    cv.imshow("applied", img_BW)
    cv.waitKey(0)
| ChenXu-Li/kinect_measure_height | process.py | process.py | py | 4,148 | python | zh | code | 2 | github-code | 1 | [
{
"api_name": "cv2.findContours",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "... |
6536893855 | import torch
from torch.nn import functional as F
def compute_mvg(d_latents, latent_name, mean_v, inv_cov_v):
    """Mahalanobis-style prior loss for a latent under a multivariate Gaussian.

    For "W" a single code is scored; for "W+" every per-layer code is scored
    (in double precision) and the contributions are summed into a scalar.
    """
    if latent_name == "W":
        code = F.leaky_relu(d_latents["W"], negative_slope=5.0)
        diff = code - mean_v
        return diff.matmul(inv_cov_v).matmul(diff.T)
    elif latent_name == "W+":
        codes = F.leaky_relu(d_latents["W+"].double(), negative_slope=5.0)
        total = 0.0
        for layer in range(codes.shape[1]):
            diff = codes[:, layer, :] - mean_v
            total = total + diff @ inv_cov_v @ diff.T
        return total.squeeze(0).squeeze(0)
def b_compute_mvg(d_latents, latent_name, mean_v, inv_cov_v):
    """Batched Mahalanobis prior loss over the per-layer codes of a "W+" latent.

    Generalised: the code dimensionality is taken from the input instead of
    being hard-coded to 512, so any latent size works.  Returns the sum over
    layers of the batch-mean quadratic form (v - mean)^T inv_cov (v - mean).
    Non-"W+" latent names return None, as before.
    """
    if latent_name == "W+":
        codes = F.leaky_relu(d_latents["W+"].double(), negative_slope=5.0)
        bs, _, dim = codes.shape  # dim was hard-coded to 512 in the original
        inv_cov_b = inv_cov_v.reshape(1, dim, dim).repeat(bs, 1, 1)
        loss = 0.0
        for idx in range(codes.shape[1]):
            dv = (codes[:, idx, :] - mean_v).reshape(-1, 1, dim)
            loss += torch.bmm(torch.bmm(dv, inv_cov_b), torch.transpose(dv, 1, 2)).mean()
        return loss
def delta_loss(latent):
    """Sum over layers of the mean L2 distance from each layer code to layer 0.

    latent is expected shaped (batch, layers, dim); layer 0 is the anchor.
    """
    anchor = latent[:, 0, :]
    total = 0.0
    for layer in range(1, latent.shape[1]):
        total += torch.norm(latent[:, layer, :] - anchor, 2, dim=1).mean()
    return total
| adobe-research/sam_inversion | src/loss_utils.py | loss_utils.py | py | 1,421 | python | en | code | 168 | github-code | 1 | [
{
"api_name": "torch.nn.functional.leaky_relu",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.leaky_relu",
"line_number": 14,
"usage_type": "call"
},
{
"a... |
18555835371 | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import random
import os
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from model import *
from user_agent import generate_user_agent
import re
# Entry point of the Tommy Hilfiger paginated product-listing endpoint.
MAIN_URL = 'https://usa.tommy.com/ProductListingView'
db_engine = create_engine("sqlite:///calvin.db", echo=True)
basedir = os.path.abspath(os.path.dirname(__file__))
# Module-level accumulators shared between the parsing helpers below;
# parser_content() clears the first three after each product card.
size_list = []
details_list = []
color_list = []
url_list = []
cat_url_list = []
# Session cookies captured from a real browser session so the requests look
# authenticated.  NOTE(review): these expire and must be refreshed by hand.
cookie = {
    'sr_browser_id': '33df22be-572e-4904-ba40-0ad50c8bf097',
    'sr_pik_session_id': 'a93a676-c1cc-79d7-d8d1-e0f2e070f5ea',
    '__utma': '230066073.559163455.1579546275.1583593346.1583599172.13',
    '__utmb': '230066073.9.10.1583599172',
    '__utmc': '230066073',
    '__utmz': '230066073.1581354246.9.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided)',
    '__wid': '496949504',
    '_ga': 'GA1.2.559163455.1579546275',
    '_px2': 'eyJ1IjoiNDdkNjNiNzAtNjA5NC0xMWVhLWE2NDQtZjFhZjM2YWNiMDBkIiwidiI6ImQ2MGQxMGEyLTNiYjUtMTFlYS1hMjE1LTAyNDJhYz'
            'EyMDAwNSIsInQiOjE1ODM2MDA1OTkzNTgsImgiOiJlMTMzMjExNGY0YjE4N2VkZDU0OThlNTY5ZDRkZjIzYjU3NTgwMTVjY2FjNGFlYjk0N'
            'DAyNzYwZWU1Y2ExMzJlIn0=',
    '_pxvid': 'eyJ1IjoiNDdkNjNiNzAtNjA5NC0xMWVhLWE2NDQtZjFhZjM2YWNiMDBkIiwidiI6ImQ2MGQxMGEyLTNiYjUtMTFlYS1hMjE1LTAyNDJh'
              'YzEyMDAwNSIsInQiOjE1ODM2MDA1OTkzNTgsImgiOiJlMTMzMjExNGY0YjE4N2VkZDU0OThlNTY5ZDRkZjIzYjU3NTgwMTVjY2FjNGFl'
              'Yjk0NDAyNzYwZWU1Y2ExMzJlIn0=',
    'sctr': '1|1583532000000',
    'ADRUM': 's=1583600052675&r=https%3A%2F%2Fusa.tommy.com%2Fen%2Fwomen%2Fnewarrivals-women%2Ficon-wide-leg-stripe-pant-ww28013%3F0',
    'JSESSIONID': '0000einWRcx7PVbTVe62TS9mlJv:1crovuh4f'
}
# Single outbound HTTPS proxy used by every request.
proxy = {'HTTPS': '157.245.138.230:8118'}
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'calvin.db')
# Browser-like request headers with a randomised desktop user agent.
HEADERS = {
    'User-Agent': generate_user_agent(device_type="desktop", os=('mac', 'linux')),
    'Accept':'*/*',
    'Cache-Control':'no-cache',
    'Host':'usa.tommy.com',
    'Accept-Encoding':'gzip, deflate, br',
    'Connection':'keep-alive',
    'accept-language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,uz;q=0.6'}
# Query-string template for the paginated listing endpoint; categoryId and
# beginIndex are filled in by get_url_category() while paging.
payload = {
    'catalogId': '10551',
    'isHomeDepartment': 'false',
    'pageSize': '30',
    'disableProductCompare': 'true',
    'langId': '-1',
    'storeId': '10151', #CommerceSearch
    'categoryId': '', #pageId
    'beginIndex': '30',
    'minFacetCount': '1',
    'colorfacetselected': 'false',
    'cache': 'true'
}
# NOTE(review): a second engine/session over the same sqlite file is created
# here in addition to db_engine above; the helpers build their own sessions.
engine = create_engine(SQLALCHEMY_DATABASE_URI, echo=True)
Session = sessionmaker(bind=engine)
session = Session()
def read_file_url():
    """Read category URLs (one per line) from input.txt into the module-level
    cat_url_list and return that list."""
    with open('input.txt', 'r') as handle:
        cat_url_list.extend(raw.strip('\n') for raw in handle)
    return cat_url_list
def get_html(url, payload=None):
    """GET *url* with the scraper's headers/cookies/proxy, retrying forever.

    Sleeps a random interval before every attempt to look less bot-like.
    On 403 (blocked) it backs off for roughly 10 minutes; on any other
    non-200 status it retries after a shorter pause.  Returns the first
    successful Response.  NOTE(review): there is no retry limit, so a
    permanently failing URL blocks the crawl indefinitely.
    """
    while True:
        # Randomised jitter between requests (6-27 seconds).
        time.sleep(random.randint(random.randint(6, 10), random.randint(12, 27)))
        html = requests.get(url, headers=HEADERS, proxies=proxy, params=payload, cookies=cookie)
        if html.status_code == 200:
            print(html.status_code)
            return html
        elif html.status_code == 403:
            # Blocked by the site -> long back-off before retrying.
            print(html.status_code)
            print('weit to 600 sec')
            time.sleep(random.randint(600,800))
        else:
            time.sleep(random.randint(14, 27))
            print(html.status_code)
            continue
def parser_content(html, image_list):
    """Parse one product-card page and persist a Tommy row to the database.

    Fills the module-level size/details/color accumulators, builds a Tommy
    record together with *image_list* (photo file names), commits it, then
    clears the accumulators.  NOTE(review): every lookup uses a bare except,
    so parse failures degrade silently to None/'' values.
    """
    # parses all data from the card except photos; consider splitting into several functions
    soup = BeautifulSoup(html.text, 'html.parser')
    link = html.url
    try:
        # product name
        product_name = soup.find('span', class_='productNameInner').text
    except:
        product_name = None
    try:
        # base price (without discount)
        price = soup.find('div', id='price_display').find_all('span')[0].text[1:]
    except:
        price = None
    try:
        # sale price
        price_sale = soup.find('div', id='price_display').find_all('span')[1].text[1:]
    except (IndexError, ValueError):
        price_sale = None
    try:
        # available sizes: on the site every available size carries the
        # 'available' class, so only those entries are collected
        block_size = soup.find('ul', id='sizes').find_all('li')
        for li in block_size:
            if li['class'] == ['available']:
                size_list.append(li.find('span').text)
    except:
        print(f'Size {None}')
    try:
        # bulleted "Details" list with extra info at the bottom of the card
        details_group = soup.find('ul', class_='bullets')
        for details in details_group.find_all('li'):
            details_list.append(details.text)
    except:
        details_list.append('')
    try:
        # colour swatches for all colours offered on the site
        radiogrup = soup.find('ul', class_='productswatches')
        for color in radiogrup.find_all('li'):
            color_list.append(color['data-color-swatch'])
    except:
        color_list.append('')
    try:
        # the single, currently active colour
        color = soup.find('ul', class_='productswatches').find('li', class_='active')['data-color-swatch']
    except:
        color = ''
    try:
        # listing id
        universal_id = soup.find('div', class_='universalStyleNumber').find_all('span')[1].text
    except:
        universal_id = ''
    try:
        # product category: last two breadcrumb links joined with a space
        category = soup.find('div', id='breadcrumb').find_all('a')[-2].text + ' ' + \
                   soup.find('div', id='breadcrumb').find_all('a')[-1].text
    except:
        category = ''
    count = 1
    Session = sessionmaker(bind=db_engine)
    session = Session()
    try:
        new_element = Tommy(product_name, price, price_sale, ','.join(size_list), color, ','.join(image_list),
                            ','.join(details_list), category, ','.join(color_list), link)
        session.add(new_element)
        session.commit()
    except:
        # NOTE(review): commit failures are swallowed; the row is simply lost.
        pass
    count += 1
    # Reset the shared accumulators for the next product card.
    size_list.clear()
    color_list.clear()
    details_list.clear()
def create_dir_name():
    """Ensure the image output directory exists and return its name."""
    target = 'images'
    try:
        os.mkdir(target)
    except OSError:
        # Directory already exists (original Russian message preserved).
        print('Папка существует')
    return target
def chek_images():
    """Return the highest numeric filename prefix in the images directory.

    Used so the next downloaded photo continues the numbering (last + 1).
    Returns 0 when the directory is missing, is empty, or contains no
    numerically named files.  Fixes in this revision: files whose names do
    not start with digits previously crashed on int(''); a missing images/
    directory previously raised an uncaught FileNotFoundError; the builtin
    name `list` is no longer shadowed.
    """
    numbers = []
    try:
        for fname in os.listdir('images'):
            prefix = re.findall(r'\d*', fname)[0]
            if prefix:  # skip names without a leading digit run
                numbers.append(int(prefix))
        numbers.sort()
        print(numbers[-1])
    except (FileNotFoundError, IndexError):
        # No directory yet, or no numbered files -> numbering starts at 0.
        numbers.append(0)
    return numbers[-1]
def get_photo(html, dir_name):
    """Download the main photo of a product card plus three alternate views.

    Files are written as <dir_name>/<n>.JPG where <n> continues from the
    highest number already on disk (see chek_images).  Returns the list of
    file-name stems that were successfully saved.
    """
    count_photo = chek_images()
    image_list = []
    img_name = []
    soup = BeautifulSoup(html.content, 'html.parser')
    # The site exposes alternate views by swapping 'main' in the image URL.
    image_url = soup.find('div', class_='product_main_image').find('img')['data-src']
    image_list.append(image_url)
    image_list.append(image_url.replace('main', 'alternate1'))
    image_list.append(image_url.replace('main', 'alternate2'))
    image_list.append(image_url.replace('main', 'alternate3'))
    for img in image_list:
        try:
            photo_name = count_photo
            file_obj = requests.get(img, stream=True)
            if file_obj.status_code == 200:
                # Stream the body to disk in 8 KiB chunks.
                with open(dir_name+'/'+str(photo_name)+'.JPG', 'bw') as photo:
                    for chunk in file_obj.iter_content(8192):
                        photo.write(chunk)
                count_photo +=1
                img_name.append(str(photo_name))
        except:
            # NOTE(review): bare except hides which download/IO step failed.
            print('Error file_obj')
    return img_name
def get_url_category(html):
    """Collect the URLs of all product cards in a category into url_list.

    Reads the total result count from the first page, then pages through the
    listing endpoint 30 items at a time (separate requests) and appends every
    product link to the module-level url_list, which is returned.
    """
    # collects the urls of all cards into url_list (sends separate requests per page)
    count = 0
    soup = BeautifulSoup(html.content, 'html.parser')
    page_count = soup.find('div', id='filterInfo')['data-total-count']
    all_page = int(page_count) // 30
    prod = soup.find('div', class_='grid').find_all('a', class_='productThumbnail')
    for i in prod:
        url_list.append(i['href'])
    # The category id lives in a meta tag and is required by the paging API.
    category_id = soup.find('head').find('meta', {'name': 'pageId'})['content']
    payload.update({'categoryId': category_id})
    for page in range(1, all_page + 1):
        count += 30
        payload.update({'beginIndex': count})
        #response = requests.get(MAIN_URL, headers=HEADERS, proxies=proxy, params=payload)
        response = get_html(MAIN_URL, payload=payload)
        # NOTE(review): this prints the status of the ORIGINAL page object,
        # not of `response`; probably a leftover from the commented line above.
        print(html.status_code)
        sp = BeautifulSoup(response.content, 'html.parser')
        try:
            prod = sp.find('div', class_='grid').find_all('a', class_='productThumbnail')
            for i in prod:
                url_list.append(i['href'])
        except:
            continue
    return url_list
def main():
    """Crawl every category URL listed in input.txt: collect product-card
    URLs, download their photos and store the parsed data via parser_content.
    """
    dir_name = create_dir_name()
    cat_url_list = read_file_url()
    for cat_url in cat_url_list:
        html = get_html(cat_url)
        url_list = get_url_category(html)
        for url in url_list:
            # The original opened a DB session here and bound
            # "session.query" without calling it -- a dead statement
            # (probably an unfinished duplicate check); both removed.
            html = get_html(url)
            image_list = get_photo(html, dir_name)
            parser_content(html, image_list)


if __name__ == '__main__':
    main()
{
"api_name": "sqlalchemy.create_engine",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname... |
11460338892 | from django.db import models
from django.utils.translation import gettext_lazy as _
class VCard(models.Model):
    """A stored vCard entry, identified by an optional title."""

    # Optional display title, capped at 150 characters; empty by default.
    title = models.CharField(
        _("Title"),
        max_length=150,
        blank=True,
        default="",
    )

    class Meta:
        verbose_name = _("VCard")
        verbose_name_plural = _("VCards")

    def __str__(self):
        return self.title
| 7saikat7/django-qr-vcard | qr_vcard/models/vcard.py | vcard.py | py | 409 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "... |
26885414895 | import json
from datetime import timedelta
from db.redis import init_redis_pool
async def add_test_result_to_redis(result_id: int, user_id: int, id_company: int, id_quiz: int, data: dict):
    """Cache a quiz result as JSON in Redis with a 48-hour TTL.

    The key encodes the result, user, company and quiz ids so results can
    later be matched by any combination of these fields (see
    get_values_by_keys, which globs over key fragments).
    """
    redis = await init_redis_pool()
    # Fixed the stray empty format spec "{result_id:}" from the original;
    # the rendered key is byte-identical.
    key = f"result_test:{result_id}:id_user:{user_id}:id_company:{id_company}:id_quiz:{id_quiz}"
    await redis.setex(key, timedelta(hours=48), json.dumps(data))
async def get_value_by_keys(**kwargs):
    """Return the first cached value matching the key fragments, or None."""
    values = await get_values_by_keys(**kwargs)
    return values[0] if values else None
async def get_values_by_keys(**kwargs):
    """Return all cached values whose Redis key contains every
    "<field>:<value>" fragment given as keyword arguments."""
    redis = await init_redis_pool()
    fragments = [f"{field}:{value}" for field, value in kwargs.items()]
    pattern = '*' + '*'.join(fragments) + '*'
    matching = await redis.keys(pattern)
    return await get_values(redis, matching)
async def get_values(redis, keys):
    """Fetch and JSON-decode the values for *keys*, skipping missing/empty ones.

    Fixed: the original awaited ``redis.get(key)`` twice per key (once in the
    comprehension's condition, once in the expression), doubling round-trips
    and racing against expiry between the two reads; each key is now fetched
    exactly once.
    """
    results = []
    for key in keys:
        raw = await redis.get(key)  # single round-trip per key
        if raw:
            results.append(json.loads(raw))
    return results
| saindi/internship | app/db/redis_actions.py | redis_actions.py | py | 905 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "db.redis.init_redis_pool",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "db.redis.init_r... |
32790785348 | import requests
from bs4 import BeautifulSoup
import json
# URL of the web page to scrape
url = 'https://nookipedia.com/wiki/Category:New_Horizons_fish_icons'

# Send an HTTP request to the web page
response = requests.get(url)

# Check if the request was successful
if response.status_code == 200:
    # Parse the HTML content of the page using BeautifulSoup
    soup = BeautifulSoup(response.text, 'html.parser')

    # Find all image tags on the page
    image_tags = soup.find_all('img')

    # Extract each 'src' attribute to get the image links; skip tags without
    # one (previously img['src'] raised KeyError and aborted the whole run).
    image_links = [img['src'] for img in image_tags if img.get('src')]

    # Create a dictionary with a "urls" key and the image links as its value
    data = {"urls": image_links}

    # Save the data to a JSON file
    with open('image_links.json', 'w') as json_file:
        json.dump(data, json_file, indent=4)

    print("Image links saved to 'image_links.json'")
else:
    print(f"Failed to retrieve the web page. Status code: {response.status_code}")
| JohnMcSwiney/acnh_encyclopedia | server/img_scraping.py | img_scraping.py | py | 1,012 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 27,
"usage_type": "call"
}
] |
1917816925 | from trackingsimpy.simulation.revisit_interval import BaseRISimulation
from trackingsimpy.tracking import TrackingComputer
from trackingsimpy.radar import PositionRadar
import filterpy.common
from filterpy.kalman import IMMEstimator, KalmanFilter
from trackingsimpy.common.motion_model import constant_turn_rate_matrix, kinematic_state_transition
from trackingsimpy.target import DefinedTargetProcess
from trackingsimpy.common.measurement_model import position_measurement_matrix
import numpy as np
class DefinedIMM(BaseRISimulation):
    """Revisit-interval simulation tracking a scripted target with a 3-model
    IMM estimator (one constant-velocity and two constant-turn-rate models).
    """

    ORDER = 1        # kinematic order (position + velocity)
    DIM = 2          # planar target
    DT = 0.1         # simulation/filter time step [s]
    MAX_STEPS = 700  # scripted trajectory length

    def __init__(self, pfa=1e-6, sn0=50, beamwidth=0.02, n_max=20, x0=np.array([10e3, -200.0, 10e3, 0]),
                 P0=np.eye(4) * 1000, theta_accuracy=0.002):
        """Build target, IMM tracker, radar and tracking computer.

        pfa: radar false-alarm probability; sn0: reference SNR; beamwidth in
        radians; n_max: max looks per revisit; x0/P0: initial state and
        covariance; theta_accuracy: angular measurement accuracy.
        """
        # Process-noise variance -- presumably a fraction of g^2; TODO confirm.
        var = 0.1*9.81**2
        # Turn rates of the two manoeuvring models [rad per step].
        tr1 = -0.08
        tr2 = 0.1
        # Uniform initial model probabilities and a symmetric Markov switching
        # matrix with a 1% total per-step switch probability.
        probs = np.ones(3) / 3
        p_switch = 1.0 / 100.0
        M = np.array([
            [1 - p_switch, p_switch / 2, p_switch / 2],
            [p_switch / 2, 1 - p_switch, p_switch / 2],
            [p_switch / 2, p_switch / 2, 1 - p_switch]
        ])
        # Three identical kinematic KFs; models 1 and 2 get turning dynamics.
        filters = list()
        for i in range(3):
            filters.append(filterpy.common.kinematic_kf(self.DIM, self.ORDER, self.DT))
            filters[i].x = x0
            filters[i].Q = filterpy.common.Q_discrete_white_noise(self.DIM, self.DT, var=var, block_size=self.ORDER+1)
        filters[1].F = constant_turn_rate_matrix(tr1, self.DT)
        filters[2].F = constant_turn_rate_matrix(tr2, self.DT)
        # Scripted truth: straight flight with two turn segments, keyed by step.
        st_models = {
            0: kinematic_state_transition(self.DT, self.ORDER, self.DIM),
            200: constant_turn_rate_matrix(tr1, self.DT),
            300: kinematic_state_transition(self.DT, self.ORDER, self.DIM),
            400: constant_turn_rate_matrix(tr2, self.DT),
            500: kinematic_state_transition(self.DT, self.ORDER, self.DIM)
        }
        p_noises = {
            0: filterpy.common.Q_discrete_white_noise(self.DIM, self.DT, var=var, block_size=self.ORDER + 1)
        }
        target = DefinedTargetProcess(x0, st_models, p_noises, self.MAX_STEPS, self.ORDER, self.DIM)
        tracker = IMMEstimator(filters, probs, M)
        radar = PositionRadar(target, sn0, pfa, beamwidth, self.DIM, self.ORDER, angle_accuracy=theta_accuracy)
        computer = TrackingComputer(tracker, radar, n_max, P0=P0)
        super().__init__(computer)
class DefinedCVCAIMM(BaseRISimulation):
    """Revisit-interval simulation with a 2-model IMM: constant-velocity (CV)
    vs constant-acceleration (CA), against a scripted CV/CA/CV trajectory.
    """

    ORDER = 2         # kinematic order (position + velocity + acceleration)
    DIM = 2           # planar target
    DT = 0.1          # time step [s]
    MAX_STEPS = 3000  # scripted trajectory length

    def __init__(self, pfa=1e-6, sn0=50, beamwidth=0.02, n_max=20, x0=np.array([30e3, -150, 0, 30e3, 150, 0]),
                 theta_accuracy=0.002):
        """Build target, CV/CA IMM tracker, radar and tracking computer."""
        # Trackers
        probs = np.ones(2) / 2
        # Symmetric Markov switching matrix, 0.1% per-step switch probability.
        p_switch = 1.0 / 1000.0
        M = np.array([
            [1 - p_switch, p_switch],
            [p_switch, 1 - p_switch]])
        F_ca = kinematic_state_transition(self.DT, self.ORDER, self.DIM)
        F_cv = kinematic_state_transition(self.DT, self.ORDER, self.DIM)
        # Zero out the acceleration columns to turn the CA model into CV.
        F_cv[:, 2::3] = 0
        g = 9.81
        var = 4 * g ** 2  # process-noise variance, (2g)^2
        Q = filterpy.common.Q_discrete_white_noise(self.DIM, self.DT, var, block_size=self.ORDER + 1)
        kf_cv = KalmanFilter(dim_x=self.DIM * (self.ORDER + 1), dim_z=self.DIM)
        kf_cv.F = F_cv
        kf_cv.Q = Q
        kf_cv.H = position_measurement_matrix(self.DIM, self.ORDER)
        kf_ca = KalmanFilter(dim_x=self.DIM * (self.ORDER + 1), dim_z=self.DIM)
        kf_ca.F = F_ca
        kf_ca.Q = Q
        kf_ca.H = position_measurement_matrix(self.DIM, self.ORDER)
        filters = [kf_cv, kf_ca]
        tracker = IMMEstimator(filters, probs, M)
        # Target: scripted CV flight, a CA segment, then CV again.
        st_models = {
            0: F_cv,
            1000: F_ca,
            2000: F_cv
        }
        p_noises = {
            0: Q
        }
        target = DefinedTargetProcess(x0, st_models, p_noises, self.MAX_STEPS, self.ORDER, self.DIM)
        # Radar
        radar = PositionRadar(target, sn0, pfa, beamwidth, self.DIM, self.ORDER, angle_accuracy=theta_accuracy)
        # Computer (track is started from a zero covariance prior here)
        P0 = np.zeros((x0.size,) * 2)
        computer = TrackingComputer(tracker, radar, n_max, P0=P0)
        super().__init__(computer)
| PetteriPulkkinen/TrackingSimPy | trackingsimpy/simulation/revisit_interval/defined_imm.py | defined_imm.py | py | 4,167 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "trackingsimpy.simulation.revisit_interval.BaseRISimulation",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 20,
"usage_type": "call"
},
{
"... |
2299030097 | import json
import subprocess
import os
import zmq
class CommandLineBarcodeReader():
    """Decode barcodes by delegating to an external command-line ZMQ server.

    The server command is read (one argv token per line) from *config_path*
    and spawned on demand; requests/replies travel over a REQ socket on
    tcp://localhost:<port>.
    """

    def __init__(self, config_path="scandit_commandline",port=5556):
        """Create the ZMQ context and start the backing server if needed."""
        self.context = zmq.Context()
        self.process = None          # Popen handle of the spawned server, if we started one
        self.config_path = config_path
        self.port = port
        self.start_commandline_zmq_server_if_unstarted()
    def start_commandline_zmq_server_if_unstarted(self):
        """Ping the server; if no reply arrives, spawn it from the config file.

        NOTE(review): the liveness probe is a non-blocking recv immediately
        after send, so a slow-but-alive server may be treated as unstarted.
        """
        socket = self.context.socket(zmq.REQ)
        socket.connect("tcp://localhost:"+str(self.port))
        socket.send(b"Hello")
        message = ""
        try:
            message = socket.recv(flags=zmq.NOBLOCK)
            print(message)
        except Exception as e:
            # No reply -> assume the server is not running and launch it.
            print("start error")
            print(e)
            f = open(self.config_path,"r")
            commandline=[]
            for line in f.readlines():
                commandline.append(line.strip())
            f.close()
            self.process = subprocess.Popen(commandline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    def stop_commandline_zmq_server_if_started(self):
        """Kill the spawned server process, ignoring the case where none exists."""
        try:
            self.process.kill()
        except:
            # self.process is None (we never spawned it) or already dead.
            print("process not opened")
    def decode_file(self, img_path):
        """Send *img_path* to the server and return its parsed JSON reply.

        Returns {"results": [...]} plus "elapsedTime" when the server
        provides it; on any failure "results" is an empty list.
        """
        result_dict = {}
        results = []
        try:
            socket = self.context.socket(zmq.REQ)
            socket.connect("tcp://localhost:"+str(self.port))
            socket.send(bytes(img_path,"utf-8"))
            message = socket.recv()
            json_object = json.loads(message.decode("utf-8"))
            if "results" in json_object:
                results=json_object["results"]
            if "elapsedTime" in json_object:
                result_dict["elapsedTime"]=json_object["elapsedTime"]
        except Exception as e:
            print("decode error")
            print(e)
        result_dict["results"] = results
        return result_dict
if __name__ == '__main__':
    # Alternative reader configurations kept for reference:
    #reader = CommandLineBarcodeReader()
    #reader = CommandLineBarcodeReader(config_path="zxing_commandline",port=5557)
    reader = CommandLineBarcodeReader(config_path="dbr88_commandline",port=6666)
    # NOTE(review): hard-coded Windows test-image path; adjust before running elsewhere.
    results = reader.decode_file("D:\\test\\BarcodePerformance\\new\\black_qr_code.png")
    print(results)
    reader.stop_commandline_zmq_server_if_started()
| xulihang/Barcode-Reading-Performance-Test | barcode_reader/commandline.py | commandline.py | py | 2,416 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "zmq.Context",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "zmq.REQ",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "zmq.NOBLOCK",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line... |
22594227372 | ####################################################
##### This is focal loss class for multi class #####
##### University of Tokyo Doi Kento #####
####################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
# I refered https://github.com/c0nn3r/RetinaNet/blob/master/focal_loss.py
class FocalLoss2d(nn.Module):
    """Multi-class focal loss for dense (2d) prediction maps.

    With gamma == 0 and alpha == 1 this reduces to plain cross entropy;
    larger gamma down-weights well-classified pixels.
    """

    def __init__(self, alpha=1.0, gamma=0, weight=None, ignore_index=-100, size_average=True, with_grad=True):
        super(FocalLoss2d, self).__init__()
        assert gamma >= 0.0 and gamma <= 5.0, 'gamma in [0,5] is okay, but %0.2f' % gamma
        assert alpha > 0.0
        self.alpha = alpha
        self.gamma = gamma
        self.weight = weight
        self.size_average = size_average
        self.ignore_index = ignore_index
        self.with_grad = with_grad

    def forward(self, input, target):
        # Flatten (N, C, ...) predictions into (N*spatial, C) rows.
        if input.dim() > 2:
            flat = input.contiguous().view(input.size(0), input.size(1), -1)
            flat = flat.transpose(1, 2)
            input = flat.contiguous().view(-1, flat.size(2)).squeeze()
        # Flatten the targets to one class index per row.
        if target.dim() == 4:
            tgt = target.contiguous().view(target.size(0), target.size(1), -1)
            tgt = tgt.transpose(1, 2)
            target = tgt.contiguous().view(-1, tgt.size(2)).squeeze()
        elif target.dim() == 3:
            target = target.view(-1)
        else:
            target = target.view(-1, 1)
        # Per-element log-likelihood, kept unreduced for the focal weighting.
        logpt = -F.cross_entropy(input, target,
                                 weight=self.weight,
                                 ignore_index=self.ignore_index,
                                 reduction='none')
        if self.with_grad:
            pt = torch.exp(logpt)
        else:
            with torch.no_grad():
                pt = torch.exp(logpt)
        # Focal modulation: down-weight easy (high-pt) examples.
        loss = -self.alpha * ((1 - pt) ** self.gamma) * logpt
        return loss.mean() if self.size_average else loss.sum()
if __name__ == '__main__':
    # Smoke test: random logits over 10 classes on a 3x3 map, batch of 2.
    input=torch.rand(2,10,3,3).float()
    print(input.shape)
    # Random targets derived by argmax over another random map.
    target=torch.rand(2,10,3,3)
    target=torch.argmax(target,dim=1)
    loss_fn=FocalLoss2d()
    loss=loss_fn(input,target)
    print(loss)
print(loss) | ISCAS007/torchseg | torchseg/utils/loss/focalloss2d.py | focalloss2d.py | py | 2,324 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.cross_entropy",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch... |
30523416801 | import sys
import random
import re
from functools import partial
from tqdm import tqdm
from junkdrawer import generator_looper
def lindenate(liRules, sInput="", lIterations=1):
    """This function iteratively processes a set of find-and-replace rules, liRules, on a given string, sInput.
    By default, it only goes through a single iteration, and processes the rules against an empty string.
    In a traditional Lindenmayer system, rules replace a single character (called the predecessor) with a string
    (called the successor). In stochastic Lindenmayer systems, this successor can be chosen at random from a
    number of options. This function is modeled after a stochastic L-system, but with a few modifications;
    In this function, predecessors and successors are defined using Regex, which allows for multi-character and
    pattern-based matching, as well as smart replacement via capture groups. This also opens the door for characters
    or character sets which would match to more than one rule, meaning that, unlike L-systems, the order of rule
    application here matters. With ordered rule application comes the possibility of a later rule affecting the
    successors of the previous rules inside of a single iteration; what I refer as a rule being "protected".
    :param Inputs:
    :param liRules:
        list of rule dictionaries. Each dctRule must have the following pairs:
            "name":
                string. Becomes description for progress bar
            "enabled":
                boolean. Whether this rule should be applied, or skipped.
            "protected":
                boolean. Whether the successor(s) introduced by this rule can be modified by rules
                later in the list.
            "predecessor":
                string (regex). The regex expression searched for in the find-and-replace process
            "successor":
                list of tuples; [(number, string (regex)),(number, string (regex))]
                The successor is stochastically determined via random.random.
                The first element is a number between 0 and 1 representing the max random.random
                value for which its corresponding successor, the second element, will be chosen.
                If the random value is above all options, the successor is an empty string.
                The tuples MUST be sorted in ascending order by their first element.
    :param sInput:
        String. The text to be mutated by the function
    :param lIterations:
        Number. The number of times to process the string through the rules.
    :return:
        String. The input text, as transformed by (lMaxGen - lCurrGen) iterations through liRules.
    """
    # If we're out of iterations to perform...
    if lIterations <= 0:
        # ...return the input
        return sInput
    sOut = sInput
    # This protection string is a string of equal length to the output.
    # The character sOut[x] is protected if sProtect[x] == "1"
    sProtect = "0" * len(sInput)
    # Loop through each rule
    for dctRule in liRules:
        # if the rule is not enabled...
        if not dctRule["enabled"]:
            # ...skip it
            continue
        # sTempProtect serves the purpose of sProtect within each rule, as a rule is never allowed to overwrite itself
        sTempProtect = sProtect
        objRgx = re.compile(dctRule["predecessor"])
        liReplacements = dctRule["successor"]
        itMatches = objRgx.finditer(sOut)
        lOffset = 0
        # Loop through all matches.  NOTE(review): sOut is mutated while the
        # matches of the ORIGINAL string are iterated; lOffset compensates for
        # the length changes.
        for objMatch in tqdm(itMatches, desc=dctRule["name"], file=sys.stdout):
            lStart = objMatch.span()[0] + lOffset
            lEnd = objMatch.span()[1] + lOffset
            sShieldCheck = sTempProtect[lStart:lEnd]
            # Check whether the match overlaps any protected substrings
            if "1" in sShieldCheck:
                # If there are some zeros in here, this match could be eclipsing another match.
                if "0" not in sShieldCheck:
                    continue
                # Find the next match. This will either be the eclipsed match, or simply the next math in the iterable
                objMatch = objRgx.search(sOut[lStart+1:])
                # If there aren't any matches left at all in the string, we're done.
                if objMatch is None:
                    break
                # Adjust lStart and lEnd to account for how we sliced the string in line 111
                lStart += objMatch.span()[0] + 1
                lEnd += objMatch.span()[0] + 1
            # sPredecessor = objMatch.group(0)
            # Choose a successor
            fRand = random.random()
            lChoice = -1
            for i in range(len(liReplacements)):
                if fRand > liReplacements[i][0]:
                    continue
                else:
                    lChoice = i
                    break
            if lChoice == -1:
                sSuccessor = ''
            else:
                # The rest of the string is used here in case there are lookahead groups that are referenced by the
                # successor pattern (since they will not be captured in objMatch.group(0))
                sSuccessor = liReplacements[lChoice][1]
                # Manually swap out backreferences, checking for all notation types: \1, \g<1>, \g<name>
                # Step backward so that \20 gets replaced by group 20, not group 2
                for i in reversed(range(len(objMatch.groups())+1)):
                    sSuccessor = sSuccessor.replace("\\" + str(i), objMatch.group(i))
                    sSuccessor = sSuccessor.replace(r"\g<" + str(i) + ">", objMatch.group(i))
                for sGroupName in objMatch.groupdict():
                    sSuccessor = sSuccessor.replace(r"\g<" + sGroupName + ">", objMatch.group(sGroupName))
            # Stitch things back together
            sOut = sOut[:lStart] + sSuccessor + sOut[lEnd:]
            # Protect the affected substring.
            sShield = "1" * len(sSuccessor)
            sTempProtect = sTempProtect[:lStart] + sShield + sTempProtect[lEnd:]
            if dctRule["protected"]:
                sProtect = sProtect[:lStart] + sShield + sProtect[lEnd:]
            else:
                sProtect = sProtect[:lStart] + "0"*len(sShield) + sProtect[lEnd:]
            # The span of the remaining regex matches has already been set, so we need to accommodate for changing
            # string lengths with the lOffset
            lOffset += len(sSuccessor) - (lEnd - lStart)
    # If we're just spinning our wheels and not transforming the string...
    if sInput == sOut:
        # ...there's no need to run through future iterations.
        return sOut
    # Recurse for the remaining iterations on the mutated string.
    lIterations -= 1
    sOut = lindenate(liRules, sOut, lIterations)
    return sOut
def lindenator(liRules, sInput="", lIterations=1, lMaxReturns=None):
    """Generator form of lindenate.

    The first yield is sInput unchanged; every subsequent yield applies
    lIterations more iteration(s) of liRules to the previous yield.  When
    lMaxReturns is given, the generator exhausts after that many yields
    (zero or negative means it yields nothing).
    """
    lYielded = 0
    while lMaxReturns is None or lYielded < lMaxReturns:
        yield sInput
        sInput = lindenate(liRules, sInput, lIterations)
        lYielded += 1
def main():
    """Interactive demo: seed the overlap rule set with user input and print
    successive L-system generations until the user types 'end'."""
    # Demo rule set exercising regex predecessors, named groups and
    # stochastic successors (Rule4 picks one of two outputs at random).
    liRules = [
        {
            "name": "Rule1",
            "enabled": False,
            "protected": False,
            "predecessor": r"test",
            "successor": [(1, r"ans")]
        }, {
            "name": "Rule2",
            "enabled": True,
            "protected": True,
            "predecessor": r"1",
            "successor": [(1, r"3")]
        }, {
            "name": "Rule3",
            "enabled": True,
            "protected": True,
            "predecessor": r"(2(?P<middleLetter>[a-z])2)",
            "successor": [(1, r"Z\g<middleLetter>Z")]
        }, {
            "name": "Rule4",
            "enabled": True,
            "protected": True,
            "predecessor": r"[a-zA-Z][\d]",
            "successor": [(.5, r"7"), (1, r"9")]
        },
    ]
    # Rule set with overlapping/eclipsing matches; the Axiom rule seeds an
    # empty input string.
    liOverlapRules = [
        {
            "name": "Axiom",
            "enabled": True,
            "protected": True,
            "predecessor": r"^$",
            "successor": [(1, "ABBAAAA")]
        }, {
            "name": "Rule One",
            "enabled": True,
            "protected": True,
            "predecessor": r"(.)(?=AAA)",
            "successor": [(1, "Z")]
        }, {
            "name": "Rule Two",
            "enabled": True,
            "protected": True,
            "predecessor": r"AA",
            "successor": [(1, "CC")]
        }, {
            "name": "Rule Three",
            "enabled": True,
            "protected": True,
            "predecessor": r"A",
            "successor": [(1, "B")]
        }, {
            "name": "Rule Four",
            "enabled": True,
            "protected": True,
            "predecessor": r"B",
            "successor": [(1, "AAAA")]
        },
    ]
    # Disabled manual checks kept as a (no-op) string literal for reference.
    """
    print(lindenate(liRules, "ttest8est82a22b2", 1))
    print(lindenate(liRules, "ttest8est82a22b2", 2))
    print(lindenate(liOverlapRules, "BBAAAA"))
    print(lindenate(liOverlapRules, lIterations=8))
    """
    sInput = input("test?")
    # generator_looper restarts the bounded generator once it exhausts.
    objLReturn = generator_looper(partial(lindenator, liOverlapRules, sInput, lMaxReturns=10))
    bKeepGoing = True
    while bKeepGoing:
        print(next(objLReturn))
        sInput = input("type 'end' to end")
        if sInput == "end":
            bKeepGoing = False
    print("ended")


if __name__ == "__main__":
    main()
| Thelnar/Lindenmayer-Fractals-Web-App | lindenmayer.py | lindenmayer.py | py | 10,433 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "random.random",
"line_number... |
40759520304 | import json
import pickle
import random
from os.path import join, dirname
import nltk
from nltk.corpus import treebank
from nltk.tag.sequential import ClassifierBasedPOSTagger
# Metadata describing the model this script trains; accuracy is filled in
# after evaluation and the whole record is written next to the pickle.
MODEL_META = {
    "corpus": "treebank",
    "lang": "en",
    "model_id": "nltk_treebank_clftagger",
    "tagset": "Penn Treebank",
    "algo": "ClassifierBasedPOSTagger",
    "required_packages": ["nltk"]
}
# initializing training and testing set
nltk.download('treebank')
META = join(dirname(dirname(dirname(__file__))), "JarbasModelZoo", "res")
meta_path = join(META, MODEL_META["model_id"] + ".json")
corpus = treebank.tagged_sents()  # 3914
# Shuffle before the train/test split so the 3000/914 split is random.
random.shuffle(corpus)
train_data = corpus[:3000]
test_data = corpus[3000:]
tagger = ClassifierBasedPOSTagger(train=train_data)
a = tagger.evaluate(test_data)
MODEL_META["accuracy"] = a
with open(meta_path, "w") as f:
    json.dump(MODEL_META, f)
print("Accuracy: ", a)  # 0.9309734513274336
# save pickle
path = join(dirname(dirname(dirname(__file__))),
            "models", "postag", MODEL_META["model_id"] + ".pkl")
with open(path, "wb") as f:
    pickle.dump(tagger, f)
| OpenJarbas/ModelZoo | train/postag/nltk_treebank_clf_postag.py | nltk_treebank_clf_postag.py | py | 1,106 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "nltk.download",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_n... |
69878608033 | import sys
from collections import deque
# Read vertex count N, edge count M and start vertex V from stdin.
N, M, V = map(int, sys.stdin.readline().split())
graph = [[0]*(N+1) for i in range(N+1)]
# build the adjacency matrix (1-indexed, undirected)
for i in range(M):
    a, b = map(int, sys.stdin.readline().split())
    graph[a][b] = graph[b][a] = 1
# Visited flags shared by the recursive dfs() below; bfs() uses its own.
visited = [False] * (N + 1)
def dfs(V):
    """Recursive depth-first traversal from V, printing vertices in visit
    order; uses the module-level graph and visited arrays."""
    visited[V] = True
    print(V, end=' ')
    for nxt in range(1, N + 1):
        if graph[V][nxt] == 1 and not visited[nxt]:
            dfs(nxt)
def bfs(V):
    """Breadth-first traversal from V on the module-level graph, printing
    vertices in visit order (preceded by a newline); uses its own visited
    array so it is independent of the earlier dfs run."""
    print()
    visited = [False] * (N + 1)
    queue = deque([V])
    # Mark the start vertex exactly once.  The original re-marked the current
    # vertex on every loop iteration, which was redundant (neighbours are
    # already marked when enqueued).
    visited[V] = True
    while queue:
        V = queue.popleft()
        print(V, end=" ")
        for nxt in range(1, N + 1):
            if not visited[nxt] and graph[V][nxt] == 1:
                queue.append(nxt)
                visited[nxt] = True
dfs(1)  # DFS order from vertex 1
bfs(1)  # BFS order from vertex 1 (printed on a new line, fresh visited array)
| jjongram/demo-repository | self_study/src/baekjoon/bfsdfspractice.py | bfsdfspractice.py | py | 778 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.stdin.readline",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"li... |
43073974779 | import logging
from typing import Dict
import grpc
from needlestack.apis import collections_pb2
from needlestack.apis import servicers_pb2
from needlestack.apis import servicers_pb2_grpc
from needlestack.apis import serializers
from needlestack.collections.collection import Collection
from needlestack.collections.shard import Shard
from needlestack.cluster_managers import ClusterManager
from needlestack.servicers.settings import BaseConfig
from needlestack.utilities.rpc import unhandled_exception_rpc
logger = logging.getLogger("needlestack")
class SearcherServicer(servicers_pb2_grpc.SearcherServicer):
    """A gRPC servicer to perform kNN queries on in-memory index structures"""

    # Collections currently loaded on this node, keyed by collection name.
    collections: Dict[str, Collection]
    # Last proto seen for each loaded collection; diffed against on reload.
    collection_protos: Dict[str, collections_pb2.Collection]

    def __init__(self, config: BaseConfig, cluster_manager: ClusterManager):
        self.config = config
        self.cluster_manager = cluster_manager
        self.collections = {}
        self.collection_protos = {}
        # Announce this searcher to the cluster, then pull its assigned collections.
        self.cluster_manager.register_searcher()
        self.load_collections()

    @unhandled_exception_rpc(servicers_pb2.SearchResponse)
    def Search(self, request, context):
        # kNN search RPC: returns up to request.count nearest items.
        X = serializers.proto_to_ndarray(request.vector)
        k = request.count
        collection = self.get_collection(request.collection_name)
        # Promote a single query vector to a 1-row matrix.
        if len(X.shape) == 1:
            X = X.reshape(1, -1)
        if collection.dimension == X.shape[1]:
            results = collection.query(X, k, request.shard_names)
            # Keep at most k result items.
            items = [item for i, item in enumerate(results) if i < k]
            return servicers_pb2.SearchResponse(items=items)
        else:
            # Query dimensionality mismatch is a caller error.
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            context.set_details(
                f"Collection {collection.name} expected matrix shaped ({collection.dimension}), got {X.shape}"
            )
            return servicers_pb2.SearchResponse()

    @unhandled_exception_rpc(servicers_pb2.RetrieveResponse)
    def Retrieve(self, request, context):
        # Point lookup by id; an empty response signals "not found".
        collection = self.get_collection(request.collection_name)
        item = collection.retrieve(request.id, request.shard_names)
        if item is not None:
            return servicers_pb2.RetrieveResponse(item=item)
        else:
            return servicers_pb2.RetrieveResponse()

    @unhandled_exception_rpc(collections_pb2.CollectionsLoadResponse)
    def CollectionsLoad(self, request, context):
        # RPC hook to re-sync collection configuration from the cluster.
        self.load_collections()
        return collections_pb2.CollectionsLoadResponse()

    def get_collection(self, name: str) -> Collection:
        # Raises KeyError if the collection is not loaded on this node.
        return self.collections[name]

    def load_collections(self):
        """Load collections from Zookeeper configs
        There are 4 states to handle for each collection:
        - A new collection needs to be loaded
        - An existing collection needs to be dropped
        - An existing collection added/dropped shards
        - No changes
        """
        collection_protos = self.cluster_manager.list_local_collections(
            include_state=False
        )
        current_collections = {name for name in self.collection_protos.keys()}
        new_collections = {proto.name for proto in collection_protos}
        # Diff collections we already hold; add brand-new ones.
        for proto in collection_protos:
            if proto.name in current_collections:
                self._modify_collection(proto)
            else:
                self._add_collection(proto)
        # Drop collections no longer assigned to this node.
        for name in current_collections:
            if name not in new_collections:
                self._drop_collection(name)
        # Reload any collection whose backing data has a newer version,
        # marking the replica BOOTING for the duration of the load.
        for collection in self.collections.values():
            if collection.update_available():
                logger.debug(f"Update collection {collection.name}")
                self.cluster_manager.set_local_state(
                    collections_pb2.Replica.BOOTING, collection.name
                )
                collection.load()
                self.cluster_manager.set_local_state(
                    collections_pb2.Replica.ACTIVE, collection.name
                )
        self.collection_protos = {proto.name: proto for proto in collection_protos}

    def _add_collection(self, proto: collections_pb2.Collection):
        # Load a collection this node has not seen before.
        logger.debug(f"Add collection {proto.name}")
        collection = Collection.from_proto(proto)
        self.cluster_manager.set_local_state(
            collections_pb2.Replica.BOOTING, collection.name
        )
        self.collections[collection.name] = collection
        collection.load()
        self.cluster_manager.set_local_state(
            collections_pb2.Replica.ACTIVE, collection.name
        )

    def _drop_collection(self, name: str):
        # Forget the in-memory collection; no cluster state change here.
        logger.debug(f"Drop collection {name}")
        del self.collections[name]

    def _modify_collection(self, proto: collections_pb2.Collection):
        old_proto = self.collection_protos[proto.name]
        # Cheap change detection via serialized-proto comparison.
        if old_proto.SerializeToString() != proto.SerializeToString():
            collection = self.get_collection(proto.name)
            collection.merge_proto(proto)
            old_shards = {shard.name: shard for shard in old_proto.shards}
            new_shards = {shard.name: shard for shard in proto.shards}
            for name, new_shard in new_shards.items():
                if name not in old_shards:
                    logger.debug(f"Add collection shard {proto.name}/{name}")
                    self.cluster_manager.set_local_state(
                        collections_pb2.Replica.BOOTING, collection.name, name
                    )
                    collection.add_shard(Shard.from_proto(new_shard))
                elif (
                    new_shard.SerializeToString()
                    != old_shards[name].SerializeToString()
                ):
                    logger.debug(f"Update collection shard {proto.name}/{name}")
                    self.cluster_manager.set_local_state(
                        collections_pb2.Replica.BOOTING, collection.name, name
                    )
                    collection.drop_shard(name)
                    collection.add_shard(Shard.from_proto(new_shard))
            for name in old_shards.keys():
                if name not in new_shards:
                    logger.debug(f"Drop collection shard {proto.name}/{name}")
                    collection.drop_shard(name)
            collection.load()
            # NOTE(review): 'name' here is whatever the last loop left bound;
            # confirm the intended shard argument for the ACTIVE transition.
            self.cluster_manager.set_local_state(
                collections_pb2.Replica.ACTIVE, collection.name, name
            )
| needlehaystack/needlestack | needlestack/servicers/searcher.py | searcher.py | py | 6,488 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "needlestack.apis.servicers_pb2_grpc.SearcherServicer",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "needlestack.apis.servicers_pb2_grpc",
"line_number": 20,
"usa... |
29317960475 |
import sys
import time
import os
import gc
import json
import argparse
from pathlib import Path
os.environ["JAX_PLATFORMS"] = "cpu"
import jax
import flax
import numpy as np
import jax.numpy as jnp
import orbax
import orbax.checkpoint
from optax import MaskedNode
from etils import epath
from praxis import base_hyperparams
from praxis import pax_fiddle
from praxis import py_utils
from paxml import checkpoints
from paxml import checkpoint_managers
from paxml import train_states
from paxml import trainer_lib
from flax.traverse_util import flatten_dict, unflatten_dict
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
# torch is required to read the HF checkpoint; install a CPU build on demand.
try:
    import torch
except ImportError:  # fix: was a bare except
    import subprocess  # fix: subprocess was used below without being imported

    commands = (
        # fix: the two URL fragments were adjacent string literals that
        # concatenated into "--index-urlhttps://..." (missing space).
        "pip install torch==2.0.0+cpu torchvision==0.15.1+cpu torchaudio==2.0.1 "
        "--index-url https://download.pytorch.org/whl/cpu",
        "pip install transformers_stream_generator",
        "pip install accelerate",
    )
    # fix: subprocess.run was handed the whole tuple, which with shell=True
    # executes only the first element; run each command separately.
    for command in commands:
        subprocess.run(command, stdout=subprocess.PIPE, shell=True)
    import torch
# Short aliases for the paxml checkpoint machinery.
TrainState = train_states.TrainState
CheckpointType = checkpoints.CheckpointType
Checkpointer = checkpoints.Checkpointer
PaxCheckpointHandler = checkpoints.PaxCheckpointHandler
NestedMap = py_utils.NestedMap
checkpoint_type = CheckpointType.GDA
SAVE_INTERVAL_STEPS = 1
# Qwen model hyper-parameters keyed by model size string.
LLAMA_STANDARD_CONFIGS = {
    "7B": {
        "dim": 4096,
        "intermediate_size": 11008,
        "n_layers": 32,
        "n_heads": 32,
        "norm_eps": 1e-6,
        "vocab_size": 151936
    },
    "14B": {
        "dim": 5120,
        "intermediate_size": 13696,
        "n_layers": 40,
        "n_heads": 40,
        "norm_eps": 1e-6,
        "vocab_size": 152064
    },
}
# Checkpoint step to save under, and the model size being converted.
step = 0
model_size = '14B'
# Pull the per-size hyper-parameters into module-level names.
params = LLAMA_STANDARD_CONFIGS[model_size]
n_layers = params["n_layers"]
n_heads = params["n_heads"]
dim = params["dim"]
intermediate_size = params["intermediate_size"]
# Fix: vocab_size is referenced later (paxml_to_hf_key_and_shape) but was
# never extracted from params, causing a NameError at that dict literal.
vocab_size = params["vocab_size"]
head_dim = dim // n_heads
save_opt = False  # when True, also emit (zeroed) optimizer m/v states
# Load the HF Qwen checkpoint in eval mode (no gradients needed).
model = AutoModelForCausalLM.from_pretrained(f"Qwen/Qwen-{model_size}", device_map="auto", trust_remote_code=True).eval()
# pip install tiktoken
# tokenizer = AutoTokenizer.from_pretrained(f"Qwen/Qwen-{model_size}", trust_remote_code=True)
# Collect every named parameter into a flat dict for conversion.
ckpt = {}
for k, v in model.named_parameters():
    ckpt[k] = v
assert len(ckpt) > 0, print(f"ckpt is empty, please model path whether right or error.....")
save_dir = f'gs://llm_base_models/qwen/{model_size}/paxml/checkpoints/'
# Orbax/paxml checkpoint manager writing to the GCS destination above.
options = checkpoint_managers.CheckpointManagerOptions(
    max_to_keep=10,
    save_interval_steps=SAVE_INTERVAL_STEPS,
    cleanup_tmp_directories=True,
)
checkpointer = Checkpointer(
    PaxCheckpointHandler(
        enforce_restore_shape_check=False,
        use_ocdbt=False,
    )
)
save_dir = epath.Path(save_dir)
checkpoint_manager = checkpoint_managers.OrbaxCheckpointManager(
    save_dir,
    checkpointer,
    train_input_checkpointer=False,
    options=options,
    checkpoint_type=checkpoint_type,
    tensorstore_use_ocdbt=False,
)
# Log every source parameter name and shape for a manual sanity check.
for k, v in model.named_parameters():
    print(k, v.shape)
# Maps each paxml parameter path to its expected shape and the substring of
# the HF parameter name it is populated from.
# NOTE(review): vocab_size is not assigned anywhere earlier in this script
# (it only exists inside LLAMA_STANDARD_CONFIGS) -- confirm it is defined
# before this point, otherwise this dict literal raises NameError.
paxml_to_hf_key_and_shape = {
    "params.lm.embedding_lookup.emb_var": {
        "shape": (vocab_size, dim),
        "map_to_hf": "wte.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1.linear.w": {
        "shape": (dim, intermediate_size),
        "map_to_hf": "w1.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer1_gate.linear.w": {
        "shape": (dim, intermediate_size),
        "map_to_hf": "w2.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.ffn_layer2.linear.w": {
        "shape": (intermediate_size, dim),
        "map_to_hf": "mlp.c_proj.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.self_attention.query.w": {
        "shape": (dim, n_heads, head_dim),
        "map_to_hf": "q_proj.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.self_attention.query.b": {
        "shape": (dim, n_heads, head_dim),
        "map_to_hf": "q_proj.bias",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.self_attention.key.w": {
        "shape": (dim, n_heads, head_dim),
        "map_to_hf": "k_proj.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.self_attention.key.b": {
        "shape": (dim, n_heads, head_dim),
        "map_to_hf": "k_proj.bias",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.self_attention.value.w": {
        "shape": (dim, n_heads, head_dim),
        "map_to_hf": "v_proj.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.self_attention.value.b": {
        "shape": (dim, n_heads, head_dim),
        "map_to_hf": "v_proj.bias",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.self_attention.post.w": {
        "shape": (dim, n_heads, head_dim),
        "map_to_hf": "attn.c_proj.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.layer_norm.scale": {
        "shape": (dim,),
        "map_to_hf": "ln_1.weight",
    },
    "params.lm.transformer.repeat.sub.x_layers_0.ff_layer.layer_norm.scale": {
        "shape": (dim,),
        "map_to_hf": "ln_2.weight",
    },
    "params.lm.final_ln.scale": {"shape": (dim,), "map_to_hf": "ln_f.weight"},
    "params.lm.softmax.logits_ffn.linear.w": {
        "shape": (dim, vocab_size),
        "map_to_hf": "lm_head",
    },
}
gold_w = ckpt
split_qkv = {}
# Split HF Qwen's fused c_attn QKV tensors into separate q/k/v entries and
# transpose 2-D weights into paxml's (in, out) layout.
for k, v in gold_w.items():
    # Work in float32 regardless of the checkpoint dtype.
    if v.dtype != torch.float32:
        v = v.to(torch.float32)
    # o_proj (attn.c_proj) and the embedding are deliberately NOT transposed
    # -- a known gotcha of this layout.
    if len(v.shape) == 2 and "wte.weight" not in k and "attn.c_proj.weight" not in k:
        v = v.transpose(1, 0)
    else:
        print(f"No transpose k: {k}")
    if "c_attn" in k:
        qq = k.replace("c_attn", "q_proj")
        kk = k.replace("c_attn", "k_proj")
        vv = k.replace("c_attn", "v_proj")
        # Fix: original printed the literal text 'v.shape' (f-string with
        # no placeholder); print the actual shape.
        print(f"{v.shape}")
        if len(v.shape) == 1:
            # Fused bias of length 3*dim -> three vectors of length dim.
            split_qkv[qq] = v[..., :dim].detach().numpy().reshape(-1)
            split_qkv[kk] = v[..., dim: 2 * dim].detach().numpy().reshape(-1)
            split_qkv[vv] = v[..., 2 * dim:].detach().numpy().reshape(-1)
        elif len(v.shape) == 2:
            # Fused weight (dim, 3*dim) -> three (dim, dim) matrices.
            split_qkv[qq] = v[..., :dim].detach().numpy().reshape(dim, -1)
            split_qkv[kk] = v[..., dim: 2 * dim].detach().numpy().reshape(dim, -1)
            split_qkv[vv] = v[..., 2 * dim:].detach().numpy().reshape(dim, -1)
        else:
            raise ValueError(f'qkv shape is error!!!')
    else:
        split_qkv[k] = v.detach().numpy()
# Log the resulting per-tensor shapes for a manual sanity check.
for k, v in split_qkv.items():
    print(k, v.shape)
import re

trans_result = {}
flag = 0
# For each paxml parameter, gather the matching HF tensors across layers
# and stack them along a leading layer axis (paxml "repeat" layout).
with jax.default_device(jax.devices("cpu")[0]):
    for k, v in paxml_to_hf_key_and_shape.items():
        v = v["map_to_hf"]
        k = tuple(k.split("."))
        values = []
        for gold_key, glod_values in split_qkv.items():
            flag = 0
            if v in gold_key:
                flag = 1
                match_res = re.findall("q_proj|k_proj|v_proj|attn.c_proj", v)
                if match_res:
                    # Attention tensors are reshaped to expose the head axis.
                    if len(glod_values.shape) > 1:
                        glod_values = glod_values.reshape(dim, n_heads, head_dim)
                    else:
                        glod_values = glod_values.reshape(n_heads, head_dim)
                try:
                    # Layer index from the HF key (first number); 0 for
                    # unnumbered keys such as the embedding.
                    layer_index = int(re.findall("\d+", gold_key)[0])
                except:
                    layer_index = 0
                values.append([layer_index, glod_values])
                print(f"match_res: {match_res}|| {len(values)}")
        # Sort per-layer tensors by layer index, then stack (or take the
        # single tensor for non-repeated parameters).
        values = sorted(values, key=lambda x: x[0])
        if len(values) > 1:
            stack_values = np.stack(list(zip(*values))[1])
        else:
            stack_values = values[0][1]
        trans_result[k] = stack_values
if step is None:
latest_step = checkpoint_manager.latest_step()
if save_dir == read_dir:
step = latest_step + SAVE_INTERVAL_STEPS if latest_step is not None else SAVE_INTERVAL_STEPS
else:
step = latest_step
print(f"Model save step is {step}")
start = time.time()
if save_opt:
with jax.default_device(jax.devices("cpu")[0]):
opt_state_mv = jax.tree_map(lambda x: jnp.zeros_like(x), trans_result)
temp_no_prefix, temp_other = {}, {}
for key_tuple, param in opt_state_mv.items():
if "repeat" in key_tuple:
temp_no_prefix[key_tuple] = MaskedNode()
temp_other[key_tuple] = param
else:
temp_no_prefix[key_tuple] = param
temp_other[key_tuple] = MaskedNode()
temp_no_prefix = unflatten_dict(temp_no_prefix)
temp_other = unflatten_dict(temp_other)
no_prefix = {"count": jnp.array(step), "m": temp_no_prefix, "v": temp_no_prefix}
other = {"count": jnp.array([step] * n_layers), "m": temp_other, "v": temp_other}
trans_opt_states = {
"no_prefix": [{"count": jnp.array(step)}] * 2 + [no_prefix, {"count": jnp.array(step)}],
f"p#{n_layers}#i-1": [{"count": jnp.array([step] * n_layers)}] * 2
+ [other, {"count": jnp.array([step] * n_layers)}],
}
trans_opt_states = [trans_opt_states]
else:
trans_opt_states = []
new_trainstate = TrainState(
step=jnp.array(step),
mdl_vars=unflatten_dict(trans_result),
opt_states=trans_opt_states,
)
padded_global_shapes = jax.tree_map(
lambda x: jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype) if hasattr(x, "shape") else x,
new_trainstate,
)
print(f"padded_global_shapes: {padded_global_shapes}")
checkpoint_manager.save(
step, new_trainstate, padded_global_shapes, train_input_pipeline=None, force=False
)
print(f"Saved model finished. take time: {time.time() - start}s...")
| Lisennlp/paxml_praxis | paxml/my_scripts/converts/qwen_hf_to_paxml.py | qwen_hf_to_paxml.py | py | 10,005 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "paxml.train_states.TrainState",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "paxml.train_states",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": ... |
29453296966 | #
from typing import List
import numpy as np
from verypy.classic_heuristics.parallel_savings import clarke_wright_savings_function
from verypy.classic_heuristics.gaskell_savings import gaskell_lambda_savings_function, gaskell_pi_savings_function
from verypy.classic_heuristics.sweep import bisect_angle
# Registry of route-merge savings criteria, selectable by name.
SAVINGS_FN = {
    'clarke_wright': clarke_wright_savings_function,
    'gaskell_lambda': gaskell_lambda_savings_function,
    'gaskell_pi': gaskell_pi_savings_function
}
# Sweep step directions: +1 / -1 step increments (forward, backward, or both).
SWEEP_DIRECTIONS = {
    "fw": [1],
    "bw": [-1],
    "both": [1, -1]
}
# Per-node feature names; NF_MAP gives each feature its column index.
NODE_FEATURES = [
    "x", "y", "centered_x", "centered_y",
    "rho", "phi", "centered_rho", "centered_phi",
    "demands"
]
NF_MAP = {k: v for k, v in zip(NODE_FEATURES, range(len(NODE_FEATURES)))}


class NoSolutionFoundError(Exception):
    """Error class for sub-solver time-outs, etc."""
# class FileWriter:
# """File writer based on numpy memmap using context manager."""
# def __init__(self, file_path, nrows: int, **kwargs):
# assert os.path.splitext(file_path)[-1] in [".dat", ".npy"]
# assert nrows > 0
# mode = 'r+' if os.path.exists(file_path) and os.path.isfile(file_path) else 'w+'
# self.file_path = file_path
# self.nrows = nrows
# self._file = np.memmap(self.file_path, dtype=object, mode=mode, shape=(nrows,), **kwargs)
# self._buffered = False
# self._idx = 0
# self._pos = 0
#
# def __enter__(self):
# return self
#
# def __exit__(self, type, value, traceback):
# self._file.flush()
# del self._file
# return True
#
# def __len__(self):
# return self._idx
#
# def write_to_buffer(self, row: Any):
# self._file[self._idx] = row
# self._idx += 1
# self._buffered = True
# if self._idx >= self._pos + self.nrows:
# # open new memmap slice
# self.flush()
# self._file = None
# self._pos += self.nrows
# self._file = np.memmap(
# self.file_path,
# dtype=object,
# mode="r+",
# shape=(self.nrows,),
# offset=self._pos
# )
#
# def flush(self):
# self._file.flush()
# self._buffered = False
#
# def read(self, idx: Union[int, np.ndarray]):
# if not self._buffered:
# self.flush()
# return self._file[idx].copy()
def compute_cost(routes: List[List], dist_mat: np.ndarray) -> np.ndarray:
    """Compute the travel cost of every route in the solution.

    Each route must start and end at the depot (node 0); its cost is the
    sum of arc costs between consecutive stops, read from ``dist_mat``.
    """
    route_costs = np.zeros(len(routes))
    for idx, route in enumerate(routes):
        assert route[0] == route[-1] == 0
        # Vectorised sum over consecutive (from, to) pairs.
        route_costs[idx] = dist_mat[route[:-1], route[1:]].sum()
    return route_costs
# ============================================================== #
# The code below was taken from the VeRyPy library and adapted
# to select a set of nodes as sub-graph consisting of routes
# https://github.com/yorak/VeRyPy/blob/master/verypy/classic_heuristics/sweep.py
# ============================================================== #
def get_sweep_from_polar_coordinates(rhos, phis):
    """Order nodes by polar angle for a sweep.

    Returns a 3xN array whose rows are (phi, rho, original index), with
    columns sorted by ascending phi.
    """
    node_count = len(rhos)
    # Stack so one argsort reorders angle, radius and index together.
    stacked = np.stack((phis, rhos, np.arange(node_count)))
    angle_order = np.argsort(stacked[0])
    return stacked[:, angle_order]
def _step(current, inc, max_val):
current += inc
if current > max_val:
current = 0
if current < 0:
# reverse direction
current = max_val
return current
def sg_sweep(
    N: int,
    sizes: np.ndarray,
    target_size: int,
    sweep: np.ndarray,
    start: int,
    step_inc: int,
    debug: bool = False,
) -> List[List[int]]:
    """
    Sweeps a beam around the depot node to select a sub graph
    of size close to the specified target size.
    The provided nodes and their demands are not customer nodes,
    but route nodes, i.e. representing the center of the route and
    its total demand.
    """
    # Column idx of the sweep array -> original node index.
    sweep_pos_to_node_idx = lambda idx: int(sweep[2, idx])
    assert len(sweep[0]) == len(sweep[2]) == N
    max_sweep_idx = N-1
    total_to_route = N
    # Routes
    sg_route_sets = []
    selected = np.zeros(N, dtype=bool)
    selected_cnt = 0
    # Emerging route
    current_sg = []
    current_sg_size = 0
    sg_complete = False
    # THE MAIN SWEEP LOOP
    # iterate until a full sweep is done and the backlog is empty
    sweep_pos = start
    sweep_node = sweep_pos_to_node_idx(sweep_pos)
    while True:
        if debug:
            if sweep_node:
                prev_pos = _step(sweep_pos, -step_inc, max_sweep_idx)
                next_pos = _step(sweep_pos, step_inc, max_sweep_idx)
                prev_ray = bisect_angle(sweep[0][prev_pos], sweep[0][sweep_pos], direction=step_inc)
                next_ray = bisect_angle(sweep[0][sweep_pos], sweep[0][next_pos], direction=step_inc)
                print("Considering n%d between rays %.2f, %.2f" % (sweep_node, prev_ray, next_ray))
        # we want at least two tours in each SG,
        # we only allow for 1 if there is only 1 left
        proper = len(current_sg) > 1 or (~selected).sum() == 1
        if not sg_complete and target_size:
            sg_complete = proper and (
                # is smaller but close to target size
                current_sg_size > target_size*0.85 or
                # adding next tour would far exceed target size
                current_sg_size + sizes[sweep_node] > target_size*1.15
            )
        if sg_complete:
            # If SG is complete, store it and start a new one
            # Check if we have all selected, and can exit the main sweep loop
            if proper:
                selected_cnt += len(current_sg)
                sg_route_sets.append(current_sg)
                if selected_cnt >= total_to_route or selected.all():
                    break # SWEEP
                current_sg = []
                current_sg_size = 0
            sg_complete = False
        # Add the node under the beam to the emerging sub-graph.
        if (sweep_node is not None) and (not selected[sweep_node]):
            current_sg.append(sweep_node)
            selected[sweep_node] = True
            if target_size:
                current_sg_size += sizes[sweep_node]
        # Advance the beam to the next unselected node (full circle at most).
        start_stepping_from = sweep_pos
        while True:
            sweep_pos = _step(sweep_pos, step_inc, max_sweep_idx)
            sweep_node = sweep_pos_to_node_idx(sweep_pos)
            if (not selected[sweep_node]):
                break # found an unselected node continue with it
            if sweep_pos == start_stepping_from:
                # We checked, and it seems there is no unselected non-blocked
                # nodes left -> start a new route, reset blocked and try again.
                sweep_node = None
                sg_complete = True
                break
    return sg_route_sets
| jokofa/NRR | lib/nrr/utils.py | utils.py | py | 6,997 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "verypy.classic_heuristics.parallel_savings.clarke_wright_savings_function",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "verypy.classic_heuristics.gaskell_savings.gaskell_lambda_savings_function",
"line_number": 11,
"usage_type": "name"
},
{
"api_name"... |
34214137069 | """
Exp 00 - Tests Data preprocessing and trains a basic linear regression
Model for abalone age prediction
"""
from datetime import datetime
from sklearn.linear_model import LinearRegression
import numpy as np
class Exp00:
    """Experiment 00: abalone data loading plus a baseline linear regression.

    Loads pre-split abalone train/test CSVs, fits scikit-learn's
    LinearRegression, and reports mean absolute (percentage) error on both
    splits.
    """

    @staticmethod
    def load_train_test_data(file_path_prefix=""):
        """
        This method loads the training and testing data.
        :param file_path_prefix: Any prefix needed to correctly locate the files.
        :return: x_train, y_train, x_test, y_test as numpy arrays; the last
            column of each CSV is the target, the rest are features.
        """
        train = np.loadtxt(file_path_prefix + "abalone_train.csv", delimiter=',')
        test = np.loadtxt(file_path_prefix + "abalone_test.csv", delimiter=',')
        x_train = train[:, :-1]
        y_train = train[:, -1]
        x_test = test[:, :-1]
        y_test = test[:, -1]
        return x_train, y_train, x_test, y_test

    @staticmethod
    def compute_mean_absolute_error(true_y_values, predicted_y_values):
        """Return mean(|y - y_hat|) over paired true/predicted values."""
        errors = [abs(t - p) for t, p in zip(true_y_values, predicted_y_values)]
        return np.mean(errors)

    @staticmethod
    def compute_mean_absolute_percentage_error(true_y_values, predicted_y_values):
        """Return mean(|y - y_hat| / |y|) over paired true/predicted values.

        Fix: the original appended each error to the list twice (a duplicated
        ``append`` line), double-counting samples in the mean; each error is
        now counted exactly once.
        """
        errors = [abs((t - p) / t) for t, p in zip(true_y_values, predicted_y_values)]
        return np.mean(errors)

    @staticmethod
    def print_error_report(trained_model, x_train, y_train, x_test, y_test):
        """Print MAE and MAPE for both the training and testing splits."""
        print("\tEvaluating on Training Data")
        # Training-set error is only a sanity check: the model has already
        # seen this data, so it understates error on novel inputs -- but a
        # model that does badly even here signals deeper problems.
        y_train_pred = trained_model.predict(x_train)
        mean_absolute_error_train = Exp00.compute_mean_absolute_error(y_train, y_train_pred)
        mean_absolute_perc_error_train = Exp00.compute_mean_absolute_percentage_error(y_train,
                                                                                     y_train_pred)
        print("\tMean Absolute Error (Training Data):", mean_absolute_error_train)
        print("\tMean Absolute Percentage Error (Training Data):", mean_absolute_perc_error_train)
        print()
        print("\tEvaluating on Testing Data")
        # Test-set error is the more realistic indicator of accuracy in the
        # wild, since the model has never seen these examples.
        y_test_pred = trained_model.predict(x_test)
        mean_absolute_error_test = Exp00.compute_mean_absolute_error(y_test, y_test_pred)
        mean_absolute_perc_error_test = Exp00.compute_mean_absolute_percentage_error(y_test,
                                                                                    y_test_pred)
        print("\tMean Absolute Error (Testing Data):", mean_absolute_error_test)
        print("\tMean Absolute Percentage Error (Testing Data):", mean_absolute_perc_error_test)
        print()

    def run(self):
        """Load data, train the baseline model, and print a timed error report."""
        start_time = datetime.now()
        print("Running Exp: ", self.__class__, "at", start_time)
        print("Loading Data")
        x_train, y_train, x_test, y_test = Exp00.load_train_test_data()
        print("Training Model...")
        # Ordinary least-squares linear regression as the baseline model.
        model = LinearRegression()
        model.fit(x_train, y_train)
        print("Training complete!")
        print()
        print("Evaluating Model")
        Exp00.print_error_report(model, x_train, y_train, x_test, y_test)
        # End and report time.
        end_time = datetime.now()
        print("Exp is over; completed at", datetime.now())
        total_time = end_time - start_time
        print("Total time to run:", total_time)
| zaccross/Linear-Regression-Project-0.5 | exp00.py | exp00.py | py | 5,389 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.loadtxt",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number"... |
26844478578 | import pytest
import hashtags as ht
@pytest.fixture
def tweets():
    """Three canned tweets: #1 tagged fOO/Bar, #2 untagged, #3 tagged foo/blurfl."""
    return [
        {
            'id_str': '1',
            'text': " Doesn't matter what the text is. ",
            'entities': {
                'hashtags': [
                    {'text': 'fOO'},
                    {'text': 'Bar'}
                ]
            }
        },
        {
            'id_str': '2',
            'text': "Some other text.",
            'entities': {
                'hashtags': []
            }
        },
        {
            'id_str': '3',
            'text': "More text.",
            'entities': {
                'hashtags': [
                    {'text': 'foo'},
                    {'text': 'blurfl'}
                ]
            }
        }
    ]
def test_get_hashtags(tweets):
    """Hashtag texts are extracted lower-cased, preserving order."""
    first_tweet = tweets[0]
    assert ht.get_hashtags(first_tweet) == ['foo', 'bar']
def test_has_hashtag(tweets):
    """has_hashtag reports a tag present on the tweet (case-insensitively)."""
    tweet = tweets[0]
    # Idiom fix: assert truthiness directly rather than comparing == True (E712).
    assert ht.has_hashtag(tweet, 'foo')
def get_ids(tweets):
    """Return the id_str of every tweet, preserving order."""
    ids = []
    for tweet in tweets:
        ids.append(tweet['id_str'])
    return ids
def test_filter_by_hashtag(tweets):
    """filter_by_hashtag keeps exactly the tweets carrying the given tag."""
    assert get_ids(ht.filter_by_hashtag(tweets, 'foo')) == ['1', '3']
    assert get_ids(ht.filter_by_hashtag(tweets, 'blurfl')) == ['3']
    assert get_ids(ht.filter_by_hashtag(tweets, 'quux')) == []
| marklar/massiu | test/test_hashtags.py | test_hashtags.py | py | 1,311 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytest.fixture",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "hashtags.get_hashtags",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "hashtags.has_hashtag",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "hashtag... |
26434848146 | """create item table
Revision ID: ff9dac589eea
Revises: 8c1c7409f4e5
Create Date: 2022-07-10 08:58:37.265281
"""
from alembic import op
import sqlalchemy as sa
from datetime import datetime
# revision identifiers, used by Alembic.
revision = 'ff9dac589eea'
down_revision = '8c1c7409f4e5'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Create the ``item`` table with audit timestamps and a seller FK."""
    item_columns = [
        sa.Column('id', sa.Integer, primary_key=True, index=True),
        sa.Column('price', sa.String, nullable=True),
        sa.Column('is_active', sa.Boolean, nullable=False, default=True),
        sa.Column('created_date', sa.DateTime, nullable=False, default=datetime.utcnow),
        sa.Column('updated_date', sa.DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow),
        sa.Column('model', sa.String, nullable=True),
        sa.Column('brand', sa.String, nullable=True),
        sa.Column('location', sa.String, nullable=True),
        sa.Column('description', sa.String, nullable=True),
        sa.Column('seller_id', sa.Integer, sa.ForeignKey("user.id"), nullable=True),
    ]
    op.create_table('item', *item_columns)
def downgrade() -> None:
    """Drop the ``item`` table, reversing :func:`upgrade`."""
    op.drop_table('item')
| guneybilen/fastAPI_justlikenew | alembic/versions/ff9dac589eea_create_item_table.py | ff9dac589eea_create_item_table.py | py | 1,168 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
27286453553 | from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
import numpy as np
import pandas as pd
if TYPE_CHECKING: # pragma: no cover
from cleanlab.datalab.internal.data import Data
from cleanlab.datalab.internal.issue_manager import IssueManager
from cleanvision import Imagelab
class DataIssues:
"""
Class that collects and stores information and statistics on issues found in a dataset.
Parameters
----------
data :
The data object for which the issues are being collected.
Parameters
----------
issues : pd.DataFrame
Stores information about each individual issue found in the data,
on a per-example basis.
issue_summary : pd.DataFrame
Summarizes the overall statistics for each issue type.
info : dict
A dictionary that contains information and statistics about the data and each issue type.
"""
def __init__(self, data: Data) -> None:
self.issues: pd.DataFrame = pd.DataFrame(index=range(len(data)))
self.issue_summary: pd.DataFrame = pd.DataFrame(
columns=["issue_type", "score", "num_issues"]
).astype({"score": np.float64, "num_issues": np.int64})
self.info: Dict[str, Dict[str, Any]] = {
"statistics": get_data_statistics(data),
}
self._label_map = data.labels.label_map
@property
def statistics(self) -> Dict[str, Any]:
"""Returns the statistics dictionary.
Shorthand for self.info["statistics"].
"""
return self.info["statistics"]
def get_issues(self, issue_name: Optional[str] = None) -> pd.DataFrame:
"""
Use this after finding issues to see which examples suffer from which types of issues.
Parameters
----------
issue_name : str or None
The type of issue to focus on. If `None`, returns full DataFrame summarizing all of the types of issues detected in each example from the dataset.
Raises
------
ValueError
If `issue_name` is not a type of issue previously considered in the audit.
Returns
-------
specific_issues :
A DataFrame where each row corresponds to an example from the dataset and columns specify:
whether this example exhibits a particular type of issue and how severely (via a numeric quality score where lower values indicate more severe instances of the issue).
Additional columns may be present in the DataFrame depending on the type of issue specified.
"""
if issue_name is None:
return self.issues
columns = [col for col in self.issues.columns if issue_name in col]
if not columns:
raise ValueError(f"No columns found for issue type '{issue_name}'.")
specific_issues = self.issues[columns]
info = self.get_info(issue_name=issue_name)
if issue_name == "label":
specific_issues = specific_issues.assign(
given_label=info["given_label"], predicted_label=info["predicted_label"]
)
if issue_name == "near_duplicate":
column_dict = {
k: info.get(k)
for k in ["near_duplicate_sets", "distance_to_nearest_neighbor"]
if info.get(k) is not None
}
specific_issues = specific_issues.assign(**column_dict)
return specific_issues
def get_issue_summary(self, issue_name: Optional[str] = None) -> pd.DataFrame:
"""Summarize the issues found in dataset of a particular type,
including how severe this type of issue is overall across the dataset.
Parameters
----------
issue_name :
Name of the issue type to summarize. If `None`, summarizes each of the different issue types previously considered in the audit.
Returns
-------
issue_summary :
DataFrame where each row corresponds to a type of issue, and columns quantify:
the number of examples in the dataset estimated to exhibit this type of issue,
and the overall severity of the issue across the dataset (via a numeric quality score where lower values indicate that the issue is overall more severe).
"""
if self.issue_summary.empty:
raise ValueError(
"No issues found in the dataset. "
"Call `find_issues` before calling `get_issue_summary`."
)
if issue_name is None:
return self.issue_summary
row_mask = self.issue_summary["issue_type"] == issue_name
if not any(row_mask):
raise ValueError(f"Issue type {issue_name} not found in the summary.")
return self.issue_summary[row_mask].reset_index(drop=True)
def get_info(self, issue_name: Optional[str] = None) -> Dict[str, Any]:
    """Return the stored info for `issue_name` (or a copy of all info).

    For the "label" issue type, integer-coded labels are mapped back to
    class-name strings via ``self._label_map``.

    Parameters
    ----------
    issue_name :
        Issue name whose info is requested; ``None``/empty returns everything.

    Returns
    -------
    info :
        A copy of the info mapping for the requested issue.

    Raises
    ------
    ValueError
        If the info has not been computed yet, or the label map is missing.
    """
    if issue_name:
        found = self.info.get(issue_name, None)
    else:
        found = self.info
    if found is None:
        raise ValueError(
            f"issue_name {issue_name} not found in self.info. These have not been computed yet."
        )
    # Shallow copy so callers cannot mutate the stored info in place.
    result = found.copy()
    if issue_name == "label":
        if self._label_map is None:
            raise ValueError(
                "The label map is not available. Most likely, no label column was provided when creating the Data object."
            )
        # Map integer-coded labels back to class names where present.
        for field in ("given_label", "predicted_label"):
            coded = result.get(field, None)
            if coded is not None:
                result[field] = np.vectorize(self._label_map.get)(coded)
        result["class_names"] = self.statistics["class_names"]
    return result
def collect_statistics(self, issue_manager: Union[IssueManager, "Imagelab"]) -> None:
    """Update the "statistics" entry of the info dictionary.

    Parameters
    ----------
    issue_manager :
        Issue manager (or Imagelab) whose ``info["statistics"]`` entries are
        merged into this object's ``info["statistics"]`` dictionary.

    Examples
    --------
    A common use case is to reuse the KNN-graph across multiple issue managers.
    To avoid recomputing the KNN-graph for each issue manager,
    we can pass it as a statistic to the issue managers.

    >>> from scipy.sparse import csr_matrix
    >>> weighted_knn_graph = csr_matrix(...)
    >>> issue_manager_that_computes_knn_graph = ...
    """
    key = "statistics"
    # Merge (not replace), so statistics contributed by earlier managers survive.
    statistics: Dict[str, Any] = issue_manager.info.get(key, {})
    if statistics:
        self.info[key].update(statistics)
def _update_issues(self, issue_manager):
overlapping_columns = list(set(self.issues.columns) & set(issue_manager.issues.columns))
if overlapping_columns:
warnings.warn(
f"Overwriting columns {overlapping_columns} in self.issues with "
f"columns from issue manager {issue_manager}."
)
self.issues.drop(columns=overlapping_columns, inplace=True)
self.issues = self.issues.join(issue_manager.issues, how="outer")
def _update_issue_info(self, issue_name, new_info):
if issue_name in self.info:
warnings.warn(f"Overwriting key {issue_name} in self.info")
self.info[issue_name] = new_info
def collect_issues_from_issue_manager(self, issue_manager: IssueManager) -> None:
    """
    Collect results from an IssueManager and update the corresponding
    attributes of the Datalab object.

    This includes:
    - self.issues
    - self.issue_summary
    - self.info

    Parameters
    ----------
    issue_manager :
        IssueManager object to collect results from.
    """
    self._update_issues(issue_manager)

    name = issue_manager.issue_name
    if name in self.issue_summary["issue_type"].values:
        warnings.warn(
            f"Overwriting row in self.issue_summary with "
            f"row from issue manager {issue_manager}."
        )
        # Drop the stale row; the fresh one is appended below.
        keep = self.issue_summary["issue_type"] != name
        self.issue_summary = self.issue_summary[keep]

    flag_column: str = f"is_{name}_issue"
    issue_count: int = int(issue_manager.issues[flag_column].sum())
    new_row = issue_manager.summary.assign(num_issues=issue_count)
    self.issue_summary = pd.concat(
        [self.issue_summary, new_row],
        axis=0,
        ignore_index=True,
    )

    self._update_issue_info(name, issue_manager.info)
def set_health_score(self) -> None:
    """Set the dataset health score: currently the mean per-issue-type score."""
    mean_score = self.issue_summary["score"].mean()
    self.info["statistics"]["health_score"] = mean_score
def get_data_statistics(data: Data) -> Dict[str, Any]:
    """Get initial statistics about a dataset.

    Called to initialize the "statistics" info in all `Datalab` objects:
    example count, plus class names/count when labels are available.

    Parameters
    ----------
    data : Data
        Data object containing the dataset.
    """
    stats: Dict[str, Any] = {
        "num_examples": len(data),
        "multi_label": False,
        "health_score": None,
    }
    if not data.labels.is_available:
        return stats
    names = data.class_names
    stats["class_names"] = names
    stats["num_classes"] = len(names)
    return stats
| cleanlab/cleanlab | cleanlab/datalab/internal/data_issues.py | data_issues.py | py | 10,122 | python | en | code | 7,004 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "cleanlab.datalab.internal.data.Data",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 36,
"usage_type": "attribute"
},
{
"api... |
73116502755 | import json
from dataclasses import dataclass
from datetime import datetime
from functools import partial
from typing import Optional
import arrow
# import pytest
from arrow import Arrow
from lyubishchev.clockify_fetcher.fetcher import (
generate_time_interval_from_time_series,
) # generate_event_from_time_series,
from lyubishchev.data_model import Metadata, TimeInterval, time_diff_minutes
open_utf8 = partial(open, encoding="UTF-8")
def test_time_diff_minutes() -> None:
    """time_diff_minutes truncates to whole minutes and handles multi-day spans."""
    early: Arrow = arrow.get(datetime(2022, 7, 2, 18, 50, 20), "Australia/Sydney")
    later: Arrow = arrow.get(datetime(2022, 7, 2, 18, 55, 10), "Australia/Sydney")
    iso_stamp: Arrow = arrow.get("2022-07-02T18:59:05.970460+10:00")
    # Multi-day span.
    days_later: Arrow = arrow.get(datetime(2022, 7, 5, 18, 50, 20), "Australia/Sydney")

    assert time_diff_minutes(early, later) == 4
    assert time_diff_minutes(later, iso_stamp) == 3
    assert time_diff_minutes(early, days_later) == 4320  # 3 days
def test_generate_time_interval_from_time_series() -> None:
    """Table-driven test: parse each JSON fixture and compare against the
    expected TimeInterval (or expect a ValueError for malformed input)."""

    @dataclass
    class TestCase:
        description: str
        test_data_path: str      # fixture filename under test_data_folder
        expect_success: bool     # False -> parsing should raise ValueError
        expected_time_interval: TimeInterval

    testcases: list[TestCase] = [
        TestCase(
            description="empty dict should raise ValueError",
            test_data_path="empty.json",
            expect_success=False,
            expected_time_interval=TimeInterval.empty(),
        ),
        TestCase(
            description="label and tag should both parsed correctly",
            test_data_path="time_series_meditation.json",
            expect_success=True,
            expected_time_interval=TimeInterval(
                metadata=Metadata(
                    label={
                        "type": "thinking",
                        "meditation": "",
                    }
                ),
                extra_info="meditation",
                timestamp=arrow.get("2022-07-03T07:11:13Z").to("Australia/Sydney"),
                duration_minutes=6,
            ),
        ),
        TestCase(
            description="wakeup record should pass correctly",
            test_data_path="time_series_wakeup.json",
            expect_success=True,
            expected_time_interval=TimeInterval(
                metadata=Metadata(
                    label={
                        "type": "pmo",
                    }
                ),
                extra_info="morning wakeup",
                timestamp=arrow.get("2022-07-03T00:30:00Z").to("Australia/Sydney"),
                duration_minutes=40,
            ),
        ),
        TestCase(
            description="bed record should pass correctly",
            test_data_path="time_series_bed.json",
            expect_success=True,
            expected_time_interval=TimeInterval(
                metadata=Metadata(
                    label={
                        "type": "self-improving",
                        "reading": "",
                    }
                ),
                extra_info="kindle",
                timestamp=arrow.get("2022-07-01T15:10:00Z").to("Australia/Sydney"),
                duration_minutes=25,
            ),
        ),
        TestCase(
            description="record with project should pass correctly",
            test_data_path="time_series_project.json",
            expect_success=True,
            expected_time_interval=TimeInterval(
                metadata=Metadata(
                    label={
                        "type": "self-improving",
                        "project": "software-engineering",
                    }
                ),
                extra_info="lyubishchev",
                timestamp=arrow.get("2022-07-02T10:23:39").to("Australia/Sydney"),
                duration_minutes=5,
            ),
        ),
        # time_series_error_dup_interval_type.json
        TestCase(
            description="record with duplicate interval type should fail",
            test_data_path="time_series_error_dup_interval_type.json",
            expect_success=False,
            expected_time_interval=TimeInterval.empty(),
        ),
    ]

    test_data_folder: str = "./tests/unit/clockify_fetcher/test_data/"
    for index, case in enumerate(testcases):
        assert_message: str = f"case {index} failed!"
        with open_utf8(test_data_folder + case.test_data_path) as test_data:
            try:
                time_interval: Optional[
                    TimeInterval
                ] = generate_time_interval_from_time_series(json.load(test_data))
            except ValueError as exp:
                # Expected failure path: only OK when the case anticipated it.
                print(exp)
                assert not case.expect_success, assert_message
            except Exception:
                # Unexpected exception type: surface which case broke, then re-raise.
                print(assert_message)
                raise
            else:
                assert case.expect_success
                assert time_interval == case.expected_time_interval, assert_message
| eliteGoblin/lyubishchev | tests/unit/clockify_fetcher/test_generate_time_interval_from_time_series.py | test_generate_time_interval_from_time_series.py | py | 4,992 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "functools.partial",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "arrow.Arrow",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "arrow.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line... |
30654758574 | import os
import numpy as np
import zarr
from pyproj import Proj, transform
from rasterio import Affine
from rasterio.crs import CRS
from rasterio.transform import rowcol, xy
from scipy.stats import binom
def albers_conus_extent():
    """Return the CONUS Albers grid extent as an "xmin ymin xmax ymax" string (meters)."""
    return "-2493045.0 177285.0 2342655.0 3310005.0"
def albers_conus_crs():
    """Return the WKT string for the CONUS Albers Conical Equal Area CRS (WGS84 datum)."""
    return (
        'PROJCS["Albers_Conical_Equal_Area",'
        'GEOGCS["WGS 84",DATUM["WGS_1984",'
        'SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],'
        "TOWGS84[0,0,0,-0,-0,-0,0],"
        'AUTHORITY["EPSG","6326"]],'
        'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
        'UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],'
        'AUTHORITY["EPSG","4326"]],'
        'PROJECTION["Albers_Conic_Equal_Area"],'
        'PARAMETER["standard_parallel_1",29.5],'
        'PARAMETER["standard_parallel_2",45.5],'
        'PARAMETER["latitude_of_center",23],'
        'PARAMETER["longitude_of_center",-96],'
        'PARAMETER["false_easting",0],'
        'PARAMETER["false_northing",0],'
        'UNIT["meters",1]]'
    )
def albers_conus_transform(res=4000):
    """GDAL-style affine coefficients for the CONUS Albers grid at `res` meters."""
    x_origin, y_origin = -2493045.0, 3310005.0
    return [res, 0.0, x_origin, 0.0, -res, y_origin]
def albers_ak_extent():
    """Return the Alaska Albers grid extent as an "xmin ymin xmax ymax" string (meters)."""
    return "-2232345.0 344805.0 1494735.0 2380125.0"
def albers_ak_crs():
    """Return the WKT string for the Alaska Albers (WGS84) equal-area CRS."""
    return (
        'PROJCS["WGS_1984_Albers",'
        'GEOGCS["WGS 84",DATUM["WGS_1984",'
        'SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],'
        'AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],'
        'UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]],'
        'PROJECTION["Albers_Conic_Equal_Area"],'
        'PARAMETER["standard_parallel_1",55],'
        'PARAMETER["standard_parallel_2",65],'
        'PARAMETER["latitude_of_center",50],'
        'PARAMETER["longitude_of_center",-154],'
        'PARAMETER["false_easting",0],'
        'PARAMETER["false_northing",0],'
        'UNIT["metre",1,AUTHORITY["EPSG","9001"]]]'
    )
def albers_ak_transform(res=4000):
    """GDAL-style affine coefficients for the Alaska Albers grid at `res` meters."""
    x_origin, y_origin = -2232345.0, 2380125.0
    return [res, 0.0, x_origin, 0.0, -res, y_origin]
def rowcol_to_latlon(row, col, res=250):
    """Convert CONUS Albers grid (row, col) indices to WGS84 (lat, lon).

    Parameters
    ----------
    row, col : scalar, list, or array of grid indices
    res : grid resolution in meters (default 250)

    Returns
    -------
    (lat, lon) in degrees, same shape as the inputs.
    """
    # isinstance is the idiomatic type test; `type(x) is list` would reject
    # list subclasses. Arrays/scalars pass through untouched.
    if isinstance(row, list):
        row = np.asarray(row)
    if isinstance(col, list):
        col = np.asarray(col)
    x, y = xy(Affine(*albers_conus_transform(res)), row, col)
    p1 = Proj(CRS.from_wkt(albers_conus_crs()))
    p2 = Proj(proj='latlong', datum='WGS84')
    # NOTE(review): pyproj.transform is deprecated in pyproj 2+; migrating to
    # Transformer.from_proj would need the axis order confirmed first.
    lon, lat = transform(p1, p2, x, y)
    return lat, lon
def latlon_to_rowcol(lat, lon, res=250):
    """Convert WGS84 (lat, lon) to CONUS Albers grid (row, col) indices.

    Parameters
    ----------
    lat, lon : scalar, list, or array of coordinates in degrees
    res : grid resolution in meters (default 250)

    Returns
    -------
    (row, col) grid indices.
    """
    if isinstance(lat, list):
        lat = np.asarray(lat)
    if isinstance(lon, list):
        lon = np.asarray(lon)
    x, y = latlon_to_xy(lat, lon)
    # Bug fix: rasterio's rowcol() expects an Affine transform (it inverts it);
    # the raw 6-coefficient list fails. Mirrors rowcol_to_latlon above.
    r, c = rowcol(Affine(*albers_conus_transform(res)), x, y)
    return r, c
def latlon_to_xy(lat, lon, base_crs=albers_conus_crs()):
    """Project WGS84 (lat, lon) into the projected CRS `base_crs` (x, y in meters)."""
    # Note: the default argument is evaluated once at import time, which is
    # fine here because the WKT string is constant.
    p1 = Proj(base_crs)
    p2 = Proj(proj='latlong', datum='WGS84')
    # NOTE(review): pyproj.transform is deprecated in pyproj 2+; a move to
    # Transformer.from_proj would need the axis order confirmed.
    x, y = transform(p2, p1, np.asarray(lon), np.asarray(lat))
    return x, y
def zscore_2d(x, mean=None, std=None):
    """Column-wise z-score of a 2-D array.

    When `mean`/`std` are omitted they are computed NaN-ignoring over axis 0
    and returned alongside the scores as ``(scores, mean, std)``; when both
    are supplied, only the scores are returned.
    """
    stats_supplied = mean is not None and std is not None
    if mean is None:
        mean = np.nanmean(x, axis=0)
    if std is None:
        std = np.nanstd(x, axis=0)
    scores = (x - mean) / std
    if stats_supplied:
        return scores
    return scores, mean, std
def remove_nans(x, y=None, return_inds=False):
    """Drop rows of 2-D `x` containing NaN; with `y`, also drop rows where
    `y` is NaN or inf. Optionally return the boolean keep-mask as well."""
    keep = np.isnan(x).sum(axis=1) == 0
    if y is None:
        return (x[keep], keep) if return_inds else x[keep]
    keep = keep & ~np.isnan(y) & ~np.isinf(y)
    if return_inds:
        return x[keep], y[keep], keep
    return x[keep], y[keep]
def weighted_mean(ds, *args, **kwargs):
    """Time-mean of `ds` weighted by each month's number of days.

    `ds` is expected to be an xarray object with a datetime "time" coordinate
    (presumably monthly data) — TODO confirm against callers.
    NOTE(review): *args/**kwargs are accepted (e.g. to satisfy a callback
    signature) but intentionally ignored.
    """
    weights = ds.time.dt.days_in_month
    return ds.weighted(weights).mean(dim='time')
def get_store(bucket, prefix, account_key=None):
    """Create a zarr Azure Blob Storage store for the "carbonplan" account.

    Falls back to the BLOB_ACCOUNT_KEY environment variable when no
    account_key is supplied.
    """
    if account_key is None:
        account_key = os.environ.get('BLOB_ACCOUNT_KEY', None)
    return zarr.storage.ABSStore(
        bucket,
        prefix=prefix,
        account_name="carbonplan",
        account_key=account_key,
    )
def integrated_risk(p):
    """Percent probability of at least one event in 20 independent trials,
    each with per-trial probability `p` (binomial complement of zero events)."""
    p_no_events = binom.cdf(0, 20, p)
    return 100 * (1 - p_no_events)
| carbonplan/forest-risks | carbonplan_forest_risks/utils.py | utils.py | py | 4,259 | python | en | code | 29 | github-code | 1 | [
{
"api_name": "numpy.asarray",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "rasterio.transform.xy",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "rasterio.Affine",
... |
16001559321 |
import unittest
# Modules needed to support tests
import os
import os.path
import tempfile
# Module under test
import dedupe.detector.detector as detector
class TestProcessFilename(unittest.TestCase):
def _make_standard_file_at(self, filename):
fout = open(filename, 'w+b')
fout.write(self.standard_test_string)
fout.close()
def setUp(self):
self.files_by_size = {}
self.extensions = {}
self.standard_test_string = '1234567890'
self.tempdir = tempfile.mkdtemp(suffix="_unittest")
def tearDown(self):
del self.files_by_size
del self.extensions
os.rmdir(self.tempdir)
def test_text_file(self):
test_extension = 'txt'
test_filename = 'ima_unittest.' + test_extension
test_fqn = os.path.join(self.tempdir, test_filename)
self._make_standard_file_at(test_fqn)
# Check pre-test state
self.failIf(len(self.standard_test_string) in self.files_by_size,
"self.files_by_size incorrectly initialized.")
self.failIf(test_extension in self.extensions,
"self.extensions incorrectly initialized.")
# Test the function
detector.process_filename(test_fqn, self.files_by_size, self.extensions)
self.assert_(len(self.standard_test_string) in self.files_by_size,
"Didn't insert length into self.files_by_size correctly.")
self.assert_(test_extension in self.extensions,
"Didn't insert extension into self.extensions correctly.")
os.remove(test_fqn)
# Allow running this test module directly: `python detector_test.py`.
if __name__ == "__main__":
    unittest.main()
| pcurry/DeDupe | test/python2.7/dedupe/detector/detector_test.py | detector_test.py | py | 1,694 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.rmdir",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"... |
13463218914 | import argparse
import sys
import os
from random import randint as rand
#this will store lists of all the predictions needed for the labels
# Maps unlabeled row index -> {0: vote count, 1: vote count}, accumulated
# across the bagged stumps by tally_predictions().
preds = {}
def dataReader(data_file):
    """Read a whitespace-separated numeric file into a list of float rows."""
    rows = []
    with open(data_file) as handle:
        for raw_line in handle:
            rows.append([float(token) for token in raw_line.split()])
    return rows
def labelReader(labels_file):
    """Read whitespace-separated integer label lines into a list of int lists."""
    parsed = []
    with open(labels_file) as handle:
        for raw_line in handle:
            parsed.append([int(token) for token in raw_line.split()])
    return parsed
def classMaker(label_lines):
    """Build a {row_index: class} mapping and per-class counts.

    Each entry of `label_lines` is a (class, row_index) pair; classes are
    assumed binary (0/1), hence the two-slot count list.
    """
    labels_by_row = {}
    class_counts = [0, 0]
    for entry in label_lines:
        class_id, row_index = entry[0], entry[1]
        labels_by_row[row_index] = class_id
        class_counts[class_id] += 1
    return labels_by_row, class_counts
def filterdata(data, labels):
    """Split row indices into labeled (returned) and unlabeled rows.

    Unlabeled rows are registered in the global `preds` vote table with
    zeroed vote counts; labeled row indices are returned in order.
    """
    global preds
    labeled_rows = []
    unlabeled_count = 0
    for row in range(len(data)):
        if row in labels:
            labeled_rows.append(row)
        else:
            preds[row] = {0: 0, 1: 0}
            unlabeled_count += 1
    return labeled_rows
def Bagging(data, indeces, labels):
    """Draw a bootstrap sample (with replacement) of len(data) labeled rows.

    Returns (sample_rows, {position: label}) where positions run 0..n-1
    in sampling order.
    """
    nrow, ncol = len(data), len(data[0])  # ncol unused; kept for parity
    sample = []
    sample_labels = {}
    while len(sample) < len(data):
        pick = indeces[rand(0, len(indeces) - 1)]
        if labels.get(pick) is None:
            # Should not happen: indeces is built from labeled rows only.
            print("Unexpected bagged data (unclassified) row {}".format(pick))
            continue
        sample_labels[len(sample)] = labels[pick]
        sample.append(data[pick])
    return sample, sample_labels
def gini_sel(data, labels):
    """Pick the single best decision-stump split over all columns.

    Returns (best_column, split_value, left_label, right_label); split_value
    is the midpoint between the two sorted values around the lowest-Gini
    split (0 when no split was recorded).
    """
    nrow, ncol = len(data), len(data[0])
    # ginivals[j] = [best gini seen for column j, index of that split in sorted order]
    ginivals = [[0, 0] for j in range(ncol)]
    temp, c, s = 0, 0, 0
    for j in range(ncol):
        listcol = [item[j] for item in data]
        # keys[k] = original row index of the k-th smallest value in column j
        keys = sorted(range(len(listcol)), key=lambda col: listcol[col])
        listcol = sorted(listcol)
        ginis = []
        prevrow = 0
        for k in range(1, nrow):
            lsize, rsize = k, (nrow - k)
            lp, rp = 0, 0
            # Count label-0 rows on each side of the candidate split.
            # NOTE(review): recounted from scratch for every k — O(n^2) per column.
            for l in range(k):
                if (labels.get(keys[l]) == 0):
                    lp += 1
            for r in range(k, nrow):
                if (labels.get(keys[r]) == 0):
                    rp += 1
            # Size-weighted Gini impurity of the two partitions.
            gini = float((lsize / nrow) * (lp / lsize) * (1 - lp / lsize) + (rsize / nrow) * (rp / rsize) * (1 - rp / rsize))
            ginis.append(gini)
            if (ginis[k - 1] == float(min(ginis))):
                ginivals[j][0] = ginis[k - 1]
                ginivals[j][1] = k
        if (j == 0):
            temp = ginivals[j][0]
        if (ginivals[j][0] <= temp):
            # Column j ties or beats the best gini so far: remember it.
            temp = ginivals[j][0]
            c = j
            s = ginivals[j][1]
    if (s != 0):
        # Convert the split index into the midpoint between its neighbours.
        # NOTE(review): listcol holds the sorted values of the *last* column
        # iterated, which is only correct when the best column is the last
        # one examined — confirm intended behaviour.
        s = float((listcol[s] + listcol[s - 1]) / 2)
    left_count, right_count = 0, 0
    left_label, right_label = 0, 0
    for i in range(nrow):
        if labels.get(i) != None:
            if data[i][c] < s: #for all points left of the split
                if labels[i] == 0: #check if more 0 or 1 labels exist
                    left_count += 1
                else:
                    right_count += 1
    if left_count > right_count:
        right_label = 1
    else:
        left_label = 1
    # print("gini index: {}\ncolumn with best split: {}\nbest split: {}".format(temp,c,s))
    return c, s, left_label, right_label
def tally_predictions(col, split, data, labels, left, right):
    """Cast one stump's vote for every unlabeled row into the global `preds`.

    Rows with feature `col` below `split` vote for the `left` label,
    all others for the `right` label.
    """
    global preds
    for row, features in enumerate(data):
        if labels.get(row) is not None:
            continue  # labeled training row: nothing to predict
        vote = left if features[col] < split else right
        preds[row][vote] += 1
def print_predictions():
    """Print "row label" for each unlabeled row by majority vote (ties -> 1)."""
    global preds
    majority = {}
    for row, votes in preds.items():
        winner = 0 if votes[0] > votes[1] else 1
        print("{} {}".format(row, winner))
        majority[row] = winner
    # return majority  # intentionally not returned, matching the original
def compare_predictions(ap, labels_path):
    """Compare predicted labels `ap` against a ground-truth file and print
    the error rate. The file holds one "label row_index" pair per line."""
    truth = {}
    with open(labels_path) as handle:
        for line in handle:
            fields = line.split()
            truth[int(fields[1])] = int(fields[0])
    wrong = sum(1 for row in ap if ap[row] != truth[row])
    correct = len(ap) - wrong
    print("error: {}/{} = {}".format(wrong, len(ap), 100 * wrong / len(ap)))
def parse_options():
    """Build and run the command-line parser for the bagged stump script."""
    parser = argparse.ArgumentParser(description="Bagging on the HW06 Decision Stump")
    for name, help_text in (
        ("data_file", "path to the data file"),
        ("labels_file", "path to the training labels file"),
    ):
        parser.add_argument(name, help=help_text)
    parser.add_argument("--labs", help="path to the labels file")
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_options()
    data_filepath, labels_filepath = args.data_file, args.labels_file
    data_content = dataReader(data_filepath)
    label_content = labelReader(labels_filepath)
    # classes: {row_index: class}; training rows are those present in it.
    classes, class_sizes = classMaker(label_content)
    training_indeces = filterdata(data_content, classes)
    # Train 101 bagged stumps (odd count -> no overall tie) and let each vote.
    for i in range(101):
        # print("_______iteration:{}________".format(i))
        bag, bag_labs = Bagging(data_content, training_indeces, classes)
        best_col, best_split, leftlab, rightlab = gini_sel(bag, bag_labs)
        tally_predictions(best_col, best_split, data_content, classes, leftlab, rightlab)
    print_predictions()
| FrancisDcruz/ML_Algorithms | Bagged_Decission_Stump/Bagged_Decission_Stump.py | Bagged_Decission_Stump.py | py | 5,659 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 180,
"usage_type": "call"
}
] |
33953298529 | from torch.utils.data import Dataset
from PIL import Image
from glob import glob
from tqdm import tqdm
import os
#SubClass of Dataset that takes the IN9L dataset stored in the folder
#indicated by the parameter "root" and perform operation on it
class IN9L_dataset(Dataset):
    """IN9L image-folder dataset.

    Class subfolders are named "<target>_<name>"; every image path is indexed
    together with its integer target at construction time.
    """

    def __init__(
        self,
        root,
        split,
        transform=None,
    ) -> None:
        super().__init__()
        self.split = split
        self.data_path = []
        self.targets = []
        self.transform = transform
        # train/val live directly under root; other splits nest a 'val' dir.
        if split in ('train', 'val'):
            self.raw_img_data_dir = os.path.join(root, split)
        else:
            self.raw_img_data_dir = os.path.join(
                root, split, 'val')
        # Index every image path and its target.
        self.data_path = []
        self.targets = []
        class_folders = sorted(os.listdir(self.raw_img_data_dir))
        print("-" * 10, f"indexing {self.split} data", "-" * 10)
        for folder in tqdm(class_folders):
            try:
                label = int(folder.split('_')[0])
            except:
                # Folder name does not start with a numeric target: skip it.
                continue
            image_paths = glob(
                os.path.join(self.raw_img_data_dir, folder, '*'))
            self.data_path += image_paths
            self.targets += [label] * len(image_paths)

    def __len__(self):
        return len(self.data_path)

    def __getitem__(self, index: int):
        # Resolve the image and label for this index, apply the optional
        # transform, and return (img, path, target).
        path = self.data_path[index]
        label = self.targets[index]
        img = Image.open(path)
        if self.transform is not None:
            img = self.transform(img)
        return img, path, label
{
"api_name": "torch.utils.data.Dataset",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
37555770747 | import requests
import lxml.html
headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"}
def Japanese_translation(english):
    """Scrape jisho.org for Japanese translations of an English word.

    Returns a set of candidate translations (possibly empty).
    """
    page = lxml.html.fromstring(
        requests.get('https://jisho.org/search/' + english, headers=headers).content
    )
    # Two layouts are probed; both yield "Sentence search for <word>" anchors.
    xpaths = (
        '//*[@id="primary"]/div[1]/div/div[1]/div[2]/ul[1]/li[1]/a/text()',
        '//*[@id="primary"]/div/div/div/ul[1]/li[1]/a/text()',
    )
    candidates = set()
    for expression in xpaths:
        for node_text in page.xpath(expression):
            candidates.add(str(node_text).replace("Sentence search for ", ""))
    return candidates
def fluctuation_correction(japanese):
    """Normalize a Japanese word's spelling variant via jisho.org.

    Returns the site's canonical form, or the input word unchanged when the
    lookup yields no results.
    """
    url = 'https://jisho.org/search/'
    response = requests.get(url + japanese, headers=headers)
    html = lxml.html.fromstring(response.content)
    gets = html.xpath('//*[@id="primary"]/div/div[1]/div[1]/div[2]/ul/li[1]/a/text()')
    if len(gets) == 0:
        # Bug fix: fall back to the whole input word. The original assigned the
        # bare string, so gets[0] below returned only its first character.
        gets = [japanese]
    return gets[0].replace("Sentence search for ", "")
| HiroRittsu/DevelopingEnglish | lib/JISHO_ORG.py | JISHO_ORG.py | py | 1,078 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "lxml.html.html.fromstring",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lxml.html.html",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "lxml.html"... |
24172843200 | import torch
def init_weights(m) -> None:
    """Xavier-uniform init for Linear/Embedding weights; Linear biases -> 0.01.

    Intended to be applied recursively via ``module.apply(init_weights)``.
    """
    is_linear = isinstance(m, torch.nn.Linear)
    if is_linear or isinstance(m, torch.nn.Embedding):
        torch.nn.init.xavier_uniform_(m.weight)
    if is_linear:
        m.bias.data.fill_(0.01)
class CategoryClassification(torch.nn.Module):
    """Text classifier: embedding -> dropout -> bidirectional GRU -> mean-pool -> linear head.

    Parameters
    ----------
    num_embeddings : vocabulary size
    n_classes : number of output classes
    embedding_dim : embedding width (default 256)
    dropout : dropout probability applied after the embedding (default 0.1)
    lstm_hidden_size : per-direction GRU hidden size (default 128)
    """

    def __init__(
        self,
        num_embeddings: int,
        n_classes: int,
        embedding_dim: int = 256,
        dropout: float = 0.1,
        lstm_hidden_size: int = 128
    ) -> None:
        super().__init__()
        self.embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)
        self.dropout = torch.nn.Dropout(p=dropout)
        # Kept the attribute name `lstm` for backward compatibility, but this
        # is a GRU.
        self.lstm = torch.nn.GRU(input_size=embedding_dim,
                                 hidden_size=lstm_hidden_size,
                                 bidirectional=True)
        # Bug fix: the head's input width must track the GRU output size
        # (2 * hidden size for a bidirectional RNN). It was hard-coded to 256,
        # which only worked for the default lstm_hidden_size=128.
        self.head = torch.nn.Linear(in_features=2 * lstm_hidden_size, out_features=n_classes)
        self.apply(init_weights)

    def forward(self, x: 'torch.Tensor') -> 'torch.Tensor':
        x = self.embedding(x)
        x = self.dropout(x)
        x, _ = self.lstm(x)
        # Mean-pool the GRU outputs over dimension 1 before classification.
        x = torch.mean(x, dim=1)
        x = self.head(x)
        return x
| alexflorensa/product-category-classification | models/categoryclassification.py | categoryclassification.py | py | 1,216 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.xavier_uniform_",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
... |
12951104701 | from typing import Any
from datetime import datetime
from datasets import load_dataset
import meilisearch
# https://huggingface.co/datasets/mc4
# Stream the Japanese mC4 split and materialize the first 30k documents.
dataset = load_dataset("mc4", "ja", split="train", streaming=True)
documents: list[dict[str, Any]] = list(dataset.take(30000))
# add primary key and convert datetime string into int type
# NOTE(review): strptime yields a naive datetime, so .timestamp() interprets
# the "Z" stamps in local time — confirm whether UTC epoch seconds were intended.
for i, document in enumerate(documents):
    document["id"] = i
    document["timestamp"] = int(
        datetime.strptime(document["timestamp"], "%Y-%m-%dT%H:%M:%SZ").timestamp()
    )
# Push the documents into a local Meilisearch index named "mc4".
client = meilisearch.Client("http://0.0.0.0:7700", "masterKey")
index = client.index("mc4")
index.update_settings(
    {
        "filterableAttributes": ["id", "text", "timestamp", "url"],
        "pagination": {"maxTotalHits": 200000},
    }
)
index.add_documents_in_batches(documents, primary_key="id")
| Wattyyy/ms-error-reproduction | mc4_index.py | mc4_index.py | py | 824 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datasets.load_dataset",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
21603769373 | from typing import List, Callable, Tuple
# Product of every monkey's divisibility-test divisor; reducing worry values
# modulo this keeps them bounded without changing any divisibility outcome.
SUPER_MODULO = 5*17*7*13*19*11*3*2
def basic_monkey_throw(value: int, divider: int, success_monkey: int, fail_monkey: int) -> Tuple[int, int]:
    """Decide which monkey receives the item, and its reduced worry level.

    Part 1 divided the worry by 3 (``int(value / 3)``); part 2 instead reduces
    modulo the shared divisor product so the values stay bounded.
    """
    reduced = value % SUPER_MODULO
    target = success_monkey if reduced % divider == 0 else fail_monkey
    return target, reduced
class Monkey:
    """A single monkey: its item worry levels plus its inspect and throw rules."""

    def __init__(self, starting_items: List[int], inspect_operation: Callable[[int], int], throw_operation: Callable[[int], Tuple[int, int]]):
        # throw_operation maps a worry level to (target monkey index, new worry level).
        self.items = starting_items
        self.inspect_operation = inspect_operation
        self.throw_operation = throw_operation
        self.inspect_counter = 0  # total number of items this monkey has inspected
class MonkeyBusiness:
    """Advent of Code 2022 day 11 simulation: monkeys throwing items around."""

    def __init__(self):
        self.monkeys: List[Monkey] = []
        # Puzzle input encoded directly: items, inspect op, and throw rule
        # (divisor, target on success, target on failure) per monkey.
        self.monkeys.append(Monkey([74, 64, 74, 63, 53], lambda x: x * 7, lambda x: basic_monkey_throw(x, 5, 1, 6)))  # Monkey 0
        self.monkeys.append(Monkey([69, 99, 95, 62], lambda x: x * x, lambda x: basic_monkey_throw(x, 17, 2, 5)))  # Monkey 1
        self.monkeys.append(Monkey([59, 81], lambda x: x + 8, lambda x: basic_monkey_throw(x, 7, 4, 3)))  # Monkey 2
        self.monkeys.append(Monkey([50, 67, 63, 57, 63, 83, 97], lambda x: x + 4, lambda x: basic_monkey_throw(x, 13, 0, 7)))  # Monkey 3
        self.monkeys.append(Monkey([61, 94, 85, 52, 81, 90, 94, 70], lambda x: x + 3, lambda x: basic_monkey_throw(x, 19, 7, 3)))  # Monkey 4
        self.monkeys.append(Monkey([69], lambda x: x + 5, lambda x: basic_monkey_throw(x, 3, 4, 2)))  # Monkey 5
        self.monkeys.append(Monkey([54, 55, 58], lambda x: x + 7, lambda x: basic_monkey_throw(x, 11, 1, 5)))  # Monkey 6
        self.monkeys.append(Monkey([79, 51, 83, 88, 93, 76], lambda x: x * 3, lambda x: basic_monkey_throw(x, 2, 0, 6)))  # Monkey 7

        # Example input from the puzzle statement, kept for reference:
        # self.monkeys.append(Monkey([79, 98], lambda x: x * 19, lambda x: basic_monkey_throw(x, 23, 2, 3)))  # Monkey 0
        # self.monkeys.append(Monkey([54, 65, 75, 74], lambda x: x + 6, lambda x: basic_monkey_throw(x, 19, 2, 0)))  # Monkey 0
        # self.monkeys.append(Monkey([79, 60, 97], lambda x: x * x, lambda x: basic_monkey_throw(x, 13, 1, 3)))  # Monkey 0
        # self.monkeys.append(Monkey([74], lambda x: x + 3, lambda x: basic_monkey_throw(x, 17, 0, 1)))  # Monkey 0

    def monkey_in_the_middle(self):
        """Play one full round: each monkey inspects and throws every item it holds."""
        for monkey in self.monkeys:
            while len(monkey.items) >= 1:
                item = monkey.items.pop(0)
                worried_item = monkey.inspect_operation(item)
                monkey.inspect_counter += 1
                new_monkey, new_item = monkey.throw_operation(worried_item)
                self.monkeys[new_monkey].items.append(new_item)

    def play_rounds(self, rounds = 0):
        """Play `rounds` rounds, then report per-monkey counts and the
        "monkey business" product of the two highest inspection counts."""
        for index in range(rounds):
            self.monkey_in_the_middle()
            print(f"Round {index} finished.")
            # print(f"Round {index} finished. Each monkey contains these items:")
            # for index, monkey in enumerate(self.monkeys):
            #     print(f"Monkey {index} has the following items: {monkey.items}")
            # print()
        for index, monkey in enumerate(self.monkeys):
            print(f"Monkey {index} has {len(monkey.items)} items and inspected {monkey.inspect_counter} items")
        print()
        # Get the two monkeys with the highest inspection count.
        sorted_monkeys = sorted(self.monkeys, key=lambda monkey: monkey.inspect_counter, reverse=True)
        print(f"Monkey has the highest inspection count with {sorted_monkeys[0].inspect_counter}")
        print(f"Monkey has the second highest inspection count with {sorted_monkeys[1].inspect_counter}")
        print(f"Multiplied together, they have {sorted_monkeys[0].inspect_counter * sorted_monkeys[1].inspect_counter}")
# Run the part-2 simulation for 10,000 rounds.
if __name__ == "__main__":
    mb = MonkeyBusiness()
    # for monkey in mb.monkeys:
    #     for item in monkey.items:
    #         SUPER_MODULO *= item
    mb.play_rounds(10_000)
{
"api_name": "typing.Tuple",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_numb... |
15087752784 | import base64
from Crypto.Cipher import AES as aes
from Crypto.Util.Padding import pad
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad, unpad
key = get_random_bytes(16)  # fresh 128-bit AES key generated per run
iv = b''
if iv == b'':
    iv = get_random_bytes(16)  # random 16-byte CBC IV when none was supplied
print(iv)
# cipher = aes.new(key, aes.MODE_CBC, iv)
# msg = "If you try to encrypt file, you can either use the openSSL or a Python solution using Crypto contributed by Thijs.".encode()
# cipher_txt = cipher.encrypt(pad(msg, aes.block_size))
# print(cipher_txt)
# cc = aes.new(key, aes.MODE_CBC, iv)
# print(unpad(cc.decrypt(cipher_txt), aes.block_size))
def encryptCSV(username, key, iv):
    """AES-CBC encrypt <username>.csv into <username>.txt."""
    with open(username + '.csv', 'rb') as source:
        plaintext = source.read()
    cipher = aes.new(key, aes.MODE_CBC, iv)
    ciphertext = cipher.encrypt(pad(plaintext, aes.block_size))
    with open(username + '.txt', 'wb+') as sink:
        sink.write(ciphertext)
def decryptCSV(username, key, iv):
    """AES-CBC decrypt <username>.txt in place (plaintext overwrites the .txt)."""
    with open(username + '.txt', 'rb') as source:
        ciphertext = source.read()
    cipher = aes.new(key, aes.MODE_CBC, iv)
    plaintext = unpad(cipher.decrypt(ciphertext), aes.block_size)
    with open(username + '.txt', 'wb+') as sink:
        sink.write(plaintext)
def testencryptCSV(username, key, iv):
    """AES-CBC encrypt <username>.csv in place (ciphertext replaces the .csv)."""
    filename = username + '.csv'
    # open the unencrypted .csv
    with open(filename, 'rb') as source:
        plaintext = source.read()
    cipher = aes.new(key, aes.MODE_CBC, iv)
    ciphertext = cipher.encrypt(pad(plaintext, aes.block_size))
    # replace with an encrypted version of the .csv
    with open(filename, 'wb+') as sink:
        sink.write(ciphertext)
def testdecryptCSV(username, key, iv):
    """AES-CBC decrypt <username>.csv in place (plaintext replaces the .csv)."""
    filename = username + '.csv'
    with open(filename, 'rb') as source:
        ciphertext = source.read()
    cipher = aes.new(key, aes.MODE_CBC, iv)
    plaintext = unpad(cipher.decrypt(ciphertext), aes.block_size)
    with open(filename, 'wb+') as sink:
        sink.write(plaintext)
# Round-trip smoke test: encrypt then decrypt test.csv in place.
testencryptCSV('test', key, iv)
testdecryptCSV('test', key, iv)
{
"api_name": "Crypto.Random.get_random_bytes",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "Crypto.Random.get_random_bytes",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "Crypto.Cipher.AES.new",
"line_number": 25,
"usage_type": "call"
},
{
... |
69971042913 | # import modules
import pygame
import time
import random
#initialize pygame
#initialize pygame
pygame.init()
#########################################################
# our game variables

# RGB color constants
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
blue = (20, 136, 234)

display_width = 800 #game width
display_height = 600 #game height
gameDisplay = pygame.display.set_mode((display_width, display_height)) #screen size
pygame.display.set_caption('Slither') #pygame caption
icon = pygame.image.load('apple.png')
pygame.display.set_icon(icon)
img = pygame.image.load('snake.png')      # snake head sprite (drawn facing up)
appleimg = pygame.image.load('apple.png') # apple sprite
block_size = 20 #default size of one snake segment / apple, in pixels
clock = pygame.time.Clock() #get clock frames per sec
direction = 'right'  # global current travel direction of the snake
smallFont = pygame.font.SysFont('consolas', 15) #generate font variable
medFont = pygame.font.SysFont('consolas', 30) #generate font variable
largeFont = pygame.font.SysFont('consolas', 50) #generate font variable
# function for game intro menu
def game_intro():
    """Show the welcome screen until the player presses C (continue)
    or Q / window-close (quit)."""
    waiting = True
    while waiting:
        # Poll input: C leaves the intro, Q or closing the window exits.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_c:
                    waiting = False
                if event.key == pygame.K_q:
                    pygame.quit()
                    quit()
        # Redraw the static intro text every frame.
        gameDisplay.fill(blue)
        message_to_screen('Welcome to Slither', white, -100, 'large')
        message_to_screen('The objective of the game is to eat red apples', black, -30)
        message_to_screen('The more apples you eat, the longer you get', black, 10)
        message_to_screen('If you run into the edges you die', black, 50)
        message_to_screen('Press C to continue or Q to quit', black, 100)
        pygame.display.update()
        clock.tick(15)
#function to draw the snake
def snakeSlither(block_size, snakeList):
    """Draw the snake: a rotated head sprite plus one white square per
    body segment (the head is the last entry in snakeList)."""
    # Counter-clockwise rotation (degrees) per travel direction; the
    # sprite image faces 'up' by default, so 'up' uses it unrotated.
    rotations = {'right': 270, 'left': 90, 'down': 180}
    if direction == 'up':
        head = img
    else:
        head = pygame.transform.rotate(img, rotations[direction])
    gameDisplay.blit(head, (snakeList[-1][0], snakeList[-1][1]))
    # Body segments (all but the head) drawn as plain rectangles.
    for XnY in snakeList[:-1]:
        pygame.draw.rect(gameDisplay, white, [XnY[0], XnY[1], block_size, block_size])
def text_objects(text, color, size):
    """Render *text* at the requested size ('small', 'medium' or
    'large') and return the surface together with its bounding rect."""
    fonts = {'small': smallFont, 'medium': medFont, 'large': largeFont}
    textSurface = fonts[size].render(text, True, color)
    return textSurface, textSurface.get_rect()
#function to send message to screen
def message_to_screen(msg, color, y_displace = 0, size='small'):
    """Blit *msg* centred horizontally, offset from the vertical screen
    centre by *y_displace* pixels, using the given font size."""
    textSurf, textRect = text_objects(msg, color, size)
    textRect.center = (display_width / 2), (display_height / 2) + y_displace
    gameDisplay.blit(textSurf, textRect)
#function to make game loop
def gameLoop():
    """Run one complete game session.

    Restarts itself recursively when the player presses C on the
    game-over screen; quits pygame when the session ends.
    """
    global direction
    direction = 'right'
    lead_x = display_width/2  # head x, starts at screen centre
    lead_y = display_height/2  # head y
    lead_x_change = 20  # per-frame x delta (start moving right)
    lead_y_change = 0  # per-frame y delta
    snakeList = []  # list of [x, y] segments; head is the last entry
    snakeLength = 1
    gameExit = False
    gameOver = False
    # First apple at a random on-screen position.
    randAppleX = random.randrange(0, display_width-block_size)
    randAppleY = random.randrange(0, display_height-block_size)
    # Main frame loop; runs until the player quits.
    while not gameExit:
        # Game-over screen: wait for C (restart) or Q / close (quit).
        while gameOver:
            gameDisplay.fill(blue)
            message_to_screen('Game over', white, -50, size='large')
            message_to_screen('Press C to play again or Q to quit',white, 50, 'medium')
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    gameOver = False
                    gameExit = True
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        gameExit = True
                        gameOver = False
                    if event.key == pygame.K_c:
                        # NOTE(review): recursive restart — every replay
                        # deepens the call stack; confirm acceptable.
                        gameLoop()
        for event in pygame.event.get():
            # Window close ends the session.
            if event.type == pygame.QUIT:
                gameExit = True
            # Arrow keys steer the snake one block per frame.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    direction = 'left'
                    lead_x_change -= block_size
                    lead_y_change = 0
                elif event.key == pygame.K_RIGHT:
                    direction = 'right'
                    lead_x_change += block_size
                    lead_y_change = 0
                elif event.key == pygame.K_UP:
                    direction = 'up'
                    lead_y_change -= block_size
                    lead_x_change = 0
                elif event.key == pygame.K_DOWN:
                    direction = 'down'
                    lead_y_change += block_size
                    lead_x_change = 0
        # Hitting any screen edge ends the game.
        if lead_x >= display_width or lead_x <= 0 or lead_y >= display_height or lead_y <= 0:
            gameOver = True
        lead_x += lead_x_change
        lead_y += lead_y_change
        gameDisplay.fill(blue)  # clear background
        AppleThickness = 30
        #pygame.draw.rect(gameDisplay, red, [randAppleX, randAppleY, AppleThickness, AppleThickness])
        gameDisplay.blit(appleimg, (randAppleX, randAppleY))
        # Append the new head; trim the tail when over target length.
        snakeHead = []
        snakeHead.append(lead_x)
        snakeHead.append(lead_y)
        snakeList.append(snakeHead)
        if len(snakeList) > snakeLength:
            del snakeList[0]
        # Self-collision: head overlapping any body segment ends the game.
        for eachSegment in snakeList[:-1]:
            if eachSegment == snakeHead:
                gameOver = True
        snakeSlither(block_size, snakeList)  # draw the snake
        pygame.display.update()  # flip the frame
        # Apple collision, tested for both x edges of the head block.
        if lead_x > randAppleX and lead_x < randAppleX + AppleThickness or lead_x + block_size > randAppleX and lead_x + block_size < randAppleX + AppleThickness:
            if lead_y > randAppleY and lead_y < randAppleY + AppleThickness:
                # Respawn the apple at a new random location.
                randAppleX = random.randrange(0, display_width-block_size)
                randAppleY = random.randrange(0, display_height-block_size)
                # Grow the snake by one segment.
                snakeLength += 1
            elif lead_y + block_size > randAppleY and lead_y + block_size < randAppleY + AppleThickness:
                # Respawn the apple at a new random location.
                randAppleX = random.randrange(0, display_width-block_size)
                randAppleY = random.randrange(0, display_height-block_size)
                # Grow the snake by one segment.
                snakeLength += 1
        clock.tick(5)  # 5 FPS game speed
    pygame.quit()
    quit()
# Entry point: show the intro menu, then run the main game loop.
game_intro()
gameLoop() #call game loop
| evanswanjau/slither | slither.py | slither.py | py | 7,241 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
31533506345 | # 1. Connect to database
from pymongo import MongoClient
# from bson.objectid import ObjectId
# NOTE(review): credentials are hard-coded in the connection URI —
# consider moving them to environment variables.
uri = "mongodb://admin:Hanoi1@ds029224.mlab.com:29224/c4e21"
client = MongoClient(uri)
db = client.get_database()
# 2. Select collection
posts = db['posts']
# 3. Create document
post = {
    "title": "Hôm nay là thứ 3",
    "content": "Còn 3 hôm nữa mới là cuối tuần",
}
# 4. Insert document (disabled; this run only queries)
# posts.insert_one(post)
# print("Done")
post_list = posts.find()
# for post in post_list:
#     print(post)
# 5. Case-insensitive substring match on the title field.
cond = {
    "title": {"$regex": "hôm nay", "$options": "i"}
}
post = posts.find_one(cond)
print(post)
| unpreghini/htanh-lab-c4e21 | Lab1/db_blog.py | db_blog.py | py | 623 | python | vi | code | 0 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
}
] |
40296713959 | '''
Rotina para gabarito da estratégia de busca de geolocalização do sensor SEARCH1
Programa de Autoria de Henrique Guimarães Coutinho. Domínio público.
Última atualização: 09/09/2021.
Como citar: endereço github.
'''
import numpy as np
#import matplotlib
import matplotlib.pyplot as plt
import math as m
# --- Specific inputs ---
# Environment
temp = 31.592  # air temperature (degC)
vel_som = 331 * m.sqrt(1+(temp/273))  # speed of sound (m/s)
# Measured time differences of arrival (TDoA), in milliseconds
t12 = 1  # (ms)
t13 = 2  # (ms)
t14 = 3  # (ms)
t23 = 5  # (ms)
t24 = 3  # (ms)
t34 = 2  # (ms)
# Coordinates of the sensors (beacons), in metres
beacon_x1 = 0.20
beacon_y1 = 0.55
beacon_x2 = 2.75
beacon_y2 = 3.00
beacon_x3 = 2.40
beacon_y3 = 9.00
beacon_x4 = 0.1
beacon_y4 = 9.75
# Coordinates of the sound emitter (target)
target_x = 1.45
target_y = 5.45
# Plot geometry - axis extents (metres)
axis_1 = [0, 2.9, 0, 10.93]


def _dist(x_a, y_a, x_b, y_b):
    """Euclidean distance between (x_a, y_a) and (x_b, y_b)."""
    return m.sqrt(m.pow(x_a - x_b, 2) + m.pow(y_a - y_b, 2))


def _range_diff(bx_a, by_a, bx_b, by_b):
    """Range difference d(target, beacon a) - d(target, beacon b).

    TDOA equation: c * dt_ab = dd_ab (see LI, X., DENG, Z. — the
    correct form; O'KEEFE's variant was wrong).
    """
    return (_dist(target_x, target_y, bx_a, by_a)
            - _dist(target_x, target_y, bx_b, by_b))


# --- Expected range/time differences per beacon pair ---
d12 = _range_diff(beacon_x1, beacon_y1, beacon_x2, beacon_y2)
d13 = _range_diff(beacon_x1, beacon_y1, beacon_x3, beacon_y3)
d14 = _range_diff(beacon_x1, beacon_y1, beacon_x4, beacon_y4)
d23 = _range_diff(beacon_x2, beacon_y2, beacon_x3, beacon_y3)
d24 = _range_diff(beacon_x2, beacon_y2, beacon_x4, beacon_y4)
d34 = _range_diff(beacon_x3, beacon_y3, beacon_x4, beacon_y4)
# Convert metres to milliseconds via the speed of sound.
t12esp = (d12/vel_som)*1000  # (ms)
t13esp = (d13/vel_som)*1000  # (ms)
t14esp = (d14/vel_som)*1000  # (ms)
t23esp = (d23/vel_som)*1000  # (ms)
t24esp = (d24/vel_som)*1000  # (ms)
t34esp = (d34/vel_som)*1000  # (ms)
print("Tempo 1 -> 2 esperado:",t12esp)
print("Tempo 1 -> 3 esperado:",t13esp)
print("Tempo 1 -> 4 esperado:",t14esp)
print("Tempo 2 -> 3 esperado:",t23esp)
print("Tempo 2 -> 4 esperado:",t24esp)
print("Tempo 3 -> 4 esperado:",t34esp)
# --- Plot: grid and titles ---
plt.grid()
plt.xlabel('Superfície x (metros)')
plt.ylabel('Superfície y (metros)')
plt.title('Mapeamento da Superfície de Teste')
# Sensor positions - red dots
plt.plot(beacon_x1,beacon_y1, 'ro')
plt.plot(beacon_x2, beacon_y2, 'ro')
plt.plot(beacon_x3,beacon_y3, 'ro')
plt.plot(beacon_x4, beacon_y4, 'ro')
# Target position - blue dot
plt.plot(target_x, target_y, 'bo')
# Point labels
names = ['1', '2', '3', '4', 'Emissor']
plt.text(beacon_x1 + 0.1, beacon_y1 + 0.1, names[0], fontsize = 8)
plt.text(beacon_x2 + 0.1, beacon_y2 + 0.1, names[1], fontsize = 8)
plt.text(beacon_x3 - 0.1, beacon_y3 + 0.2, names[2], fontsize = 8)
plt.text(beacon_x4 + 0.1, beacon_y4 + 0.1, names[3], fontsize = 8)
plt.text(target_x + 0.1, target_y + 0.1, names[4], fontsize = 10)
# Axis extents, aspect ratio 1:1, show
plt.axis(axis_1)
plt.gca().set_aspect(1)
plt.show()
| henriquecoutin/search1 | Explore_and_Analyse_Data.py | Explore_and_Analyse_Data.py | py | 3,585 | python | pt | code | 1 | github-code | 1 | [
{
"api_name": "math.sqrt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 56,
"... |
7305644829 | import json
from src import plugin_loader
from unittest import TestCase
from src.attribute_methods import attribute_runner
root_directory = 'unit-tests/attribute_methods/sources/'
class TestAttributeRunner(TestCase):
    """Integration test for attribute_methods.attribute_runner.compare."""

    def test_equal(self):
        """A new source within max_allowed distance of one stored
        source should yield exactly one candidate for further checks."""
        max_allowed = 0.4
        with open(root_directory + 'settings.json') as data_file:
            data = json.load(data_file)
        metrics = plugin_loader.load(data['attribute_methods'])
        with open(root_directory + 'gauss_method.c') as new_source_file:
            new_source = new_source_file.read()
        old_sources = {'gauss': 3}
        further = attribute_runner.compare(metrics, new_source, old_sources, max_allowed)
        # Fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(1, len(further))
| akhtyamovrr/plagchecker | unit-tests/attribute_methods/test_attribute_runner.py | test_attribute_runner.py | py | 732 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "src.plugin_loader.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "src.plugin_loader",... |
34308854571 | # [si]rc - Asynchronous source RCON tool.
from decorators import *
import functools
from textwrap import dedent
import logging
import model
import sqlalchemy
import time
__all__ = [
"list", "select",
"add", "set", "delete",
"stats", "status", "rcon",
"error", "help"
]
def list( c, e, channel, server, command, argv ):
    """List servers in this channel"""
    # NOTE(review): shadows the builtin `list`; the IRC command name is
    # derived from the function name, so renaming would change the bot.
    reply = "Servers:\n"
    for s in channel.servers:
        reply += str(s) + "\n"
    return reply
@admin
def select( c, e, channel, server, command, argv ):
    """Show or select the active server of this channel"""
    # With an argument: look up the named server and make it the only
    # selected one for this channel.
    if len(argv) > 1:
        new = model.Server.search( argv[1], channel ).first()
        if not new:
            return "no such file, directory or server: " + str(argv[1])
        # Deselect all, then select the requested one.
        for s in channel.servers:
            s.selected = False
        new.selected = True
        model.session.commit()
        return "now selected: " + str(new)
    # No argument: report the currently selected server.
    current = model.Server.select(channel).first()
    return "selected: " + str(current)
@admin
@private
def add( c, e, channel, server, command, argv ):
    """
    Add a server to this channel
    only available as a private message due to the rcon password.
    """
    args = ("name", "host", "port", "rcon", "servertype")
    if len(argv) < 5:
        return "usage: !add name host port rcon [normal|tv]"
    if len(argv) < 6:
        # NOTE(review): args[:5] keeps all five names, so this slice is
        # a no-op; zip() below simply drops the missing servertype.
        # Confirm whether args[:4] was intended.
        args = args[:5]
    else:
        # Anything other than "tv" is normalised to "normal".
        if argv[5] != "tv":
            argv[5] = "normal"
    argv = map(unicode, argv)  # Python 2: map returns a list here
    info = dict(zip(args,argv[1:]))
    try:
        # Reject duplicate names within the channel.
        ss = model.Server.query.filter_by( channel=channel, name=argv[1] ).all()
        if len(ss) > 0:
            return "server '{0}' exists!".format(argv[1])
    except sqlalchemy.orm.exc.NoResultFound:
        s = None
    s = model.Server( channel=channel, **info )
    channel.servers.append( s )
    model.session.commit()
    return "server '{0}' created!".format(s.name)
@admin
@server_required
def set( c, e, channel, server, command, argv ):
    """Set/change properties of a server."""
    # NOTE(review): shadows the builtin `set`; kept because the IRC
    # command name is the function name.
    valid = ("name", "host", "port", "rcon", "config", "servertype")
    alias = {"hostname": "host",
             "portnumber": "port",
             "password": "rcon",
             "pass": "rcon",
             "cfg": "config",
             "type": "servertype"}
    reply = ""
    if len(argv) < 2:
        reply = "usage: [@server]!edit ["
        reply += "=value] [".join(valid) + "=value]"
        return reply
    # Tokenise "key=value" arguments; a bare "=" separator is skipped.
    tokens = []
    compl = []
    for a in argv[1:]:
        if len(a) > 1 and "=" in a:
            tokens.extend( a.split("=", 1) )
        elif a != "=":
            tokens.append( a )
    tokens = filter( lambda x: len(x)>0, tokens)  # Python 2: list result
    # Consume tokens pairwise as (property, value).
    # NOTE(review): an odd token count raises IndexError on the 2nd pop.
    while tokens:
        l = tokens.pop(0)
        r = tokens.pop(0)
        if l in alias:
            l = alias[l]
        ok = True
        if l not in valid:
            ok = False
        elif not hasattr(server,l):
            ok = False
        if not ok:
            reply += "no server property: {0}\n".format(l)
            continue
        # Never accept an rcon password outside a private query.
        if l == "rcon" and not e.eventtype().startswith( "priv" ):
            reply += "not setting rcon password in public channel\n"
            continue
        server.__setattr__(l,r)
        compl.append(l)
    model.session.commit()
    if len( compl )>0:
        reply += "succesfully set for '{0}': ".format(server.name) + ", ".join(compl)
    return reply.strip()
@admin
@server_required
def delete( c, e, channel, server, command, argv ):
    """
    Delete the selected server.
    For confirmation you are required to execute
    the command twice.
    """
    now = time.time()
    try:
        # Timestamp of the last delete attempt for this channel/server.
        t = model.Server.delete_cache[channel.id,server.id]
    except KeyError:
        t = 0
    # First invocation (or >5 s since the last): only arm confirmation.
    if now-t > 5:
        model.Server.delete_cache[channel.id,server.id] = now
        reply = "Please confirm deletion of server '{0}'".format(server)
        reply += " (same command again within 5 seconds)"
        return reply
    else:
        # Confirmed within the window: actually delete.
        name = str(server)
        server.delete()
        model.session.commit()
        return "{0} deleted.".format(name)
@server_required
def stats( c, e, channel, server, command, argv ):
    """Display the command 'stats' on selected server."""
    # Asynchronous rcon call; the callback relays the reply to IRC.
    r= server.connection.execute( "stats",
            cb=functools.partial( _ridretstr1_cb, c, e ) )
@server_required
def status( c, e, channel, server, command, argv ):
    """Display query info about the selected server."""
    # server.info raises KeyError while the query is still pending.
    try:
        info = server.info
    except KeyError:
        return "please wait"
    # NOTE(review): n is unused; the lookup only probes for 'name'.
    try:
        n = info['name']
    except KeyError:
        return "no status available"
    reply = "'{name}' playing {mapname} ({players} players)".format(**info)
    # 0x01 flags a password-protected server in the query response.
    if info['password'] == 0x01:
        reply += " password protected"
    return reply
@admin
@server_required
def rcon( c, e, channel, server, command, argv ):
    """Execute a raw rcon command at the selected server."""
    # Joins everything after "!rcon" into one command string; the
    # callback relays the asynchronous reply to IRC.
    r= server.connection.execute( " ".join(argv[1:]),
            cb=functools.partial( _ridretstr1_cb, c, e ) )
def error( c, e, channel, server, command, argv ):
    # Intentionally raises ZeroDivisionError to exercise the bot's
    # error-handling path.
    return "3 / 0 = {0}".format( 3 / 0 )
def help( c, e, channel, server, command, argv ):
    """Show available commands"""
    import commands
    # With an argument: show the full docstring of one command.
    if len(argv)>1:
        cmd = argv[1].lstrip("!")
        if cmd not in __all__:
            return "no such command or directory: " + cmd
        func = getattr(commands, cmd)
        doc = dedent(func.__doc__ or "no help available.. read source")
        return doc.strip()
    # Otherwise build an aligned one-line summary per command.
    result = {}
    maxlen = 0
    # NOTE(review): the loop variable rebinds (shadows) the connection
    # parameter `c`; harmless here because `c` is not used afterwards.
    for c in sorted(__all__):
        func = getattr(commands,c)
        if not hasattr( func, "__call__" ):
            continue
        if func.__doc__ is None:
            doc = "n/a"
        else:
            doc = func.__doc__.strip()
        # Keep only the first paragraph of a multi-paragraph docstring.
        if "\n\n" in doc:
            doc = doc.split("\n\n",1)[0].strip()
        result[c] = (doc, func)
        if len(c) > maxlen:
            maxlen = len(c)
    reply = "Available commands:\n"
    for c in sorted(result.keys()):
        reply += " !{0:<{1}} : {2}\n".format(c,maxlen,result[c][0])
    reply += "type !help <command> for more info"
    return reply
def _ridretstr1_cb(c, e, rid, ret, str1):
    """Rcon callback: relay the textual payload *str1* of a reply
    (request id *rid*, return code *ret*) to the IRC target, one
    privmsg per line."""
    if str1 and isinstance(str1, basestring):  # Python 2 str/unicode
        for line in str1.strip().splitlines():
            c.privmsg( e.target(), line )
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 textwidth=79:
| koenbollen/sirc | src/commands.py | commands.py | py | 6,447 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "model.Server.search",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "model.Server",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "model.session.commit",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "model.sess... |
17609033288 | from weconnect.models.reviews import Review
from flask import current_app as app
class ReviewController():
    """
    Controls all CRUD operations of the Review object.
    """
    def create_review(self, content, business_id, user_id):
        """
        Creates and adds a review to the app database.
        Returns:
            A tuple of (True, content) if success adding review,
            (False, error) otherwise.
        """
        try:
            # Next id = max existing id + 1 (assumes int keys — TODO confirm).
            ids = [x for x in app.database['Reviews'].keys()]
            if ids:
                review_id = max(ids) + 1
            else:
                review_id = 1
            self.new_review = Review(review_id, content, business_id, user_id)
            review_details = self.new_review.details()
            app.database['Reviews'][self.new_review.id] = review_details
            return (True, "Added review successfully!")
        except Exception as e:
            # NOTE(review): returns the exception *type*, not its
            # message; confirm str(e) was not intended.
            return (False, str(type(e)))
    def retrieve_reviews(self, business_id):
        """
        Retrieves Review/s for specific business and or user.
        Returns:
            A tuple of
            (True, [(review_id, content, business_id, user_id)])
            if success retrieving reviews,
            (False, error) otherwise.
        """
        all_reviews = app.database['Reviews']
        # NOTE(review): assumes each stored review is indexable with the
        # business id at position 1 — verify against Review.details().
        self.business_reviews = [x for x in all_reviews
                                 if all_reviews[x][1] == business_id]
        reviews = {}
        for i in self.business_reviews:
            reviews[i] = app.database['Reviews'][i]
        return (True, reviews)
| JoshuaOndieki/weconnect | weconnect/review_controller.py | review_controller.py | py | 1,641 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "flask.current_app.database",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "flask.current_app",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "weconnect.models.reviews.Review",
"line_number": 25,
"usage_type": "call"
},
{
... |
16139182495 | import torch
from torch import nn
import torchvision.datasets as datasets
from torch.utils.data import Subset, DataLoader, TensorDataset
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from typing import Tuple
import os
import cv2
def get_cv_datasets(
    dataset: torch.Tensor,
    epoch_nr: int,
    indices: np.ndarray,
    n_folds: int = 5,
    batch_size: int = 64
):
    """Build (train, validation) DataLoaders for rotating k-fold CV.

    Epoch e uses fold (e mod n_folds) of *indices* as the validation
    slice and all remaining indices for training.
    """
    total = len(indices)
    fold = epoch_nr % n_folds
    lo = int(fold / n_folds * total)
    hi = int((fold + 1) / n_folds * total)
    val_idx = indices[lo:hi]
    train_idx = np.concatenate((indices[:lo], indices[hi:]))
    return (
        DataLoader(Subset(dataset, train_idx), batch_size=batch_size),
        DataLoader(Subset(dataset, val_idx), batch_size=batch_size),
    )
def gaussian_converter(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Split a (batch, 2*k, 1, 1) parameter map into (mean, logvar).

    The first k channels hold the means, the last k the log-variances;
    indexing the trailing 1x1 spatial dims squeezes them away.
    """
    half = x.shape[1] // 2
    return x[:, :half, 0, 0], x[:, half:, 0, 0]
class VAE(nn.Module):
    """Convolutional variational autoencoder for grayscale images.

    Encodes a (1, H, W) image into a z_dim-dimensional Gaussian
    (mean + log-variance), samples with the reparameterisation trick,
    and decodes back to image space through upsampling convolutions.
    """

    def __init__(self, z_dim = 2, device: str = "cuda", input_shape=(160, 100)) -> None:
        super(VAE, self).__init__()
        # Downsampling feature extractor (tall rectangular kernels).
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 64, (20, 3)),
            nn.MaxPool2d(2, 2),
            nn.ReLU(),
            nn.Conv2d(64, 64, (10, 3)),
            nn.MaxPool2d(2, 2),
            nn.ReLU(),
            nn.Conv2d(64, 128, (10, 3)),
            nn.ReLU(),
            nn.Conv2d(128, 256, 3),
            nn.ReLU()
        )
        self.z_dim = z_dim
        # Spatial size of the decoder's seed feature map: 1/10 of the
        # input in each dimension (matches the 2*2*2*1.25 upsampling).
        self.z_decoded_dim = [int(i / 10) for i in input_shape]
        # Probe the encoder with a dummy input to size the linear head.
        encoded_shape = torch.tensor(self.encoder(torch.empty((1, 1, *input_shape))).shape)
        # Emits mean and log-variance concatenated: 2 * z_dim values.
        self.gaussian_param_encoder = nn.Linear(torch.prod(encoded_shape), 2 * z_dim)
        self.hidden_dim = 64
        self.z_linear = nn.Linear(z_dim, self.z_decoded_dim[0] * self.z_decoded_dim[1] * self.hidden_dim)
        # Upsampling decoder: 2 * 2 * 2 * 1.25 = 10x spatial growth.
        self.decoder = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(self.hidden_dim, 512, 3),
            nn.ReLU(),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(512, 256, 3),
            nn.ReLU(),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(256, 128, 3),
            nn.ReLU(),
            nn.Upsample(scale_factor=1.25, mode='bilinear', align_corners=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(128, 1, 3),
        )
        self.float()
        self.to(device)

    def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor):
        """Sample z ~ N(mu, exp(logvar)) via the reparameterisation trick."""
        std = torch.exp(logvar / 2)
        eps = torch.randn_like(std)
        return mu + std * eps

    def decode(self, z: torch.Tensor, apply_sigmoid: bool = True) -> torch.Tensor:
        """Map latent z back to image space; sigmoid yields [0, 1] pixels
        (skip it when training with a logits-based loss such as BCE)."""
        batch_size = z.shape[0]
        z_hidden = self.z_linear(z).reshape(batch_size, self.hidden_dim, *self.z_decoded_dim)
        decoded = self.decoder(z_hidden)
        if apply_sigmoid:
            decoded = decoded.sigmoid()
        return decoded

    def forward(
        self,
        x: torch.Tensor,
        apply_sigmoid: bool = True
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Encode, sample, decode; returns (reconstruction, mu, logvar)."""
        encoded = self.encoder(x).view(x.shape[0], -1)
        params = self.gaussian_param_encoder(encoded)
        mu = params[:, :self.z_dim]
        logvar = params[:, self.z_dim:]
        z = self.reparameterize(mu, logvar)
        decoded = self.decode(z, apply_sigmoid)
        return decoded, mu, logvar
def main():
    """Train the VAE on the facial-expression image folder, save the
    best checkpoint, dump latent-space observations, and plot losses."""
    # NOTE(review): machine-specific absolute path and hard-coded
    # "cuda" device; this script only runs on the author's setup.
    dataset_path = "C:/Users/Mattias/Documents/Facial Data/Edited/"
    filenames = os.listdir(dataset_path)
    data_files = [dataset_path + file for file in filenames]
    img_shape = [160, 100]
    trainset = torch.zeros((len(data_files), 1, *img_shape), dtype=torch.float)
    labels = torch.zeros((len(data_files), 1), dtype=torch.float)
    emotions = ["Angry", "Disgusted", "Happy", "Neutral", "Sad", "Surprised"]
    # Load each image: grayscale, resize (cv2 wants (w, h)), scale to [0, 1];
    # the label index comes from the filename's emotion prefix.
    for i, file in enumerate(data_files):
        img = cv2.imread(file)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_resized = cv2.resize(img_gray, img_shape[::-1])
        trainset[i, 0, :, :] = torch.tensor(img_resized) / 255.
        for j, emotion in enumerate(emotions):
            if filenames[i].startswith(emotion):
                labels[i] = j
    batch_size = 3
    trainset = TensorDataset(trainset, labels)
    train_dataset = DataLoader(trainset, batch_size=batch_size)
    device = "cuda"
    model = VAE(z_dim=2, device=device, input_shape=img_shape)
    # Initialize optimizer
    start_lr = 1e-4
    optimizer = torch.optim.Adam(model.parameters(), lr=start_lr)
    # Initialize reconstruction loss (BCE expects logits, MSE expects sigmoided output).
    loss_type = "mse"
    losses = {
        "bce": nn.BCEWithLogitsLoss(reduction="sum"),
        "mse": nn.MSELoss(reduction="sum")
    }
    reconstruction_loss = losses[loss_type]
    n_epochs = 10000
    loss_history = np.zeros(n_epochs)
    # NOTE(review): val_loss_history is never written to, yet it is
    # plotted below and "best_val_loss" actually tracks training loss.
    val_loss_history = np.zeros(n_epochs)
    best_val_loss = np.inf
    beta_kl = 1  # weight of the KL term (beta-VAE style)
    model_name = "model4"
    model_dir = f"./build/{model_name}"
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    # model.load_state_dict(torch.load(f"{model_dir}/{model_name}.pt"))
    # Train model
    for epoch in range(n_epochs):
        loss_total = 0
        model.train()
        for data in tqdm(train_dataset, "Epoch progress"):
            x = data[0].to(device)
            apply_sigmoid = True if loss_type == "mse" else False
            output, mu, logvar = model(x, apply_sigmoid=apply_sigmoid)
            reconstruction_error = reconstruction_loss(output, x)
            # Closed-form KL divergence of N(mu, sigma) from N(0, 1).
            kl_div = beta_kl * 0.5 * torch.sum(-1 - logvar + mu.pow(2) + logvar.exp())
            loss = reconstruction_error + kl_div
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_total += loss.item()
        loss_total /= len(train_dataset)
        loss_history[epoch] = loss_total
        output_text = f'Epoch: {epoch:04d}, Loss: {round(loss_total, 3):.3f}'
        # Checkpoint whenever the (training) loss improves.
        if loss_total < best_val_loss:
            output_text += "\tSaved model checkpoint"
            best_val_loss = loss_total
            torch.save(model.state_dict(), f'{model_dir}/{model_name}.pt')
        print(output_text)
    # Dump (mu_1, mu_2, label) rows for latent-space visualisation.
    observations = np.zeros((0, 3), dtype=np.float32)
    for data in train_dataset:
        x = data[0].to(device)
        y = data[1].detach().cpu().numpy()
        y = y.reshape(len(y), 1)
        _, mu, _ = model(x, apply_sigmoid=True)
        mu = mu.detach().cpu().numpy()
        obs = np.hstack((mu, y))
        observations = np.append(observations, obs, axis=0)
    with open(f'./build/{model_name}/observations.npy', 'wb') as f:
        np.save(f, observations)
    # Plot training and (unused) validation loss histories.
    fig, ax = plt.subplots(1, 2, figsize=(15, 5))
    ax[0].plot(np.arange(n_epochs), loss_history, color='black')
    ax[0].set_title('Training loss')
    ax[1].plot(np.arange(n_epochs), val_loss_history, color='black')
    ax[1].set_title('Validation loss')
    plt.show()
if __name__ == '__main__':
main() | m-ulmestrand/ego-generator | face/train.py | train.py | py | 7,497 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.Tensor",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.utils.da... |
41579158009 | from flask import Flask, render_template, request, redirect, url_for, flash, abort, session, jsonify
import json
import os.path
# from werkzeug.utils import secure_filename
import datetime
import os
app = Flask(__name__)
app.permanent_session_lifetime = datetime.timedelta(days=30)
# Set up this secret_key to be generated
app.secret_key = os.urandom(24)
print(__name__)
@app.route("/")
def home():
if 'user1' in session.keys():
res = session['user1']
return render_template('home.html', photos=res)
else:
return render_template('home.html')
@app.route("/uploaded-photo", methods=['GET', 'POST'])
def uploaded_photo():
if request.method == 'POST':
ct = datetime.datetime.now()
print("current time:-", ct)
# ts = ct.timestamp()
photos = {}
if os.path.exists('photos.json'):
with open('photos.json') as photo_file:
photos = json.load(photo_file)
# Secure the uploaded file
f = request.files['photo']
f_name = "user1_" + f.filename
# f_name = secure_filename(f.filename) + ct
f.save('C:/Users/Stella/PycharmProjects/dl-image-recognition/static/user_files/' + f_name)
if 'user1' in photos.keys():
# Add new photo under the user1
photos['user1'].append({'photo': f_name, 'letter': request.form['letter'],
'letterColor': request.form['letterColor']})
else:
# Create user1
photos['user1'] = [{'photo': f_name, 'letter': request.form['letter'],
'letterColor': request.form['letterColor']}]
with open('photos.json', 'w') as photo_file:
json.dump(photos, photo_file)
# Change this to timestamp later
session['user1'] = photos['user1']
return render_template('uploaded_photo.html', formData=request.form)
else:
return redirect(url_for('home'))
# @app.route('/<string:photo>')
@app.route('/gallery')
def gallery():
    """Render the gallery of user1's photos.

    Fix: previously `photos` was only bound when photos.json existed,
    so a missing file made render_template raise NameError, and a file
    without a 'user1' key raised KeyError; both now yield an empty
    gallery instead.
    """
    photos = []
    if os.path.exists('photos.json'):
        with open('photos.json') as photo_file:
            photos = json.load(photo_file).get('user1', [])
    return render_template('gallery.html', photos=photos)
@app.route('/api/user1/photos')
def fetch_all():
    """Return user1's photos as JSON; when the session holds no data,
    flash a message and redirect home instead."""
    if 'user1' not in session:
        flash('There is no data in the api. Please add an image')
        return redirect(url_for('home'))
    return jsonify(session['user1'])
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404 | StellarApp/dl-image-recognition | app.py | app.py | py | 2,634 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.urandom",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.session.keys",
"lin... |
13056722078 | from django.core.management.base import BaseCommand, CommandError
from bhojanalayas.models import Address, Details
import csv
# from float import float
class Command(BaseCommand):
    """Management command: load restaurant details and addresses from
    two CSV dumps into the Details and Address models."""

    def add_arguments(self, parser):
        # No custom command-line arguments.
        pass

    def handle(self, *args, **options):
        # First file: restaurant details.
        with open('../../../../restaurantsa9126b3.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            count_own = 0
            # count_own doubles as a header guard: row 0 is skipped.
            for row in csv_reader:
                if count_own > 0:
                    entry = Details(
                        resturant_id=row[0],
                        rest_name=row[1],
                        cuisines=row[2],
                        avg_cost_ofTwo=int(row[3]),
                        currency=row[4],
                        has_table_booking=row[5],
                        has_online_delivery=row[6],
                        aggregate_rating=float(row[7]),
                        rating_color=row[8],
                        rating_text=row[9],
                        votes=int(row[10]))
                    entry.save()
                count_own += 1
                print(count_own)  # progress: one line per row
            print(count_own)  # total rows processed
        # Second file: restaurant addresses (FK to Details via rest_id).
        with open('../../../../restaurant_addc9a1430.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            count_own = 0
            for row in csv_reader:
                if count_own > 0:
                    entry = Address(
                        country_code=row[1],
                        city=row[2],
                        address=row[3],
                        locality=row[4],
                        locality_verbose=row[5],
                        longitude=float(row[6]),
                        latitude=float(row[7]),
                        rest_id_id=row[0])
                    entry.save()
                count_own += 1
                print(count_own)
            print(count_own)
| megharana/Fortinet-Challenge | WorldBhojanalaya/bhojanalayas/management/commands/moveCSVToDb.py | moveCSVToDb.py | py | 1,929 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bhojanalayas.models.Details",
"line_number": 19,
"usage_type": "call"
},
{
"api... |
10285812257 | from selenium import webdriver
from selenium.common.exceptions import TimeoutException
import xlsxwriter as xw
# Output workbook: one sheet for scraped news articles.
workbook = xw.Workbook("scrap2.xlsx")
worksheet = workbook.add_worksheet("Noticias")
# worksheet_error_page = workbook.add_worksheet("Erros de página")
# worksheet_error_content = workbook.add_worksheet("Erros de conteudo")
print ("Começando Scrap...")
driver = webdriver.Firefox()
# Shared worksheet row cursor for all writer functions below.
# NOTE(review): `global` at module level is a no-op statement.
global row
row = 0
def scrapPageError():
    """Log the URL that failed to load in column D of the current row,
    advance the shared row cursor, and navigate back."""
    global row
    url_error = driver.current_url
    worksheet.write(row, 3, url_error)
    row += 1
    driver.back()
def scrapContentError():
    """Log the URL whose content could not be extracted (column D),
    advance the shared row cursor, and navigate back.

    NOTE(review): byte-for-byte identical to scrapPageError — the two
    are candidates for merging.
    """
    global row
    url_error = driver.current_url
    worksheet.write(row, 3, url_error)
    row += 1
    driver.back()
# for i in range(1,20,2):
def getConteudo(x):
    """Open article slot *x* on the current listing page and return its
    (title, date, body); navigates back to the listing afterwards.

    NOTE(review): on any failure the bare except only stops the page
    load — the function then implicitly returns None and does NOT call
    driver.back(), which breaks callers that index the result.
    """
    driver.set_page_load_timeout(10)
    try:
        # Click the x-th headline on the listing page.
        driver.find_element_by_xpath("""//*[@id="body"]/div/div[3]/div[%s]/h3/a""" % x).click()
        # Article header holds the title (h1) and publication date (p).
        header_element = driver.find_element_by_xpath("""/html/body/div[5]/div/div[1]/div/div[3]""")
        title = header_element.find_element_by_tag_name("h1").text
        date = header_element.find_element_by_tag_name("p").text
        # Article body text.
        post_element = driver.find_element_by_xpath("""/html/body/div[5]/div/div[1]/div/div[4]""")
        conteudo = post_element.text
        driver.back()
        return title, date, conteudo
    except:
        driver.execute_script("window.stop();")
def writePlanilha(z):
    """Write article slot *z*'s (title, date, body) into columns A-C of
    the current worksheet row, then advance the shared row cursor.

    Fix: getConteudo(z) was called three times, which navigated into
    the article (click + back) once per field and scraped it three
    times; fetch once and unpack the tuple instead. A failed fetch
    (getConteudo returning None) still raises TypeError here, which
    the caller's except handles exactly as before.
    """
    global row
    title, date, conteudo = getConteudo(z)
    worksheet.write(row, 0, title)
    worksheet.write(row, 1, date)
    worksheet.write(row, 2, conteudo)
    row += 1
# for i in range(13):
def pageNext(x):
    """Navigate to results page *x* of the locally saved site; on a
    load timeout, stop the load and navigate back one page."""
    driver.set_page_load_timeout(10)
    try:
        driver.get("file:///C:/Users/Hugo/Downloads/casbantigo/casbantigo/noticias/index_ccm_paging_p_b400=%s.html" % (x))
    except TimeoutException:
        driver.execute_script("window.stop();")
        driver.back()
# Open page 10, then scrape the odd-numbered article slots (1..19).
pageNext(10)
for z in range(1,20,2):
    try:
        # NOTE(review): this getConteudo(z) call is redundant —
        # writePlanilha fetches the same article again.
        getConteudo(z)
        writePlanilha(z)
    except:
        scrapContentError()
        print("Erro ao extrair conteudo")
# Disabled full crawl over all 13 listing pages:
# for i in range(1,13):
#     try:
#         pageNext(i)
#         print("Progresso: " + str(round(((i/13)*100), 2)) + "%")
#         for z in range(1,20,2):
#             try:
#                 getConteudo(z)
#                 writePlanilha(z)
#             except:
#                 scrapContentError()
#                 print("Erro ao extrair conteudo")
#     except:
#         print("Erro ao entrar na página")
#         scrapPageError()
#         continue
print("Scrap Finalizado!")
workbook.close()
| HugoPfeffer/web-scrap-casb | noticias v2.py | noticias v2.py | py | 2,675 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "xlsxwriter.Workbook",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sele... |
71606823075 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/11 14:31
# @Author : Tao.Xu
# @Email : tao.xu2008@outlook.com
import sys
import json
import superelasticsearch
from superelasticsearch import SuperElasticsearch
from elasticsearch import serializer, exceptions
from tlib import log
from tlib.retry import retry
# Py2/Py3 compatibility shim: sys.setdefaultencoding only exists on Python 2
# (and only after reload(sys)); on Python 3 reload lives in `imp`.
if sys.version_info > (3, 0, 0):
    from imp import reload
    reload(sys)
else:
    reload(sys)
    sys.setdefaultencoding('utf-8')

# =============================
# --- Global
# =============================
logger = log.get_logger()

ES_CONN_TIMEOUT = 300  # client timeout; also reused as master_timeout in snapshot calls
ES_OPERATION_TIMEOUT = '60m'  # per-operation timeout string passed to ES

# Numeric codes mapping engine/cluster states, presumably mirrored elsewhere
# in the project (not consumed in this module).
SEARCH_ENGINE_TYPE_MAP = {
    0: 'ElasticSearchClusterObj'
}
SEARCH_ENGINE_STATUS_MAP = {
    'ENABLED': 0,
    'DISABLED': 1
}
ES_CLUSTER_STATUS_MAP = {
    'NORMAL': 0,
    'MAINTAINING': 1,
    'REJECTED': 2,
    'SHUTDOWN': 3
}
class MyJSONSerializer(serializer.JSONSerializer):
    """JSON serializer that additionally handles `set` and `bytes` values."""

    def default(self, data):
        # Sets are not JSON-serializable; emit them as lists.
        if isinstance(data, set):
            return list(data)
        # Decode raw bytes as UTF-8 text.
        if isinstance(data, bytes):
            return str(data, encoding='utf-8')
        return serializer.JSONSerializer.default(self, data)


# NOTE(review): this rebinds superelasticsearch's module attribute `json` to a
# serializer *instance* — presumably how that library picks up the custom
# serializer; confirm against superelasticsearch's docs.
superelasticsearch.json = MyJSONSerializer()
class EsSuper(object):
    """Convenience wrapper around a SuperElasticsearch client.

    Bundles lazy, retried connection handling with helpers for cluster
    health/settings, index and template management, document CRUD, bulk
    indexing/updating, and snapshot/repository administration.
    """

    _conn = None  # lazily-created client, cached on first use
    target_index = None
    update_index_list = None

    def __init__(self, ip_list, port, user=None, password=None):
        self.ips = ip_list
        self.port = port
        self.user = user
        self.password = password
        # Touch .conn so an unreachable cluster fails fast at construction.
        assert self.conn

    def set_target_index(self, index_name):
        self.target_index = index_name

    def set_update_index_list(self, index_name_list):
        self.update_index_list = index_name_list

    @retry(tries=10, delay=30)
    def connect(self):
        """Create and sanity-check a client (retried up to 10 times)."""
        try:
            logger.info('Connect ES {0}:{1},user:{2},pwd:{3}'.format(
                self.ips, self.port, self.user, self.password))
            if self.user and self.password:
                es_conn = SuperElasticsearch(
                    self.ips, port=self.port, maxsize=8,
                    http_auth=(self.user, self.password),
                    timeout=ES_CONN_TIMEOUT, serializer=MyJSONSerializer())
            else:
                es_conn = SuperElasticsearch(
                    self.ips, port=self.port, maxsize=8,
                    timeout=ES_CONN_TIMEOUT, serializer=MyJSONSerializer())
            # BUGFIX: ping must be *called*. The original tested the bound
            # method object ("not es_conn.ping"), which is always truthy, so
            # a down cluster was never detected here.
            if not es_conn.ping():
                raise Exception("client ping failed, cluster is not up!!!")
            return es_conn
        except Exception as e:
            logger.error("Failed to connect the search engine!")
            raise Exception(e)

    @property
    def conn(self):
        """Cached client; reconnects when missing or no longer responding."""
        if self._conn is None or not self._conn.ping():
            self._conn = self.connect()
        return self._conn

    @property
    def ping(self):
        return self.conn.ping()

    @property
    def health(self):
        # Healthy when reachable and the cat-health status column is not red.
        return self.ping and 'red' not in self.conn.cat.health().split()[3]

    @property
    def es_status(self):
        """Cluster status string ('green'/'yellow'/'red') from cat health."""
        return self.conn.cat.health().split()[3]  # TODO if no indices

    @property
    def es_nodes(self):
        """Node identifiers parsed from the first column of `cat nodes`."""
        es_nodes = []
        for es_node_info in self.conn.cat.nodes().strip().split('\n'):
            es_nodes.append(es_node_info.split()[0])
        return es_nodes

    @property
    def es_indices_names(self):
        """Index names parsed from the third column of `cat indices`."""
        es_indices_names = []
        for es_indices in self.conn.cat.indices().strip().split('\n'):
            es_indices_info = es_indices.split()
            if len(es_indices_info) > 3:
                es_indices_names.append(es_indices_info[2])
        return es_indices_names

    def get_cat_index_info(self, index_name=None):
        """Return `cat indices` output zipped into dicts.

        A list of dicts when index_name is None, else a single dict.
        """
        cat_result_list = self.conn.cat.indices(index=index_name,
                                                v=True).split('\n')
        index_info = dict()
        if cat_result_list:
            if index_name is None:
                index_info = []
                for i in range(1, len(cat_result_list)):
                    index_info.append(dict(zip(cat_result_list[0].split(),
                                               cat_result_list[i].split())))
            else:
                index_info = dict(zip(cat_result_list[0].split(),
                                      cat_result_list[1].split()))
        return index_info

    def get_cluster_settings(self):
        response = self.conn.cluster.get_settings()
        return response

    def put_cluster_settings(self, body):
        logger.info('PUT Settings:{0}'.format(body))
        response = self.conn.cluster.put_settings(body=body)
        return response

    @property
    def cluster_allocation_explain(self):
        response = self.conn.cluster.allocation_explain()
        return response

    def cluster_state(self, index_name=None):
        response = self.conn.cluster.state(index=index_name)
        return response

    def create_index(self, index_name, index_settings=None):
        """Create the index if missing; returns True when it already exists."""
        if self.is_index_exist(index_name):
            logger.info('{0} index exist!'.format(index_name))
            return True
        logger.info(
            "The target index {} does not exist, create it first".format(
                index_name))
        logger.info(
            "Start creating index {} {}".format(index_name, index_settings))
        try:
            rtn = self.conn.indices.create(index_name,
                                           index_settings)  # , timeout=ES_OPERATION_TIMEOUT
            logger.info("Create index {0} finished".format(index_name))
            return rtn
        except exceptions.TransportError as e:
            # A concurrent creator may have won the race; treat as success.
            logger.warning(e)
            if 'exists' in e.info:
                return True
            raise e

    def create_template(self, template_name, index_settings):
        """Create (or confirm existing) an index template."""
        logger.info("Start creating template {} {}".format(template_name,
                                                           index_settings))
        try:
            return self.conn.indices.put_template(template_name,
                                                  index_settings,
                                                  master_timeout=ES_OPERATION_TIMEOUT)
        except exceptions.TransportError as e:
            logger.warning(e)
            if 'exists' in e.info:
                return True
            raise e

    def does_template_exist(self, template_name):
        return self.conn.indices.exists_template(template_name)

    def delete_index(self, index_name):
        return self.conn.indices.delete(index_name)

    def is_index_exist(self, index_name):
        return self.conn.indices.exists(index_name)

    def get_all_types(self, index_name):
        """All mapping (doc) type names defined for the index."""
        return self.conn.indices.get_mapping(index_name)[index_name][
            'mappings'].keys()

    def delete_doc_type(self, index_name, doc_type):
        """Delete every document of `doc_type` via delete-by-query."""
        return self.conn.delete_by_query(index_name,
                                         {"query": {"match_all": {}}},
                                         doc_type=doc_type,
                                         wait_for_completion=True,
                                         refresh=True)

    def delete_match_docs(self, index_name, doc_type, condition_dict_list):
        """Delete docs matching every condition (bool/must of term queries).

        Each condition dict supplies 'key', 'value' and an optional 'type_'
        query clause name (defaults to 'term').
        """
        logger.info(
            "Delete docs where index_name: {}, doc_type: {} and conditions: {}".format(
                index_name, doc_type, json.dumps(condition_dict_list))
        )
        search_body = {
            "query": {
                "bool": {
                    "must": [
                        {
                            condition_dict.get('type_', 'term'): {
                                condition_dict['key']: condition_dict['value']
                            }
                        } for condition_dict in condition_dict_list
                    ]
                }
            }
        }
        if doc_type:
            return self.conn.delete_by_query(index_name,
                                             search_body,
                                             doc_type=doc_type,
                                             wait_for_completion=True,
                                             refresh=True,
                                             conflicts='proceed')
        else:
            return self.conn.delete_by_query(index_name,
                                             search_body,
                                             wait_for_completion=True,
                                             refresh=True,
                                             conflicts='proceed')

    def index_doc(self, index_name, doc_type, doc_data_dict):
        """Index a document with an auto-generated id."""
        return self.conn.index(
            index=index_name,
            doc_type=doc_type,
            body=doc_data_dict,
            timeout=ES_OPERATION_TIMEOUT
        )

    def create_doc(self, index_name, doc_type, doc_data_dict, id_):
        """Index a document under the explicit id `id_`."""
        return self.conn.index(
            id=id_,
            index=index_name,
            doc_type=doc_type,
            body=doc_data_dict,
            timeout=ES_OPERATION_TIMEOUT
        )

    def bulk_create_docs(self, index_name, doc_data_dict_list,
                         max_bulk_size=20000, refresh=True):
        """Index docs in batches of `max_bulk_size` via bulk operations.

        Returns the final bulk result, or False on a failed batch.
        NOTE(review): when the doc list is empty, or its length is an exact
        multiple of max_bulk_size, the method falls through and returns None.
        """
        num = 0
        pre_num = 0
        bulk = None
        # for doc_data_dict in generate_docs(docs_num):
        for doc_data_dict in doc_data_dict_list:
            num += 1
            if num % max_bulk_size == 1:
                # Start a fresh bulk container for this batch.
                bulk = self.conn.bulk_operation()
            bulk.index(
                index=index_name,
                doc_type='doc',
                body=doc_data_dict
            )
            if num % max_bulk_size == 0:
                logger.info(
                    "Start sending from items {} to {} to ElasticSearch Server".format(
                        pre_num, num))
                pre_num = num
                if bulk.execute(timeout=ES_OPERATION_TIMEOUT, refresh=refresh):
                    logger.info("Finished sending these items")
                else:
                    return False
        logger.info("Total file number: {}".format(num))
        if num != pre_num:
            # Flush the trailing partial batch.
            logger.info(
                "Start sending from items {} to {} to ElasticSearch Server".format(
                    pre_num, num))
            rc = bulk.execute(timeout=ES_OPERATION_TIMEOUT, refresh=refresh)
            if rc:
                logger.info("Finished")
                return rc
            else:
                return False

    def bulk_update_docs(self, index_name, doc_type, doc_data_dict_list,
                         max_bulk_size=2000, refresh=True,
                         index_if_not_exist=True):
        """Bulk upsert (or delete when a doc carries 'to_delete') by 'id_'.

        NOTE(review): the `doc_type` argument is ignored — 'doc' is
        hard-coded in each bulk action.
        """
        num = 0
        pre_num = 0
        bulk = None
        for doc_data_dict in doc_data_dict_list:
            num += 1
            if num % max_bulk_size == 1:
                bulk = self.conn.bulk_operation()
            # doc_as_upsert makes the update create the doc when missing.
            body = {
                "doc": doc_data_dict,
                "doc_as_upsert": True
            } if index_if_not_exist else {
                "doc": doc_data_dict
            }
            if 'to_delete' in doc_data_dict:
                bulk.delete(
                    id=doc_data_dict['id_'],
                    index=index_name,
                    doc_type='doc'
                )
            else:
                bulk.update(
                    id=doc_data_dict.pop("id_"),
                    index=index_name,
                    doc_type='doc',
                    body=body
                )
            if num % max_bulk_size == 0:
                logger.info(
                    "Start sending from items {} to {} to ElasticSearch Server".format(
                        pre_num, num))
                pre_num = num
                if bulk.execute(timeout=ES_OPERATION_TIMEOUT, refresh=refresh):
                    logger.info("Finished sending these items")
                else:
                    return False
        logger.info("Total file number: {}".format(num))
        if num != pre_num:
            logger.info(
                "Start sending from items {} to {} to ElasticSearch Server".format(
                    pre_num, num))
            rc = bulk.execute(timeout=ES_OPERATION_TIMEOUT, refresh=refresh)
            if rc:
                logger.info("Finished")
                return rc
            else:
                return False

    def refresh(self, index_name=None):
        return self.conn.indices.refresh(index_name)

    def flush(self, index_name=None, wait_if_ongoing=True):
        return self.conn.indices.flush(index=index_name,
                                       wait_if_ongoing=wait_if_ongoing)

    def thread_pool(self, thread_type=None):
        return self.conn.cat.thread_pool(thread_type).split("\n")

    @property
    def index_queue_num(self):
        """Sum of the queued task counts across nodes' bulk thread pools."""
        queue_num = 0
        for node_bulk_thread_info in self.thread_pool(thread_type="bulk"):
            if node_bulk_thread_info:
                logger.info(node_bulk_thread_info)
                queue_num += int(node_bulk_thread_info.split()[3])
        return queue_num

    def delete_doc(self):
        # Not implemented yet.
        pass

    def search(self, index=None, doc_type=None, body=None, scroll=None):
        """Search; defaults to a match_all of size 15 when no body is given."""
        if body is None:
            body = {"query": {"match_all": {}}, 'size': 15}
        rtn = self.conn.search(index=index, doc_type=doc_type, body=body,
                               scroll=scroll)
        logger.info('Search take times: {time}ms'.format(time=rtn['took']))
        logger.info('Search hits docs: {num}'.format(num=rtn['hits']['total']))
        return rtn

    def count(self, index=None, doc_type=None, body=None):
        return self.conn.count(index=index, doc_type=doc_type, body=body)

    def scroll(self, scroll_id, scroll='30m'):
        return self.conn.scroll(scroll_id=scroll_id, scroll=scroll)

    def put_cluster_setting(self, body):
        # NOTE(review): near-duplicate of put_cluster_settings (kept for
        # backward compatibility with existing callers).
        self.conn.cluster.put_settings(body)

    # ===============
    # Snapshot
    # class elasticsearch.client.SnapshotClient(client)
    # ===============
    def get_snapshot_info(self, repository, snap_name):
        """
        Returns information about a snapshot.
        :param repository: A repository name
        :param snap_name: A comma-separated list of snapshot names
        :return:
        """
        return self.conn.snapshot.get(repository, snap_name,
                                      master_timeout=ES_CONN_TIMEOUT)

    def get_snapshot_status(self, repository, snap_name):
        """
        Returns information about the status of a snapshot.
        :param repository:A repository name
        :param snap_name:A comma-separated list of snapshot names
        :return:
        """
        return self.conn.snapshot.status(repository, snap_name,
                                         ignore_unavailable=True,
                                         master_timeout=ES_CONN_TIMEOUT)

    def get_repository_info(self, repository):
        """
        Returns information about a repository.
        :param repository:A repository name
        :return:
        """
        return self.conn.snapshot.get_repository(repository, local=False,
                                                 master_timeout=ES_CONN_TIMEOUT)

    def verify_repository(self, repository):
        """
        Verifies a repository.
        :param repository:A repository name
        :return:
        """
        return self.conn.snapshot.verify_repository(
            repository,
            master_timeout=ES_CONN_TIMEOUT,
            timeout=ES_OPERATION_TIMEOUT
        )

    def create_repository(self, repository, body):
        """
        Creates a repository.
        :param repository: A repository name
        :param body: The repository definition
        :return:
        """
        return self.conn.snapshot.create_repository(
            repository, body,
            master_timeout=ES_CONN_TIMEOUT,
            timeout=ES_OPERATION_TIMEOUT, verify=True
        )

    def create_snapshot(self, repository, snap_name, body):
        """
        Creates a snapshot in a repository.
        :param repository: A repository name
        :param snap_name: A snapshot name
        :param body: The snapshot definition
        :return:
        """
        return self.conn.snapshot.create(
            repository, snap_name, body,
            master_timeout=ES_CONN_TIMEOUT, wait_for_completion=True
        )

    def cleanup_repository(self, repository):
        """
        Removes stale data from repository.
        :param repository: A repository name
        :return:
        """
        return self.conn.snapshot.cleanup_repository(
            repository,
            master_timeout=ES_CONN_TIMEOUT, timeout=ES_OPERATION_TIMEOUT
        )

    def delete_repository(self, repository):
        """
        Deletes a repository.
        :param repository:A comma-separated list of repository names
        :return:
        """
        # BUGFIX: the original called snapshot.create_repository here, so
        # "delete" actually tried to (re)create the repository.
        return self.conn.snapshot.delete_repository(
            repository,
            master_timeout=ES_CONN_TIMEOUT, timeout=ES_OPERATION_TIMEOUT
        )

    def delete_snapshot(self, repository, snap_name):
        """
        Deletes a snapshot.
        :param repository: A repository name
        :param snap_name: A snapshot name
        :return:
        """
        return self.conn.snapshot.delete(
            repository, snap_name, master_timeout=ES_CONN_TIMEOUT)

    def restore_snapshot(self, repository, snap_name, body):
        """
        :param repository:A repository name
        :param snap_name:A snapshot name
        :param body:Details of what to restore
        :return:
        """
        return self.conn.snapshot.restore(
            repository, snap_name, body,
            master_timeout=ES_CONN_TIMEOUT, wait_for_completion=True
        )
if __name__ == "__main__":
    # Ad-hoc smoke test against a fixed cluster.
    # NOTE(review): credentials are hard-coded; acceptable only for a
    # throwaway manual run.
    es_ips = ['10.25.119.7']
    es_port = 9200
    es_user = 'root'
    es_pwd = 'password'
    es_obj = EsSuper(es_ips, es_port, es_user, es_pwd)
    print(es_obj.es_status)
| txu2k8/libs-py | tlib/es/elasticsearch_super.py | elasticsearch_super.py | py | 17,979 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "imp.reload",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "imp.reload",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.setdefaultencoding",
... |
5118190626 | import copy
import logging
from random import sample, uniform
import unittest
import numpy as np
import pandas as pd
import time
from sklearn.ensemble import RandomForestClassifier
from make_data import make_data
import mr
# Dataset-generation grid passed to make_data (sizes, class counts,
# feature counts, informative/redundant/repeated splits, repeats per combo).
N_SAMPLES = [100, 1000, 10000]
N_CLASSES = [(3, 1), (5, 1), (7, 1)]
N_FEATURES = [12]
N_INFO = [(0, 0, 0), (3, 1, 0), (4, 3, 1), (6, 4, 2)]
N_PER = 2

# Classifier Parameters (keys match RandomForestClassifier keyword names).
CLASSIFIER_PARAMS = dict(
    n_estimators=100,
    criterion="gini",
    min_samples_leaf=3,
    max_depth=15,
    max_features="auto",
    oob_score=True,
    random_state=23
)

# Columns of output dataframe (not consumed in this debug script).
DF_COLS = ["n_samples", "n_features", "n_informative", "n_redundant",
           "n_repeated", "n_classes", "n_clusters_per_class", "seed",
           "mr", "n_diff"]

# Pre-generate every dataset combination once, up front.
all_data = make_data(
    N_SAMPLES,
    N_CLASSES,
    N_FEATURES,
    N_INFO,
    N_PER,
    use_seed=True
)
def _new_classifier():
    """Build a fresh RandomForestClassifier from the shared CLASSIFIER_PARAMS.

    The dict's keys match the constructor keyword names, so it can be
    splatted directly instead of copying every entry by hand (the original
    duplicated the full argument list twice).
    """
    return RandomForestClassifier(**CLASSIFIER_PARAMS)


def run(idx):
    """Run one metamorphic test for dataset `idx`.

    Trains an initial model, applies the mr_double_dataset relation to both
    splits, retrains, and prints "Did not match" when any test prediction
    differs between the two models.
    """
    print("--Running Job for Index {}--".format(idx))
    # Generate Data
    data = all_data[idx]["data"]
    train_x, train_y = data["train"]
    test_x, test_y = data["test"]

    # Create, fit, and predict a RFC
    initial_clf = _new_classifier()
    initial_clf.fit(X=train_x, y=train_y)
    initial_predictions = initial_clf.predict(test_x)

    # Other metamorphic relations that were tried (kept for reference):
    # params = {"cols_to_transform": [0,1,2], "m":-0.5, "b": 1}
    # train_x_2, train_y_2 = mr.mr_linear_transform((train_x, train_y), params)
    # test_x_2, test_y_2 = mr.mr_linear_transform((test_x, train_y), params)
    # params = {"uninformative_value": 0}
    # train_x_2, train_y_2 = mr.mr_add_uninformative((train_x, train_y), params)
    # test_x_2, test_y_2 = mr.mr_add_uninformative((test_x, test_y), params)
    # params = {"new_order": [11, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0]}
    # train_x_2, train_y_2 = mr.mr_reorder_predictors((train_x, train_y), params)
    # test_x_2, test_y_2 = mr.mr_reorder_predictors((test_x, test_y), params)
    train_x_2, train_y_2 = mr.mr_double_dataset((train_x, train_y))
    test_x_2, test_y_2 = mr.mr_double_dataset((test_x, test_y))

    # Create, fit, and predict a follow-up RFC on the transformed data.
    follow_up_clf = _new_classifier()
    follow_up_clf.fit(X=train_x_2, y=train_y_2)
    follow_up_predictions = follow_up_clf.predict(test_x_2)

    def print_results(initial, follow_up):
        """Verbose per-item comparison (currently unused; see commented call)."""
        num = 0
        for i, f in zip(initial, follow_up):
            if i == f:
                print("({}/{}): Equal".format(num, len(initial)))
            else:
                print("({}/{}): Initial: {}; Follow_up: {}"
                      .format(num, len(initial), i, f))
            num += 1

    def print_results2(initial, follow_up):
        """Print a single line only when any prediction pair differs."""
        if any(i != f for i, f in zip(initial, follow_up)):
            print("Did not match")

    print_results2(initial_predictions, follow_up_predictions)
    # print_results(initial_predictions, follow_up_predictions)


for i in range(len(all_data)):
    run(i)
| bradgwest/mtrf | debug_mr.py | debug_mr.py | py | 3,880 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "make_data.make_data",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "mr.mr_double_dataset",
"line_number": 80,
"usage_type": "call"
},
{
"... |
5699937924 | from django.conf import settings
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import ArrayField
from django.db import models, transaction
from django.db.models import F
from lottery.models import Draw, get_number_of_tickets
from main.base import BaseModel, ExtendedQ
from utils.numbers import format_integer, format_pesos
from .push_notifications import Interface as PushNotificationsInterface
def generate_initial_extra_tickets_ttl():
    """Callable default for User.extra_tickets_ttl, read from settings.

    NOTE(review): this returns the settings object itself on every call; if
    INITIAL_EXTRA_TICKETS_TTL is a mutable list, consider returning a copy.
    """
    return settings.INITIAL_EXTRA_TICKETS_TTL
class UserQuerySet(models.QuerySet):
    """User queryset with bulk push-notification support."""

    def send_push_notification(self, body, data=None):
        # Fan the notification out to every user in the queryset.
        for user in self:
            user.send_push_notification(body=body, data=data)
class UserManager(BaseUserManager):
    """Manager that hides soft-deleted users and hashes passwords on create."""

    def get_queryset(self):
        # Soft-deleted users (is_active=False) are excluded by default.
        qs = UserQuerySet(self.model, using=self._db)
        return qs.filter(is_active=True)

    def everything(self):
        """Queryset including soft-deleted (inactive) users."""
        return super().get_queryset()

    @transaction.atomic
    def create(self, **fields):
        # Pop the raw password so it is hashed via set_password rather than
        # stored verbatim by the base create().
        password = fields.pop("password", None)
        user = super().create(**fields)
        if password is not None:
            user.set_password(password)
            user.save()
        return user

    def create_superuser(self, **fields):
        fields.setdefault("is_staff", True)
        fields.setdefault("is_superuser", True)
        # BUGFIX: BaseUserManager does not define create_user(), so the
        # original `self.create_user(**fields)` raised AttributeError.
        # Route through create(), which already handles password hashing.
        return self.create(**fields)
class User(BaseModel, AbstractUser):
    """Custom user keyed on email, with Chilean RUT fields, a lottery
    balance/winnings, and a list of extra-ticket lifetimes."""

    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = []

    # AbstractUser's username is disabled; email is the login identifier.
    username = None
    email = models.EmailField(unique=True, null=True, max_length=254, verbose_name="email address")
    password = models.CharField(null=True, max_length=128, verbose_name="password")
    rut = models.PositiveIntegerField(unique=True, null=True, default=None, verbose_name="RUT")
    check_digit = models.PositiveSmallIntegerField(null=True, default=None, verbose_name="RUT check digit")
    balance = models.PositiveIntegerField(default=0, verbose_name="balance")
    winnings = models.PositiveIntegerField(default=0, verbose_name="winnings")
    # Remaining lifetimes of this user's extra tickets; entries <= 0 are
    # pruned by number_of_extra_tickets.
    extra_tickets_ttl = ArrayField(
        base_field=models.PositiveSmallIntegerField(),
        blank=True,
        default=generate_initial_extra_tickets_ttl,
        verbose_name="extra tickets TTL",
    )

    objects = UserManager()

    def delete(self, *args, **kwargs):
        # Soft delete: flip is_active instead of removing the row.
        self.is_active = False
        self.save(*args, **kwargs)

    def restore(self, *args, **kwargs):
        # Undo a soft delete.
        self.is_active = True
        self.save(*args, **kwargs)

    def hard_delete(self, *args, **kwargs):
        # Real row deletion, bypassing the soft-delete override above.
        super().delete(*args, **kwargs)

    def send_push_notification(self, body, data=None):
        self.devices.all().send_push_notification(body=body, data=data)

    @property
    def current_tickets(self):
        # This method assumes there is an ongoing draw.
        return self.tickets.ongoing()

    @property
    def current_prize(self):
        return self.current_tickets.prize()

    @property
    def owners(self):
        return {self}

    #####################
    # NUMBER OF TICKETS #
    #####################

    @property
    def number_of_standard_tickets(self):
        # Tickets earned from the current balance.
        return get_number_of_tickets(self.balance)

    @property
    def number_of_extra_tickets(self):
        # NOTE(review): reading this property has a side effect — it prunes
        # expired TTLs and saves the model on every access.
        self.extra_tickets_ttl = [x for x in self.extra_tickets_ttl if (x > 0)]
        self.save()
        return len(self.extra_tickets_ttl)

    @property
    def number_of_tickets(self):
        return self.number_of_standard_tickets + self.number_of_extra_tickets

    @property
    def current_number_of_tickets(self):
        return self.current_tickets.count()

    ##############
    # OPERATIONS #
    ##############

    def deposit(self, amount):
        """Credit `amount`, add the matching tickets to the ongoing draw,
        and notify the user's devices."""
        with transaction.atomic():
            # F() expression keeps the balance update race-free at DB level.
            self.balance = F("balance") + amount
            self.save()
            draw = Draw.objects.ongoing()
            delta_tickets = get_number_of_tickets(amount)
            draw.add_tickets(user=self, n=delta_tickets)
            formatted_amount = format_pesos(amount)
            self.send_push_notification(body=f"Se ha efectuado tu depósito de {formatted_amount}.")

    def withdraw(self, amount):
        """Debit `amount` and remove the matching tickets (fewest matches
        first) from the ongoing draw, then notify the user's devices."""
        with transaction.atomic():
            # Avoid race conditions by locking the tickets until the end of the transaction.
            # This means that the selected tickets will only be modified (or deleted)
            # by a single instance of the back end at a time.
            # The transaction will proceed unless these tickets were already locked by another instance.
            # In that case, the transaction will block until they are released.
            locked_tickets = self.current_tickets.select_for_update()
            self.balance = F("balance") - amount
            self.save()
            # Sacrifice the least valuable tickets first.
            ordered_tickets = locked_tickets.order_by("number_of_matches")
            delta_tickets = get_number_of_tickets(amount)
            pks_to_remove = ordered_tickets[:delta_tickets].values_list("pk")
            tickets_to_remove = locked_tickets.filter(pk__in=pks_to_remove)
            tickets_to_remove.delete()
            formatted_amount = format_pesos(amount)
            self.send_push_notification(body=f"Se ha efectuado tu retiro de {formatted_amount}.")

    def consume_extra_tickets(self):
        # Decrement every extra ticket's remaining lifetime by one.
        self.extra_tickets_ttl = [(x - 1) for x in self.extra_tickets_ttl]
        self.save()

    def award_prize(self, value):
        # Prize money both increases the spendable balance and is tracked
        # separately as winnings; F() keeps both updates race-free.
        self.balance = F("balance") + value
        self.winnings = F("winnings") + value
        self.save()

    #####################
    # LAZY REGISTRATION #
    #####################

    @property
    def is_registered(self):
        return bool(self.email) or bool(self.rut)

    @property
    def is_abandoned(self):
        # No devices attached → nobody can reach this account.
        return self.devices.count() == 0

    @property
    def is_null(self):
        return (not self.is_registered) and self.is_abandoned

    ###################
    # REPRESENTATIONS #
    ###################

    @property
    def full_name(self):
        name_components = filter(bool, [self.first_name, self.last_name])
        name = " ".join(name_components)
        return name

    @property
    def formatted_rut(self):
        # e.g. "12.345.678-K"; None when either RUT component is missing.
        if (self.rut is None) or (self.check_digit is None):
            return
        formatted_rut_integer = format_integer(self.rut)
        formatted_check_digit = "K" if (self.check_digit == 10) else self.check_digit
        return f"{formatted_rut_integer}-{formatted_check_digit}"

    def __str__(self):
        return self.email if self.is_registered else "<anonymous>"
class DeviceQuerySet(models.QuerySet):
    """Device queryset with bulk push-notification support."""

    def send_push_notification(self, body, data=None):
        # One interface instance is reused for every device in the queryset.
        interface = PushNotificationsInterface()
        for device in self:
            interface.send(device=device, body=body, data=data)
class DeviceManager(models.Manager):
    """Manager wiring Device to DeviceQuerySet."""

    def get_queryset(self):
        return DeviceQuerySet(self.model, using=self._db)
class Device(BaseModel):
    """A mobile device tied to a user; exactly one of the two OS ids is set
    (enforced by the XOR check constraint below)."""

    class Meta:
        constraints = [
            models.CheckConstraint(
                check=ExtendedQ(android_id__isnull=False) ^ ExtendedQ(ios_id__isnull=False),
                name="exactly_one_os_id",
            )
        ]

    # SET_NULL keeps the device row (and its push token) when the user goes away.
    user = models.ForeignKey(
        to="accounts.User", null=True, verbose_name="user", related_name="devices", on_delete=models.SET_NULL
    )
    android_id = models.CharField(unique=True, null=True, max_length=255, verbose_name="Android ID")
    ios_id = models.CharField(unique=True, null=True, max_length=255, verbose_name="iOS ID")
    expo_push_token = models.CharField(unique=True, null=True, max_length=255, verbose_name="Expo push token")

    objects = DeviceManager()

    @property
    def os(self):
        # "Android" or "iOS", depending on which id is present.
        return (self.android_id and "Android") or (self.ios_id and "iOS")

    @property
    def os_id(self):
        return self.android_id or self.ios_id

    def __str__(self):
        return f"{self.user or 'Some one'}’s {self.os}"
| conyappa/backend | conyappa/accounts/models.py | models.py | py | 7,960 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.conf.settings.INITIAL_EXTRA_TICKETS_TTL",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.QuerySet",
"line_number": 19,
"usage_type": "at... |
26401545941 | from flask import Flask, request, redirect, render_template, flash
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['DEBUG'] = True
# NOTE(review): database credentials and the secret key are hard-coded; move
# them to environment/config for anything beyond local development.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:buildablog@localhost:8889/build-a-blog'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
app.secret_key = 'fjeioa;;fjeiaow;'
class Blog_Post(db.Model):
    """A single blog entry with a title and body."""

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(140), nullable=False)
    body = db.Column(db.String(500), nullable=False)

    def __init__(self, title, body):
        self.title = title
        self.body = body
@app.route('/blog')
def blog():
    """Show all posts, or a single post when an ?id=<pk> query arg is given."""
    if request.args:
        blog_id = request.args.get('id')
        post = Blog_Post.query.get(blog_id)
        if post is None:
            # Unknown/invalid id: fall back to the listing instead of
            # rendering the single-post template with post=None.
            return redirect('/blog')
        return render_template('post.html', post=post)
    blogs = Blog_Post.query.all()
    return render_template('blog.html', blogs=blogs)
@app.route('/newpost', methods=['POST', 'GET'])
def newpost():
    """Show the new-post form (GET) or validate and save a post (POST)."""
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        if not title or not body:
            flash("Please enter a title and body for each post.", 'error')
            return redirect('/newpost')
        # Renamed from `newpost` — the original shadowed the view function.
        new_post = Blog_Post(title, body)
        db.session.add(new_post)
        db.session.commit()
        # BUGFIX: render the post we just created instead of re-querying for
        # the newest row (order_by id desc), which could return someone
        # else's post inserted concurrently.
        return render_template('post.html', post=new_post)
    return render_template('newpost.html')
@app.route('/')
def go_to_main():
    """Root URL simply redirects to the blog listing."""
    return redirect('/blog')

if __name__ == '__main__':
    app.run()
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "flask.r... |
70497481953 | from functools import cmp_to_key
class Player(object):
    """A named player with a numeric score.

    `comparator` orders players by descending score, breaking ties by
    ascending name, for use with functools.cmp_to_key.
    """

    def __init__(self, name, score):
        self.name = name
        self.score = score

    def __repr__(self):
        return f'{self.name} {self.score}'

    def comparator(a, b):
        """Old-style cmp function: score descending, then name ascending.

        BUGFIX: returns 0 for fully-equal players. The original returned 1
        for equal names, which violates the comparator contract
        (cmp(a, a) must be 0) and made the ordering inconsistent.
        """
        if a.score > b.score:
            return -1
        elif a.score < b.score:
            return 1
        elif a.name < b.name:
            return -1
        elif a.name > b.name:
            return 1
        return 0
names = ['charlie', 'abby', 'bob', 'derek']
# Score each player by its position. enumerate avoids the original's
# names.index(x) lookup, which is O(n) per element and returns the wrong
# index when a name appears twice.
players = [Player(name, score) for score, name in enumerate(names)]
players.append(Player('amy', 3))

for player in players:
    print(player)
print()

# Sort with the old-style comparator adapted via cmp_to_key.
players.sort(key=cmp_to_key(Player.comparator))
for player in players:
    print(player)

# Other sorting approaches, kept for reference:
# # DEFINE A SORT METHOD
# def sort_item(item):
#     return item[1]
#
# items.sort(key=sort_item)
# print(items)
#
# # LAMBDA FUNCTION
# items.sort(key=lambda item: item[1], reverse=True)
# print(items)
| NathanFee/InterviewQuestions | sort_complex.py | sort_complex.py | py | 950 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "functools.cmp_to_key",
"line_number": 33,
"usage_type": "call"
}
] |
72427446434 | from django.shortcuts import get_object_or_404,render, HttpResponseRedirect
from django.shortcuts import render
from django.contrib import messages
from .forms import todoform,dateform
from django.shortcuts import redirect
from django.conf import settings
# Create your views here.
# import datetime
from datetime import datetime
from .models import ToDoList
from .forms import todoform,tododate
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
import logging
import pprint
import collections
# Put the logging info within your django view
# Pretty-printer used for ad-hoc debug output in the views below.
pp = pprint.PrettyPrinter(indent=4)
# NOTE(review): `d` is evaluated once at import time, so in a long-running
# process it goes stale after midnight; views needing "today" should call
# datetime.now().date() per request.
d = datetime.now().date()
logger = logging.getLogger(__name__)
def handler404(request, exception):
    """Custom 404 handler: render the base template with a generic message."""
    return render(request, "accounts/base.html", {'error': "Some error has occured"})
def handler500(request):
    """Custom 500 handler: render the base template with a generic message."""
    return render(request, "accounts/base.html", {'error': "Some error has occured"})
def handler403(request, exception):
    """Custom 403 handler: render the base template with a generic message."""
    return render(request, "accounts/base.html", {'error': "Some error has occured"})
def handler400(request, exception):
    """Custom 400 handler: render the base template with a generic message."""
    return render(request, "accounts/base.html", {'error': "Some error has occured"})
@login_required
def diary(request):
    """List the user's diary entries (newest first) and let them jump to —
    or create — the entry for a chosen date."""
    entries = ToDoList.objects.filter(user=request.user)
    context = {'data': {}}
    if entries:
        # Map "YYYY-MM-DD" -> entry id, newest date first.
        for entry in entries:
            context['data'][str(entry.dardate)] = entry.id
        context['data'] = collections.OrderedDict(sorted(context['data'].items())[::-1])
    else:
        context['data'] = "Your haven't written yet."
    formd = dateform(request.POST or None)
    if request.method == "POST" and formd.is_valid():
        chosen = formd.cleaned_data['choose_date']
        existing = ToDoList.objects.filter(dardate=chosen, user=request.user)
        if not existing:
            # No entry yet for that date: send them to the "new entry" page.
            return HttpResponseRedirect("/new/" + str(chosen))
        # BUGFIX: the original had an unreachable pp.pprint() after this
        # return (now removed), plus unused `tt`/`lenn` counters.
        return HttpResponseRedirect("/" + str(existing[0].id))
    return render(request, 'accounts/diary.html', {"context": context, "formd": formd})
@login_required
def newdate(request, dd):
    """Create a diary entry for the date `dd` taken from the URL."""
    form = todoform(request.POST or None)
    if form.is_valid():
        entry = form.save(commit=False)
        # Stamp the entry with the requesting user and the chosen date.
        entry.user = request.user
        entry.dardate = dd
        entry.save()
        return HttpResponseRedirect("/wholediary")
    return render(request, 'accounts/new.html', {"form": form, "yed": str(dd)})
@login_required
def first(request):
    """Home page: show today's diary entry (if any) for the logged-in user."""
    # BUGFIX: compute "today" per request. The module-level `d` is frozen at
    # import time, so after midnight the old code kept filtering (and
    # displaying) yesterday's date until the process restarted.
    today = datetime.now().date()
    entries = ToDoList.objects.filter(dardate=today, user=request.user)
    context = {'d': today}
    if entries:
        # Show the most recently stored entry for today.
        context['data'] = entries[len(entries) - 1].your_day
    else:
        context['data'] = "Your today's writing is empty."
    return render(request, 'accounts/home.html', {"context": context})
@login_required
def index(request):
    """Create or edit today's diary entry for the logged-in user."""
    context = {}
    # BUGFIX: hoist a single per-request "today". The original called
    # datetime.now().date() twice (which can straddle midnight) and then
    # displayed the *import-time* module global `d` as `yed`.
    today = datetime.now().date()
    existing = ToDoList.objects.filter(dardate=today, user=request.user)
    if len(existing) > 0:
        # Edit today's entry when it already exists.
        obj = get_object_or_404(existing, dardate=today)
        form = todoform(request.POST or None, instance=obj)
    else:
        form = todoform(request.POST or None)
    if form.is_valid():
        profile = form.save(commit=False)
        profile.user = request.user
        profile.save()
        return HttpResponseRedirect("/" + str(profile.id))
    context["form"] = form
    context["yed"] = today
    return render(request, "accounts/update_view.html", context)
def sign_up(request):
    """Register a user with Django's UserCreationForm and log them in on
    success.

    NOTE(review): on success this renders home.html directly (no redirect,
    no context) rather than redirecting — confirm the template needs nothing.
    """
    context = {}
    form = UserCreationForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            user = form.save()
            login(request, user)
            return render(request, 'accounts/home.html')
    context['form'] = form
    return render(request, 'registration/sign_up.html', context)
@login_required
def detail_view(request, id):
    """Display a single diary entry by primary key."""
    context = {}
    # BUGFIX: get_object_or_404 (as already used in update_view) turns a bad
    # id into a 404 instead of an unhandled DoesNotExist / 500.
    context["data"] = get_object_or_404(ToDoList, id=id)
    context["kkk"] = id
    # NOTE(review): there is no ownership check, so any logged-in user can
    # read any entry by guessing ids — confirm whether that is intended.
    return render(request, "accounts/detail_view.html", context)
# update view for details
@login_required
def update_view(request, id):
    """Edit an existing diary entry; redirect to its detail page on save."""
    context = {}
    obj = get_object_or_404(ToDoList, id=id)
    # The entry is known to exist at this point, so its date can be read
    # directly — the original re-queried the same id a second time and also
    # left a debug pp.pprint() in place (both removed).
    yed = str(obj.dardate)
    form = todoform(request.POST or None, instance=obj)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect("/" + id)
    context["form"] = form
    context["yed"] = yed
    return render(request, "accounts/update_view.html", context)
@login_required
def delo(request, id):
    """Delete the entry with the given id, then show the success page."""
    # Unused `kkk` result variable removed.
    ToDoList.objects.filter(id=id).delete()
    # NOTE(review): no ownership check — any logged-in user can delete any
    # entry by id; confirm whether a user=request.user filter is required.
    return HttpResponseRedirect("/delsuccess/" + str(id))
@login_required
def dels(request, id):
    """Show the user's diary index (date -> entry id) and handle date jumps.

    A POSTed date redirects to the matching entry, or to the "new entry"
    page when no entry exists for that date yet.
    """
    context = {}
    context['msg'] = "Your entry deleted successfully"
    context['data'] = {}
    entries = ToDoList.objects.filter(user=request.user)
    if len(entries) > 0:
        # Map each entry's date string to its id, newest date first.
        for entry in entries:
            context['data'][str(entry.dardate)] = entry.id
        context['data'] = collections.OrderedDict(
            sorted(context['data'].items(), reverse=True))
    else:
        context['data'] = "Your haven't written yet."
    formd = dateform(request.POST or None)
    if request.method == "POST":
        if formd.is_valid():
            chosen = formd.cleaned_data['choose_date']
            existing = ToDoList.objects.filter(dardate=chosen, user=request.user)
            if len(existing) <= 0:
                # No entry for that date yet: open the "new entry" page.
                return HttpResponseRedirect("/new/" + str(chosen))
            return HttpResponseRedirect("/" + str(existing[0].id))
        # BUG FIX: the original called pp.pprint(kd) here, but `kd` is only
        # bound inside the is_valid() branch, so any invalid POST raised
        # NameError. The debug print has been removed.
    return render(request, 'accounts/diary.html', {"context": context, "formd": formd})
@login_required
def error(request):
    """Render the base template with a generic error message."""
    return render(request, "accounts/base.html", {'error': "Some error has occured"})
{
"api_name": "pprint.PrettyPrinter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "logging.... |
29219958398 | from PyQt5.QtWidgets import QApplication, QSystemTrayIcon,QMenu
from PyQt5.QtGui import QIcon
import sys
from PyQt5.QtWidgets import QApplication, QSystemTrayIcon,QMenu
from PyQt5.QtGui import QIcon
from firebase_admin import credentials
from firebase_admin import db
import os
import speech_recognition as sr
import win32console
import win32gui
import sys
import firebase_admin
import playsound as playsound
import pyttsx3
# ventana= win32console.GetConsoleWindow()
# win32gui.ShowWindow(ventana,0)
#<---------------------------------------------Rutas--------------------------------------------->
def Sonidito():
    """Play the IDA notification sound effect."""
    # BUG FIX: `import playsound as playsound` binds the *module*, so calling
    # playsound(...) directly raised TypeError ('module' object is not
    # callable). The function lives at playsound.playsound.
    playsound.playsound('C:\\Program Files (x86)\\IDA\\rougue-studios\\IDA\\resources\\SonidoIDA.mp3')
def IniciarIDA():
    """Launch the interactive ("simple mode") IDA assistant script."""
    os.startfile('C:\\Program Files (x86)\\IDA\\rougue-studios\\IDA\\scripts\\ida.py')
def IniciarIDAAutomatico():
    """Launch the automatic-mode IDA assistant script."""
    os.startfile('C:\\Program Files (x86)\\IDA\\rougue-studios\\IDA\\scripts\\ida_automatico.py')
# Text-to-speech engine shared by habla(); SAPI5 is the Windows backend.
engine = pyttsx3.init('sapi5')
voces = engine.getProperty('voices')
# NOTE(review): pyttsx3's setter key is usually 'voice' (singular); with
# 'voices' the selected voice may silently not change — confirm.
engine.setProperty('voices',voces[2].id)
engine.setProperty('rate',150)  # speaking rate
def habla(audio):
    """Speak *audio* aloud through the module-level TTS engine and echo it."""
    print(" ")
    engine.say(audio)
    print(f": {audio}")
    engine.runAndWait()  # blocks until the utterance finishes
def hacercomando():
    """Capture one spoken command from the microphone.

    Returns the recognized Spanish (es-mx) text lower-cased, or the sentinel
    string "none" when recognition fails for any reason.
    """
    comando = sr.Recognizer()
    with sr.Microphone() as source:
        # Calibrate against background noise before listening.
        comando.adjust_for_ambient_noise(source,duration=0.5)
        print("Escuchando...")
        comando.pause_threshold = 1
        comando.energy_threshold = 400
        audio = comando.listen(source)
    try:
        print("Entendiendo...")
        consulta = comando.recognize_google(audio,language='es-mx')
        print(f"Dijiste: {consulta}")
    except Exception as Error:
        # No speech / network / recognition failure: return sentinel.
        return "none"
    return consulta.lower()
#<--------------------------------------------OYE IDA-------------------------------------------->
#<-------------------------------------------Funciones------------------------------------------->
# Build the system-tray icon so the assistant is reachable from the taskbar.
app=QApplication(sys.argv)
TrayIcon= QSystemTrayIcon(QIcon('C:\\Program Files (x86)\\IDA\\rougue-studios\\IDA\\resources\\iconoTask1000.png'),parent=app)
TrayIcon.setToolTip('IDA')
TrayIcon.show()
menu=QMenu()
exitAction=menu.addAction('Salir')
exitAction.triggered.connect(app.quit)
TrayIcon.setContextMenu(menu)
# NOTE(review): `i` only distinguishes the very first pass; that pass runs the
# first branch AND falls through to the second hacercomando() below, so the
# first iteration listens twice — confirm this is intentional.
i=0
while True:
    if(i!=1):
        i+=1
        consulta = hacercomando()
        if 'oye' in consulta or 'modo simple' in consulta:
            IniciarIDA()
        elif 'modo automático' in consulta:
            IniciarIDAAutomatico()
        elif 'basta' in consulta:
            habla('Saliendo')
            break
        else:
            print("Falsa alarma...")
    # Main listen loop: dispatch on recognized keywords until 'basta'.
    consulta = hacercomando()
    if 'oye' in consulta or 'modo simple' in consulta:
        IniciarIDA()
    elif 'modo automático' in consulta:
        IniciarIDAAutomatico()
    elif 'estás ahí' in consulta or 'sigues ahí' in consulta or 'estás activada' in consulta:
        habla('Sí, aquí estoy')
    elif 'basta' in consulta:
        habla('Saliendo')
        break
    else:
        print("Falsa alarma...")
sys.exit()
| BryanTG1221/IDA | IDA/scripts/icon.py | icon.py | py | 3,113 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.startfile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.startfile",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyttsx3.init",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer"... |
44425282922 | import configparser
import os
class ProjectConfig:
    """Lazy process-wide reader for the config.ini next to this module.

    The parsed configuration is cached on the class, so only the first
    instantiation touches the filesystem.
    """

    # Shared RawConfigParser instance; None until first __init__.
    _cf = None

    def __init__(self):
        if ProjectConfig._cf is not None:
            return
        # BUG FIX: resolve the path *before* the try block. Previously, if an
        # exception fired before the local path variables were bound, the
        # except clause itself raised NameError while formatting its message.
        config_file = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'config.ini')
        try:
            ProjectConfig._cf = configparser.RawConfigParser()
            ProjectConfig._cf.read(config_file, encoding='utf-8')
            print(
                '读入config.ini配置:\n配置文件路径:{}\n配置文件版本:{}'.format(
                    config_file, ProjectConfig._cf.get('version', 'name')))
        except Exception as e:
            print("载入配置文件失败: " + config_file)
            print(e)

    def get_value(self, section, option):
        """Return the raw string for *option* in *section*.

        Logs and re-raises when the section/option is missing.
        """
        try:
            return ProjectConfig._cf.get(section, option)
        except Exception as e:
            print("配置文件中没有该配置内容: section[" + section + "] option: " + option)
            raise e
| cxb1004/emotion | config.py | config.py | py | 1,288 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "configparser.RawConfigPa... |
import cv2

# Demo: load an image and show it rotated by 90/270/180 degrees using
# OpenCV's constant-based cv2.rotate (no interpolation, lossless).
img = cv2.imread('araba.png')
print(type(img))
# <class 'numpy.ndarray'>
print(img.shape)
cv2.imshow('orgin', img)
# 90 degrees clockwise
img_rotate_90_clockwise = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
cv2.imshow('cv_rotate_90_clockwise.jpg', img_rotate_90_clockwise)
# True
# 90 degrees counter-clockwise
img_rotate_90_counterclockwise = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
cv2.imshow('cv_rotate_90_counterclockwise.jpg', img_rotate_90_counterclockwise)
# True
# 180 degrees
img_rotate_180 = cv2.rotate(img, cv2.ROTATE_180)
cv2.imshow('data/dst/lena_cv_rotate_180.jpg', img_rotate_180)
# True
# Wait for a key press before tearing the windows down.
cv2.waitKey()
cv2.destroyAllWindows() | MetehanYildiz25/ImageProcessing | Görüntü İşleme/aynalma_yöntem_2.py | aynalma_yöntem_2.py | py | 621 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.rotate",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.ROTATE_90_CLOCKWISE",
"line_nu... |
8785524037 | from __future__ import absolute_import, division, print_function
import tempfile
import pytest
import paayes
TEST_RESOURCE_ID = "file_123"
class TestFileUpload(object):
    """Tests for paayes.FileUpload list/retrieve/create and deserialization."""

    @pytest.fixture(scope="function")
    def setup_upload_api_base(self):
        # Swap the upload base in for the regular API base for the duration
        # of the test, then restore both afterwards.
        paayes.upload_api_base = paayes.api_base
        paayes.api_base = None
        yield
        paayes.api_base = paayes.upload_api_base
        paayes.upload_api_base = "https://files.paayes.com"

    def test_is_listable(self, request_mock):
        """GET /api/v1/files returns a list of FileUpload objects."""
        resources = paayes.FileUpload.list()
        request_mock.assert_requested("get", "/api/v1/files")
        assert isinstance(resources.data, list)
        assert isinstance(resources.data[0], paayes.FileUpload)

    def test_is_retrievable(self, request_mock):
        """GET /api/v1/files/<id> returns a single FileUpload."""
        resource = paayes.FileUpload.retrieve(TEST_RESOURCE_ID)
        request_mock.assert_requested("get", "/api/v1/files/%s" % TEST_RESOURCE_ID)
        assert isinstance(resource, paayes.FileUpload)

    def test_is_creatable(self, setup_upload_api_base, request_mock):
        """Creating a file posts multipart data against the upload base URL."""
        # Pin the multipart boundary so the Content-Type header is predictable.
        paayes.multipart_data_generator.MultipartDataGenerator._initialize_boundary = (
            lambda self: 1234567890
        )
        test_file = tempfile.TemporaryFile()
        resource = paayes.FileUpload.create(
            purpose="dispute_evidence",
            file=test_file,
            file_link_data={"create": True},
        )
        request_mock.assert_api_base(paayes.upload_api_base)
        request_mock.assert_requested(
            "post",
            "/api/v1/files",
            headers={
                "Content-Type": "multipart/form-data; boundary=1234567890"
            },
        )
        assert isinstance(resource, paayes.FileUpload)

    def test_deserializes_from_file(self):
        """The 'file' object type maps to the FileUpload class."""
        obj = paayes.util.convert_to_paayes_object({"object": "file"})
        assert isinstance(obj, paayes.FileUpload)

    def test_deserializes_from_file_upload(self):
        """The legacy 'file_upload' object type also maps to FileUpload."""
        obj = paayes.util.convert_to_paayes_object({"object": "file_upload"})
        assert isinstance(obj, paayes.FileUpload)
| paayes/paayes-python | tests/api_resources/test_file_upload.py | test_file_upload.py | py | 2,061 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "paayes.upload_api_base",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "paayes.api_base",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "paayes.api_base",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": ... |
14485940599 | from boggle import Boggle
from flask import Flask, request, render_template, session, jsonify
# Single game engine shared by all requests.
boggle_game = Boggle()
app = Flask(__name__)
app.config["SECRET_KEY"] = "Chicken fears Maximus"  # required for session cookies
# default page / board
@app.route("/")
def homepage():
"""Creating a new board for the game"""
board = boggle_game.make_board()
"""Adding the board variable to the session, and resetting a couple keys"""
session['board'] = board
highscore = session.get("highscore", 0)
nplays = session.get("nplays", 0)
"""Displaying the board on the screen"""
return render_template("index.html", board=board, highscore=highscore, nplays=nplays)
# checking the word
@app.route("/check-word")
def check_word():
"""Checking to see if the word chosen is a valid word (is in the words.txt file"""
word = request.args["word"]
board = session["board"]
response = boggle_game.check_valid_word(board, word)
"""Reurning the result of the check in a JSON object"""
return jsonify({'result': response})
# posting the score
@app.route("/post-score", methods=["POST"])
def post_score():
"""Fincing current score from the JSON object, and pulling highscore and number of plays from the session"""
score = request.json["score"]
highscore = session.get("highscore", 0)
nplays = session.get("nplays", 0)
"""Increasing the number of plays, and checking the high score"""
session['nplays'] = nplays + 1
session['highscore'] = max(score, highscore)
"""If a new high score is set..."""
return jsonify(brokeRecord=score > highscore) | shaunwo/19-flask-boggle | app.py | app.py | py | 1,600 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "boggle.Boggle",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.session.get",
"line_n... |
22462377193 | from sklearn.feature_extraction.text import TfidfVectorizer
from preprocess import *
from db_controller import *
from konlpy.tag import Okt, Kkma, Mecab
from numpy.linalg import norm
from numpy import dot
import numpy as np
import os
import sys
def text_slice(documents:list): # db에서 꺼낸 기사 데이터 정제 -> [' 기사본문 ', ' 기사본문 ', ... ' 기사본문 ']의 형태
    """Strip and noise-clean the first field of each DB row into a flat list."""
    cleaner = Cleaning_Noise()
    return [cleaner.cleaning(row[0].strip('\n')) for row in documents]
def train_set(db_names:list, section:str):
    """Collect article titles for *section* from the given DBs as the corpus."""
    titles = select_title(db_names, section)
    return list(titles)
def tf_idf(data:list):
    """Fit a TF-IDF vectorizer on *data* and return the dense term matrix."""
    vectorizer = TfidfVectorizer().fit(data)
    return vectorizer.transform(data).toarray()
def cos_sim(matrix:list):
    """Cosine similarity between the first two row vectors of *matrix*."""
    first, second = matrix[0], matrix[1]
    return dot(first, second) / (norm(first) * norm(second))
def most_similar(titles, input_data:str):
    """Find the existing title most similar (TF-IDF cosine) to *input_data*.

    Appends a tab-separated result line to 'new_lab.txt' instead of
    returning the match.
    """
    with open('new_lab.txt', 'a', encoding='utf-8') as file:
        # Vectorize all titles plus the query (query is the last row).
        new = []
        new += titles
        new.append(input_data)
        tf_idfv = TfidfVectorizer().fit(new)
        tf_matrix = tf_idfv.transform(new).toarray()
        num_set = list(range(len(tf_matrix)-1))
        # Linear scan for the highest cosine similarity against the query row.
        max, max_idx = 0, 0
        for idx in num_set:
            data = [tf_matrix[-1], tf_matrix[idx]]
            value = cos_sim(data)
            if value > max:
                max = value
                max_idx = idx
            else:
                pass
        file.write('입력값:' + '\t' + input_data + '\t' + '기존값:' + titles[max_idx] + '의 유사도' + ':' + str(max) + '\n')
        new.remove(input_data)
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 31,
"usage_type": "call"
},
{
"api_n... |
26070430980 | import pytest
from django.core.files.base import ContentFile
try:
from wagtail.core.models import Page
except ImportError:
from wagtail.wagtailcore.models import Page
from wagtail_svgmap.models import ImageMap
from wagtail_svgmap.tests.utils import EXAMPLE2_SVG_DATA, IDS_IN_EXAMPLE2_SVG, IDS_IN_EXAMPLE_SVG
@pytest.mark.django_db
def test_id_caching(example_svg_upload):
    """Creating an ImageMap caches the SVG's element IDs and pixel size."""
    map = ImageMap.objects.create(svg=example_svg_upload)
    assert map.ids == IDS_IN_EXAMPLE_SVG
    assert map.size == (588, 588)
@pytest.mark.django_db
def test_image_replacing(example_svg_upload):
    """Saving a new SVG file refreshes the cached element IDs."""
    map = ImageMap.objects.create(svg=example_svg_upload)
    assert map.ids == IDS_IN_EXAMPLE_SVG
    # Replace the file and persist; the ID cache must follow the new content.
    map.svg.save('example2.svg', ContentFile(EXAMPLE2_SVG_DATA))
    map.save()
    map.refresh_from_db()
    assert map.ids == IDS_IN_EXAMPLE2_SVG
@pytest.mark.django_db
def test_image_replacing_with_region(example_svg_upload):
    """
    Test that replacing an image with a new one won't crash if the element IDs change.
    Refs https://github.com/City-of-Helsinki/wagtail-svgmap/issues/11 (#11)
    """
    map = ImageMap.objects.create(svg=example_svg_upload)
    map.regions.create(element_id='red', link_external='https://google.com/')
    # Swap to an SVG that no longer contains the 'red' element.
    map.svg.save('example2.svg', ContentFile(EXAMPLE2_SVG_DATA))
    # The stale region must simply be ignored during rendering, not crash it.
    assert 'https://google.com' not in map.rendered_svg  # can't be there as 'red' is not there
@pytest.mark.django_db
def test_rendering(root_page, example_svg_upload, dummy_wagtail_doc):
    """The rendered SVG links regions to external URLs, pages and documents."""
    page = Page(title="nnep", slug="nnep")
    page.set_url_path(root_page)
    root_page.add_child(instance=page)
    page.save()
    assert page.url
    map = ImageMap.objects.create(svg=example_svg_upload)
    # One region per link type: external URL, Wagtail page, document.
    map.regions.create(element_id='green', link_external='/foobar', target='_blank')
    map.regions.create(element_id='blue', link_page=page, target='_top')
    map.regions.create(element_id='red', link_document=dummy_wagtail_doc)
    svg = map.rendered_svg
    assert '/foobar' in svg
    assert '_blank' in svg
    assert 'nnep' in svg
    assert '_top' in svg
    assert ('documents/%s' % dummy_wagtail_doc.pk) in svg
@pytest.mark.django_db
def test_auto_recache(root_page, example_svg_upload):
    """Saving a linked page re-renders maps that reference it (post_save hook)."""
    page = Page(title="nnep", slug="nnep")
    page.set_url_path(root_page)
    root_page.add_child(instance=page)
    page.save()
    assert page.url
    map = ImageMap.objects.create(svg=example_svg_upload)
    map.regions.create(element_id='blue', link_page=page)
    map.recache_svg(save=True)
    assert 'nnep' in map.rendered_svg
    # Changing the slug should transparently refresh the cached SVG markup.
    page.slug = 'ffflop'
    page.save()  # The `post_save` triggers will get called...
    assert 'ffflop' in ImageMap.objects.get(pk=map.pk).rendered_svg
| City-of-Helsinki/wagtail-svgmap | wagtail_svgmap/tests/test_model.py | test_model.py | py | 2,694 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "wagtail_svgmap.models.ImageMap.objects.create",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "wagtail_svgmap.models.ImageMap.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "wagtail_svgmap.models.ImageMap",
"line_number": 15,... |
72000249953 | import time
import math
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import pytest
def calc():
    """Return the Stepik task answer: log of the current unix timestamp, as text."""
    return str(math.log(int(time.time())))
@pytest.fixture(scope="function")
def browser():
print("\nstart browser for test..")
browser = webdriver.Chrome()
yield browser
print("\nquit browser..")
browser.quit()
@pytest.mark.parametrize('link', ["https://stepik.org/lesson/236895/step/1","https://stepik.org/lesson/236896/step/1",
                                  "https://stepik.org/lesson/236897/step/1",
                                  "https://stepik.org/lesson/236898/step/1",
                                  "https://stepik.org/lesson/236899/step/1",
                                  "https://stepik.org/lesson/236903/step/1",
                                  "https://stepik.org/lesson/236904/step/1",
                                  "https://stepik.org/lesson/236905/step/1"])
def test_guest_should_see_login_link(browser, link):
    """Submit the computed answer on each lesson page and expect 'Correct!'."""
    browser.get(link)
    # Wait for the answer textarea to appear, then type the computed answer.
    area = WebDriverWait(browser, 15).until(EC.presence_of_element_located((By.TAG_NAME, "textarea")))
    answer = calc()
    area.send_keys(answer)
    button = browser.find_element_by_css_selector("button.submit-submission")
    button.click()
    #hint = browser.find_element_by_css_selector("pre.smart-hints__hint")
    # The feedback hint is rendered asynchronously after submission.
    hint = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, "pre.smart-hints__hint")))
    hintMessage = hint.text
    assert hintMessage == "Correct!", f"Should be Correct, but was {hintMessage}"
| lexeg/stepik---auto-tests-course | part#3/lesson-6_step-3.py | lesson-6_step-3.py | py | 1,482 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.log",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
... |
# libraries
import json
from datetime import datetime

# state
liste_taches = []
tache = {
    "nom": '',
    "deadline": '',
    "statut": '',
}
utilisateurs = {}

# ///////LOGIN////////
nom_utilisateur = input("Entrez votre nom s'il vous plait :")
if nom_utilisateur in utilisateurs:
    nom_utilisateur = input("Nom deja utilisé\nEntrez votre nom s'il vous plait:")
elif nom_utilisateur not in utilisateurs:
    utilisateurs['nom'] = nom_utilisateur
print("Bonjour {} et bienvenue sur TO DO List".format(utilisateurs['nom']))
# attach the user's own task record
utilisateurs['tache'] = tache

# ///////ACTIONS////////
while True:
    print("//////////////////////////////////////////")
    print("Bienvenue dans le gestionnaire des tâches")
    print("1 - Ajout d'une tâche ")
    print("2 - Affichage des tâches")
    print("3 - Modification d'une tâche")
    print("4 - Enregistrement des tâches")
    print("5 - Quitter TO DO List")
    user_action = input("Entrer une action: ")
    while user_action != "1" and user_action != "2" and user_action != "3" and user_action != "4" and user_action != "5":
        user_action = input("Action non reconnu\nEntrer une action: ")
    # 1 - add a task
    if user_action == "1":
        print("Veuillez ajouter une tâche ")
        input_nom = input("Entrer le nom: ")
        input_deadline = input("Entrer le deadline (AAAA-MM-JJ HH:MM): ")
        tache['nom'] = input_nom
        # BUG FIX: the original stored the `datetime` *class* here instead of
        # the entered date; parse the string in the advertised format.
        try:
            tache['deadline'] = datetime.strptime(input_deadline, "%Y-%m-%d %H:%M")
        except ValueError:
            print("Format de date invalide, deadline non enregistrée")
    # 2 - display tasks
    elif user_action == "2":
        liste_taches.append(tache['nom'])
        print("{} Votre To do list {} :".format(nom_utilisateur, liste_taches))
    # 3 - modify / inspect a task's deadline
    elif user_action == "3":
        print('Modification d\'une tâche:')
        # BUG FIX: the original did datetime(input_deadline) and
        # `datetime.now - deadline` (missing call), both of which raised
        # TypeError; compute the remaining delay from the stored deadline.
        if tache['deadline']:
            delai = datetime.now() - tache['deadline']
            print("Délai écoulé depuis la deadline: {}".format(delai))
        else:
            print("Aucune deadline enregistrée")
    # 4 - save tasks (not implemented yet)
    elif user_action == "4":
        print('Enregistrement d\'une tâche:')
    # 5 - quit
    elif user_action == "5":
        break
| DTC-Formation/test-1-3-Woutnak | tp.py | tp.py | py | 2,680 | python | fr | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromisoformat",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "d... |
1502144500 | from typing import List
def rotate_clockwise(matrix: List[List[int]]) -> None:
    """
    Rotate a nxn 2D int matrix 90 degrees clockwise in place

    Args:
        matrix: A nxn 2D int matrix

    Returns:
        matrix being roated 90 degree clockwise in place

    Raises:
        TypeError: If the matrix is not nxn 2D matrix
    """
    if not isinstance(matrix, list):
        raise TypeError("matrix argument is not a List.")
    if not isinstance(matrix[0], list):
        raise TypeError("matrix argument is not a 2D List")
    if len(matrix) != len(matrix[0]):
        raise TypeError("matrix argument is not a sqaure 2D List")
    size = len(matrix)
    # Step 1: transpose (reflect across the main diagonal) in place.
    for r in range(size):
        for c in range(r + 1, size):
            matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]
    # Step 2: mirror each row horizontally; transpose + mirror == 90° CW.
    for row in matrix:
        row.reverse()
return | ucsd-ets/python-docker-example | pyapp/rotate_clockwise.py | rotate_clockwise.py | py | 1,014 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 3,
"usage_type": "name"
}
] |
20495855214 | import torch
from typing import Tuple
def precompute_freqs_cis(dim: int, end: int, theta: float) -> torch.Tensor:
    """Precompute RoPE rotation factors as a (end, dim//2) complex64 tensor."""
    # Inverse frequencies for each pair of embedding dimensions.
    exponents = torch.arange(0, dim, 2)[: (dim // 2)].float() / dim
    inv_freq = 1.0 / (theta ** exponents)
    # Outer product position x frequency gives the rotation angle per slot.
    positions = torch.arange(end, device=inv_freq.device)  # type: ignore
    angles = torch.outer(positions, inv_freq).float()  # type: ignore
    # Unit-magnitude complex numbers e^{i * angle}.
    return torch.polar(torch.ones_like(angles), angles)  # complex64
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
freqs_cis = freqs_cis[:, None, :]
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(2)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(2)
return xq_out.type_as(xq), xk_out.type_as(xk)
| mistralai/mistral-src | mistral/rope.py | rope.py | py | 882 | python | en | code | 4,296 | github-code | 1 | [
{
"api_name": "torch.arange",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.outer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.polar",
"line_number": 9... |
19037842932 | import re
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from imblearn.under_sampling \
import (RandomUnderSampler,
TomekLinks,
InstanceHardnessThreshold)
from imblearn.over_sampling \
import (ADASYN,
RandomOverSampler,
SMOTE)
from imblearn.combine import SMOTETomek
from experiments_workflows.isolation_forest import IsolationForestUnsupervised
from experiments_workflows.general import prepare_training_set, xy_retrieval
from threshold.optimization import clf_threshold_selection
from experiments_workflows.ll import LayeredLearning
class Workflows:
    """Per-fold experiment runner: builds train/validation/test episode sets
    and evaluates several imbalance-aware classification workflows.

    NOTE(review): the default arguments below (RandomForestClassifier(),
    SMOTE()) are mutable objects evaluated once at definition time and are
    fitted in place, so they are shared across calls — confirm intentional.
    """

    def __init__(self,
                 dataset,
                 train_index,
                 test_index,
                 ep_model,
                 resample_size,
                 resample_on_positives):
        # Training episodes may be resampled; test episodes never are.
        training = \
            ep_model.subset_episodes(dataset=dataset,
                                     ind=train_index,
                                     resample_episodes=True,
                                     resample_on_positives=resample_on_positives,
                                     resample_size=resample_size)

        testing = \
            ep_model.subset_episodes(dataset=dataset,
                                     ind=test_index,
                                     resample_episodes=False)

        # Split off an inner train / validation pair for threshold tuning.
        training_df, train_sub_df, validation_df = prepare_training_set(training)

        # Median imputation, fitted later on the relevant training frame.
        self.imputation_model = SimpleImputer(strategy='median')
        self.training = training
        self.training_df = training_df
        self.train_sub_df = train_sub_df
        self.validation_df = validation_df
        self.testing = testing
        self.train_index = train_index
        self.test_index = test_index
        self.ep_model = ep_model
        # Decision threshold; tuned per workflow on the validation split.
        self.thr = 0

    def standard_classification(self,
                                model=RandomForestClassifier(),
                                resample_distribution: bool = True,
                                resampling_function=SMOTE(),
                                probabilistic_output: bool = False,
                                use_f1: bool = False):
        """Train one classifier (optionally on resampled data) and predict
        per test patient; returns (hard preds, prob preds, true labels) dicts
        keyed by patient id."""
        X_tr, y_tr, _, _ = xy_retrieval(self.training_df, self.ep_model.target_variable)
        X_subtr, y_subtr, _, _ = xy_retrieval(self.train_sub_df, self.ep_model.target_variable)
        X_vld, y_vld, _, _ = xy_retrieval(self.validation_df, self.ep_model.target_variable)

        # Imputer is fitted on the inner training split only.
        self.imputation_model.fit(X_subtr)

        print(pd.Series(y_tr).value_counts() / len(y_tr))

        if resample_distribution:
            print('fit res')
            X_tr, y_tr = resampling_function.fit_resample(X_tr, y_tr)
            X_subtr, y_subtr = resampling_function.fit_resample(X_subtr, y_subtr)

        print(pd.Series(y_tr).value_counts() / len(y_tr))

        # Tune the decision threshold on the validation split.
        print('fit thr')
        self.thr = clf_threshold_selection(X_subtr, y_subtr, X_vld, y_vld, model, use_f1)
        print(f'Best threshold is {self.thr}')

        model.fit(X_tr, y_tr)

        y_hat_values, y_hat_prob_values, y_values = {}, {}, {}
        for k in self.testing:
            patient_k = self.testing[k]
            # Rows with missing values are dropped before prediction.
            patient_k = patient_k.dropna().reset_index(drop=True)

            X_ts, y_ts, _, _ = xy_retrieval(patient_k, self.ep_model.target_variable)
            print(X_ts.shape)
            if X_ts.shape[0] < 1:
                continue

            X_ts_t = self.imputation_model.transform(X_ts)
            X_ts_t = pd.DataFrame(X_ts_t)
            X_ts_t.columns = X_ts.columns

            if probabilistic_output:
                # Positive-class probability, thresholded at the tuned value.
                y_hat_k_p = model.predict_proba(X_ts_t)
                y_hat_k_p = np.array([x[1] for x in y_hat_k_p])
                y_hat_k = (y_hat_k_p > self.thr).astype(int)
            else:
                y_hat_k = model.predict(X_ts_t)
                y_hat_k_p = y_hat_k.copy()

            y_hat_values[k] = y_hat_k
            y_hat_prob_values[k] = y_hat_k_p
            y_values[k] = y_ts.values

        return y_hat_values, y_hat_prob_values, y_values

    def ad_hoc_rule(self):
        """Baseline: predict using the precomputed '_dummy' indicator column
        that mirrors the target variable's name."""
        target_ah = re.sub('_int$', '_dummy', self.ep_model.target_variable)

        y_hat_values, y_values = {}, {}
        for k in self.testing:
            patient_k = self.testing[k]
            patient_k = patient_k.dropna().reset_index(drop=True)

            X_ts, y_ts, _, _ = xy_retrieval(patient_k, self.ep_model.target_variable)
            if X_ts.shape[0] < 1:
                continue

            # The rule's output is already stored in the dummy column.
            y_hat_k = patient_k[target_ah].values

            y_hat_values[k] = y_hat_k
            y_values[k] = y_ts.values

        return y_hat_values, y_values

    def layered_learning(self,
                         model_t1=RandomForestClassifier(),
                         model_t2=RandomForestClassifier(),
                         resample_distribution: bool = True,
                         resampling_function=SMOTE(),
                         probabilistic_output: bool = False,
                         use_f1: bool = False):
        """Two-stage (layered) workflow: task 1 / task 2 models are trained on
        splits derived from the pre-clinical-event labels, with a jointly
        tuned threshold."""
        X_tr, y_tr, y_pce_tr, _ = xy_retrieval(self.training_df, self.ep_model.target_variable)

        self.imputation_model.fit(X_tr)

        # Split the training data into the two layered-learning tasks.
        X_t1_tr, y_t1_tr, X_t2_tr, y_t2_tr = LayeredLearning.formalization(X_tr, y_tr, y_pce_tr)

        best_thr = LayeredLearning.threshold_opt(X_tr=X_tr,
                                                 y_tr=y_tr,
                                                 y_pce_tr=y_pce_tr,
                                                 algo_t1=model_t1,
                                                 algo_t2=model_t2,
                                                 use_f1=use_f1)

        print('best_thr')
        print(best_thr)

        print(pd.Series(y_t1_tr).value_counts() / len(y_t1_tr))
        print(pd.Series(y_t2_tr).value_counts() / len(y_t2_tr))

        if resample_distribution:
            X_t1_tr, y_t1_tr = resampling_function.fit_resample(X_t1_tr, y_t1_tr)
            X_t2_tr, y_t2_tr = resampling_function.fit_resample(X_t2_tr, y_t2_tr)

        model_t1.fit(X_t1_tr, y_t1_tr)
        model_t2.fit(X_t2_tr, y_t2_tr)

        y_hat_values, y_hat_prob_values, y_values = {}, {}, {}
        for k in self.testing:
            patient_k = self.testing[k]

            X_ts, y_ts, _, _ = xy_retrieval(patient_k, self.ep_model.target_variable)
            print(X_ts.shape)
            if X_ts.shape[0] < 1:
                continue

            X_ts_t = self.imputation_model.transform(X_ts)

            if probabilistic_output:
                y_hat_k_p, y_hat_k_p1, y_hat_k_p2 = \
                    LayeredLearning.predict_proba(X_ts_t,
                                                  model_t1=model_t1,
                                                  model_t2=model_t2)
                y_hat_k = np.asarray(y_hat_k_p > best_thr).astype(int)
            else:
                y_hat_k, _, _ = LayeredLearning.predict(X_ts_t, model_t1=model_t1, model_t2=model_t2)
                y_hat_k_p = y_hat_k.copy()

            y_hat_values[k] = y_hat_k
            y_hat_prob_values[k] = y_hat_k_p
            y_values[k] = y_ts.values

        return y_hat_values, y_hat_prob_values, y_values

    def isolation_forest(self,
                         probabilistic_output: bool = False,
                         use_f1: bool = False):
        """Unsupervised baseline: isolation forest anomaly scores thresholded
        on the validation split."""
        X_tr, y_tr, _, _ = xy_retrieval(self.training_df, self.ep_model.target_variable)
        X_subtr, y_subtr, _, _ = xy_retrieval(self.train_sub_df, self.ep_model.target_variable)
        X_vld, y_vld, _, _ = xy_retrieval(self.validation_df, self.ep_model.target_variable)

        self.imputation_model.fit(X_subtr)

        print(pd.Series(y_tr).value_counts() / len(y_tr))

        model = IsolationForestUnsupervised()

        print('fit thr')
        self.thr = clf_threshold_selection(X_subtr, y_subtr, X_vld, y_vld, model, use_f1)
        print(f'Best threshold is {self.thr}')

        model.fit(X_tr, y_tr)

        y_hat_values, y_hat_prob_values, y_values = {}, {}, {}
        for k in self.testing:
            patient_k = self.testing[k]
            patient_k = patient_k.dropna().reset_index(drop=True)

            X_ts, y_ts, _, _ = xy_retrieval(patient_k, self.ep_model.target_variable)
            print(X_ts.shape)
            if X_ts.shape[0] < 1:
                continue

            X_ts_t = self.imputation_model.transform(X_ts)
            X_ts_t = pd.DataFrame(X_ts_t)
            X_ts_t.columns = X_ts.columns

            if probabilistic_output:
                y_hat_k_p = np.asarray(model.predict_proba(X_ts_t))
                # y_hat_k_p = np.array([x[1] for x in y_hat_k_p])
                y_hat_k = (y_hat_k_p > self.thr).astype(int)
            else:
                y_hat_k = model.predict(X_ts_t)
                y_hat_k_p = y_hat_k.copy()

            y_hat_values[k] = y_hat_k
            y_hat_prob_values[k] = y_hat_k_p
            y_values[k] = y_ts.values

        return y_hat_values, y_hat_prob_values, y_values
| vcerqueira/activity_monitoring_mimic | experiments_workflows/workflows.py | workflows.py | py | 9,133 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "experiments_workflows.general.prepare_training_set",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sklearn.impute.SimpleImputer",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestClassifier",
"line_number": 61,
... |
71940530913 | # -*- coding: utf-8 -*-
"""Tests for the LifeScan OneTouch Ultra 2 driver."""
__author__ = 'Diego Elio Pettenò'
__email__ = 'flameeyes@flameeyes.eu'
__copyright__ = 'Copyright © 2013, Diego Elio Pettenò'
__license__ = 'MIT'
import os
import sys
import unittest
import mock
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from glucometerutils.drivers import otultra2
from glucometerutils.support import lifescan
from glucometerutils import exceptions
class TestOTUltra2(unittest.TestCase):
    """Protocol-level tests for the OneTouch Ultra 2 driver using a mocked
    serial port."""

    def setUp(self):
        # Patch serial.Serial so no real device is needed; each test feeds
        # the driver a canned response via self.mock_readline.
        self.addCleanup(mock.patch.stopall)
        mock_serial = mock.patch('serial.Serial').start()
        self.mock_readline = mock_serial.return_value.readline
        self.device = otultra2.Device('mockdevice')

    def _set_return_string(self, string):
        """Make the mocked serial port return *string* (ASCII-encoded)."""
        self.mock_readline.return_value = bytes(string, 'ascii')

    def test_checksum(self):
        """The 16-bit checksum matches known-good reference values."""
        checksum = otultra2._calculate_checksum(bytes('T', 'ascii'))
        self.assertEqual(0x0054, checksum)

        checksum = otultra2._calculate_checksum(
            bytes('T "SAT","08/03/13","22:12:00   "', 'ascii'))
        self.assertEqual(0x0608, checksum)

    def test_missing_checksum(self):
        """A response without any checksum raises MissingChecksum."""
        self._set_return_string('INVALID')
        self.assertRaises(lifescan.MissingChecksum,
                          self.device.get_serial_number)

    def test_short_response(self):
        """A response shorter than the protocol minimum is rejected."""
        self._set_return_string('.\r')
        self.assertRaises(exceptions.InvalidResponse,
                          self.device.get_serial_number)

    def test_invalid_response(self):
        """A well-formed frame with the wrong payload type is rejected."""
        self._set_return_string('% 2500\r')
        self.assertRaises(exceptions.InvalidResponse,
                          self.device.get_serial_number)

    def test_invalid_serial_number(self):
        """A serial number with invalid characters is rejected."""
        self._set_return_string('@ "12345678O" 0297\r')
        self.assertRaises(lifescan.InvalidSerialNumber,
                          self.device.get_serial_number)

    def test_invalid_checksum(self):
        """A numeric checksum that doesn't match the payload is rejected."""
        self._set_return_string('% 1337\r')
        self.assertRaises(exceptions.InvalidChecksum,
                          self.device.get_serial_number)

    def test_broken_checksum(self):
        """A non-hexadecimal checksum field counts as a missing checksum."""
        self._set_return_string('% 13AZ\r')
        self.assertRaises(lifescan.MissingChecksum,
                          self.device.get_serial_number)
if __name__ == '__main__':
    # Allow running this test module directly without a test runner.
    unittest.main()
| hrishioa/Juventas | Code/glucometerutils/test/test_otultra2.py | test_otultra2.py | py | 2,392 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
22273310545 | import urllib.request
import json
import os
import ssl
from decouple import config
def allowSelfSignedHttps(allowed):
    """Disable client-side TLS certificate verification for HTTPS requests.

    Does nothing unless *allowed* is truthy, the PYTHONHTTPSVERIFY environment
    variable is unset/empty, and this Python build exposes
    ``ssl._create_unverified_context``.
    """
    if not allowed:
        return
    if os.environ.get("PYTHONHTTPSVERIFY", ""):
        # Explicit opt-in to verification wins over the bypass request.
        return
    unverified = getattr(ssl, "_create_unverified_context", None)
    if unverified:
        ssl._create_default_https_context = unverified
def Calculate(form_data):
    """POST *form_data* to the Azure ML scoring endpoint and return the raw response.

    Parameters
    ----------
    form_data : dict
        A single input record in the shape the deployed model expects.

    Returns
    -------
    bytes | None
        The raw scoring response body, or None when the HTTP request fails
        (the failure details are printed for debugging).

    Raises
    ------
    ValueError
        If no API key is configured. (ValueError subclasses Exception, so
        callers catching Exception keep working.)
    """
    # Explicit import: the original relied on `urllib.error` being reachable as
    # a side effect of importing urllib.request, which is a CPython detail.
    import urllib.error

    # Needed if the scoring service uses a self-signed certificate.
    allowSelfSignedHttps(True)

    # The endpoint expects {"Inputs": {"data": [...]}} JSON; see
    # https://docs.microsoft.com/azure/machine-learning/how-to-deploy-advanced-entry-script
    data = {
        "Inputs": {"data": [form_data]},
        "GlobalParameters": 0.0,
    }
    print(data)
    body = json.dumps(data).encode()

    url = "http://c12792b1-ec04-4c37-ac90-8140af8e7225.centralindia.azurecontainer.io/score"

    # Primary/secondary key or AMLToken for the endpoint, read via python-decouple.
    api_key = config("API_KEY")
    if not api_key:
        raise ValueError("A key should be provided to invoke the endpoint")

    headers = {
        "Content-Type": "application/json",
        "Authorization": ("Bearer " + api_key),
    }
    req = urllib.request.Request(url, body, headers)
    try:
        response = urllib.request.urlopen(req)
        return response.read()
    except urllib.error.HTTPError as error:
        print("The request failed with status code: " + str(error.code))
        # Headers include the request ID and timestamp — useful for debugging.
        print(error.info())
        print(error.read().decode("utf8", "ignore"))
| dhrumilpatel30/MachineLearingDemo | mlapp/mlconfigration.py | mlconfigration.py | py | 1,901 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_default_https_context",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": ... |
22963801181 | from __future__ import unicode_literals
import unittest
import os
import dxfgrabber
filename = os.path.join(os.path.dirname(__file__), "assure_3d_coords.dxf")
DWG = dxfgrabber.readfile(filename, {"assure_3d_coords": True})
pcoords = [(1., 1., 0.), (-3., 2., 0.), (7., -1., 0.), (10., 10., 0.)]
class TestAssure3dCoords(unittest.TestCase):
    """Check that the assure_3d_coords option pads 2D entity coordinates with z=0."""

    @staticmethod
    def _first_of(dxftype):
        # Return the first entity of the given DXF type from the shared drawing.
        return next(e for e in DWG.entities if e.dxftype == dxftype)

    def test_line(self):
        line = self._first_of('LINE')
        self.assertEqual((1., 1., 0.), line.start)
        self.assertEqual((2., 2., 0.), line.end)

    def test_circle(self):
        self.assertEqual((12., 24., 0.), self._first_of('CIRCLE').center)

    def test_lwpolyline(self):
        # LWPOLYLINE can not return 3d coordinates (x, y, start_width, end_width, bulge)
        self.assertEqual(pcoords, self._first_of('LWPOLYLINE').points)

    def test_polyline2d(self):
        self.assertEqual(pcoords, list(self._first_of('POLYLINE').points))
# Allow running this test module directly: `python test_assure_3d_coords.py`.
if __name__ == '__main__':
    unittest.main()
| mozman/dxfgrabber | tests/test_assure_3d_coords.py | test_assure_3d_coords.py | py | 1,159 | python | en | code | 63 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dxfgrabber.readfile",
"lin... |
1585935309 | from typing import Optional, List
from highcharts_core.options.series.base import SeriesBase
from highcharts_core.options.series.data.treegraph import TreegraphData
from highcharts_core.options.plot_options.treegraph import TreegraphOptions
from highcharts_core.utility_functions import mro__to_untrimmed_dict
class TreegraphSeries(SeriesBase, TreegraphOptions):
    """General options to apply to all :term:`Treegraph` series types.

    A treegraph visualizes a relationship between ancestors and descendants with a
    clear parent-child relationship, e.g. a family tree or a directory structure.

    .. figure:: ../../../_static/treegraph-example.png
      :alt: Treegraph Example Chart
      :align: center

    """

    def __init__(self, **kwargs):
        # All configuration is handled by the SeriesBase/TreegraphOptions MRO.
        super().__init__(**kwargs)

    @property
    def data(self) -> Optional[List[TreegraphData]]:
        """Collection of data that represents the series. Defaults to
        :obj:`None <python:None>`.

        While the series type returns a collection of :class:`TreegraphData` instances,
        it accepts as input:

        .. tabs::

          .. tab:: 1D Array of Arrays

            A one-dimensional collection where each member of the collection is itself
            a collection of data points.

            .. note::

              If using the Array of Arrays pattern you *must* set
              :meth:`.keys <highcharts_core.options.series.treegraph.TreegraphSeries.keys>` to indicate
              which value in the inner array corresponds to
              :meth:`.id <highcharts_core.options.series.treegraph.TreegraphSeries.id>`,
              :meth:`.parent <highcharts_core.options.series.treegraph.TreegraphSeries.parent>`, or
              :meth:`.name <highcharts_core.options.series.treegraph.TreegraphSeries.name>`.

          .. tab:: Object Collection

            A one-dimensional collection of :class:`TreegraphData` objects or
            :class:`dict <python:dict>` instances coercable to :class:`TreegraphData`

        :rtype: :class:`list <python:list>` of :class:`TreegraphData` or
          :obj:`None <python:None>`
        """
        return self._data

    @data.setter
    def data(self, value):
        # Falsy input (None, empty list, ...) clears the series data entirely;
        # anything else is coerced through TreegraphData.from_array().
        if not value:
            self._data = None
        else:
            self._data = TreegraphData.from_array(value)

    @classmethod
    def _get_kwargs_from_dict(cls, as_dict):
        """Translate a Highcharts-style (camelCase) dict into constructor kwargs.

        Missing keys default to None so the constructor applies its own defaults.
        """
        kwargs = {
            'accessibility': as_dict.get('accessibility', None),
            'allow_point_select': as_dict.get('allowPointSelect', None),
            'animation': as_dict.get('animation', None),
            'class_name': as_dict.get('className', None),
            'clip': as_dict.get('clip', None),
            'color': as_dict.get('color', None),
            'cursor': as_dict.get('cursor', None),
            'custom': as_dict.get('custom', None),
            'dash_style': as_dict.get('dashStyle', None),
            'data_labels': as_dict.get('dataLabels', None),
            'description': as_dict.get('description', None),
            'enable_mouse_tracking': as_dict.get('enableMouseTracking', None),
            'events': as_dict.get('events', None),
            'include_in_data_export': as_dict.get('includeInDataExport', None),
            'keys': as_dict.get('keys', None),
            'label': as_dict.get('label', None),
            'legend_symbol': as_dict.get('legendSymbol', None),
            'linked_to': as_dict.get('linkedTo', None),
            'marker': as_dict.get('marker', None),
            'on_point': as_dict.get('onPoint', None),
            'opacity': as_dict.get('opacity', None),
            'point': as_dict.get('point', None),
            'point_description_formatter': as_dict.get('pointDescriptionFormatter', None),
            'selected': as_dict.get('selected', None),
            'show_checkbox': as_dict.get('showCheckbox', None),
            'show_in_legend': as_dict.get('showInLegend', None),
            'skip_keyboard_navigation': as_dict.get('skipKeyboardNavigation', None),
            'sonification': as_dict.get('sonification', None),
            'states': as_dict.get('states', None),
            'sticky_tracking': as_dict.get('stickyTracking', None),
            'tooltip': as_dict.get('tooltip', None),
            'turbo_threshold': as_dict.get('turboThreshold', None),
            'visible': as_dict.get('visible', None),
            'animation_limit': as_dict.get('animationLimit', None),
            'boost_blending': as_dict.get('boostBlending', None),
            'boost_threshold': as_dict.get('boostThreshold', None),
            'color_index': as_dict.get('colorIndex', None),
            'crisp': as_dict.get('crisp', None),
            'crop_threshold': as_dict.get('cropThreshold', None),
            'find_nearest_point_by': as_dict.get('findNearestPointBy', None),
            'get_extremes_from_all': as_dict.get('getExtremesFromAll', None),
            'relative_x_value': as_dict.get('relativeXValue', None),
            'soft_threshold': as_dict.get('softThreshold', None),
            'step': as_dict.get('step', None),
            'point_interval': as_dict.get('pointInterval', None),
            'point_interval_unit': as_dict.get('pointIntervalUnit', None),
            'point_start': as_dict.get('pointStart', None),
            'stacking': as_dict.get('stacking', None),
            # Treegraph-specific options below.
            'allow_traversing_tree': as_dict.get('allowTraversingTree', None),
            'collapse_button': as_dict.get('collapseButton', None),
            'color_by_point': as_dict.get('colorByPoint', None),
            'fill_space': as_dict.get('fillSpace', None),
            'link': as_dict.get('link', None),
            'reversed': as_dict.get('reversed', None),
            'data': as_dict.get('data', None),
            'id': as_dict.get('id', None),
            'index': as_dict.get('index', None),
            'legend_index': as_dict.get('legendIndex', None),
            'name': as_dict.get('name', None),
        }
        return kwargs

    def _to_untrimmed_dict(self, in_cls = None) -> dict:
        # Merge the untrimmed dict representations contributed by every class
        # in the MRO (SeriesBase and TreegraphOptions).
        untrimmed = mro__to_untrimmed_dict(self, in_cls = in_cls) or {}
        return untrimmed
| highcharts-for-python/highcharts-core | highcharts_core/options/series/treegraph.py | treegraph.py | py | 6,249 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "highcharts_core.options.series.base.SeriesBase",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "highcharts_core.options.plot_options.treegraph.TreegraphOptions",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number":... |
17713082668 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 11:54:08 2020
@author: dohertyguirand
"""
from bert import Ner
import urllib.request
import io
import PyPDF2 as p2
import sys
# Redirect all subsequent prints into the output file 'ex13'.
sys.stdout = open('ex13', 'w')

url = 'https://pdf.usaid.gov/pdf_docs/PA00WPD5.pdf'
# FIX: the original bound the downloaded bytes to the name `open`, shadowing
# the builtin open(); use a dedicated variable instead.
pdf_bytes = urllib.request.urlopen(url).read()
memoryFile = io.BytesIO(pdf_bytes)
pdfread = p2.PdfFileReader(memoryFile)
docInfo = pdfread.getNamedDestinations()
print(docInfo)

#model = Ner("out_large/")
i = 0
fullText = ""
textArr = []
# Only page 2 is processed; the loop over all pages is disabled below.
#while i< pdfread.getNumPages():
pageinfo = pdfread.getPage(2)
newText = str(pageinfo.extractText())
fullText += newText
'''textArr.append(newText)
i = i + 1'''
foundTitles = []
relevantTitles = [" COP", " DCdocOP", " AOR", " COR", " AOR/COR", " Chief of Party", " CHIEF OF PARTY", " Deputy Chief of Party", " DEPUTY CHIEF OF PARTY", " Evaluation Specialist", " Evaluation Team Leader", " National Expert", " Research Consultant", " Field Researcher"]
# Disabled experiment: substring matching of job titles per page.
'''for i in textArr:
    pageArr = i.split("\n")
    for k in pageArr:
        for t in relevantTitles:
            if t in k:
                foundTitles.append(k)
print(foundTitles)'''
# Disabled experiment: BERT NER over 500-character chunks of the full text.
'''dic = {}
n = 500
chunks = [fullText[i:i+n] for i in range(0,len(fullText), n)]
for c in chunks:
    output = model.predict(c)
    print(output)
    line = ""
    tag = ""
    for i in output:
        if "PER" in i['tag']:
            print(i)
            if(i['tag'][0:1] == "B"):
                if line != "":
                    dic[line] = i['tag']
                    line = ""
            line += " " + i['word']
            tag = i['tag']
for i in dic:
    print(i)
    print("\n")'''
{
"api_name": "sys.stdout",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_na... |
24881547819 | from pathlib import Path
from downloader.config import default_config, UpdateLinuxEnvironment
from downloader.constants import K_DATABASES, K_DB_URL, K_SECTION, K_VERBOSE, K_CONFIG_PATH, K_USER_DEFINED_OPTIONS, \
K_COMMIT, K_UPDATE_LINUX_ENVIRONMENT, K_FAIL_ON_FILE_ERROR, K_UPDATE_LINUX
from downloader.full_run_service import FullRunService as ProductionFullRunService
from test.fake_os_utils import SpyOsUtils
from test.fake_waiter import NoWaiter
from test.fake_external_drives_repository import ExternalDrivesRepository
from test.fake_file_downloader_factory import FileDownloaderFactory
from test.fake_importer_implicit_inputs import FileSystemState
from test.fake_base_path_relocator import BasePathRelocator
from test.fake_db_gateway import DbGateway
from test.fake_file_system_factory import FileSystemFactory
from test.fake_linux_updater import LinuxUpdater
from test.fake_local_repository import LocalRepository
from test.fake_logger import NoLogger
from test.fake_online_importer import OnlineImporter
from test.fake_offline_importer import OfflineImporter
from test.fake_reboot_calculator import RebootCalculator
from test.objects import db_empty
from test.fake_certificates_fix import CertificatesFix
class FullRunService(ProductionFullRunService):
    """Test double for the production FullRunService.

    Wires the production service with fake collaborators (fake file system,
    downloaders, importers, ...) so full runs can be exercised in tests.
    """

    def __init__(self, config=None, db_gateway=None, file_system_factory=None, linux_updater=None, os_utils=None, certificates_fix=None, external_drives_repository=None):
        # Every collaborator is optional; unset ones are replaced by fakes that
        # share the same file_system_factory so they observe the same state.
        config = config or default_config()
        file_system_factory = FileSystemFactory() if file_system_factory is None else file_system_factory
        system_file_system = file_system_factory.create_for_system_scope()
        file_downloader_factory = FileDownloaderFactory(file_system_factory=file_system_factory)
        linux_updater = linux_updater or LinuxUpdater(system_file_system)
        # NOTE: argument order must match the production FullRunService.__init__.
        super().__init__(config,
                         NoLogger(),
                         LocalRepository(config=config, file_system=system_file_system),
                         db_gateway or DbGateway(config, file_system_factory=file_system_factory),
                         OfflineImporter(file_downloader_factory=file_downloader_factory),
                         OnlineImporter(file_system_factory=file_system_factory),
                         linux_updater,
                         RebootCalculator(file_system=system_file_system),
                         BasePathRelocator(),
                         certificates_fix or CertificatesFix(),
                         external_drives_repository or ExternalDrivesRepository(file_system=system_file_system),
                         os_utils or SpyOsUtils(),
                         NoWaiter())

    @staticmethod
    def with_single_empty_db() -> ProductionFullRunService:
        """Build a service configured with one empty database named db_empty."""
        config = default_config()
        config.update({
            K_DATABASES: {
                db_empty: {
                    K_DB_URL: db_empty,
                    K_SECTION: db_empty,
                    'base_files_url': '',
                    'zips': {}
                }
            },
            K_VERBOSE: False,
            K_CONFIG_PATH: Path(''),
            K_USER_DEFINED_OPTIONS: [],
            K_COMMIT: 'test', K_UPDATE_LINUX_ENVIRONMENT: UpdateLinuxEnvironment.TRUE, K_FAIL_ON_FILE_ERROR: True
        })
        # Pre-seed the fake file system so the db file exists and parses as JSON.
        file_system_state = FileSystemState(files={db_empty: {'unzipped_json': {}}})
        file_system_factory = FileSystemFactory(state=file_system_state)
        return FullRunService(
            config,
            DbGateway(config, file_system_factory=file_system_factory),
            file_system_factory=file_system_factory
        )

    @staticmethod
    def with_single_db(db_id, db_descr, linux_updater=None, linux_update_environment=None, update_linux=None, os_utils=None, certificates_fix=None) -> ProductionFullRunService:
        """Build a service configured with a single database *db_id*/*db_descr*."""
        # Explicit None check so callers can pass update_linux=False.
        update_linux = update_linux if update_linux is not None else True
        config = default_config()
        config.update({
            K_DATABASES: {
                db_id: {
                    K_DB_URL: db_id,
                    K_SECTION: db_id,
                    'base_files_url': '',
                    'zips': {}
                }
            },
            K_VERBOSE: False,
            K_USER_DEFINED_OPTIONS: [],
            K_CONFIG_PATH: Path(''),
            K_COMMIT: 'test',
            K_UPDATE_LINUX: update_linux,
            K_UPDATE_LINUX_ENVIRONMENT: linux_update_environment or UpdateLinuxEnvironment.TRUE,
            K_FAIL_ON_FILE_ERROR: True
        })
        return FullRunService(
            config,
            DbGateway.with_single_db(db_id, db_descr, config=config),
            linux_updater=linux_updater,
            os_utils=os_utils,
            certificates_fix=certificates_fix
        )

    @staticmethod
    def with_no_dbs() -> ProductionFullRunService:
        """Build a service with an empty database section."""
        config = default_config()
        config.update({
            K_DATABASES: {}, K_VERBOSE: False, K_CONFIG_PATH: Path(''), K_USER_DEFINED_OPTIONS: [],
            K_COMMIT: 'test', K_UPDATE_LINUX_ENVIRONMENT: UpdateLinuxEnvironment.TRUE, K_FAIL_ON_FILE_ERROR: True
        })
        return FullRunService(
            config,
            DbGateway(config),
        )
| theypsilon-test/downloader | src/test/fake_full_run_service.py | fake_full_run_service.py | py | 5,295 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "downloader.full_run_service.FullRunService",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "downloader.config.default_config",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "test.fake_file_system_factory.FileSystemFactory",
"line_number": ... |
8499194960 | #!/usr/bin/env python3
"""
libminutaria-cli
================
:Authors:
Locynaeh
:Version:
1.0
Command Line Interface (CLI)) based on the libminutaria library.
This script is directly usable in a terminal. Use -h/--help arguments for more
information on how to use the CLI provided.
"""
from datetime import timedelta
from libminutaria import Timer, Preset, logger, get_cli_args, handle_cli_args
if __name__ == '__main__':
    # Default timer parameters, used if the script is launched without
    # arguments; overridden below by any user-supplied CLI values.
    TIMER_HOURS = 0  # min 0, max 23
    TIMER_MIN = 0  # min 0, max 59
    TIMER_SEC = 5  # min 0, max 59
    # Printable default duration shown in the CLI help text.
    default_duration = timedelta(hours=+TIMER_HOURS,
                                 minutes=+TIMER_MIN,
                                 seconds=+TIMER_SEC)
    DEFAULT = str(default_duration)
    # Parse the command line and extract the requested timer values.
    args = get_cli_args(DEFAULT)
    timer_values, debug_option = handle_cli_args(args)
    # Initialize the logger (NOTE: this rebinds the imported `logger` factory
    # to the logger instance it returns).
    logger = logger(debug_option)
    # Update timer parameters if any component was provided via the CLI.
    if (timer_values["timer_hours"]
            or timer_values["timer_min"]
            or timer_values["timer_secs"]):
        TIMER_HOURS = timer_values["timer_hours"]
        TIMER_MIN = timer_values["timer_min"]
        TIMER_SEC = timer_values["timer_secs"]
    # Initialize and launch a timer according to the parameters.
    timer = Timer(hours=TIMER_HOURS, minutes=TIMER_MIN, seconds=TIMER_SEC)
    # Poll the timer and redraw the remaining time on the same line.
    counter = timer.is_timing_reached()
    while counter is False:
        print("libminutaria -", "Remaining :", timer.get_timing[:9], end='\r',
              flush=True)
        counter = timer.is_timing_reached()
    # Timer reached 00:00:00.
    # Print 3 "GONG !" and some spaces to clear the countdown line.
    print("GONG ! " * 3 + ' '*17)
| Locynaeh/minutaria | minutaria-cli.py | minutaria-cli.py | py | 1,930 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.timedelta",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "libminutaria.get_cli_args",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "libminutaria.handle_cli_args",
"line_number": 36,
"usage_type": "call"
},
{
"api_nam... |
26383762180 | # -*- coding: utf-8 -*-
"""
ETM
"""
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, RegexpTokenizer
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
from sklearn.feature_extraction import DictVectorizer
from collections import Counter, OrderedDict
from sklearn.metrics import mean_squared_error
from math import sqrt
nltk.download('stopwords')
import string
from sklearn.model_selection import train_test_split
from scipy.stats import wasserstein_distance
# Fix the random seed for reproducibility of the train/val/test split.
seed = 7
np.random.seed(seed)
# Running under Google Colab: mount Drive to reach the dataset files.
from google.colab import drive
drive.mount('/content/drive')
######################## read data ################################
# headlines: array of headline+abstract strings; labels: per-document
# emotion intensity rows (5 columns, per the training code below).
headlines = np.load('/content/drive/MyDrive/REN20k_short_text/REN-20k_headline_abstract_data.npy')
labels = np.load('/content/drive/MyDrive/REN20k_short_text/REN-20k_headline_abstract_labels.npy')
print("Headline shape: "+str(headlines.shape))
print("Label shape: "+str(labels.shape))
########################## pre-processing #######################
def get_wordnet_pos(word):
    """Return the WordNet POS constant matching *word*'s NLTK POS tag.

    Falls back to NOUN for any tag other than adjective, verb or adverb.
    """
    first_letter = nltk.pos_tag([word])[0][1][0].upper()
    if first_letter == "J":
        return wordnet.ADJ
    if first_letter == "V":
        return wordnet.VERB
    if first_letter == "R":
        return wordnet.ADV
    return wordnet.NOUN
# Tokenize every headline: lowercase, strip punctuation/digits, drop stopwords.
Tokens = []
finalTokens =[]
tokenizer = RegexpTokenizer(r'\w+')
stop_words = set(stopwords.words('english'))
for i in range(len(headlines)):
    tempTokens = headlines[i].lower() #converting to lower case
    tempTokens = tempTokens.translate(str.maketrans('','',"~@#$%^&*()_-+={}[]|\/><'.,-+`:;1234567890"))
    tempTokens = tokenizer.tokenize(tempTokens) #tokenization
    # Lemmatization is intentionally disabled:
    # for j in range(len(tempTokens)):
    #     tempTokens[j] = lemmatizer.lemmatize(tempTokens[j] , get_wordnet_pos(tempTokens[j] )) #lemetization
    tempTokensStopRemoval = [word for word in tempTokens if word not in stop_words] #stopword removal
    Tokens.append(tempTokens) # tokens without stopword removal
    finalTokens.append(tempTokensStopRemoval) # tokens after stopword removal
# De-tokenized sentences (rebuilt text, currently unused downstream).
deTokenized = []
for j in range(len(finalTokens)):
    tempTokens = []
    tempDetoken = finalTokens[j]
    tempDetoken = "".join([" "+i if not i.startswith("'") and i not in string.punctuation else i for i in tempDetoken]).strip()
    deTokenized.append(tempDetoken)
# The model below consumes the stopword-filtered token lists.
tokenised = finalTokens
################################ train test val split ######################
# 60/20/20 train/val/test split: 20% held out for test, then 25% of the
# remaining 80% (i.e. 20% overall) held out for validation.
x_train_val, x_test, y_train_val, y_test = train_test_split(tokenised, labels, test_size=0.20, random_state=seed)
x_train, x_val, y_train, y_val = train_test_split(x_train_val, y_train_val, test_size=0.25, random_state=seed)
print("x_train: "+str(len(x_train)))
print("y_train: "+str(y_train.shape))
print("x_val: "+str(len(x_val)))
print("y_val: "+str(y_val.shape))
print("x_test: "+str(len(x_test)))
print("y_test: "+str(y_test.shape))
############################## Training #################
# gamma_de == document vs. {Anger, Fear, Joy, Sadness, Surprise} matrix
gamma_de = y_train
# delta_dw == document vs. word-count matrix (bag of words over x_train)
v = DictVectorizer()
X = v.fit_transform(Counter(f) for f in x_train)
delta_dw = np.int64(X.A)
vocab = v.vocabulary_
# To verify the counts, inspect the below value.
d = Counter(delta_dw[1])
# Word vs. emotion matrix: Laplace smoothing with s = 1.
wordEmo = np.zeros((len(vocab),5)) #numerator
s = 1
for i in range(len(vocab)):
    for j in range(5):
        # Co-occurrence of word i with emotion j, weighted by word counts.
        wordEmo[i][j] = s + sum(delta_dw[:,i] * gamma_de[:,j])
denominator = np.reshape(np.sum(wordEmo,0), (1,5))
# Probability of word given emotion matrix (columns sum to 1).
probWordEmo = np.divide(wordEmo,denominator)
############################## Testing #################
# delta_dwTest for testing == document vs. word-count matrix.
# NOTE(review): a fresh DictVectorizer is fit on x_test, so test word indices
# (vocabTest) differ from training indices (vocab) — the prediction loop below
# maps between the two by word string.
v = DictVectorizer()
Xtest = v.fit_transform(Counter(f) for f in x_test)
delta_dwTest = Xtest.A
vocabTest = v.vocabulary_
# ---- Prediction using Bayes' theorem ----
docs = x_test
# probE: prior probability of each emotion, estimated from the training labels.
probE = np.sum(gamma_de, 0) / np.size(gamma_de, 0)
# probEmoDoc[j][i]: unnormalised P(emotion i | document j).
lenDoc = len(docs)
probEmoDoc = np.zeros((lenDoc, 5))
for i in range(5):
    for j in range(lenDoc):
        words = len(docs[j])
        indArray = np.zeros((words, 4))
        for k in range(words):
            if docs[j][k] in vocab:
                indArray[k, 0] = vocab[docs[j][k]]  # word index in training vocab
                # FIX: np.int was removed in NumPy 1.24 — use the builtin int.
                indx_in_vocab = int(indArray[k, 0])
                indArray[k, 1] = probWordEmo[indx_in_vocab][i]  # P(word | emotion)
                indx_in_vocabtest = vocabTest[docs[j][k]]
                indArray[k, 2] = delta_dwTest[j][indx_in_vocabtest]  # count in test doc
                indArray[k, 3] = np.power(indArray[k, 1], indArray[k, 2])  # P(w|e)^count
            else:
                # Out-of-vocabulary words contribute a neutral factor of 1.
                indArray[k, 3] = 1
        # FIX: np.product is deprecated — np.prod is the supported spelling.
        productTemp = np.prod(indArray[:, 3])  # prod over words of P(w|e)^count
        probEmoDoc[j][i] = probE[i] * productTemp
####################### Evaluation1: RMSE ######################
predict_test = probEmoDoc
rms_test = sqrt(mean_squared_error(y_test, predict_test))
print("RMSE_test = "+ str(rms_test))
########### Evaluation2: Acc@N, N == 1, 2, 3 ############
#Acc@N : N==1, 2, 3
# Index of the most probable predicted emotion per document.
maxEmoPredict_test = np.argmax(predict_test,1)
# Actual emotions ranked by intensity, highest first.
sortdMaxEmoActual_test = np.argsort(-y_test, axis=1)
#Acc@1: fraction of documents whose top predicted emotion matches the top actual one.
sumAT1_test = np.sum(maxEmoPredict_test == sortdMaxEmoActual_test[:,0])
accAT1_test = sumAT1_test / np.size(y_test,0)
print("Acc@1_test = "+ str(accAT1_test))
########### Evaluation 3: APdocument ############
# Per-document Pearson correlation between predicted and actual emotion vectors.
X = predict_test # Predicted Labels
Y = y_test # Actual Labels
# Result matrix: intermediate terms stored column by column for inspection.
APDocmatrix = np.zeros((len(X), 12))
xMean = np.mean(X,axis=1)
APDocmatrix[:,0] = xMean #xMean
yMean = np.mean(Y,axis=1)
APDocmatrix[:,1] = yMean # yMean
XE0 = (X[:,0] - xMean) * (Y[:,0] - yMean) #(X1i - xMean) * (Y1i - yMean)
APDocmatrix[:,2] = XE0
XE1 = (X[:,1] - xMean) * (Y[:,1] - yMean) #(X2i - xMean) * (Y2i - yMean)
APDocmatrix[:,3] = XE1
XE2 = (X[:,2] - xMean) * (Y[:,2] - yMean) #(X3i - xMean) * (Y3i - yMean)
APDocmatrix[:,4] = XE2
XE3 = (X[:,3] - xMean) * (Y[:,3] - yMean) #(X4i - xMean) * (Y4i - yMean)
APDocmatrix[:,5] = XE3
XE4 = (X[:,4] - xMean) * (Y[:,4] - yMean) #(X5i - xMean) * (Y5i - yMean)
APDocmatrix[:,6] = XE4
sigmaX = np.std(X, axis= 1) #standard deviation of X
APDocmatrix[:,7] = sigmaX
sigmaY = np.std(Y, axis= 1) #standard deviation of Y
APDocmatrix[:,8] = sigmaY
emoLen = X.shape[1]
denominator = (emoLen - 1) *(sigmaX) * (sigmaY) #Denominator
APDocmatrix[:,9] = denominator
# Guard against division by zero: replace zero denominators with 1.
loc_zero = np.where(denominator == 0)
loc = np.array(loc_zero)
for r in range(len(loc)):
    for c in range(len(loc[r])):
        ind = loc[r][c]
        if denominator[ind] == 0:
            denominator[ind] = 1
numerator = np.sum(APDocmatrix[:,2:7], axis = 1) #Numerator
APDocmatrix[:,10] = numerator
APdocument = numerator / denominator #APdocument value for each document
APDocmatrix[:,11] = APdocument
# Replace any NaN correlation with 0.0 (and report where it occurred).
nanLoc = np.argwhere(np.isnan(APdocument))
for i in range(len(nanLoc)):
    print("nan@: " + str(nanLoc[i][0]))
    APdocument[nanLoc[i][0]] = 0.0
#Mean of APdocument
apDocumentMean = np.mean(APdocument)
print("Mean APdocument : " + str(apDocumentMean))
#Variance of APdocument
# NOTE(review): the printed label says "APemotion" but the value is the
# APdocument variance — likely a copy/paste slip in the message.
APdocumentnVariance = np.var(APdocument)#Variance of APdocument
print("Variance APemotion:" + str(APdocumentnVariance))
########### Evaluation 4: APemotion ############
# Per-emotion Pearson correlation between predicted and actual columns.
A = predict_test # Predicted labels, Aj
B = y_test # Original Labels, Bj
AMean = np.mean(A,axis=0) #Acap
AMean = np.reshape(AMean,(1,5)) #Acap reshaped into 1x5
AMean4docs = np.repeat(AMean, repeats = [len(A)], axis=0) # Repeat AMean vector for all documents
BMean = np.mean(B,axis=0) #Bcap
BMean = np.reshape(BMean,(1,5)) #Bcap reshaped into 1x5
BMean4docs = np.repeat(BMean, repeats = [len(B)], axis=0) # Repeat BMean vector for all documents
AminusAmean = A - AMean4docs # Aj - Acap
BminusBmean = B - BMean4docs # Bj - Bcap
AjxBj = AminusAmean * BminusBmean #(Aj - Acap) * (Bj - Bcap)
nominator = np.sum(AjxBj, axis = 0) #summation of ((Aj - Acap) * (Bj - Bcap)) --> Nominator
nominator = np.reshape(nominator,(1,5)) #nominator reshaped into 1x5
docLen = len(A) #document count
sigmaA = np.std(A, axis= 0) #standard deviation of A
sigmaA = np.reshape(sigmaA,(1,5)) #sigmaA reshaped into 1x5
sigmaB = np.std(B, axis= 0) #standard deviation of B
sigmaB = np.reshape(sigmaB,(1,5)) #sigmaB reshaped into 1x5
denomi = (docLen - 1) *(sigmaA) * (sigmaB) #Denominator
APemotion = nominator / denomi #APemotion value for each emotion
APemotionMean = np.mean(APemotion) #Mean of APemotion
print("Mean APemotion:" + str(APemotionMean))
APemotionVariance = np.var(APemotion)#Variance of APemotion
print("Variance APemotion:" + str(APemotionVariance))
print("\n")
# Correlation coefficient over each emotion label.
# Labels: Anger → Angry, Fear → Afraid, Joy → Happy, Sadness → Sad, Surprise → Inspired
print("Anger:"+str(APemotion[0][0]))
print("Fear:"+str(APemotion[0][1]))
print("Joy:"+str(APemotion[0][2]))
print("Sadness:"+str(APemotion[0][3]))
print("Surprise:"+str(APemotion[0][4])+ "\n")
########### Evaluation 1.1: wasserstein_distance ############
# Mean Wasserstein (earth mover's) distance between the predicted and the
# actual emotion distribution of each test document.
wasserDistance_test = 0
wasserDistance_test_alldocs = np.zeros((len(predict_test),1))
for i in range(len(predict_test)):
    wasserDistance_test_alldocs[i] = wasserstein_distance(predict_test[i],y_test[i])
wasserDistance_test = np.mean(wasserDistance_test_alldocs)
print("wasserstein_distance_test = "+ str(wasserDistance_test))
{
"api_name": "nltk.stem.WordNetLemmatizer",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.ran... |
16991129528 | import dash_bootstrap_components as dbc
import pandas as pd
import plotly.express as px
from dash import html, dcc
import plotly.io as pio
pio.templates.default = "simple_white"
class FastViewCo2(object):
    """Builds the Plotly figures and Dash layout for the CO2 overview page.

    Expects *data* to be a DataFrame with at least the columns used below:
    'name', 'code', 'code_region', 'year', 'CO2', 'GDP', 'pop' — confirm
    against the caller that loads the dataset.
    """

    def __init__(self, data):
        self.df_co2 = data
        Un_Kt = 1000  # kilotonnes-per-unit divisor used to rescale CO2
        # Total CO2 per country across all years.
        co2_country = data.groupby("name")['CO2'].sum()
        co2_country_acum = sum(co2_country)
        co2_country_df = pd.DataFrame(co2_country).reset_index()
        co2_country_df['% Representation'] = (co2_country_df['CO2'] / co2_country_acum) * 100
        co2_country_df['CO2'] = co2_country_df['CO2'] / Un_Kt
        co2_country_df = co2_country_df.sort_values(by='% Representation', ascending=False)
        # Cumulative share, largest emitters first.
        co2_country_df['acumulado'] = co2_country_df['% Representation'].cumsum()
        co2_country_df = co2_country_df.reset_index()
        co2_country_df = co2_country_df  # no-op reassignment kept as-is
        # Cumulative share reached by the top-10 emitters.
        # NOTE(review): iloc[10, 4] assumes column 4 is 'acumulado' after
        # reset_index — verify if the column layout changes.
        ten_top = co2_country_df.iloc[10, 4]
        ten_top = str(f'{ten_top:.2f}' + '%')
        top_countries = list(co2_country_df['name'].head(8))
        g20_df = data[data['name'].isin(top_countries)]
        self.co2_country_df = co2_country_df
        self.ten_top = ten_top
        self.top_countries = top_countries
        self.g20_df = g20_df

    def get_fig_geo_co2(self):
        """Choropleth world map of CO2, animated by year."""
        data = self.df_co2
        fig_geo_co2 = px.choropleth(data, locations='code',
                                    animation_frame='year',
                                    color='CO2',
                                    hover_name='name',
                                    color_continuous_scale='temps_r'
                                    )
        fig_geo_co2.update_layout(height=350)
        return fig_geo_co2

    def get_fig_countries_co2(self):
        """Stacked area chart of CO2 over time for the top-8 emitters."""
        fig_countries_co2 = px.area(self.g20_df.sort_values(by='CO2', ascending=False),
                                    x="year",
                                    y="CO2",
                                    color="name",
                                    line_group="name")
        fig_countries_co2.update_layout(height=350, )
        return fig_countries_co2

    def get_fig_pie_co2(self):
        """Pie chart of the emission share of the top-10 countries."""
        fig_pie_co2 = px.pie(self.co2_country_df.head(10).sort_values(by='CO2', ascending=False),
                             values='% Representation', names='name',
                             hover_data=['name'], labels={'% Representation': '%'})
        fig_pie_co2.update_traces(textposition='inside', textinfo='label+value')
        return fig_pie_co2

    def get_fig_scatter(self):
        """Animated GDP-vs-CO2 bubble chart (bubble size = population)."""
        fig_scatter = px.scatter(
            self.df_co2, x='GDP', y='CO2',
            animation_frame='year',
            animation_group='CO2',
            size='pop',
            color='code_region',
            hover_name='name',
            log_x=True,
            range_x=[100, 100000],
            range_y=[100, 12000000]
        )
        return fig_scatter

    def get_fig_area_case_ukraine(self):
        """Area chart of Ukraine's CO2 emissions over time."""
        case_ukraine = px.area(self.df_co2[self.df_co2['name'] == 'Ukraine'],
                               x="year",
                               y="CO2",
                               color="name",
                               line_group="name")
        return case_ukraine

    def get_html_components(self):
        """Assemble the full Dash card body: header rows plus the five figures."""
        return dbc.CardBody([
            dbc.Row([
                dbc.Col([html.Label("Overall view", className="align-middle")])],
                style={
                    "background-color": "#EEFFD6",
                    'height': '35px',
                    'border-radius': '5px',
                    'padding': '5px 0px',
                    'text-align': 'left'
                }
            ),
            dbc.Row([
                dbc.Col([dcc.Graph(id='id-geo-co2', figure=self.get_fig_geo_co2())], width=6),
                dbc.Col([dcc.Graph(id='id-area-country-co2', figure=self.get_fig_countries_co2())], width=6),
            ]),
            dbc.Row(
                [dbc.Col(html.Div('Comparative with other economics indicators'))],
                style={
                    "background-color": "#EEFFD6",
                    'height': '35px',
                    'border-radius': '5px',
                    'padding': '5px 0px',
                    'text-align': 'left',
                }
            ),
            dbc.Row([
                dbc.Col(
                    [
                        html.Label('% Emitions top ten', className='card-tittle'),
                        html.H4(self.ten_top)
                    ],
                    style={
                        'border-radius': '5px',
                        'margin-top': '30px',
                        'padding': '30px 0 0 30px'
                    },
                    width=2),
                dbc.Col(
                    [
                        dcc.Graph(
                            id='id-pie-co2',
                            config={'displayModeBar': False},
                            figure=self.get_fig_pie_co2())
                    ],
                    width=4),
                dbc.Col(
                    [
                        dcc.Graph(
                            id='id-scatter-co2',
                            config={'displayModeBar': False},
                            figure=self.get_fig_scatter())
                    ],
                    width=6),
            ]),
            dbc.Row([
                dbc.Col(html.Div('Case Ukraine'),)],
                style={
                    "background-color": "#EEFFD6",
                    'height': '35px',
                    'border-radius': '5px',
                    'padding': '5px 0px',
                    'text-align': 'left',
                }
            ),
            dbc.Row(
                [
                    dbc.Col(
                        [
                            dcc.Graph(
                                id='id-area-case-ukraine',
                                config={'displayModeBar': False},
                                figure=self.get_fig_area_case_ukraine())
                        ],
                        width=12),
                ]),
        ])
{
"api_name": "plotly.io.templates",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "plotly.io",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "plotly.express.chor... |
32838489808 | from enum import Enum
users = []
class Account(Enum):
    # Supported currency codes for a bank account.
    USD = "USD"
    KZT = "KZT"
    RUB = "RUB"
    EUR = "EUR"
class BankAccount:
    """A simple bank account holding a balance in a single currency.

    NOTE: although the ``Account`` enum exists in this module, the rest of the
    script passes plain currency-code strings ("KZT", "RUB", ...), so the
    attributes are typed accordingly here.
    """
    name: str
    surname: str
    amount: int = 0
    account: str = 'KZT'

    def __init__(self, name: str, surname: str, account: str) -> None:
        self.name = name
        self.surname = surname
        self.account = account

    def set_user(self, name: str, surname: str, account: str) -> None:
        """Reassign the account holder's identity and currency."""
        self.name = name
        self.surname = surname
        self.account = account

    def set_amount(self, amount: int) -> None:
        """Set the balance to *amount* (in the current currency)."""
        self.amount = amount

    # The zero-argument "getter" methods of the original (name(), surname(),
    # account(), amount()) were removed: the instance attributes assigned in
    # __init__/set_amount shadow them, so they could never be called on an
    # instance — attribute access (acct.name, acct.amount, ...) is unchanged.

    def addToBankAccount(self, x: int) -> None:
        """Deposit *x* into the account."""
        self.amount += x
        print("Счет успешно пополнен")

    def substractFromBankAccount(self, x: int) -> None:
        """Withdraw *x* from the account if the balance allows it."""
        if self.amount < x:
            print("Недостаточно средств")
        else:
            self.amount -= x
            print("Вы успешно сняли деньги")

    def moneyConversion(self, b) -> None:
        """Convert the balance from the current currency to currency code *b*.

        NOTE(review): some conversion tables look inverted (e.g. kurs_rub maps
        "USD" to 62.52, i.e. 1 USD = 62.52 RUB read as RUB→USD); rates are kept
        exactly as-is to preserve behaviour — confirm the intended semantics.
        """
        a = self.account
        kurs_kzt = {"KZT": 1, "RUB": 7.53, "USD": 470.69, "EUR": 496.17}
        kurs_rub = {"RUB": 1, "KZT": 0.13, "USD": 62.52, "EUR": 65.90}
        kurs_usd = {"RUB": 0.016, "KZT": 0.0021, "USD": 1, "EUR": 1.05}
        kurs_eur = {"RUB": 0.015, "KZT": 0.0020, "USD": 0.95, "EUR": 1}
        if b == "KZT":
            self.amount *= kurs_kzt[a]
            self.account = "KZT"
        elif b == "RUB":
            self.amount *= kurs_rub[a]
            self.account = "RUB"
        elif b == "USD":
            self.amount *= kurs_usd[a]
            self.account = "USD"
        elif b == "EUR":
            self.amount *= kurs_eur[a]
            self.account = "EUR"

    def __repr__(self) -> str:
        return f'{self.name} {self.surname} {self.amount} {self.account}'
def create_account(name: str, surname: str, amount:int, account: Account) -> BankAccount:
    """Create a BankAccount, register it in the global ``users`` list, return it."""
    new_user = BankAccount(name=name, surname=surname, account=account)
    new_user.set_amount(amount=amount)
    users.append(new_user)
    return new_user
def get_user(name: str, surname: str) -> str | None:
    """Look up a registered user and return a one-line balance summary.

    Returns ``None`` (after printing 'User not found') when no user matches.
    NOTE: the original annotated the return as ``BankAccount | None``, but
    the function actually returns a formatted string; annotation corrected.
    """
    user = next((u for u in users if name == u.name and surname == u.surname), None)
    if not user:
        print('User not found')
        return None
    return f'{user.name} {user.surname}. Ваш счет: {user.amount} {user.account}'
def delete_user(name: str, surname: str) -> None:
    """Remove a registered user by name+surname, printing the outcome.

    NOTE: the original annotated the return as ``BankAccount | None``, but
    every path returns ``None``; annotation corrected.
    """
    user = next((u for u in users if name == u.name and surname == u.surname), None)
    if not user:
        print('User not found')
        return
    users.remove(user)
    print("Пользователь удален")
# Seed a demo user so the menu has something to operate on.
fake_account = BankAccount(name="Mark", surname="Doe", account="RUB")
fake_account.amount = 1500
users.append(fake_account)
# Menu digit -> currency code.
d = {"1":"KZT", "2":"RUB", "3":"USD", "4":"EUR"}
# Interactive main loop: 1=create, 2=select+operate, 3=delete, 0=quit.
while(True):
    inp = input("Выберите действие: \n 1. Создание пользователя \n 2. Выбрать пользователя \n 3. Удалить пользователя \n 0. Выход \n")
    if inp == '0':
        break
    elif inp == "1":
        name = input("Введите имя: ")
        surname = input("Введите фмаилия: ")
        v = input("Выберите курс валют: \n 1. KZT \n 2. RUB \n 3. USD \n 4. EUR \n")
        account = d[v]
        user = create_account(name=name, surname=surname, account=account, amount=0)
        print("Ваш аккаунт создан. ")
    elif inp == '2':
        # NOTE(review): the operations below act on ``user``, which is the
        # most recently *created* account, not the one looked up here
        # (``get_user`` returns a string).  Choosing option 2 before ever
        # creating an account raises NameError — TODO confirm intended.
        name = input("Введите имя: ")
        surname = input("Введите фмаилия: ")
        print(get_user(name=name, surname=surname))
        while(True):
            inp2 = input("Выберите операцию: \n 1. Добавить на счет \n 2. Снять деньги \n 3. Сконвертировать \n 0. Назад\n")
            if inp2 == "0":
                break
            elif inp2 == "1":
                x = int(input("Введите суммy: \n"))
                user.addToBankAccount(x)
                print(get_user(name=name, surname=surname))
            elif inp2 == "2":
                x = int(input("Введите суммy: \n"))
                user.substractFromBankAccount(x)
                print(get_user(name=name, surname=surname))
            elif inp2 == "3":
                b = input("Выберите курс валют: \n 1. KZT \n 2. RUB \n 3. USD \n 4. EUR \n")
                account = d[b]
                user.moneyConversion(account)
                print(get_user(name=name, surname=surname))
    elif inp == "3":
        name = input("Введите имя: ")
        surname = input("Введите фмаилия: ")
        delete_user(name=name, surname=surname)
| akmaral0519/lab3 | task1.py | task1.py | py | 5,153 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 4,
"usage_type": "name"
}
] |
36458767221 | import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def iterative_algorithm(img):
    """Compute a global threshold with the iterative (isodata-style) method.

    Starting from an arbitrary threshold Ti, the image is split into
    foreground (>= Ti) and background (< Ti); Ti is replaced by the midpoint
    of the two class means until it stops changing.  Returns the final
    integer threshold.

    Fixes over the original implementation:
    * the whole image is scanned — the original ``range(1, m)`` loops
      skipped row 0 and column 0;
    * an empty foreground or background class no longer raises
      ZeroDivisionError: the current threshold is returned instead;
    * the per-pixel Python loops are replaced by vectorized numpy
      operations (each pixel is still truncated to int, as ``int(tmp)``
      did in the original).
    """
    I = np.array(img).astype(np.float32)
    # Per-pixel truncation toward zero, mirroring the original int(tmp).
    trunc = I.astype(np.int64)
    Ti = 50  # arbitrary initial threshold
    diff = 255
    count = 1
    while diff != 0:
        fg_mask = I >= Ti
        foreground = int(fg_mask.sum())
        background = trunc.size - foreground
        if foreground == 0 or background == 0:
            # One class is empty: the threshold cannot be refined further.
            break
        mean_fg = int(trunc[fg_mask].sum() / foreground)
        mean_bg = int(trunc[~fg_mask].sum() / background)
        mean = int((mean_bg + mean_fg) / 2)
        diff = abs(mean - Ti)
        Ti = mean
        print("Iteration " + str(count) + " Threshold value : " + str(Ti))
        count = count + 1
    return Ti
# NOTE(review): hard-coded absolute Windows path — consider taking the
# image path from the command line.
img = cv2.imread("C:/Users/imrk0/Desktop/Github/Image_Processing_nd_Computer_Vision_Programs/00_img/img10.jpg")
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
orignal_img = img
gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
# Fixed working size for the thresholding pass.
img = cv2.resize(gray,(1017,1199))
final_threshold_value = iterative_algorithm(img)
print("Final Threshold value : " + str(final_threshold_value))
# Binarize with the computed threshold and show original vs segmented.
ret1, th1 = cv2.threshold(img, final_threshold_value, 255, cv2.THRESH_BINARY)
plot1 = plt.figure("Original")
plt.imshow(orignal_img)
plot2 = plt.figure("Segmented image : ")
plt.imshow(th1,cmap=cm.gray)
plt.show()
| ravi-kr-singh/Image_Processing_nd_Computer_Vision_Programs | 07_iterative_algorithm.py | 07_iterative_algorithm.py | py | 1,614 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_numb... |
71864036835 | from django.forms import ModelForm
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseGone
from django.shortcuts import render, redirect, get_object_or_404
import datetime
from app.models import Book
class BookForm(ModelForm):
    """ModelForm exposing the user-editable fields of ``Book``."""
    class Meta:
        model = Book
        fields = ['name', 'pages', 'date_written', 'type']
def home(request):
    """Landing page: a heading plus a link to the book list."""
    markup = """
    <h1>Django CRUD Example</h1>
    <a href="/books/">Book list</a><br>
    """
    return HttpResponse(markup)
def book_list(request, template_name='book_list.html'):
    """Render every Book with *template_name*."""
    context = {'object_list': Book.objects.all()}
    return render(request, template_name, context)
def book_list_range(request, y1, m1, d1, y2, m2, d2, template_name='book_list.html'):
    """Render the books written between two dates (inclusive on both ends)."""
    start = datetime.date(int(y1), int(m1), int(d1))
    end = datetime.date(int(y2), int(m2), int(d2))
    # Inclusive range: drop everything strictly before start or after end.
    books = Book.objects.exclude(date_written__lt=start).exclude(date_written__gt=end)
    print(len(books), start, end)
    context = {
        'object_list': books,
        'date_range_start': start,
        'date_range_end': end,
    }
    return render(request, template_name, context)
def book_create(request, template_name='book_form.html'):
    """Show the creation form; persist and redirect on a valid POST."""
    form = BookForm(request.POST or None)
    if not form.is_valid():
        return render(request, template_name, {'form':form})
    form.save()
    return redirect('book_list')
def book_update(request, pk):
    """Update the Book identified by *pk* from POST data.

    Returns HTTP 200 on success, 400 when the submitted form is invalid.
    Raises Http404 when no such book exists.
    """
    book = get_object_or_404(Book, pk=pk)
    form = BookForm(request.POST or None, instance=book)
    # Removed the leftover debug statement ``print(request.POST);`` (note the
    # stray semicolon) that leaked request data to stdout on every call.
    if form.is_valid():
        form.save()
        return HttpResponse(status=200)
    return HttpResponse(status=400)
def book_delete(request, pk):
    """Delete the Book identified by *pk*; only the DELETE verb is accepted."""
    book = get_object_or_404(Book, pk=pk)
    if request.method != 'DELETE':
        return HttpResponse(status=405)
    book.delete()
    return HttpResponse(status=204)
| AshtonIzmev/crud-datatables-django | app/views.py | views.py | py | 1,879 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "app.models.Book",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "app.mo... |
14928155080 | """Some useful type aliases relevant to this project."""
import pathlib
from typing import AbstractSet, Callable, List, Mapping, Optional, Tuple, Union
import torch
# A layer may be addressed by index or by name.
Layer = Union[int, str]
# A unit is a (layer, unit-index) pair.
Unit = Tuple[Layer, int]
# Anything accepted where a filesystem path is expected.
PathLike = Union[str, pathlib.Path]
# Fixed-arity tensor tuples and common torch-related aliases.
TensorPair = Tuple[torch.Tensor, torch.Tensor]
TensorTriplet = Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
OptionalTensors = Tuple[Optional[torch.Tensor], ...]
StateDict = Mapping[str, torch.Tensor]
Device = Union[str, torch.device]

# All strings are also Sequence[str]; these aliases make explicit that we
# mean lists/tuples of strings, or sets of strings, never a bare string.
StrSequence = Union[List[str], Tuple[str, ...]]
StrSet = AbstractSet[str]
StrIterable = Union[StrSet, StrSequence]
StrMapping = Mapping[str, str]

# Some common transform signatures.
TransformTensor = Callable[[torch.Tensor], torch.Tensor]
TransformStr = Callable[[str], str]
TransformStrSeq = Callable[[StrSequence], StrSequence]
| evandez/neuron-descriptions | src/utils/typing.py | typing.py | py | 960 | python | en | code | 59 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number"... |
4454586278 | import json
from django.test import TestCase
from wagtail.models import Site
from ..models import GeneralPage
class TestGeneral(TestCase):
    """Smoke test: a GeneralPage renders with the expected template."""

    def setUp(self):
        body_stream = json.dumps(
            [
                {"type": "paragraph", "value": {"text": "This is a paragraph"}},
            ]
        )
        self.general_page = GeneralPage(
            title="General page",
            teaser_text="test",
            body=body_stream,
        )
        Site.objects.get().root_page.add_child(instance=self.general_page)

    def test_view_uses_correct_template(self):
        response = self.client.get(self.general_page.get_url())
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "generic_pages/general_page.html")
| nationalarchives/ds-wagtail | etna/generic_pages/tests/test_models.py | test_models.py | py | 802 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "django.test.TestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "wagtail.models.Site.objects.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "wagtail.models.Site.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
... |
9980427418 | from helpers import *
import cv2
import tensorflow as tf
import json
import sys
# Checking for incorrect usage
if len(sys.argv) != 2:
    print("Usage: python main.py path_to_image")
    exit(-1)
image_file = sys.argv[1]
# These are set to the default names from exported models, update as needed.
INPUT_TENSOR_NAME = 'image_tensor:0'
OUTPUT_TENSOR_NAMES = ['detected_boxes:0', 'detected_scores:0', 'detected_classes:0']
filename = "Object_Identification_Model/model.pb"
labels_filename = "Object_Identification_Model/labels.txt"
# Create tf graph and return list of labels
labels = create_tf_graph(filename, labels_filename)
# Load the input image; cv2.imread returns None on a bad path.
image = cv2.imread(image_file)
if image is None:
    print("Invalid Path")
    exit(-1)
image = resize_down_to_1600_max_dim(image)
# Create clone to visualize on
clone = image.copy()
# Necessary preprocessing: detector expects a fixed 320x320 input.
network_input_size = 320
augmented_image = cv2.resize(image, (network_input_size, network_input_size))
# Extracting probabilities and box bounds
# pred[0] = boxes (normalized), pred[1] = scores, pred[2] = classes.
pred = predict_from_graph(OUTPUT_TENSOR_NAMES, augmented_image, INPUT_TENSOR_NAME)
final = []
# Scaling percentage box bounds to pixel coordinates; even indices are x
# (scaled by width), odd are y (scaled by height); clamp negatives to 0.
h, w = image.shape[:2]
for i in pred[0]:
    for j in range(4):
        if j % 2 == 0:
            i[j] *= w
        else:
            i[j] *= h
        if i[j] < 0:
            i[j] = 0
coords = pred[0]
# Applying non-maxima suppression
ind = tf.image.non_max_suppression(pred[0], pred[1], len(coords), iou_threshold=0.8,
                                   score_threshold=float('-inf'), name=None)
# Only taking into account images with a high probability (> 0.6)
for i in ind:
    j = pred[1][i]
    if j > 0.6:
        j = [j, pred[0][i], i]
        final.append(j)
coords_list = []
# Creating a final list of box bounds which only included those of selected boxes
for j in final:
    coords_list.append(list(coords[j[2]]))
# Order boxes according to their rows and columns
# Rows and columns of each box stored in loc
loc, coords_list = order_boxes(coords_list)
# Extracting all lettuce images (crops) from the main image
images = []
for i in coords_list:
    images.append(image[int(i[1]):int(i[3]), int(i[0]):int(i[2])])
if len(images) == 0:
    print("No lettuces detected")
    exit(-1)
# Calculating green intensity of each leaf.  A pixel counts as "green" when
# channel 1 strictly dominates channels 0 and 2 (the image was converted to
# RGB above, so channel 1 is green).  Vectorized: the original per-pixel
# Python loop was O(H*W) interpreted work per crop.
green_intensity = []
for crop in images:
    ch0 = crop[:, :, 0].astype(int)
    ch1 = crop[:, :, 1].astype(int)
    ch2 = crop[:, :, 2].astype(int)
    mask = ch1 > 0.75 * (ch0 + ch2)
    count = int(mask.sum())
    # Guard: a crop with no green pixels raised ZeroDivisionError in the
    # original; treat it as zero average green value.
    avg = crop[:, :, 1][mask].sum() / count if count else 0
    # Clamp so the resulting intensity stays within [0, 1].
    if avg > 220:
        avg = 220
    green_intensity.append(1 - avg / 220)
# Classify each lettuce crop as Small/Medium/Large relative to the mean area.
areas = [crop.shape[0] * crop.shape[1] for crop in images]
total = sum(areas)
avg = total/len(images)
sizes = []
for area in areas:
    if area > 1.25*avg:
        label = "Large"
    elif area < 0.75*avg:
        label = "Small"
    else:
        label = "Medium"
    sizes.append(label)
# These are set to the default names from exported models, update as needed.
output_layer = 'loss:0'
input_node = 'Placeholder:0'
filename = "Image_Classification_Model/model.pb"
labels_filename = "Image_Classification_Model/labels.txt"
# Load graph and extract labels
labels = create_tf_graph(filename, labels_filename)
pred = []
# Classify every extracted crop as Healthy / Diseased / Mixed.
for j in images:
    # Necessary preprocessing: center-square crop, then resize to 256.
    img = resize_down_to_1600_max_dim(j)
    h, w = img.shape[:2]
    min_dim = min(w, h)
    max_square_image = crop_center(img, min_dim, min_dim)
    augmented_image = resize_to_256_square(max_square_image)
    # Read the classifier's expected input size from the graph itself.
    # NOTE(review): this opens a new TF session per crop — could be hoisted.
    with tf.compat.v1.Session() as sess:
        input_tensor_shape = sess.graph.get_tensor_by_name('Placeholder:0').shape.as_list()
        network_input_size = input_tensor_shape[1]
    # Crop the center for the specified network_input_Size
    augmented_image = crop_center(augmented_image, network_input_size, network_input_size)
    predictions = predict_from_graph([output_layer], augmented_image, input_node)
    # Convert numpy array to list
    predictions = predictions.tolist()
    # Take the most likely result into account
    prob = max(predictions[0])
    for i in range(len(predictions[0])):
        if predictions[0][i] == prob:
            ind = i
    # If diseased probability is low, output as mixed
    if labels[ind] == "Diseased" and prob < 0.5:
        label = "Mixed"
    else:
        label = labels[ind]
    pred.append((label, prob))
# Resize to fit visualization on screen (longest side capped at 800).
# NOTE(review): max(clone.shape) includes the channel count (3); harmless
# here since real dimensions exceed 3, but worth confirming.
if clone.shape[0] > 800 or clone.shape[1] > 800:
    scale = 800/max(clone.shape)
    clone = cv2.resize(clone, (int(clone.shape[1]*scale), int(clone.shape[0]*scale)))
# Rescale box coordinates from original-image space to clone space.
scale = clone.shape[0]/image.shape[0]
for i in range(len(coords_list)):
    for j in range(len(coords_list[i])):
        coords_list[i][j] *= scale
output_info = []
# Output to json and visualize as rectangles (green=Healthy, yellow=Mixed,
# red otherwise; colors are BGR tuples for OpenCV drawing).
for i in range(len(coords_list)):
    output_info.append({
        "Coordinates": coords_list[i],
        "Object_Identification_Probability": float(final[i][0]),
        "Status": pred[i][0], "Classification_Probability": pred[i][1],
        "Row": loc[i][0],
        "Column": loc[i][1],
        "Green_Intensity": green_intensity[i],
        "Size": sizes[i]})
    if pred[i][0] == 'Healthy':
        color = (0, 255, 0)
    elif pred[i][0] == 'Mixed':
        color = (0, 255, 255)
    else:
        color = (0, 0, 255)
    cv2.rectangle(clone, (int(coords_list[i][0]), int(coords_list[i][1])),
                  (int(coords_list[i][2]), int(coords_list[i][3])), color, 2)
    cv2.putText(clone, pred[i][0], (int(coords_list[i][0]), int(coords_list[i][1])+10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 0), 2)
    cv2.putText(clone, str(loc[i][0]) + ":" + str(loc[i][1]),
                (int(coords_list[i][0]), int(coords_list[i][1]) + 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 0), 2)
# Persist the per-lettuce records and show/save the annotated image.
with open("output_info.json", "w") as write_file:
    json.dump(output_info, write_file, indent=4)
cv2.imshow('Visual Representation', clone)
cv2.waitKey(0)
cv2.imwrite('Visual_Representation.png', clone)
| AdvaitTahilyani/plant-health-classifier | main.py | main.py | py | 6,144 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number":... |
5025490453 | # coding=utf-8
from pickle import FALSE
from sys import flags, version_info
from tkinter import filedialog
from STCore.Component import StarElement
from logging import root
from operator import contains
from os import scandir
from tkinter.constants import W
import matplotlib
from matplotlib import axes
import numpy
from matplotlib import use, figure
from matplotlib.axes import Axes
from numpy.lib.histograms import histogram
from STCore.item import Star
use("TkAgg")
import matplotlib as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.patches import Rectangle
from matplotlib.colors import Normalize, PowerNorm, LogNorm
from matplotlib.artist import setp, getp
import tkinter as tk
from tkinter import ttk
from STCore.item.Star import *
from STCore import SetStar, Tracker
import STCore.DataManager
from time import time
import STCore.Settings
import STCore.RuntimeAnalysis
import gc
from PIL import Image
import STCore.utils.Icons as icons
from STCore import DataManager, RuntimeAnalysis
from Component import Levels, StarElement
#region Messages and Events
# Global matplotlib style: white ticks / grey labels and edges, so the plot
# chrome is readable over the dark (black) viewport background.
params = {"ytick.color" : "w",
          "xtick.color" : "w",
          "axes.labelcolor" : "grey",
          "axes.edgecolor" : "grey"}
plt.rcParams.update(params)
#endregion
#region Global Variables
# --- Module-level UI/state globals -------------------------------------
ViewerFrame = None
Data = None                     # numpy array of the currently displayed image
level_perc = (0,0)              # (max, min) display levels
Stars = []                      # list of StarItem currently defined
canvas = None                   # FigureCanvasTkAgg wrapping the matplotlib figure
implot = None                   # AxesImage returned by imshow
ImageFrame = None
axis : Axes = None
SidebarList = None
SliderLabel = None
# UI label -> matplotlib colormap / normalization.
ColorMaps = {"Escala de grises" : "gray", "Temperatura" : "seismic", "Arcoiris" : "rainbow", "Negativo" : "binary"}
Modes = {"Linear" : Normalize(), "Raiz cuadrada": PowerNorm(gamma = 0.5), "Logaritmico" : LogNorm()}
SelectedStar = -1               # index of the star being dragged; -100 = zoom gizmo
MousePress = None
MousePressTime = -1
img_limits : tuple = None       # full-image axis limits, (xlim, ylim)
img_offset : tuple = (0,0)      # current pan center
zoom_factor = 1
z_container : Rectangle = None  # zoom gizmo frame
z_box : Rectangle = None        # zoom gizmo viewport indicator
App : ttk.Frame = None
levelFrame : Levels = None
Viewport : tk.Canvas = None
Sidebar : tk.Canvas = None
sidebar_buttons : tk.Frame = None
sidebar_elements = []           # StarElement widgets, parallel to Stars
isInitialized = False
#endregion
#region Main Body
def Awake(root):
    """Activate the image view: bind the first image, restore display levels,
    refresh the star list and migrate stars saved by older versions."""
    global ViewerFrame, Data, Stars, canvas, implot, ImageFrame, axis, Sidebar, SidebarList, SliderLabel, level_perc, levelFrame, isInitialized
    STCore.DataManager.CurrentWindow = 2
    App.pack(fill=tk.BOTH, expand=1)
    #ViewerFrame = tk.Frame(root)
    #ttk.Label(ViewerFrame,text="Visor de Imagen").pack(fill = tk.X)
    Data = DataManager.FileItemList[0].data
    level_perc = STCore.DataManager.Levels
    # Setting Levels: default to the 99.8 / 1 percentiles of the image when
    # no tuple was previously stored.
    if not isinstance(level_perc, tuple):
        level_perc = (numpy.percentile(Data, 99.8), numpy.percentile(Data, 1))
        STCore.DataManager.Levels = level_perc
    #BuildLayout(root)
    if implot is None:
        App.after(10, DrawCanvas)
    levelFrame.set_limits(numpy.nanmin(Data), numpy.nanmax(Data))
    levelFrame.setMax(level_perc[0])
    levelFrame.setMin(level_perc[1])
    # Star version control
    # NOTE(review): ``or`` short-circuits — once one star reports a version
    # change, CheckVersion is no longer called for the remaining stars.
    # Confirm whether every star should be migrated.
    version_changed = False
    index = 0
    for star in Stars:
        version_changed = version_changed or CheckVersion(star, index)
        index += 1
    if version_changed:
        print ("Se actualizaron las estrellas de una version anterior")
    SetStar.closedTime = 0
    OnStarChange()
    isInitialized = True
# Draws the layout in a single pass
# Draws the layout in a single pass
def BuildLayout(root : tk.Tk):
    """Build the view's widget tree (viewport, sidebar, level sliders) once."""
    global App, Viewport, Sidebar, levelFrame, isInitialized
    # Checks if Viewport object hasn't been destroyed or unloaded
    fresh = Viewport is None
    # Check whether the layout hadn't been built yet
    if isInitialized == False:
        App = ttk.Frame(root, width=root.winfo_width(), height=root.winfo_height(), name="imageview")
        App.pack(fill=tk.BOTH, expand=1)
        App.columnconfigure(tuple(range(2)), weight=1)
        App.columnconfigure(1, weight=0)
        App.rowconfigure(tuple(range(2)), weight=1)
        CreateCanvas()
        CreateLevels()
        CreateSidebar(root)
        #Sidebar.grid_propagate(0)
        Viewport.grid(row=0, column=0, rowspan=2, sticky="nsew")
        Sidebar.grid(row=0, column=1, rowspan=2, sticky="nsew")
        levelFrame.grid(row=2, column=0, sticky=tk.EW)
        sidebar_buttons.grid(row=2, column=1, sticky="ew")
        # NOTE(review): calling Destroy right after a fresh build looks like
        # it resets the brand-new viewport — confirm intended.
        if fresh:
            Destroy()
        isInitialized = True
    #else: # No need to rebuild
        #Viewport.grid()
        #Sidebar.grid()
        #levelFrame.grid()
        #sidebar_buttons.grid()
#region Create Funcions
# Creates the viewport, but doesn't draw it to the UI
# Creates the viewport, but doesn't draw it to the UI
def CreateCanvas():
    """Create the matplotlib figure/canvas pair and hook up mouse events."""
    global canvas, implot, ImageFrame, axis, Viewport
    #ImageFrame = ttk.Frame(app, width = 700, height = 350)
    #ImageFrame.pack(side=tk.LEFT, fill = tk.BOTH, expand = True, anchor = tk.W)
    fig = figure.Figure(figsize = (7,3.6), dpi = 100)
    fig.set_facecolor("black")
    # Create Canvas before any complex calculations
    canvas = FigureCanvasTkAgg(fig, master=App)
    Viewport = canvas.get_tk_widget()
    Viewport.configure(bg="black")
    Viewport.config(cursor = "fleur")
    axis = fig.add_subplot(111)
    fig.subplots_adjust(0.0,0.05,1,1)
    # Mouse interactions: drag stars / pan zoom gizmo / wheel zoom.
    canvas.mpl_connect("button_press_event", OnMousePress)
    canvas.mpl_connect("motion_notify_event", OnMouseDrag)
    canvas.mpl_connect("button_release_event", OnMouseRelease)
    canvas.mpl_connect('scroll_event',OnMouseScroll)
# Fill the Canvas window for the viewport
# Fill the Canvas window for the viewport
def DrawCanvas():
    """Render ``Data`` into the axis with the current levels/colormap."""
    global canvas, implot, ImageFrame, axis
    axis.clear()
    implot = axis.imshow(Data, vmin = level_perc[1], vmax = level_perc[0], cmap=ColorMaps[STCore.Settings._VISUAL_COLOR_.get()], norm = Modes[STCore.Settings._VISUAL_MODE_.get()])
    if STCore.Settings._SHOW_GRID_.get() == 1:
        axis.grid()
    axis.relim()
    canvas.draw()
    # Get axis limits and save it as a tuple
    global img_limits
    img_limits = (axis.get_xlim(), axis.get_ylim())
    UpdateCanvasOverlay()
# Creates the siderbar, but does not draw it to the UI
# Creates the siderbar, but does not draw it to the UI
def CreateSidebar(root):
    """Create the scrollable star sidebar and its action buttons."""
    global App, Sidebar, SidebarList, sidebar_buttons
    Sidebar = tk.Canvas(App, width = 300, relief = "flat", bg = "gray16")
    Sidebar.config(scrollregion=(0,0, 300, 1))
    SidebarList = ttk.Frame(Sidebar, width=300,height=root.winfo_height())
    Sidebar.create_window(300, 0, anchor=tk.NE, window=SidebarList, width=300, height=600)
    SidebarList.grid_columnconfigure(0, weight=1)
    ScrollBar = ttk.Scrollbar(App, command=Sidebar.yview)
    ScrollBar.grid(row=0, column=2, rowspan=3, sticky=tk.NS)
    Sidebar.config(yscrollcommand=ScrollBar.set)
    cmdTrack = lambda : Apply(root)
    def CommandCreate():
        # Add a star at the image center via the SetStar dialog.
        if Data is None:
            return
        loc = (int(Data.shape[0] * 0.5), int (Data.shape[1] * 0.5))
        SetStar.Awake(Data, None, OnStarChange, AddStar, location = loc, name = "Estrella " + str(len(Stars) + 1))
    def CommandBack():
        # Return to the image selector view.
        import STCore.ImageSelector
        Destroy()
        STCore.ImageSelector.Awake(root, [])
    def CommandExport():
        # Export per-star data to a ';'-separated text/CSV file.
        with filedialog.asksaveasfile(mode="w", filetypes=[("Valores separados por comas", "*.csv"), ("Archivo de texto", "*.txt")]) as f:
            n=0
            for star in Stars:
                # Replace ';' with any other separator character if needed.
                #star.PrintData((NAME, SUM, FBACK, AREA, SBR, VALUE, FLUX, MBACK, DBACK, VBACK, BSIZE), header= n==0, sep= "{};", stdout=f)
                star.PrintData((NAME, VALUE, SUM, AREA, FLUX, SUMVBACK, BACKREFS, ABACK, FLUXBACK, NETFLUX, ABSMAG), header= n==0, sep= "{};", stdout=f)
                n+=1
    sidebar_buttons = ttk.Frame(App)
    AddButton = ttk.Button(sidebar_buttons, text = "Agregar estrella", command = CommandCreate, style="Highlight.TButton", image=icons.GetIcon("add"), compound="left")
    PrevButton = ttk.Button(sidebar_buttons, text = " Volver", image = icons.GetIcon("prev"), command = CommandBack, compound="left")
    ExpButton = ttk.Button(sidebar_buttons, text= "Exportar datos", image=icons.GetIcon("export"), compound="left", command=CommandExport)
    NextButton = ttk.Button(sidebar_buttons, text = "Continuar", command = cmdTrack, image = icons.GetIcon("next"), compound = "right")
    AddButton.grid(row = 0, column = 0, columnspan=3, sticky = "ew")
    PrevButton.grid(row = 1, column = 0, sticky = "ew")
    ExpButton.grid(row=1, column=1, sticky="ew")
    NextButton.grid(row = 1, column = 2, sticky = "ew")
def CreateLevels():
    """Instantiate the level-adjustment widget wired to ChangeLevels."""
    global levelFrame
    levelFrame = Levels(App, ChangeLevels)
#endregion
#region Update Funcions
# True when the sidebar widgets must be rebuilt from scratch.
sidebar_dirty = False
def AddStar(star : StarItem, onlyUI = False):
    """Register *star* and create its sidebar widget.

    With ``onlyUI=True`` only the widget is (re)created, for stars that are
    already present in ``Stars`` (e.g. after loading a tracking file).
    """
    global Stars, sidebar_elements
    global SidebarList
    index = len(sidebar_elements)
    # onlyUI flag tells whether the program is adding new stars to the list, or just refreshing their UI elements
    if not onlyUI:
        Stars.append(star)
    def SetTrackerDirty():
        Tracker.DataChanged = True
    def SetSidebarDirty():
        global sidebar_dirty
        sidebar_dirty = True
    # NOTE(review): cmd_star captures ``index`` as a default but the body
    # uses Stars[index]; after deletions reshuffle the list this may open
    # the wrong star — confirm.
    cmd_star = lambda i=index: SetStar.Awake(Data, Stars[index], OnStarChange, None, i)
    cmd_delete = lambda i=index: (Stars.pop(i), sidebar_elements.pop(i), OnStarChange(), SetTrackerDirty(), SetSidebarDirty())
    element = StarElement(SidebarList, star, index, cmd_star, SetGuideStar, cmd_delete)
    element.grid(row=index, column=0, sticky= "nsew")
    sidebar_elements.append(element)
def SetGuideStar(index):
    """Mark the star at *index* as the guide star (type 1); all others type 0."""
    for pos, star in enumerate(Stars):
        star.type = 1 if pos == index else 0
    UpdateStarList()
def UpdateStarList():
    """Synchronize the sidebar widgets with ``Stars`` and pick a guide star."""
    global SidebarList, sidebar_elements, sidebar_dirty
    index = 0
    # Checks if sidebar is dirty: rebuild all widgets from scratch.
    if sidebar_dirty:
        for s in sidebar_elements:
            s.destroy()
        sidebar_elements = []
        sidebar_dirty = False
    # Recreate the list of elements if its size doesn't match the Stars (i.e. Load a trak file)
    if len(sidebar_elements) != len(Stars):
        for star in Stars:
            AddStar(star, onlyUI=True)
    # Assign the guide star if all or none of them are already set
    # brightest star index, guide star count, brightest star value
    if len(Stars) > 0:
        bsi, gs, bs = 0, 0, 0
        for star in Stars:
            if star.type == 1:
                gs += 1
            if star.value > bs:
                bsi = index
                bs = star.value
            index += 1
        # Zero or multiple guide stars: make the brightest one the guide
        # (SetGuideStar re-enters this function, hence the early return).
        if gs > 1 or gs == 0:
            SetGuideStar(bsi)
            return
    index = 0
    # Update elements if necessary
    star : StarItem
    for star in Stars:
        element :StarElement = sidebar_elements[index]
        element.update_star(star)
        index += 1
    SidebarList.config(height=32 * index)
    Sidebar.update_idletasks()
    Sidebar.config(scrollregion=SidebarList.bbox())
    #Sidebar.after(10, lambda:Sidebar.config(scrollregion=(0,0, 250, 32 * index)))
    #Sidebar.update_idletasks()
    App.after(40, UpdateCanvasOverlay)
def CheckVersion(star : StarItem, index):
    """Re-register *star* when it was saved by a different program version.

    Returns True when the star had to be recomputed, False otherwise.
    """
    # Very old stars carry no version attribute at all: always recompute.
    if not hasattr(star, "version"):
        SetStar.Awake(Data, star, OnStarChange, skipUI = True, starIndex=index)
        return True
    # Same version: nothing to migrate.
    if star.version == CURRENT_VER:
        return False
    # Saved under another version: re-register silently.
    SetStar.Awake(Data, star, OnStarChange, skipUI = True, starIndex=index)
    return True
def UpdateCanvasOverlay():
    """Redraw the per-star overlay (aperture box, bounds box and name label)."""
    # Removing items while iterating forward would break the iteration, so
    # iterate in reverse and delete the last element first.
    for a in reversed(axis.artists):
        # Keep the zoom gizmo artists alive across overlay refreshes.
        if a.label == "zoom_container" or a.label == "zoom_box":
            continue
        a.remove()
    for t in reversed(axis.texts):
        t.remove()
    for s in Stars:
        # Inner (white) rectangle: the star's measurement radius.
        rect_pos = (s.location[1] - s.radius, s.location[0] - s.radius)
        rect = Rectangle(rect_pos, s.radius *2, s.radius *2, edgecolor = "w", facecolor='none')
        rect.label = "Rect"+str(Stars.index(s))
        # Outer (yellow, dashed) rectangle: the star's search bounds.
        bound_pos = (s.location[1] - s.bounds, s.location[0] - s.bounds)
        bound = Rectangle(bound_pos, s.bounds*2, s.bounds *2, edgecolor = "y", linestyle = 'dashed', facecolor='none')
        bound.label = "Bound"+str(Stars.index(s))
        axis.add_artist(rect)
        axis.add_artist(bound)
        text_pos = (s.location[1], s.location[0] - s.bounds - 6)
        text = axis.annotate(s.name, text_pos, color='w', weight='bold',fontsize=6, ha='center', va='center')
        text.label = "Text"+str(Stars.index(s))
    canvas.draw_idle()
def UpdateZoomGizmo(scale, xrange, yrange):
    """Show/update the minimap gizmo indicating the zoomed viewport region.

    ``scale`` is 1/zoom_factor; ``xrange``/``yrange`` are half the full-image
    axis extents.  The gizmo is only visible while zoomed in.
    """
    global axis, zoom_factor, img_offset, z_container, z_box
    aspect = yrange/xrange
    # Change the size of the Gizmo (in data units, scaled with the zoom).
    size = 320
    if zoom_factor > 1:
        gizmo_w = size * scale
        gizmo_h = size * scale * aspect
        # Anchor the gizmo to the bottom-left of the visible region.
        gizmo_pos = img_offset[0] - xrange * scale, img_offset[1] + yrange * scale - gizmo_h
        if z_container is None:
            # First time zoomed: create frame + viewport indicator.
            z_container = Rectangle(gizmo_pos, gizmo_w, gizmo_h, edgecolor = "w", facecolor='none')
            z_container.label = "zoom_container"
            z_box = Rectangle(gizmo_pos, gizmo_w, gizmo_h, alpha = 0.5)
            z_box.label = "zoom_box"
            axis.add_artist(z_container)
            axis.add_artist(z_box)
        else:
            # Already shown: move/resize the existing artists.
            z_container.set_xy(gizmo_pos)
            z_container.set_width(gizmo_w)
            z_container.set_height(gizmo_h)
            z_box.set_x(gizmo_pos[0] + 0.5*(img_offset[0] * gizmo_w / xrange- gizmo_w * scale) )
            z_box.set_y(gizmo_pos[1] + 0.5*(img_offset[1] * gizmo_h / yrange- gizmo_h * scale) )
            z_box.set_width(gizmo_w * scale)
            z_box.set_height(gizmo_h * scale)
    else:
        # Fully zoomed out: remove the gizmo if present.
        if z_container is not None:
            z_container.remove()
            z_container = None
            z_box.remove()
            z_box = None
def ChangeLevels():
    """Apply the slider min/max levels and colormap/norm to the image plot."""
    global level_perc
    if implot is None:
        return
    # Keep min strictly below max, nudging whichever slider conflicts.
    if levelFrame.getMin() > levelFrame.getMax():
        levelFrame.setMin(levelFrame.getMax() - 1)
    if levelFrame.getMax() <= levelFrame.getMin():
        levelFrame.setMax(levelFrame.getMin() + 1)
    _min = levelFrame.getMin()
    _max = levelFrame.getMax()
    implot.norm.vmax = _max
    # +0.01 keeps vmin < vmax (LogNorm rejects equal limits).
    implot.norm.vmin = _min + 0.01
    implot.set_cmap(ColorMaps[STCore.Settings._VISUAL_COLOR_.get()])
    implot.set_norm(Modes[STCore.Settings._VISUAL_MODE_.get()])
    STCore.DataManager.Levels = (_max, _min)
    canvas.draw_idle()
#endregion
def Destroy():
    """Reset zoom/pan state, remove the zoom gizmo and hide this view."""
    global img_limits, zoom_factor, img_offset, z_container, z_box
    # Reset current viewport
    zoom_factor = 1
    axis.relim()
    axis.autoscale()
    if z_container is not None:
        z_container.remove()
        z_box.remove()
        z_container = None
        z_box = None
    img_offset = (0,0)
    img_limits = (axis.get_xlim(), axis.get_ylim())
    App.pack_forget()
    #gc.collect()
def Apply(root):
    """Hand the current stars over to the Tracker view.

    Shows an error dialog and aborts when no star has been defined.
    """
    items = DataManager.FileItemList
    from tkinter import messagebox
    if len(Stars) > 0:
        Destroy()
        Tracker.Awake(root, Stars, items)
        if DataManager.RuntimeEnabled == True:
            RuntimeAnalysis.StartRuntime(root)
    else:
        messagebox.showerror("Error", "Debe tener al menos una estrella para comenzar el analisis")
        return
    # Only reached on success: publish the star list globally.
    DataManager.StarItemList = Stars
def ClearStars():
    """Drop every star and destroy its sidebar widget."""
    global Stars, sidebar_elements
    Stars = []
    for widget in sidebar_elements:
        widget.destroy()
    sidebar_elements = []
#endregion
def OnMouseScroll(event):
    """Wheel handler: zoom in/out around the cursor, clamped to the image."""
    global Data, canvas, axis, zoom_factor, img_limits, img_offset
    # Check if for some reason, no limits were defined
    if img_limits is None:
        axis.relim()
        axis.autoscale(True)
        img_limits = (axis.get_xlim(), axis.get_ylim()) # By some reason mpl axis are inverted
    # Modify this for faster/slower increments
    increment = 0.5
    xdata = event.xdata # get event x location
    ydata = event.ydata # get event y location
    # If we are outside the viewport, then stop the function
    if xdata is None or ydata is None:
        return
    # Half-extents of the full image in axis coordinates.
    xrange = 0.5 * (img_limits[0][1] - img_limits[0][0])
    yrange = 0.5 * (img_limits[1][0] - img_limits[1][1])
    if event.button == 'up':
        # deal with zoom in (capped at 10x)
        if zoom_factor < 10:
            zoom_factor += increment
    elif event.button == 'down':
        # deal with zoom out (floored at 1x)
        if zoom_factor > 1:
            zoom_factor -= increment
    else:
        # deal with something that should never happen
        zoom_factor = 1
        print (event.button)
    scale = 1. / zoom_factor
    # Set the offset to the current mouse position, clamped so the visible
    # window never leaves the image.
    img_offset = numpy.clip(xdata * scale + (1-scale)*img_offset[0], xrange * scale, img_limits[0][1] - xrange * scale), numpy.clip(ydata * scale + (1-scale)*img_offset[1], yrange*scale, img_limits[1][0] - yrange * scale)
    axis.set_xlim([img_offset[0] - xrange * scale,
                   img_offset[0] + xrange * scale])
    axis.set_ylim([img_offset[1] + yrange * scale,
                   img_offset[1] - yrange * scale])
    UpdateZoomGizmo(scale, xrange, yrange)
    canvas.draw_idle() # force re-draw
#drag displacement = lastX, lastY, dispX, dispY
drag_displacement = (0, 0, 0, 0)
def OnMousePress(event):
    """Button-press handler: select a star rectangle or the zoom gizmo."""
    global canvas, MousePress, SelectedStar, axis, drag_displacement
    MousePress = 0, 0, event.xdata, event.ydata
    drag_displacement = event.xdata, event.ydata, 0, 0
    for a in axis.artists:
        contains, attrd = a.contains(event)
        if contains:
            x0, y0 = a.xy
            MousePress = x0, y0, event.xdata, event.ydata
            # Check if we selected the zoom controls
            if a.label == "zoom_container" or a.label == "zoom_box":
                setp(z_box, alpha = 1)
                setp(z_box, edgecolor = "w")
                SelectedStar = -100 # We'll use the code -100 to identify whether the zoom controls are selected (to avoid declaring more global variables)
                break
            # Artist labels are e.g. "Rect3"; the digits are the star index.
            SelectedStar = int(next(filter(str.isdigit, a.label)))
            setp(a, linewidth = 4)
        else:
            setp(a, linewidth = 1)
    canvas.draw_idle()
def OnMouseDrag(event):
    """Handle mouse motion while a button is held.

    Two modes, keyed on the global ``SelectedStar`` set by OnMousePress:
    -100 means the zoom gizmo is grabbed (pan the zoomed viewport); a
    non-negative value means a star marker is being dragged.
    """
    global MousePress, Stars, drag_displacement
    if MousePress is None or event.inaxes is None:
        return
    x0, y0, xpress, ypress = MousePress
    dx = event.xdata - xpress
    dy = event.ydata - ypress
    # Check whether the zoom controls are selected
    if SelectedStar == -100:
        if z_container is not None:
            global img_limits, axis, img_offset
            w, h = getp(z_container, "width"), getp(z_container, "height")
            xy = getp(z_container, "xy")
            # Half-extents of the full image in data coordinates; the y axis
            # is inverted for images, hence [1][0] - [1][1].
            xrange = 0.5 * (img_limits[0][1] - img_limits[0][0])
            yrange = 0.5 * (img_limits[1][0] - img_limits[1][1])
            scale = 1./zoom_factor
            # Map the cursor position inside the gizmo box to an image center.
            xcenter = 2*(event.xdata - xy[0]) * xrange / w
            ycenter = 2*(event.ydata - xy[1]) * yrange / h
            # Clamp so the zoomed viewport never leaves the image bounds.
            xcenter = numpy.clip(xcenter, xrange * scale, img_limits[0][1] - xrange * scale)
            ycenter = numpy.clip(ycenter, yrange * scale, img_limits[1][0] - yrange * scale)
            img_offset = xcenter, ycenter
            axis.set_xlim([xcenter - xrange * scale,
                           xcenter + xrange * scale])
            # y limits deliberately swapped: image rows grow downwards.
            axis.set_ylim([ycenter + yrange * scale,
                           ycenter - yrange * scale])
            UpdateZoomGizmo(scale, xrange, yrange)
            canvas.draw_idle()  # force re-draw
        return  # Stop the function here
    # Fail conditions
    if SelectedStar == -1 or len(Stars) == 0: return
    # Look up the selection rectangle, bounds box and label for this star.
    sel = list(filter(lambda obj: obj.label == "Rect"+str(SelectedStar), axis.artists))
    bod = list(filter(lambda obj: obj.label == "Bound"+str(SelectedStar), axis.artists))
    text = list(filter(lambda obj: obj.label == "Text"+str(SelectedStar), axis.texts))
    if len(sel) > 0 and len(text) > 0:
        sel[0].set_x(x0+dx + Stars[SelectedStar].bounds - Stars[SelectedStar].radius)
        sel[0].set_y(y0+dy + Stars[SelectedStar].bounds - Stars[SelectedStar].radius)
        bod[0].set_x(x0+dx)
        bod[0].set_y(y0+dy)
        text[0].set_x(x0 + dx + Stars[SelectedStar].bounds)
        text[0].set_y(y0 -6 +dy )
        # Location is stored (row, col) = (y, x) to match image indexing.
        Stars[SelectedStar].location = (int(y0 + dy + Stars[SelectedStar].bounds), int(x0 + dx + Stars[SelectedStar].bounds))
        canvas.draw_idle()
    # Accumulate absolute displacement so OnMouseRelease can distinguish a
    # click (below tolerance) from a drag.
    sx = drag_displacement[2] + abs(event.xdata - drag_displacement[0])
    sy = drag_displacement[3] + abs(event.ydata - drag_displacement[1])
    drag_displacement = event.xdata, event.ydata, sx, sy
def OnMouseRelease(event):
    """Finish a press/drag gesture.

    Releases the zoom gizmo, commits a dragged star via OnStarChange, or —
    when the pointer barely moved — treats the gesture as a click and opens
    the new-star dialog.
    """
    global MousePress, SelectedStar, drag_displacement
    # Change this value for lower/higher drag tolerance
    drag_tolerance = 0.2
    if SelectedStar == -100:
        # Zoom controls were grabbed: restore their idle look.
        if z_box is not None:
            setp(z_box, alpha = 0.5)
            setp(z_box, edgecolor = None)
        SelectedStar = -1
        # BUG FIX: the original returned without clearing MousePress and
        # without redrawing, so the alpha change only showed on the next
        # unrelated event.
        MousePress = None
        canvas.draw_idle()
        return
    if SelectedStar >= 0:
        OnStarChange()
    SelectedStar = -1
    # BUG FIX: guard against releases outside the axes, where xdata/ydata
    # are None and OnImageClick's int() conversion would raise.
    if (event.xdata is not None and event.ydata is not None
            and drag_displacement[2] < drag_tolerance
            and drag_displacement[3] < drag_tolerance):
        OnImageClick(event)
    for a in axis.artists:
        setp(a, linewidth = 1)
    MousePress = None
    canvas.draw_idle()
def OnImageClick(event):
    """Open the star editor for a new star at the clicked pixel (row, col)."""
    pixel = (int(event.ydata), int(event.xdata))
    next_name = "Estrella " + str(len(Stars) + 1)
    SetStar.Awake(Data, None, OnStarChange, AddStar,
                  location = pixel, name = next_name)
def OnStarChange(star : StarItem = None, index = -1):
    """Store *star* at *index* in the global list (when given) and refresh
    the star list UI."""
    global Stars
    if star is not None:
        Stars[index] = star
    UpdateStarList()
STCore.DataManager.StarItemList = Stars | JotaRata/StarTrak | STCore/ImageView.py | ImageView.py | py | 20,497 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams.update",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "ma... |
22545016509 | import pygame
from levels import *
from constants import *
from player import Player
from sounds import Sound
class Game:
    """Top-level game controller: owns the pygame window, clock, level table
    and the state flags driving the main loop."""

    def __init__(self):
        # pygame must be initialised before any display/font call below.
        pygame.init()
        self.background_sound = Sound()
        # Held keys repeat every 50 ms after a 50 ms delay (smooth movement).
        pygame.key.set_repeat(50, 50)
        size = [SCREEN_WIDTH, SCREEN_HEIGHT]
        self.screen = pygame.display.set_mode(size)
        pygame.display.set_caption(TITLE)
        self.clock = pygame.time.Clock()
        self.font_name = pygame.font.match_font(FONT)
        # running: False once the player quits for good.
        self.running = True
        # isover: True while a game-over / win screen is showing.
        self.isover = False
        self.gameispaused = False
        # Level number -> Level class; instantiated on demand in new()/update().
        self.levels = {
            1: Level_01,
            2: Level_02,
            3: Level_03,
            4: Level_04,
            5: Level_05,
            6: Level_06,
            7: Level_07,
            8: Level_08,
            9: Level_09,
            10: Level_10}
        self.show_start_screen()
    def new(self, level = 0):
        """Start a fresh game at ``level + 1`` (default: level 1) and enter
        the main loop via run()."""
        self.player = Player()
        self.active_sprite_list = pygame.sprite.Group()
        self.active_sprite_list.add(self.player)
        # The levels dict is 1-indexed, hence the +1.
        self.level = level + 1
        self.current_level = self.levels[self.level](self.player)
        self.player.level = self.current_level
        self.background_sound.next_song()
        self.run()
def run(self):
self.playing = True
while self.playing:
self.events()
self.update()
self.draw()
self.end_game()
def update(self):
if self.player.iskill:
self.playing = False
self.isover = True
self.gameover()
return
if not self.gameispaused and self.running:
self.active_sprite_list.update()
self.current_level.update()
# If the player gets near the right side, shift the world left (-x)
if self.player.rect.right >= 500:
diff = self.player.rect.right - 500
self.player.rect.right = 500
self.current_level.shift_world(-diff)
# If the player gets near the left side, shift the world right (+x)
if self.player.rect.left <= 120:
diff = 120 - self.player.rect.left
self.player.rect.left = 120
self.current_level.shift_world(diff)
# If the player gets to the end of the level, go to the next level
current_position = (self.player.rect.x
+ self.current_level.world_shift)
if current_position < self.current_level.level_limit:
self.player.rect.x = 120
if self.level < len(self.levels) - 1:
self.level += 1
self.current_level = self.levels[self.level](self.player)
self.player.level = self.current_level
else:
self.end_screen();
else:
return
def events(self):
for bullet in self.player.bullets:
for enemies in self.current_level.enemy_list:
if (bullet.y - bullet.radius < enemies.rect.y + 147
and bullet.y + bullet.radius > enemies.rect.y):
if (bullet.x + bullet.radius > enemies.rect.x
and bullet.x - bullet.radius < enemies.rect.x + 52):
enemies.loseenergy(self.player.power)
if bullet in self.player.bullets:
self.player.bullets.remove(bullet)
if bullet.x < SCREEN_WIDTH and bullet.x > 0:
bullet.x += bullet.vel
else:
if bullet in self.player.bullets:
self.player.bullets.remove(bullet)
for event in pygame.event.get():
if event.type == pygame.USEREVENT:
self.background_sound.next_song()
if event.type == pygame.QUIT:
if self.playing:
self.playing = False
self.running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
self.gameispaused = True
self.pause_screen()
if event.key == pygame.K_LEFT:
self.player.sprint(self.player.direction)
self.player.go_left()
if event.key == pygame.K_RIGHT:
self.player.sprint(self.player.direction)
self.player.go_right()
if event.key == pygame.K_UP:
self.last_key_pressed = pygame.K_UP
if self.level < 2:
self.draw_text(GRAVITY_WARN,
24,
RED,
SCREEN_WIDTH/2,
SCREEN_HEIGHT/2)
self.draw_text(OPTIONS,
22,
RED,
SCREEN_WIDTH/2,
SCREEN_HEIGHT * 3/4)
pygame.display.flip()
self.wait_for_key()
else:
self.player.jump()
if event.key == pygame.K_SPACE:
self.player.increasepower()
self.player.shoot()
if event.key == pygame.K_DOWN:
self.player.invisibility()
if event.type == pygame.KEYUP:
self.player.stop()
def draw(self):
self.screen.fill(BLACK)
self.current_level.draw(self.screen)
font = pygame.font.SysFont(FONT, 20, True)
if not self.player.invisible:
self.active_sprite_list.draw(self.screen)
text = font.render(HEALTH .format(self.player.health),
1,
RED)
self.screen.blit(text,
(self.player.rect.x -10,
self.player.rect.y -20))
text = font.render(PRESS_ME,
1,
WHITE)
self.screen.blit(text,
(SCREEN_WIDTH - 200, 10))
for bullet in self.player.bullets:
bullet.draw(self.screen)
self.clock.tick(FPS)
for enemies in self.current_level.enemy_list:
enemies.draw(self.screen)
if enemies.power is not 0:
text = font.render(LEVEL .format(enemies.power),
1,
WHITE)
self.screen.blit(text,
(enemies.rect.x- 10,
enemies.rect.y - 20))
pygame.display.flip()
pygame.display.update()
def show_start_screen(self):
self.screen.fill(WHITE)
self.draw_text(TITLE,
48,
BLACK,
SCREEN_WIDTH /2,
SCREEN_HEIGHT / 4)
self.draw_text(START,
32,
BLACK,
SCREEN_WIDTH / 2,
SCREEN_HEIGHT / 2)
self.draw_text(TIP_LVL,
12,
BLACK,
SCREEN_WIDTH / 5,
SCREEN_HEIGHT - 21)
pygame.display.flip()
self.wait_for_key()
def gameover(self):
self.background_sound.stop_music()
self.screen.fill(WHITE)
self.draw_text(GAME_OVER,
48,
BLACK,
SCREEN_WIDTH /2,
SCREEN_HEIGHT / 4)
self.draw_text(OPTIONS,
22,
BLACK,
SCREEN_WIDTH / 2,
SCREEN_HEIGHT * 3 / 4)
pygame.display.flip()
self.wait_for_key()
def pause_screen(self):
self.current_level.tip(self.screen)
self.draw_text(GAME_PAUSED,
22,
WHITE,
SCREEN_WIDTH /2,
SCREEN_HEIGHT/4)
self.draw_text(OPTIONS,
22,
WHITE,
SCREEN_WIDTH /2,
SCREEN_HEIGHT * 1/2)
pygame.display.flip()
self.wait_for_key()
    def wait_for_key(self):
        """Block until the player presses RETURN (continue/restart), Q
        (quit) or closes the window; resumes from pause or restarts after a
        game-over/win screen depending on the state flags."""
        waiting = True
        while waiting:
            # Keep ticking the clock so the wait doesn't spin at full speed.
            self.clock.tick(FPS)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    waiting = False
                    self.running = False
                if event.type == pygame.KEYUP:
                    if event.key == pygame.K_q:
                        self.running = False
                        self.playing = False
                        waiting = False
                    if event.key == pygame.K_RETURN:
                        waiting = False
                        if self.gameispaused:
                            # Resume a paused game.
                            self.gameispaused = False
                        if self.isover:
                            # Leaving a game-over/win screen starts a new game.
                            self.isover = False
                            self.new()
def end_screen(self):
self.screen.fill(WHITE)
self.isover = True
self.draw_text(GAME_WIN,
40,
BLACK,
SCREEN_WIDTH/2,
SCREEN_HEIGHT/2)
self.draw_text(OPTIONS,
22,
BLACK,
SCREEN_WIDTH / 2,
SCREEN_HEIGHT * 3 / 4)
pygame.display.flip()
self.wait_for_key()
def draw_text(self, text, size, color, x, y):
font = pygame.font.Font(self.font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.screen.blit(text_surface, text_rect)
def end_game(self):
if self.isover and not self.running:
pygame.quit()
| gustavooquinteiro/mathgame | mathgame/game.py | game.py | py | 10,259 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sounds.Sound",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.key.set_repeat",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.