seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
28924212361 | from typing import List
"""
method 1 : iterative call without for loop
"""
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return all subsets (the power set) of ``nums`` via binary DFS.

        At each index the search branches twice: once excluding the
        element and once including it; the working subset is snapshotted
        at each leaf.
        """
        results = []
        length = len(nums)

        def make_dfs_subset(subset, cur_idx):
            if cur_idx == length:
                # Copy: `subset` keeps mutating while the DFS backtracks.
                # (Removed the leftover debug print that polluted stdout.)
                results.append(list(subset))
                return
            # Branch 1: exclude nums[cur_idx].
            make_dfs_subset(subset, cur_idx + 1)
            # Branch 2: include nums[cur_idx].
            subset.append(nums[cur_idx])
            make_dfs_subset(subset, cur_idx + 1)
            # Backtrack with pop(): O(1) and removes exactly the element
            # just appended, whereas remove() scans for the first equal
            # value (wrong slot when nums contains duplicates).
            subset.pop()

        make_dfs_subset([], 0)
        return results
"""
idx = 0
"""
[1]
s = Solution()
print(s.subsets([1,2,3]))
| GuSangmo/BOJ_practice | Leetcode/78.subsets.py | 78.subsets.py | py | 766 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
}
] |
21723031249 | from simbolo import Simbolo, TOKENS, ZONA_DE_CODIGO, TIPO_DATO
from error import Error
import json
# Reserved words of the source language. They are preloaded into the
# symbol table when the lexer is constructed so that identifiers can be
# told apart from keywords during scanning.
palabras_reservadas = [
    'bool',
    'call',
    'char',
    'do',
    'else',
    'float',
    'for',
    'function',
    'if',
    'int',
    'main',
    'read',
    'return',
    'string',
    'then',
    'to',
    'void',
    'while',
    'write',
    'false',
    'true']
class Lexico:
    """Lexical analyzer: scans the source code and produces symbols (tokens).

    The scanner is implemented as a chain of automata; when one automaton
    fails to match, fallo() selects the entry state of the next one to try.
    """

    def __init__(self, codigo):
        """Build the analyzer for the given source code string."""
        self.codigo = " " + codigo + " "    # source code, padded with sentinels
        self.tablaSimb = []                 # symbol table
        self.index = 0                      # index of the current character
        self.inicioLex = 1                  # start of the current lexeme
        self.Lexema = ""                    # last lexeme found
        self.num_linea = 1                  # current line number of the source
        self.estado = 0                     # current state in the automata
        self.caracteres_permitidos = "(){}[],;+-*/\\%&|!"  # single-char tokens
        self.tipo_de_dato_actual = 0        # data type of identifiers being declared
        # Zone of the source code currently being processed.
        self.zona_de_codigo = ZONA_DE_CODIGO['DEF_VARIABLES_GLOBALES']
        # Index where the reserved-word definitions end in the table.
        self.fin_definicion_palabras_reservadas = None
        # Index where the global-variable definitions end.
        self.fin_definicion_variables_globales = None
        # Index where the local variables of the current function start.
        self.inicio_definicion_variables_locales = None
        # Index where the local variables of the current function end.
        self.fin_definicion_variables_locales = None
        self.error = Error()
        self.cargar_palabras_reservadas()   # preload reserved words into the table

    def insertar_simbolo(self, simbolo):
        """Insert a new symbol into the symbol table and return it."""
        if simbolo:
            self.tablaSimb.append(simbolo)
            return self.tablaSimb[len(self.tablaSimb) - 1]
        else:
            raise ValueError("Se esperaba un simbolo")

    def cargar_palabras_reservadas(self):
        """Load the reserved words into the symbol table."""
        for p in palabras_reservadas:
            self.insertar_simbolo(Simbolo(p, TOKENS[p.upper()]))
        self.fin_definicion_palabras_reservadas = len(self.tablaSimb)

    def mostrar_tabla_simbolos(self):
        """Print the contents of the symbol table."""
        for s in self.tablaSimb:
            print(s)

    def buscar_lexema(self, lexema):
        """Look up a lexeme in the symbol table, honoring the current code zone."""
        if self.zona_de_codigo == ZONA_DE_CODIGO['DEF_VARIABLES_GLOBALES']:
            for simb in self.tablaSimb:
                if lexema == simb.Lexema:
                    return simb
            return None
        elif self.zona_de_codigo == ZONA_DE_CODIGO['DEF_VARIABLES_LOCALES']:
            # Local definitions first, then reserved words only.
            for simb in self.tablaSimb[self.inicio_definicion_variables_locales:]:
                if lexema == simb.Lexema:
                    return simb
            for simb in self.tablaSimb[:self.fin_definicion_palabras_reservadas]:
                if lexema == simb.Lexema:
                    return simb
            return None
        elif self.zona_de_codigo == ZONA_DE_CODIGO['CUERPO_FUNCION_LOCAL']:
            # Locals of the current function first, then globals
            # (which include the reserved words).
            for simb in self.tablaSimb[self.inicio_definicion_variables_locales:]:
                if lexema == simb.Lexema:
                    return simb
            for simb in self.tablaSimb[:self.fin_definicion_variables_globales]:
                if lexema == simb.Lexema:
                    return simb
            return None
        elif self.zona_de_codigo == ZONA_DE_CODIGO['CUERPO_PRINCIPAL']:
            for simb in self.tablaSimb[:self.fin_definicion_variables_globales]:
                if lexema == simb.Lexema:
                    return simb
            return None

    def tablaSimb2JSON(self):
        """Return the contents of the symbol table as a JSON string."""
        return json.dumps([obj.__dict__ for obj in self.tablaSimb])

    def siguiente_caracter(self):
        """Return the next character of the source ('\\0' at end of input)."""
        self.index += 1
        try:
            return self.codigo[self.index]
        except IndexError:
            return '\0'

    def saltar_caracter(self):
        """Skip the current character (tabs, spaces, newlines, etc.)."""
        self.index += 1
        self.inicioLex = self.index

    def leer_lexema(self):
        """Return the string between inicioLex and index; reset the automata."""
        self.Lexema = self.codigo[self.inicioLex:self.index + 1]
        self.estado = 0
        self.avanza_inicio_lexema()
        return self.Lexema

    def regresa_caracter(self):
        """Push back one character (the '(*)' retract of an accepting state)."""
        self.index -= 1

    def avanza_inicio_lexema(self):
        """Move the start of the lexeme one character forward."""
        self.inicioLex = self.index + 1

    def deshacer_automata(self):
        """Rewind to the start of the current lexeme and return that character."""
        self.index = self.inicioLex
        return self.codigo[self.index]

    def siguiente_componente_lexico(self):
        """Return the next symbol found in the source code (None at end).

        Dispatches over self.estado; each accepting state returns a Simbolo,
        intermediate states consume one character per loop iteration.
        """
        while (True):
            if self.estado == 0:
                c = self.siguiente_caracter()
                if c == ' ' or c == '\t' or c == '\n':
                    self.avanza_inicio_lexema()     # ignore all whitespace
                    if c == '\n':
                        self.num_linea += 1         # count lines on newline
                elif c == '\0':
                    return None
                elif c == '<':
                    self.estado = 1
                elif c == '=':
                    self.estado = 5
                elif c == '>':
                    self.estado = 6
                else:
                    self.estado = self.fallo()      # try the next automaton
            elif self.estado == 1:
                c = self.siguiente_caracter()       # every intermediate state
                if c == '=':                        # must consume a character
                    self.estado = 2
                elif c == '>':
                    self.estado = 3
                else:
                    self.estado = 4
            elif self.estado == 2:
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['MEI'])
            elif self.estado == 3:
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['DIF'])
            elif self.estado == 4:
                self.regresa_caracter()
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['MEN'])
            elif self.estado == 5:
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['IGU'])
            elif self.estado == 6:
                c = self.siguiente_caracter()
                if c == '=':
                    self.estado = 7
                else:
                    self.estado = 8
            elif self.estado == 7:
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['MAI'])
            elif self.estado == 8:
                self.regresa_caracter()
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['MAY'])
            elif self.estado == 9:
                # Identifier / reserved-word automaton (c carried over
                # from the failing state).
                if c.isalpha():
                    self.estado = 10
                else:
                    self.estado = self.fallo()
            elif self.estado == 10:
                c = self.siguiente_caracter()
                if not c.isalnum():
                    self.estado = 11
            elif self.estado == 11:
                self.regresa_caracter()
                self.leer_lexema()
                simb = self.buscar_lexema(self.Lexema)
                if self.zona_de_codigo == 0 or self.zona_de_codigo == 1:
                    # Declaration zones: new identifiers are inserted;
                    # redeclaration is a semantic error.
                    if simb and simb.Token != TOKENS['ID']:
                        return simb
                    elif simb is None:
                        return self.insertar_simbolo(Simbolo(self.Lexema, TOKENS['ID'], self.tipo_de_dato_actual))
                    elif simb.Token == TOKENS['ID']:
                        self.error.reportar_error(self.num_linea, "Semantico", "La variable '{}' ya fue definida en el ambito actual.".format(self.Lexema))
                        return simb
                elif self.zona_de_codigo == 2 or self.zona_de_codigo == 3:
                    # Usage zones: identifiers must already be declared.
                    if simb:
                        return simb
                    else:
                        self.error.reportar_error(self.num_linea, "Semantico", "La variable '{}' no fue declarada.".format(self.Lexema))
                        return self.insertar_simbolo(Simbolo(self.Lexema, TOKENS['ID'], TIPO_DATO['na']))
            elif self.estado == 12:
                # Number automaton.
                if c.isdigit():
                    self.estado = 13
                else:
                    self.estado = self.fallo()
            elif self.estado == 13:
                c = self.siguiente_caracter()
                if c == 'E' or c == 'e':
                    self.estado = 16
                elif c == '.':
                    self.estado = 14
                elif not c.isdigit():
                    self.estado = 20
            elif self.estado == 14:
                c = self.siguiente_caracter()
                if c.isdigit():
                    self.estado = 15
                else:
                    self.estado = self.fallo()
            elif self.estado == 15:
                c = self.siguiente_caracter()
                if c == 'E' or c == 'e':
                    self.estado = 16
                elif not c.isdigit():
                    self.estado = 21
            elif self.estado == 16:
                c = self.siguiente_caracter()
                if c == '+' or c == '-':
                    self.estado = 17
                elif c.isdigit():
                    self.estado = 18
                else:
                    # BUG FIX: was `self.es = self.fallo()`, which created a
                    # stray attribute and left the automaton stuck in state 16.
                    self.estado = self.fallo()
            elif self.estado == 17:
                c = self.siguiente_caracter()
                if c.isdigit():
                    self.estado = 18
                else:
                    self.estado = self.fallo()
            elif self.estado == 18:
                c = self.siguiente_caracter()
                if not c.isdigit():
                    self.estado = 19
            elif self.estado == 19:
                self.regresa_caracter()
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['NUMF'])
            elif self.estado == 20:
                self.regresa_caracter()
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['NUM'])
            elif self.estado == 21:
                self.regresa_caracter()
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['NUMF'])
            elif self.estado == 22:
                # String-constant automaton.
                if c == '"':
                    self.estado = 23
                else:
                    self.estado = self.fallo()
            elif self.estado == 23:
                # NOTE(review): an unterminated string (EOF before the
                # closing '"') loops forever here — TODO confirm and guard.
                c = self.siguiente_caracter()
                if c == "\\":
                    self.estado = 24
                elif c == '"':
                    self.estado = 25
            elif self.estado == 24:
                c = self.siguiente_caracter()
                if c in 'nta"\\r':
                    self.estado = 23
                else:
                    self.estado = self.fallo()
            elif self.estado == 25:
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['CONST_STRING'])
            elif self.estado == 26:
                # Character-constant automaton.
                if c == "'":
                    self.estado = 27
                else:
                    self.estado = self.fallo()
            elif self.estado == 27:
                c = self.siguiente_caracter()
                if c == '\\':
                    self.estado = 28
                else:
                    self.estado = 29
            elif self.estado == 28:
                c = self.siguiente_caracter()
                if c in "nta'\\r":
                    self.estado = 29
                else:
                    self.estado = self.fallo()
            elif self.estado == 29:
                c = self.siguiente_caracter()
                if c == "'":
                    self.estado = 30
                else:
                    self.estado = self.fallo()
            elif self.estado == 30:
                self.leer_lexema()
                return Simbolo(self.Lexema, TOKENS['CONST_CHAR'])
            elif self.estado == 31:
                # Comment automaton ('//' line and '/* */' block comments).
                if c == "/":
                    self.estado = 32
                else:
                    self.estado = self.fallo()
            elif self.estado == 32:
                c = self.siguiente_caracter()
                if c == "/":
                    self.estado = 34
                elif c == "*":
                    self.estado = 33
                else:
                    # Lone '/': rewind and let the single-char automaton take it.
                    c = self.deshacer_automata()
                    self.estado = self.fallo()
            elif self.estado == 33:
                # NOTE(review): an unterminated block comment (EOF before
                # '*/') loops forever here — TODO confirm and guard.
                c = self.siguiente_caracter()
                if c == "*":
                    self.estado = 35
            elif self.estado == 34:
                c = self.siguiente_caracter()
                if c == "\n" or c == "\0":
                    self.estado = 36
            elif self.estado == 35:
                c = self.siguiente_caracter()
                if c == "/":
                    self.estado = 37
                else:
                    self.estado = 33
            elif self.estado == 36:
                # Line comment consumed: discard it and keep scanning.
                self.regresa_caracter()
                self.leer_lexema()
            elif self.estado == 37:
                # Block comment consumed: discard it and keep scanning.
                self.leer_lexema()
            elif self.estado == 38:
                # Single-character tokens: the token code is the char's ordinal.
                if c in self.caracteres_permitidos:
                    self.leer_lexema()
                    return Simbolo(c, ord(c))
                else:
                    self.estado = self.fallo()
            else:
                # State 99: no automaton matched — report a lexical error
                # and resume scanning from the next character.
                self.leer_lexema()
                self.error.reportar_error(self.num_linea, "Lexico", "Simbolo no permitido '{}'.".format(self.Lexema))

    def fallo(self):
        """Select the entry state of the next automaton to try (99 = error)."""
        if self.estado <= 8:
            return 9     # identifiers / reserved words
        elif self.estado <= 11:
            return 12    # numbers
        elif self.estado <= 21:
            return 22    # string constants
        elif self.estado <= 25:
            return 26    # character constants
        elif self.estado <= 30:
            return 31    # comments
        elif self.estado <= 37:
            return 38    # single-character tokens
        else:
            return 99    # nothing matched -> lexical error
| AbrahamupSky/Compilador | lexico/lexico.py | lexico.py | py | 12,471 | python | es | code | 1 | github-code | 36 | [
{
"api_name": "simbolo.ZONA_DE_CODIGO",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "error.Error",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "simbolo.Simbolo",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "simbolo.TOKENS",
... |
21407967690 | import requests
'''
r = requests.get("http://www.google.com")
r.encoding = 'utf-8'
s = r.text
print(s)
'''
def getText(url):
    """Fetch `url` and return the decoded response body.

    On any request failure the sentinel string "Erroroccur!" is returned
    (kept verbatim for caller compatibility) instead of raising.

    NOTE(review): timeout=500 is 500 seconds — presumably meant to be much
    smaller; TODO confirm.
    """
    try:
        r = requests.get(url, timeout = 500)
        r.raise_for_status()
        # Guess the real encoding from the body rather than the headers.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and masked programming errors.
        return "Erroroccur!"
if __name__ == "__main__":
    # Smoke test: fetch the Bilibili homepage and dump its HTML.
    url = "http://www.bilibili.com"
    print(getText(url))
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
72743077225 | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Exploratory cluster analysis
# %%
# Exploratory cluster analysis
# %load_ext autoreload
# %autoreload 2
# %% [markdown]
# ## Preamble
# %%
import altair as alt
import numpy as np
from scipy.stats import ttest_ind
import pandas as pd
import requests
from toolz import pipe
from afs_neighbourhood_analysis.getters.clustering import clustering_diagnostics
from afs_neighbourhood_analysis.pipeline.lad_clustering.cluster_utils import (
extract_clusters,
clustering_params,
)
from afs_neighbourhood_analysis.getters.clustering import (
early_years_for_clustering,
public_health_for_clustering,
)
# %%
# NOTE(review): `phf_long` is referenced here before it is defined (it is
# assigned much later in this file), so running the file top-to-bottom
# raises NameError. Presumably a stray notebook cell — TODO confirm/remove.
phf_long
# %%
def fetch_geojson(url):
    """Fetch a GeoJSON endpoint and return its feature properties as a DataFrame."""
    response = requests.get(url)
    properties = [feature["properties"] for feature in response.json()["features"]]
    return pd.DataFrame(properties)
def make_code_name_lookup():
    """Build a {geography code -> name} dict covering counties and LADs.

    Combines the ONS CTY19 boundary service with the LAD21 names workbook;
    LAD entries win on any duplicate code (merged second).
    """
    county_json = pipe(
        fetch_geojson(
            "https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/CTY_APR_2019_EN_NC/FeatureServer/0/query?outFields=*&where=1%3D1&f=geojson"
        ),
        lambda df: df.set_index("CTY19CD")["CTY19NM"].to_dict(),
    )
    lad_json = (
        pd.read_excel(
            "https://www.arcgis.com/sharing/rest/content/items/c4f647d8a4a648d7b4a1ebf057f8aaa3/data"
        )
        .set_index(["LAD21CD"])["LAD21NM"]
        .to_dict()
    )
    return {**county_json, **lad_json}
def get_code_nuts_lookup():
    """Build a {geography code -> ITL1 region name} dict.

    LAD codes are mapped directly from the LAD20->ITL1 workbook; county
    codes inherit the region of one of their constituent LADs.

    NOTE(review): assumes every LAD21 code in the county lookup is also a
    key of the LAD20-keyed `lad_nuts` dict — TODO confirm (a code change
    between LAD20 and LAD21 would raise KeyError).
    """
    lad_nuts = (
        pd.read_excel(
            "https://www.arcgis.com/sharing/rest/content/items/c110087ae04a4cacb4ab0aef960936ce/data"
        )
        .set_index("LAD20CD")["ITL121NM"]
        .to_dict()
    )
    # LAD21 code -> parent county code.
    lad_county = pipe(
        fetch_geojson(
            "https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/LAD21_CTY21_EN_LU/FeatureServer/0/query?outFields=*&where=1%3D1&f=geojson"
        ),
        lambda df: df.set_index("LAD21CD")["CTY21CD"],
    )
    return {
        **{code: name for code, name in lad_nuts.items()},
        **{lad_county[lad]: lad_nuts[lad] for lad in set(lad_county.keys())},
    }
def plot_ey_perf(ey, year, gender):
    """Boxplots of LA z-scores per cluster (one column per indicator)
    for a single year and gender."""
    return (
        alt.Chart(ey.query(f"year=={year}").query(f"gender=='{gender}'"))
        .mark_boxplot()
        .encode(
            y="cluster:O",
            color="cluster:N",
            x="zscore",
            tooltip=["la_name"],
            column="indicator",
        )
    ).properties(width=200)
def plot_ey_trend(ey, gender="Total"):
    """Boxplots of z-scores per year, faceted by cluster (columns) and
    indicator (rows), for one gender.

    Fix: the body previously read the module-level `ey_bench` frame
    instead of the `ey` argument, silently ignoring the caller's data.
    (The existing call site passes `ey_bench`, so behavior there is
    unchanged.)
    """
    return (
        alt.Chart(ey.query(f"gender=='{gender}'"))
        .mark_boxplot()
        .encode(
            x="year:O",
            y="zscore",
            column="cluster",
            color="cluster:N",
            row="indicator",
            tooltip=["la_name", "cluster"],
        )
    ).properties(width=100, height=100)
def plot_ey_year_comp(ey, gender="Total"):
    """Boxplots of z-scores per cluster, faceted by year (columns) and
    indicator (rows), for one gender.

    Fix: the body previously read the module-level `ey_bench` frame
    instead of the `ey` argument, silently ignoring the caller's data.
    (The existing call site passes `ey_bench`, so behavior there is
    unchanged.)
    """
    return (
        alt.Chart(ey.query(f"gender=='{gender}'"))
        .mark_boxplot()
        .encode(
            column="year:O",
            y="zscore",
            x="cluster:N",
            color="cluster:N",
            row="indicator",
            tooltip=["la_name", "cluster"],
        )
    ).properties(width=100, height=100)
def plot_ey_evol(ey_table, gender):
    """Per-LA z-score trajectories over time (coloured lines) with a black
    median line, faceted by indicator (rows) and cluster (columns).

    (Removed a large block of commented-out dead code.)
    """
    all_lads = (
        alt.Chart()
        .mark_line(point=True)
        .encode(
            x="year:N",
            y=alt.Y("zscore", scale=alt.Scale(zero=False)),
            color="cluster:N",
            detail="la_name",
            tooltip=["la_name", "zscore"],
        )
    ).properties(width=100, height=100)
    mean = (
        alt.Chart()
        .mark_line(point=False, color="black")
        .encode(
            x="year:N",
            y=alt.Y("median(zscore)", scale=alt.Scale(zero=False), title="Score"),
        )
    ).properties(width=100, height=100)
    return (
        alt.layer(all_lads, mean, data=ey_table.query(f"gender=='{gender}'"))
        .facet(row="indicator", column="cluster:N")
        .resolve_scale(y="independent")
    )
def phf_for_analysis(ph_table, cluster_lookup, code_name_lookup):
    """Reshape a wide (area_code x indicator) frame to long form.

    Returns one row per (area_code, indicator) with a `score` column plus
    `cluster` and `area_name` columns mapped from the lookups.

    Fix: the body previously used the module-level `phf` frame instead of
    the `ph_table` argument, silently ignoring the caller's data. (The
    existing call site passes `phf`, so behavior there is unchanged.)
    """
    return (
        ph_table.stack()
        .reset_index(name="score")
        .assign(cluster=lambda df: df["area_code"].map(cluster_lookup))
        .assign(area_name=lambda df: df["area_code"].map(code_name_lookup))
    )
def calc_mean_ph(ph_long):
    """Aggregate long-form public-health scores per (cluster, indicator).

    Returns one row per cluster/indicator with `mean` and `std` columns,
    plus a `rank` that orders indicators by the between-cluster spread of
    their means (larger spread -> higher rank).

    (Removed a stale commented-out duplicate of this body; replaced the
    redundant `lambda x: function(x)` with the function itself.)
    """
    ph_agg = pd.concat(
        [
            ph_long.rename(columns={"score": name})
            .groupby(["cluster", "indicator_name_expanded"])[name]
            .apply(function)
            for function, name in zip([np.mean, np.std], ["mean", "std"])
        ],
        axis=1,
    ).reset_index()
    return ph_agg.assign(
        rank=lambda df: df["indicator_name_expanded"].map(
            ph_agg.groupby("indicator_name_expanded")["mean"].std().rank(ascending=True)
        )
    )
def phf_ttest(phf_long, sig_level=0.05, equal_var=True):
    """t-test each cluster's scores against all other clusters, per indicator.

    Returns a frame with columns indicator / cluster / ttest_sign (p-value)
    and an `is_sig` flag for p-values below `sig_level`.
    """
    rows = []
    for indicator in phf_long["indicator_name_expanded"].unique():
        subset = phf_long.query(
            f"indicator_name_expanded == '{indicator}'"
        ).reset_index(drop=True)
        for cluster in subset["cluster"].unique():
            in_cluster = subset.query(f"cluster=={cluster}")["score"]
            rest = subset.query(f"cluster!={cluster}")["score"]
            result = ttest_ind(in_cluster, rest, equal_var=equal_var)
            rows.append([indicator, cluster, result.pvalue])
    out = pd.DataFrame(rows, columns=["indicator", "cluster", "ttest_sign"])
    return out.assign(is_sig=lambda df: df["ttest_sign"] < sig_level)
def plot_phf_differences(phf_long, sig_level=0.05, equal_var=True):
    """Heatmap of cluster-mean indicator scores, keeping only cells where
    the cluster differs significantly (t-test) from the rest; columns are
    sorted by the between-cluster spread of the indicator."""
    return (
        alt.Chart(
            pipe(phf_long, calc_mean_ph)
            .merge(
                phf_ttest(phf_long, sig_level, equal_var),
                left_on=["indicator_name_expanded", "cluster"],
                right_on=["indicator", "cluster"],
            )
            # Drop cells whose cluster/rest difference is not significant.
            .query("is_sig == True")
        )
        .mark_rect(filled=True)
        .encode(
            x=alt.X(
                "indicator_name_expanded",
                sort=alt.EncodingSortField("rank", order="descending"),
                axis=alt.Axis(labels=False, ticks=False),
            ),
            y="cluster:N",
            color=alt.Color("mean", scale=alt.Scale(scheme="Redblue", reverse=True)),
            tooltip=["cluster", "indicator_name_expanded", "mean"],
        )
        .properties(width=800, height=300)
    )
def plot_gender_gap_trend(gender_gap):
    """Girls/boys ratio over time per LA (coloured lines) with a black
    median line, faceted by indicator (rows) and cluster (columns).

    NOTE(review): an identical function with the same name is re-defined
    later in this file and shadows this one — TODO deduplicate.
    """
    all_lads = (
        alt.Chart()
        .mark_line(point=True)
        .encode(
            x="year:N",
            y=alt.Y("ratio", scale=alt.Scale(zero=False)),
            color="cluster:N",
            detail="new_la_code",
            tooltip=["la_name", "ratio"],
        )
    ).properties(width=100, height=100)
    mean = (
        alt.Chart()
        .mark_line(point=False, color="black")
        .encode(
            x="year:N",
            y=alt.Y("median(ratio)", scale=alt.Scale(zero=False), title="Gender ratio"),
        )
    ).properties(width=100, height=100)
    return (
        alt.layer(all_lads, mean, data=gender_gap)
        .facet(row="indicator", column="cluster:N")
        .resolve_scale(y="independent")
    )
def get_gender_gap(ey):
    """Girls/boys score ratio per LA, year and indicator.

    NOTE(review): relies on the module-level `clust_lu` and
    `code_name_lookup` dicts rather than parameters, so it must be called
    after the cells defining them have run — TODO pass them explicitly.
    """
    return (
        ey.groupby(["year", "indicator"])
        .apply(
            lambda df: df.pivot_table(
                index="new_la_code", columns="gender", values="score"
            ).assign(ratio=lambda df_2: (df_2["Girls"] / df_2["Boys"]))["ratio"]
        )
        .reset_index(drop=False)
        .assign(cluster=lambda df: df["new_la_code"].map(clust_lu))
        .assign(la_name=lambda df: df["new_la_code"].map(code_name_lookup))
    )
def plot_gender_gap_comp(gender_gap, year=2019):
    """Boxplots comparing the girls/boys ratio across clusters, one column
    per indicator, for a single year."""
    return (
        alt.Chart(gender_gap.query(f"year=={year}"))
        .mark_boxplot()
        .encode(
            y="cluster:N",
            x=alt.X("ratio", scale=alt.Scale(zero=False)),
            column="indicator",
            tooltip=["la_name", "ratio"],
            color="cluster:N",
        )
        .resolve_axis(x="independent")
        .properties(width=200)
    )
def plot_gender_gap_trend(gender_gap):
    """Visualise trends in the gender gap (girls/boys ratio) per LA with a
    black median line, faceted by indicator and cluster.

    NOTE(review): duplicate definition — this re-declaration shadows the
    identical function defined earlier in the file; TODO keep only one.
    """
    all_lads = (
        alt.Chart()
        .mark_line(point=True)
        .encode(
            x="year:N",
            y=alt.Y("ratio", scale=alt.Scale(zero=False)),
            color="cluster:N",
            detail="new_la_code",
            tooltip=["la_name", "ratio"],
        )
    ).properties(width=100, height=100)
    mean = (
        alt.Chart()
        .mark_line(point=False, color="black")
        .encode(
            x="year:N",
            y=alt.Y("median(ratio)", scale=alt.Scale(zero=False), title="Gender ratio"),
        )
    ).properties(width=100, height=100)
    return (
        alt.layer(all_lads, mean, data=gender_gap)
        .facet(row="indicator", column="cluster:N")
        .resolve_scale(y="independent")
    )
# %% [markdown]
# ## Clustering diagnostics

# %%
diag = clustering_diagnostics()

# %%
# Diagnostic value vs. PCA setting, faceted by diagnostic variable and
# coloured by community-detection resolution.
alt.Chart(diag).mark_point(filled=True).encode(
    x="pca", y="value", row="diagnostic_var", color="comm_resolution:O"
).resolve_scale(y="independent")

# %%
# Median diagnostic value per PCA setting.
pca_mean = (
    diag.groupby(["pca", "diagnostic_var"])["value"].median().reset_index(drop=False)
)
alt.Chart(pca_mean).mark_line(point=True).encode(
    x="pca", y="value", color="diagnostic_var"
)

# %%
# Median diagnostic value per community resolution.
com_res_mean = (
    diag.groupby(["comm_resolution", "diagnostic_var"])["value"]
    .median()
    .reset_index(drop=False)
)
alt.Chart(com_res_mean).mark_line(point=True).encode(
    x="comm_resolution", y="value", color="diagnostic_var"
)
# %% [markdown]
# ## Extract clusters

# %%
ey = early_years_for_clustering()
phf = public_health_for_clustering()

# %%
# 5 PCA components, 0.9 community resolution — presumably chosen from the
# diagnostics above; TODO confirm.
clust = extract_clusters(phf, 5, 0.9, clustering_params)

# %%
code_name_lookup = make_code_name_lookup()
code_nut_lookup = get_code_nuts_lookup()

# %%
# Tidy table: geography code -> cluster, with human-readable names attached.
cluster_df = (
    pd.Series(clust[1])
    .reset_index(name="cluster")
    .assign(geo_name=lambda df: df["index"].map(code_name_lookup))
    .assign(nuts_name=lambda df: df["index"].map(code_nut_lookup))
    .rename(columns={"index": "geo_code"})
)

# %% [markdown]
# ## Explore cluster results

# %% [markdown]
# ### Regional differences

# %%
# Share of each region within every cluster, shown as stacked bars.
clust_region_shares = (
    cluster_df.groupby("cluster")["nuts_name"]
    .apply(lambda x: x.value_counts(normalize=True))
    .unstack()
    .fillna(0)
    .stack()
    .reset_index(name="share")
    .rename(columns={"level_1": "region"})
)

reg_bar = (
    alt.Chart(clust_region_shares)
    .mark_bar()
    .encode(y="cluster:O", x="share", color="region")
)

reg_bar

# %% [markdown]
# ### EFSYP performance differences

# %%
clust_lu = cluster_df.set_index("geo_code")["cluster"].to_dict()
# %%
# Early-years data annotated with cluster labels (rows without a cluster dropped).
ey_bench = ey.assign(cluster=lambda df: df["new_la_code"].map(clust_lu)).dropna(
    axis=0, subset=["cluster"]
)

# %%
ey_comp = (
    ey.query("year==2019")
    .query("gender=='Total'")
    .assign(cluster=lambda df: df["new_la_code"].map(clust_lu))
    .dropna(axis=0, subset=["cluster"])
)

# %%
plot_ey_perf(ey_bench, 2019, "Total")

# %%
plot_ey_perf(ey_bench, 2019, "Boys")

# %% [markdown]
# ### Evolution of differences

# %%
plot_ey_trend(ey_bench)

# %% [markdown]
# ### Year on year comparisons

# %%
plot_ey_year_comp(ey_bench)

# %%
# Year-on-year correlation of LA z-scores per indicator, against 2019.
(
    ey_bench.query("gender=='Total'")
    .groupby("indicator")
    .apply(
        lambda x: x.pivot_table(
            index="new_la_code", columns="year", values="zscore"
        ).corr()
    )[2019]
    .unstack()
)

# %% [markdown]
# ### Differences between clusters

# %%
phf_long = phf_for_analysis(phf, clust_lu, code_name_lookup)

# %%
plot_phf_differences(phf_long, sig_level=0.01)

# %% [markdown]
# ### Improvements in performance inside clusters

# %%
# Where have we seen the greatest improvements inside clusters?

# %%
plot_ey_evol(ey_bench, "Girls")

# %%
# Other things to do:
# 1. Calculate SHAPLEY values for variables
# 2. measure gender gap in performance inside clusters
# 3. Create choropleth

# %%
# Gender gap

# %%

# %%
gender_gap = get_gender_gap(ey)

# %%
gender_gap.columns

# %%
# Inspect LAs whose name lookup failed.
gender_gap.loc[gender_gap["la_name"].isna()]

# %%
plot_gender_gap_comp(gender_gap, year=2019)

# %%
plot_gender_gap_trend(gender_gap)

# %%
la = fetch_geojson(
    "https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/Counties_and_Unitary_Authorities_December_2021_UK_BGC/FeatureServer/0/query?outFields=*&where=1%3D1&f=geojson"
)

# %%
names_codes_lookup_2 = la.set_index("CTYUA21CD")["CTYUA21NM"].to_dict()

# %%
names_codes_lookup_2

# %%
| nestauk/afs_neighbourhood_analysis | afs_neighbourhood_analysis/analysis/cluster_eda.py | cluster_eda.py | py | 14,185 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "toolz.pipe",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "toolz.pipe",
"line_... |
36955296199 | import wiredtiger, wttest, string, random, time
from wtbound import *
from enum import Enum
from wtscenario import make_scenarios
# Mutating operations the fuzzer can apply to the key range.
operations = Enum("operations", ["UPSERT", "REMOVE", "TRUNCATE"])
# Logical state of a key in the in-memory model of the table.
key_states = Enum("key_states", ["UPSERTED", "DELETED", "NONE"])
# Cursor operations exercised against a bounded cursor.
bound_scenarios = Enum("bound_scenarios", ["NEXT", "PREV", "SEARCH", "SEARCH_NEAR"])
# Which end of a bound range is being referred to.
bound_type = Enum("bound_type", ["LOWER", "UPPER"])
class key():
    """In-memory model of one database key, used to validate cursor results."""

    # Class-level defaults; real instances overwrite these in __init__/update.
    key_state = key_states.NONE
    data = -1
    value = "none"
    prepared = False
    timestamp = 0

    def __init__(self, data, value, key_state, timestamp):
        self.key_state = key_state
        self.data = data
        self.value = value
        self.timestamp = timestamp

    def clear_prepared(self):
        # Called once the owning prepared transaction commits.
        self.prepared = False

    def is_prepared(self):
        return self.prepared

    def is_deleted(self):
        return self.key_state == key_states.DELETED

    def is_out_of_bounds(self, bound_set):
        """True when this key falls outside the given cursor bound range."""
        return not bound_set.in_bounds_key(self.data)

    def is_deleted_or_oob(self, bound_set):
        """True when the key should not be returned by a bounded cursor."""
        return self.is_deleted() or self.is_out_of_bounds(bound_set)

    def update(self, value, key_state, timestamp, prepare):
        """Apply a new value/state to the key, recording its prepare status."""
        self.value = value
        self.key_state = key_state
        self.timestamp = timestamp
        self.prepared = prepare

    def to_string(self):
        """Human-readable summary used in verbose fuzzer logging."""
        return "Key: " + str(self.data) + ", state: " + str(self.key_state) + ", prepared: " + str(self.prepared)

    def equals(self, key, value):
        """True iff this key is live (upserted) and matches key/value."""
        if (self.key_state == key_states.UPSERTED and self.data == key and self.value == value):
            return True
        else:
            return False
# test_cursor_bound_fuzz.py
# A python test fuzzer that generates a random key range and applies bounds to it, then runs
# randomized operations and validates them for correctness.
class test_cursor_bound_fuzz(wttest.WiredTigerTestCase):
file_name = 'test_fuzz.wt'
iteration_count = 200 if wttest.islongtest() else 50
# For each iteration we do search_count searches that way we test more cases without having to
# generate as many key ranges.
search_count = 20
key_count = 10000 if wttest.islongtest() else 1000
# Large transactions throw rollback errors so we don't use them in the long test.
transactions_enabled = False if wttest.islongtest() else True
value_size = 100000 if wttest.islongtest() else 100
prepare_frequency = 5/100
update_frequency = 2/10
min_key = 1
# Max_key is not inclusive so the actual max_key is max_key - 1.
max_key = min_key + key_count
# A lot of time was spent generating values, to achieve some amount of randomness we pre
# generate N values and keep them in memory.
value_array = []
value_array_size = 20
current_ts = 1
applied_ops = False
key_range = {}
types = [
('file', dict(uri='file:')),
('table', dict(uri='table:'))
]
data_format = [
('row', dict(key_format='i')),
('column', dict(key_format='r'))
]
scenarios = make_scenarios(types, data_format)
# Iterates valid keys from min_key to max_key, the maximum key is defined as max_key - 1.
# Python doesn't consider the end of the range as inclusive.
def key_range_iter(self):
for i in range(self.min_key, self.max_key):
yield i
    def dump_key_range(self):
        """Log the state of every key in the range (debug aid)."""
        for i in self.key_range_iter():
            self.pr(self.key_range[i].to_string())
# Generate a random ascii value.
def generate_value(self):
return ''.join(random.choice(string.ascii_lowercase) for _ in range(self.value_size))
# Get a value from the value array.
def get_value(self):
return self.value_array[random.randrange(self.value_array_size)]
    # Get a key within the range of min_key and max_key.
    def get_random_key(self):
        """Return a random key in [min_key, max_key)."""
        return random.randrange(self.min_key, self.max_key)
    # Update a key using the cursor and update its in memory representation.
    def apply_update(self, cursor, key_id, prepare):
        """Upsert key_id with a random value and mirror the change in memory."""
        value = self.get_value()
        cursor[key_id] = value
        self.key_range[key_id].update(value, key_states.UPSERTED, self.current_ts, prepare)
        self.verbose(3, "Updating " + self.key_range[key_id].to_string())
    # Remove a key using the cursor and mark it as deleted in memory.
    # If the key is already deleted we skip the remove.
    def apply_remove(self, cursor, key_id, prepare):
        """Delete key_id via the cursor and mirror the deletion in memory."""
        if (self.key_range[key_id].is_deleted()):
            # Removing a removed key would return WT_NOTFOUND; skip it.
            return
        cursor.set_key(key_id)
        self.assertEqual(cursor.remove(), 0)
        self.key_range[key_id].update(None, key_states.DELETED, self.current_ts, prepare)
        self.verbose(3, "Removing " + self.key_range[key_id].to_string())
    # Apply a truncate operation to the key range.
    def apply_truncate(self, session, cursor, cursor2, prepare):
        """Truncate a random [lower_key, upper_key] span and mark it deleted.

        No-op when lower_key is the last key (no room for an upper bound).
        """
        lower_key = self.get_random_key()
        if (lower_key + 1 < self.max_key):
            upper_key = random.randrange(lower_key + 1, self.max_key)
            cursor.set_key(lower_key)
            cursor2.set_key(upper_key)
            self.assertEqual(session.truncate(None, cursor, cursor2, None), 0)
            # Mark all keys from lower_key to upper_key (inclusive) as deleted.
            for key_id in range(lower_key, upper_key + 1):
                self.key_range[key_id].update(None, key_states.DELETED, self.current_ts, prepare)
            self.verbose(3, "Truncated keys between: " + str(lower_key) + " and: " + str(upper_key))
    # Each iteration calls this function once to update the state of the keys
    # in the database and in memory.
    def apply_ops(self, session, cursor, prepare):
        """Mutate the key range: either one truncate, or per-key upsert/remove."""
        op = random.choice(list(operations))
        if (op is operations.TRUNCATE and self.applied_ops):
            # Truncate requires data to already exist (applied_ops set).
            cursor2 = session.open_cursor(self.uri + self.file_name)
            self.apply_truncate(session, cursor, cursor2, prepare)
        else:
            for i in self.key_range_iter():
                # Randomly leave a fraction of keys untouched.
                # NOTE(review): the name `update_frequency` reads as "chance
                # to update", but this actually *skips* the key with that
                # probability — TODO confirm the intent.
                if (random.uniform(0, 1) < self.update_frequency):
                    continue
                # Re-roll the operation for every key; TRUNCATE is a no-op here.
                op = random.choice(list(operations))
                if (op is operations.TRUNCATE):
                    pass
                elif (op is operations.UPSERT):
                    self.apply_update(cursor, i, prepare)
                elif (op is operations.REMOVE):
                    self.apply_remove(cursor, i, prepare)
                else:
                    raise Exception("Unhandled operation generated")
        self.applied_ops = True
    # As prepare throws a prepare conflict exception we wrap the call to
    # anything that could encounter a prepare conflict in a try except, we
    # then return the error code to the caller.
    def prepare_call(self, func):
        """Invoke func(), mapping a prepare-conflict exception to its code.

        Returns func()'s result, or WT_PREPARE_CONFLICT when the call raised
        a prepare conflict; any other WiredTigerError is re-raised.
        """
        try:
            ret = func()
        except wiredtiger.WiredTigerError as e:
            if wiredtiger.wiredtiger_strerror(wiredtiger.WT_PREPARE_CONFLICT) in str(e):
                ret = wiredtiger.WT_PREPARE_CONFLICT
            else:
                raise e
        return ret
# Once we commit the prepared transaction, update and clear the prepared flags.
def clear_prepare_key_ranges(self):
for i in self.key_range_iter():
self.key_range[i].clear_prepared()
    # Given a bound, this functions returns the start or end expected key of
    # the bounded range. Note the type argument determines if we return the
    # start or end limit. e.g. if we have a lower bound then the key would be
    # the lower bound, however if the lower bound isn't enabled then the
    # lowest possible key would be min_key. max_key isn't inclusive so we
    # subtract 1 off it.
    def get_expected_limit_key(self, bound_set, type):
        """Return the first (LOWER) or last (UPPER) key the bounds allow."""
        if (type == bound_type.LOWER):
            if (bound_set.lower.enabled):
                if (bound_set.lower.inclusive):
                    return bound_set.lower.key
                # Exclusive lower bound: the first visible key is one above it.
                return bound_set.lower.key + 1
            return self.min_key
        if (bound_set.upper.enabled):
            if (bound_set.upper.inclusive):
                return bound_set.upper.key
            # Exclusive upper bound: the last visible key is one below it.
            return bound_set.upper.key - 1
        return self.max_key - 1
    # When a prepared cursor walks next or prev it can skip deleted records
    # internally before returning a prepare conflict, we don't know which key
    # it got to so we need to validate that we see a series of deleted keys
    # followed by a prepared key.
    def validate_deleted_prepared_range(self, start_key, end_key, next):
        """Assert keys from start_key toward end_key are deleted until the
        first prepared key; `next` selects the walk direction."""
        if (next):
            step = 1
        else:
            step = -1
        self.verbose(3, "Walking deleted range from: " + str(start_key) + " to: " + str(end_key))
        for i in range(start_key, end_key, step):
            self.verbose(3, "Validating state of key: " + self.key_range[i].to_string())
            if (self.key_range[i].is_prepared()):
                # Found the prepared key that triggered the conflict: done.
                return
            elif (self.key_range[i].is_deleted()):
                continue
            else:
                # A live, unprepared key should have been returned instead
                # of a prepare conflict.
                self.assertTrue(False)
    # Validate a prepare conflict in the cursor->next scenario.
    def validate_prepare_conflict_next(self, current_key, bound_set):
        """Check the keys skipped by a forward walk before a prepare conflict."""
        self.verbose(3, "Current key is: " + str(current_key) + " min_key is: " + str(self.min_key))
        start_range = None
        if current_key == self.min_key:
            # We hit a prepare conflict while walking forwards before we
            # stepped to a valid key. Therefore validate all the keys from
            # start of the range are deleted followed by a prepare.
            start_range = self.get_expected_limit_key(bound_set, bound_type.LOWER)
        else:
            # We walked part of the way through a valid key range before we
            # hit the prepared update. Therefore validate the range between
            # our current key and the end range.
            start_range = current_key
        end_range = self.get_expected_limit_key(bound_set, bound_type.UPPER)
        # Perform validation from the start range to end range.
        self.validate_deleted_prepared_range(start_range, end_range, True)
    # Validate a prepare conflict in the cursor->prev scenario.
    def validate_prepare_conflict_prev(self, current_key, bound_set):
        """Check the keys skipped by a backward walk before a prepare conflict."""
        self.verbose(3, "Current key is: " + str(current_key) + " max_key is: " + str(self.max_key))
        start_range = None
        if current_key == self.max_key - 1:
            # We hit a prepare conflict while walking backwards before we
            # stepped to a valid key. Therefore validate all the keys from
            # start of the range are deleted followed by a prepare.
            start_range = self.get_expected_limit_key(bound_set, bound_type.UPPER)
        else:
            # We walked part of the way through a valid key range before we
            # hit the prepared update. Therefore validate the range between
            # our current key and the end range.
            start_range = current_key
        end_range = self.get_expected_limit_key(bound_set, bound_type.LOWER)
        # Perform validation from the start range to end range.
        self.validate_deleted_prepared_range(start_range, end_range, False)
# Walk the cursor using cursor->next and validate the returned keys.
def run_next(self, bound_set, cursor):
    """Walk the table with next() and validate every key against the in-memory
    key_range, including keys the cursor skipped as deleted or out-of-bounds.
    """
    # This array gives us confidence that we have validated the full key range.
    checked_keys = []
    self.verbose(3, "Running scenario: NEXT")
    key_range_it = self.min_key - 1
    ret = self.prepare_call(lambda: cursor.next())
    while (ret != wiredtiger.WT_NOTFOUND and ret != wiredtiger.WT_PREPARE_CONFLICT):
        current_key = cursor.get_key()
        current_value = cursor.get_value()
        self.verbose(3, "Cursor next walked to key: " + str(current_key) + " value: " + current_value)
        self.assertTrue(bound_set.in_bounds_key(current_key))
        self.assertTrue(self.key_range[current_key].equals(current_key, current_value))
        checked_keys.append(current_key)
        # If the cursor has walked to a record that isn't +1 our current record then it
        # skipped something internally.
        # Check that the key range between key_range_it and current_key isn't visible
        if (current_key != key_range_it + 1):
            for i in range(key_range_it + 1, current_key):
                self.verbose(3, "Checking key is deleted or oob: " + str(i))
                checked_keys.append(i)
                self.assertTrue(self.key_range[i].is_deleted_or_oob(bound_set))
        key_range_it = current_key
        ret = self.prepare_call(lambda: cursor.next())
    key_range_it = key_range_it + 1
    # If we were returned a prepare conflict it means the cursor has found a prepared key/value.
    # We need to validate that it arrived there correctly using the in memory state of the
    # database. We cannot continue from a prepare conflict so we return.
    if (ret == wiredtiger.WT_PREPARE_CONFLICT):
        self.validate_prepare_conflict_next(key_range_it, bound_set)
        return
    # If key_range_it is < key_count then the rest of the range was deleted
    # Remember to increment it by one to get it to the first not in bounds key.
    for i in range(key_range_it, self.max_key):
        checked_keys.append(i)
        self.verbose(3, "Checking key is deleted or oob: " + str(i))
        self.assertTrue(self.key_range[i].is_deleted_or_oob(bound_set))
    # Every key in the table must have been validated exactly once.
    self.assertTrue(len(checked_keys) == self.key_count)
# Walk the cursor using cursor->prev and validate the returned keys.
def run_prev(self, bound_set, cursor):
    """Walk the table with prev() and validate every key against the in-memory
    key_range; the reverse-direction mirror of run_next.
    """
    # This array gives us confidence that we have validated the full key range.
    checked_keys = []
    self.verbose(3, "Running scenario: PREV")
    ret = self.prepare_call(lambda: cursor.prev())
    key_range_it = self.max_key
    while (ret != wiredtiger.WT_NOTFOUND and ret != wiredtiger.WT_PREPARE_CONFLICT):
        current_key = cursor.get_key()
        current_value = cursor.get_value()
        self.verbose(3, "Cursor prev walked to key: " + str(current_key) + " value: " + current_value)
        self.assertTrue(bound_set.in_bounds_key(current_key))
        self.assertTrue(self.key_range[current_key].equals(current_key, current_value))
        checked_keys.append(current_key)
        # If the cursor has walked to a record that isn't -1 our current record then it
        # skipped something internally.
        # Check that the key range between key_range_it and current_key isn't visible
        if (current_key != key_range_it - 1):
            # Check that the key range between key_range_it and current_key isn't visible
            for i in range(current_key + 1, key_range_it):
                self.verbose(3, "Checking key is deleted or oob: " + str(i))
                checked_keys.append(i)
                self.assertTrue(self.key_range[i].is_deleted_or_oob(bound_set))
        key_range_it = current_key
        ret = self.prepare_call(lambda: cursor.prev())
    # If key_range_it is > key_count then the rest of the range was deleted
    key_range_it -= 1
    if (ret == wiredtiger.WT_PREPARE_CONFLICT):
        self.validate_prepare_conflict_prev(key_range_it, bound_set)
        return
    for i in range(self.min_key, key_range_it + 1):
        checked_keys.append(i)
        self.verbose(3, "Checking key is deleted or oob: " + str(i))
        self.assertTrue(self.key_range[i].is_deleted_or_oob(bound_set))
    # Every key in the table must have been validated exactly once.
    self.assertTrue(len(checked_keys) == self.key_count)
# Run basic cursor->search() scenarios and validate the outcome.
def run_search(self, bound_set, cursor):
    """Search for `search_count` random keys and validate each outcome
    (prepared / not-found / found) against the in-memory key state.
    """
    # Choose a N random keys and perform a search on each
    for i in range(0, self.search_count):
        search_key = self.get_random_key()
        cursor.set_key(search_key)
        ret = self.prepare_call(lambda: cursor.search())
        if (ret == wiredtiger.WT_PREPARE_CONFLICT):
            # A conflict is only valid if the searched key really is prepared.
            self.assertTrue(self.key_range[search_key].is_prepared())
        elif (ret == wiredtiger.WT_NOTFOUND):
            # Not-found must mean the key is deleted or outside the bounds.
            self.assertTrue(self.key_range[search_key].is_deleted_or_oob(bound_set))
        elif (ret == 0):
            # Assert that the key exists, and is within the range.
            self.assertTrue(self.key_range[search_key].equals(cursor.get_key(), cursor.get_value()))
            self.assertTrue(bound_set.in_bounds_key(cursor.get_key()))
        else:
            raise Exception('Unhandled error returned by search')
# Check that all the keys within the given bound_set are deleted.
def check_all_within_bounds_not_visible(self, bound_set):
    """Return True iff every in-bounds key is deleted (nothing visible)."""
    for i in range(bound_set.start_range(self.min_key), bound_set.end_range(self.max_key)):
        self.verbose(3, "checking key: " + self.key_range[i].to_string())
        if (not self.key_range[i].is_deleted()):
            return False
    return True
# Run a cursor->search_near scenario and validate that the outcome was correct.
def run_search_near(self, bound_set, cursor):
    """Search-near `search_count` random keys and validate the returned key is
    the nearest visible in-bounds key (or that not-found/prepare-conflict
    outcomes were legitimate).
    """
    # Choose N random keys and perform a search near.
    for i in range(0, self.search_count):
        search_key = self.get_random_key()
        cursor.set_key(search_key)
        self.verbose(3, "Searching for key: " + str(search_key))
        ret = self.prepare_call(lambda: cursor.search_near())
        if (ret == wiredtiger.WT_NOTFOUND):
            self.verbose(3, "Nothing visible checking.")
            # Nothing visible within the bound range.
            # Validate.
        elif (ret == wiredtiger.WT_PREPARE_CONFLICT):
            # Due to the complexity of the search near logic we will simply check if there is
            # a prepared key within the range.
            found_prepare = False
            for i in range(bound_set.start_range(self.min_key), bound_set.end_range(self.max_key)):
                if (self.key_range[i].is_prepared()):
                    found_prepare = True
                    break
            self.assertTrue(found_prepare)
            self.verbose(3, "Received prepare conflict in search near.")
        else:
            key_found = cursor.get_key()
            self.verbose(3, "Found a key: " + str(key_found))
            current_key = key_found
            # Assert the value we found matches.
            # Equals also validates that the key is visible.
            self.assertTrue(self.key_range[current_key].equals(current_key, cursor.get_value()))
            if (bound_set.in_bounds_key(search_key)):
                # We returned a key within the range, validate that key is the one that
                # should've been returned.
                if (key_found == search_key):
                    # We've already determined the key matches. We can return.
                    pass
                if (key_found > search_key):
                    # Walk left and validate that all isn't visible to the search key.
                    while (current_key != search_key):
                        current_key = current_key - 1
                        self.assertTrue(self.key_range[current_key].is_deleted())
                if (key_found < search_key):
                    # Walk right and validate that all isn't visible to the search key.
                    while (current_key != search_key):
                        current_key = current_key + 1
                        self.assertTrue(self.key_range[current_key].is_deleted())
            else:
                # We searched for a value outside our range, we should return whichever value
                # is closest within the range.
                if (bound_set.lower.enabled and search_key <= bound_set.lower.key):
                    # We searched to the left of our bounds. In the equals case the lower bound
                    # must not be inclusive.
                    # Validate that the we returned the nearest value to the lower bound.
                    if (bound_set.lower.inclusive):
                        self.assertTrue(key_found >= bound_set.lower.key)
                        current_key = bound_set.lower.key
                    else:
                        self.assertTrue(key_found > bound_set.lower.key)
                        current_key = bound_set.lower.key + 1
                    # Everything between the bound edge and the returned key must be deleted.
                    while (current_key != key_found):
                        self.assertTrue(self.key_range[current_key].is_deleted())
                        current_key = current_key + 1
                elif (bound_set.upper.enabled and search_key >= bound_set.upper.key):
                    # We searched to the right of our bounds. In the equals case the upper bound
                    # must not be inclusive.
                    # Validate that the we returned the nearest value to the upper bound.
                    if (bound_set.upper.inclusive):
                        self.assertTrue(key_found <= bound_set.upper.key)
                        current_key = bound_set.upper.key
                    else:
                        self.assertTrue(key_found < bound_set.upper.key)
                        current_key = bound_set.upper.key - 1
                    # Everything between the bound edge and the returned key must be deleted.
                    while (current_key != key_found):
                        self.assertTrue(self.key_range[current_key].is_deleted())
                        current_key = current_key - 1
                else:
                    raise Exception('Illegal state found in search_near')
# Choose a scenario and run it.
def run_bound_scenarios(self, bound_set, cursor):
    """Pick one cursor scenario (next/prev/search/search_near) at random and run it."""
    scenario = random.choice(list(bound_scenarios))
    if (scenario is bound_scenarios.NEXT):
        self.run_next(bound_set, cursor)
    elif (scenario is bound_scenarios.PREV):
        self.run_prev(bound_set, cursor)
    elif (scenario is bound_scenarios.SEARCH):
        self.run_search(bound_set, cursor)
    elif (scenario is bound_scenarios.SEARCH_NEAR):
        self.run_search_near(bound_set, cursor)
    else:
        raise Exception('Unhandled bound scenario chosen')
# Generate a set of bounds and apply them to the cursor.
def apply_bounds(self, cursor):
    """Generate a random (lower, upper) bound pair, apply the enabled ones to
    the cursor, and return the bound_set used for later validation.
    """
    cursor.reset()
    # Randomize key, enabled flag and inclusivity for each side; the upper key
    # is drawn at or above the lower key so the pair is ordered.
    lower = bound(self.get_random_key(), bool(random.getrandbits(1)), bool(random.getrandbits(1)))
    upper = bound(random.randrange(lower.key, self.max_key), bool(random.getrandbits(1)), bool(random.getrandbits(1)))
    # Prevent invalid bounds being generated.
    if (lower.key == upper.key and lower.enabled and upper.enabled):
        lower.inclusive = upper.inclusive = True
    bound_set = bounds(lower, upper)
    if (lower.enabled):
        cursor.set_key(lower.key)
        cursor.bound("bound=lower,inclusive=" + lower.inclusive_str())
    if (upper.enabled):
        cursor.set_key(upper.key)
        cursor.bound("bound=upper,inclusive=" + upper.inclusive_str())
    return bound_set
# The primary test loop is contained here.
def test_bound_fuzz(self):
    """Fuzz loop: populate the table, then repeatedly apply random bounds,
    apply random (possibly prepared) writes, and validate a random cursor
    scenario against the in-memory key state.
    """
    uri = self.uri + self.file_name
    create_params = 'value_format=S,key_format={}'.format(self.key_format)

    # Reset the key range for every scenario.
    self.key_range = {}

    # Setup a reproducible random seed.
    # If this test fails inspect the file WT_TEST/results.txt and replace the time.time()
    # with a given seed. e.g.:
    # seed = 1660215872.5926154
    # Additionally this test is configured for verbose logging which can make debugging a bit
    # easier.
    seed = time.time()
    self.pr("Using seed: " + str(seed))
    random.seed(seed)

    self.session.create(uri, create_params)
    read_cursor = self.session.open_cursor(uri)
    write_session = self.setUpSessionOpen(self.conn)
    write_cursor = write_session.open_cursor(uri)

    # Initialize the value array.
    self.verbose(3, "Generating value array")
    for i in range(0, self.value_array_size):
        self.value_array.append(self.generate_value())

    # Initialize the key range.
    for i in self.key_range_iter():
        key_value = self.get_value()
        self.key_range[i] = key(i, key_value, key_states.UPSERTED, self.current_ts)
        self.current_ts += 1
        if (self.transactions_enabled):
            write_session.begin_transaction()
        write_cursor[i] = key_value
        if (self.transactions_enabled):
            write_session.commit_transaction('commit_timestamp=' + self.timestamp_str(self.key_range[i].timestamp))
    self.session.checkpoint()

    # Begin main loop
    for i in range(0, self.iteration_count):
        self.verbose(3, "Iteration: " + str(i))
        bound_set = self.apply_bounds(read_cursor)
        self.verbose(3, "Generated bound set: " + bound_set.to_string())
        # Check if we are doing a prepared transaction on this iteration.
        prepare = random.uniform(0, 1) <= self.prepare_frequency and self.transactions_enabled
        if (self.transactions_enabled):
            write_session.begin_transaction()
        self.apply_ops(write_session, write_cursor, prepare)
        if (self.transactions_enabled):
            if (prepare):
                self.verbose(3, "Preparing applied operations.")
                write_session.prepare_transaction('prepare_timestamp=' + self.timestamp_str(self.current_ts))
            else:
                write_session.commit_transaction('commit_timestamp=' + self.timestamp_str(self.current_ts))
        # Use the current timestamp so we don't need to track previous versions.
        if (self.transactions_enabled):
            self.session.begin_transaction('read_timestamp=' + self.timestamp_str(self.current_ts))
        self.run_bound_scenarios(bound_set, read_cursor)
        if (self.transactions_enabled):
            self.session.rollback_transaction()
        if (prepare):
            # Commit the prepared operations and clear the prepared markers.
            write_session.commit_transaction(
                'commit_timestamp=' + self.timestamp_str(self.current_ts) +
                ',durable_timestamp=' + self.timestamp_str(self.current_ts))
            self.clear_prepare_key_ranges()
        self.current_ts += 1
        if (i % 10 == 0):
            # Technically this is a write but easier to do it with this session.
            self.session.checkpoint()
# Standard wiredtiger python test-suite entry point.
if __name__ == '__main__':
    wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_cursor_bound_fuzz.py | test_cursor_bound_fuzz.py | py | 27,014 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 22,
"... |
25293683614 | from django.shortcuts import render, redirect, reverse
from . import forms, models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required, user_passes_test
from django.conf import settings
from django.db.models import Q
from insurance import models as CMODEL
from insurance import forms as CFORM
from django.contrib.auth.models import User
def customerclick_view(request):
    """Customer landing page; authenticated users are sent straight on."""
    if not request.user.is_authenticated:
        return render(request, 'customer/customerclick.html')
    return HttpResponseRedirect('afterlogin')
def customer_signup_view(request):
    """Render and process the customer sign-up form (auth user + customer profile)."""
    userForm = forms.CustomerUserForm()
    customerForm = forms.CustomerForm()
    mydict = {'userForm': userForm, 'customerForm': customerForm}
    if request.method == 'POST':
        userForm = forms.CustomerUserForm(request.POST)
        customerForm = forms.CustomerForm(request.POST, request.FILES)
        if userForm.is_valid() and customerForm.is_valid():
            user = userForm.save()
            # Hash the raw password before persisting; the ModelForm stored it raw.
            user.set_password(user.password)
            user.save()
            customer = customerForm.save(commit=False)
            customer.user = user
            customer.save()
            # get_or_create returns (group, created); index 0 is the group itself.
            my_customer_group = Group.objects.get_or_create(name='CUSTOMER')
            my_customer_group[0].user_set.add(user)
        # NOTE(review): redirect happens on any POST, valid or not — the source
        # indentation was mangled, confirm this matches the original placement.
        return HttpResponseRedirect('customerlogin')
    return render(request, 'customer/customersignup.html', context=mydict)
def is_customer(user):
    """Return True when the user belongs to the 'CUSTOMER' auth group."""
    customer_groups = user.groups.filter(name='CUSTOMER')
    return customer_groups.exists()
@login_required(login_url='customerlogin')
def customer_dashboard_view(request):
    """Customer dashboard: aggregate counts shown to the logged-in customer.

    Fixes: the context variable previously shadowed the builtin `dict`, and the
    identical Customer lookup was executed four separate times; it is now
    fetched once and reused.
    """
    customer = models.Customer.objects.get(user_id=request.user.id)
    context = {
        'customer': customer,
        'available_policy': CMODEL.Policy.objects.all().count(),
        'applied_policy': CMODEL.PolicyRecord.objects.all().filter(
            status='Approved', customer=customer).count(),
        'total_category': CMODEL.Category.objects.all().count(),
        'total_question': CMODEL.Question.objects.all().filter(customer=customer).count(),
        'total_events': CMODEL.InsuranceEventRecord.objects.all().filter(customer=customer).count(),
    }
    return render(request, 'customer/customer_dashboard.html', context=context)
def apply_policy_view(request):
    """List all policies the customer can apply for, grouped by category/assurance.

    NOTE(review): unlike the dashboard view this is not wrapped in
    @login_required yet it dereferences request.user — confirm access control.
    """
    customer = models.Customer.objects.get(user_id=request.user.id)
    policies = CMODEL.Policy.objects.all().order_by('category', 'sum_assurance')
    return render(request, 'customer/apply_policy.html', {'policies': policies, 'customer': customer})
def apply_view(request, pk):
    """Create a PolicyRecord linking the current customer to policy `pk`."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    policy = CMODEL.Policy.objects.get(id=pk)
    policyrecord = CMODEL.PolicyRecord()
    # NOTE(review): attribute is capitalised ('Policy'); presumably it matches
    # the model field name — verify against CMODEL.PolicyRecord.
    policyrecord.Policy = policy
    policyrecord.customer = customer
    policyrecord.save()
    return redirect('apply-policy')
def my_products_view(request):
    """Show the customer's approved policies plus a premium total."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    policies = CMODEL.PolicyRecord.objects.all().filter(customer=customer, status='Approved')
    # Aggregates over ALL of the customer's records, not only 'Approved' ones —
    # NOTE(review): confirm that is intended. Sum yields None when no rows match.
    total_sum = CMODEL.PolicyRecord.objects.filter(customer=customer).aggregate(sum=Sum('premium'))
    total_sum_number = total_sum['sum']
    return render(request, 'customer/myproducts.html', {'policies': policies, 'customer': customer, 'totalsum': total_sum_number})
def ask_question_view(request):
    """Render and process the form a customer uses to ask a question."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    questionForm = CFORM.QuestionForm()
    if request.method == 'POST':
        questionForm = CFORM.QuestionForm(request.POST)
        if questionForm.is_valid():
            # Attach the current customer before saving the deferred instance.
            question = questionForm.save(commit=False)
            question.customer = customer
            question.save()
        # NOTE(review): redirect happens on any POST — source indentation was
        # mangled; confirm this matches the original placement.
        return redirect('question-history')
    return render(request, 'customer/ask_question.html', {'questionForm': questionForm, 'customer': customer})
def question_history_view(request):
    """Show every question the logged-in customer has asked."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    context = {
        'questions': CMODEL.Question.objects.all().filter(customer=customer),
        'customer': customer,
    }
    return render(request, 'customer/question_history.html', context)
# Insurance-event views for the customer role.
def customer_event_view(request):
    """List the customer's insurance events ordered by creation date."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    context = {
        'events': CMODEL.InsuranceEventRecord.objects.all().filter(customer=customer).order_by('creation_date'),
        'customer': customer,
    }
    return render(request, 'customer/customer_view_event.html', context)
def customer_event_add(request):
    """Render and process the form used to report a new insurance event."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    eventForm = CFORM.EventForm()
    if request.method == 'POST':
        eventForm = CFORM.EventForm(request.POST)
        if eventForm.is_valid():
            # Attach the current customer before saving the deferred instance.
            event = eventForm.save(commit=False)
            event.customer = customer
            event.save()
        return redirect('customer-event-views')
    return render(request, 'customer/customer_event_add.html', {'eventForm': eventForm, 'customer': customer})
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.Group.objects.get_or_create",
"line_number": 34,
"usa... |
7625551099 | from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Launch Edge via Selenium and load the target playlist page.
driver = webdriver.Edge(executable_path=r'Path of msedgedriver here')
url = "YOUTUBE URL HERE"
driver.get(url)

# Scroll to the bottom twice so YouTube lazy-loads more video entries.
elem = driver.find_element_by_tag_name('html')
elem.send_keys(Keys.END)
time.sleep(3)
elem.send_keys(Keys.END)

# Parse the rendered DOM; playlist entries are anchors with id 'video-title'.
innerHTML = driver.execute_script("return document.body.innerHTML")
page_soup = bs(innerHTML, 'html.parser')
res = page_soup.find_all('a', {'id': 'video-title'})

titles = []
for video in res:
    # Fetch the attribute once; it is None for placeholder anchors.
    title = video.get('title')
    print(title)
    if title is not None:
        titles.append(title)

# One title per line; the context manager guarantees the file is closed
# even if a write fails (the original leaked the handle on error).
with open('YoutubeList.txt', 'w+', encoding="utf-8") as file:
    for title in titles:
        file.write(title + '\n')

driver.close()
| Hiperultimate/Youtube-Playlist-Save-Title | youtubeListMaker.py | youtubeListMaker.py | py | 824 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Edge",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.END",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
3147265908 | """
Common Flitter initialisation
"""
import logging
import sys
from loguru import logger
try:
import pyximport
pyximport.install()
except ImportError:
pass
# Default log level and loguru sink format; {extra[shortname]} is the module
# name with the leading 'flitter' package prefix removed (see configure_logger).
LOGGING_LEVEL = "SUCCESS"
LOGGING_FORMAT = "{time:HH:mm:ss.SSS} {process}:{extra[shortname]:16s} | <level>{level}: {message}</level>"
class LoguruInterceptHandler(logging.Handler):
    """Standard-library logging handler that forwards records to loguru.

    Installing it as the root handler routes all `logging` output through
    whatever loguru sinks are configured.
    """

    @classmethod
    def install(cls):
        """Attach a fresh instance as the root logging handler and return it."""
        handler = cls()
        logging.basicConfig(handlers=[handler], level=0)
        return handler

    def uninstall(self):
        """Detach this instance from the root logger."""
        logging.getLogger().removeHandler(self)

    def emit(self, record):
        # Map the stdlib level name onto loguru's; fall back to the numeric
        # level for custom levels loguru does not know about.
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno
        # Walk out of the logging module's own frames so loguru reports the
        # original caller's location instead of logging internals.
        frame, depth = logging.currentframe().f_back, 1
        while frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1
        logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
def configure_logger(level=None):
    """Configure loguru's stderr sink and intercept stdlib logging.

    When `level` is None the module default LOGGING_LEVEL is used; otherwise
    the default is updated so later calls without an argument reuse it.
    Returns the configured loguru logger.
    """
    global LOGGING_LEVEL
    if level is None:
        level = LOGGING_LEVEL
    else:
        LOGGING_LEVEL = level
    # enqueue=True makes logging safe across processes; the patcher derives the
    # short module name used by LOGGING_FORMAT.
    logger.configure(handlers=[dict(sink=sys.stderr, format=LOGGING_FORMAT, level=level, enqueue=True)],
                     patcher=lambda record: record['extra'].update(shortname=record['name'].removeprefix('flitter')))
    LoguruInterceptHandler.install()
    return logger
def name_patch(logger, name):
    """Return a logger that reports `name` (and its short form) as its origin."""
    # Default-argument binding captures `name` at definition time for the lambda.
    return logger.patch(lambda record, name=name: (record.update(name=name),
                                                   record['extra'].update(shortname=name.removeprefix('flitter'))))
| jonathanhogg/flitter | src/flitter/__init__.py | __init__.py | py | 1,653 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "pyximport.install",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.Handler",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.ge... |
34399989971 | from typing import List
from entity.MountAnimal import MountAnimal
class Camel(MountAnimal):
    """A rideable camel; extends MountAnimal with a hump ('bent') count."""

    # Number of humps this camel has.
    bent_quantity: int

    def __init__(self, id: str, name: str, date_of_birth: str, commands: List[str], bent_quantity: int):
        super().__init__(id, name, date_of_birth, commands)
        self.bent_quantity = bent_quantity

    def to_dict(self) -> dict:
        """Serialise this camel to a JSON-ready dictionary."""
        serialised = {
            "type": "camel",
            "id": self.id,
            "name": self.name,
            "date_of_birth": self.date_of_birth,
            "bent_quantity": self.bent_quantity,
            "commands": self.commands,
        }
        return serialised

    @classmethod
    def from_json(cls, json_object):
        """Build a Camel from a dictionary shaped like `to_dict` output."""
        return cls(
            json_object["id"],
            json_object["name"],
            json_object["date_of_birth"],
            json_object["commands"],
            int(json_object["bent_quantity"]),
        )

    def spit(self):
        """Camels spit."""
        print("Yackh!")
| G0ncharovAA/GB_FINAL_TEST | app/entity/Camel.py | Camel.py | py | 1,030 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "entity.MountAnimal.MountAnimal",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
}
] |
36208541438 | from django.urls import path
from proyecto.views import index_proyecto, ProyectoCreate, ProyectoList, ProyectoUpdate, ProyectoDelete, proyecto_list_total
# Namespace used when reversing these routes, e.g. 'proyecto:index_proyecto'.
app_name = 'proyecto'

urlpatterns = [
    # Landing page for the project section.
    path("", index_proyecto, name="index_proyecto"),
    path("registrar/", ProyectoCreate.as_view(), name="registrar_proyecto"),
    # Listing filtered by an id, versus the unfiltered full listing below.
    path("listar/<int:id>", ProyectoList.as_view(), name="proyecto_listar"),
    path('listaDeProyecto/', proyecto_list_total, name= 'lista_total'),
    path("editar/<int:pk>", ProyectoUpdate.as_view(), name="editar_proyecto"),
    path("eliminar/<int:pk>", ProyectoDelete.as_view(), name="eliminar_proyecto"),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "proyecto.views.index_proyecto",
"line_number": 7,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "proy... |
6123602959 | import pandas as pd
from textslack.textslack import TextSlack
from gensim.models import doc2vec
# Architecture of the NLP Model
class NLPModel:
# The constructor instantiates all the variables that would be used throughout the class
def __init__(self, sp, conn, max_epochs=100, vec_size=50, alpha=0.025):
self.sp = sp
self.conn = conn
self.slack = TextSlack(variety='BrE', lang='english')
self.max_epochs = max_epochs
self.vec_size = vec_size
self.alpha = alpha
self.df = pd.read_sql_table('SPOTIFY_DATA', con=self.conn)
# Function that tags the list of words with indices
def _create_tagged_document(self, list_of_list_of_words):
for i, list_of_words in enumerate(list_of_list_of_words):
yield doc2vec.TaggedDocument(list_of_words, [i])
# Function to prepare the training data
def _training_data(self):
key_features = (self.df['album'] + ' ' + self.df['name'] + ' ' + self.df['artist']).tolist()
cleaned_key_features = self.slack.transform(key_features)
list_list_words = [sent.split() for sent in cleaned_key_features]
return list_list_words
# Function to build and train the model
def build_model(self):
list_list_words = self._training_data()
train_data = list(self._create_tagged_document(list_list_words))
model = doc2vec.Doc2Vec(size=self.vec_size,
alpha=self.alpha,
min_alpha=0.00025,
min_count=1,
dm=1)
model.build_vocab(train_data)
for epoch in range(self.max_epochs):
print('iteration {0}'.format(epoch))
model.train(train_data,
total_examples=model.corpus_count,
epochs=model.iter)
# decrease the learning rate
model.alpha -= 0.0002
# fix the learning rate, no decay
model.min_alpha = model.alpha
model.save('d2v.model')
print("Model Saved")
# Function to predict the most similar doc in the doc2vec model
def most_similar_doc(self, target):
model = doc2vec.Doc2Vec.load('d2v.model')
model.random.seed(95)
cleaned_target = self.slack.transform(target).split()
pred_vector = model.infer_vector(cleaned_target)
sim_vector = model.docvecs.most_similar([pred_vector])
pred_index = sim_vector[0][0]
return self.df.loc[pred_index, self.df.columns[6:-1]]
| CUTR-at-USF/muser-data-analysis | AI/models.py | models.py | py | 2,575 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "textslack.textslack.TextSlack",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_table",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "gensim.models.doc2vec.TaggedDocument",
"line_number": 22,
"usage_type": "call"
},
... |
789669298 | import argparse
from experiment import Experiment
import Limited_GP
# Command-line options controlling batching and the optimisation budget.
parser = argparse.ArgumentParser()
parser.add_argument('--number-of-batches', type=int, default=1)
parser.add_argument('--current-batch', type=int, default=1)
parser.add_argument('--budget', type=int, default=10000)
parser.add_argument('--suite-name', default='bbob')
args = parser.parse_args()

# Run the Limited-GP solver on the selected benchmark suite for this batch.
e = Experiment(suite_name=args.suite_name, solver=Limited_GP.solve, algorithm_name='Limited-GP')
e.run(budget=args.budget, current_batch=args.current_batch, number_of_batches=args.number_of_batches)
| pfnet-research/limited-gp | solver/run.py | run.py | py | 564 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "experiment.Experiment",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "Limited_GP.solve",
"line_number": 12,
"usage_type": "attribute"
}
] |
13712488384 | import os
from os.path import join
from os.path import dirname
import shutil
import sys
from threading import Thread
import time
import zmq
import yaml
def get_pyneal_scanner_test_paths():
    """ Return a dictionary with relevant paths for the pyneal_scanner_tests
    within the `tests` dir
    """
    # Derive the directory layout relative to this file's location.
    testingDir = dirname(dirname(os.path.abspath(__file__)))  # path to `tests` dir
    pynealDir = dirname(testingDir)
    testDataDir = join(testingDir, 'testData')

    paths = {
        'pynealDir': pynealDir,
        'pynealScannerDir': join(pynealDir, 'pyneal_scanner'),
        'testDataDir': testDataDir,
    }
    # Each scanner environment has a top-level dir plus a funcData subdir.
    for vendor in ('GE', 'Philips', 'Siemens'):
        vendorDir = join(testDataDir, '{}_env'.format(vendor))
        paths['{}_dir'.format(vendor)] = vendorDir
        paths['{}_funcDir'.format(vendor)] = join(vendorDir, 'funcData')
    return paths
def createFakeSeriesDir(newSeriesDir):
    """ Mimic the creation of a new series directory at the start of the scan.

    Parameters
    ----------
    newSeriesDir : string
        full path for the new series directory you'd like to create
    """
    # exist_ok avoids the check-then-create race of the previous
    # isdir()+makedirs() pair and still creates intermediate directories.
    os.makedirs(newSeriesDir, exist_ok=True)
def copyScanData(srcDir, dstDir):
    """ copy the contents of srcDir to dstDir """
    # Only top-level regular files are copied; subdirectories are ignored.
    entries = (join(srcDir, f) for f in os.listdir(srcDir))
    for entry in entries:
        if os.path.isfile(entry):
            shutil.copy(entry, dstDir)
### Functions for updating and cleaning the test scannerConfig.yaml files
def replace_scannerConfig_sessionDir(configFile, newSessionDir):
    """ Write newSessionDir to the scannerSessionDir field of given scannerConfig file

    In order to run these tests, the `scannerConfig.yaml` file for every
    scanner enviorment in the testData directory needs to be updated to reflect
    the local path to the scannerSessionDir. Since that varies depending on where
    this test is being run from, this function will swap out that field with
    the current path base on the local path to the pynealDir
    """
    # Load the current settings, update the one field, then rewrite in place.
    with open(configFile, 'r') as ymlFile:
        settings = yaml.safe_load(ymlFile)
    settings['scannerSessionDir'] = newSessionDir
    with open(configFile, 'w') as ymlFile:
        yaml.dump(settings, ymlFile, default_flow_style=False)
def cleanConfigFile(configFile):
    """ Remove local paths from scannerConfig file.

    After testing, remove the local path to the scannerSessionDir to it does
    not get pushed to gitHub
    """
    # Load, blank the session dir, and rewrite the file in place.
    with open(configFile, 'r') as ymlFile:
        settings = yaml.safe_load(ymlFile)
    settings['scannerSessionDir'] = ' '
    with open(configFile, 'w') as ymlFile:
        yaml.dump(settings, ymlFile, default_flow_style=False)
### Class for creating a simple server to simulate Pyneal receiving socket
class SimRecvSocket(Thread):
    """Background thread emulating Pyneal's volume-receiving ZMQ socket.

    Binds a PAIR socket, echoes the initial handshake message, then receives
    header+data message pairs until `nVols` volumes have arrived.
    Fix: this class uses `np.frombuffer` but the module never imported numpy,
    which raised NameError at runtime; numpy is now imported at module level.
    """

    def __init__(self, host, port, nVols):
        Thread.__init__(self)
        self.host = host
        self.port = port
        self.nVols = nVols
        self.alive = True
        self.receivedVols = 0

    def run(self):
        host = '*'
        self.context = zmq.Context.instance()
        sock = self.context.socket(zmq.PAIR)
        sock.bind('tcp://{}:{}'.format(host, self.port))

        # Wait for initial contact and echo it back as the handshake.
        while True:
            msg = sock.recv_string()
            sock.send_string(msg)
            break

        while self.alive:
            # Receive header info as json describing the incoming volume.
            volInfo = sock.recv_json(flags=0)

            # retrieve relevant values about this slice
            volIdx = volInfo['volIdx']
            volDtype = volInfo['dtype']
            volShape = volInfo['shape']

            # Receive the raw data stream and rebuild the voxel array.
            data = sock.recv(flags=0, copy=False, track=False)
            voxelArray = np.frombuffer(data, dtype=volDtype)
            voxelArray = voxelArray.reshape(volShape)

            # Acknowledge receipt; stop once the expected volume count arrives.
            sock.send_string('got it')
            self.receivedVols += 1
            if self.receivedVols == self.nVols:
                self.alive = False

    def stop(self):
        # NOTE(review): self.context only exists once run() has started;
        # calling stop() before start() raises AttributeError — confirm callers.
        self.context.destroy()
        self.alive = False
class ServerTest(Thread):
    """Minimal ZMQ echo server thread used for socket plumbing tests.

    Echoes every string it receives on port 5555 and shuts down after
    echoing the 'end' sentinel.
    """

    def __init__(self):
        Thread.__init__(self)
        self.alive = True

    def run(self):
        context = zmq.Context.instance()
        self.socket = context.socket(zmq.PAIR)
        self.socket.bind('tcp://*:5555')

        while self.alive:
            msg = self.socket.recv_string()
            # Echo the message back; 'end' doubles as the shutdown sentinel.
            self.socket.send_string(msg)
            if msg == 'end':
                self.alive = False

    def stop(self):
        self.alive = False
| jeffmacinnes/pyneal | tests/pyneal_scanner_tests/pynealScanner_helper_tools.py | pynealScanner_helper_tools.py | py | 5,196 | python | en | code | 30 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"l... |
4153153837 | import datetime
import difflib
import os
from inspect import isabstract
from typing import Any, List, Mapping, Optional, Set, Tuple, Type
import ee # type: ignore
from requests.structures import CaseInsensitiveDict
config_path = os.path.expanduser("~/.config/taskee.ini")
def initialize_earthengine() -> None:
    """Initialize the Earth Engine API."""
    try:
        ee.Initialize()
    except ee.EEException:
        # Credentials are missing or expired: run the interactive
        # authentication flow once, then retry initialization.
        ee.Authenticate()
        ee.Initialize()
def _get_case_insensitive_close_matches(
word: str, possibilities: List[str], n: int = 3, cutoff: float = 0.6
) -> List[str]:
"""A case-insensitive wrapper around difflib.get_close_matches.
Parameters
----------
word : str
A string for which close matches are desired.
possibilites : List[str]
A list of strings against which to match word.
n : int, default 3
The maximum number of close matches to return. n must be > 0.
cutoff : float, default 0.6
Possibilities that don't score at least that similar to word are ignored.
Returns
-------
List[str] : The best (no more than n) matches among the possibilities are returned in
a list, sorted by similarity score, most similar first.
"""
lower_matches = difflib.get_close_matches(
word.lower(), [p.lower() for p in possibilities], n, cutoff
)
return [p for p in possibilities if p.lower() in lower_matches]
def _all_subclasses(cls: Type[Any]) -> Set[Type[Any]]:
"""Recursively find all subclasses of a given class."""
return set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in _all_subclasses(c)]
)
def _list_subclasses(superclass: Type[Any]) -> Mapping[str, Type[Any]]:
    """List all non-abstract subclasses of a given superclass. Return as a
    case-insensitive dictionary mapping the subclass name to the class. This
    is recursive, so sub-subclasses will also be returned.

    Parameters
    ----------
    superclass : Type[Any]
        The superclass to list subclasses of.

    Returns
    -------
    Dict[str, Type[Any]]
        A dictionary mapping the subclass name to the class.
    """
    concrete = {}
    for subclass in _all_subclasses(superclass):
        if isabstract(subclass):
            continue
        concrete[subclass.__name__] = subclass
    return CaseInsensitiveDict(concrete)
def _get_subclasses(names: Tuple[str, ...], superclass: Type[Any]) -> Set[Type[Any]]:
    """Retrieve a set of subclasses of a given superclass.

    Parameters
    ----------
    names : Tuple[str, ...]
        A tuple of subclass names to retrieve from the superclass. The
        special name "all" (any case) selects every subclass.
    superclass : Type[Any]
        The superclass to retrieve subclasses of.

    Returns
    -------
    Set[Type[Any]]
        A set of subclasses of the superclass.

    Raises
    ------
    AttributeError
        If any name does not match a known subclass; the message includes
        close matches as a hint.
    """
    options = _list_subclasses(superclass)
    keys = list(options.keys())
    lowered = [name.lower() for name in names if isinstance(name, str)]
    if "all" in lowered:
        return set(options.values())
    chosen: Set[Type[Any]] = set()
    for name in names:
        if name in options:
            chosen.add(options[name])
            continue
        close_matches = _get_case_insensitive_close_matches(name, keys, n=3)
        hint = " Close matches: {}.".format(close_matches) if close_matches else ""
        raise AttributeError(
            f'"{name}" is not a supported {superclass.__name__} type. Choose from {keys}.{hint}'
        )
    return chosen
def _millis_to_datetime(
millis: str, tz: Optional[datetime.timezone] = None
) -> datetime.datetime:
"""Convert a timestamp in milliseconds (e.g. from Earth Engine) to a datetime object."""
return datetime.datetime.fromtimestamp(int(millis) / 1000.0, tz=tz)
def _datetime_to_millis(dt: datetime.datetime) -> int:
"""Convert a datetime to a timestamp in milliseconds"""
return int(dt.timestamp() * 1000)
| aazuspan/taskee | taskee/utils.py | utils.py | py | 3,865 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "os.path.expanduser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "ee.Initialize",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ee.EEException",
"l... |
10746701513 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2020/10/15 10:19 PM
# @Author: Zechen Li
# @File : aug.py.py
from glue.tasks import get_task
import pandas as pd
import numpy as np
import os
from augment.eda import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--alpha', default=0.1, type=float)
parser.add_argument('--num_aug', default=1, type=int)
parser.add_argument('--num_type', default=4, type=int)
parser.add_argument('--task_name', default='CoLA')
parser.add_argument('--dataroot', default='./glue_data/')
parser.add_argument('--aug_dataroot', default='./aug_data/')

args = parser.parse_args()
alpha = args.alpha
num_aug = args.num_aug
num_type = args.num_type
task_name = args.task_name

task_dir = os.path.join(args.dataroot, task_name)
task = get_task(task_name.lower(), task_dir)
output_dir = os.path.join(args.aug_dataroot, task_name)
# exist_ok replaces the old try/except OSError: pass, which also hid real
# failures such as permission errors.
os.makedirs(output_dir, exist_ok=True)

ori_train_df = task.get_train_df()
ori_dev_df = task.get_dev_df()


def _augment_frame(ori_df):
    """Return a new DataFrame with one augmented row per generated sentence.

    For each input sentence a random augmentation type is drawn, applied via
    eda(), and every generated sentence is recorded together with the method
    used as its label. Rows are collected in a list and materialized once:
    DataFrame.append() was removed in pandas 2.0 and was quadratic anyway.
    """
    rows = []
    for ori_sentence in ori_df.sentence:
        method_label = np.random.randint(0, num_type, 1)[0]
        method = augment_single_with_label(method_label)
        aug_sentences = eda(ori_sentence, alpha=alpha, num_aug=num_aug, method=method)
        for aug_sentence in aug_sentences:
            rows.append({'sentence': aug_sentence, 'label': method})
    return pd.DataFrame(rows, columns=["sentence", "label"])


# Augment the training split and write it out as TSV.
print("Trainning dataset preview:")
print("train sentences num:", len(ori_train_df))
print("Original:", ori_train_df.head())
aug_train_df = _augment_frame(ori_train_df)
print("Augment:", aug_train_df.head())
print(aug_train_df['label'].value_counts(normalize=True) * 100)
aug_train_df.to_csv(os.path.join(output_dir, "train.tsv"), sep='\t', index=False)

print('---------------------------------------------------------')

# Augment the dev split and write it out as TSV.
print("Dev dataset preview:")
print("dev sentences num:", len(ori_dev_df))
print("Original:", ori_dev_df.head())
aug_dev_df = _augment_frame(ori_dev_df)
print("Augment:", aug_dev_df.head())
print(aug_dev_df['label'].value_counts(normalize=True) * 100)
aug_dev_df.to_csv(os.path.join(output_dir, "dev.tsv"), sep='\t', index=False)

print("generated augmented sentences finished.")
| UCSD-AI4H/SSReg | SSL-Reg-SATP/aug.py | aug.py | py | 2,685 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "glue.tasks.get_task... |
19352159399 | import setuptools
# Use the README as the package's long description on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
def read_file(file_name):
    """Return the full contents of *file_name* decoded as UTF-8.

    An explicit encoding is used so the result does not depend on the
    platform's default locale encoding.
    """
    with open(file_name, "r", encoding="utf-8") as f:
        return f.read()
# Package metadata for PyPI; find_packages() discovers all sub-packages
# automatically, so new packages need no edit here.
setuptools.setup(
    name="influx-line-protocol",
    description="Implementation of influxdata line protocol format in python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version="0.1.5",
    url="https://github.com/SebastianCzoch/influx-line-protocol",
    author="Sebastian Czoch",
    author_email="sebastian@czoch.pl",
    license="MIT",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(),
)
| SebastianCzoch/influx-line-protocol | setup.py | setup.py | py | 879 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 29,
"usage_type": "call"
}
] |
5199805944 | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from widgets.mixins import TextColorMixin
from utils.encoders import EncodeMethod, create_encoder
from strings import _
class EncodePage(QWidget, TextColorMixin):
def __init__(self, parent=None):
super(EncodePage, self).__init__(parent)
self.setupUi()
def setupUi(self):
self.method_base64 = self.create_method_radio('BASE64', EncodeMethod.base64)
self.method_md5 = self.create_method_radio('MD5', EncodeMethod.md5)
self.method_url = self.create_method_radio('URL', EncodeMethod.url)
self.method_html = self.create_method_radio('HTML', EncodeMethod.html)
btnEncode = QPushButton('&Encode')
btnDecode = QPushButton('&Decode')
self.src_edit = QTextEdit()
self.src_edit.setFixedHeight(300)
self.dest_edit = QTextEdit()
self.dest_edit.setFixedHeight(300)
method_box = QHBoxLayout()
method_box.addWidget(QLabel(_('method')))
method_box.addWidget(self.method_base64)
method_box.addWidget(self.method_md5)
method_box.addWidget(self.method_url)
method_box.addWidget(self.method_html)
method_box.addStretch()
btnBox = QVBoxLayout()
btnBox.addStretch()
btnBox.addWidget(btnEncode)
btnBox.addWidget(btnDecode)
btnBox.addStretch()
center_box = QHBoxLayout()
center_box.addWidget(self.src_edit, 1)
center_box.addLayout(btnBox)
center_box.addWidget(self.dest_edit, 1)
vbox = QVBoxLayout()
vbox.addLayout(method_box)
vbox.addLayout(center_box)
vbox.addStretch()
self.setLayout(vbox)
btnEncode.clicked.connect(self.on_encode)
btnDecode.clicked.connect(self.on_decode)
self.src_edit.textChanged.connect(self.on_srcEdit_textChanged)
self.dest_edit.textChanged.connect(self.on_destEdit_textChanged)
def create_method_radio(self, text, value):
radio = QRadioButton(text)
radio.setProperty('value', value)
return radio
def on_initialized(self):
self.method_base64.setChecked(True)
def get_encoder(self):
method = EncodeMethod.base64
if self.method_md5.isChecked():
method = EncodeMethod.md5
elif self.method_url.isChecked():
method = EncodeMethod.url
elif self.method_html.isChecked():
method = EncodeMethod.html
return create_encoder(method)
@pyqtSlot()
def on_encode(self):
try:
src = self.src_edit.toPlainText().strip()
encoder = self.get_encoder()
result = encoder.encode(src)
self.setColoredText(self.dest_edit, result, True)
except Exception as e:
self.setColoredText(self.dest_edit, str(e), False)
@pyqtSlot()
def on_decode(self):
try:
src = self.dest_edit.toPlainText().strip()
encoder = self.get_encoder()
result = encoder.decode(src)
self.setColoredText(self.src_edit, result, True)
except Exception as e:
self.setColoredText(self.src_edit, str(e), False)
@pyqtSlot()
def on_srcEdit_textChanged(self):
if self.src_edit.hasFocus():
self.on_encode()
@pyqtSlot()
def on_destEdit_textChanged(self):
if self.dest_edit.hasFocus():
self.on_decode()
| shuhari/DevToolbox | src/pages/py/encode_page.py | encode_page.py | py | 3,455 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "widgets.mixins.TextColorMixin",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "utils.encoders.EncodeMethod.base64",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "utils.encoders.EncodeMethod",
"line_number": 15,
"usage_type": "name... |
14432332319 | #from ast import If
#from pprint import pp
#from typing import final
from genericpath import exists
from multiprocessing.reduction import duplicate
import re
import string
from unittest import result
from tokens import tokens
from tkinter import *
from tkinter import messagebox as MessageBox
from tkinter import messagebox
#from Interfaz import datos
#resultReservadas = []
#resultCaracteresEspeciales = []
#resultDelimitadores = []
class analizador:
    """Lexical analyzer: classifies input words into reserved words, special
    characters, delimiters, identifiers/numbers and lexical errors."""

    tokens = tokens()

    def inicio_analizador(self, palabras):
        """Classify every word in *palabras*.

        Returns a list of six lists, in order:
        [reserved words, special characters, delimiters,
         identifiers/numbers, digits (always empty here), errors].

        Fix: the previous implementation called ``palabras.remove(i)``
        while iterating ``palabras``, which skips the element following
        every removal and mis-classifies those words. The input list is
        no longer mutated; unmatched words are collected separately.
        """
        resultReservadas = []
        resultCaracteresEspeciales = []
        resultDelimitadores = []
        resultIndefinidas = []
        resultErrores = []
        resultDigitos = []
        listResultados = []
        restantes = []
        print("--- Lexico ---")
        # First pass: pull out reserved words, special characters and
        # delimiters; everything else goes to the second pass.
        for palabra in palabras:
            if palabra in tokens.reservadas:
                resultReservadas.append(palabra)
            elif palabra in tokens.caracteres_especiales:
                resultCaracteresEspeciales.append(palabra)
            elif palabra in tokens.delimitadores:
                resultDelimitadores.append(palabra)
            else:
                restantes.append(palabra)
        # Second pass: identifiers (letter followed by letters/digits/_),
        # then pure digit strings; anything else is a lexical error.
        for palabra in restantes:
            if re.search("[a-zA-Z][a-zA-Z0-9_]*", palabra):
                resultIndefinidas.append(palabra)
            elif re.search("^[0-9]+$", palabra):
                resultIndefinidas.append(palabra)
            else:
                resultErrores.append(palabra)
        print("Token Reservadas: ", resultReservadas)
        print("Token Caracteres Especiales: ", resultCaracteresEspeciales)
        print("Token Delimitadores: ", resultDelimitadores)
        print("Token Indefinidas: ", resultIndefinidas)
        print("Errores: ", resultErrores)
        listResultados.append(resultReservadas)
        listResultados.append(resultCaracteresEspeciales)
        listResultados.append(resultDelimitadores)
        listResultados.append(resultIndefinidas)
        listResultados.append(resultDigitos)
        listResultados.append(resultErrores)
        return listResultados
| AngelHernandez20/Mantenimiento | analizadorlexico.py | analizadorlexico.py | py | 2,210 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "tokens.tokens",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "tokens.tokens.reservadas",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "tokens.tokens",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "tokens.toke... |
6798954451 | import re
import datetime
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import Permission
from django.shortcuts import get_object_or_404
from exo_role.models import ExORole
from auth_uuid.jwt_helpers import _build_jwt
from auth_uuid.tests.test_mixin import RequestMockAccount
from languages.models import Language
from utils.faker_factory import faker
from ..faker_factories import (
FakeOpportunityFactory,
FakeQuestionFactory,
)
class OpportunityTestMixin:
    """Test mixin providing fixture data, mock HTTP endpoints and auth
    helpers for opportunity-related test cases."""

    def get_sow_data(self):
        """Return a fresh dict of fake statement-of-work payload data."""
        return {
            'title': faker.text(),
            'description': faker.text(),
            'mode': settings.OPPORTUNITIES_CH_MODE_DEFAULT,
            'location': faker.city(),
            'start_date': datetime.date.today(),
            'end_date': datetime.date.today() + datetime.timedelta(days=20),
            'duration_unity': settings.OPPORTUNITIES_DURATION_UNITY_DAY,
            'duration_value': 2,
            'start_time': faker.time(),
            'timezone': faker.timezone(),
            'entity': faker.name(),
            'budgets': [
                {
                    'budget': '222',
                    'currency': settings.OPPORTUNITIES_CH_CURRENCY_DOLLAR
                }
            ],
        }

    def get_api_data(self, users=None):
        """Return a fresh dict of fake API payload data for an opportunity.

        When *users* is given, the opportunity targets those users
        (FIXED target) instead of being open.

        Fix: the default used to be the mutable literal ``users=[]``; it was
        never mutated, so ``None`` plus a local fallback is behavior-identical
        while removing the shared-default hazard.
        """
        users = users or []
        keywords = [
            {'name': faker.word() + faker.numerify()},
            {'name': faker.word() + faker.numerify()},
        ]
        data = {
            'title': faker.word(),
            'description': faker.text(),
            'mode': settings.OPPORTUNITIES_CH_MODE_ONSITE,
            'location': '{}, {}'.format(faker.city(), faker.country()),
            'exo_role': ExORole.objects.get(code=settings.EXO_ROLE_CODE_OTHER_OTHER).code,
            'other_category_name': faker.word(),
            'other_role_name': faker.word(),
            'certification_required': None,
            'due_date': timezone.now().date(),
            'deadline_date': (timezone.now() + timedelta(days=10)).date(),
            'duration_unity': settings.OPPORTUNITIES_DURATION_UNITY_DAY,
            'duration_value': 2,
            'num_positions': 2,
            'keywords': keywords,
            'entity': faker.company(),
            'files': [{
                'filestack_status': 'Stored',
                'url': 'https://cdn.filestackcontent.com/Lr59QG8oQRWliC6x70cx',
                'filename': 'gato.jpg',
                'mimetype': 'image/jpeg'}],
            'budgets': [
                {
                    'currency': settings.OPPORTUNITIES_CH_CURRENCY_EUR,
                    'budget': '{}.0'.format(int(faker.numerify()))
                },
                {
                    'currency': settings.OPPORTUNITIES_CH_CURRENCY_EXOS,
                    'budget': '{}.0'.format(int(faker.numerify()))
                },
            ]
        }
        if users:
            data['target'] = settings.OPPORTUNITIES_CH_TARGET_FIXED
            data['users_tagged'] = [
                {'user': user.uuid.__str__()} for user in users
            ]
        return data

    def add_marketplace_permission(self, user):
        """Grant *user* the full-marketplace permission."""
        perm = settings.AUTH_USER_PERMS_MARKETPLACE_FULL
        permission = get_object_or_404(
            Permission,
            codename=perm)
        user.user_permissions.add(permission)

    def create_opportunity(
            self, user=None, questions=3, num_positions=3, target=None,
            duration_unity=None, role=None, group=None,
    ):
        """Create and return a fake opportunity with languages and questions.

        Defaults to ``self.super_user`` as creator; optional keyword
        arguments are only forwarded to the factory when provided.
        """
        if not user:
            user = self.super_user
        data = {
            'user_from': user,
            'num_positions': num_positions,
        }
        if target:
            data['target'] = target
        if duration_unity:
            data['duration_unity'] = duration_unity
        if role:
            data['exo_role'] = role
        if group:
            data['group'] = group
        opportunity = FakeOpportunityFactory.create(**data)
        languages = [
            Language.objects.create(name=faker.word() + faker.numerify()) for _ in range(2)]
        opportunity.languages.add(*languages)
        FakeQuestionFactory.create_batch(size=questions, opportunity=opportunity)
        return opportunity

    def init_mock(self, m):
        """Register canned responses on requests-mock *m* for every external
        service endpoint the opportunity flows touch."""
        matcher = re.compile('{}/api/accounts/me/'.format(settings.EXOLEVER_HOST))
        m.register_uri(
            'GET',
            matcher,
            json=mock_callback)
        m.register_uri(
            'GET',
            re.compile(
                '{}/api/consultant/consultant/can-receive-opportunities/'.format(
                    settings.EXOLEVER_HOST)),
            json=[])
        m.register_uri(
            'GET',
            re.compile(
                '{}/api/accounts/groups/{}/'.format(
                    settings.EXOLEVER_HOST,
                    settings.OPPORTUNITIES_DELIVERY_MANAGER_GROUP)),
            json={'user_set': []})
        m.register_uri(
            'POST',
            re.compile(
                '{}{}api/mail/'.format(
                    settings.EXOLEVER_HOST,
                    settings.SERVICE_EXO_MAIL_HOST)),
            json={})

    def setup_credentials(self, user):
        """Authenticate the test client as *user* via a JWT bearer token."""
        token = _build_jwt(user)
        self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)

    def setup_username_credentials(self):
        """Authenticate the test client with the shared service secret."""
        self.client.credentials(HTTP_USERNAME=settings.AUTH_SECRET_KEY)
# Shared registry of canned account payloads, keyed by user UUID.
request_mock_account = RequestMockAccount()
def mock_callback(request, context):
    """requests-mock callback: return the canned account for the UUID in
    the request path (``.../<uuid>/`` -> second-to-last path segment)."""
    uuid = request.path.split('/')[-2]
    return request_mock_account.get_request(uuid)
| tomasgarzon/exo-services | service-exo-opportunities/opportunities/tests/test_mixin.py | test_mixin.py | py | 5,626 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.faker_factory.faker.text",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.faker_factory.faker",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "utils.faker_factory.faker.text",
"line_number": 27,
"usage_type": "call"
},
... |
73819292265 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import enum
import logging
from logging import StreamHandler
# Shared stream handler and module-level logger, configured by init()
# and adjusted by set_level().
handler = StreamHandler()
logger = logging.getLogger(__name__)
class Level(enum.Enum):
    """Log severity levels, mirroring the stdlib ``logging`` constants."""
    FATAL = logging.FATAL
    ERROR = logging.ERROR
    WARN = logging.WARN
    INFO = logging.INFO
    DEBUG = logging.DEBUG
class ColoredFormatter(logging.Formatter):
    """Formatter that replaces the COLOR_START/COLOR_END placeholders in the
    format string with per-level ANSI color escape sequences."""

    RESET_SEQ = "\033[0m"
    COLOR_SEQ = "\033[1;%dm"
    COLOR_START = "COLOR_START"
    COLOR_END = "COLOR_END"
    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)

    COLORS = {
        'ERROR': RED,
        'WARNING': YELLOW,
        'INFO': WHITE,
        'DEBUG': CYAN,
    }

    def __init__(self, fmt):
        """
        :param str fmt: Format string.
        """
        logging.Formatter.__init__(self, fmt)
        # Resolve the end marker once; the start marker depends on the
        # record's level and is substituted per record in format().
        self.fmt = fmt.replace(ColoredFormatter.COLOR_END, ColoredFormatter.RESET_SEQ)

    def format(self, record):
        """
        Output colored log

        :param logging.LogRecord record:
        :return: Format result.
        :rtype: str
        """
        levelname = record.levelname
        if levelname in ColoredFormatter.COLORS:
            cs = ColoredFormatter.COLOR_SEQ % (30 + ColoredFormatter.COLORS[levelname])
            fmt = self.fmt.replace(ColoredFormatter.COLOR_START, cs)
        else:
            # Fix: records at unmapped levels (e.g. CRITICAL) used to keep
            # the literal COLOR_START marker in the output and reuse the
            # color of whatever record was formatted before them.
            fmt = self.fmt.replace(ColoredFormatter.COLOR_START, "")
        # Always refresh the style so a previous record's colors never leak.
        self._style._fmt = fmt
        return logging.Formatter.format(self, record)
def init():
    """
    Initialize log module.
    """
    formatter = ColoredFormatter("COLOR_START%(message)sCOLOR_END")
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.propagate = False
def set_level(level: Level):
    """
    Set logging level

    :param Level level: Log level
    """
    for target in (logger, handler):
        target.setLevel(level.value)
    if level != Level.DEBUG:
        return
    # DEBUG mode: switch to a verbose format with timestamps.
    formatter = ColoredFormatter("COLOR_START%(asctime)s %(levelname)-7sCOLOR_END %(message)s")
    formatter.default_time_format = '%H:%M:%S'
    formatter.default_msec_format = '%s.%03d'
    handler.setFormatter(formatter)
def d(msg):
    """
    Debug log

    :param str | bytes msg: Message string.
    """
    if isinstance(msg, bytes):
        logger.debug(msg.decode("utf-8"))
    elif isinstance(msg, str):
        logger.debug(msg)
def i(msg):
    """Info log: forward *msg* to the module logger at INFO level.

    :param str msg: Message string.
    """
    logger.info(msg)
def w(msg):
    """Warning log: forward *msg* to the module logger at WARNING level.

    :param str msg: Message string.
    """
    logger.warning(msg)
def e(msg):
    """Error log: forward *msg* to the module logger at ERROR level.

    :param str msg: Message string.
    """
    logger.error(msg)
| ujiro99/auto_logger | logger/log.py | log.py | py | 2,655 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.StreamHandler",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "logging.FATAL",
... |
10140994218 | from django.urls import reverse
def reverse_querystring(view, urlconf=None, args=None, kwargs=None, current_app=None, query_kwargs=None):
"""Custom reverse to handle query strings.
Usage:
reverse_querystring('app.views.my_view', kwargs={'pk': 123}, query_kwargs={'search': 'Bob'})
for multivalue query string
reverse_querystring('app.views.my_view', kwargs={'pk': 123}, query_kwargs={'search': ['Bob', 'Jack']})
"""
base_url = reverse(view, urlconf=urlconf, args=args, kwargs=kwargs, current_app=current_app)
if query_kwargs:
lst = []
for k, v in query_kwargs.items():
if isinstance(v, (list, tuple)):
for ret in v:
lst.append("%s=%s" % (k, ret))
else:
lst.append("%s=%s" % (k, v))
query_string = "&".join(lst)
return "%s?%s" % (base_url, query_string)
return base_url
| chiemerieezechukwu/django-api | core/utils/reverse_with_query_string.py | reverse_with_query_string.py | py | 930 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.reverse",
"line_number": 12,
"usage_type": "call"
}
] |
13933428778 | import pytest
from uceasy.ioutils import load_csv, dump_config_file
@pytest.fixture
def config_example():
    """Fixture: a nested config dict plus the exact INI-style text that
    dump_config_file is expected to produce from it."""
    config = {
        "adapters": {
            "i7": "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC*ATCTCGTATGCCGTCTTCTGCTTG",
            "i5": "AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT",
        }
    }
    expected = """[adapters]
i7:AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC*ATCTCGTATGCCGTCTTCTGCTTG
i5:AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT
"""
    return (config, expected)
@pytest.fixture
def config_novalue():
    """Fixture: a config whose options carry no values (bare-key syntax),
    plus the expected dumped text."""
    config = {"samples": {"sample1": None, "sample2": None}}
    expected = """[samples]
sample1
sample2
"""
    return (config, expected)
def test_load_csv_returns_a_list(context):
    """load_csv should parse the CSV fixture into a list of rows."""
    csv = load_csv(context["csv_file"])
    assert isinstance(csv, list)
def test_config_file_is_created(context, config_example):
    """dump_config_file should write exactly the expected INI text."""
    dump_config_file(context["output"] + "test.conf", config_example[0])
    with open(context["output"] + "test.conf", "r") as fl:
        assert fl.read() == config_example[1]
| uceasy/uceasy | tests/test_ioutils.py | test_ioutils.py | py | 1,048 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "pytest.fixture",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "uceasy.ioutils.load_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "uceasy... |
38514393390 | """
This module contains functions to check whether a schedule is:
1. view-serializable
2. conflict-serializable
3. recoverable
4. avoids cascading aborts
5. strict
It also contains some nice functions to tabularize schedules into tex and draw
a conflict graph using matplotlib.
"""
from action import *
from collections import defaultdict
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
################################################################################
# helper functions
################################################################################
def flatten(ls):
    """
    >>> flatten([[], [1], [2,3], [4]])
    [1, 2, 3, 4]
    """
    return list(itertools.chain.from_iterable(ls))
def graphs_eq(g1, g2):
    """
    Returns if two networkx graphs are 100% identical.

    >>> G1 = nx.DiGraph()
    >>> G1.add_nodes_from([1, 2, 3])
    >>> G1.add_edges_from([(1, 2), (2, 3), (3, 1)])
    >>> G2 = nx.DiGraph()
    >>> G2.add_nodes_from([3, 2, 1])
    >>> G2.add_edges_from([(3, 1), (2, 3), (1, 2)])
    >>> G3 = nx.DiGraph()
    >>> G3.add_nodes_from([1, 2, 3, 4])
    >>> G3.add_edges_from([(1, 2), (2, 3), (3, 1)])
    >>> G4 = nx.DiGraph()
    >>> G4.add_nodes_from([1, 2, 3])
    >>> G4.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 4)])
    >>> graphs_eq(G1, G2)
    True
    >>> graphs_eq(G2, G1)
    True
    >>> graphs_eq(G1, G3)
    False
    >>> graphs_eq(G1, G4)
    False
    """
    nodes_match = set(g1.nodes()) == set(g2.nodes())
    edges_match = set(g1.edges()) == set(g2.edges())
    return nodes_match and edges_match
def transaction_ids(schedule):
    """
    Return a list of the _unique_ transaction ids in the schedule in the order
    that they appear.

    >>> transaction_ids([r(1, "A"), r(2, "A"), w(1, "A"), r(3, "A")])
    [1, 2, 3]
    """
    # dict preserves insertion order, so fromkeys dedupes while keeping
    # first-appearance order
    return list(dict.fromkeys(action.i for action in schedule))
def transactions(schedule):
    """
    Partitions a schedule into the list of transactions that compose it.
    Transactions are returned in the order in which an operation of the
    transaction first appears.

    >>> transactions([
    ...     r(1, "A"),
    ...     w(2, "A"),
    ...     commit(2),
    ...     w(1, "A"),
    ...     commit(1),
    ...     w(3, "A"),
    ...     commit(3),
    ... ])
    [[R_1(A), W_1(A), Commit_1], [W_2(A), Commit_2], [W_3(A), Commit_3]]
    >>> transactions([
    ...     w(2, "A"),
    ...     r(1, "A"),
    ...     commit(2),
    ...     w(1, "A"),
    ...     commit(1),
    ...     w(3, "A"),
    ...     commit(3),
    ... ])
    [[W_2(A), Commit_2], [R_1(A), W_1(A), Commit_1], [W_3(A), Commit_3]]
    """
    # dict preserves insertion order, so the buckets come out in the order
    # each transaction first appears
    buckets = {}
    for action in schedule:
        buckets.setdefault(action.i, []).append(action)
    return list(buckets.values())
def drop_aborts(schedule):
    """
    Remove all transactions that abort.

    >>> drop_aborts([r(1, "A"), r(2, "A"), r(3, "A"), abort(1), commit(2), abort(3)])
    [R_2(A), Commit_2]
    """
    aborted = set()
    for action in schedule:
        if action.op == ABORT:
            aborted.add(action.i)
    return [action for action in schedule if action.i not in aborted]
def add_commits(schedule):
    """
    Add a commit for every transaction that doesn't end in a commit or abort.
    Commits are added in the order of the first action of the transaction.

    >>> add_commits([r(1, "A"), r(2, "A"), r(3, "A"), r(4, "A"), commit(2), abort(4)])
    [R_1(A), R_2(A), R_3(A), R_4(A), Commit_2, Abort_4, Commit_1, Commit_3]
    """
    finished = {action.i for action in schedule if action.op in (COMMIT, ABORT)}
    pending = [i for i in transaction_ids(schedule) if i not in finished]
    return schedule + [commit(i) for i in pending]
def first_read(schedule):
    """
    Returns a mapping from each object to the transaction ids that initially
    read it. If an object is never read, it is not included in the return.

    >>> first_read([w(1, "A"), w(2, "B")])
    {}
    >>> first_read([r(1, "A"), r(2, "B"), r(2, "A")])
    {'A': [1, 2], 'B': [2]}
    """
    initial_readers = defaultdict(list)
    overwritten = set()
    for action in schedule:
        if action.op == WRITE:
            overwritten.add(action.obj)
        elif action.op == READ and action.obj not in overwritten:
            initial_readers[action.obj].append(action.i)
    return dict(initial_readers)
def number(schedule):
    """
    Enumerates each action according to its appearance within its transaction.
    The enumeration begins at 0.

    >>> number([r(1, "A"), r(1, "B"), r(2, "A"), w(3, "A"), commit(2)])
    [(0, R_1(A)), (1, R_1(B)), (0, R_2(A)), (0, W_3(A)), (1, Commit_2)]
    """
    counters = {}
    numbered = []
    for action in schedule:
        k = counters.get(action.i, 0)
        numbered.append((k, action))
        counters[action.i] = k + 1
    return numbered
def view_graph(schedule):
    """
    First, the schedule is numbered using the number function. Then, an edge is
    added from each read of an object to the most recent write to the same
    object.

    >>> view_graph([w(1, "A"), r(2, "A"), r(1, "A")]) #doctest: +SKIP
    +------------+     +------------+
    | (0, W_1(A) |<----| (0, R_2(A) |
    +------------+     +------------+
          ^
          |
    +------------+
    | (1, R_1(A) |
    +------------+
    """
    G = nx.DiGraph()
    latest_write = {}
    for numbered_action in number(schedule):
        _, action = numbered_action
        if action.op == WRITE:
            latest_write[action.obj] = numbered_action
        elif action.op == READ and action.obj in latest_write:
            G.add_edge(numbered_action, latest_write[action.obj])
    return G
def last_written(schedule):
    """
    Returns a mapping from each object to the transaction id that last writes
    it. If an object is never written, it is not included in the return.

    >>> last_written([r(1, "A"), r(2, "B")])
    {}
    >>> last_written([w(1, "A"), w(2, "B"), w(2, "A")])
    {'A': 2, 'B': 2}
    """
    # later writes overwrite earlier ones, leaving the final writer per object
    return {action.obj: action.i for action in schedule if action.op == WRITE}
def view_equivalent(s1, s2):
    """
    Two schedules s1 and s2 are view equivalent if

        1. If Ti reads the initial value of object A in s1, it must also read
           the initial value of A in s2.
        2. If Ti reads a value of A written by Tj in s1, it must also read the
           value of A written by Tj in s2.
        3. For each data object A, the transaction (if any) that performs the
           final write on A in s1 must also perform the final write on A in s2.
    """
    assert set(transaction_ids(s1)) == set(transaction_ids(s2))
    # conditions 1-3: same initial reads, same reads-from relationships,
    # same final writers
    return (first_read(s1) == first_read(s2) and
            graphs_eq(view_graph(s1), view_graph(s2)) and
            last_written(s1) == last_written(s2))
################################################################################
# predicates
################################################################################
def view_serializable(schedule):
    """
    A schedule is view serializable if it is view equivalent to some serial
    schedule over the same transactions. Aborted transactions are ignored.
    """
    schedule = drop_aborts(schedule)
    # conflict serializability implies view serializability, so the cheap
    # graph check settles most schedules immediately
    if conflict_serializable(schedule):
        return True
    # a non-conflict-serializable schedule can only be view serializable if
    # it contains a blind write (a write of an object the transaction had
    # not previously read); without one, it isn't view serializable
    partitions = transactions(schedule)
    blind_write = False
    for t in partitions:
        objects_read = set()
        for a in t:
            if a.op == WRITE and a.obj not in objects_read:
                blind_write = True
            elif a.op == READ:
                objects_read.add(a.obj)
            else: # a.op == COMMIT or a.op == ABORT
                pass
    if not blind_write:
        return False
    # brute force check over all serializations to see if the schedule is view
    # equivalent to any serial schedule over the same set of transactions
    for s in itertools.permutations(transactions(schedule)):
        s = flatten(list(s))
        if view_equivalent(s, schedule):
            return True
    return False
def conflict_serializable(schedule):
    """
    A schedule is conflict serializable if its conflict graph is acyclic.
    Aborted transactions are ignored.
    """
    # any() short-circuits on the first cycle found
    return not any(nx.simple_cycles(conflict_graph(schedule)))
def recoverable(schedule):
    """
    A schedule is recoverable if all the transactions whose changes it read
    commit and the schedule commits after them.
    """
    schedule = add_commits(schedule)
    written_by = defaultdict(list)  # object -> ids of its writers, in order
    read_from  = defaultdict(set)   # id -> ids whose writes it read
    committed  = set()              # ids
    for a in schedule:
        if a.op == WRITE:
            written_by[a.obj].append(a.i)
        elif a.op == READ:
            # reading another transaction's latest write records a
            # reads-from dependency
            if a.obj in written_by and \
               len(written_by[a.obj]) > 0 and \
               written_by[a.obj][-1] != a.i:
                read_from[a.i].add(written_by[a.obj][-1])
        elif a.op == COMMIT:
            # every transaction we read from must already have committed
            if not all(i in committed for i in read_from[a.i]):
                return False
            committed.add(a.i)
        elif a.op == ABORT:
            # discard the aborted transaction's writes
            # Fix: .iteritems() is Python 2 only and raised AttributeError
            # here on Python 3 whenever an abort appeared in the schedule.
            for (o, ids) in written_by.items():
                written_by[o] = [i for i in ids if i != a.i]
    return True
def aca(schedule):
    """A schedule avoids cascading aborts if it only reads committed changes."""
    schedule = add_commits(schedule)
    last_write = defaultdict(list)  # object -> ids of its writers, in order
    committed  = set()              # ids
    for a in schedule:
        if a.op == WRITE:
            last_write[a.obj].append(a.i)
        elif a.op == READ:
            # reading another transaction's uncommitted write is exactly
            # what lets an abort cascade
            if a.obj in last_write and \
               len(last_write[a.obj]) > 0 and \
               last_write[a.obj][-1] not in committed \
               and last_write[a.obj][-1] != a.i:
                return False
        elif a.op == COMMIT:
            committed.add(a.i)
        elif a.op == ABORT:
            # discard the aborted transaction's writes
            # Fix: .iteritems() is Python 2 only and raised AttributeError
            # here on Python 3 whenever an abort appeared in the schedule.
            for (o, ids) in last_write.items():
                last_write[o] = [i for i in ids if i != a.i]
    return True
def strict(schedule):
    """
    A schedule is strict if it never reads or writes an object whose last
    write is still uncommitted (by another transaction).
    """
    schedule = add_commits(schedule)
    last_write = defaultdict(list)  # object -> ids of its writers, in order
    committed  = set()              # ids
    for a in schedule:
        if a.op == WRITE or a.op == READ:
            # touching an object whose latest writer has not committed
            # (and is not ourselves) violates strictness
            if a.obj in last_write and \
               len(last_write[a.obj]) > 0 and \
               last_write[a.obj][-1] not in committed and \
               last_write[a.obj][-1] != a.i:
                return False
            if a.op == WRITE:
                last_write[a.obj].append(a.i)
        elif a.op == COMMIT:
            committed.add(a.i)
        elif a.op == ABORT:
            # discard the aborted transaction's writes
            # Fix: .iteritems() is Python 2 only and raised AttributeError
            # here on Python 3 whenever an abort appeared in the schedule.
            for (o, ids) in last_write.items():
                last_write[o] = [i for i in ids if i != a.i]
    return True
################################################################################
# misc
################################################################################
def tex(schedule):
    """Render *schedule* as a TeX ``tabular``, one column per transaction.

    Each action occupies its own row, placed in the column of its
    transaction, with ``\\hline`` separators between rows.

    >>> tex([r(1,"A"), r(1,"B"), r(2,"B")]) #doctest: +SKIP
    """
    txns = sorted(transaction_ids(schedule))
    ncols = len(txns)
    col_spec = "|" + "|".join("c" for _ in txns) + "|"
    lines = [
        r"\begin{tabular}{" + col_spec + "}",
        r"\hline",
        "&".join("$T_{}$".format(t) for t in txns) + r"\\\hline",
    ]
    for action in schedule:
        col = txns.index(action.i)
        row = ("&" * col) + action.tex() + ("&" * (ncols - 1 - col))
        lines.append(row + r"\\\hline")
    lines.append(r"\end{tabular}")
    return "\n".join(lines) + "\n"
def conflict_graph(schedule):
    """Build the conflict (precedence) graph of *schedule*.

    The graph has one node per transaction and an edge a.i -> b.i for
    every ordered pair of actions (a, b) from different transactions on
    the same object where at least one of the two is a write.
    """
    schedule = drop_aborts(schedule)
    G = nx.DiGraph()
    G.add_nodes_from(transaction_ids(schedule))
    for idx, first in enumerate(schedule):
        for second in schedule[idx + 1:]:
            if first.i == second.i or first.obj != second.obj:
                continue
            if first.op == WRITE or second.op == WRITE:
                G.add_edge(first.i, second.i)
    return G
def draw(G):
    """Prettily draw a networkx graph G with matplotlib."""
    plt.figure()
    node_labels = {node: "$T_{{{}}}$".format(node) for node in G}
    colors = np.linspace(0, 1, len(G.nodes()))
    layout = nx.spectral_layout(G)
    nx.draw(
        G,
        alpha=1.0,
        cmap=plt.get_cmap("Dark2"),  # http://bit.ly/1ItQDgE
        font_color="w",
        font_size=40,
        labels=node_labels,
        node_color=colors,
        node_size=10000,
        pos=layout,  # http://bit.ly/1DAnT4y
        width=4.0,
        with_labels=True,
    )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| mwhittaker/serial | serial.py | serial.py | py | 13,575 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "networkx.DiGraph",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "ne... |
37059041423 | """Function which calculates how positive a website's content is. Scores usually range between -10 and +10"""
import requests
from bs4 import BeautifulSoup as bs
from afinn import Afinn
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def sentiment_analyze(url):
    """Calculate how positive a website's content is.

    Fetches *url*, splits the visible body text into snippets, and scores
    every snippet of at least five words with both AFINN and VADER.

    Args:
        url: Address of the page to analyze. "https://" is prepended when
            no scheme is present.

    Returns:
        dict: On success ``success`` is True with ``avg_score`` (roughly
        -10 to +10), the most positive/negative snippets
        (``max_text``/``min_text`` with ``max_score``/``min_score``) and
        ``raw_data`` listing every scored snippet. On failure ``success``
        is False and ``message`` explains why.
    """
    # Prepend a scheme only when none is given. The previous check only
    # recognised "https://", so plain-http URLs were mangled into
    # "https://http://..." and always failed.
    if not url.startswith(("http://", "https://")):
        url = "https://" + url
    try:
        my_session = requests.session()
        # NOTE(review): this issues an extra request just to harvest
        # cookies before the real fetch below -- confirm it is needed.
        for_cookies = requests.get(url).cookies
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
        }
        response = my_session.get(url, headers=headers, cookies=for_cookies)
        req_text = response.text

        # remove unicode characters
        decoded_text = req_text.encode("ascii", "ignore").decode("unicode_escape")

        # get the individual text pieces inside the web page as separate list elements
        soup_li = bs(decoded_text, "lxml").body.getText(separator="||").split("||")

        # list which will hold the pieces of text together with their scores
        text_li = []

        # Initialise the 2 sentiment analysis libraries used
        afinn = Afinn()
        analyzer = SentimentIntensityAnalyzer()

        # sum of non-0 scores
        sum_text = 0
        # count of non-0 scores
        count_text = 0

        # max/min text scores holders
        max_score = 0
        max_text = ""
        min_score = 0
        min_text = ""

        for text in soup_li:
            # only score pieces of text with at least 5 words
            # (the old comment said "sentences", but words are counted)
            if len(text.split()) >= 5:
                afinn_score = afinn.score(text)  # usually from -5 to +5
                vader_score = analyzer.polarity_scores(text)[
                    "compound"
                ]  # from -1 to +1

                combined_score = 0

                if afinn_score != 0 or vader_score != 0:
                    count_text += 1
                    if afinn_score == 0:
                        combined_score = vader_score
                    elif vader_score == 0:
                        combined_score = afinn_score
                    else:
                        # Weighted blend so both scales contribute comparably.
                        combined_score = (afinn_score * 2 + vader_score * 10) / 2

                    sum_text += 10 if combined_score > 0 else -10

                    if combined_score > max_score:
                        max_score = combined_score
                        max_text = text
                    elif combined_score < min_score:
                        min_score = combined_score
                        min_text = text

                    text_li.append(
                        {
                            "text": text,
                            "combined_score": combined_score,
                            "vader_score": vader_score,
                            "afinn_score": afinn_score,
                        }
                    )

        if count_text == 0:
            return {
                "success": False,
                "message": "Unable to calculate any scores.",
                "raw_data": text_li,
            }
        else:
            return {
                "success": True,
                "avg_score": round(sum_text / count_text * 2),
                "max_score": max_score,
                "max_text": max_text,
                "min_score": min_score,
                "min_text": min_text,
                "raw_data": text_li,
            }

    # catch errors in requests.get statement
    except requests.exceptions.ConnectionError as error:
        return {
            "success": False,
            "message": f"An error occurred when trying to access the '{url}' URL. Error message: '{error}'",
        }
    except Exception as error:
        return {
            "success": False,
            "message": f"Something went wrong when processing the '{url}' URL.Error message: '{error}'",
        }
{
"api_name": "requests.session",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "afinn.Afinn",
"li... |
323629396 | """Added paid to order model
Revision ID: eb502f9a5410
Revises: 82df20a186ef
Create Date: 2020-04-19 17:05:05.312230
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'eb502f9a5410'  # id of this migration
down_revision = '82df20a186ef'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the nullable boolean ``paid`` column to ``order``."""
    # ### commands auto generated by Alembic - please adjust! ###
    paid_column = sa.Column('paid', sa.Boolean(), nullable=True)
    op.add_column('order', paid_column)
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``paid`` column from ``order``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('order', 'paid')
    # ### end Alembic commands ###
| Dsthdragon/kizito_bookstore | migrations/versions/eb502f9a5410_added_paid_to_order_model.py | eb502f9a5410_added_paid_to_order_model.py | py | 682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean... |
15476854005 | from __future__ import division, print_function, absolute_import
import os
import pytest
import hypothesis
from hypothesis.errors import InvalidArgument
from hypothesis.database import ExampleDatabase
from hypothesis._settings import settings, Verbosity
def test_has_docstrings():
    """Sanity check: settings attributes expose docstrings."""
    assert settings.verbosity.__doc__
# Captured once at import time so later tests can verify that the
# default profile's max_examples is restored after overrides.
original_default = settings.get_profile('default').max_examples
def setup_function(fn):
    """pytest hook: reset to a fresh registered profile before each test."""
    settings.load_profile('default')
    settings.register_profile('test_settings', settings())
    settings.load_profile('test_settings')
def test_cannot_set_non_settings():
    """Assigning an unknown attribute on a settings object must fail."""
    s = settings()
    with pytest.raises(AttributeError):
        # 'databas_file' is deliberately misspelled: unknown names raise.
        s.databas_file = u'some_file'
def test_settings_uses_defaults():
    """A bare settings() picks up values from the active default profile."""
    s = settings()
    assert s.max_examples == settings.default.max_examples
def test_raises_attribute_error():
    """Reading an unknown attribute raises AttributeError."""
    with pytest.raises(AttributeError):
        settings().kittens
def test_respects_none_database():
    """database=None is preserved rather than replaced by a default."""
    assert settings(database=None).database is None
def test_settings_can_be_used_as_context_manager_to_change_defaults():
    """Entering a settings object swaps the default; exiting restores it."""
    with settings(max_examples=12):
        assert settings.default.max_examples == 12
    assert settings.default.max_examples == original_default
def test_can_repeatedly_push_the_same_thing():
    """The default-settings stack supports re-entrant, interleaved pushes."""
    s = settings(max_examples=12)
    t = settings(max_examples=17)
    assert settings().max_examples == original_default
    with s:
        assert settings().max_examples == 12
        with t:
            assert settings().max_examples == 17
            with s:
                assert settings().max_examples == 12
                with t:
                    assert settings().max_examples == 17
                assert settings().max_examples == 12
            assert settings().max_examples == 17
        assert settings().max_examples == 12
    assert settings().max_examples == original_default
def test_cannot_create_settings_with_invalid_options():
    """Unknown keyword arguments are rejected with InvalidArgument."""
    with pytest.raises(InvalidArgument):
        settings(a_setting_with_limited_options=u'spoon')
def test_can_set_verbosity():
    """All Verbosity enum members are accepted."""
    settings(verbosity=Verbosity.quiet)
    settings(verbosity=Verbosity.normal)
    settings(verbosity=Verbosity.verbose)
def test_can_not_set_verbosity_to_non_verbosity():
    """verbosity must be a Verbosity member, not an arbitrary string."""
    with pytest.raises(InvalidArgument):
        settings(verbosity='kittens')
@pytest.mark.parametrize('db', [None, ExampleDatabase()])
def test_inherits_an_empty_database(db):
    """A database set on an active settings object is inherited by new ones."""
    assert settings.default.database is not None
    s = settings(database=db)
    assert s.database is db
    with s:
        t = settings()
    assert t.database is db
@pytest.mark.parametrize('db', [None, ExampleDatabase()])
def test_can_assign_database(db):
    """database= is stored exactly as given, including None."""
    x = settings(database=db)
    assert x.database is db
def test_load_profile():
    """register_profile/load_profile swap defaults; unset keys keep defaults."""
    settings.load_profile('default')
    assert settings.default.max_examples == 200
    assert settings.default.max_shrinks == 500
    assert settings.default.min_satisfying_examples == 5
    settings.register_profile(
        'test',
        settings(
            max_examples=10,
            max_shrinks=5
        )
    )
    settings.load_profile('test')
    assert settings.default.max_examples == 10
    assert settings.default.max_shrinks == 5
    # min_satisfying_examples was not set in 'test', so it stays at default
    assert settings.default.min_satisfying_examples == 5
    settings.load_profile('default')
    assert settings.default.max_examples == 200
    assert settings.default.max_shrinks == 500
    assert settings.default.min_satisfying_examples == 5
def test_loading_profile_keeps_expected_behaviour():
    """A loaded profile stays active across temporary context overrides."""
    settings.register_profile('ci', settings(max_examples=10000))
    settings.load_profile('ci')
    assert settings().max_examples == 10000
    with settings(max_examples=5):
        assert settings().max_examples == 5
    assert settings().max_examples == 10000
def test_load_non_existent_profile():
    """Looking up an unregistered profile name raises InvalidArgument."""
    with pytest.raises(hypothesis.errors.InvalidArgument):
        settings.get_profile('nonsense')
@pytest.mark.skipif(
    os.getenv('HYPOTHESIS_PROFILE') not in (None, 'default'),
    reason='Defaults have been overridden')
def test_runs_tests_with_defaults_from_conftest():
    """The conftest-provided defaults (strict, no timeout) are in effect."""
    assert settings.default.strict
    assert settings.default.timeout == -1
| LyleH/hypothesis-python_1 | tests/cover/test_settings.py | test_settings.py | py | 4,182 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "hypothesis._settings.settings.verbosity",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "hypothesis._settings.settings",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "hypothesis._settings.settings.get_profile",
"line_number": 17,
... |
71683730983 | import argparse
import datetime
import os
import socket
import sys
from time import sleep
# espa-processing imports
import config
import parameters
import processor
import settings
import sensor
import utilities
from api_interface import APIServer
from logging_tools import (EspaLogging, get_base_logger, get_stdout_handler,
get_stderr_handler, archive_log_files)
# Module-level logger used until the per-order processing logger is configured.
base_logger = get_base_logger()
def work(cfg, params, developer_sleep_mode=False):
    """
    Take the environment configuration, order parameters and initiate order processing.
    Note: Much of this code was taken from the ondemand_mapper.py script in espa-processing.
    Args:
        cfg (dict): Configuration params given by config.config() and by the worker environment
        params (dict): JSON response from the API for a single granule or scene;
            must contain 'orderid', 'scene', 'product_type' and 'options'
        developer_sleep_mode (bool): When True, skip the minimum-duration sleep
            for non-plot requests (developer convenience)
    Returns:
        bool: True when the product was generated, packaged and distributed;
        False when processing failed and the error was reported to the ESPA API
    Raises:
        ValueError: If the [options] record is missing or the requested
            output format is invalid
    """
    # This will be the Mesos node hostname
    processing_location = socket.gethostname()
    # Use the base_logger initially, if an exception occurs before the processing logger is configured
    # the base_logger will handle log it
    logger = base_logger
    if not parameters.test_for_parameter(params, 'options'):
        raise ValueError('Error missing JSON [options] record')
    # Recorded so the minimum-request-duration sleep below can be computed
    start_time = datetime.datetime.now()
    # Initialize so that we don't sleep
    dont_sleep = True
    # Note that the API response "scene" value is what we use for product_id
    try:
        # NOTE(review): if this unpacking raises (missing key), the except
        # handler below references order_id/product_id/server before they are
        # bound and would itself raise NameError -- confirm upstream validation.
        (order_id, product_id, product_type, options) = \
            (params['orderid'], params['scene'], params['product_type'],
             params['options'])
        if product_id != 'plot':
            # Developer mode is always false unless you are a developer
            # so sleeping will always occur for non-plotting requests
            # Override with the developer mode
            dont_sleep = developer_sleep_mode
        # Fix the orderid in-case it contains any single quotes
        # The processors can not handle single quotes in the email
        # portion due to usage in command lines.
        params['orderid'] = order_id.replace("'", '')
        # product_id is not part of the API response - we add it here
        if not parameters.test_for_parameter(params, 'product_id'):
            params['product_id'] = product_id
        # Figure out if debug level logging was requested
        debug = False
        if parameters.test_for_parameter(options, 'debug'):
            debug = options['debug']
        # Configure and get the logger for this order request
        EspaLogging.configure(settings.PROCESSING_LOGGER, order=order_id,
                              product=product_id, debug=debug)
        # Replace the base_logger with the processing_logger
        logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)
        # add our stdout/stderr log streams
        logger.addHandler(get_stdout_handler())
        logger.addHandler(get_stderr_handler())
        logger.info('Processing {}:{}'.format(order_id, product_id))
        logger.info('Attempting connection to {0}'.format(cfg['espa_api']))
        # will throw an exception on init if unable to get a 200 response
        server = APIServer(cfg['espa_api'])
        # will throw an exception if does not receive a 200 response
        # NOTE(review): 'status' is never used afterwards
        status = server.update_status(product_id, order_id, processing_location, 'processing')
        if product_id != 'plot':
            # Make sure we can process the sensor
            tmp_info = sensor.info(product_id)
            del tmp_info
            # Make sure we have a valid output format
            if not parameters.test_for_parameter(options, 'output_format'):
                logger.warning('[output_format] parameter missing defaulting to envi')
                options['output_format'] = 'envi'
            if (options['output_format'] not in parameters.VALID_OUTPUT_FORMATS):
                raise ValueError('Invalid Output format {}'.format(options['output_format']))
        # ----------------------------------------------------------------
        # NOTE: The first thing the product processor does during
        #       initialization is validate the input parameters.
        # ----------------------------------------------------------------
        # Sentinel values reported to the API if processing fails mid-way
        destination_product_file = 'ERROR'
        destination_cksum_file = 'ERROR'
        pp = None
        try:
            # All processors are implemented in the processor module
            pp = processor.get_instance(cfg, params)
            (destination_product_file, destination_cksum_file) = pp.process()
        finally:
            # Free disk space to be nice to the whole system.
            if pp is not None:
                pp.remove_product_directory()
        # Sleep the number of seconds for minimum request duration
        sleep(utilities.get_sleep_duration(cfg, start_time, dont_sleep))
        log_items = archive_log_files(order_id, product_id)
        for item in log_items:
            utilities.change_ownership(item, cfg.get('espa_user'), cfg.get('espa_group'))
        # Everything was successful so mark the scene complete
        server.mark_scene_complete(product_id, order_id,
                                   processing_location,
                                   destination_product_file,
                                   destination_cksum_file,
                                   '') # sets log_file_contents to empty string ''
        return True
    except Exception as e:
        # First log the exception
        logger.exception('Exception encountered in processing.main.work:\nexception: {}'.format(e))
        try:
            # Sleep the number of seconds for minimum request duration
            logger.debug('Attempting to archive log files for order_id: {}\nproduct_id: {}'.format(order_id, product_id))
            sleep(utilities.get_sleep_duration(cfg, start_time, dont_sleep))
            log_items = archive_log_files(order_id, product_id)
            for item in log_items:
                utilities.change_ownership(item, cfg.get('espa_user'), cfg.get('espa_group'))
        except Exception as e2:
            # Archiving is best-effort: log and continue to error reporting
            logger.exception('Problem archiving log files. error: {}'.format(e2))
        try:
            logger.debug('Attempting to set product error, order_id: {}\nproduct_id: {}'.format(order_id, product_id))
            logged_contents = EspaLogging.read_logger_file(settings.PROCESSING_LOGGER)
            error_log = "Processing Log: {}\n\nException: {}".format(logged_contents, e)
            server.set_scene_error(product_id, order_id, processing_location, error_log)
        except Exception as e3:
            # Re-raised so the framework marks this task as failed
            logger.exception('Unable to reach ESPA API and set product error for order_id: {}\nproduct_id: {}\nerror: {}'.format(order_id, product_id, e3))
            raise e3
        return False
def main(data=None):
    """Worker entry point: configure the environment and process orders.

    Args:
        data: Optional list of order dicts. When omitted, a single JSON
            positional argument is parsed from the command line.

    Exits the process with status 0 on success, or with an error message
    (non-zero status) on failure.
    """
    try:
        # retrieve a dict containing processing environment configuration values
        cfg = config.config()
        sleep_for = cfg.get('init_sleep_seconds')
        base_logger.info('Holding for {} seconds'.format(sleep_for))
        sleep(sleep_for)
        # export values for the container environment
        config.export_environment_variables(cfg)
        # create the .netrc file
        utilities.build_netrc()
        base_logger.debug('OS ENV - {0}'.format(['{0}: {1}'.format(var, val) for var, val in os.environ.items()]))
        base_logger.info('configured parameters - {0}'.format(['{0}: {1}'.format(var, val) for var, val in cfg.items()]))
        if not data:
            parser = argparse.ArgumentParser()
            # NOTE(review): the two adjacent help literals concatenate
            # without a space -> "...APIcontaining..."
            parser.add_argument(dest="data", action="store", metavar="JSON",
                                type=utilities.convert_json,
                                help="response from the API"
                                "containing order information")
            args = parser.parse_args()
            data = args.data
        base_logger.info('order data - {0}'.format(data))
        for d in data:
            result = work(cfg, d)
            base_logger.info('processing.work executed for data {} successfully? {}'.format(d, result))
        # Exit normally
        sys.exit(0)
    except Exception as e:
        msg = 'ESPA Worker error, problem executing main.main\nError: {}'.format(e)
        base_logger.exception(msg)
        # Exit with 1 so Container and Task know there was a problem and report to the framework appropriately
        sys.exit(msg)
if __name__ == '__main__':
    # Script entry point: CLI parsing happens inside main().
    main()
| djzelenak/espa-worker | processing/main.py | main.py | py | 8,666 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging_tools.get_base_logger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "parameters.test_for_parameter",
"line_number": 44,
"usage_type": "call"
},
{
"ap... |
28180715346 | from pathlib import Path
from .CUBEparser import parse_files
# yapf: disable
SHORTHAND_FUNCTIONALS = [
'svwn3',
'svwn5',
'pbe',
'pbe0',
'bpw91',
'bp86',
'b3p86',
'b3p86-g',
'blyp',
'b3lyp',
'b3lyp-g',
'olyp',
'kt1',
'kt2',
'kt3'
]
# yapf: enable
"""List of recognized shorthands for functionals"""
def write_scf_fock(user_dict, wf_dict, origin):
    """Assemble the Fock-operator specification for the SCF calculation.

    Operators are included based on the wavefunction method and the
    requested environment/relativity/external-field options.

    Args:
        user_dict: Parsed user input sections.
        wf_dict: Wavefunction summary from parse_wf_method().
        origin: Gauge origin forwarded to the external-field operator.

    Returns:
        dict: Operator name -> operator parameters.
    """
    prec = user_dict["Precisions"]
    mpi = user_dict["MPI"]
    method = wf_dict["method_type"]
    fock = {}

    # ZORA relativistic correction
    if user_dict["WaveFunction"]["relativity"].lower() == "zora":
        zora = user_dict["ZORA"]
        fock["zora_operator"] = {
            "include_nuclear": zora["include_nuclear"],
            "include_coulomb": zora["include_coulomb"],
            "include_xc": zora["include_xc"],
        }

    # Kinetic energy
    fock["kinetic_operator"] = {"derivative": user_dict["Derivatives"]["kinetic"]}

    # Nuclear attraction
    fock["nuclear_operator"] = {
        "proj_prec": prec["nuclear_prec"],
        "smooth_prec": prec["nuclear_prec"],
        "nuclear_model": user_dict["WaveFunction"]["nuclear_model"],
        "shared_memory": mpi["share_nuclear_potential"],
    }

    # Solvent reaction field (PCM)
    if user_dict["WaveFunction"]["environment"].lower() == "pcm":
        scrf = user_dict["PCM"]["SCRF"]
        permittivity = user_dict["PCM"]["Permittivity"]
        fock["reaction_operator"] = {
            "poisson_prec": user_dict["world_prec"],
            "kain": scrf["kain"],
            "max_iter": scrf["max_iter"],
            "optimizer": scrf["optimizer"],
            "dynamic_thrs": scrf["dynamic_thrs"],
            "density_type": scrf["density_type"],
            "epsilon_in": permittivity["epsilon_in"],
            "epsilon_out": permittivity["epsilon_out"],
            "formulation": permittivity["formulation"],
        }

    # Classical Coulomb repulsion
    if method in ("hartree", "hf", "dft"):
        fock["coulomb_operator"] = {
            "poisson_prec": prec["poisson_prec"],
            "shared_memory": mpi["share_coulomb_potential"],
        }

    # Exact exchange
    if method in ("hf", "dft"):
        fock["exchange_operator"] = {
            "poisson_prec": prec["poisson_prec"],
            "exchange_prec": prec["exchange_prec"],
        }

    # Exchange-correlation: parse "name [coef]" lines into functional specs
    if method == "dft":
        functionals = []
        for line in wf_dict["dft_funcs"].split("\n"):
            tokens = line.split()
            if tokens:
                weight = float(tokens[1]) if len(tokens) > 1 else 1.0
                functionals.append({"name": tokens[0].lower(), "coef": weight})
        fock["xc_operator"] = {
            "shared_memory": mpi["share_xc_potential"],
            "xc_functional": {
                "spin": user_dict["DFT"]["spin"],
                "cutoff": user_dict["DFT"]["density_cutoff"],
                "functionals": functionals,
            },
        }

    # Static external electric field
    if len(user_dict["ExternalFields"]["electric_field"]) > 0:
        fock["external_operator"] = {
            "electric_field": user_dict["ExternalFields"]["electric_field"],
            "r_O": origin,
        }
    return fock
def write_scf_guess(user_dict, wf_dict):
    """Assemble the initial-guess specification for the SCF driver.

    Supports checkpoint ('chk'), core/SAD with a zeta suffix
    (e.g. 'sad_dz'), GTO and CUBE guesses. Missing checkpoint or CUBE
    files fall back to the 'sad_gto' guess with a printed warning.

    Args:
        user_dict: Parsed user input sections.
        wf_dict: Wavefunction summary from parse_wf_method().

    Returns:
        dict: Initial-guess parameters for the program input.
    """
    guess_str = user_dict["SCF"]["guess_type"].lower()
    guess_type = guess_str.split("_")[0]
    zeta = 0

    scf_dict = user_dict["SCF"]
    guess_prec = scf_dict["guess_prec"]
    if guess_type == "chk":
        # At least one orbital must be present in the checkpoint folder
        chk_Phi = Path(f"{scf_dict['path_checkpoint']}/phi_scf_idx_0.meta")
        if not chk_Phi.is_file():
            print(
                f"No checkpoint guess found in {scf_dict['path_checkpoint']}, falling back to 'sad_gto' initial guess"
            )
            guess_type = "sad_gto"
        else:
            # adjust guess precision if checkpoint files are present
            guess_prec = user_dict["world_prec"]

    if guess_type in ["core", "sad"]:
        # NOTE(review): assumes the guess string has an underscore-separated
        # suffix (e.g. 'sad_dz'); a bare 'sad' would raise IndexError.
        zeta_str = guess_str.split("_")[1]
        if zeta_str == "gto":
            guess_type = guess_str
        else:
            zeta_map = {"sz": 1, "dz": 2, "tz": 3, "qz": 4}
            if zeta_str in zeta_map:
                zeta = zeta_map[zeta_str]
            else:
                # Preserve historical behaviour: warn and keep zeta == 0
                print("Invalid zeta:" + zeta_str)

    file_dict = user_dict["Files"]
    if guess_type == "cube":
        found = parse_files(user_dict)
        if not found:
            print(
                f"No CUBE guess found in any of the 'initial_guess' sub-folders, falling back to 'sad_gto' initial guess"
            )
            guess_type = "sad_gto"
    vector_dir = file_dict["cube_vectors"]

    guess_dict = {
        "zeta": zeta,
        "prec": guess_prec,
        "type": guess_type,
        "method": wf_dict["method_name"],
        "relativity": wf_dict["relativity_name"],
        "environment": wf_dict["environment_name"],
        "external_field": wf_dict["external_name"],
        "screen": scf_dict["guess_screen"],
        "localize": scf_dict["localize"],
        "restricted": user_dict["WaveFunction"]["restricted"],
        "file_chk": f"{scf_dict['path_checkpoint']}/phi_scf",
        "file_basis": file_dict["guess_basis"],
        "file_gto_p": file_dict["guess_gto_p"],
        "file_gto_a": file_dict["guess_gto_a"],
        "file_gto_b": file_dict["guess_gto_b"],
        "file_phi_p": file_dict["guess_phi_p"] + "_scf",
        "file_phi_a": file_dict["guess_phi_a"] + "_scf",
        "file_phi_b": file_dict["guess_phi_b"] + "_scf",
        "file_CUBE_p": f"{vector_dir}CUBE_p_vector.json",
        "file_CUBE_a": f"{vector_dir}CUBE_a_vector.json",
        "file_CUBE_b": f"{vector_dir}CUBE_b_vector.json",
    }
    return guess_dict
def write_scf_solver(user_dict, wf_dict):
    """Assemble the SCF solver specification.

    Negative precision values are sentinels: final_prec falls back to the
    global world_prec, and start_prec falls back to final_prec.
    """
    scf = user_dict["SCF"]
    final_prec = scf["final_prec"]
    if final_prec < 0.0:
        final_prec = user_dict["world_prec"]
    start_prec = scf["start_prec"]
    if start_prec < 0.0:
        start_prec = final_prec
    return {
        "method": wf_dict["method_name"],
        "relativity": wf_dict["relativity_name"],
        "environment": wf_dict["environment_name"],
        "external_field": wf_dict["external_name"],
        "kain": scf["kain"],
        "max_iter": scf["max_iter"],
        "rotation": scf["rotation"],
        "localize": scf["localize"],
        "file_chk": scf["path_checkpoint"] + "/phi_scf",
        "checkpoint": scf["write_checkpoint"],
        "start_prec": start_prec,
        "final_prec": final_prec,
        "energy_thrs": scf["energy_thrs"],
        "orbital_thrs": scf["orbital_thrs"],
        "helmholtz_prec": user_dict["Precisions"]["helmholtz_prec"],
    }
def write_scf_properties(user_dict, origin):
    """Assemble the requested ground-state property calculations.

    Each enabled property maps to a single default-named entry holding
    its operator specification.
    """
    props = user_dict["Properties"]
    world_prec = user_dict["world_prec"]
    prop = {}
    if props["dipole_moment"]:
        prop["dipole_moment"] = {
            "dip-1": {
                "operator": "h_e_dip",
                "precision": world_prec,
                "r_O": origin,
            }
        }
    if props["quadrupole_moment"]:
        prop["quadrupole_moment"] = {
            "quad-1": {
                "operator": "h_e_quad",
                "precision": world_prec,
                "r_O": origin,
            }
        }
    if props["geometric_derivative"]:
        prop["geometric_derivative"] = {
            "geom-1": {
                "operator": "h_nuc_grad",
                "precision": world_prec,
                "smoothing": user_dict["Precisions"]["nuclear_prec"],
            }
        }
    return prop
def write_scf_plot(user_dict):
    """Assemble the plotting section, if any plotting was requested.

    Returns an empty dict when neither the density nor any orbital is to
    be plotted. Plotter coordinates given in angstrom are converted to
    bohr.
    """
    props = user_dict["Properties"]
    plot = {}
    if not (props["plot_density"] or len(props["plot_orbitals"])):
        return plot
    plot["orbitals"] = props["plot_orbitals"]
    plot["density"] = props["plot_density"]
    plotter = user_dict["Plotter"]
    if user_dict["world_unit"] == "angstrom":
        factor = user_dict["Constants"]["angstrom2bohrs"]
        plotter = {key: [factor * val for val in vals] for key, vals in plotter.items()}
    plot["plotter"] = plotter
    return plot
def write_rsp_calc(omega, user_dict, origin):
    """Build the input block for one linear-response calculation.

    Args:
        omega: Perturbation frequency (a.u.); values > 1.0e-12 mark the
            response as dynamic.
        user_dict: Parsed user input sections.
        origin: Gauge origin forwarded to the unperturbed Fock operator.

    Returns:
        dict: Response specification with one entry per Cartesian
        component (x, y, z), each with its initial guess and, when
        enabled, its solver and orbital-output sections.

    Raises:
        RuntimeError: If a relativistic Hamiltonian was requested
            (linear response is only implemented non-relativistically).
    """
    wf_dict = parse_wf_method(user_dict)
    if not wf_dict["relativity_name"] in ["None", "Off"]:
        raise RuntimeError(
            "Linear response not available: " + wf_dict["relativity_name"]
        )
    rsp_dict = user_dict["Response"]
    file_dict = user_dict["Files"]
    rsp_calc = {}
    rsp_calc["frequency"] = omega
    rsp_calc["dynamic"] = omega > 1.0e-12
    rsp_calc["fock_operator"] = write_rsp_fock(user_dict, wf_dict)
    # The unperturbed (ground-state) problem reuses the full SCF Fock setup
    rsp_calc["unperturbed"] = {
        "precision": user_dict["world_prec"],
        "localize": rsp_dict["localize"],
        "fock_operator": write_scf_fock(user_dict, wf_dict, origin),
    }
    guess_str = rsp_dict["guess_type"].lower()
    user_guess_type = guess_str.split("_")[0]
    user_guess_prec = rsp_dict["guess_prec"]
    vector_dir = file_dict["cube_vectors"]
    rsp_calc["components"] = []
    # NOTE(review): loop variable 'dir' shadows the builtin of the same name
    for dir in [0, 1, 2]:
        rsp_comp = {}
        program_guess_type = user_guess_type
        program_guess_prec = user_guess_prec
        # check that initial guess files exist
        if user_guess_type == "chk":
            chk_X = Path(f"{rsp_dict['path_checkpoint']}/X_rsp_{dir:d}")
            chk_Y = Path(f"{rsp_dict['path_checkpoint']}/Y_rsp_{dir:d}")
            if not (chk_X.is_file() and chk_Y.is_file()):
                print(
                    f"No checkpoint guess found in {rsp_dict['path_checkpoint']} for direction {dir:d}, falling back to zero initial guess"
                )
                program_guess_type = "none"
            else:
                # adjust guess precision if checkpoint files are present
                program_guess_prec = user_dict["world_prec"]
        elif user_guess_type == "cube":
            found = parse_files(user_dict, dir)
            if not found:
                print(
                    f"No CUBE guess found in any of the 'initial_guess' sub-folders for direction {dir:d}, falling back to zero initial guess"
                )
                program_guess_type = "none"
        else:
            # do no checks on other types of guess
            pass
        rsp_comp["initial_guess"] = {
            "prec": program_guess_prec,
            "type": program_guess_type,
            "file_chk_x": f"{rsp_dict['path_checkpoint']}/X_rsp_{dir:d}",
            "file_chk_y": f"{rsp_dict['path_checkpoint']}/Y_rsp_{dir:d}",
            "file_x_p": f"{file_dict['guess_x_p']}_rsp_{dir:d}",
            "file_x_a": f"{file_dict['guess_x_a']}_rsp_{dir:d}",
            "file_x_b": f"{file_dict['guess_x_b']}_rsp_{dir:d}",
            "file_y_p": f"{file_dict['guess_y_p']}_rsp_{dir:d}",
            "file_y_a": f"{file_dict['guess_y_a']}_rsp_{dir:d}",
            "file_y_b": f"{file_dict['guess_y_b']}_rsp_{dir:d}",
            "file_CUBE_x_p": f"{vector_dir}CUBE_x_p_{dir:d}_vector.json",
            "file_CUBE_x_a": f"{vector_dir}CUBE_x_a_{dir:d}_vector.json",
            "file_CUBE_x_b": f"{vector_dir}CUBE_x_b_{dir:d}_vector.json",
            "file_CUBE_y_p": f"{vector_dir}CUBE_y_p_{dir:d}_vector.json",
            "file_CUBE_y_a": f"{vector_dir}CUBE_y_a_{dir:d}_vector.json",
            "file_CUBE_y_b": f"{vector_dir}CUBE_y_b_{dir:d}_vector.json",
        }
        if rsp_dict["write_orbitals"]:
            path_orbitals = rsp_dict["path_orbitals"]
            rsp_comp["write_orbitals"] = {
                "file_x_p": f"{path_orbitals}/X_p_rsp_{dir:d}",
                "file_x_a": f"{path_orbitals}/X_a_rsp_{dir:d}",
                "file_x_b": f"{path_orbitals}/X_b_rsp_{dir:d}",
                "file_y_p": f"{path_orbitals}/Y_p_rsp_{dir:d}",
                "file_y_a": f"{path_orbitals}/Y_a_rsp_{dir:d}",
                "file_y_b": f"{path_orbitals}/Y_b_rsp_{dir:d}",
            }
        if rsp_dict["run"][dir]:
            rsp_comp["rsp_solver"] = write_rsp_solver(user_dict, wf_dict, dir)
        rsp_calc["components"].append(rsp_comp)
    return rsp_calc
def write_rsp_fock(user_dict, wf_dict):
fock_dict = {}
# Coulomb
if wf_dict["method_type"] in ["hartree", "hf", "dft"]:
fock_dict["coulomb_operator"] = {
"poisson_prec": user_dict["Precisions"]["poisson_prec"],
"shared_memory": user_dict["MPI"]["share_coulomb_potential"],
}
# Exchange
if wf_dict["method_type"] in ["hf", "dft"]:
fock_dict["exchange_operator"] = {
"poisson_prec": user_dict["Precisions"]["poisson_prec"],
"exchange_prec": user_dict["Precisions"]["exchange_prec"],
}
# Exchange-Correlation
if wf_dict["method_type"] in ["dft"]:
func_dict = []
for line in wf_dict["dft_funcs"].split("\n"):
sp = line.split()
if len(sp) > 0:
func = sp[0].lower()
coef = [1.0]
if len(sp) > 1:
coef = list(map(float, sp[1:]))
func_dict.append({"name": func, "coef": coef[0]})
fock_dict["xc_operator"] = {
"shared_memory": user_dict["MPI"]["share_xc_potential"],
"xc_functional": {
"spin": user_dict["DFT"]["spin"],
"cutoff": user_dict["DFT"]["density_cutoff"],
"functionals": func_dict,
},
}
return fock_dict
def write_rsp_solver(user_dict, wf_dict, d):
    """Assemble the response solver specification for component *d*.

    Negative precision values are sentinels: final_prec falls back to the
    global world_prec, and start_prec falls back to final_prec.
    """
    rsp = user_dict["Response"]
    final_prec = rsp["final_prec"]
    if final_prec < 0.0:
        final_prec = user_dict["world_prec"]
    start_prec = rsp["start_prec"]
    if start_prec < 0.0:
        start_prec = final_prec
    return {
        "method": wf_dict["method_name"],
        "kain": rsp["kain"],
        "max_iter": rsp["max_iter"],
        "file_chk_x": rsp["path_checkpoint"] + "/X_rsp_" + str(d),
        "file_chk_y": rsp["path_checkpoint"] + "/Y_rsp_" + str(d),
        "checkpoint": rsp["write_checkpoint"],
        "start_prec": start_prec,
        "final_prec": final_prec,
        "orbital_thrs": rsp["orbital_thrs"],
        "property_thrs": rsp["property_thrs"],
        "helmholtz_prec": user_dict["Precisions"]["helmholtz_prec"],
        "orth_prec": 1.0e-14,
    }
def parse_wf_method(user_dict):
    """Derive wavefunction-method settings from the validated user input.

    Normalizes the method name/type and DFT functional string, and builds
    printable labels for relativity, environment, and external fields.
    Mutates ``user_dict`` in place to normalize the relativity and ZORA
    settings.

    Raises:
        RuntimeError: for an unknown method, a ZORA setup with no potential
            terms enabled, or ZORA (V_xc) with an unrestricted wavefunction.
    """
    method_name = ""
    restricted = user_dict["WaveFunction"]["restricted"]
    method_type = user_dict["WaveFunction"]["method"].lower()
    dft_funcs = user_dict["DFT"]["functionals"].lower()
    if method_type in ["core"]:
        method_name = "Core Hamiltonian"
    elif method_type in ["hartree"]:
        method_name = "Hartree"
    elif method_type in ["hf", "hartree-fock", "hartreefock"]:
        method_name = "Hartree-Fock"
        method_type = "hf"
    elif method_type in ["dft"]:
        method_name = "DFT"
    elif method_type in ["lda"]:
        # "lda" is an alias for DFT with the SVWN5 functional.
        method_name = "DFT (SVWN5)"
        dft_funcs = "svwn5"
        method_type = "dft"
    # SHORTHAND_FUNCTIONALS is assumed to be a module-level list of
    # functional-name shorthands defined elsewhere in this file — TODO confirm.
    elif method_type in SHORTHAND_FUNCTIONALS:
        method_name = "DFT (" + method_type.upper() + ")"
        dft_funcs = method_type
        method_type = "dft"
    else:
        raise RuntimeError(
            f"Invalid wavefunction method {user_dict['WaveFunction']['method']}"
        )
    # Determine relativity name label for print outs to the output file
    relativity_name = "None"
    if user_dict["WaveFunction"]["relativity"].lower() in ["none"]:
        user_dict["WaveFunction"]["relativity"] = "off"
        user_dict["ZORA"]["include_nuclear"] = False
        user_dict["ZORA"]["include_coulomb"] = False
        user_dict["ZORA"]["include_xc"] = False
    # "nzora" is rewritten to "zora" with only V_nuc enabled, so it
    # deliberately falls through into the "zora" branch below.
    if user_dict["WaveFunction"]["relativity"].lower() in ["nzora"]:
        user_dict["WaveFunction"]["relativity"] = "zora"
        user_dict["ZORA"]["include_nuclear"] = True
        user_dict["ZORA"]["include_coulomb"] = False
        user_dict["ZORA"]["include_xc"] = False
    if user_dict["WaveFunction"]["relativity"].lower() in ["zora"]:
        components = [
            user_dict["ZORA"]["include_nuclear"],
            user_dict["ZORA"]["include_coulomb"],
            user_dict["ZORA"]["include_xc"],
        ]
        names = ["V_nuc", "J", "V_xc"]
        if any(components):
            # Label lists only the enabled potential terms, e.g. "ZORA (V_nuc + J)".
            zora_terms = " + ".join(
                [name for name, comp in zip(names, components) if comp]
            )
            relativity_name = "ZORA (" + zora_terms + ")"
        else:
            raise RuntimeError("ZORA selected, but no ZORA potential included")
        if user_dict["ZORA"]["include_xc"] and not restricted:
            raise RuntimeError(
                "ZORA (V_xc) not available for unrestricted wavefunctions"
            )
    # Determine environment name label for print outs to the output file
    environment_name = "None"
    if user_dict["WaveFunction"]["environment"].lower() == "pcm":
        environment_name = "PCM"
    # Determine external name label for print outs to the output file
    ext_dict = user_dict["ExternalFields"]
    has_external_fields = len(ext_dict["electric_field"]) > 0
    external_name = "None"
    if has_external_fields:
        # If no external fields, then the list will be empty
        # Need to catch the exception and store placeholders
        try:
            x, y, z = ext_dict["electric_field"]
        except ValueError:
            x, y, z = None, None, None  # Useless placeholders
        # Labels to aggregate
        external_name = f"Electric field ({x}, {y}, {z})"
    wf_dict = {
        "relativity_name": relativity_name,
        "environment_name": environment_name,
        "external_name": external_name,
        "method_name": method_name,
        "method_type": method_type,
        "dft_funcs": dft_funcs,
    }
    return wf_dict
| MRChemSoft/mrchem | python/mrchem/helpers.py | helpers.py | py | 18,587 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "CUBEparser.parse_files",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
... |
74928715303 | # Derived from https://github.com/greenelab/deep-review/blob/75f2dd8c61099a17235a4b8de0567b2364901e4d/build/randomize-authors.py
# by Daniel Himmelstein under the CC0 1.0 license
# https://github.com/greenelab/deep-review#license
import argparse
import pathlib
import sys
import yaml
from manubot.util import read_serialized_data
MISSING_CONTRIBUTIONS = ["**MISSING**"]
def parse_args():
    """Parse command-line options: a required --keyword and an optional --path."""
    parser = argparse.ArgumentParser(
        description=(
            "Select authors for an individual manuscript from metadata.authors "
            "or update author metadata for the merged manuscript. Overwrites metadata.yaml."
        )
    )
    parser.add_argument(
        "--keyword",
        required=True,
        help=(
            "keyword indicating the individual manuscript "
            "(e.g. 'pathogenesis') or 'merged' to update author metadata for the merged manuscript"
        ),
    )
    parser.add_argument(
        "--path",
        default="content/metadata.yaml",
        help="path to metadata.yaml",
    )
    return parser.parse_args()
def dump_yaml(obj, path):
    """Serialize *obj* as an explicit YAML document at *path* (UTF-8).

    Writes a progress message to stderr and appends a trailing newline
    after the document markers.
    """
    target = pathlib.Path(path)
    sys.stderr.write(f"Dumping YAML to {target}\n")
    dump_options = dict(
        explicit_start=True,
        explicit_end=True,
        width=float("inf"),
        sort_keys=False,
        allow_unicode=True,
    )
    with target.open("w", encoding="utf-8") as handle:
        yaml.dump(obj, handle, **dump_options)
        handle.write("\n")
def generate_consortium_members(authors):
    """Return the names of all non-consortium authors.

    An author is a member unless its "consortium" flag is present and truthy.
    The result is sorted by the last whitespace-separated token of the name
    (i.e. the surname for typical "First Last" names).
    """
    members = [
        author["name"]
        for author in authors
        if not author.get("consortium")
    ]
    members.sort(key=lambda name: name.split()[-1])
    return members
def update_merged(path):
    """
    Update author contributions for the merged manuscript by taking the union
    of all contributions on individual manuscripts. Overwrites existing
    contributions for the author that are not associated with an individual
    manuscript. Builds the list of consortium members.
    """
    metadata = read_serialized_data(path)
    authors = metadata.get("authors", [])
    metadata["consortiummembers"] = generate_consortium_members(authors)
    # Set contributions to the union of all manuscript-specific contributions
    # Use general contributions if there are no manuscript-specific contributions
    for author in authors:
        contributions = set()
        if "manuscripts" in author:
            for manuscript in author["manuscripts"].keys():
                # NOTE(review): IndentationError (a builtin syntax exception) is
                # reused here to signal a YAML layout mistake — confirm intended.
                if manuscript.lower() == "contributions":
                    raise IndentationError(f"Contributions for {author['name']} should be "
                    "indented under a specific manuscript\n")
                # A list of the author's contributions for each individual manuscript
                individual_contributions = author["manuscripts"][manuscript].get("contributions", MISSING_CONTRIBUTIONS)
                contributions.update(individual_contributions)
        elif "contributions" in author:
            contributions.update(author["contributions"])
        else:
            # No contributions anywhere: insert the "**MISSING**" sentinel.
            contributions.update(MISSING_CONTRIBUTIONS)
        # The sentinel surviving in the union means at least one manuscript
        # (or the author overall) had no contributions listed.
        if MISSING_CONTRIBUTIONS[0] in contributions:
            sys.stderr.write(f"Missing contributions for {author['name']}\n")
        author["contributions"] = sorted(contributions)
        # Check whether code of conduct has been approved
        if "code of conduct" not in author or "confirmed" not in author["code of conduct"] or not author["code of conduct"]["confirmed"]:
            sys.stderr.write(f"{author['name']} has not approved the code of conduct\n")
    sys.stderr.write(f"Updating contributions for {len(authors)} authors for merged manuscript\n")
    metadata["authors"] = authors
    dump_yaml(metadata, path)
def update_individual(path, keyword):
    """
    Select authors for an individual manuscript. Expects the manuscript keyword
    to be in a dictionary called manuscripts for each author of that manuscript.
    Updates contributions to be the manuscript-specific contributions. Builds the
    list of consortium members.
    """
    metadata = read_serialized_data(path)
    authors = metadata.get("authors", [])
    metadata["consortiummembers"] = generate_consortium_members(authors)
    # Authorship of this manuscript is indicated by the keyword being present
    # under the author's "manuscripts" mapping.
    individual_authors = [author for author in authors if "manuscripts" in author and keyword in author["manuscripts"]]
    # Sort authors by their numeric order for this individual manuscript
    # If the author has the manuscript keyword, which indicates authorship, but not an order
    # the default order is -1, which should move them to the front of the author list
    # Sort by name to break ties
    individual_authors.sort(key=lambda author: (author["manuscripts"][keyword].get("order", -1), author["name"]))
    # Set contributions to the appropriate manuscript-specific contributions
    for author in individual_authors:
        # A list of the author's contributions for this manuscript
        contributions = author["manuscripts"][keyword].get("contributions", MISSING_CONTRIBUTIONS)
        if contributions == MISSING_CONTRIBUTIONS:
            sys.stderr.write(f"Missing {keyword} contributions for {author['name']}\n")
        author["contributions"] = sorted(contributions)
    sys.stderr.write(f"Found {len(individual_authors)} authors for {keyword} manuscript\n")
    metadata["authors"] = individual_authors
    dump_yaml(metadata, path)
if __name__ == "__main__":
    args = parse_args()
    # "merged" rebuilds the union of contributions across all manuscripts;
    # any other keyword selects the authors of that individual manuscript.
    if args.keyword.lower() == "merged":
        update_merged(args.path)
    else:
        update_individual(args.path, args.keyword)
| greenelab/covid19-review | build/update-author-metadata.py | update-author-metadata.py | py | 5,812 | python | en | code | 117 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
... |
72166330345 | # Importing necessary libraries
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# from sklearn.metrics import mean_squared_error, r2_score
# Sample dataset of house prices and areas (replace this with your own dataset)
mona = "data.xlsx"
data = pd.read_excel(mona)
# Create a DataFrame from the dataset
#df = pd.DataFrame(data)
# Split the data into features (X) and target (y)
# First column is the area feature; the last column is the price target.
X = data.iloc[:, 0:1]
y = data.iloc[:, -1]
# Split the data into training and testing sets
# NOTE(review): X.values strips the column names, presumably to avoid the
# sklearn "feature names" warning when predicting on a bare list — confirm.
X_train, X_test, y_train, y_test = train_test_split(X.values, y, test_size=0.2, random_state=42)
# Create and train the linear regression model
model = LinearRegression()
model.fit(X_train, y_train)
# Make predictions on the test set
# y_pred = model.predict(X_test)
# Evaluate the model
# mse = mean_squared_error(y_test, y_pred)
# r2 = r2_score(y_test, y_pred)
# Get area as user input for prediction; loop until the input parses as a float.
while True:
    try:
        user_input_area = float(input("Enter the area in sq. ft for price prediction: "))
        break
    except ValueError:
        print("Invalid input! Please enter a valid number.")
# Predict the price for the user input area (2-D input: one sample, one feature).
predicted_price = model.predict([[user_input_area]])
price = predicted_price[0]
print(f"Predicted Price for {user_input_area} sq.ft is Rs {round(price,2)}")
| preyar/RealEstatePricePrediction | Prediction.py | Prediction.py | py | 1,385 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 22,
"usage_type": "call... |
12631299379 | import cv2
import os
# Path of the input video file
video_path = "차체 영상 1차.mp4"
# Folder where the captured frame images will be stored
output_folder = "video"
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
# Create the video capture object
cap = cv2.VideoCapture(video_path)
# Verify that the capture opened correctly
if not cap.isOpened():
    print("Error: Could not open video.")
    exit()
# Frame-capture loop state
frame_count = 0
while True:
    # Grab the next frame
    ret, frame = cap.read()
    # Stop once the end of the video is reached
    if not ret:
        break
    # Build the image file path (zero-padded frame index)
    image_path = os.path.join(output_folder, f"frame_{frame_count:04d}.jpg")
    # Save the frame as a JPEG image
    cv2.imwrite(image_path, frame)
    frame_count += 1
# Release the resources that were used
cap.release()
cv2.destroyAllWindows()
print(f"{frame_count} frames captured and saved to '{output_folder}'")
| TAEM1N2/kudos12_2023 | data_setting/python_practice/video_cap.py | video_cap.py | py | 971 | python | ko | code | 1 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_nu... |
19795245501 | import json
import sys
from django.conf import settings
from django.utils import translation
from adapter.utils import build_auth_args
# from adapter.utils.local import get_request
from blueapps.utils import get_request
def _clean_auth_info_uin(auth_info):
if "uin" in auth_info:
# 混合云uin去掉第一位
if auth_info["uin"].startswith("o"):
auth_info["uin"] = auth_info["uin"][1:]
return auth_info
def update_bkdata_auth_info(params):
    """Refresh the bkdata (data platform) auth fields on *params* and return it."""
    token_auth_enabled = settings.FEATURE_TOGGLE.get("bkdata_token_auth", "off") == "on"
    if token_auth_enabled:
        # Token auth: keep a caller-supplied method; otherwise default to "token".
        method = params.get("bkdata_authentication_method")
        params["bkdata_authentication_method"] = method or "token"
        params["bkdata_data_token"] = settings.BKDATA_DATA_TOKEN
    else:
        # User auth: impersonate the admin superuser.
        params.update(
            bkdata_authentication_method="user",
            bk_username="admin",
            operator="admin",
        )
    return params
# Background (celery) and test tasks call ESB without user-level permission checks
if (
    "celery" in sys.argv
    or "shell" in sys.argv
    or ("runserver" not in sys.argv and sys.argv and "manage.py" in sys.argv[0])
):
    def add_esb_info_before_request(params):
        # Background path: authenticate as admin with the app credentials.
        params["bk_app_code"] = settings.APP_CODE
        params["bk_app_secret"] = settings.SECRET_KEY
        params.setdefault("bkdata_authentication_method", "user")
        if "bk_username" not in params:
            params["bk_username"] = "admin"
        if "operator" not in params:
            params["operator"] = params["bk_username"]
        return params
    def add_esb_info_before_request_for_bkdata(params):
        # Adds standard ESB fields, then overlays bkdata auth settings.
        params = add_esb_info_before_request(params)
        params = update_bkdata_auth_info(params)
        return params
# Function used by normal web requests
# NOTE(review): add_esb_info_before_request_for_bkdata is only defined in the
# background branch above — confirm the web path defines it elsewhere.
else:
    def add_esb_info_before_request(params):
        """
        Control via *params* whether the current request should be consulted.
        @param {Boolean} [params.no_request] whether to skip the request context
        """
        # Normalized parameters
        params["bk_app_code"] = settings.APP_CODE
        params["bk_app_secret"] = settings.SECRET_KEY
        params["appenv"] = settings.RUN_VER
        if "no_request" in params and params["no_request"]:
            params["bk_username"] = "admin"
            params["operator"] = "admin"
        else:
            # Pull auth info from the current HTTP request.
            req = get_request()
            auth_info = build_auth_args(req)
            params.update(auth_info)
            if not params.get("auth_info"):
                auth_info = _clean_auth_info_uin(auth_info)
                params["auth_info"] = json.dumps(auth_info)
            params.update({"blueking_language": translation.get_language()})
            bk_username = req.user.bk_username if hasattr(req.user, "bk_username") else req.user.username
            if "bk_username" not in params:
                params["bk_username"] = bk_username
            if "operator" not in params:
                params["operator"] = bk_username
        # Backwards compatibility with the legacy interface
        params["uin"] = params["bk_username"]
        params["app_code"] = settings.APP_CODE
        params["app_secret"] = settings.SECRET_KEY
        params.setdefault("bkdata_authentication_method", "user")
        return params
| robert871126/bk-chatbot | adapter/api/modules/utils.py | utils.py | py | 3,445 | python | en | code | null | github-code | 36 | [
{
"api_name": "django.conf.settings.FEATURE_TOGGLE.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.FEATURE_TOGGLE",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 24,
"usage_type": ... |
4799644228 | from cadastro import Cadastro, Login
from email_senha import EmailSenha
import json, random, string
login = False
cadastro = False
opcao = input("1. login\n2. cadastrar ")
if opcao == "1":
    # The code below treats "logado" as success and "senha incorreta" as a
    # wrong password from Login().autenticacao() — TODO confirm in cadastro.Login.
    login = Login().autenticacao()
    tentativas = 0
    while login == "senha incorreta":
        print("senha incorreta")
        login = Login().autenticacao()
        tentativas += 1
        # After two failed retries, start the e-mail password-reset flow.
        if tentativas == 2:
            # Six-character one-time code (uppercase letters + digits).
            codigo = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
            dados = Login().retorna_email()
            retorno_email = EmailSenha(dados[0]).envia(codigo)
            print(retorno_email)
            if "não existe" in retorno_email:
                pass
            else:
                autenticacao = input("digite o codigo enviado no seu email: ")
                if autenticacao == codigo:
                    senha = input("digite sua nova senha: ")
                    confirmacao = input("confirme sua nova senha: ")
                    if senha == confirmacao:
                        Cadastro(dados[0], senha, dados[1], dados[2], dados[3], True).armazena()
                    else:
                        # Two more chances to type a matching password pair.
                        for i in range(2):
                            senha = input("digite sua nova senha: ")
                            confirmacao = input("confirme sua nova senha: ")
                            if senha == confirmacao:
                                Cadastro(dados[0], senha, dados[1], dados[2], dados[3], True).armazena()
                                break
                else:
                    # Up to four additional attempts at the e-mailed code.
                    i = 0
                    while autenticacao != codigo:
                        i += 1
                        autenticacao = input("digite o codigo enviado no seu email: ")
                        if i == 4:
                            break
            break
if login == "logado":
    print("_"*50, "\n")
    print("1. consultar\n2. atualizar")
    print("_"*50, "\n")
    option = input("digite a opção: ")
    if option == "1":
        with open("db.json", "r", encoding="utf8") as db:
            data = json.load(db)
            print("_"*50, "\n")
            print("1. consultar base\n2. consultar usuario")
            print("_"*50, "\n")
            option = input("digite a opção: ")
            if option == "1":
                # List every record id with its nickname.
                for id in data:
                    print(id, data[id]["nickname"])
            elif option == "2":
                user = input("qual usuário você deseja consultar? ")
                # NOTE(review): this substring-matches the queried name inside
                # every field of every record and prints the query itself —
                # verify this lookup behaves as intended.
                for id in data:
                    for user_db in data[id]:
                        if user in data[id][user_db]:
                            print(user)
                # print(data)
    # TODO: implement the "atualizar" (update) option
elif opcao == "2":
    Cadastro().armazena()
{
"api_name": "cadastro.Login",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cadastro.Login",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.choices",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "string.ascii_uppercase",... |
35910502981 | import os, discord, random, asyncio, json, time
from discord.ext import commands
class games(commands.Cog):
    """Casual game commands cog: coin flip, a test command group, and a
    dungeon-game prototype (owner-only)."""
    def __init__(self,bot):
        # NOTE(review): this instance attribute has the same name as the
        # `coin_toss` command method below — confirm the shadowing is intended.
        self.coin_toss=0
        self.bot=bot
        self.counter = 0
    # Coin flip: the player bets on H(eads) or T(ails).
    @commands.command(name="FLIP",aliases=['FLIP`'])
    async def coin_toss(self,ctx,choice):
        #choice must be either H or T
        # 0 represents heads, 1 represents tails.
        self.coin_toss=random.randint(0,1)
        if choice == 'H' or choice == 'T':
            if choice== 'H':
                choice_text='Heads'
                if self.coin_toss==0:
                    won_text='And Won!'
                else:
                    won_text='You Lost!'
            else:
                choice_text='Tails'
                if self.coin_toss==1:
                    won_text='And Won!'
                else:
                    won_text='You Lost!'
            # Three embeds: the spinning-coin animation plus one result
            # embed per outcome.
            coin_embed=discord.Embed(title="The Coin Is Being Tossed",description="You're Betting On "+choice_text ,colour=discord.Colour.gold())
            coin_embed.set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url)
            coin_embed.set_image(url="https://cdn.dribbble.com/users/1493264/screenshots/5573460/coin-flip-dribbble.gif")
            coin_embed.set_thumbnail(url="https://i.imgur.com/YTg8cjS.png")
            heads_embed=discord.Embed(title="The Coin Has Been Tossed",description="You Got Heads! "+won_text ,colour=discord.Colour.gold())
            heads_embed.set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url)
            heads_embed.set_image(url="https://rollthedice.online/assets/images/upload/dice/dado-cara-cruz/cara_moneda.png")
            tails_embed=discord.Embed(title="The Coin Has Been Tossed",description="You Got Tails! "+won_text ,colour=discord.Colour.gold())
            tails_embed.set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url)
            tails_embed.set_image(url="https://rollthedice.online/assets/images/upload/dice/dado-cara-cruz/cruz_moneda.png")
            # Show the animation for 7 seconds, then replace it with the result.
            loading=await ctx.send(embed=coin_embed)
            await asyncio.sleep(7)
            await loading.delete()
            if self.coin_toss==0:
                await ctx.send(embed=heads_embed)
            else:
                await ctx.send(embed=tails_embed)
            if choice == "H" and self.coin_toss==0 or choice == "T" and self.coin_toss==1:
                print("COIN: PLAYER WON")
            else:
                print("COIN: PLAYER LOST")
        else:
            await ctx.send('``INCORRECT FORMAT: PLEASE DO `FLIP` H OR `FLIP` T ``')
    # Minimal command group used for testing subcommand dispatch.
    @commands.group()
    async def cool(self,ctx):
        if ctx.invoked_subcommand is None:
            await ctx.send("No")
    @cool.command(name='bot',aliases=['TESTB'])
    async def _bot(self,ctx):
        await ctx.send('Yes, the bot is cool.')
    # Dungeon-crawler entry point (owner-only); shows the intro embed.
    @commands.group(name="DUNGEON",aliases=['DNG`'],invoke_without_command=True)
    @commands.has_role('Owner')
    async def dungeon_game(self,ctx,choice=None):
        self.dungeon_current=ctx.author.id
        self.is_playing=True
        dungeon_embed=discord.Embed(title="THE DUNGEON",description="To start the game do `DNG` START",colour=discord.Colour.dark_red())
        dungeon_embed.set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url)
        dungeon_embed.set_image(url="https://cdn.discordapp.com/attachments/732597615199387820/732603048664236082/Untitled13_20200714182238.png")
        dungeon_embed.set_thumbnail(url="https://i.imgur.com/YTg8cjS.png")
        dungeon_embed.add_field(name="Description:",value='"The Dungeon" is a Dungeon crawler game where your goal is to roam the dungeon to try and find loot,and finally make a swift exit,careful of the skeleton mobs that may end up catching you ending your run! find the exit as fast as possible with the maximum amount of loot.')
        startup=await ctx.send(embed=dungeon_embed)
    # Experimental command (owner-only): waits for up to ten "hello"/"hi"
    # messages and logs which one was received.
    @commands.command(name="DUNGEONT",aliases=['DT`'])
    @commands.has_role('Owner')
    async def dungeon_test(self,ctx,choice=None):
        print(choice)
        i=0
        if choice ==None:
            await ctx.send("hello")
            def check(m):
                return m.content == ('hello') or m.content == ('hi')
            for i in range(10):
                msg = await self.bot.wait_for('message', check=check)
                if msg.content== ('hello'):
                    print('y')
                elif msg.content==('hi'):
                    print('n')
                i+=1
def setup(client):
    # Entry point used by discord.py's load_extension to register this cog.
    client.add_cog(games(client))
| yassir56069/TSOE | cogs/games.py | games.py | py | 4,574 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "disco... |
20227289100 | #!/usr/bin/python
import sys, os
import sets
from Bio import SeqIO
def make_location_set(l):
    """Return the set of base positions covered by feature location *l*.

    Positions run from ``nofuzzy_start`` up to (but excluding)
    ``nofuzzy_end``. Uses the built-in ``set`` type instead of the
    deprecated ``sets.Set`` (removed in Python 3); built-in sets support
    the same ``intersection`` API used by the caller below.
    """
    return set(range(l.nofuzzy_start, l.nofuzzy_end))
for rec in SeqIO.parse(sys.stdin, "genbank"):
new_features = []
for feature in rec.features:
add = 1
if feature.type == 'CDS':
if '*' in feature.qualifiers['translation'][0]:
location_set = make_location_set(feature.location)
for f2 in rec.features:
if f2.type == 'CDS' and f2 != feature:
ret = location_set.intersection(make_location_set(f2.location))
if ret:
add = 0
if add:
new_features.append(feature)
rec.features = new_features
SeqIO.write([rec], sys.stdout, "genbank")
| nickloman/xbase | annotation/remove_overlaps_with_frameshifts.py | remove_overlaps_with_frameshifts.py | py | 843 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "sets.Set",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sys.stdin",
"line_number": 10,
... |
23410005730 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds three hardware-description fields
    # to the `caja` model and rewrites the datetime defaults on `timbrado`.
    dependencies = [
        ('bar', '0238_auto_20160817_1352'),
    ]
    operations = [
        migrations.AddField(
            model_name='caja',
            name='marca',
            field=models.CharField(default=b'PC Standard', help_text=b'Ingrese la marca de la Caja.', max_length=50, verbose_name=b'Marca'),
        ),
        migrations.AddField(
            model_name='caja',
            name='modelo_fabricacion',
            field=models.CharField(default=b'PC Standard Proc. Intel - 4 GBs de RAM', help_text=b'Ingrese el modelo de fabricacion de la Caja.', max_length=100, verbose_name=b'Modelo de Fabricacion'),
        ),
        migrations.AddField(
            model_name='caja',
            name='numero_serie',
            field=models.CharField(default=b'1234567890', help_text=b'Ingrese el numero de serie de la Caja.', max_length=20, verbose_name=b'Numero de Serie'),
        ),
        # NOTE(review): these DateField defaults are fixed timestamps captured
        # when makemigrations ran; new rows get this constant date, not "now".
        migrations.AlterField(
            model_name='timbrado',
            name='fecha_autorizacion_timbrado',
            field=models.DateField(default=datetime.datetime(2016, 8, 17, 14, 8, 50, 877000), help_text=b'Ingrese la Fecha de Autorizacion del Timbrado', verbose_name=b'Fecha de Autorizacion del Timbrado'),
        ),
        migrations.AlterField(
            model_name='timbrado',
            name='fecha_limite_vigencia_timbrado',
            field=models.DateField(default=datetime.datetime(2017, 8, 17, 14, 8, 50, 877000), help_text=b'Ingrese la Fecha Limite de Vigencia del Timbrado', verbose_name=b'Fecha Limite de Vigencia del Timbrado'),
        ),
    ]
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 15,
"usage_type": "call"
},
{
... |
14570907737 | #importar librerias
from ast import parse
from wsgiref import headers
import requests #peticion al servidor
import lxml.html as html
import pandas as pd
from tqdm.auto import tqdm #barras de progreso
from lxml.etree import ParseError
from lxml.etree import ParserError
import csv
from fake_useragent import UserAgent
ua = UserAgent()
# Initialize scrape configuration and XPath selectors
url_padre = f'https://www.metacritic.com/browse/games/release-date/available/ps5/metascore?page=0' # page index starts at 0
url_root = 'https://www.metacritic.com'
header = {'User-Agent': ua.random}
link_title = '//td[@class="clamp-summary-wrap"]/a/@href'
genre = '//li[@class="summary_detail product_genre"]/span[@class="data"]/text()'
s = requests.Session() # a session reuses connections, so repeated requests are faster
r = s.get(url_padre, headers=header) # the User-Agent header avoids a 400 error
home = r.content.decode('utf-8')
parser = html.fromstring(home)
titles_url = parser.xpath(link_title)
titles_url = [url_root+x for x in titles_url]
genres_list = []
links_offline = []
def get_genres():
    """Fetch each game page and collect its (deduplicated) genre strings.

    Appends a per-title genre list to the module-level ``genres_list`` and
    records unreachable/unparsable links in ``links_offline``.
    """
    for x in tqdm(titles_url):
        header = {'User-Agent': ua.random}
        try:
            r = s.get(x, headers=header) # the User-Agent header avoids a 400 error
            # NOTE(review): `r == ''` compares a Response object to a string and
            # is always False — this was probably meant to check r.text or the
            # status code; confirm and fix.
            if (r == ''):
                print(f'Status Code: {r.status_code} - {x}')
                links_offline.append(x)
                pass
            home = r.content.decode('utf-8')
            parser = html.fromstring(home)
            genres = parser.xpath(genre)
            # Deduplicate via a set (this loses the original order).
            genres_aux = set(genres)
            genres = list(genres_aux)
            genres_list.append(genres)
        except (ParserError, ParseError, IndexError) as error:
            print(f'\n{error} {r.status_code}')
            links_offline.append(x)
            continue
    return genres_list
df=pd.DataFrame(get_genres())
print(df)
print(links_offline)
df.to_csv('metacritic_game_ranking_test.csv', index=False)
{
"api_name": "fake_useragent.UserAgent",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "lxml.h... |
73445915623 | import IPython
from IPython.utils.path import get_ipython_dir
from IPython.html.utils import url_path_join as ujoin
from IPython.html.base.handlers import IPythonHandler, json_errors
from tornado import web
import json
# Handler for the /new page. Will render page using the
# wizard.html page
class New_PageHandler(IPythonHandler):
    """Render the "create distributed project" wizard page."""
    @web.authenticated
    def get(self):
        page = self.render_template(
            'wizard.html',
            base_url=self.base_url,
            page_title="New Distributed Project",
        )
        self.write(page)
# Handler for the /master page. Will render page using the
# master.html page
class Master_PageHandler(IPythonHandler):
    """Render the distributed-project management page."""
    @web.authenticated
    def get(self):
        page = self.render_template(
            'master.html',
            base_url=self.base_url,
            page_title="Manage Distributed Projects",
        )
        self.write(page)
| Feldman-Michael/masterthesis | home/ipython/.local/share/jupyter/nbextensions/ma/server/services/ipy_html_distproject.py | ipy_html_distproject.py | py | 996 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "IPython.html.base.handlers.IPythonHandler",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "tornado.web.authenticated",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 13,
"usage_type": "name"
},
{
... |
74957373542 | from __future__ import print_function, division
import os
import torch
import torchtext
import itertools
from loss.loss import NLLLoss
class Evaluator(object):
    """Evaluate a seq2seq model over a dataset: loss, accuracies, and F1."""
    # NOTE(review): the default `loss=NLLLoss()` is evaluated once at class
    # definition time, so every Evaluator created without an explicit loss
    # shares the same NLLLoss instance — confirm this sharing is intended.
    def __init__(self, loss=NLLLoss(), batch_size=64):
        self.loss = loss
        self.batch_size = batch_size
    def evaluate(self, model, data):
        """Run *model* over *data* without gradients.

        Returns a 4-tuple: (loss value, character accuracy, sentence
        accuracy, F1 score over the non-special target tokens).
        """
        model.eval()
        loss = self.loss
        loss.reset()
        # Running counters accumulated over all batches and decode steps.
        match = 0
        total = 0
        match_sentence = 0
        total_lengths = 0
        condition_positive = 0
        prediction_positive = 0
        true_positive = 0
        check_sentence = True
        #device = None if torch.cuda.is_available() else -1
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        batch_iterator = torchtext.data.BucketIterator(
            dataset=data, batch_size=self.batch_size,
            sort=True, sort_key=lambda x: len(x.src),
            device=device, train=False)
        tgt_vocab = data.fields['tgt'].vocab
        # Special-token indices used for masking in the metrics below.
        pad = tgt_vocab.stoi[data.fields['tgt'].pad_token]
        eos = tgt_vocab.stoi['<eos>']
        zero = tgt_vocab.stoi['0']
        unk = tgt_vocab.stoi[data.fields['tgt'].unk_token]
        with torch.no_grad():
            for batch in batch_iterator:
                input_variables, input_lengths = getattr(batch, 'src')
                input_part, _ = getattr(batch, 'srcp')
                target_variables = getattr(batch, 'tgt')
                target_part = getattr(batch, 'tgtp')
                decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(),
                                                               input_part, target_variables, target_part)
                correct_list = []
                # Evaluation
                seqlist = other['sequence']
                for step, step_output in enumerate(decoder_outputs):
                    # Targets are shifted by one (position 0 is the start token).
                    target = target_variables[:, step + 1]
                    loss.eval_batch(step_output.view(target_variables.size(0), -1), target)
                    predict = seqlist[step].view(-1)
                    non_padding = target.ne(pad)
                    correct = predict.eq(target).masked_select(non_padding).sum().item()
                    correct_list.append(predict.eq(target).masked_select(non_padding).tolist())
                    # CP/PP select tokens that are neither '0' nor <eos>
                    # (elementwise comparison of the two boolean masks).
                    CP = target.ne(zero).eq(target.ne(eos)).masked_select(non_padding)
                    PP = predict.ne(zero).eq(predict.ne(eos)).masked_select(non_padding)
                    # c_mask keeps tokens that are none of pad/<eos>/unk/'0'
                    # (assuming those special ids are mutually exclusive).
                    c_mask = target.ne(pad).eq(target.ne(eos)).eq(target.ne(unk)).eq(target.ne(zero))
                    TP = target.masked_select(c_mask).eq(predict.masked_select(c_mask))
                    match += correct
                    total += non_padding.sum().item()
                    condition_positive += CP.sum().item()
                    prediction_positive += PP.sum().item()
                    true_positive += TP.sum().item()
                # Transpose per-step correctness into per-sentence rows; a
                # sentence counts as matched only if no position was wrong
                # (a 0/False entry marks a wrong token).
                q = list(itertools.zip_longest(*correct_list))
                for i in q:
                    check_sentence = False
                    for j in i:
                        if(j == 0):
                            check_sentence = True
                    if(check_sentence == False):
                        match_sentence += 1
                total_lengths += len(input_lengths)
        # Guard every ratio against division by zero.
        if total == 0:
            character_accuracy = 0
            sentence_accuracy = 0
        else:
            character_accuracy = match / total
            sentence_accuracy = match_sentence / total_lengths
        if condition_positive == 0:
            recall = 0
        else:
            recall = true_positive / condition_positive
        if prediction_positive == 0:
            precision = 0
        else:
            precision = true_positive / prediction_positive
        if precision == 0 and recall == 0:
            f1_score = 0
        else:
            f1_score = 2.0 * ((precision * recall) / (precision + recall))
        return loss.get_loss(), character_accuracy, sentence_accuracy, f1_score
| hopemini/activity-clustering-multimodal-ml | autoencoder/seq2seq/evaluator/evaluator.py | evaluator.py | py | 4,024 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "loss.loss.NLLLoss",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "loss.loss",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "loss.loss",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "loss.loss.reset",
"line_num... |
138291184 | #!/usr/bin/env python3
from collections import deque
class BinaryTreeNode:
    # Simple binary-tree node: a value plus optional left/right children.
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None
def create_tree():
    """Build and return the fixed sample tree used by the demo below."""
    # Deep chain under the left subtree: 2 -> 5 -> 8 -> 9.
    n8 = BinaryTreeNode(8)
    n8.right = BinaryTreeNode(9)
    n5 = BinaryTreeNode(5)
    n5.left = n8
    n2 = BinaryTreeNode(2)
    n2.left = BinaryTreeNode(4)
    n2.right = n5
    # Shallow right subtree: 3 -> 7 (node 6 intentionally omitted).
    n3 = BinaryTreeNode(3)
    n3.right = BinaryTreeNode(7)
    root = BinaryTreeNode(1)
    root.left = n2
    root.right = n3
    return root
def in_order_traversal(node):
    """Print node values in-order as "v, v, ..." with no trailing newline."""
    # Iterative in-order walk using an explicit stack.
    stack = []
    current = node
    while stack or current is not None:
        # Descend as far left as possible.
        while current is not None:
            stack.append(current)
            current = current.left
        current = stack.pop()
        print('{0:}, '.format(current.value), end="")
        current = current.right
def is_tree_balanced(node):
    """Return True if the tree rooted at *node* is height-balanced.

    A tree is balanced when, at every node, the heights of the left and
    right subtrees differ by at most one. Fixes the original, which only
    inspected the root and raised AttributeError when a child was missing
    (it called height() on None).
    """
    def _check(n):
        # Returns the subtree height, or -2 as a sentinel once any
        # subtree is known to be unbalanced (short-circuits the walk).
        if n is None:
            return -1
        lh = _check(n.left)
        if lh == -2:
            return -2
        rh = _check(n.right)
        if rh == -2 or abs(lh - rh) > 1:
            return -2
        return max(lh, rh) + 1
    return _check(node) != -2
def height(node):
    """Return the height (in edges) of the tree rooted at *node*.

    A leaf has height 0; an empty tree (None) has height -1. The original
    raised AttributeError when called directly with None; for non-None
    nodes this simplified form computes exactly the same values (a missing
    child contributes -1, offset by the +1 for the current level).
    """
    if node is None:
        return -1
    return 1 + max(height(node.left), height(node.right))
def bfs(node):
    """Print node values in breadth-first (level) order, comma-separated.

    BUG FIX: the original guard was inverted (`if node is not None: return`),
    so the function returned immediately for every real tree, and for a None
    root it enqueued None and crashed on `temp_node.value`.
    """
    if node is None:
        return
    q = deque([node])
    while q:
        current = q.popleft()
        print(current.value, end=", ")
        if current.left is not None:
            q.append(current.left)
        if current.right is not None:
            q.append(current.right)
if __name__ == '__main__':
    # Demo: traverse, check root-level balance, then level-order print.
    in_order_traversal(create_tree())
    print("\n", end="")
    print(is_tree_balanced(create_tree()))
    bfs(create_tree())
| banginji/algorithms | misc/treebalance.py | treebalance.py | py | 1,635 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 59,
"usage_type": "call"
}
] |
406588995 | # Run by typing python3 main.py
# Import basics
import re
import os
import pickle
# Import stuff for our web server
from flask import Flask, flash, request, redirect, url_for, render_template
from flask import send_from_directory
from flask import jsonify
from utils import get_base_url, allowed_file, and_syntax
# Import stuff for text pre-processing and models
import numpy as np
import nltk
nltk.download('punkt')
import torch
from aitextgen import aitextgen
from gensim.models import Word2Vec
# Load up the models into memory
# (done once at import time so every request reuses the same instances)
ai = aitextgen(to_gpu=False, model_folder="models/trained_model_gpt2")
rf_model = pickle.load(open('models/random_forest_model_avg.pkl', 'rb'))  # NOTE(review): handle never closed; prefer `with open(...)`
w2v_model = Word2Vec.load('models/w2v.bin')
NON_ALPHANUM = re.compile(r'[\W]')  # any non-word character
NON_ASCII = re.compile(r'[^a-z0-1\s]')  # NOTE(review): '0-1' matches only digits 0 and 1 — likely meant '0-9'; confirm before changing (model was trained on this cleaning)
# Setup the webserver
# Port may need to be changed if there are multiple flask servers running on same server
#port = 12346
#base_url = get_base_url(port)
#app = Flask(__name__, static_url_path=base_url+'static')
# Deployment code - uncomment the following line of code when ready for production
app = Flask(__name__)
def sent_vectorizer(sent):
    """Takes in a sentence and returns the average word2vec embedding of all words
    in this sentence that are in the vocab of the model.

    Inputs:
    -------
    sent (str):
        a string of a sentence to embedd

    Output:
    -------
    avgembedding (np.ndarray):
        A 100-dimension long numpy vector of the average Word2vec embedding of all
        the words of ``sent`` that appear in the vocabulary of Word2Vec model.
        The zero vector is returned when no word is in the vocabulary.
    """
    sent_vec = np.zeros(100)
    numw = 0
    words = nltk.word_tokenize(sent)
    for w in words:
        # NOTE(review): membership test against index_to_key is a linear list
        # scan; `w in w2v_model.wv` would be O(1) — left as-is for safety.
        if w in w2v_model.wv.index_to_key:
            sent_vec = np.add(sent_vec, w2v_model.wv[w])
            numw += 1
    # BUG FIX: guard against sentences with no in-vocabulary words, which
    # previously raised ZeroDivisionError.
    if numw == 0:
        return sent_vec
    avgembedding = sent_vec / numw
    return avgembedding
def clean_text(text):
    """Cleans text using regex.

    Arguments:
    ----------
    text (str):
        Text.

    Returns:
    --------
    no_non_ascii (str):
        Cleaned text: lowercased, non-word characters replaced by spaces,
        remaining disallowed characters removed.
    """
    normalized = NON_ALPHANUM.sub(r' ', text.lower())
    return NON_ASCII.sub(r'', normalized)
@app.route('/')
#@app.route(base_url)
def home():
    """Render the landing page."""
    return render_template('Home.html', generated=None)
@app.route('/', methods=['POST'])
#@app.route(base_url, methods=['POST'])
def home_post():
    """Landing-page form submit: redirect to the results page."""
    return redirect(url_for('results'))
@app.route('/team')
#@app.route(base_url + '/team')
def team():
    """Render the team/about page."""
    return render_template('Team.html', generated=None)
@app.route('/results')
#@app.route(base_url + '/results')
def results():
    """Render the interactive demo page."""
    return render_template('Try-our-product.html', generated=None)
@app.route('/generate_text', methods=["POST"])
#@app.route(base_url + '/generate_text', methods=["POST"])
def generate_text():
    """
    View function that will return json response for generated text.

    Reads the 'prompt' form field, generates two candidate pick-up lines with
    the GPT-2 model, and attaches Meemaw's approval verdict (random-forest
    sentiment on the averaged word2vec embedding) as HTML snippets.
    """
    prompt = request.form['prompt']
    if prompt is not None:
        prompt = str(prompt).strip()
    generated = ai.generate(
        n=2,
        batch_size=4,
        prompt=prompt,
        max_length=20,
        temperature=1.0,
        top_p=0.9,
        return_as_list=True
    )
    opinions = []
    for line in generated:
        cleaned_line = clean_text(line)
        embedding = sent_vectorizer(cleaned_line).reshape(-1, 100)
        # NOTE(review): the classifier appears to emit string labels '1'/'-1';
        # any other label silently produces no opinion entry, so `opinions`
        # can be shorter than `generated` — verify the front end handles that.
        opinion = rf_model.predict(embedding).item()
        if opinion == '1':
            opinions.append('<br><i> ( Meemaw <span style=\"color: #008000\">approves</span> this message! )</i>')
        elif opinion == '-1':
            opinions.append("<br><i> ( Meemaw <span style=\"color: #E53F2E\">doesn't approve</span> this message! )</i>")
    data = {'generated_ls': generated, 'opinions': opinions}
    return jsonify(data)
if __name__ == "__main__":
'''
coding center code
'''
# IMPORTANT: change the cocalcx.ai-camp.org to the site where you are editing this file.
website_url = 'cocalc2.ai-camp.org'
print(f"Try to open\n\n https://{website_url}" + base_url + '\n\n')
app.run(host = '0.0.0.0', port=port, debug=True)
import sys; sys.exit(0)
'''
scaffold code
'''
# Only for debugging while developing
# app.run(port=80, debug=True)
| aashishyadavally/pick-up-line-generator | app/main.py | main.py | py | 4,425 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "aitextgen.aitextgen",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec.... |
23243623755 | import cv2
import numpy as np
import tkinter as tk
from tkinter import filedialog
def load_correspondences(file_name):
    """Read point correspondences from *file_name*.

    Each line must be "x1,y1,x2,y2" (integers). Returns two float32 arrays of
    shape (N, 2): points in the first image and points in the second image.
    On any failure the original sentinel contract is kept: (False, True).
    """
    try:
        # BUG FIX: use a context manager so the file is closed even when a
        # malformed line raises mid-parse (the original leaked the handle).
        with open(file_name, "r") as f:
            lines = [line.rstrip() for line in f]
        points1 = []
        points2 = []
        for line in lines:
            x1, y1, x2, y2 = (int(v) for v in line.split(','))
            points1.append([x1, y1])
            points2.append([x2, y2])
        points1 = np.array(points1, np.float32)
        points2 = np.array(points2, np.float32)
        return points1, points2
    except Exception as e:
        print(e)
        print('Cannot read file', file_name)
        # NOTE(review): (False, True) is an odd error sentinel — callers must
        # check for it explicitly.
        return False, True
def record_click(event, x, y, flags, params):
    """OpenCV mouse callback: record a left click and draw its 1-based index.

    params is (positions_list, marker_color, image, window_name); the click
    coordinates are appended to positions_list in place.
    """
    # checking for left mouse clicks
    if event == cv2.EVENT_LBUTTONDOWN:
        postitions, marker_color, image, window_name = params
        font = cv2.FONT_HERSHEY_SIMPLEX
        #append coordinates of click
        postitions.append( [x, y] )
        #increment counter
        count = len(postitions)
        cv2.putText(image, str(count) , (x,y), font,
                    0.75, marker_color, 2)
        cv2.imshow(window_name, image)
def write_correspondence_to_file(file_name, first_positions, second_positions):
    """Write paired points as CSV lines "x1,y1,x2,y2" (ints) to *file_name*.

    Coordinates are truncated to int; the two sequences are consumed pairwise.
    """
    # BUG FIX: context manager guarantees the file is closed (and flushed)
    # even if a malformed entry raises mid-write.
    with open(file_name, "w") as f:
        for (x1, y1), (x2, y2) in zip(first_positions, second_positions):
            f.write(f'{int(x1)},{int(y1)},{int(x2)},{int(y2)}\n')
def get_image_path(title):
    """Open a native file-picker dialog titled *title* and return the chosen path.

    Returns an empty string if the user cancels the dialog.
    """
    root = tk.Tk()
    dummy = root.withdraw()  # hide the empty Tk root window; only the dialog shows
    image_path = filedialog.askopenfilename(initialdir = ".",title = title,filetypes = (("all files","*.*"),("png files","*.png"),("jpg files","*.jpg"),("jpeg files","*.jpeg")))
    return image_path
def get_correspondences(image1_path, image2_path, points_filename='points.txt'):
    """Interactively collect matching click points on two images.

    Shows both images, records left-clicks via record_click until any key is
    pressed, then truncates to the shorter list and returns two float32
    (N, 2) arrays. NOTE(review): *points_filename* is accepted but never
    used here — confirm whether the caller was meant to save the points.
    """
    first_image = cv2.imread(image1_path, 1)
    second_image = cv2.imread(image2_path,1)
    # displaying the image
    first_window_name = 'First Image'
    second_window_name = 'Second Image'
    cv2.imshow(first_window_name, first_image)
    cv2.imshow(second_window_name, second_image)
    #setting up parameters to be passed to the mouse click event callback for each window
    first_image_positions = [] #list of points in first image
    first_color = (255, 0, 0) #mark the points in blue for first image
    second_image_positions = []
    second_color = (0, 0, 255) # mark points in red for the second image
    first_window_param = (first_image_positions, first_color, first_image, first_window_name)
    second_window_param = (second_image_positions, second_color, second_image, second_window_name)
    cv2.setMouseCallback(first_window_name, record_click,param=first_window_param )
    cv2.setMouseCallback(second_window_name, record_click,param=second_window_param )
    # wait for a key to be pressed to exit
    cv2.waitKey(0)
    # close the window
    cv2.destroyAllWindows()
    #ignore points that have no correspondence
    length = min(len(first_image_positions), len(second_image_positions))
    first_image_positions = first_image_positions[0:length]
    second_image_positions = second_image_positions[0:length]
    first_image_positions = np.array(first_image_positions, np.float32)
    second_image_positions = np.array(second_image_positions, np.float32)
    return first_image_positions, second_image_positions
| marwansalem/image-stitching | correspondences.py | correspondences.py | py | 3,583 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_... |
26297648004 | from itertools import product
def create_dice(sides):
    """Return the face values 1..sides of a die as a list."""
    return [face + 1 for face in range(sides)]
# Interactive script: read dice count and side count, then print a '*'
# histogram of how many outcome combinations produce each possible sum.
number_of_dices = int(input("Mata in antalet tärningar:\n"))
number_of_sides = int(input("Mata in antalet sidor för tärningarna:\n"))
highest_sum = number_of_sides * number_of_dices
lowest_sum = number_of_dices
dice = create_dice(number_of_sides)  # NOTE(review): unused below; product() builds the faces itself
# One histogram bucket (string of '*') per reachable sum.
d = {}
for i in range(lowest_sum, highest_sum+1):
    d[i] = ""
# Every ordered outcome of all dice (Cartesian product of the face values).
res = list(product(range(1, number_of_sides + 1), repeat = number_of_dices))
for item in res:
    for key in d:
        if key == sum(item):
            d[key] += "*"
            break
print("Resultat:")
for i in d:
    print("{:<4}{}".format(i, d[i]))
| hadi-ansari/TDP002 | gamla_tentor_tdp002/2018/uppgift2.py | uppgift2.py | py | 730 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.product",
"line_number": 25,
"usage_type": "call"
}
] |
24193842042 | import pandas as pd
from pathlib import Path
from enum import Enum
pd.options.mode.chained_assignment = None # default='warn'
class Location(Enum):
    """Whether a team appears in the home or the visiting column of a game log."""
    home = 1
    visiting = 2
#not used atm
def get_last_occurrence(team_id, location):
    """Return the index of *team_id*'s most recent game at *location*.

    Reads the merged game-log CSV on every call. May return None when the
    team never appears in the requested column.
    """
    data_folder = Path("../")
    all_game_file = data_folder / "_mlb_remerged_all.csv"
    df_game_logs = pd.read_csv(all_game_file, index_col=0)
    if location == Location.home:
        last_occurrence = df_game_logs.where(df_game_logs['Home team'] == team_id).last_valid_index()
        print('home')
    else:
        last_occurrence = df_game_logs.where(df_game_logs['Visiting team'] == team_id).last_valid_index()
        print('away')
    return last_occurrence
#not used atm
def get_all_teams():
    """Print the unique team IDs (and their count) found in the merged CSV."""
    data_folder = Path("../")
    all_game_file = data_folder / "_mlb_remerged_all.csv"
    df_teams = pd.read_csv(all_game_file)
    df_unique_id = df_teams['teamID'].unique()
    print(df_unique_id)
    print(df_unique_id.size)
def get_team_data(home_id, visit_id):
    """Merge the latest home row of *home_id* with the latest visiting row of *visit_id*.

    Loads the merged game-log CSV, selects the "Home ..." columns from the
    home team's most recent home game and the "Visiting ..." columns from the
    visiting team's most recent away game, concatenates them side by side and
    prints the result.
    """
    data_folder = Path("../")
    all_game_file = data_folder / "_mlb_remerged_all.csv"
    df_all_games = pd.read_csv(all_game_file)
    print("Number of Columns: " + str(len(df_all_games.columns)-2))
    df_columns = df_all_games.columns.values.tolist()
    home_team_columns = [i for i in df_columns if "Home" in i]
    print("Number of Home Columns: " + str(len(home_team_columns)))
    visiting_team_columns = [i for i in df_columns if "Visiting" in i]
    print("Number of Visiting Columns: " + str(len(visiting_team_columns)))
    # NOTE(review): last_valid_index() returns None for an unknown team id,
    # which would make the iloc lookup below raise — confirm inputs are valid.
    last_occurrence_home = df_all_games.where(df_all_games['Home team'] == home_id).last_valid_index()
    #home_team_data = df_all_games.iloc[[get_last_occurrence(home_id, Location.home)]]
    home_team_data = df_all_games.iloc[[last_occurrence_home]]
    home_team_to_home_column = home_team_data[home_team_columns]
    last_occurrence_away = df_all_games.where(df_all_games['Visiting team'] == visit_id).last_valid_index()
    #visiting_team_data = df_all_games.iloc[[get_last_occurrence(visit_id, Location.visiting)]]
    visiting_team_data = df_all_games.iloc[[last_occurrence_away]]
    visiting_team_to_visiting_column = visiting_team_data[visiting_team_columns]
    # Re-index the visiting row so concat aligns the two single-row frames.
    df_merged_data = pd.concat([home_team_to_home_column,
                                visiting_team_to_visiting_column.set_index(home_team_to_home_column.index)], axis=1)
    print(df_merged_data)
# Demo invocation: merge latest SEA (home) and DET (visiting) rows at import time.
get_team_data('SEA', 'DET')
#get_all_teams()
| timucini/MLB-DeepLearning-Project | Verworfen/TeamIdMerge2.py | TeamIdMerge2.py | py | 2,468 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.options",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "enum.Enum",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_... |
30974355669 | import importlib
import shutil
import uuid
from typing import Dict, List, Tuple, Union
from torch import Tensor, nn
from torchdistill.common import module_util
from pathlib import Path
import torch
import time
import gc
import os
from logging import FileHandler, Formatter
from torchdistill.common.file_util import check_if_exists
from torchdistill.common.main_util import is_main_process, load_ckpt
from torchdistill.losses.util import register_func2extract_org_output
from torchdistill.models.official import get_image_classification_model
from torchdistill.models.registry import get_model
from torchdistill.common.constant import def_logger, LOGGING_FORMAT
import numpy as np
from torchinfo import summary
logger = def_logger.getChild(__name__)
def make_dirs(dir_path):
    """Create *dir_path* and any missing parents; a pre-existing dir is fine."""
    target = Path(dir_path)
    target.mkdir(parents=True, exist_ok=True)
def make_parent_dirs(file_path):
    """Ensure the parent directory of *file_path* exists (creating parents as needed)."""
    parent = Path(file_path).parent
    parent.mkdir(parents=True, exist_ok=True)
def setup_log_file(log_file_path, mode='w'):
    """Attach a FileHandler writing to *log_file_path* to the shared project logger.

    Parent directories are created first; *mode* is the file open mode
    ('w' to truncate, 'a' to append).
    """
    make_parent_dirs(log_file_path)
    fh = FileHandler(filename=log_file_path, mode=mode)
    fh.setFormatter(Formatter(LOGGING_FORMAT))
    def_logger.addHandler(fh)
def _summary_params(module: nn.Module, attr: str, device=None) -> int:
    """Parameter count of ``module.<attr>`` via torchinfo, or 0 when the
    (optional) submodule is absent. *device* is only forwarded when given,
    matching the original per-submodule call signatures."""
    if not hasattr(module, attr):
        return 0
    kwargs = dict(col_names=["num_params"], verbose=0, mode="eval")
    if device is not None:
        kwargs["device"] = device
    return summary(getattr(module, attr), **kwargs).total_params


def calc_compression_module_sizes(bnet_injected_model: nn.Module,
                                  device: str,
                                  input_size: Tuple[int, int, int, int] = (1, 3, 224, 224),
                                  log_model_summary: bool = True) -> Tuple[str, int, Dict[str, int]]:
    """
    Calc params and sizes individual components of compression module
    Returns (summary string, #params model, per-component encoder params dict)
    """
    assert hasattr(bnet_injected_model, 'compression_module'), "Model has no compression module"
    model_summary = summary(bnet_injected_model, input_size=input_size,
                            col_names=['input_size', 'output_size', 'mult_adds', 'num_params'],
                            depth=5,
                            device=device,
                            verbose=0,
                            mode="eval")
    model_params = model_summary.total_params
    if log_model_summary:
        logger.info(f"Bottleneck Injected model params:\n{model_summary}")
    comp = bnet_injected_model.compression_module
    # REFACTOR: the 8 copy-pasted hasattr/summary blocks are collapsed into
    # _summary_params; counts and behavior are unchanged for present modules,
    # and a missing entropy_bottleneck now yields 0 instead of AttributeError.
    # Compression module core.
    p_analysis = _summary_params(comp, "g_a", device=device)
    p_synthesis = _summary_params(comp, "g_s", device=device)
    # Optional hyperprior (side-information) transforms.
    p_hyper_analysis = _summary_params(comp, "h_a")
    p_hyper_synthesis = _summary_params(comp, "h_s")
    p_hyper_analysis_2 = _summary_params(comp, "h_a_2")
    p_hyper_synthesis_2 = _summary_params(comp, "h_s_2")
    # Optional autoregressive context components.
    p_context_prediction = _summary_params(comp, "context_prediction")
    p_entropy_parameters = _summary_params(comp, "entropy_parameters")
    # Entropy estimation.
    params_eb = _summary_params(comp, "entropy_bottleneck")
    params_comp_module = summary(comp, col_names=["num_params"],
                                 verbose=0).total_params
    summary_str = f""" 
            Compression Module Summary:
            Params Analysis: {p_analysis:,}
            Params Synthesis: {p_synthesis:,}
            Params Hyper Analysis: {p_hyper_analysis:,}
            Params Hyper Synthesis: {p_hyper_synthesis:,}
            Params Hyper Analysis 2: {p_hyper_analysis_2:,}
            Params Hyper Synthesis 2: {p_hyper_synthesis_2:,}
            Params Context Prediction: {p_context_prediction:,}
            Params Entropy Parameters: {p_entropy_parameters :,}
            Params Entropy Bottleneck: {params_eb:,}
            Total Params Compression Module: {params_comp_module:,}
            Which makes up  {params_comp_module / model_params * 100:.2f}% of the total model params
            """
    enc_params_main = p_analysis
    enc_params_hyper = p_hyper_analysis + p_hyper_synthesis + p_hyper_analysis_2 + p_hyper_synthesis_2
    enc_params_context_module = p_entropy_parameters + p_context_prediction
    total_encoder = enc_params_main + enc_params_hyper + enc_params_context_module
    return summary_str, model_params, { "Main Network": enc_params_main,
                                        "Hyper Network": enc_params_hyper,
                                        "Context Module": enc_params_context_module,
                                        "Total Encoder Params": total_encoder}
def calc_compression_module_overhead(bnet_injected_model: nn.Module,
                                     base_model: nn.Module,
                                     device: str,
                                     input_size: Tuple[int, int, int, int] = (1, 3, 224, 224),
                                     log_model_summary: bool = True) -> Tuple[str, int, Dict[str, int]]:
    """Compare a bottleneck-injected model against its base classification model.

    Returns (summary string with the relative parameter overhead appended,
    total params of the injected model, per-component encoder params dict).
    """
    model_summary = summary(base_model, input_size=input_size,
                            col_names=['input_size', 'output_size', 'mult_adds', 'num_params'],
                            depth=3,
                            device=device,
                            verbose=0,
                            mode="eval")
    if log_model_summary:
        logger.info(f"Base model params:\n{model_summary}")
    # in case teacher model is a mock model (avoid division by zero below)
    teacher_params = model_summary.total_params or 1
    summary_str, model_params, enc_params = calc_compression_module_sizes(bnet_injected_model,
                                                                          device,
                                                                          input_size,
                                                                          log_model_summary)
    summary_str = f"""{summary_str}
    Incurring a total overhead of {(model_params - teacher_params) / teacher_params * 100:.2f}% in parameters w.r.t the original classification model
    """
    return summary_str, model_params, enc_params
def freeze_module_params(modules: Union[List[nn.Module], nn.Module]):
    """Disable gradient tracking on every parameter of the given module(s).

    Accepts a single module or a list of modules.
    """
    # BUG FIX: the original called isinstance(list, modules) (arguments
    # swapped), which raised TypeError for any input. The bare
    # @torch.inference_mode decorator was also dropped: flipping the
    # requires_grad flag needs no autograd context at all.
    if not isinstance(modules, list):
        modules = [modules]
    for module in modules:
        for param in module.parameters():
            param.requires_grad = False
def unfreeze_module_params(modules: Union[List[nn.Module], nn.Module]):
    """Re-enable gradient tracking on every parameter of the given module(s).

    Accepts a single module or a list of modules.
    """
    # BUG FIX: the original set requires_grad = False (copy-paste from the
    # freeze helper), making "unfreeze" freeze instead; it also crashed first
    # on the swapped isinstance(list, modules) call. The bare
    # @torch.inference_mode decorator is unnecessary for flag mutation.
    if not isinstance(modules, list):
        modules = [modules]
    for module in modules:
        for param in module.parameters():
            param.requires_grad = True
def chmod_r(path: str, mode: int):
    """Recursively apply *mode* to *path* and everything beneath it.

    Silently does nothing when *path* does not exist.
    """
    if not os.path.exists(path):
        return
    os.chmod(path, mode)
    for root, dirnames, filenames in os.walk(path):
        for entry in dirnames + filenames:
            os.chmod(os.path.join(root, entry), mode)
def mkdir(folder: str):
    """Create *folder* (with parents); an already-existing folder is a no-op."""
    # exist_ok makes the explicit existence pre-check unnecessary.
    os.makedirs(folder, exist_ok=True)
def prepare_log_file(test_only, log_file_path, config_path, start_epoch, overwrite=False):
    """Derive a log-file path from the config name and attach it to the logger.

    Evaluation runs get an "_eval" suffix. Fresh runs (start_epoch == 0 or
    *overwrite*) get a uniquified path opened in 'w' mode; resumed runs append.
    Only the main process (distributed training) sets up the handler.
    """
    eval_file = "_eval" if test_only else ""
    if is_main_process():
        if log_file_path:
            log_file_path = f"{os.path.join(log_file_path, Path(config_path).stem)}{eval_file}.log"
        else:
            # Mirror the config tree: .../config/... -> .../logs/...
            log_file_path = f"{config_path.replace('config', 'logs', 1)}{eval_file}.log"
        if start_epoch == 0 or overwrite:
            log_file_path = uniquify(log_file_path)
            mode = 'w'
        else:
            mode = 'a'
        setup_log_file(os.path.expanduser(log_file_path), mode=mode)
def rm_rf(path: str):
    """
    Recursively removes a file or directory
    """
    if not path or not os.path.exists(path):
        return
    try:
        # Best-effort: make everything writable first so removal cannot fail
        # on read-only entries.
        chmod_r(path, 0o777)
    except PermissionError:
        pass
    exists_but_non_dir = os.path.exists(path) and not os.path.isdir(path)
    if os.path.isfile(path) or exists_but_non_dir:
        os.remove(path)
    else:
        shutil.rmtree(path)
def to_token_tensor(t: Tensor):
    """Convert an image tensor (B, C, H, W) to token layout (B, H*W, C).

    Tensors that are already 3-D are returned unchanged.
    """
    if len(t.shape) == 3:
        return t
    return t.flatten(start_dim=2).permute(0, 2, 1)
def to_img_tensor(t: Tensor, resolution):
    """Convert a token tensor (B, H*W, C) back to image layout (B, C, H, W).

    *resolution* is the target (H, W). 4-D inputs are assumed to already be
    in image layout and are returned untouched (with a notice).
    """
    if len(t.shape) == 4:
        print("Tensor already in img shape")
        return t
    B, _, C = t.shape
    H, W = resolution
    # BUG FIX: .view() raises on the non-contiguous result of transpose();
    # .reshape() copies when necessary and handles it.
    return t.transpose(1, 2).reshape(B, -1, H, W)
class AverageMeter:
    """Tracks the latest value and the running (cumulative) average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt  # format spec kept for callers that print the meter
        self.reset()
        # NOTE(review): these four assignments duplicate reset(); kept for
        # byte-compatibility but they are redundant.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def check_if_module_exits(module, module_path):
    """Return True if the dotted *module_path* resolves to an attribute chain on *module*."""
    parts = module_path.split('.')
    current = module
    for name in parts[:-1]:
        if not hasattr(current, name):
            return False
        current = getattr(current, name)
    return hasattr(current, parts[-1])
def extract_entropy_bottleneck_module(model):
    """Return the submodule that owns an entropy_bottleneck, or None.

    Unwraps DDP first, then checks the two naming conventions used in this
    project ("compression_module" and "compression_model").
    """
    model_wo_ddp = model.module if module_util.check_if_wrapped(model) else model
    entropy_bottleneck_module = None
    if check_if_module_exits(model_wo_ddp, "compression_module.entropy_bottleneck"):
        entropy_bottleneck_module = module_util.get_module(model_wo_ddp, "compression_module")
    elif check_if_module_exits(model_wo_ddp, 'compression_model.entropy_bottleneck'):
        entropy_bottleneck_module = module_util.get_module(model_wo_ddp, "compression_model")
    return entropy_bottleneck_module
def compute_bitrate(likelihoods, input_size):
    """Total bits and bits-per-pixel implied by *likelihoods*.

    *input_size* is the (b, c, h, w) shape of the original input; bpp is
    normalized by batch * height * width.
    """
    batch, _, height, width = input_size
    detached = likelihoods.detach().cpu()
    total_bits = -(detached.log2().sum())
    bits_per_pixel = total_bits / (batch * height * width)
    return bits_per_pixel, total_bits
def start_timer():
    """Start a CUDA-synchronized wall-clock timer (stored in module global start_time)."""
    global start_time
    gc.collect()
    torch.cuda.empty_cache()
    # NOTE(review): reset_max_memory_allocated is deprecated in newer torch in
    # favor of reset_peak_memory_stats — confirm the targeted torch version.
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.synchronize()  # ensure pending kernels don't leak into the timing
    start_time = time.time()
def end_timer():
    """Stop the timer started by start_timer() and return elapsed seconds."""
    torch.cuda.synchronize()  # wait for all GPU work before reading the clock
    end_time = time.time()
    return end_time - start_time
def load_model(model_config, device, distributed, skip_ckpt=False,
               load_stage1_ckpt=False,
               apply_quantization=False,
               load_orig=False):
    """Instantiate a model from *model_config*, optionally quantize and load weights.

    Falls back to the generic model registry when no official classification
    model matches. Checkpoint selection: 'ckpt_orig' when *load_orig*,
    'ckpt_stage1' when *load_stage1_ckpt*, else 'ckpt'. Returns the model
    moved to *device*.
    """
    model = get_image_classification_model(model_config, distributed)
    if model is None:
        repo_or_dir = model_config.get('repo_or_dir', None)
        model = get_model(model_config['name'], repo_or_dir, **model_config['params'])
    if apply_quantization:
        model.prepare_quantization()
        model.apply_quantization()
    if not skip_ckpt:
        if load_orig:
            ckpt_file_path = os.path.expanduser(model_config.get('ckpt_orig'))
        else:
            ckpt_file_path = os.path.expanduser(model_config.get('ckpt_stage1') if load_stage1_ckpt else model_config['ckpt'])
        load_ckpt(ckpt_file_path, model=model, strict=True)
    else:
        logger.info('Skipping loading from checkpoint...')
    return model.to(device)
def get_no_stages(train_config):
    """Count how many top-level keys of *train_config* contain the substring 'stage'."""
    return sum(1 for key in train_config if "stage" in key)
def uniquify(path):
    """Return *path* unchanged if free, else append ' (N)' before the extension
    with the smallest N >= 1 that does not collide with an existing file."""
    stem, extension = os.path.splitext(path)
    counter = 1
    while os.path.exists(path):
        path = f"{stem} ({counter}){extension}"
        counter += 1
    return path
def compute_psnr(recon_images, ground_truths):
    """PSNR (dB) between reconstructions and ground truths, assuming a [0, 1] range."""
    with torch.no_grad():
        # NOTE(review): assumes both tensors broadcast to a common shape —
        # confirm callers pass matching batches.
        mse = (recon_images - ground_truths).square().mean()
        return -10. * torch.log10(mse)
def short_uid() -> str:
    """Return the first 8 hex digits of a fresh random UUID4."""
    return uuid.uuid4().hex[:8]
def append_to_filename(filename: str, appendix: str, sep='_'):
    """Insert *sep* + *appendix* between the stem and the extension of *filename*."""
    path_obj = Path(filename)
    new_name = f"{path_obj.stem}{sep}{appendix}{path_obj.suffix}"
    return os.path.join(os.path.dirname(filename), new_name)
def calc_head_size(model,
                   encoder_paths=('compression_module.g_a',
                                  'compression_module.h_s',
                                  'compression_module.h_a')):
    """
    Calculate head size in kB
    """
    # NOTE(review): analyze_model_size returns bit counts, so despite the
    # docstring the value is in bits, not kB — confirm against callers.
    size = analyze_model_size(model, encoder_paths=encoder_paths,
                              additional_rest_paths=('compression_module.g_s', 'backbone'))
    return size['encoder']
def analyze_model_size(model, encoder_paths=None, additional_rest_paths=None, ignores_dtype_error=True):
    """
    Estimate the bit size of *model*'s state dict, split into encoder vs rest.

    Modified version from SC2bench.

    :param model: module whose ``state_dict`` is measured.
    :param encoder_paths: state-dict key prefixes counted as "encoder".
    :param additional_rest_paths: exact keys also counted in "rest" even when
        they match an encoder prefix.
    :param ignores_dtype_error: if True, entries with unexpected dtypes are
        reported and skipped instead of raising TypeError.
    :return: dict of bit counts: {'model', 'encoder', 'rest'}.
    """
    model_size = 0
    encoder_size = 0
    rest_size = 0
    # BUG FIX: the former defaults (None) crashed in set(); treat None as
    # "no paths" so analyze_model_size(model) works out of the box.
    encoder_path_set = set(encoder_paths) if encoder_paths is not None else set()
    additional_rest_path_set = set(additional_rest_paths) if additional_rest_paths is not None else set()
    # todo: exclude buffers
    for k, v in model.state_dict().items():
        if v is None:
            # "fake params" of eagerly quantized modules
            assert 'model_fp32' in k
            continue
        dim = v.dim()
        param_count = 1 if dim == 0 else np.prod(v.size())
        v_dtype = v.dtype
        if v_dtype in (torch.int64, torch.float64):
            num_bits = 64
        elif v_dtype in (torch.int32, torch.float32):
            num_bits = 32
        elif v_dtype in (torch.int16, torch.float16, torch.bfloat16):
            num_bits = 16
        elif v_dtype in (torch.int8, torch.uint8, torch.qint8, torch.quint8):
            num_bits = 8
        elif v_dtype == torch.bool:
            num_bits = 2
        else:
            error_message = f'For {k}, dtype `{v_dtype}` is not expected'
            if ignores_dtype_error:
                print(error_message)
                continue
            else:
                raise TypeError(error_message)
        param_size = num_bits * param_count
        model_size += param_size
        match_flag = False
        for encoder_path in encoder_path_set:
            if k.startswith(encoder_path):
                encoder_size += param_size
                # Decoder-side keys listed explicitly still count as "rest".
                if k in additional_rest_path_set:
                    rest_size += param_size
                match_flag = True
                break
        if not match_flag:
            rest_size += param_size
    return {'model': model_size, 'encoder': encoder_size, 'rest': rest_size}
class GradScaleMockWrapper:
    """Uniform facade over an optional torch.cuda.amp.GradScaler.

    When constructed with None, every call degrades to the plain
    (non-mixed-precision) behavior, so training loops need no branching.
    """

    def __init__(self, scaler):
        self.scaler = scaler

    def scale(self, loss):
        """Scale *loss* when AMP is active; otherwise return it untouched."""
        return self.scaler.scale(loss) if self.scaler else loss

    def step(self, optim):
        """Step the optimizer through the scaler when present, directly otherwise."""
        if self.scaler:
            self.scaler.step(optim)
        else:
            optim.step()

    def update(self):
        """Update the scaler's scale factor; no-op without a scaler."""
        if self.scaler:
            self.scaler.update()
def get_module(module_path):
    """
    Import and return the module named by the dotted *module_path*.
    """
    return importlib.import_module(module_path)
@torch.inference_mode()
def load_ckpt_inf(ckpt_file_path, model=None, optimizer=None, lr_scheduler=None, strict=True):
    """Load a checkpoint (local path or http(s) URL) into the given components.

    Supports both structured checkpoints ({'model', 'optimizer',
    'lr_scheduler', ...}) and raw state dicts: a raw dict is loaded into
    whichever single component was passed. Returns
    (best_value, config, args) from the checkpoint, or (None, None, None)
    when the file is missing.
    """
    if check_if_exists(ckpt_file_path):
        ckpt = torch.load(ckpt_file_path, map_location='cpu')
    elif isinstance(ckpt_file_path, str) and \
            (ckpt_file_path.startswith('https://') or ckpt_file_path.startswith('http://')):
        ckpt = torch.hub.load_state_dict_from_url(ckpt_file_path, map_location='cpu', progress=True)
    else:
        logger.info('ckpt file is not found at `{}`'.format(ckpt_file_path))
        return None, None, None
    if model is not None:
        if 'model' in ckpt:
            logger.info('Loading model parameters')
            # NOTE(review): both branches are identical; strict is never None
            # in practice since the default is True — verify before simplifying.
            if strict is None:
                model.load_state_dict(ckpt['model'], strict=strict)
            else:
                model.load_state_dict(ckpt['model'], strict=strict)
        elif optimizer is None and lr_scheduler is None:
            # Raw state dict targeted at the model alone.
            logger.info('Loading model parameters only')
            model.load_state_dict(ckpt, strict=strict)
        else:
            logger.info('No model parameters found')
    if optimizer is not None:
        if 'optimizer' in ckpt:
            logger.info('Loading optimizer parameters')
            optimizer.load_state_dict(ckpt['optimizer'])
        elif model is None and lr_scheduler is None:
            logger.info('Loading optimizer parameters only')
            optimizer.load_state_dict(ckpt)
        else:
            logger.info('No optimizer parameters found')
    if lr_scheduler is not None:
        if 'lr_scheduler' in ckpt:
            logger.info('Loading scheduler parameters')
            lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
        elif model is None and optimizer is None:
            logger.info('Loading scheduler parameters only')
            lr_scheduler.load_state_dict(ckpt)
        else:
            logger.info('No scheduler parameters found')
    return ckpt.get('best_value', 0.0), ckpt.get('config', None), ckpt.get('args', None)
@register_func2extract_org_output
def extract_org_loss_map(org_criterion, student_outputs, teacher_outputs, targets, uses_teacher_output, **kwargs):
    """Compute the original (non-distillation) loss terms as an index-keyed dict.

    Handles models with auxiliary classifiers (list/tuple outputs) by emitting
    one loss per sub-output; otherwise the single loss goes under key 0.
    Returns an empty dict when no criterion is configured.
    """
    org_loss_dict = dict()
    if org_criterion is not None:
        # Models with auxiliary classifier returns multiple outputs
        if isinstance(student_outputs, (list, tuple)):
            if uses_teacher_output:
                org_loss_dict[0] = org_criterion(student_outputs, teacher_outputs, targets)
            else:
                for i, sub_outputs in enumerate(student_outputs):
                    org_loss_dict[i] = org_criterion(sub_outputs, targets)
        else:
            org_loss = org_criterion(student_outputs, teacher_outputs, targets) if uses_teacher_output \
                else org_criterion(student_outputs, targets)
            org_loss_dict = {0: org_loss}
    return org_loss_dict
def normalize_range(t: Tensor, new_min: float = 0.0, new_max: float = 1.0) -> Tensor:
    """Linearly rescale *t* so its min maps to *new_min* and its max to *new_max*.

    NOTE(review): a constant tensor divides by zero (NaN/inf output), matching
    the original behavior.
    """
    lo = torch.min(t)
    hi = torch.max(t)
    scaled = (t - lo) / (hi - lo)
    return scaled * (new_max - new_min) + new_min
| rezafuru/FrankenSplit | misc/util.py | util.py | py | 20,533 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "torchdistill.common.constant.def_logger.getChild",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torchdistill.common.constant.def_logger",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 29,
"usage_type": ... |
30461364600 | # village_id, name, x, y, idx, pts, b
import pdb
import utils
import numpy as np
if __name__ == "__main__":
files = utils.getLastFiles()
villages = utils.read_villages(files["villages"])
coords = villages['coords']
pts = villages["points"]
v_barb = (villages['player'] == 0)
v_player = (villages['player'] != 0)
WORLD_POINTS = np.zeros([10,10], dtype=int)
WORLD_PLAYERS = np.zeros([10,10], dtype=int)
WORLD_BARB = np.zeros([10,10], dtype=int)
for k in range(100):
xlim, ylim = utils.contLimits(k)
# pdb.set_trace()
validx = (coords[:,0] >= xlim[0]) & (coords[:,0] < xlim[1])
validy = (coords[:,1] >= ylim[0]) & (coords[:,1] < ylim[1])
valid = validx & validy & v_player
valid_pts = pts[valid.nonzero()[0]]
if len(valid_pts) > 0:
m = np.mean(valid_pts)
x, y = k%10, k//10
WORLD_POINTS[y, x] = int(m)
WORLD_PLAYERS[y, x] = int(len(valid_pts))
valid = validx & validy & v_barb
valid_pts = pts[valid.nonzero()[0]]
if len(valid_pts) > 0:
WORLD_BARB[y, x] = int(len(valid_pts))
utils.plotMat(WORLD_POINTS, "Média de pontos", False)
utils.plotMat(WORLD_PLAYERS, "Número de Aldeias Ativas", False)
utils.plotMat(WORLD_BARB, "Número de Bárbaras", False)
| felipecadar/tw-scripts | plot_world.py | plot_world.py | py | 1,374 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "utils.getLastFiles",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "utils.read_villages",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
... |
5216604834 | from dataclasses import dataclass
from struct import Struct
from .bytes import Bytes
KEY_HASH = Struct("<HH")
ENCRYPTED_MESSAGE = Struct("<BIIII16s16s")
@dataclass
class SignedMessage:
    """The cryptographic message portion of Session Offer."""

    flags: int          # protocol flags byte
    key_slot: int       # index of the key slot in use
    key_mask: int       # mask applied to the key slot
    challenge: bytes    # length-prefixed challenge payload (see properties below)
    echo: int           # echo value the peer must mirror back

    @classmethod
    def read(cls, buf: Bytes) -> "SignedMessage":
        """Deserialize a SignedMessage from the start of *buf* (rewinds first)."""
        buf.seek(0)
        flags = buf.u8()
        key_slot = buf.u8()
        key_mask = buf.u8()
        challenge_len = buf.u8()
        challenge = buf.read(challenge_len)
        echo = buf.u32()

        return cls(flags, key_slot, key_mask, challenge, echo)

    def write(self, buf: Bytes) -> int:
        """Serialize into *buf* from position 0, truncating any leftover bytes.

        Returns the number of bytes written.
        """
        buf.seek(0)

        written = 0
        written += buf.write_u8(self.flags)
        written += buf.write_u8(self.key_slot)
        written += buf.write_u8(self.key_mask)
        written += buf.write_u8(len(self.challenge))
        written += buf.write(self.challenge)
        written += buf.write_u32(self.echo)

        buf.truncate()
        return written

    @property
    def hash_region(self) -> tuple[int, int]:
        """(start, length) pair parsed from the first 4 bytes of the challenge."""
        return KEY_HASH.unpack_from(self.challenge)

    @property
    def challenge_type(self) -> int:
        """Challenge type tag stored at byte offset 4."""
        return self.challenge[4]

    @property
    def challenge_buf(self) -> bytes:
        """Challenge body following the 5-byte header."""
        return self.challenge[5:]
@dataclass
class EncryptedMessage:
    """The cryptographic message portion of Session Accept."""

    flags: int              # protocol flags byte
    key_hash: int           # hash over the negotiated key region
    challenge_answer: int   # answer to the peer's challenge
    echo: int               # echoed value from the Session Offer
    timestamp: int          # sender timestamp
    key: bytes              # 16-byte session key material
    nonce: bytes            # 16-byte nonce

    @classmethod
    def read(cls, buf: Bytes) -> "EncryptedMessage":
        """Deserialize from the start of *buf* using the fixed little-endian layout."""
        buf.seek(0)
        args = buf.read_struct(ENCRYPTED_MESSAGE)
        return cls(*args)

    def write(self, buf: Bytes) -> int:
        """Serialize into *buf* from position 0, truncating leftovers.

        Returns the number of bytes written.
        """
        buf.seek(0)
        written = buf.write_struct(
            ENCRYPTED_MESSAGE,
            self.flags,
            self.key_hash,
            self.challenge_answer,
            self.echo,
            self.timestamp,
            self.key,
            self.nonce,
        )

        buf.truncate()
        return written
| vbe0201/wizproxy | wizproxy/proto/handshake.py | handshake.py | py | 2,130 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "struct.Struct",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "struct.Struct",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bytes.Bytes",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "bytes.Bytes",
"line_number"... |
39893499902 | from django.urls import path
from . import views
# app_name = ''
urlpatterns = [
    # GET: list all reviews, POST: create a review
    path('', views.review_list),
    # GET: retrieve a single review, DELETE: delete it, PUT: update it
    path('<int:review_pk>/', views.review_update_delete),
    # POST: create a comment for a review
    # NOTE(review): the original comment said "list all comments" but the view
    # is named comment_create — confirm the intended methods in the view.
    path('<int:review_pk>/comments/', views.comment_create),
    # GET: retrieve a single comment, PUT: update it, DELETE: delete it
    path('comments/<int:comment_pk>/', views.comment_update_delete),
    ######################################################
    # Review likes
    path('<int:review_pk>/likes/', views.review_likes),
]
| chomyoenggeun/linkedmovie | server/community/urls.py | urls.py | py | 707 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
35512006282 | import trimesh
import numpy as np
from sklearn.neighbors import KDTree
from trimesh.proximity import ProximityQuery
def transform_mesh(mesh, trans_name, trans_params):
    """Dispatch a named mesh transformation.

    Args:
        mesh: trimesh mesh to transform (handlers may modify it in place).
        trans_name: one of 'preprocess', 'refine', 'rotate'.
        trans_params: dict of keyword arguments forwarded to the handler.

    Returns:
        The transformed mesh.

    Raises:
        ValueError: if trans_name is not a known transformation.
    """
    if trans_name == 'preprocess':
        mesh = preprocess_mesh(mesh, **trans_params)
    elif trans_name == 'refine':
        mesh = refine_mesh(mesh, **trans_params)
    elif trans_name == 'rotate':
        mesh = rotate_mesh(mesh, **trans_params)
    else:
        # BUG FIX: the original used the %d format spec on a string name,
        # which raised TypeError instead of the intended ValueError.
        raise ValueError("transformation type %s is not defined" % trans_name)
    return mesh
def preprocess_mesh(mesh,
                    mesh_out=None,
                    merge_vertex=True,
                    with_scaling_to_unit_box=True,
                    scaler=2):
    """Validate a mesh and optionally normalize it to a centered cube.

    Optionally merges duplicate vertices, verifies watertightness, recenters
    and rescales the vertices so the bounding box spans ``scaler``, and
    exports the result as OBJ when ``mesh_out`` is given.

    Raises:
        ValueError: if the mesh is not watertight.
    """
    if merge_vertex:
        mesh.merge_vertices(merge_tex=True, merge_norm=True)
    if not mesh.is_watertight:
        raise ValueError('mesh is not watertight')
    if with_scaling_to_unit_box:
        center = mesh.bounding_box.centroid
        scale_factor = scaler / np.max(mesh.bounding_box.extents)
        mesh.vertices = (mesh.vertices - center) * scale_factor
    if mesh_out is not None:
        with open(mesh_out, 'w') as fid:
            mesh.export(fid, file_type='obj')
    return mesh
def refine_mesh(mesh,
                mesh_out=None,
                mesh_refine_size=0.1,
                show=False):
    """
    generate refined surface mesh

    Remeshes the surface at target element size `mesh_refine_size` and
    optionally exports the result as OBJ to `mesh_out`.
    NOTE(review): `refine_surface_mesh` is neither defined nor imported in
    this module's visible imports -- confirm where it comes from.
    """
    refined_mesh = refine_surface_mesh(mesh, mesh_size=mesh_refine_size, show=show)
    if mesh_out is not None:
        with open(mesh_out, 'w') as fid:
            refined_mesh.export(fid, file_type='obj')
    return refined_mesh
def rotate_mesh(mesh,
                matrix=None):
    """Apply a rigid transform to the mesh in place (a random rotation is
    drawn when `matrix` is None) and return the mesh."""
    if matrix is None:
        matrix = trimesh.transformations.random_rotation_matrix()
    mesh.apply_transform(matrix)
    return mesh
def get_volume_points_randomly(n_points, scaler=2):
    """Sample `n_points` uniform random points in the cube [-scaler/2, scaler/2]^3."""
    return scaler * (np.random.random((n_points, 3)) - 0.5)
def get_rasterized_points(voxel_resolution, scaler=2):
    """Regular grid of voxel_resolution**3 points covering the cube
    [-scaler/2, scaler/2]^3, returned as an (N, 3) float32 array in
    row-major (i, j, k) order."""
    half_width = scaler / 2
    axis = np.linspace(-half_width, half_width, voxel_resolution)
    grid = np.stack(np.meshgrid(axis, axis, axis))
    # meshgrid defaults to 'xy' indexing; swapping axes 1 and 2 restores
    # 'ij' ordering so rows enumerate (i, j, k) lexicographically.
    grid = np.swapaxes(grid, 1, 2)
    return grid.reshape(3, -1).T.astype(np.float32)
def get_sdf(mesh, points):
    """Signed distance of `points` to the mesh surface, negated so values are
    positive outside and negative inside (trimesh's signed_distance uses the
    opposite sign convention)."""
    return - ProximityQuery(mesh).signed_distance(points)
# def remove_volume_edges(node_attr, edge_idx):
# on_surface_idx = np.where(node_attr[:, -1] == 1)[0]
# mask = np.isin(edge_idx, on_surface_idx).any(axis=0)
# new_edge_idx = edge_idx[:, mask]
# return new_edge_idx
def ball_query(x1, x2, radius=0.1, min_n_edges=3, max_n_edges=50):
    """Connect each point in `x1` to its neighbours in `x2` within `radius`.

    Each source keeps at least its `min_n_edges` nearest neighbours (even if
    they fall outside the radius) and at most `max_n_edges`.
    Returns a (2, n_edges) int array of [x1_index, x2_index] pairs.
    """
    k = min(len(x2), max_n_edges)
    tree = KDTree(x2)
    dist, idx = tree.query(x1, k=k)
    s1, s2 = idx.shape
    # Pair every source row index with each of its k neighbour indices.
    idx = np.stack((np.tile(np.arange(s1), (s2, 1)).T, idx), axis=2).reshape(-1, 2)  # get list of pairs
    indicator = dist < radius
    indicator[:, :min_n_edges] = 1  # set the minimum number of edges
    indicator = indicator.reshape(-1)
    idx = idx[indicator]  # set the radius of proximity
    edges = idx.T
    return edges
def get_edges_with_ball_query(x, radius=0.1, min_n_edges=3, max_n_edges=50, n_features_to_consider=3,
                              with_volume_edges=True):
    """Build graph edges over node features `x` via ball queries.

    Only the first `n_features_to_consider` columns (the coordinates) are
    used for proximity. With `with_volume_edges` every node connects to its
    neighbours; otherwise surface nodes (last feature == 1) connect to all
    nodes while volume nodes connect only to surface nodes.
    NOTE(review): the index offset applied to the volume->surface edges
    assumes rows of `x` are ordered surface-first -- confirm with the data
    pipeline.
    """
    points = x[:, :n_features_to_consider]
    if with_volume_edges:
        edges = ball_query(points, points, radius=radius, min_n_edges=min_n_edges, max_n_edges=max_n_edges)
    else:
        # Last column is treated as an on-surface indicator (1 == surface).
        sdf_indicator = x[:, -1]
        surface_points = points[sdf_indicator == 1]
        volume_points = points[sdf_indicator != 1]
        edges1 = ball_query(surface_points, points, radius=radius, min_n_edges=min_n_edges, max_n_edges=max_n_edges)
        edges2 = ball_query(volume_points, surface_points, radius=radius, min_n_edges=min_n_edges, max_n_edges=max_n_edges)
        # Shift local volume indices to global row numbers.
        edges2[0] = edges2[0] + len(surface_points)
        edges = np.concatenate((edges1, edges2), axis=1)
    return edges
def add_reversed_edges(edges):
    """Append the reversed (dst -> src) counterpart of every edge.

    `edges` is a (2, n) array; the result is (2, 2n).
    """
    return np.concatenate((edges, edges[::-1]), axis=1)
def add_self_edges(edges):
    """Append one self-loop edge (i, i) for every node id in 0..edges.max()."""
    n_nodes = edges.max() + 1
    loops = np.tile(np.arange(n_nodes), (2, 1))
    return np.concatenate((edges, loops), axis=1)
def compute_edge_features(x, edge_index):
    """Edge feature = difference of endpoint node features (first row of
    `edge_index` minus second row)."""
    src, dst = edge_index
    return x[src, :] - x[dst, :]
def get_mesh_edges(mesh):
    """Return the mesh edge list transposed to shape (2, n_edges)."""
    return mesh.edges.T
| amaleki2/graph_sdf | src/data_utils.py | data_utils.py | py | 4,749 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.max",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "trimesh.transformations.random_rotation_matrix",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "trimesh.transformations",
"line_number": 67,
"usage_type": "attribute"
},
{
... |
25452837300 | """Test whether the app increased LDs
by comparing a 7-session total against a baseline week.
"""
import os
import numpy as np
import pandas as pd
import pingouin as pg
from scipy.stats import sem
import utils
#### Choose export paths.
basename = "app_effect"
export_dir = os.path.join(utils.Config.data_directory, "results")
export_fname_data = os.path.join(export_dir, f"{basename}-data.csv")
# export_fname_descr = os.path.join(export_dir, f"{basename}-descriptives.csv")
export_fname_stats = os.path.join(export_dir, f"{basename}-stats.csv")
# export_fname_plot = os.path.join(export_dir, f"{basename}-plot.csv")
export_fname_timedesc = os.path.join(export_dir, f"{basename}-timedesc.csv")
################################# Load and wrangle data.
df = utils.load_data("merged")
# There might be a few dreams without a lucidity rating.
df = df.dropna(subset=["lucidSelfRating"])
# Convert boolean lucid success column to integer (1s/0s) for later math.
df["lucidSelfRating"] = df["lucidSelfRating"].astype(int)
# Shouldn't be more than 7 sessions but just to be sure.
df = df[df["sessionID"].isin([1,2,3,4,5,6,7])]
# Most sessions have just one trial, but some need to be aggregated into a single score.
# Sum the number of LDs for each session.
session_df = df.groupby(["subjectID", "sessionID"], as_index=False
    )["lucidSelfRating"].agg("sum")
# Reduce number of LDs to simple yes/no (1/0) lucidity. (doesn't change much, only a few have >1)
session_df["lucidSelfRating"] = session_df["lucidSelfRating"].ge(1).astype(int)
# Pivot out to a table that has sessions as columns
table = session_df.pivot(columns="sessionID", values="lucidSelfRating", index="subjectID")
# Reduce to subjects with all 7 sessions
table = table[table.notna().all(axis=1)]
# Sum across all sessions to get cumulative total amount of LDs per participant per day.
cumtable = table.cumsum(axis=1)
# Get the baseline scores for each participant and merge with session data.
# (the merge keys on cumtable's subjectID *index level* against baseline's column)
baseline = df[["subjectID","LDF"]].drop_duplicates("subjectID")
data = cumtable.merge(baseline, on="subjectID")
# Column 7 is the cumulative LD count over all seven app sessions.
data = data.rename(columns={7: "app", "LDF": "baseline"})

# # Get descriptives summary for the cumulative version.
# cumtable_descr = totals[["all_sessions", "baseline"]
#     ].agg(["count", "mean"]).round(3).T.unstack(level=1)

####### Get number of days between first and 7th app use, for final sample.
final_subs = data["subjectID"].unique()
subset = df[df["subjectID"].isin(final_subs)]
subset = subset[subset["sessionID"].isin([1,7])]
# Keep one row per subject/session pair (first trial of the session).
subset = subset[~subset.duplicated(subset=["subjectID", "sessionID"], keep="first")]
subset = subset[["subjectID", "sessionID", "timeStart"]].reset_index(drop=True)
subset["timeStart"] = pd.to_datetime(subset["timeStart"])
subset = subset.pivot(index="subjectID", columns="sessionID", values="timeStart")
timediff = subset[7] - subset[1]
timediff_desc = timediff.describe()
timediff_desc.to_csv(export_fname_timedesc, index=True, header=False)
####### Run statistics
a = data["baseline"].values
b = data["app"].values
# Paired non-parametric comparison: baseline week vs. 7 app sessions.
stats = pg.wilcoxon(a, b).rename_axis("test")
stats.loc["Wilcoxon", "mean-n"] = len(a)  # sample size (identical for both conditions)
stats.loc["Wilcoxon", "mean-app"] = np.mean(b)
# BUG FIX: the next line originally re-assigned "mean-app" with the same
# value; it was clearly meant to record the baseline mean as well.
stats.loc["Wilcoxon", "mean-baseline"] = np.mean(a)

################## Export session-level data, descriptives, and stats.
data.to_csv(export_fname_data, index=False, na_rep="NA")
stats.to_csv(export_fname_stats, index=True, float_format="%.4f")
| remrama/lucidapp | analyze-app_effect.py | analyze-app_effect.py | py | 3,437 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "utils.Config",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_... |
2952639119 | import visa
import time
from time import sleep
# Connect to a hard-coded Rigol DM3058E multimeter over USB-TMC and take a
# first DC-current reading to verify the link.
rm = visa.ResourceManager()
print('Connected VISA resources:')
print(rm.list_resources())
dmm = rm.open_resource('USB0::0x1AB1::0x09C4::DM3R192701216::INSTR')
print('Instrument ID (IDN:) = ', dmm.query('*IDN?'))
#print("Volts DC = ", dmm.query(":MEASure:VOLTage:DC?"))
print("DC Current = ", dmm.query(":MEASure:CURRent:DC?"))
# Log readings to CSV; the "with" block guarantees the file is flushed and
# closed even if the 24-hour loop is interrupted (the original left the
# file object open and could lose buffered rows on an exception/Ctrl-C).
with open('iLog.csv', 'w') as f:
    f.write("Time, DC Current, Raw\n")
    print("Poll rate = 500mS. Will run for 24 hours collecting 172,800 readings")
    print("output file = iLog.csv\n\n")
    print(" Seconds Count ", "DC Current", "Raw Meter Response", sep="\t|\t")
    print("----------------------------------------------------------------------------------")
    for x in range(0, 172800):
        rawStr = dmm.query(":MEASure:CURRent:DC?").replace("\n", "")
        # Strip the SCPI block header ("#9000000015") to leave the number.
        iFlt = float(rawStr.replace("#9000000015", ""))
        now = time.time()
        print(now, iFlt, rawStr, sep="\t|\t")
        f.write(f"{now},{iFlt},{rawStr}\n")
        sleep(.5)
| JohnRucker/Rigol-DM3058E | test.py | test.py | py | 1,135 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "visa.ResourceManager",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 34,
"usage_type": "call"
}
] |
12133536334 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 14:51:00 2017
@author: trevario
Automatically plot TCSPC data using matplotlib.pyplot
"""
import glob, os
import numpy as np
import matplotlib.pyplot as plt
import csv
#%matplotlib inline
print("what directory are the files in?")
name = input()
os.chdir('/home/trevario/Documents/Labnotebook/TCSPC/' + str(name))
spt = glob.glob("*.dat")
for filename in spt:
    data = np.genfromtxt(filename, skip_header=2)
    # Figure size is in inches.
    plt.figure(figsize=(5, 3), dpi=240)
    plt.title(filename[:-4], fontsize=16)
    # The axis labels live on the second header line; read them in one pass
    # instead of transposing the entire data file twice as before.
    with open(filename, encoding="latin_1") as bunch:
        spamreader = csv.reader(bunch, delimiter='\t')
        next(spamreader)           # first header line (unused)
        header = next(spamreader)  # second line: column titles
    plt.xlabel(header[0], fontsize=12)
    plt.ylabel(header[1], fontsize=12)
    # Change the plotted data range here if needed:
    # plt.ylim(0, 15000)
    # TCSPC data does not need to be inverted, ignore this line
    # plt.gca().invert_xaxis()
    ax = plt.subplot(1, 1, 1)
    # Hide the top and right spines and their ticks.
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    ax.tick_params(axis='both', which='major', labelsize=10)
    # Thicken the remaining axes.
    ax.spines['left'].set_linewidth(2)
    ax.spines['bottom'].set_linewidth(2)
    # Make room for the labels.
    plt.tight_layout()
    plt.gcf().subplots_adjust(left=0.15)
    plt.plot(data[:, 0], data[:, 1], color="blue", linewidth=2.0)
    plt.savefig(filename + ".png", dpi=240)
    plt.show()
| trevhull/dataplot | tcspc.py | tcspc.py | py | 2,065 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"l... |
31479728563 | import pygame
from constants import WHITE, SIZE_WALL, YELLOW, MARGIN
class Food:
    """A pellet drawn as an ellipse positioned on the maze grid."""

    def __init__(self, row, col, width, height, color):
        surface = pygame.Surface([width, height])
        surface.fill(WHITE)
        surface.set_colorkey(WHITE)  # WHITE acts as the transparent colorkey
        pygame.draw.ellipse(surface, color, [0, 0, width, height])
        self.image = surface
        self.row = row
        self.col = col
        self.rect = surface.get_rect()
        top = row * SIZE_WALL + MARGIN["TOP"]
        left = col * SIZE_WALL + MARGIN["LEFT"]
        if color == YELLOW:
            # Center (yellow) pellets within their grid cell.
            top += SIZE_WALL // 2 - height // 2
            left += SIZE_WALL // 2 - width // 2
        self.rect.top = top
        self.rect.left = left

    def draw(self, screen):
        """Blit the pellet sprite at its rect position."""
        screen.blit(self.image, (self.rect.left, self.rect.top))

    def getRC(self):
        """Return the grid position as [row, col]."""
        return [self.row, self.col]
| nxhawk/PacMan_AI | Source/Object/Food.py | Food.py | py | 860 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.Surface",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "constants.WHITE",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "constants.WHITE",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "pygame.draw.elli... |
30526669570 | # augmentations for 2D and 2.5D
import random
import numpy as np
import SimpleITK as sitk
from src.utils.itk_tools import rotate_translate_scale2d
# Now only support list or tuple
class Compose(object):
    """Apply a sequence of augmentation callables in order.

    Each augmentation is invoked as aug(nda, nda_type) and must return the
    (possibly transformed) array bundle, which feeds the next augmentation.
    """

    def __init__(self, augmentations):
        self._augmentations = augmentations

    def __call__(self, nda, nda_type):
        result = nda
        for augment in self._augmentations:
            result = augment(result, nda_type)
        return result
# flip axis is array axis
class NPRandomFlip(object):
    """Randomly mirror each array in a bundle along one or more axes.

    `axis` indexes 2D array axes; 3D arrays are flipped along axis + 1
    (the leading dimension is treated as slices/channels). With a tuple or
    list, a random non-empty subset of the axes is flipped. The whole flip
    is applied with probability `do_probability`; otherwise the input
    bundle is returned untouched.
    """

    def __init__(self, axis=0, do_probability=0.5):
        assert isinstance(axis, (int, tuple, list)), "Axis value type must be int, tuple or list."
        self._axis = axis
        self._p = do_probability

    def __call__(self, nda, nda_type):
        if random.random() >= self._p:
            return nda
        if isinstance(self._axis, int):
            flip_axis = self._axis
            flip_axis_dim3 = self._axis + 1
        elif isinstance(self._axis, (tuple, list)):
            flip_axis = random.sample(self._axis, random.randint(1, len(self._axis)))
            flip_axis_dim3 = tuple(i + 1 for i in flip_axis)
        else:
            flip_axis = flip_axis_dim3 = None
        flipped = []
        for array, kind in zip(nda, nda_type):
            axes = flip_axis_dim3 if array.ndim == 3 else flip_axis
            flipped.append(np.copy(np.flip(array, axes)))
        return tuple(flipped)
# scale, translate, rotation
class ITKRandomRotateTranslateScale(object):
    """Random 2D rotation/translation/scale augmentation via SimpleITK.

    `theta` is in degrees; `tx`/`ty` bound the translation; `scale` bounds
    the relative scale deviation. Parameters are sampled uniformly in
    [-bound, +bound] once per call and shared by all arrays in the bundle.
    The transform is applied with probability `do_probability`.
    """

    def __init__(self, theta=0, tx=0, ty=0, scale=0, do_probability=0.5):
        self._theta = theta * np.pi / 180.0
        self._tx = tx
        self._ty = ty
        self._scale = scale
        self._p = do_probability

    def __call__(self, nda, nda_type):
        if random.random() < self._p:
            # One shared transform: [scale, theta, tx, ty].
            transform_params = [(np.random.rand() * 2 - 1) * self._scale + 1,
                                (np.random.rand() * 2 - 1) * self._theta,
                                (np.random.rand() * 2 - 1) * self._tx,
                                (np.random.rand() * 2 - 1) * self._ty,
                                ]
            out = []
            for a, t in zip(nda, nda_type):
                # Label/mask arrays must not be interpolated; images get linear.
                interpolator = "Linear" if t == "image" else "NearestNeighbor"
                default_v = float(np.amin(a)) if t == "image" else 0
                if a.ndim == 3:
                    tmp = []
                    # BUG FIX: the original iterated `for i in a.shape[0]`,
                    # which raises TypeError (iterating over an int); each
                    # 2D slice must be processed via range(a.shape[0]).
                    for i in range(a.shape[0]):
                        tmp.append(sitk.GetArrayFromImage(rotate_translate_scale2d(
                            sitk.GetImageFromArray(a[i], isVector=False), transform_params, interpolator, default_v)))
                    out.append(np.stack(tmp, axis=0))
                else:
                    out.append(sitk.GetArrayFromImage(
                        rotate_translate_scale2d(
                            sitk.GetImageFromArray(a, isVector=False), transform_params, interpolator, default_v)))
            out = tuple(out)
            return out
        else:
            return nda
| eugeneyuan/test_rep | src/data/aug2d.py | aug2d.py | py | 3,312 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.random",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_num... |
16586177664 | # -*- coding: UTF-8 -*-
from flask import render_template, flash, redirect
from sqlalchemy.orm import *
from sqlalchemy import *
from flask.ext.sqlalchemy import SQLAlchemy
from flask import Flask
from flask import *
from forms import lyb
#from flask.ext.bootstrap import Bootstrap
app = Flask(__name__)
app.config.from_object('config')
app.config['SQLALCHEMY_DATABASE_URI']='mysql://dj:123456@localhost/wjh'
#bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
class User(db.Model):
    """A message-board post (the "titel" spelling matches the existing DB schema)."""
    id = db.Column(db.Integer, primary_key=True)
    # Post title; unique across all posts.
    titel = db.Column(db.String(300), unique=True)
    # Post body.
    text = db.Column(db.Text)

    def __init__(self, titel, text):
        self.text = text
        self.titel = titel
db.create_all()
@app.route('/add', methods=['GET', 'POST'])
def register():
    """Render the post form; on a valid POST, create the post and redirect to the index."""
    form = lyb(request.form)
    if request.method == 'POST' and form.validate():
        user = User(form.title.data, form.text.data)
        db.session.add(user)
        db.session.commit()
        flash('Thanks for registering')
        return redirect(url_for('index'))
    return render_template('text.html', form=form)
@app.route('/index')
def index():
    """List all posts, newest first."""
    p = User.query.order_by(User.id.desc()).all()
    return render_template('index.html', p=p)
@app.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Show a single post (404 if the id does not exist)."""
    p = User.query.get_or_404(id)
    # NOTE(review): 'index.html_1.html' looks like a mistyped template name
    # (other views use index.html / text_1.html) -- confirm it exists.
    return render_template('index.html_1.html', p=p)
@app.route('/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
    """Edit an existing post: GET pre-fills the form, a valid POST saves it."""
    post = User.query.get_or_404(id)
    form = lyb(request.form)
    if request.method == 'POST' and form.validate():
        post.titel = form.title.data
        post.text = form.text.data
        db.session.add(post)
        db.session.commit()
        return redirect(url_for('index'))
    # Pre-populate the form with current values (GET or invalid POST).
    form.title.data = post.titel
    form.text.data = post.text
    return render_template('text_1.html', form=form, post=post)
@app.route('/delete/<int:id>', methods=['GET', 'POST'])
def delete(id):
    """Delete a post and return to the index.

    NOTE(review): deletion is reachable via plain GET with no CSRF
    protection -- confirm this is acceptable for this app.
    """
    post = User.query.get_or_404(id)
    db.session.delete(post)
    db.session.commit()
    return redirect(url_for('index'))
# BUG FIX: this guard appeared twice; the duplicate was dead weight
# (app.run blocks until shutdown, so the second copy never mattered).
if __name__ == '__main__':
    app.run('0.0.0.0', debug=True)
| wjh1234/python-scripts | flasker/app/views.py | views.py | py | 2,095 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.ext.sqlalchemy.SQLAlchemy",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "forms.lyb",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "flask.flash",
... |
13098646528 |
from io import BytesIO
from pathlib import Path
import random
from flask import Blueprint, Flask
from flask.wrappers import Response
from loft.config import Config, DebugConfig
from loft.util.id_map import IdMap
from loft.web.blueprints.api import api
rand = random.Random()
rand.seed(24242424)
def client(config: Config, api: Blueprint):
    '''
    Create the test Flask application, register the blueprint and return the
    Flask test client.

    config: Flask config object (DebugConfig in these tests).
    api: the blueprint under test (loft.web.blueprints.api).
    '''
    flask = Flask(__name__)
    flask.config.from_object(config)
    flask.register_blueprint(api)
    return flask.test_client()
def test_post():
    """Uploading a file stores it under DOWNLOADS_FOLDER with its own name."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        dest = config.DOWNLOADS_FOLDER / 'test.txt'
        assert not dest.exists()
        response: Response = c.post('/api/files', data={
            'upload': (BytesIO('lorum ipsum dolor sit amet'.encode('utf8')), 'test.txt')
        })
        assert response.status_code == 200
        assert dest.exists()
        with open(dest, 'r') as f:
            assert f.read() == 'lorum ipsum dolor sit amet'
        # Clean up so the test is repeatable.
        dest.unlink()
def test_post_duplicate_filename():
    """A filename collision is resolved by suffixing _1 before the extension."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        # Pre-create the colliding file (it is left in place afterwards).
        with (config.DOWNLOADS_FOLDER / 'test.txt').open('w') as f:
            f.write('hello')
        dest = config.DOWNLOADS_FOLDER / 'test_1.txt'
        assert not dest.exists()
        response: Response = c.post('/api/files', data={
            'upload': (BytesIO('lorum ipsum dolor sit amet'.encode('utf8')), 'test.txt')
        })
        assert response.status_code == 200
        assert dest.exists()
        with open(dest, 'r') as f:
            assert f.read() == 'lorum ipsum dolor sit amet'
        dest.unlink()
def test_post_empty_filename():
    """An upload with an empty filename falls back to 'Untitled'."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        dest = config.DOWNLOADS_FOLDER / 'Untitled'
        assert not dest.exists()
        response: Response = c.post('/api/files', data={
            'upload': (BytesIO('lorum ipsum dolor sit amet'.encode('utf8')), '')
        })
        assert response.status_code == 200
        assert dest.exists()
        with open(dest, 'r') as f:
            assert f.read() == 'lorum ipsum dolor sit amet'
        dest.unlink()
def test_list():
    """GET /api/files lists (id, filename) pairs for registered paths."""
    config = DebugConfig()
    i = IdMap()
    i.add(Path('parent/foo.ext'))
    i.add(Path('parent/bar.ext2'))
    with client(config, api(i)) as c:
        response = c.get('/api/files')
        data = response.get_json()
        assert 'available' in data
        assert len(data['available']) == 2
        assert data['available'][0][0] == 0
        assert data['available'][0][1] == 'foo.ext'
        assert data['available'][1][0] == 1
        assert data['available'][1][1] == 'bar.ext2'
def test_list_empty():
    """GET /api/files on an empty IdMap returns an empty 'available' list."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        response = c.get('/api/files')
        data = response.get_json()
        assert 'available' in data
        assert len(data['available']) == 0
def test_get():
    """GET /api/files/<id> streams back the registered file's contents."""
    config = DebugConfig()
    i = IdMap()
    # The file stays open for the whole test so it can be re-read and
    # compared against the response body.
    with open(config.DOCUMENTS_FOLDER / 'a.txt', 'w+') as f:
        path = Path(f.name)
        # Fill with deterministic pseudo-random content (rand is seeded).
        for _ in range(1, 10):
            f.write(str(rand.uniform(0, 1000)))
        assert i.add(path) == 0
        with client(config, api(i)) as c:
            l_r: Response = c.get('/api/files')
            l_data = l_r.get_json()
            assert 'available' in l_data
            assert len(l_data['available']) == 1
            assert l_data['available'][0][0] == 0
            assert l_data['available'][0][1] == path.name
            response: Response = c.get('/api/files/0')
            assert response.status_code == 200
            f.seek(0)
            assert response.get_data(as_text=True) == f.read()
def test_get_empty():
    """Requesting an id that was never registered returns 404."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        response: Response = c.get('/api/files/0')
        assert response.status_code == 404
| ucsb-cs148-s21/t7-local-network-file-transfer | test/web/blueprints/test_api.py | test_api.py | py | 4,074 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "random.Random",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "loft.config.Config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.Blueprint",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"l... |
1479958272 | from datetime import date
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from app.features_extractors.numerical import make_harmonic_features
def number_of_days_until_true(boolean_values: pd.Series, today: date) -> float:
    """Days from `today` until the next True in the datetime-indexed series.

    Returns NaN for an empty series.
    NOTE: if the slice at/after `today` contains no True, idxmax falls back
    to the slice's first entry; the caller pads holidays with a 4-month
    margin to avoid that case.
    FIXES: the return annotation (a scalar, not pd.Series) and the np.NaN
    alias, which was removed in NumPy 2.0 (np.nan is the canonical name).
    """
    return (boolean_values[today:].idxmax() - today).days if not boolean_values.empty else np.nan
def number_of_days_after_true(boolean_values: pd.Series, today: date) -> float:
    """Days elapsed since the most recent True at/before `today`.

    Returns NaN for an empty series.
    FIXES: adds the missing scalar return annotation and replaces the
    np.NaN alias (removed in NumPy 2.0) with np.nan.
    """
    return (today - boolean_values[:today].iloc[::-1].idxmax()).days if not boolean_values.empty else np.nan
def extract_calendar_features(data: pd.DataFrame) -> pd.DataFrame:
    """Derive cyclic calendar features for every row of `data`.

    Features are computed once per unique normalized `created_at` date and
    then joined back onto the original row index. Cyclic quantities (day,
    month, quarter, season, day-of-year, day-of-week) are encoded as
    cos/sin pairs via make_harmonic_features; holiday distances use the US
    federal calendar with a 4-month margin on both ends.
    """
    df = pd.DataFrame(index=data.created_at.dt.normalize().unique())
    df['day_cos'], df['day_sin'] = make_harmonic_features(df.index.day, df.index.days_in_month)
    df['month_cos'], df['month_sin'] = make_harmonic_features(df.index.month, 12)
    df['quarter_cos'], df['quarter_sin'] = make_harmonic_features(df.index.quarter, 4)
    # Map month -> season index 0..3 (Dec/Jan/Feb = 0, etc.).
    seasons = (df.index.month % 12 + 3) // 3 - 1
    df['season_cos'], df['season_sin'] = make_harmonic_features(seasons, 4)
    df['year'] = df.index.year
    # BUG FIX: the day-of-year harmonics were computed from df.index.year;
    # the feature name and the 365-day period require dayofyear.
    df['dayofyear_cos'], df['dayofyear_sin'] = make_harmonic_features(df.index.dayofyear, 365)
    df['dayofweek_cos'], df['dayofweek_sin'] = make_harmonic_features(df.index.dayofweek, 7)
    df['is_weekend'] = df.index.dayofweek >= 5
    # Pad the holiday lookup so distance features never run off the edge.
    dates_with_margin = pd.date_range(
        pd.to_datetime(df.index.min()) - pd.DateOffset(months=4),
        pd.to_datetime(df.index.max()) + pd.DateOffset(months=4))
    holidays = calendar().holidays(
        start=dates_with_margin.min(),
        end=dates_with_margin.max())
    is_holiday = pd.Series(pd.Series(dates_with_margin).isin(holidays).values, index=dates_with_margin)
    df['days_until_holidays'] = pd.Series(df.index)\
        .apply(lambda today: number_of_days_until_true(is_holiday, today)).values
    df['days_after_holidays'] = pd.Series(df.index)\
        .apply(lambda today: number_of_days_after_true(is_holiday, today)).values
    df['is_holiday'] = df.index.isin(holidays)
    return pd.DataFrame({'normalized_date': data.created_at.dt.normalize()}, index=data.index)\
        .merge(df.fillna(0), left_on='normalized_date', right_index=True)\
        .drop(columns=['normalized_date'])
| ahmediqtakehomes/TakeHomes | reformated_takehomes_old/doordash_1/example_submission/app/features_extractors/calendar.py | calendar.py | py | 2,283 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.Series",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "datetime.date",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.NaN",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"li... |
34058973132 | from sklearn.preprocessing import Normalizer
import numpy as np
from utils import extract_face_roi_single
from Database import addNewStudent
import pickle
import os
from bson.binary import Binary
import tensorflow as tf
os.environ['CUDA_VISIBLE_DEVICES']='-1'
Normaliser = Normalizer(norm='l2')
global graph
frozen_graph="facenet_optimized.pb"
with tf.gfile.GFile(frozen_graph, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def,
input_map=None,
return_elements=None,
name="")
sess= tf.Session(graph=graph)
def get_embedding(face):
    """Compute the 128-d FaceNet embedding for one face crop.

    The crop is standardized (zero mean, unit std), batched, and fed to the
    frozen graph's "input_1" tensor; the "Bottleneck_BatchNorm" output is
    returned for the single batch element.
    """
    with graph.as_default():
        face = face.astype('float32')
        mean, std = face.mean(), face.std()
        face = (face - mean) / std
        face = np.expand_dims(face, axis=0)
        # embedding=model.predict(face)
        y_pred = graph.get_tensor_by_name("Bottleneck_BatchNorm/cond/Merge:0")
        x = graph.get_tensor_by_name("input_1:0")
        feed_dict = {x: face}
        embedding = sess.run(y_pred, feed_dict)
        return embedding[0]
def get_block_embeddings(path):
    """Embed every image file in `path`, returning a list of 128-d vectors.

    NOTE(review): each embedding is reshaped to (-1, 2) before the L2
    Normalizer, so normalization happens per 2-element pair rather than
    over the full vector -- confirm this is intentional
    (get_single_embedding does the same, so the pipeline is at least
    self-consistent).
    """
    embeddingsArr = []
    for filename in os.listdir(path):
        img = extract_face_roi_single(os.path.join(path, filename))
        img = get_embedding(img)
        img = np.reshape(img, (-1, 2))
        img = Normaliser.transform(img)
        img = np.reshape(img, (128,))
        embeddingsArr.append(img)
    return embeddingsArr
def get_single_embedding(rollno, img, filename):
    """Embed one student image and persist it to the database.

    The normalized embedding is pickled into a BSON Binary (subtype 128)
    and stored via addNewStudent; nothing is returned.
    """
    img = extract_face_roi_single(img)
    img = get_embedding(img)
    img = np.reshape(img, (-1, 2))
    img = Normaliser.transform(img)
    img = np.reshape(img, (128,))
    img = list(img)
    img = Binary(pickle.dumps(img, protocol=2), subtype=128)
    addNewStudent(rollno, filename, img)
image_path='StudentImages/'
save_path='Embeddings/'
def prepare_data():
    """Embed all images under image_path and pickle the resulting list to
    Embeddings/Embeddings.pickle."""
    embeddingArr = get_block_embeddings(image_path)
    with open(os.path.join(save_path, 'Embeddings.pickle'), 'wb') as f:
        pickle.dump((embeddingArr), f)
| VikasOjha666/Attendance_API_optimized | prepare_embeddings.py | prepare_embeddings.py | py | 2,065 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sklearn.preprocessing.Normalizer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.gfile.GFile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name"... |
29178775126 | from django.http import HttpResponse
from django.core.paginator import Paginator
from django.shortcuts import render
from .operations_c import Company_O, Publication
from django.http import QueryDict
from home.operation_home import Home_O
def Add_Publication(request):
    """Render the publication form; on POST, create the publication first.

    Checkbox fields only appear in POST data when ticked, so they are
    normalized to explicit 1/0 integers before being passed on.
    """
    if request.method == 'POST':
        mutable_post_data = request.POST.copy()
        mutable_post_data['availability_travel'] = int('availability_travel' in request.POST)
        mutable_post_data['change_residence'] = int('change_residence' in request.POST)
        Publication().Create_Publication(mutable_post_data, request.session['pk_user'])
    # Reuse one Company_O instance instead of constructing eight of them,
    # one per lookup, as the original did.
    company = Company_O()
    return render(request, 'company/add_publication.html', {
        'area': company.Get_Area(), 'city': company.Get_City(),
        'Type_Contract': company.Type_Contract(), 'Workday': company.Workday(),
        'Workplace': company.Workplace(), 'Minimum_Studiess': company.Minimum_Studiess(),
        'languages': company.languages(), 'Languages_Levels': company.Languages_Levels()
    })
def All_List_Application_Company(request):
    """List the logged-in company's publications, paginated 4 per page."""
    data = Publication().All_List_Application_Company(request.session['pk_user'])
    items_per_page = 4
    paginator = Paginator(data, items_per_page)
    # get_page tolerates a missing/invalid ?page= parameter.
    page_number = request.GET.get('page')
    page_obj = paginator.get_page(page_number)
    return render(request, 'company/list_publications.html', {'page_obj': page_obj})
def Edit_Publication(request, pk):
    """Render the edit form for one publication with all option lists."""
    data = Home_O().Get_Publication(pk)
    # Removed a leftover debug print(data) that spammed the server log,
    # and reuse a single Company_O instance for all option lookups.
    company = Company_O()
    return render(request, 'company/edit_publication.html', {'data': data,
        'area': company.Get_Area(), 'city': company.Get_City(),
        'Type_Contract': company.Type_Contract(), 'Workday': company.Workday(),
        'Workplace': company.Workplace(), 'Minimum_Studiess': company.Minimum_Studiess(),
        'languages': company.languages(), 'Languages_Levels': company.Languages_Levels()
    })
| cdavid58/empleo | company/views.py | views.py | py | 1,810 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "operations_c.Publication",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "operations_c.Company_O",
"line_number": 16,
"usage_type": "call"
},
{
"api_name"... |
17872592313 | #
# author: Paul Galatic
#
# This program is JUST for drawing a rounded rectangle.
#
import pdb
from PIL import Image, ImageDraw
from extern import *
def sub_rectangle(draw, xy, corner_radius=25, fill=(255, 255, 255)):
    '''
    Draw a filled rounded rectangle on `draw`.

    `xy` is ((x0, y0), (x1, y1)). The shape is composed of two overlapping
    rectangles (one inset vertically, one horizontally, by corner_radius)
    plus four quarter-circle pie slices for the corners.

    Source: https://stackoverflow.com/questions/7787375/python-imaging-library-pil-drawing-rounded-rectangle-with-gradient
    '''
    upper_left_point = xy[0]
    bottom_right_point = xy[1]
    # Center slab: full width, inset top/bottom by the corner radius.
    draw.rectangle(
        [
            (upper_left_point[0], upper_left_point[1] + corner_radius),
            (bottom_right_point[0], bottom_right_point[1] - corner_radius)
        ],
        fill=fill,
    )
    # Center slab: full height, inset left/right by the corner radius.
    draw.rectangle(
        [
            (upper_left_point[0] + corner_radius, upper_left_point[1]),
            (bottom_right_point[0] - corner_radius, bottom_right_point[1])
        ],
        fill=fill,
    )
    # Four quarter circles (angles in degrees, measured from 3 o'clock).
    draw.pieslice([upper_left_point, (upper_left_point[0] + corner_radius * 2, upper_left_point[1] + corner_radius * 2)],
        180,
        270,
        fill=fill,
    )
    draw.pieslice([(bottom_right_point[0] - corner_radius * 2, bottom_right_point[1] - corner_radius * 2), bottom_right_point],
        0,
        90,
        fill=fill,
    )
    draw.pieslice([(upper_left_point[0], bottom_right_point[1] - corner_radius * 2), (upper_left_point[0] + corner_radius * 2, bottom_right_point[1])],
        90,
        180,
        fill=fill,
    )
    draw.pieslice([(bottom_right_point[0] - corner_radius * 2, upper_left_point[1]), (bottom_right_point[0], upper_left_point[1] + corner_radius * 2)],
        270,
        360,
        fill=fill,
    )
def rectangle(draw, size, fill=WHITE, border=None):
    """Draw a rounded rectangle of `size` on `draw`, optionally bordered.

    With `border`, an outer rounded rect is drawn in the border color and
    an inner one inset by BORDER is drawn in `fill`.
    NOTE(review): the PIL image created here stays blank -- all drawing
    targets the caller-supplied `draw` object -- yet `img` is what gets
    returned; confirm whether the drawing should be redirected onto it.
    """
    width, height = size
    img = Image.new('RGBA', size, color=BLANK)
    if border:
        outdims = ((0, 0), (width, height))
        sub_rectangle(draw, outdims, fill=border)
        indims = ((BORDER, BORDER), (width - BORDER, height - BORDER))
    else:
        indims = ((0, 0), (width, height))
    sub_rectangle(draw, indims, fill=fill)
    return img
{
"api_name": "PIL.Image.new",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 57,
"usage_type": "name"
}
] |
9587970567 | from plugin import plugin
from colorama import Fore
@plugin("hex")
def binary(jarvis, s):
    """
    Converts an integer into a hexadecimal number.

    Prompts for a number when none was supplied, reports invalid input in
    red, and prints the uppercase hex digits (with a leading '-' for
    negative values) in yellow.
    """
    if s == "":
        s = jarvis.input("What's your number? ")
    try:
        n = int(s)
    except ValueError:
        jarvis.say("That's not a number!", Fore.RED)
        return
    # format(n, 'X') yields uppercase hex digits without the '0x' prefix and
    # keeps the sign for negatives, replacing the fragile slicing of
    # hex(n).upper() (which also uppercased the 'X' in the prefix).
    jarvis.say(format(n, 'X'), Fore.YELLOW)
| sukeesh/Jarvis | jarviscli/plugins/hex.py | hex.py | py | 503 | python | en | code | 2,765 | github-code | 36 | [
{
"api_name": "colorama.Fore.RED",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.YELLOW",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "colora... |
30490661295 | #!/usr/bin/env python3
import os
import sys
import logging
import json
import requests
import datetime
import pyteamcity
import http.server
import validators
from urllib.parse import urlparse
config = {}
tc = None
logger = None
def initializeLogger():
logger = logging.getLogger('teamcity_connector')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
class TCWebHookHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
contentLen = int(self.headers['Content-Length'])
postBody=self.rfile.read(contentLen)
postBody = postBody.decode("utf-8")
postBody=json.loads(postBody)
buildId=postBody['build']['buildId']
result=processBuild(buildId)
self.send_response(result['status_code'])
self.end_headers()
self.wfile.write(result['text'].encode("utf-8"))
return
def getTeamcityConnection(user,password,url):
url_parsed = urlparse(url)
tc = pyteamcity.TeamCity(user,password,url_parsed.hostname,url_parsed.port)
try:
tc.get_server_info()
except Exception as e:
logger.error("can not connect to TeamCity: %s" % e)
result = False
else:
result = tc
return result
def dateTimeToTimestamp(s):
s=datetime.datetime.strptime(s, "%Y%m%dT%H%M%S%z").timestamp()*1000
s="%.0f" % s
return s
def processBuild(buildId):
try:
build = tc.get_build_by_build_id(buildId)
logger.debug("buildId: %s" % buildId)
logger.debug("build: %s" % build)
except Exception as e:
logger.error("can not get build: %s" % e)
try:
buildStatistic = tc.get_build_statistics_by_build_id(buildId)
except Exception as e:
logger.error("can not get build statistic: %s" % e)
try:
changes = tc.get_changes_by_build_id(buildId)
except Exception:
logger.info("changes are empty for build id: %s" % buildId)
changesEmpty = True
else:
changesEmpty = False
data={}
data['buildStatus'] = build['status']
data['buildUrl'] = build['webUrl']
buildStatisticProperties = buildStatistic['property']
for buildStatisticProperty in buildStatisticProperties:
if 'BuildDurationNetTime' == buildStatisticProperty['name']:
data['duration'] = int(buildStatisticProperty['value'])
data['startTime'] = dateTimeToTimestamp(build['startDate'])
if 'finishDate' in build:
data['endTime'] = dateTimeToTimestamp(build['finishDate'])
# FIXME: what is instanceUrl ? set to N/A
data['instanceUrl'] = "N/A"
try:
data['jobName'] = build['buildType']['projectName']
except Exception as e:
logger.warn("can not get project name from build type, set to N/A")
data['jobName'] = "N/A"
# FIXME: what is jobURL? set to webUrl
data['jobUrl'] = build['webUrl']
try:
data['log'] = changes['comment']
except Exception as e:
data['log'] = ""
data['niceName'] = build['buildType']['id']
data['number'] = build['id']
if build['triggered']['type'] == "user":
data['startedBy'] = build['triggered']['user']['username']
elif build['triggered']['type'] == "vcs":
data['startedBy'] = "started by VCS trigger"
data['sourceChangeSet'] = []
sourceChangeSet = {}
if changesEmpty == False:
for changeIterator in build['lastChanges']['change']:
try:
change=tc.get_change_by_change_id(changeIterator['id'])
except Exception as e:
logger.error("can not get change with id %s" % changeIterator['id'])
else:
sourceChangeSet['scmRevisionNumber'] = change['version']
sourceChangeSet['scmCommitLog'] = change['comment']
try:
sourceChangeSet['scmAuthor'] = change['user']['name']
except Exception as e:
sourceChangeSet['scmAuthor'] = ''
logger.info("user.name is not found for change %s, set to username" % changeIterator['id'])
else:
sourceChangeSet['scmAuthor'] = change['username']
if sourceChangeSet['scmAuthor'] == '' and build['triggered']['type'] == "vcs":
sourceChangeSet['scmAuthor'] = "started by VCS trigger"
elif sourceChangeSet['scmAuthor'] == '' and build['triggered']['type'] == "user":
sourceChangeSet['scmAuthor'] = build['triggered']['user']['username']
else:
logger.error("can not get \"triggered by\" value for buildId %s" % buildId)
sourceChangeSet['scmCommitTimestamp'] = dateTimeToTimestamp(change['date'])
sourceChangeSet['numberOfChanges'] = 1
data['sourceChangeSet'].append(sourceChangeSet)
dataJson=json.dumps(data)
logger.debug("dataJson: %s" % dataJson)
headers = {'Accept': 'application/json','Content-type':'application/json'}
url=config['HYGIEIA_API_URL'] + "/build"
request=requests.post(url, data = dataJson, headers = headers)
logger.debug("request: %s" % request)
logger.debug("build ID: %s" % build['id'])
result={}
result['status_code']=request.status_code
result['text']=request.text
logger.debug("result: %s" % result)
return result
def checkEnvironmentVariables(config):
result = True
config["HOST"] = "0.0.0.0"
config['PORT'] = 80
if "HYGIEIA_API_URL" in os.environ and validators.url(os.getenv("HYGIEIA_API_URL")):
config['HYGIEIA_API_URL'] = os.getenv("HYGIEIA_API_URL")
else:
logger.error("HYGIEIA_API_URL environmanet variable is not set")
result = False
if "TEAMCITY_URL" in os.environ and validators.url(os.getenv("TEAMCITY_URL")):
config['TEAMCITY_URL'] = os.getenv("TEAMCITY_URL")
else:
logger.error("TEAMCITY_URL environmanet variable is not set")
result=False
if "TEAMCITY_USER" in os.environ:
config['TEAMCITY_USER'] = os.getenv("TEAMCITY_USER")
else:
logger.info("TEAMCITY_USER environment variable is not set, trying with empty")
config['TEAMCITY_USER'] = ""
if "TEAMCITY_PASSWORD" in os.environ:
config['TEAMCITY_PASSWORD'] = os.getenv("TEAMCITY_PASSWORD")
else:
logger.info("TEAMCITY_PASSWORD environment variable is not set, trying with empty")
config['TEAMCITY_PASSWORD'] = ""
return result
if __name__ == '__main__':
logger = initializeLogger()
if checkEnvironmentVariables(config) == True:
tc = getTeamcityConnection(config['TEAMCITY_USER'], config['TEAMCITY_PASSWORD'], config['TEAMCITY_URL'])
if tc != False:
httpd = http.server.HTTPServer((config['HOST'], config['PORT']), TCWebHookHandler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
| mrlov/hygieia_teamcity_collector | main.py | main.py | py | 6,631 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.INF... |
40858069336 | import matplotlib.pyplot as plt
# 模拟导航路径数据
path = [(0, 0), (1, 1), (2, 3), (3, 4), (4, 2)]
# 初始化绘图
fig, ax = plt.subplots()
ax.set_xlim(-1, 5)
ax.set_ylim(-1, 5)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_title('Navigation Path')
# 绘制导航路径
x = [point[0] for point in path]
y = [point[1] for point in path]
ax.plot(x, y, 'b-')
# 初始化用户位置
user_position = (0, 0)
user_position_plot, = ax.plot(user_position[0], user_position[1], 'ro') # 用户位置的红点
# 更新用户位置
def update_user_position(new_pos):
user_position_plot.set_data(new_pos[0], new_pos[1]) # 更新红点的坐标
plt.draw() # 重新绘制图像
# 模拟用户位置的更新
import time
for i in range(1, len(path)):
new_position = path[i]
update_user_position(new_position)
plt.pause(1) # 暂停一秒
time.sleep(1) # 等待一秒
| haiboCode233/KivyPlusAR | testcode.py | testcode.py | py | 897 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.draw",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mat... |
29014504899 | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from penn_treebank_reader import *
from dataset import DatasetReader
from dataset import make_batch_iterator
def get_offset_cache(length):
offset_cache = {}
ncells = int(length * (1 + length) / 2)
for lvl in range(length):
level_length = length - lvl
ncells_less = int(level_length * (1 + level_length) / 2)
offset_cache[lvl] = ncells - ncells_less
return offset_cache
class ChartUtils(object):
def __init__(self):
super(ChartUtils, self).__init__()
self.offset_cache = {}
def to_idx(self, length, level, pos):
return self.to_offset(length, level) + pos
def to_offset(self, length, level):
if length not in self.offset_cache:
self.offset_cache[length] = get_offset_cache(length)
return self.offset_cache[length][level]
chart_utils = ChartUtils()
class ModelContainer(nn.Module):
def __init__(self, embed, model, loss_func):
super(ModelContainer, self).__init__()
self.embed = embed
self.model = model
self.loss_func = loss_func
class ConstituentLoss(nn.Module):
def __init__(self, vocab_size):
super(ConstituentLoss, self).__init__()
self.hidden_size = 100
self.predict = nn.Linear(self.hidden_size, vocab_size)
def forward(self, chart, label_batch):
batch_index = label_batch['batch_index']
idx_index = label_batch['idx_index']
label_index = label_batch['label_index']
logit = self.predict(chart[batch_index, idx_index])
loss = nn.CrossEntropyLoss()(logit, label_index)
return loss
class SequenceEncoder(nn.Module):
def __init__(self, vocab_size, self_attention=False):
super(SequenceEncoder, self).__init__()
self.self_attention = self_attention
self.hidden_size = 100
self.embed = nn.Embedding(vocab_size, self.hidden_size)
if self.self_attention:
self.atten_q = nn.Linear(self.hidden_size, self.hidden_size)
def run_attention(self, h):
q, k, v = h, h, h
scores = torch.matmul(self.atten_q(q), k.transpose(1, 2))
return torch.sum(scores.unsqueeze(3) * v.unsqueeze(2), 2)
def forward(self, x):
h = self.embed(x)
if self.self_attention:
h = self.run_attention(h)
return h
class ChartEncoder(nn.Module):
def __init__(self):
super(ChartEncoder, self).__init__()
self.hidden_size = 100
self.compose = nn.Sequential(
nn.Linear(2*self.hidden_size, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.hidden_size))
self.score = nn.Linear(2*self.hidden_size, 1) # TODO: Use dot product instead.
def step(self, level):
N = level # number of constituent pairs.
L = self.length - level # number of elements being computed.
l_index, r_index = [], []
ref_idx, ref_pos = [], []
for idx in range(N):
left_level = idx
right_level = level - idx - 1
left_offset = chart_utils.to_offset(self.length, left_level)
right_offset = chart_utils.to_offset(self.length, right_level + 1) - L
left_index = torch.arange(left_offset, left_offset+L)
right_index = torch.arange(right_offset, right_offset+L)
l_index.append(left_index)
r_index.append(right_index)
ref_idx.append(torch.LongTensor([idx]*L))
ref_pos.append(torch.arange(L))
l_index = torch.cat(l_index)
r_index = torch.cat(r_index)
ref_idx = torch.cat(ref_idx)
ref_pos = torch.cat(ref_pos)
l = self.chart.index_select(index=l_index, dim=1)
r = self.chart.index_select(index=r_index, dim=1)
state = torch.cat([l, r], 2)
h_raw = self.compose(state)
s_raw = self.score(state) # TODO: Should incorporate score from children.
s = torch.softmax(s_raw.view(self.batch_size, L, N, 1), dim=2)
hbar = torch.sum(s * h_raw.view(self.batch_size, L, N, self.hidden_size), 2)
# sbar = torch.sum(s * s_raw.view(self.batch_size, L, N, 1), 2)
offset = chart_utils.to_offset(self.length, level)
self.chart[:, offset:offset+L] = hbar
def build_chart(self, x):
chart_size = self.length * (self.length + 1) // 2
chart = torch.FloatTensor(self.batch_size, chart_size, self.hidden_size).fill_(0)
chart[:, :self.length] = x
self.chart = chart
for level in range(1, self.length):
self.step(level)
def init_batch(self, x):
self.batch_size = x.shape[0]
self.length = x.shape[1]
def forward(self, x):
self.init_batch(x)
self.build_chart(x)
return None
class BatchManager(object):
def prepare_batch(self, batch_map):
return torch.LongTensor(batch_map['sentences'])
def prepare_labels(self, batch_map):
batch_index = []
idx_index = []
label_index = []
length = len(batch_map['sentences'][0])
for s in batch_map['sentences']:
assert len(s) == length, 'Does not support variable length batches.'
for i, spans in enumerate(batch_map['labels']):
for pos, size, label in spans:
level = size - 1
batch_index.append(i)
idx_index.append(chart_utils.to_idx(length, level, pos))
label_index.append(label)
batch_index = torch.LongTensor(batch_index)
idx_index = torch.LongTensor(idx_index)
label_index = torch.LongTensor(label_index)
return {
'batch_index': batch_index,
'idx_index': idx_index,
'label_index': label_index,
}
def main(options):
tr_reader = JSONLReader(options.tr_file)
tr_dataset = DatasetReader(tr_reader, config={'max_len': options.tr_max_len}).build()
batch_iterator = make_batch_iterator(None, tr_dataset)
embed = SequenceEncoder(self_attention=options.self_attention, vocab_size=len(tr_dataset['metadata']['word2idx']))
model = ChartEncoder()
loss_func = ConstituentLoss(vocab_size=len(tr_dataset['metadata']['label2idx']))
container = ModelContainer(embed, model, loss_func)
params = container.parameters()
optimizer = optim.Adam(params, lr=0.002, betas=(0.9, 0.999), eps=1e-8)
print('# of sentences = {}'.format(len(tr_dataset['sentences'])))
print('vocab size = {}'.format(len(tr_dataset['metadata']['word2idx'])))
print('# of classes = {}'.format(len(tr_dataset['metadata']['label2idx'])))
print(tr_dataset['metadata']['label2idx'])
for epoch in range(options.max_epochs):
for batch_map in batch_iterator.get_iterator():
seq = BatchManager().prepare_batch(batch_map)
seqh = embed(seq)
_ = model(seqh)
label_batch = BatchManager().prepare_labels(batch_map)
loss = loss_func(model.chart, label_batch)
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(params, 5.0)
optimizer.step()
print(loss.item())
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--tr_file', default=os.path.expanduser('~/data/ptb/valid.jsonl'), type=str)
parser.add_argument('--tr_max_len', default=10, type=int)
parser.add_argument('--self_attention', action='store_true')
parser.add_argument('--max_epochs', default=1000, type=int)
options = parser.parse_args()
main(options)
| mrdrozdov/chart-parser | train.py | train.py | py | 7,733 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"li... |
72244550505 | import json
# Load the mapping from inscriptions.json
with open("inscriptions.json", "r") as json_file:
data = json.load(json_file)
# Create a mapping from "Goosinals #" to "id"
mapping = {}
for entry in data:
number = int(entry["meta"]["name"].split("#")[1].strip())
mapping[number] = entry["id"]
# Process output.txt and replace numbers with ids
with open("output.txt", "r") as file:
lines = file.readlines()
with open("mapped_output.txt", "w") as file:
for line in lines:
number = int(line.strip()) # Assuming each line has only a number
file.write(mapping[number] + "\n")
| jokie88/goosinal_mosaic | map_goosinalnumber_to_hash.py | map_goosinalnumber_to_hash.py | py | 617 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 5,
"usage_type": "call"
}
] |
14203206189 | import math
from flask import render_template, request, redirect, url_for, session, jsonify
from saleapp import app, login
import utils
import cloudinary.uploader
from flask_login import login_user, logout_user, login_required
from saleapp.admin import *
from saleapp.models import UserRole
@app.route("/")
def index():
kw = request.args.get('keyword')
cate_id = request.args.get('category_id')
page = request.args.get('page', 1)
products = utils.read_products(kw=kw, cate_id=cate_id, page=int(page))
counter = utils.count_products()
return render_template("index.html",
products=products,
pages=math.ceil(counter/app.config['PAGE_SIZE']))
@app.route('/register', methods=['get', 'post'])
def user_register():
err_msg = ""
if request.method.__eq__('POST'):
name = request.form.get('name')
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
confirm = request.form.get('confirm')
avatar_path = None
try:
if password.strip().__eq__(confirm.strip()):
avatar = request.files.get('avatar')
if avatar:
res = cloudinary.uploader.upload(avatar)
avatar_path = res['secure_url']
utils.add_user(name=name, username=username,
password=password, email=email,
avatar=avatar_path)
return redirect(url_for('user_signin'))
else:
err_msg = 'Mat khau KHONG khop!!!'
except Exception as ex:
err_msg = 'He thong dang co loi: ' + str(ex)
return render_template('register.html', err_msg=err_msg)
@app.route('/user-login', methods=['get', 'post'])
def user_signin():
err_msg = ''
if request.method.__eq__('POST'):
username = request.form.get('username')
password = request.form.get('password')
user = utils.check_login(username=username, password=password)
if user:
login_user(user=user)
return redirect(url_for(request.args.get('next', 'index')))
else:
err_msg = 'Username hoac password KHONG chinh xac!!!'
return render_template('login.html', err_msg=err_msg)
@app.route('/user-logout')
def user_signout():
logout_user()
return redirect(url_for('user_signin'))
@app.route('/admin-login', methods=['post'])
def signin_admin():
username = request.form['username']
password = request.form['password']
user = utils.check_login(username=username,
password=password)
if user:
login_user(user=user)
return redirect('/admin')
@app.context_processor
def common_response():
return {
'categories': utils.read_categories(),
'cart_stats': utils.cart_stats(session.get('cart'))
}
@login.user_loader
def user_load(user_id):
return utils.get_user_by_id(user_id=user_id)
@app.route("/products")
def product_list():
cate_id = request.args.get('category_id')
kw = request.args.get('keyword')
from_price = request.args.get('from_price')
to_price = request.args.get('to_price')
products = utils.read_products(cate_id=cate_id, kw=kw,
from_price=from_price, to_price=to_price)
return render_template('product_list.html', products=products)
@app.route('/cart')
def cart():
return render_template('cart.html',
cart_stats=utils.cart_stats(session.get('cart')))
@app.route('/api/add-to-cart', methods=['post'])
def add_to_cart():
data = request.json
id = str(data.get('id'))
name = data.get('name')
price = data.get('price')
cart = session.get('cart')
if not cart:
cart = {}
if id in cart:
cart[id]['quantity'] = cart[id]['quantity'] + 1
else:
cart[id] = {
'id': id,
'name': name,
'price': price,
'quantity': 1
}
session['cart'] = cart
return jsonify(utils.cart_stats(session.get('cart')))
@app.route('/api/update-cart', methods=['put'])
def update_cart():
id = str(request.json.get('id'))
quantity = request.json.get('quantity')
cart = session.get('cart')
err_msg = ''
if cart:
if id in cart:
cart[id]['quantity'] = quantity
session['cart'] = cart
return jsonify({
'code': 200,
'data': utils.cart_stats(cart)
})
else:
err_msg = 'Khong co san pham tuong ung de cap nhat!'
else:
err_msg = 'Chua co gio hang!'
return jsonify({
'code': 404,
'err_msg': err_msg
})
@app.route('/api/delete-cart/<product_id>', methods=['delete'])
def delete_cart(product_id):
cart = session.get('cart')
err_msg = ''
if cart:
if product_id in cart:
del cart[product_id]
session['cart'] = cart
return jsonify({
'code': 200,
'data': utils.cart_stats(cart)
})
else:
err_msg = 'Khong co san pham tuong ung de cap nhat!'
else:
err_msg = 'Chua co gio hang!'
return jsonify({
'code': 404,
'err_msg': err_msg
})
@app.route('/api/pay', methods=['post'])
@login_required
def pay():
try:
utils.add_receipt(session.get('cart'))
del session['cart']
except:
return jsonify({'code': 404})
return jsonify({'code': 200})
if __name__ == '__main__':
app.run(debug=True) | duonghuuthanh/K19SaleApp | mysaleappv3/saleapp/index.py | index.py | py | 5,712 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.request.args.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.re... |
11263851417 | import sqlite3
from flask import g
from app.app import app
from .model import Objective, User
DATABASE = "data.db"
def create_tables():
with app.app_context():
db = get_db()
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS users (
user_id text primary key,
name text,
email text
)'''
)
cursor.execute('''CREATE TABLE IF NOT EXISTS objectives (
id integer primary key,
user_id text,
name text,
initial_date text,
final_date text,
initial_investment text,
recurring_investment text,
goal_value text,
foreign KEY(user_id) REFERENCES users(user_id)
)'''
)
db.commit()
db.close()
def add_objective(objective: Objective):
with app.app_context():
db = get_db()
cursor = db.cursor()
command = f"""INSERT INTO objectives VALUES (null,
'{objective.user_id}',
'{objective.name}',
'{objective.initial_date}',
'{objective.final_date}',
'{objective.initial_investment}',
'{objective.recurring_investment}',
'{objective.goal_value}')"""
cursor.execute(command)
db.commit()
db.close()
def update_objective(objective: Objective):
with app.app_context():
db = get_db()
cursor = db.cursor()
command = f"""UPDATE objectives SET
name = '{objective.name}',
initial_date = '{objective.initial_date}',
final_date = '{objective.final_date}',
initial_investment = '{objective.initial_investment}',
recurring_investment = '{objective.recurring_investment}',
goal_value = '{objective.goal_value}'
WHERE id = '{objective.id}'"""
cursor.execute(command)
db.commit()
db.close()
def add_user(users: User):
with app.app_context():
db = get_db()
cursor = db.cursor()
command = f"""INSERT INTO users VALUES ('{users.user_id}', '{users.name}', '{users.email}')"""
cursor.execute(command)
db.commit()
db.close()
def update_user(user: User):
with app.app_context():
db = get_db()
cursor = db.cursor()
command = f"""UPDATE users SET
name = '{user.name}',
email = '{user.email}'
WHERE id = '{user.user_id}'"""
cursor.execute(command)
db.commit()
db.close()
return cursor.rowcount > 0
def get_user(user_id):
with app.app_context():
db = get_db()
cursor = db.cursor()
command = f"""SELECT * FROM users WHERE user_id = '{user_id}'"""
cursor.execute(command)
user = cursor.fetchone()
db.close()
if user is None:
raise FileNotFoundError
return User(user[0], user[1], user[2])
def get_all_users():
with app.app_context():
db = get_db()
cursor = db.cursor()
command = f"""SELECT * FROM users"""
cursor.execute(command)
users = cursor.fetchall()
db.close()
return map(lambda user: User(user[0], user[1], user[2]), users)
def delete_user(user_id):
with app.app_context():
db = get_db()
cursor = db.cursor()
command = f"""DELETE FROM users WHERE user_id = '{user_id}'"""
cursor.execute(command)
db.commit()
db.close()
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
| brunotsantos1997/robson-api | app/data/database.py | database.py | py | 4,413 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "app.app.app.app_context",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "app.app.app",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "model.Objective",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "app.app.app.app_c... |
25743825361 | __all__ = [
"Quantizer"
]
from multiprocessing import Pool
import numpy as np
from ..base import Pipe
from ..config import read_config
from ..funcs import parse_marker
config = read_config()
D = config.get("jpeg2000", "D")
QCD = config.get("jpeg2000", "QCD")
delta_vb = config.get("jpeg2000", "delta_vb")
reserve_bits = config.get("jpeg2000", "reserve_bits")
min_task_number = config.get("accelerate", "codec_min_task_number")
max_pool_size = config.get("accelerate", "codec_max_pool_size")
class Quantizer(Pipe):
"""
Quantizer
"""
def __init__(self,
name="Quantizer",
mode="quantify",
irreversible=False,
accelerated=False,
D=D,
QCD=QCD,
delta_vb=delta_vb,
reserve_bits=reserve_bits):
"""
Init and set attributes of a quantizer.
Explicit Attributes
-------------------
name: str, ofptional
Name of the quantizer.
mode: str, optional
Mode of quantizer, must in ["quantify", "dequantify"].
irreversible: bool, optional
Whether the transform is lossy or lossless.
accelerated: bool, optional
Whether the process would be accelerated by subprocess pool.
D: int, optional
Number of resolution layers.
QCD: str, optional
Quantization default used to specify epsilon_b and mu_b of subband with lowest resolution.
delta_vb: float, optional
Used in dequantization, ranges from 0 to 1.
Implicit Attributes
-------------------
epsilon_b: int
Epsilon used to determine the quantization step of subband with lowest resolution, ranges from 0 to 2^5.
mu_b: int
Mu used to determine the quantization step of subband with lowest resolution, ranges from 0 to 2^11.
min_task_number: int
Minimun task number to start a pool.
max_pool_size: int
Maximun size of pool.
"""
super().__init__()
self.name = name
self.mode = mode
self.irreversible = irreversible
self.accelerated = accelerated
self.D = D
self.QCD = QCD
self.delta_vb = delta_vb
self.reserve_bits = reserve_bits
self.epsilon_b, self.mu_b = parse_marker(self.QCD)
self.min_task_number = min_task_number
self.max_pool_size = max_pool_size
def recv(self, X, **params):
self.logs.append("")
self.logs[-1] += self.formatter.message("Receiving data.")
self.received_ = X
self.accelerate(**params)
try:
self.irreversible = params["irreversible"]
self.logs[-1] += self.formatter.message("\"irreversible\" is specified as {}.".format(self.irreversible))
except KeyError:
self.logs[-1] += self.formatter.warning(
"\"irreversible\" is not specified, now set to {}.".format(self.irreversible))
try:
self.QCD = params["QCD"]
self.logs[-1] += self.formatter.message("\"QCD\" is specified as {}.".format(self.QCD))
except KeyError:
self.logs[-1] += self.formatter.warning("\"QCD\" is not specified, now set to {}.".format(self.QCD))
try:
self.D = params["D"]
self.logs[-1] += self.formatter.message("\"D\" is specified as {}.".format(self.D))
except KeyError:
self.logs[-1] += self.formatter.warning("\"D\" is not specified, now set to {}.".format(self.D))
try:
self.reserve_bits = params["reserve_bits"]
self.logs[-1] += self.formatter.message("\"reserve_bits\" is specified as {}.".format(self.reserve_bits))
except KeyError:
self.logs[-1] += self.formatter.warning("\"reserve_bits\" is not specified, now set to {}.".format(self.reserve_bits))
self.epsilon_b, self.mu_b = parse_marker(self.QCD)
delta_bs = []
for i in range(self.D):
delta_bs.append(2 ** -(self.epsilon_b + i - self.D) * (1 + self.mu_b / (2 ** 11)))
print(delta_bs)
if self.mode == "quantify":
if self.irreversible:
if self.accelerated:
self.logs[-1] += self.formatter.message("Using multiprocess pool to accelerate quantify.")
inputs = [[x, delta_bs] for x in X]
with Pool(min(self.task_number, self.max_pool_size)) as p:
X = p.starmap(_quantize, inputs)
else:
X = [_quantize(x, delta_bs) for x in X]
else:
if self.accelerated:
self.logs[-1] += self.formatter.message("Using multiprocess pool to accelerate quantify.")
inputs = [[x, self.reserve_bits, False] for x in X]
with Pool(min(self.task_number, self.max_pool_size)) as p:
X = p.starmap(_scale, inputs)
else:
X = [_scale(x, self.reserve_bits, False) for x in X]
elif self.mode == "dequantify":
try:
self.D = params["delta_vb"]
self.logs[-1] += self.formatter.message("\"delta_vb\" is specified as {}.".format(self.delta_vb))
except KeyError:
self.logs[-1] += self.formatter.warning("\"delta_vb\" is not specified, now set to {}.".format(self.delta_vb))
if self.irreversible:
if self.accelerated:
self.logs[-1] += self.formatter.message("Using multiprocess pool to accelerate dequantify.")
inputs = [[x, delta_bs, self.delta_vb] for x in X]
with Pool(min(self.task_number, self.max_pool_size)) as p:
X = p.starmap(_dequantize, inputs)
else:
X = [_dequantize(x, delta_bs, self.delta_vb) for x in X]
else:
if self.accelerated:
self.logs[-1] += self.formatter.message("Using multiprocess pool to accelerate dequantify.")
inputs = [[x, self.reserve_bits, True] for x in X]
with Pool(min(self.task_number, self.max_pool_size)) as p:
X = p.starmap(_scale, inputs)
else:
X = [_scale(x, self.reserve_bits, True) for x in X]
else:
msg = "Invalid attribute %s for quantizer %s. Quantizer.mode should be set to \"quantify\" or \"dequantify\"." % (self.mode, self)
self.logs[-1] += self.formatter.error(msg)
raise AttributeError(msg)
self.sended_ = X
return self
def _quantize(tile, delta_bs):
quantified_tile = [np.array(tile[0] / delta_bs[0], dtype=np.int64)]
for subbands, delta_b in zip(tile[1:], delta_bs):
quantified_tile.append(tuple([np.array(subband / delta_b, dtype=np.int64) for subband in subbands]))
return quantified_tile
def _dequantize(coeffs, delta_bs, delta_vb):
dequantified_tile = [(coeffs[0] + delta_vb) * delta_bs[0]]
for subbands, delta_b in zip(coeffs[1:], delta_bs):
dequantified_tile.append(tuple([(subband + delta_vb) * delta_b for subband in subbands]))
return dequantified_tile
def _scale(tile, reserve_bits, compress):
new_tile = []
if compress:
for subbands in tile:
if isinstance(subbands, tuple):
new_tile.append(tuple([np.array(subband / (10 ** reserve_bits), dtype=np.float64) for subband in subbands]))
else:
new_tile.append(np.array(subbands / (10 ** reserve_bits), dtype=np.float64))
else:
for subbands in tile:
if isinstance(subbands, tuple):
new_tile.append(tuple([np.array(subband * (10 ** reserve_bits), dtype=np.int64) for subband in subbands]))
else:
new_tile.append(np.array(subbands * (10 ** reserve_bits), dtype=np.int64))
return new_tile
| yetiansh/fpeg | fpeg/utils/quantify.py | quantify.py | py | 6,831 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "config.read_config",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "config.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "config.get",
"line_numbe... |
368888743 | from datetime import datetime
import re
import string
import pandas as pd
import time
from sympy import li
class Model_Trace_Analysis:
def __init__(self):
timestr = time.strftime("%Y%m%d_%H%M%S")
self.txt_path = './analysis/klm_bei_record/typing_log_'+str(timestr)+'.txt'
self.result_path = './analysis/klm_bei_record/human_factor_analysis_'+str(timestr)+'.xlsx'
self.f = open(self.txt_path, 'a+')
print("trace analysis initialisation")
def set_trace(self, boolTrace):
if boolTrace == True:
self.f = open(self.txt_path, 'a')
self.f.write("----------------Start a new log----------------\n")
self.f.close()
print("trace on")
else:
self.f = open(self.txt_path, 'a')
self.f.write("----------------End the log----------------\n")
self.f.close()
print("trace off")
return boolTrace
""" trace typing below """
def record_pressed_button(self, caption, wordPred, senPred, currentSen):
currentTime = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
self.f = open(self.txt_path, 'a')
self.f.write(currentTime + ' >> ' + caption + ', word pred: '+ '|'.join(wordPred).lower() + ', sentence pred: '+ '|'.join(senPred).lower()+ ', current sentence: '+ currentSen +'\n')
self.f.close()
""" trace typing above """
# Start trace analysis
""" Extract data from a line below """
def _extract_info_from_line(self, line):
dateTime = re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3}', line)
# time = re.search(r'\d{2}:\d{2}:\d{2}.d{3}', line)
dateTimeObj = datetime.strptime(dateTime.group(0), '%Y-%m-%d %H:%M:%S.%f')
keyType = line[line.find('>> ')+3 : line[line.find('>> ')+3:].find(': ')+line.find('>> ')+3]
keyValue = line[line.find(keyType)+len(keyType)+2 : line.find(', word pred: ')]
if keyValue != 'Speak' and keyValue != 'Space' and keyValue != 'Clear All':
keyValue.lower()
wordPred = line[line.find(', word pred: ')+len(', word pred: ') : line.find(', sentence pred: ')].lower()
senPred = line[line.find(', sentence pred: ')+len(', sentence pred: ') : line.find(', current sentence: ')].lower()
currentSentence = line[line.find(', current sentence: ')+len(', current sentence: ') : line.find('\n')].lower()
wordPredList = wordPred.split('|')
senPredList = senPred.split('|')
logDict = { 'timeTag': dateTimeObj,
'keyType': keyType,
'keyValue': keyValue,
'wordPred': wordPredList,
'sentencePred': senPredList,
'wordPredRoundIndex': 0,
'sentencePredRoundIndex': 0,
'currentSentence': currentSentence}
return logDict
""" Extract data from a line above """
""" Run trace analyse below """
def run_trace_analyse(self, traceLogFile, T_interrupt_threshold):
# print("In model_trace_analyse using file: " + traceLogFile)
with open(traceLogFile) as f:
lines = f.readlines()
boolLogData = False
logDictList = []
for line in lines:
if 'Start a new log' in line:
boolLogData = True
continue
if 'End the log' in line:
boolLogData = False
break
if boolLogData:
logDictList.append(self._extract_info_from_line(line))
print("log dictionary list:")
print(logDictList)
# seprate by sentences
oneSentence = []
# result summary
humanFactorsDistList = []
sentenceNum = 0
for line in logDictList:
if line['keyValue'] != 'Speak':
oneSentence.append(line)
else:
oneSentence.append(line)
# Start analysis this sentence
KS_all_temp, sentenceLengthInWord, sentenceLengthInChar, T_all_temp, KS_error_correction_temp, T_error_correction_temp, KS_irra_extra_temp, T_irra_extra_temp, T_interrupt_temp = self._cal_human_factors(oneSentence, T_interrupt_threshold)
humanFactorsDist = {
'sentenceNum': sentenceNum,
'sentence': oneSentence,
'KS_all': KS_all_temp,
'sentenceLengthInWord': sentenceLengthInWord,
'sentenceLengthInChar': sentenceLengthInChar,
'T_all': T_all_temp,
'KS_error_correction': KS_error_correction_temp,
'T_error_correction': T_error_correction_temp,
'KS_irra_extra': KS_irra_extra_temp,
'T_irra_extra': T_irra_extra_temp,
'T_interrupt': T_interrupt_temp,
'entry_rate': sentenceLengthInWord/(T_all_temp/60.0),
'KS_saving_rate': (sentenceLengthInChar-KS_all_temp)/sentenceLengthInChar,
'ETRI': 1 - T_error_correction_temp/(T_all_temp-T_interrupt_temp),
'EI': KS_error_correction_temp/(KS_all_temp-KS_irra_extra_temp),
'RI': 1 - KS_irra_extra_temp/(KS_all_temp-KS_error_correction_temp),
'II_KS': 1 - (KS_error_correction_temp+KS_irra_extra_temp)/KS_all_temp,
'II_T': 1 - (T_error_correction_temp+T_irra_extra_temp+T_interrupt_temp)/T_all_temp
}
humanFactorsDistList.append(humanFactorsDist)
print('Sentence ' + str(sentenceNum) + ' human factor: ')
print('ETRI = '+str(humanFactorsDist['ETRI']))
print('EI = '+str(humanFactorsDist['EI']))
print('RI = '+str(humanFactorsDist['RI']))
print('II_KS = '+str(humanFactorsDist['II_KS']))
print('II_T = '+str(humanFactorsDist['II_T']))
oneSentence = []
sentenceNum += 1
# Overall human performance
KS_all = 0
T_all = 0.0
KS_error_correction = 0
T_error_correction = 0.0
KS_irra_extra = 0
T_irra_extra = 0.0
T_interrupt = 0.0
for hf in humanFactorsDistList:
KS_all += hf['KS_all']
T_all += hf['T_all']
KS_error_correction += hf['KS_error_correction']
T_error_correction += hf['T_error_correction']
KS_irra_extra += hf['KS_irra_extra']
T_irra_extra += hf['T_irra_extra']
T_interrupt += hf['T_interrupt']
ETRI = 1 - T_error_correction/(T_all-T_interrupt),
EI = KS_error_correction/(KS_all-KS_irra_extra),
RI = 1 - KS_irra_extra/(KS_all-KS_error_correction),
II_KS = 1 - (KS_error_correction+KS_irra_extra)/KS_all,
II_T = 1 - (T_error_correction+T_irra_extra+T_interrupt)/T_all
print('Overall human factors: ')
print('ETRI = '+str(ETRI))
print('EI = '+str(EI))
print('RI = '+str(RI))
print('II_KS = '+str(II_KS))
print('II_T = '+str(II_T))
df = pd.DataFrame.from_dict(humanFactorsDistList)
df.to_excel(self.result_path)
""" Run trace analyse above """
def _add_to_pred_pool(self, index, timeTag, predList):
dict = {}
dict['round'] = index
dict['timeTag'] = timeTag
dict['prediction'] = predList
return dict
def _cal_human_factors(self, logDictList, T_interrupt_threshold):
KS_current = 0
sentenceLengthInWord = 0
sentenceLengthInChar = 0
if logDictList:
T_all = (logDictList[-1]['timeTag'] - logDictList[0]['timeTag']).total_seconds()
KS_irra_extra = 0
T_irra_extra = 0.0
word_irra_extra_buffer = []
KS_error_correction = 0
T_error_correction = 0.0
boolDeletionCombo = False
N_deletion_combo = 0
errorSentence = ""
lastSentence = ""
T_interrupt = 0.0
lastLogDict = {}
currentWordPredFistShowInRound = {}
currentSentencePredFistIrrationalAction = {}
currentWord = ""
currentSentence = ""
currentWordPredListDictList = []
currentSentencePredListDictList = []
currentWordPredRoundIndex = 0
currentSentencePredRoundIndex = 0
boolCheckWordPredIrrational = False
boolCheckSentencePredIrrational = False
boolFirstWord = True
for logDict in logDictList:
KS_current += 1
boolFinishAWord = False
boolFinishASentence = False
currentSentence = logDict['currentSentence']
if logDict.get('keyValue') == 'Space' or logDict.get('keyValue') == ',' or logDict.get('keyValue') == '.' or logDict.get('keyType') == 'word' or logDict.get('keyType') == 'sentence':
# indicate the end of a word
boolFinishAWord = True
boolFinishASentence = False
if logDict.get('keyValue') == 'Speak':
# indicate the end of a sentence
boolFinishAWord = True
boolFinishASentence = True
"""Form a sentence below """
if logDict.get('keyType') == 'key':
# Delete a letter
if logDict.get('keyValue') == '<-':
print("<-, Error sentence: "+logDict['currentSentence'])
if N_deletion_combo == 0:
errorSentence = lastSentence
N_deletion_combo += 1
if currentWord:
currentWord = currentWord[:-1]
# Typing a word
elif boolFinishAWord == False:
if N_deletion_combo != 0:
boolDeletionCombo = True
currentWord = currentWord + logDict.get('keyValue')
# Extend wordPred and sentencePred list
if boolFirstWord == True:
boolFirstWord = False
else:
currentWordPredRoundIndex += 1
currentSentencePredRoundIndex += 1
currentWordPredListDictList.append(self._add_to_pred_pool(index=currentWordPredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('wordPred')))
currentSentencePredListDictList.append(self._add_to_pred_pool(index=currentSentencePredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('sentencePred')))
currentWordPredPool = ''
currentSentencePredPool = ''
currentWordPredPoolDictList = []
currentSentencePoolDictList = []
for currentWordPred in currentWordPredListDictList:
currentWordPredPool = currentWordPredPool + str(currentWordPred['round']) +': '+ '||'.join(currentWordPred['prediction']) + '; '
tempDict = {
'round': currentWordPred['round'],
'prediction': currentWordPred['prediction']
}
currentWordPredPoolDictList.append(tempDict)
for currentSentencePred in currentSentencePredListDictList:
currentSentencePredPool = currentSentencePredPool + str(currentSentencePred['round']) + ': ' + '||'.join(currentSentencePred['prediction'])+ '; '
tempDict = {
'round': currentSentencePred['round'],
'prediction': currentSentencePred['prediction']
}
currentSentencePoolDictList.append(tempDict)
print('Typing a word, currentWord: '+currentWord)
print(' current word prediction: '+'|'.join(logDict.get('wordPred')))
print(' current sentence prediction: '+'|'.join(logDict.get('sentencePred')))
print(' current word prediction pool: '+currentWordPredPool)
print(' current sentence prediction pool: '+currentSentencePredPool)
print(' current sentence: '+currentSentence)
# currentSentence = logDict['currentSentence']
if boolDeletionCombo == True:
# Find when errorStartSentence first shows in log
errorStart = 0
if N_deletion_combo > 1:
errorStartSentence = errorSentence[:-(N_deletion_combo-1)]
else:
errorStartSentence = errorSentence
for tempLog in logDictList[:KS_current]:
errorStart += 1
if errorStartSentence == tempLog['currentSentence']:
break
# Calculate deletion and error time and KS
if errorStart != 0:
# current sentence is shown before, account error KS and time
KS_error_correction += KS_current - errorStart
T_error_correction += (logDict['timeTag'] - logDictList[errorStart-1]['timeTag']).total_seconds()
else:
# current sentence is not shown before, only add correction KS and time (select a pred word but delete part of it)
KS_error_correction += N_deletion_combo
T_error_correction += (logDict['timeTag'] - logDictList[KS_current-N_deletion_combo]['timeTag']).total_seconds()
boolDeletionCombo = False
N_deletion_combo = 0
errorSentence = ""
if boolFinishAWord == True and boolFinishASentence == False and logDict.get('keyType') != 'sentence':
# A word is finished, but the sentence is not finished
if logDict.get('keyType') == 'word':
# Use word prediction to finish the word
currentWord = logDict.get('keyValue').lower()
# Check word rationality
currentWord = currentWord.strip().lower()
currentWordFinishInRound = len(currentWordPredListDictList) # Finishes in the next round that is not recorded in this list, therefore we use the maximum round in the list plus one
currentWordFinishTime = logDict['timeTag']
currentWordPredFirstIrrationalAction = {}
for recordByRound in currentWordPredListDictList:
# Record the first miss of the predicted word
if currentWord in recordByRound['prediction'] and len(currentWord)>1:
if boolCheckWordPredIrrational == True:
currentWordPredFirstIrrationalAction = {
'round': recordByRound['round'],
'timeTag': recordByRound['timeTag']}
KS_irra_extra = KS_irra_extra + currentWordFinishInRound - currentWordPredFirstIrrationalAction['round']
T_irra_extra = T_irra_extra + (currentWordFinishTime-currentWordPredFirstIrrationalAction['timeTag']).total_seconds()
temp_irra_extra_dict = {
'round': KS_current, # in sentence level
'KS_irra_extra': KS_irra_extra,
'T_irra_extra': T_irra_extra
}
word_irra_extra_buffer.append(temp_irra_extra_dict)
print("-> Current KS_irra_extra = "+str(KS_irra_extra))
print("-> Current T_irra_extra = "+str(T_irra_extra))
break
boolCheckWordPredIrrational = True
boolCheckWordPredIrrational = False
# Renew wordPred list
currentWordPredRoundIndex = 0
currentWordPredListDictList = []
currentWordPredListDictList.append(self._add_to_pred_pool(index=currentWordPredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('wordPred')))
# Extend sentencePred list
currentSentencePredRoundIndex += 1
currentSentencePredListDictList.append(self._add_to_pred_pool(index=currentSentencePredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('sentencePred')))
print('A word is finished, currentWord: '+currentWord+', currentSentence: '+currentSentence+'*')
# print trace
currentWordPredPool = ''
currentSentencePredPool = ''
currentWordPredPoolDictList = []
currentSentencePredPoolDictList = []
for currentWordPred in currentWordPredListDictList:
currentWordPredPool = currentWordPredPool + str(currentWordPred['round']) +': '+ '||'.join(currentWordPred['prediction']) + '; '
tempDict = {
'round': currentWordPred['round'],
'prediction': currentWordPred['prediction']
}
currentWordPredPoolDictList.append(tempDict)
for currentSentencePred in currentSentencePredListDictList:
currentSentencePredPool = currentSentencePredPool + str(currentSentencePred['round']) + ': ' + '||'.join(currentSentencePred['prediction'])+ '; '
tempDict = {
'round': currentSentencePred['round'],
'prediction': currentSentencePred['prediction']
}
currentSentencePredPoolDictList.append(tempDict)
print(' current word prediction: '+'|'.join(logDict.get('wordPred')))
print(' current sentence prediction: '+'|'.join(logDict.get('sentencePred')))
print(' current word prediction pool: '+currentWordPredPool)
print(' current sentence prediction pool: '+currentSentencePredPool)
print(' current sentence: '+currentSentence)
currentWord = ''
# currentWordPredFistShowInRound = None
currentWordPredFistShowInRound = {}
if boolFinishAWord == True and boolFinishASentence == False and logDict.get('keyType') == 'sentence':
# A word is finished, and a sentence prediction is selected
currentWord = ''
# Check sentence rationality
currentSentenceFinishInRound = len(currentSentencePredListDictList) # Finishes in the next round that is not recorded in this list, therefore we use the maximum round in the list plus one
currentSentenceFinishTime = logDict['timeTag']
for recordByRound in currentSentencePredListDictList:
if boolCheckSentencePredIrrational == True:
currentSentencePredFistIrrationalAction = {
'round': recordByRound['round'],
'timeTag': recordByRound['timeTag']}
# Check if the sentence irrational action is after any word finishment actions
boolIrrationalInSentenceLevel = True
if word_irra_extra_buffer:
for buffer in reversed(word_irra_extra_buffer):
if buffer['round'] < currentSentencePredFistIrrationalAction['round']:
boolIrrationalInSentenceLevel = False
if boolIrrationalInSentenceLevel == False:
KS_irra_extra = buffer['KS_irra_extra'] + currentSentenceFinishInRound - currentSentencePredFistIrrationalAction['round']
T_irra_extra = buffer['T_irra_extra'] + (currentSentenceFinishTime-currentSentencePredFistIrrationalAction['timeTag']).total_seconds()
else:
KS_irra_extra = currentSentenceFinishInRound - currentSentencePredFistIrrationalAction['round']
T_irra_extra = (currentSentenceFinishTime-currentSentencePredFistIrrationalAction['timeTag']).total_seconds()
print("-> Current KS_irra_extra = "+str(KS_irra_extra))
print("-> Current T_irra_extra = "+str(T_irra_extra))
break
if currentSentence.strip() in recordByRound['prediction']:
boolCheckSentencePredIrrational = True
boolCheckSentencePredIrrational = False
# Renew the wordPred and sentencePred list
currentWordPredRoundIndex += 1
currentSentencePredRoundIndex += 1
currentWordPredListDictList.append(self._add_to_pred_pool(index=currentWordPredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('wordPred')))
currentSentencePredListDictList.append(self._add_to_pred_pool(index=currentSentencePredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('sentencePred')))
# print
currentWordPredPool = ''
currentSentencePredPool = ''
currentWordPredPoolDictList = []
currentSentencePredPoolDictList = []
for currentWordPred in currentWordPredListDictList:
currentWordPredPool = currentWordPredPool + str(currentWordPred['round']) +': '+ '||'.join(currentWordPred['prediction']) + '; '
tempDict = {
'round': currentWordPred['round'],
'prediction': currentWordPred['prediction']
}
currentWordPredPoolDictList.append(tempDict)
for currentSentencePred in currentSentencePredListDictList:
currentSentencePredPool = currentSentencePredPool + str(currentSentencePred['round']) + ': ' + '||'.join(currentSentencePred['prediction'])+ '; '
tempDict = {
'round': currentSentencePred['round'],
'prediction': currentSentencePred['prediction']
}
currentSentencePredPoolDictList.append(tempDict)
print('Select a sentence prediction: '+currentSentence)
print(' current word prediction: '+'|'.join(logDict.get('wordPred')))
print(' current sentence prediction: '+'|'.join(logDict.get('sentencePred')))
print(' current word prediction pool: ' + currentWordPredPool)
print(' current sentence prediction pool: ' + currentSentencePredPool)
print(' current sentence: '+currentSentence)
currentWordPredListDictList = []
currentSentencePredListDictList = []
currentWordPredRoundIndex = 1
currentSentencePredRoundIndex = 1
currentWordPredFistShowInRound = {}
currentSentencePredFistIrrationalAction = {}
if boolFinishAWord == True and boolFinishASentence == True:
# A sentence is finished
# Set wordPred and sentencePred to []
currentWordPredListDictList = []
currentSentencePredListDictList = []
currentWordPredRoundIndex = 0
currentSentencePredRoundIndex = 0
sentenceLengthInWord = lastSentence.count(' ') + 1
sentenceLengthInChar = len(lastSentence)
print('A sentence is finished, currentSentence: '+lastSentence+'*')
lastSentence = currentSentence
""" Form a sentence above """
""" Calculate interruption time below """
# Assume interruption does not happen in irrational and erronous actions
if lastLogDict:
timeDifference = (logDict['timeTag'] - lastLogDict['timeTag']).total_seconds()
if timeDifference > T_interrupt_threshold:
T_interrupt += timeDifference
""" Calculate interruption time above """
lastLogDict = logDict
print('KS_all = '+str(KS_current))
print('T_all = '+str(T_all))
print('KS_error_correction = '+str(KS_error_correction))
print('T_error_correction = '+str(T_error_correction))
print('KS_irra_extra = '+str(KS_irra_extra))
print('T_irra_extra = '+str(T_irra_extra))
print('T_interrupt = '+str(T_interrupt))
return KS_current, sentenceLengthInWord, sentenceLengthInChar, T_all, KS_error_correction, T_error_correction, KS_irra_extra, T_irra_extra, T_interrupt
| TuringFallAsleep/Tinkerable-AAC-Keyboard | develop/model_trace_analysis.py | model_trace_analysis.py | py | 25,662 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.strftime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "re.search",
... |
22204331425 | from ..service.client_service import ClientService
from flask_restx import Resource, Namespace, fields
from flask import jsonify, request
client_service = ClientService()
api = Namespace('Cliente', 'Operações relacionadas aos clientes da loja')
clients_fields = api.model('Cliente', {
'name': fields.String,
'cpf': fields.String
})
@api.route("/<int:id>")
class Client(Resource):
def get(self, id):
try:
client = client_service.get_client(id)
return jsonify({"data": client.serialize()})
except Exception as e:
return jsonify({'data': 'Cliente não disponível, {}'.format(str(e))})
@api.doc(body=clients_fields)
def put(self, id):
json = request.get_json(force=True)
try:
name = json['name']
cpf = json['cpf']
status = client_service.update_client(id, name, cpf)
if status:
return jsonify({'data': 'Cliente atualizado'})
else:
return jsonify({'data': 'Cliente não pôde ser atualizado'})
except:
return jsonify({'data': 'Cliente não pôde ser atualizado, campo necessário não foi enviado.'})
def delete(self, id):
status = client_service.delete_client(id)
if status:
return jsonify({'data': 'Cliente deletado'})
else:
return jsonify({'data': 'Cliente não pôde ser deletado'})
@api.route("")
class ClientList(Resource):
def get(self):
clients = client_service.get_clients()
return jsonify({'data': clients})
@api.doc(body=clients_fields)
def post(self):
try:
json = request.get_json(force=True)
name = json['name']
cpf = json['cpf']
client_service.insert_client(str(name), str(cpf))
return jsonify({'data': 'Cliente inserido com sucesso'})
except Exception as e:
print(str(e))
return jsonify({'data': 'Cliente não pôde ser inserido, {}'.format(str(e))}) | anaplb3/loja-api | app/main/controller/client_controller.py | client_controller.py | py | 2,076 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "service.client_service.ClientService",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_restx.Namespace",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields.String",
"line_number": 10,
"usage_type": "attribute"
},
... |
21546325152 | #This python script reads a CSV formatted table of methylation sites
#and attaches, depending on the coordinate, 1.5 kb flanking regions
#numbers listed
import csv # module to read CSV files
import re # module to search for regular expressions in files; not in use now but will set up for sophisticated search and grabs
import argparse # module to handle or "parse" arguments
import numpy
def split_head_body(g_file, head_a, body_a):
    """Split FASTA-formatted lines into headers and concatenated sequences.

    Appends each header (with '>' and newlines removed) to head_a and the
    concatenated sequence lines of each record to the matching slot of
    body_a. Both output lists are mutated in place.
    """
    record = -1
    for raw in g_file:
        if re.match('>', raw):
            record += 1
            head_a.append(raw.replace("\n", "").replace(">", ""))
            body_a.append("")
        else:
            body_a[record] = body_a[record] + raw.replace("\n", "")
#head_a.append(a)
#body_a.append(b)
#print (headers)
#We will set up the following command line arguments
#-f --csvfile : path to CSV input file
##-g --gff3file : path to GFF3 formatted annotation file
#-d --dnafile : path to FASTA formatted genome sequence file
#-o --output : path to CSV output file
#Let's see what the user has given us
optParse=argparse.ArgumentParser() #Create an argument parsing "object" called optParse
optParse.add_argument("-f","--csvfile",help="path to CSV input file")
optParse.add_argument("-s","--size",help="size of flanking region")
optParse.add_argument("-d","--dnafile",help="path to genome sequence file")
optParse.add_argument("-o","--output",help="path to CSV output file")
argstr=optParse.parse_args() # store the arguments given/passed by the user
#Now, get to work and open the files (no error handling for now)
csv_f=open(argstr.csvfile,'r') # open the input CSV file in read-only mode
out_f=open(argstr.output,'w')# open the output CSV file in write-only mode
#Now let's start reading the csv file, line by line
genome_f=open(argstr.dnafile,'r')
genome=genome_f.readlines()
head_a=[]
body_a=[]
split_head_body(genome,head_a,body_a)
flank_size=int(argstr.size)
#inp_csv_read=csv.reader(csv_f,dialect='excel')
#run a loop that iterates over all lines/rows in the CSV input
for line in csv_f:
#store field values in an array
inp_field=line.split(',')
coord=int(inp_field[1])
#Now, we know that the GenomeIDs are in Column 1
#So we will use the first element of the array and search for matches in the
#annotation file
if (inp_field[0]!='' and inp_field!="Name"):
if ((coord-flank_size)<1):
coord_s=0
else:
coord_s=coord-flank_size-1
if (coord+flank_size>len(body_a[head_a.index(inp_field[0])])):
coord_e=len(body_a[head_a.index(inp_field[0])])
else:
coord_e=coord+flank_size
seq=body_a[head_a.index(inp_field[0])][coord_s:coord_e]
#print(genome_id.group(0).replace(";","")+","+seq+","+line)
out_f.write(">"+inp_field[0]+":"+str(coord)+"\n"+seq+"\n\n")
csv_f.close()
out_f.close()
genome_f.close()
#count_fpkm.close()
quit()
| lanl/DNA_methylation_analysis | meth_site_flanking_seq.py | meth_site_flanking_seq.py | py | 2,848 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 36,
"usage_type": "call"
}
] |
40243581716 | from django.conf import settings
from django.db import models
import logging
import requests
log = logging.getLogger('genoome.twentythree.models')
class Token23(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, unique=True)
access_token = models.TextField()
refresh_token = models.TextField()
scope = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
class ClientError(Exception):
pass
def _api_get(self, url):
headers = {
'Authorization': 'Bearer %s' % self.access_token,
}
url = "https://api.23andme.com%s" % (url,)
response = requests.get(url, headers=headers)
if response.status_code != 200:
# https://api.23andme.com/docs/errors/
log.warning('23andme error response: %s\nurl:%s', response.text, url)
raise self.ClientError
return response.json()
@classmethod
def get_by_code(klass, user, code):
# https://api.23andme.com/docs/authentication/
# curl https://api.23andme.com/token/
# -d client_id=xxx \
# -d client_secret=yyy \
# -d grant_type=authorization_code \
# -d code=zzz \
# -d "redirect_uri=https://localhost:5000/receive_code/"
# -d "scope=basic%20rs3094315"
post_data = {
'client_id': settings.CLIENT_ID23,
'client_secret': settings.CLIENT_SECRET23,
'scope': 'basic genomes',
'code': code,
'grant_type': 'authorization_code',
'redirect_uri': settings.COMEBACK_URL23,
}
response = requests.post('https://api.23andme.com/token/', data=post_data, timeout=30.00, verify=True)
if response.status_code != 200:
log.error('Problem fetching token %s %s', response.status_code, response.text)
raise klass.ClientError
data = response.json()
initial = {
'access_token': data['access_token'],
'refresh_token': data['refresh_token'],
'scope': data['scope'],
}
instance, created = klass.objects.get_or_create(user=user, defaults=initial)
if not created:
log.warning('Updating initial token for %s', user)
for key in initial:
setattr(instance, key, initial[key])
instance.save()
log.debug('Token for %s ready!', user)
return instance
def refresh(self):
post_data = {
'client_id': settings.CLIENT_ID23,
'client_secret': settings.CLIENT_SECRET23,
'scope': self.scope,
'refresh_token': self.refresh_token,
'grant_type': 'refresh_token',
'redirect_uri': settings.COMEBACK_URL23,
}
response = requests.post('https://api.23andme.com/token/', data=post_data, timeout=30.00, verify=True)
if response.status_code != 200:
log.error('Problem refreshing token %s %s', response.status_code, response.text)
raise self.ClientError
data = response.json()
self.access_token = data['access_token']
self.refresh_token = data['refresh_token']
self.scope = data['scope']
self.save()
def get_genome(self, profile_id23):
# GET /1/genomes/profile_id/?unfiltered=...
# curl https://api.23andme.com/1/genomes/c44.../ -H "..."
# https://api.23andme.com/res/txt/snps.b4e00fe1db50.data
# scope required: genomes
data = self._api_get('/1/genomes/%s/' % (profile_id23,))
return data['genome']
def get_profiles(self):
# GET /1/user/
# # JSON response:
#{
# "id": "a42e94634e3f7683",
# "profiles": [
# {
# "genotyped": true,
# "id": "c4480ba411939067"
# }, ...
# ]
#}
# scope required: basic
data = self._api_get('/1/user/')
return data['profiles']
class CeleryTask23(models.Model):
    """Tracks the Celery pipeline that fetches and processes a user's 23andMe genome."""
    # Allowed values for the `status` field below.
    STATUS_CHOICES = (
        ('new', 'new'),
        ('fetching', 'fetching genome'),
        ('parsing', 'parsing genome'),
        ('error', 'error'),
    )
    # unique=True: at most one task row per user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, unique=True)
    # 23andMe profile id selected for processing.
    chosen_profile = models.TextField()
    # Celery task id of the genome-fetch job.
    fetch_task_id = models.TextField()
    analyze_order = models.ForeignKey('disease.AnalyzeDataOrder', null=True)
    # Celery task id of the processing job, once started.
    process_task_id = models.TextField(null=True)
    status = models.TextField(choices=STATUS_CHOICES, default='new')
| jiivan/genoomy | genoome/twentythree/models.py | models.py | py | 4,655 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db... |
# Scrape NAVER's homepage and print the current real-time top search keywords.
import requests
from datetime import datetime
from bs4 import BeautifulSoup
url = 'https://www.naver.com/'
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
# Real-time search keywords, selector copied from the page markup.
# To grab the whole ranking instead of a single entry, the selector was
# generalized from li:nth-child(1) to plain li (matches every list item).
names = soup.select('#PM_ID_ct > div.header > div.section_navbar > div.area_hotkeyword.PM_CL_realtimeKeyword_base > div.ah_roll.PM_CL_realtimeKeyword_rolling_base > div > ul > li > a > span.ah_k')
print(names)
# Strip the surrounding tags and print only the keyword text.
print(f'{datetime.now()} 기준 실시간 검색어') # f-string formatting (Python 3.6+)
for name in names:
    print(name.text)
## Python 3.0+ -> formatting with the format() method
#'{1} {0}'.format('one','two')
# Python 3.6+ -> f-string formatting
# a, b = 'one', 'two'
# f'{a}, {b}'
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime"... |
# Matched-filter demo: build a finite decaying pulse, add Gaussian noise,
# and define three candidate filters (h3 is the time-reversed signal,
# i.e. the matched filter).
import numpy as np
import math
import scipy.signal as juan
import matplotlib.pyplot as plt
# Pick a CJK-capable font so the Chinese plot labels render
# (alternatives: SimHei, SimSun, KaiTi, Microsoft YaHei).
plt.rcParams['font.sans-serif']=['KaiTi']
# Needed alongside the CJK font so minus signs still render correctly.
plt.rcParams['axes.unicode_minus'] = False
n1 = np.arange(0,32,1)
# Support mask for n in [8, 23], built with a list comprehension.
dom = [True if (i>=8 and i<=23) else False for i in n1]
# The signal: zero outside the mask, 0.7**n inside it.
s=0*(n1<=7)+(0.7**n1)*dom+0*(n1>=24)
# Zero-mean Gaussian noise; note np.random.normal's second argument is the
# standard deviation (0.004), not the variance as the original comment said.
noise=np.random.normal(0, 0.004, len(n1))
x=s+noise
# All filters share the same length (16 taps) for a fair comparison.
h1=(0.5**(15-n1))*(n1<=15)
h2=(0.9**(15-n1))*(n1<=15)
h3=0*(n1<=7)+(0.7**(31-n1))*dom+0*(n1>=24)
def convolve(h):
    """Convolve the module-level noisy signal x with h normalized to unit energy."""
    normalized = h / math.sqrt(sum(h ** 2))
    return juan.convolve(x, normalized, mode='full')
# Filter the noisy signal with each candidate filter and plot the results.
y1=convolve(h1)
y2=convolve(h2)
y3=convolve(h3)
# Figure 1: original vs. noisy signal.
fig1,(ax1,ax2)=plt.subplots(2,1)
ax1.stem(s,use_line_collection='True',label='原始信号')
ax2.stem(x,use_line_collection='True',label='加噪信号')
# Figure 2: the three filter outputs (h3 is the matched filter).
fig2,(ax3,ax4,ax5)=plt.subplots(3,1)
ax3.stem(y1,use_line_collection='True',label='h1滤波')
ax4.stem(y2,use_line_collection='True',label='h2滤波')
ax5.stem(y3,use_line_collection='True',label='匹配滤波')
# Draw the legends.
ax1.legend()
ax2.legend(loc="upper right")
ax3.legend()
ax4.legend()
ax5.legend()
plt.show()
| Mr-Da-Yang/Python_learning | 2019vacational_project/matplotlib/xinhaojiance_02.py | xinhaojiance_02.py | py | 1,391 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 6,
"usage_type": "attribute"
},
{
"ap... |
import asyncio
import logging
import threading
from functools import partial

import serial_asyncio
class AsyncSerialConnection(object):
    """Opens the ZiGate serial port on an asyncio loop and wires it to a device."""
    def __init__(self, loop, device, port='/dev/ttyUSB0'):
        # Schedule the serial connection on the loop (which may run in another
        # thread); 115200 baud matches the ZiGate hardware.
        coro = serial_asyncio.create_serial_connection(loop, ZiGateProtocol, port, baudrate=115200)
        futur = asyncio.run_coroutine_threadsafe(coro, loop)  # Requires python 3.5.1
        futur.add_done_callback(partial(self.bind_transport_to_device, device))
    @staticmethod
    def bind_transport_to_device(device, protocol_refs):
        """
        Bind device and protocol / transport once they are ready
        Update the device status @ start
        """
        # protocol_refs is the completed future; its result is the
        # (transport, protocol) pair from create_serial_connection.
        transport = protocol_refs.result()[0]
        protocol = protocol_refs.result()[1]
        protocol.device = device
        device.send_to_transport = transport.write
class ZiGateProtocol(asyncio.Protocol):
    """asyncio protocol forwarding received serial bytes to a ZiGate device.

    The device is attached after construction by
    AsyncSerialConnection.bind_transport_to_device.
    """

    def __init__(self):
        super().__init__()
        self.transport = None  # set by connection_made once the port is open

    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        # BUG FIX: the original handler used a bare `except:` and referenced
        # the undefined name ZGT_LOG, so any processing error raised a
        # NameError instead of being logged.
        try:
            self.device.read_data(data)
        except Exception:
            logging.getLogger('zigate').exception(
                'Error while processing received data')

    def connection_lost(self, exc):
        # Nothing to clean up here; reconnection is handled by the caller.
        pass
def start_loop(loop):
    # Thread target: block until loop.stop() is called, then close the loop.
    loop.run_forever()
    loop.close()
if __name__ == "__main__":
import logging
from pyzigate.interface import ZiGate
# Setup logging on screen, debug mode
l = logging.getLogger('zigate')
l.setLevel(logging.DEBUG)
l.addHandler(logging.StreamHandler())
# Asyncio based connection
zigate = ZiGate()
loop = asyncio.get_event_loop()
connection = AsyncSerialConnection(loop, zigate)
# Adding loop in a thread for testing purposes (i.e non blocking ipython console)
# not needed when full program is run within the event loop
t = threading.Thread(target=start_loop, args=(loop,))
t.start()
zigate.send_data('0010')
| elric91/ZiGate | examples/async_serial.py | async_serial.py | py | 1,929 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "serial_asyncio.create_serial_connection",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "asyncio.run_coroutine_threadsafe",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 12,
"usage_type": "call"
},... |
28890243109 | import torch
import math
from torch import nn
from torch.nn import functional as F
from data import utils as du
from model import ipa_pytorch
from model import frame_gemnet
from openfold.np import residue_constants
import functools as fn
Tensor = torch.Tensor
def get_index_embedding(indices, embed_size, max_len=2056):
    """Creates sine / cosine positional embeddings from a prespecified indices.

    Args:
        indices: offsets of size [..., N_edges] of type integer
        max_len: maximum length.
        embed_size: dimension of the embeddings to create
    Returns:
        positional embedding of shape [N, embed_size]
    """
    # Frequency indices 0 .. embed_size//2 - 1 on the same device as the input.
    freq_idx = torch.arange(embed_size // 2).to(indices.device)
    # Angle for frequency k: indices * pi / max_len^(2k / embed_size).
    angles = indices[..., None] * math.pi / (max_len ** (2 * freq_idx[None] / embed_size))
    angles = angles.to(indices.device)
    # Sine half followed by cosine half along the last axis.
    return torch.cat([torch.sin(angles), torch.cos(angles)], axis=-1)
def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
    """Sinusoidal embedding of diffusion timesteps (Transformer-style).

    Code from https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py

    Args:
        timesteps: 1-D tensor of timesteps (scaled up by max_positions).
        embedding_dim: output embedding width.
        max_positions: scaling constant for the frequency spectrum.
    Returns:
        [len(timesteps), embedding_dim] float tensor.
    """
    assert len(timesteps.shape) == 1
    scaled = timesteps * max_positions
    half_dim = embedding_dim // 2
    # Geometric frequency ladder: exp(-k * log(max_positions) / (half_dim - 1)).
    log_step = math.log(max_positions) / (half_dim - 1)
    freqs = torch.exp(
        torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -log_step)
    args = scaled.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(args), torch.cos(args)], dim=1)
    # Zero-pad one trailing column when the requested dimension is odd.
    if embedding_dim % 2 == 1:
        emb = F.pad(emb, (0, 1), mode='constant')
    assert emb.shape == (timesteps.shape[0], embedding_dim)
    return emb
class Embedder(nn.Module):
    """Builds per-residue (node) and per-pair (edge) input embeddings.

    Node features combine a timestep embedding, an optional residue-index
    encoding and an optional amino-acid-type embedding; edge features
    additionally combine relative sequence offsets, pairwise timestep
    embeddings and an optional C-alpha distogram.
    """

    def __init__(self, model_conf):
        super(Embedder, self).__init__()
        self._model_conf = model_conf
        self._embed_conf = model_conf.embed

        # Time step embedding
        index_embed_size = self._embed_conf.index_embed_size
        node_embed_dims = index_embed_size
        edge_in = index_embed_size * 2

        # Sequence index embedding
        if self._embed_conf.use_res_idx_encoding:
            node_embed_dims += index_embed_size
            edge_in += index_embed_size

        # Optional amino-acid-type embedding; input width restype_num + 1
        # (the extra slot is the unknown/padding type).
        if self._embed_conf.embed_aatype:
            aatype_embed_size = self._embed_conf.aatype_embed_size
            self.aatype_embedder = nn.Sequential(
                nn.Linear(residue_constants.restype_num+1, aatype_embed_size),
                nn.ReLU(),
                nn.Linear(aatype_embed_size, aatype_embed_size),
                nn.LayerNorm(aatype_embed_size),
            )
            node_embed_dims += aatype_embed_size
            # Edges see the aatype embedding of both endpoints.
            edge_in += aatype_embed_size * 2
        else:
            aatype_embed_size = 0

        node_embed_size = self._model_conf.node_embed_size
        self.node_embedder = nn.Sequential(
            nn.Linear(node_embed_dims, node_embed_size),
            nn.ReLU(),
            nn.Linear(node_embed_size, node_embed_size),
            nn.ReLU(),
            nn.Linear(node_embed_size, node_embed_size),
            nn.LayerNorm(node_embed_size),
        )

        if self._embed_conf.embed_distogram:
            edge_in += self._embed_conf.num_bins
        edge_embed_size = self._model_conf.edge_embed_size
        self.edge_embedder = nn.Sequential(
            nn.Linear(edge_in, edge_embed_size),
            nn.ReLU(),
            nn.Linear(edge_embed_size, edge_embed_size),
            nn.LayerNorm(edge_embed_size),
        )

        # Partials fix the embedding width so these can be called with just
        # the raw timestep / index tensors in forward().
        self.timestep_embedder = fn.partial(
            get_timestep_embedding,
            embedding_dim=self._embed_conf.index_embed_size
        )
        self.index_embedder = fn.partial(
            get_index_embedding,
            embed_size=self._embed_conf.index_embed_size
        )

    def forward(
            self,
            *,
            seq_idx,
            t,
            aatype,
            fixed_mask,
            ca_pos
    ):
        """Embeds a set of inputs

        Args:
            seq_idx: [..., N] Positional sequence index for each residue.
            t: Sampled t in [0, 1].
            aatype: [B, N, restype_num + 1] amino-acid-type features.
            fixed_mask: mask of fixed (motif) residues.
            ca_pos: C-alpha positions used for the optional distogram.

        Returns:
            node_embed: [B, N, D_node]
            edge_embed: [B, N, N, D_edge]
        """
        num_batch, num_res = seq_idx.shape
        init_node_embed = []

        # Embed timestep.
        t_embed = torch.tile(
            self.timestep_embedder(t)[:, None, :], (1, num_res, 1))
        # Set time step to epsilon=1e-5 for fixed residues.
        fixed_t_embed = self.timestep_embedder(torch.ones_like(t)*1e-5)
        fixed_t_embed = torch.tile(fixed_t_embed[:, None, :], (1, num_res, 1))
        fixed_mask = fixed_mask[..., None]
        prot_t_embed = (
            t_embed * (1 - fixed_mask)
            + fixed_t_embed * fixed_mask
        )
        init_node_embed.append(prot_t_embed)

        # Embed 1D sequence features.
        if self._embed_conf.use_res_idx_encoding:
            init_node_embed.append(self.index_embedder(seq_idx))
        if self._embed_conf.embed_aatype:
            aatype_embed = self.aatype_embedder(aatype)
            init_node_embed.append(aatype_embed)
        node_embed = self.node_embedder(
            torch.cat(init_node_embed, dim=-1).float())

        # Embed 2D sequence features.
        # Relative sequence offset for every residue pair, flattened to
        # [B, N^2] so the index embedder can process it.
        edge_attr = seq_idx[:, :, None] - seq_idx[:, None, :]
        edge_attr = edge_attr.reshape([num_batch, num_res**2])
        edge_embed = self.index_embedder(edge_attr)
        # Concatenation of both endpoints' timestep embeddings per pair.
        cross_t_embed = torch.cat([
            torch.tile(prot_t_embed[:, :, None, :], (1, 1, num_res, 1)),
            torch.tile(prot_t_embed[:, None, :, :], (1, num_res, 1, 1)),
        ], dim=-1).float().reshape([num_batch, num_res**2, -1])
        pair_feats = [
            edge_embed,
            cross_t_embed,
        ]
        if self._embed_conf.embed_aatype:
            cross_aatype = torch.cat([
                torch.tile(aatype_embed[:, :, None, :], (1, 1, num_res, 1)),
                torch.tile(aatype_embed[:, None, :, :], (1, num_res, 1, 1)),
            ], dim=-1).float()
            pair_feats.append(cross_aatype.reshape(
                [num_batch, num_res**2, -1]))
        if self._embed_conf.embed_distogram:
            # Binned pairwise C-alpha distances as additional pair features.
            dgram = du.calc_distogram(
                ca_pos,
                self._embed_conf.min_bin,
                self._embed_conf.max_bin,
                self._embed_conf.num_bins,
            )
            pair_feats.append(dgram.reshape([num_batch, num_res**2, -1]))
        edge_embed = torch.cat(pair_feats, dim=-1).float()
        edge_embed = self.edge_embedder(edge_embed)
        edge_embed = edge_embed.reshape(
            [num_batch, num_res, num_res, -1])
        return node_embed, edge_embed
class ReverseDiffusion(nn.Module):
    """Score network wrapper: embeds inputs and predicts reverse-diffusion outputs."""

    def __init__(self, model_conf):
        super(ReverseDiffusion, self).__init__()
        self._model_conf = model_conf
        self.embedding_layer = Embedder(model_conf)
        if self._model_conf.network_type == 'ipa':
            self.score_model = ipa_pytorch.IpaScore(model_conf)
        else:
            raise ValueError(
                f'Unrecognized network {self._model_conf.network_type}')

    def _apply_mask(self, aatype_diff, aatype_0, diff_mask):
        # Blend diffused and reference aatypes: diff_mask==1 keeps the
        # diffused value, diff_mask==0 keeps aatype_0.
        return diff_mask * aatype_diff + (1 - diff_mask) * aatype_0

    def _calc_trans_0(self, trans_score, trans_t, beta_t):
        # Literal computation: (score * (1 - e^{-beta}) + x_t) / e^{-beta/2},
        # with beta_t broadcast over the residue and coordinate dimensions.
        beta_t = beta_t[..., None, None]
        cond_var = 1 - torch.exp(-beta_t)
        return (trans_score * cond_var + trans_t) / torch.exp(-1/2*beta_t)

    def forward(self, input_feats):
        """forward computes the reverse diffusion conditionals p(X^t|X^{t+1})
        for each item in the batch

        Args:
            input_feats: dict of batched features (res_mask, fixed_mask,
                aatype_t, aatype_0, seq_idx, t, rigids_t, score scalings).

        Returns:
            model_out: dictionary of model outputs.
        """
        # Frames as [batch, res, 7] tensors.
        bb_mask = input_feats['res_mask'].type(torch.float32)  # [B, N]
        fixed_mask = input_feats['fixed_mask'].type(torch.float32)
        edge_mask = bb_mask[..., None] * bb_mask[..., None, :]

        # Padding needs to be unknown aatypes.
        pad_aatype = torch.eye(residue_constants.restype_num + 1)[-1][None]
        aatype_t = (
            input_feats['aatype_t'] * bb_mask[..., None]
            + pad_aatype[:, None, :].to(bb_mask.device) * (1 - bb_mask[..., None])
        ).type(torch.float32)

        # Initial embeddings of positonal and relative indices.
        init_node_embed, init_edge_embed = self.embedding_layer(
            seq_idx=input_feats['seq_idx'],
            t=input_feats['t'],
            aatype=aatype_t,
            fixed_mask=fixed_mask,
            ca_pos=input_feats['rigids_t'][..., 4:],
        )
        # Zero out embeddings of padded residues/pairs.
        edge_embed = init_edge_embed * edge_mask[..., None]
        node_embed = init_node_embed * bb_mask[..., None]

        # Run main network
        model_out = self.score_model(node_embed, edge_embed, input_feats)

        # Rescale score predictions by the standard deviations or variances.
        trans_score = model_out['trans_score'] * input_feats['trans_score_scaling'][:, None, None]
        rot_score = model_out['rot_score'] * input_feats['rot_score_scaling'][:, None, None]

        # Logits are of shape [..., 20] where 20 is the number of aatypes.
        if self._model_conf.aatype_prediction:
            aatype_logits = model_out['aatype']
            # Probs are of shape [..., 21] where 21 is the vocab size.
            # Last token is padding that we set to 0.
            aatype_probs = torch.nn.functional.softmax(aatype_logits, dim=-1)
        else:
            aatype_logits = input_feats['aatype_t'][..., :-1]
            aatype_probs = input_feats['aatype_t'][..., :-1]
        aatype_probs = torch.cat([
            aatype_probs,
            torch.zeros(aatype_probs.shape[:-1] + (1,)).to(
                aatype_probs.device)
        ], dim=-1)
        # Fixed (motif) residues keep their reference aatype distribution.
        aatype_probs = self._apply_mask(
            aatype_probs, input_feats['aatype_0'], 1 - fixed_mask[..., None])
        pred_out = {
            'psi': model_out['psi'],
            'rot_score': rot_score,
            'trans_score': trans_score,
            'aatype_logits': aatype_logits,
            'aatype_probs': aatype_probs,
        }
        if self._model_conf.direct_prediction:
            raise ValueError('Make compatible with masking')
        # NOTE(review): these keys must always be present in the score model
        # output — confirm against ipa_pytorch.IpaScore.
        pred_out['final_rigids'] = model_out['final_rigids']
        pred_out['rigids_update'] = model_out['rigids_update']
        if self._model_conf.dgram_prediction:
            pred_out['dgram'] = model_out['dgram']
        return pred_out
| blt2114/twisted_diffusion_sampler | protein_exp/model/reverse_se3_diffusion.py | reverse_se3_diffusion.py | py | 10,866 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "torch.Tensor",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.arange",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.sin",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number":... |
72225854504 | #!/usr/bin/python3
"""
Main 'BaseModel' class that defines all common
attributes/methods for other classes
"""
import uuid
import models
from datetime import datetime
class BaseModel:
    """ Base class constructor method """

    def __init__(self, *args, **kwargs):
        """ Base class initializes the objects """
        if kwargs:
            # Re-hydrate an instance from a to_dict()-style mapping.
            for key_inside, value_inside in kwargs.items():
                if key_inside == "created_at" or key_inside == "updated_at":
                    # Timestamps arrive as ISO-8601 strings; parse them back
                    # to datetime objects.
                    # NOTE: 'format' shadows the builtin of the same name.
                    format = "%Y-%m-%dT%H:%M:%S.%f"
                    datetime_object = datetime.strptime(value_inside, format)
                    setattr(self, key_inside, datetime_object)
                elif key_inside != "__class__":
                    setattr(self, key_inside, value_inside)
        else:
            # Brand new object: fresh id and timestamps, registered with
            # storage.  Re-hydrated objects (kwargs path above) are
            # deliberately NOT re-registered via storage.new().
            self.id = str(uuid.uuid4())
            self.created_at = datetime.now()
            self.updated_at = datetime.now()
            models.storage.new(self)

    def __str__(self):
        """ Method that returns a string representation """
        return f"[{self.__class__.__name__}] ({self.id}) {self.__dict__}"

    def save(self):
        """ Method that update the current date and time """
        self.updated_at = datetime.now()
        models.storage.save()

    def to_dict(self):
        """ Method that returns a dictionary representation """
        new_dict = dict(self.__dict__)
        new_dict["__class__"] = self.__class__.__name__
        # Timestamps are serialized as ISO-8601 strings; __init__ reverses
        # this when re-hydrating.
        new_dict["created_at"] = self.created_at.isoformat()
        new_dict["updated_at"] = self.updated_at.isoformat()
        return new_dict
| DevPacho/holbertonschool-AirBnB_clone | models/base_model.py | base_model.py | py | 1,607 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.date... |
9815950754 | import discord
import asyncio
import time
import sys
import os
import random
import aiohttp
# CLI layout: argv[1] = token, argv[2] = server id, argv[3] = token index
# (used only in the commented-out prints), argv[4] = 'True' to route traffic
# through a random proxy from proxies.txt.
useproxies = sys.argv[4]
if useproxies == 'True':
    # NOTE(review): the file handle is never closed and proxies.txt must exist.
    proxy_list = open("proxies.txt").read().splitlines()
    proxy = random.choice(proxy_list)
    con = aiohttp.ProxyConnector(proxy="http://"+proxy)
    client = discord.Client(connector=con)
else:
    client = discord.Client()
token = sys.argv[1]
SERVER = sys.argv[2]
tokenno = sys.argv[3]


@client.event
async def on_ready():  # the prints are commented out so it is silent and we can return to the menu. I also plan to do this for the main attacks, but i need a way to view the attacks first so we can stop them :/
    # Walk every text channel this account can post in and delete the
    # account's own messages found in the channel logs (three passes each).
    # print ("Token " + str(tokenno) + " logged in!")
    for channel in client.get_server(SERVER).channels:
        if channel.type != discord.ChannelType.text:
            continue
        myperms = channel.permissions_for(client.get_server(SERVER).get_member(client.user.id))
        if not myperms.send_messages:
            continue
        # NOTE(review): the inner async-for reuses the name 'x', shadowing the
        # pass counter (harmless for range(), but confusing).
        for x in range(3):
            async for x in client.logs_from(channel):
                channame = channel.name
                if x.author.id == str(client.user.id):
                    await client.delete_message(x)
                    # print ("Token " + str(tokenno) + ": Cleaned " + channame)
    await client.close()


try:
    # NOTE(review): bot=False performs a user-account ("selfbot") login; this
    # API was removed from modern discord.py and violates Discord's ToS.
    client.run(token, bot=False)
except Exception as c:
    print (c)
| X-Nozi/NoziandNiggarr24Toolbox | spammer/cleanup.py | cleanup.py | py | 1,446 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "aiohttp.ProxyConnector",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discord.Client",
... |
1119124359 | from typing import Iterable, Callable
import SearchSpace
from BenchmarkProblems.CombinatorialProblem import CombinatorialProblem
from Version_E.Feature import Feature
from Version_E.InterestingAlgorithms.Miner import FeatureSelector
from Version_E.MeasurableCriterion.CriterionUtilities import Balance, Extreme, All
from Version_E.MeasurableCriterion.Explainability import Explainability
from Version_E.MeasurableCriterion.ForSampling import Completeness, ExpectedFitness
from Version_E.MeasurableCriterion.GoodFitness import HighFitness, ConsistentFitness
from Version_E.MeasurableCriterion.MeasurableCriterion import MeasurableCriterion
from Version_E.PrecomputedFeatureInformation import PrecomputedFeatureInformation
from Version_E.PrecomputedPopulationInformation import PrecomputedPopulationInformation
from Version_E.Testing import Miners
def get_reference_features_for_regurgitation_sampling(problem: CombinatorialProblem,
                                                      termination_predicate: Callable,
                                                      ppi: PrecomputedPopulationInformation,
                                                      reference_miner_parameters: dict,
                                                      amount_to_return: int,
                                                      importance_of_explainability: float) -> list[Feature]:
    """Mine a reference feature set balancing explainability against fitness.

    The search criterion weighs Explainability against an equal-weight blend
    of HighFitness and ConsistentFitness; importance_of_explainability (in
    [0, 1]) controls the trade-off, with 1.0 meaning explainability only.

    Args:
        problem: the combinatorial problem the features refer to.
        termination_predicate: stop condition handed to the miner.
        ppi: precomputed statistics of the reference population.
        reference_miner_parameters: miner config decoded by Miners.decode_miner.
        amount_to_return: number of features to mine.
        importance_of_explainability: weight given to explainability.

    Returns:
        The mined features, used as references for regurgitation sampling.
    """
    search_criterion = Balance([
        Explainability(problem),
        Balance([
            HighFitness(),
            ConsistentFitness()],
            weights=[1, 1])],
        weights=[importance_of_explainability, 1 - importance_of_explainability])
    selector = FeatureSelector(ppi, search_criterion)
    miner = Miners.decode_miner(reference_miner_parameters,
                                selector=selector,
                                termination_predicate=termination_predicate)
    mined_features = miner.get_meaningful_features(amount_to_return)
    return mined_features
def regurgitation_sample(reference_features: Iterable[Feature],
                         termination_predicate: Callable,
                         original_ppi: PrecomputedPopulationInformation,
                         sampling_miner_parameters: dict,
                         amount_to_return: int) -> list[SearchSpace.Candidate]:
    """Sample candidate solutions guided by a set of reference features.

    A second miner searches for features that are complete (convertible to
    full candidates) and have high expected fitness, where the expectation is
    taken over the reference features' fitness statistics (weighted 1:2
    against completeness).  Only features that fully specify a candidate are
    converted and returned.

    Args:
        reference_features: features mined beforehand as quality references.
        termination_predicate: stop condition handed to the sampling miner.
        original_ppi: precomputed statistics of the original population.
        sampling_miner_parameters: miner config decoded by Miners.decode_miner.
        amount_to_return: number of features to sample before conversion.

    Returns:
        Candidates converted from the sampled (complete) features; may be
        fewer than amount_to_return.
    """
    reference_feature_pfi = PrecomputedFeatureInformation(original_ppi, reference_features)
    good_fitness = Balance([HighFitness(), ConsistentFitness()])
    generation_criterion = Balance([Completeness(),
                                    ExpectedFitness(criterion=good_fitness,
                                                    pfi=reference_feature_pfi)],
                                   weights=[2, 1])
    selector = FeatureSelector(original_ppi, generation_criterion)
    sampling_miner = Miners.decode_miner(sampling_miner_parameters,
                                         selector=selector,
                                         termination_predicate=termination_predicate)
    sampled_features = sampling_miner.get_meaningful_features(amount_to_return)
    sampled_candidates = [feature.to_candidate() for feature in sampled_features
                          if feature.is_convertible_to_candidate()]
    return sampled_candidates
| Giancarlo-Catalano/Featurer | Version_E/Sampling/RegurgitationSampler.py | RegurgitationSampler.py | py | 3,398 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "BenchmarkProblems.CombinatorialProblem.CombinatorialProblem",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "Version_E.PrecomputedPopulationInformation.PrecomputedPopulationInfor... |
43906527977 | """
Process command line arguments and/or load configuration file
mostly used by the test scripts
"""
import argparse
import sys
import os.path
from typing import Union
import yaml
def do_args(args=None):
    """Parse command line arguments for the pyspectrumscale tools.

    Args:
        args: optional list of argument strings; when None (the default,
            preserving the old no-argument behavior) sys.argv[1:] is parsed.
            Passing a list makes this function easy to unit test.

    Returns:
        argparse.Namespace with the parsed options.
    """

    def str_to_bool(value):
        # argparse's type=bool treats ANY non-empty string (including
        # "False") as True; parse common textual booleans explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', 'yes', 'on', '1'):
            return True
        if value.lower() in ('false', 'no', 'off', '0'):
            return False
        raise argparse.ArgumentTypeError(
            "expected a boolean value, got %r" % value)

    def bool_or_str(value):
        # --verify_method accepts either a boolean or a verification-method
        # string.  (The previous type=Union[bool, str] crashed whenever the
        # option was used: typing.Union is not a callable converter.)
        try:
            return str_to_bool(value)
        except argparse.ArgumentTypeError:
            return value

    # Parse command line arguments and modify config
    parser = argparse.ArgumentParser(
        prog='pyspectrumscale.py',
        description='Python Spectrum Scale Management API tools'
    )

    # Command line arguments
    parser.add_argument(
        "-v",
        "--verbose",
        dest='verbose',
        help="Increase output to stderr and stdout",
        action="store_true"
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest='quiet',
        help="Reduce output to stderr and stdout",
        action="store_true"
    )
    parser.add_argument(
        "-d",
        "--dry_run",
        dest='dryrun',
        help="Do a dry run, no changes written to Spectrum Scale or GPFS",
        action="store_true"
    )
    parser.add_argument(
        "-f",
        "--file",
        default='pyspectrumsscale.conf.yaml',
        dest='file',
        help="Specify a configuration file, default is pyspectrumsscale.conf.yaml",
    )
    parser.add_argument(
        "--filesystem",
        default=None,
        nargs='+',
        dest='filesystem',
        help="Specify a scale filesystem",
    )
    parser.add_argument(
        "--fileset",
        default=None,
        nargs='+',
        dest='fileset',
        help="Specify a scale fileset, requires a filesystem",
    )
    parser.add_argument(
        "--path",
        default=None,
        dest='path',
        help="Specify a scale filesystem, requires a filesystem",
    )
    parser.add_argument(
        "--parent",
        default=None,
        dest='parent',
        help="Specify a scale fileset parent",
    )
    parser.add_argument(
        "--comment",
        default=None,
        dest='comment',
        help="Specify a scale fileset comment",
    )
    parser.add_argument(
        '-s',
        '--server',
        default=None,
        type=str,
        dest='server',
        help="Hostname of Spectrum Scale Management server"
    )
    parser.add_argument(
        '-u',
        '--user',
        default=None,
        type=str,
        dest='user',
        help="The username used to connect to the Spectrum Scale Management server"
    )
    parser.add_argument(
        '-p',
        '--password',
        default=None,
        type=str,
        dest='password',
        help="The password used to connect to the Spectrum Scale Management server"
    )
    parser.add_argument(
        '--port',
        default=None,
        type=str,
        dest='port',
        help="The port used to connect to the Spectrum Scale Management server"
    )
    parser.add_argument(
        '--version',
        default=None,
        type=str,
        dest='version',
        help="The Spectrum Scale Management server API version"
    )
    parser.add_argument(
        '--verify_ssl',
        default=None,
        type=str_to_bool,
        dest='verify_ssl',
        help=(
            "If true the SSL certificate of the"
            " Spectrum Scale Management server will be verified"
        )
    )
    parser.add_argument(
        '--verify_warnings',
        default=None,
        type=str_to_bool,
        dest='verify_warnings',
        help=(
            "If false warnings about the SSL state of "
            "the Spectrum Scale Management server will be silenced"
        )
    )
    parser.add_argument(
        '--verify_method',
        default=None,
        type=bool_or_str,
        dest='verify_method',
        help=(
            "The method used to validate the SSL state of "
            "the Spectrum Scale Management server"
        )
    )

    # Positional commands
    parser.add_argument(
        dest='command',
        help='Command help',
        default=None,
        nargs='?',
        type=str,
        choices=[
            'dumpconfig',
            'connectiontest'
        ]
    )

    return parser.parse_args(args)
# Create the CONFIG to be imported elsewhere
# Set defaults
CONFIG = {
    'scaleserver': {
        'host': 'scaleserver.example.org',
        'user': 'username',
        'password': None,
        'port': 443,
        'version': 'v2',
        'verify_ssl': True,
        'verify_method': True,
        'verify_warnings': True
    },
}

# NOTE: everything below runs at import time (argument parsing, file I/O and
# possibly sys.exit) — importing this package has side effects.
ARGS = do_args()

# Override configuration defaults with values from the config file
if os.path.isfile(ARGS.file):
    with open(ARGS.file, 'r') as configfile:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted files; consider yaml.safe_load.
        CONFIG.update(yaml.load(configfile))

# Override configuration loaded from file with command line arguments
if ARGS.server:
    CONFIG['scaleserver']['host'] = ARGS.server
if ARGS.user:
    CONFIG['scaleserver']['user'] = ARGS.user
if ARGS.password:
    CONFIG['scaleserver']['password'] = ARGS.password
if ARGS.port:
    CONFIG['scaleserver']['port'] = ARGS.port
if ARGS.version:
    CONFIG['scaleserver']['version'] = ARGS.version
# This one can be bool or str values
if ARGS.verify_method is not None:
    CONFIG['scaleserver']['verify_method'] = ARGS.verify_method
if ARGS.verify_ssl is not None:
    CONFIG['scaleserver']['verify_ssl'] = ARGS.verify_ssl
if ARGS.verify_warnings is not None:
    CONFIG['scaleserver']['verify_warnings'] = ARGS.verify_warnings

# If there's no config file, write one
if not os.path.isfile(ARGS.file):
    print(
        "The configuration file %s was missing,"
        " wrote default configuration to file" %
        ARGS.file
    )
    with open(ARGS.file, 'w') as configfile:
        yaml.dump(CONFIG, configfile, default_flow_style=False)
    # Exit so the user can review/edit the freshly written defaults first.
    sys.exit(0)

# Set state from command line
CONFIG['command'] = ARGS.command
CONFIG['dryrun'] = ARGS.dryrun
CONFIG['filesystem'] = ARGS.filesystem
CONFIG['fileset'] = ARGS.fileset
CONFIG['path'] = ARGS.path
CONFIG['parent'] = ARGS.parent
CONFIG['comment'] = ARGS.comment
| Aethylred/pyspectrumscale | pyspectrumscale/configuration/__init__.py | __init__.py | py | 6,045 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "os.path.path.isfile",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "os.path.pa... |
14997087223 | import typing
from typing import Any, Callable, List, Tuple, Union
import IPython.display as display
import cv2
import numpy as np
import os, sys
from PIL import Image
from .abc_interpreter import Interpreter
from ..data_processor.readers import preprocess_image, read_image, restore_image, preprocess_inputs
from ..data_processor.visualizer import visualize_heatmap
class ScoreCAMInterpreter(Interpreter):
    """
    Score CAM Interpreter.

    Score-CAM weights each channel of a target feature map by the model's
    confidence on the input masked with that (normalized, upsampled) channel,
    then accumulates the weighted channels into a saliency map.

    More details regarding the Score CAM method can be found in the original paper:
    https://arxiv.org/abs/1910.01279
    """

    def __init__(self,
                 paddle_model,
                 trained_model_path,
                 use_cuda=True,
                 model_input_shape=[3, 224, 224]) -> None:
        """
        Initialize the ScoreCAMInterpreter.

        Args:
            paddle_model (callable): A user-defined function that gives access to model predictions.
                It takes the following arguments:

                - data: Data inputs.
                and outputs predictions. See the example at the end of ``interpret()``.
            trained_model_path (str): The pretrained model directory.
            use_cuda (bool, optional): Whether or not to use cuda. Default: True
            model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]
        """
        Interpreter.__init__(self)
        self.paddle_model = paddle_model
        self.trained_model_path = trained_model_path
        self.use_cuda = use_cuda
        # NOTE(review): the list default is shared across instances; callers
        # should not mutate it in place.
        self.model_input_shape = model_input_shape
        # The paddle executor/program are built lazily on first interpret().
        self.paddle_prepared = False

    def interpret(self,
                  inputs,
                  target_layer_name,
                  labels=None,
                  visual=True,
                  save_path=None):
        """
        Main function of the interpreter.

        Args:
            inputs (str or list of strs or numpy.ndarray): The input image filepath or a list of filepaths or numpy array of read images.
            target_layer_name (str): The target layer to calculate gradients.
            labels (list or tuple or numpy.ndarray, optional): The target labels to analyze. The number of labels should be equal to the number of images. If None, the most likely label for each image will be used. Default: None
            visual (bool, optional): Whether or not to visualize the processed image. Default: True
            save_path (str or list of strs or None, optional): The filepath(s) to save the processed image(s). If None, the image will not be saved. Default: None

        :return: interpretations/heatmap for each image
        :rtype: numpy.ndarray

        Example::

            import interpretdl as it
            def paddle_model(image_input):
                import paddle.fluid as fluid
                class_num = 1000
                model = ResNet50()
                logits = model.net(input=image_input, class_dim=class_num)
                probs = fluid.layers.softmax(logits, axis=-1)
                return probs
            scorecam = it.ScoreCAMInterpreter(paddle_model,
                                              "assets/ResNet50_pretrained", True)
            scorecam.interpret(
                    'assets/catdog.png',
                    'res5c.add.output.5.tmp_0',
                    label=None,
                    visual=True,
                    save_path='assets/scorecam_test.jpg')
        """
        imgs, data, save_path = preprocess_inputs(inputs, save_path,
                                                  self.model_input_shape)

        b, c, h, w = data.shape
        self.target_layer_name = target_layer_name

        if not self.paddle_prepared:
            self._paddle_prepare()
        # Default to the model's top-1 prediction for each image.
        if labels is None:
            _, probs = self.predict_fn(data)
            labels = np.argmax(probs, axis=1)
        bsz = len(imgs)
        labels = np.array(labels).reshape((bsz, 1))

        feature_map, _ = self.predict_fn(data)
        interpretations = np.zeros((b, h, w))

        # Score-CAM loop: for every channel of the target feature map, mask
        # the input with the normalized channel and weight the channel by the
        # resulting class score.
        for i in range(feature_map.shape[1]):
            feature_channel = feature_map[:, i, :, :]
            # Upsample each image's channel to the input resolution.
            feature_channel = np.concatenate([
                np.expand_dims(cv2.resize(f, (h, w)), 0)
                for f in feature_channel
            ])
            # Min-max normalize to [0, 1] so the channel acts as a soft mask.
            norm_feature_channel = np.array(
                [(f - f.min()) / (f.max() - f.min())
                 for f in feature_channel]).reshape((b, 1, h, w))
            _, probs = self.predict_fn(data * norm_feature_channel)
            # Target-label score under the masked input.  (The comprehension's
            # 'i' is comprehension-scoped and does not clobber the channel index.)
            scores = [p[labels[i]] for i, p in enumerate(probs)]
            interpretations += feature_channel * np.array(scores).reshape((
                b, ) + (1, ) * (interpretations.ndim - 1))

        # ReLU then global min-max normalization across the batch.
        interpretations = np.maximum(interpretations, 0)
        interpretations_min, interpretations_max = interpretations.min(
        ), interpretations.max()

        if interpretations_min == interpretations_max:
            # Degenerate constant map: nothing meaningful to visualize.
            return None

        interpretations = (interpretations - interpretations_min) / (
            interpretations_max - interpretations_min)

        # Per-image min-max normalization for visualization.
        interpretations = np.array([(interp - interp.min()) /
                                    (interp.max() - interp.min())
                                    for interp in interpretations])

        for i in range(b):
            visualize_heatmap(interpretations[i], imgs[i], visual,
                              save_path[i])
        return interpretations

    def _paddle_prepare(self, predict_fn=None):
        """Build the paddle executor/program and bind ``self.predict_fn``.

        Args:
            predict_fn (callable, optional): custom prediction function; when
                None, one is constructed from ``self.paddle_model``.
        """
        if predict_fn is None:
            import paddle.fluid as fluid
            startup_prog = fluid.Program()
            main_program = fluid.Program()
            with fluid.program_guard(main_program, startup_prog):
                with fluid.unique_name.guard():
                    data_op = fluid.data(
                        name='data',
                        shape=[None] + self.model_input_shape,
                        dtype='float32')
                    probs = self.paddle_model(data_op)
                    if isinstance(probs, tuple):
                        probs = probs[0]
                    # Locate the target layer's output variable by its name
                    # (set in interpret() before this method runs).
                    trainable_vars = list(main_program.list_vars())
                    for v in trainable_vars:
                        if v.name == self.target_layer_name:
                            conv = v
                    main_program = main_program.clone(for_test=True)

            if self.use_cuda:
                gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
                place = fluid.CUDAPlace(gpu_id)
            else:
                place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            fluid.io.load_persistables(exe, self.trained_model_path,
                                       main_program)

            def predict_fn(data):
                # Returns (target-layer feature map, class probabilities).
                feature_map, probs_out = exe.run(main_program,
                                                 feed={'data': data},
                                                 fetch_list=[conv, probs])
                return feature_map, probs_out

        self.predict_fn = predict_fn
        self.paddle_prepared = True
| LoganCome/FedMedical | utils/InterpretDL/interpretdl/interpreter/score_cam.py | score_cam.py | py | 7,112 | python | en | code | 44 | github-code | 36 | [
{
"api_name": "abc_interpreter.Interpreter",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "abc_interpreter.Interpreter.__init__",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "abc_interpreter.Interpreter",
"line_number": 41,
"usage_type": "name"
... |
2214688634 | import argparse
from alarm import __version__
from alarm.constants import ALLOWED_EXTENSIONS, ON_WINDOWS
def parse_args(args):
    """Parse the alarm tool's command line arguments.

    Passing in args makes this easier to test:
    https://stackoverflow.com/a/18161115

    Args:
        args: list of argument strings (e.g. sys.argv[1:]).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(
        description="Play an alarm after N minutes",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Exactly one of seconds/minutes must be supplied.
    duration_group = parser.add_mutually_exclusive_group(required=True)
    duration_group.add_argument(
        "-s", "--seconds", type=int, help="Number of seconds before playing alarm"
    )
    duration_group.add_argument(
        "-m", "--minutes", type=int, help="Number of minutes before playing alarm"
    )
    # Foreground display vs. background run; background mode is not offered
    # on Windows.
    run_mode_group = parser.add_mutually_exclusive_group()
    if not ON_WINDOWS:
        run_mode_group.add_argument(
            "-b",
            "--background",
            action="store_true",
            default=False,
            help="Run timer in the background",
        )
    run_mode_group.add_argument(
        "-d",
        "--display_timer",
        action="store_true",
        default=False,
        help="Show timer in console",
    )
    # Mutually exclusive alarm sources: library directory, explicit file, or
    # spoken message.
    alarm_file_group = parser.add_mutually_exclusive_group()
    alarm_file_group.add_argument(
        "-l",
        "--song_library",
        help=(
            "Take a random song from a song library directory, "
            f"supported formats: {', '.join(sorted(ALLOWED_EXTENSIONS))}"
        ),
    )
    alarm_file_group.add_argument(
        "-f", "--file", help="File path to song to play as alarm"
    )
    alarm_file_group.add_argument(
        "-M", "--message", help="Set an audio message to play for alarm"
    )
    parser.add_argument(
        "-v", "--version", action="version", version=f"%(prog)s {__version__}"
    )
    parser.add_argument("-t", "--timeout", type=int, help="Stop alarm after N seconds")
    parser.add_argument(
        "-r",
        "--repeat",
        type=int,
        default=1,
        help="Repeat alarm cycle N times (only works in foreground)",
    )
    return parser.parse_args(args)
| hobojoe1848/pybites-alarm | alarm/cli.py | cli.py | py | 2,095 | python | en | code | null | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "alarm.constants.ON_WINDOWS",
"line_number": 25,
"usage_type": "name"
... |
37325991579 | import numpy as np
from zipline.pipeline import CustomFactor
from zipline.pipeline.data import USEquityPricing
class CCI(CustomFactor):
    """
    Commodity Channel Index

    Momentum indicator

    **Default Inputs:** USEquityPricing.close, USEquityPricing.high, USEquityPricing.low

    **Default Window Length:** 14

    http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci
    """
    inputs = [USEquityPricing.high, USEquityPricing.low, USEquityPricing.close]
    # One extra row so both today's CCI and yesterday's CCI can each use a
    # full 14-bar window.
    window_length = 14 + 1
    outputs = ['CCI_t_1', 'CCI_t']

    def compute(self, today, assets, out, high, low, close):
        # typical price matrix
        typical_prices = (high + low + close) / 3.
        # mean of each column
        # [-1:0:-1] selects the newest 14 rows (drops the oldest);
        # [-2::-1] selects the oldest 14 rows (drops the newest) -> t-1 window.
        mean_typical_t = np.nanmean(typical_prices[-1:0:-1], axis=0)
        mean_typical_t_1 = np.nanmean(typical_prices[-2::-1], axis=0)
        # mean deviation: average absolute distance from the window mean.
        mean_deviation_t = np.sum(
            np.abs(typical_prices[-1:0:-1] - np.tile(mean_typical_t, (len(typical_prices) - 1, 1))), axis=0) / (
                self.window_length - 1)
        mean_deviation_t_1 = np.sum(
            np.abs(typical_prices[-2::-1] - np.tile(mean_typical_t_1, (len(typical_prices) - 1, 1))), axis=0) / (
                self.window_length - 1)
        # CCI = (TP - SMA(TP)) / (0.015 * mean deviation)
        out.CCI_t[:] = (typical_prices[-1] - mean_typical_t) / (.015 * mean_deviation_t)
out.CCI_t_1[:] = (typical_prices[-2] - mean_typical_t_1) / (.015 * mean_deviation_t_1) | ahmad-emanuel/quant_trading_system | Indicators/CCI_self.py | CCI_self.py | py | 1,542 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "zipline.pipeline.CustomFactor",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "zipline.pipeline.data.USEquityPricing.high",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "zipline.pipeline.data.USEquityPricing",
"line_number": 17,
"... |
3119120370 | '''
参考资料:https://blog.csdn.net/weixin_45971950/article/details/122331273
'''
import cv2
def is_inside(o, i):
    """Return True when rectangle *o* lies strictly inside rectangle *i*.

    Both rectangles are (x, y, w, h) tuples as produced by OpenCV detectors.
    """
    inner_x, inner_y, inner_w, inner_h = o
    outer_x, outer_y, outer_w, outer_h = i
    left_ok = inner_x > outer_x
    top_ok = inner_y > outer_y
    right_ok = inner_x + inner_w < outer_x + outer_w
    bottom_ok = inner_y + inner_h < outer_y + outer_h
    return left_ok and top_ok and right_ok and bottom_ok
def draw_person(image, person):
    """Draw a 1-px bounding box for one (x, y, w, h) detection on *image* in place.

    The color (0, 0, 255) is red in OpenCV's BGR channel order.
    """
    x, y, w, h = person
    cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 1)
# Open the video source (despite the original comment about a camera, this
# reads a recorded video file).
cap = cv2.VideoCapture('20220827-093000-100000.mp4')
# Frames per second of the video.
fps = cap.get(cv2.CAP_PROP_FPS)
# Total number of frames in the video.
total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# Processing speed multiplier: 1 = one detection per second of video,
# 8 = one detection every 8 seconds of video.
speed = 8
# Pedestrian detection with OpenCV's HOG descriptor + default people SVM.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
for i in range(int(total_frame)):
    ret, frame = cap.read()
    if not ret:
        break
    # Run detection only every fps*speed frames.
    if i % (int(fps)*speed) == 0:
        # Detection: candidate rectangles and their confidence weights.
        found, w = hog.detectMultiScale(frame)
        #print(found, w)
        foundList = []
        # Drop detections that lie fully inside another detection.
        for ri, r in enumerate(found):
            flag = 0
            for qi, q in enumerate(found):
                if ri != qi and is_inside(r, q):
                    flag = 1
            if (flag == 0):
                foundList.append(r)
        for person in foundList:
            draw_person(frame, person)
        cv2.imshow("face", frame)
        # Quit on 'q' (only checked on displayed frames).
        if ord('q') == cv2.waitKey(1):
            break
# Release windows
cv2.destroyAllWindows()
# Release the capture handle
cap.release()
| ryan6liu/demo | facedetect/demo/demo_movedetect_hog_svm.py | demo_movedetect_hog_svm.py | py | 1,477 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.rectangle",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRA... |
24019764012 | import numpy as np
import random
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
from matplotlib.patches import Rectangle
# ========================== CONSTANTS ============================
L = 7
SHAPES = ['CUBE', 'SPHERE', 'EMPTY']
COLORS = ['R', 'G', 'B']
def get_shape_pattern(i_start, i_end, j_start, j_end):
    """Sample a random "border vs. interior" shape pattern for a rectangle.

    Returns a function (i, j) -> shape index: cells within a random
    thickness of the rectangle's border get one random shape (0 or 1),
    interior cells get another (0, 1 or 2 — where 2 means EMPTY).
    """
    border_shape = random.choice([0, 1])
    interior_shape = random.choice([0, 1, 2])
    thickness = random.choice([1, 2, 3])

    def on_border(i, j):
        # True when (i, j) lies within `thickness` of any of the four edges
        # of the rectangle [i_start..i_end] x [j_start..j_end].
        return (i - i_start < thickness
                or j - j_start < thickness
                or i_end - i < thickness
                or j_end - j < thickness)

    def pattern(i, j):
        return border_shape if on_border(i, j) else interior_shape

    return pattern
def get_color_pattern(i_start, i_end, j_start, j_end):
    """Return pattern(i, j) -> color index in {0, 1, 2}.

    Randomly picks a value function (a constant or a parity-alternating pair)
    and an index expression (row, column, or diagonal) to feed into it.
    """
    pattfs = [lambda x: 0,
              lambda x: 1,
              lambda x: 2,
              lambda x: 0 if x % 2 else 1,
              lambda x: 1 if x % 2 else 2,
              lambda x: 2 if x % 2 else 0,
              ]
    indexs = ['i', 'j', 'i+j']
    pattfunc = random.choice(pattfs)
    patt_id = random.choice(indexs)

    def pattern(i, j):
        # Map the chosen index expression explicitly instead of eval()'ing the
        # string: same result, but avoids dynamic code execution.
        index_value = {'i': i, 'j': j, 'i+j': i + j}[patt_id]
        return pattfunc(index_value)
    return pattern
# renders the program into a dictionary of (i,j) => (shape, color)
def render_shapes():
    """Sample a random rectangular patterned region.

    Returns a dict mapping (i, j) grid coordinates to (shape, color) tuples;
    cells whose shape resolves to 'EMPTY' are omitted.
    """
    def gen_range():
        # keep drawing endpoints until the span is at least 2 cells wide
        while True:
            start = random.choice([_ for _ in range(L)])
            end = random.choice([_ for _ in range(L)])
            if start + 2 <= end:
                return (start, end)

    i_start, i_end = gen_range()
    j_start, j_end = gen_range()
    shape_fun = get_shape_pattern(i_start, i_end, j_start, j_end)
    color_fun = get_color_pattern(i_start, i_end, j_start, j_end)

    cells = dict()
    for row in range(i_start, i_end + 1):
        for col in range(j_start, j_end + 1):
            shape_name = SHAPES[shape_fun(row, col)]
            color_name = COLORS[color_fun(row, col)]
            if shape_name != 'EMPTY':
                cells[(row, col)] = (shape_name, color_name)
    return cells
# draws the shapes onto a canvas
def draw(shapes, name):
    """Draw the shapes onto a matplotlib canvas and save it to drawings/<name>.png.

    shapes: dict mapping (x, y) grid coordinates to (shape, color) tuples.
    """
    R = 0.9 / 2 / L
    plt.figure()
    # plt.gca() no longer accepts Axes kwargs such as `aspect` (removed in
    # matplotlib >= 3.4); set the aspect ratio explicitly instead.
    currentAxis = plt.gca()
    currentAxis.set_aspect('equal')
    for coord in shapes:
        shape, color = shapes[coord]
        x, y = coord
        if shape == 'CUBE':
            currentAxis.add_patch(Rectangle((x/L, y/L), 2*R, 2*R, facecolor=color))
        if shape == 'SPHERE':
            currentAxis.add_patch(Circle((x/L+R, y/L+R), R, facecolor=color))
    plt.savefig(f'drawings/{name}.png')
    plt.close()
# generate a legal program, where legality is defined loosely
def gen_legal_shape():
    """Keep sampling until a render produces at least one visible cell."""
    while True:
        candidate = render_shapes()
        if candidate:
            return candidate
# turn shape into a cononical repr so to keep duplicate programs out
def shape_to_repr(shapes):
    """Canonical, hashable representation: a sorted tuple of (coord, (shape, color)) pairs."""
    return tuple(sorted(shapes.items()))
def unrepr_shape(shape_repr):
    """Inverse of shape_to_repr: rebuild the coord -> (shape, color) dict."""
    return {coord: cell for coord, cell in shape_repr}
if __name__ == '__main__':
    # sample one legal random scene and render it to drawings/prog.png
    shapes = gen_legal_shape()
    # print (shapes)
    # print (shape_to_repr(shapes))
    draw(shapes, 'prog')
| evanthebouncy/program_synthesis_pragmatics | version_space/grid.py | grid.py | py | 3,191 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "random.choice",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_n... |
34986315817 | from flask import Flask, render_template, request, url_for, flash, redirect
import sqlite3
from werkzeug.exceptions import abort
from flask_socketio import SocketIO
from engineio.payload import Payload
# NOTE(review): raised from engineio's default, presumably because the frequent
# mouse_position events exceed the per-payload packet limit - confirm.
Payload.max_decode_packets = 50
def get_db_connection():
    """Open the SQLite database with dict-like row access (sqlite3.Row)."""
    connection = sqlite3.connect('database.db')
    connection.row_factory = sqlite3.Row
    return connection
def get_post(post_id):
    """Fetch a single post row by id, aborting with 404 when it does not exist."""
    conn = get_db_connection()
    row = conn.execute('SELECT * FROM posts WHERE id = ?', (post_id,)).fetchone()
    conn.close()
    if row is None:
        abort(404)
    return row
# Flask application plus the Socket.IO wrapper used by the realtime handlers below.
app = Flask(__name__)
# NOTE(review): placeholder secret key - replace with a real secret before deploying.
app.config['SECRET_KEY'] = 'your secret key'
sio = SocketIO(app)
@app.route("/")
def index():
conn = get_db_connection()
posts = conn.execute('SELECT * FROM posts').fetchall()
conn.close()
return render_template('index.html', posts=posts)
@app.route("/drawing")
def drawing():
return render_template('drawing.html')
@app.route("/chat")
def chat():
return render_template('chat.html')
@app.route("/tracking")
def tracking():
return render_template('tracking.html')
@app.route('/<int:post_id>')
def post(post_id):
post = get_post(post_id)
return render_template('post.html', post=post)
@app.route('/create', methods=('GET', 'POST'))
def create():
    """Create a new post; re-renders the form with a flash message when the title is missing."""
    if request.method == 'POST':
        title = request.form['title']
        content = request.form['content']
        if not title:
            flash('Title is required!')
        else:
            conn = get_db_connection()
            # parameterized query - safe against SQL injection
            conn.execute('INSERT INTO posts (title, content) VALUES (?, ?)', (title, content))
            conn.commit()
            conn.close()
            return redirect(url_for('index'))
    return render_template('create.html')
@app.route('/<int:id>/edit', methods=('GET', 'POST'))
def edit(id):
    """Edit an existing post; 404s via get_post when the id is unknown."""
    post = get_post(id)
    if request.method == 'POST':
        title = request.form['title']
        content = request.form['content']
        if not title:
            flash('Title is required!')
        else:
            conn = get_db_connection()
            conn.execute('UPDATE posts SET title = ?, content = ?'
                         ' WHERE id = ?',
                         (title, content, id))
            conn.commit()
            conn.close()
            flash('updated!', 'success')
            return redirect(url_for('post', post_id=post['id']))
    return render_template('edit.html', post=post)
@app.route('/<int:id>/delete', methods=('POST',))
def delete(id):
    """Delete a post (POST only), flash a confirmation banner, and go home."""
    post = get_post(id)
    conn = get_db_connection()
    conn.execute('DELETE FROM posts WHERE id = ?', (id,))
    conn.commit()
    conn.close()
    flash('"{}" was successfully deleted!'.format(post['title']), 'danger')
    return redirect(url_for('index'))
@app.route("/chart")
def chartindex():
conn = get_db_connection()
postRows = conn.execute('SELECT COUNT(*) AS PostCount, strftime("%d-%m-%Y", created) AS PostDate FROM posts group by strftime("%d-%m-%Y", created)').fetchall()
commentRows = conn.execute('SELECT COUNT(*) AS CommentCount, strftime("%d-%m-%Y", created) AS CommentDate FROM comments group by strftime("%d-%m-%Y", created)').fetchall()
conn.close()
posts = [dict(row) for row in postRows]
comments = [dict(row) for row in commentRows]
jsonData = {"posts": posts, "comments": comments}
return render_template('chart.html', json=jsonData)
def messageReceived(methods=['GET', 'POST']):
    """Ack callback invoked after a 'my response' emit is delivered.

    NOTE(review): the `methods` parameter (and its mutable default) is never
    used - it looks copied from a route-decorator example; confirm before removing.
    """
    print('message was received!!!')
@sio.on('my event')
def handle_my_custom_event(json, methods=['GET', 'POST']):
    """Echo incoming chat payloads back to all clients as 'my response'."""
    print('received my event: ' + str(json))
    sio.emit('my response', json, callback=messageReceived)
# last reported mouse position per connected client, keyed by Socket.IO session id
clients = {}
@sio.on('mouse_position')
def handle_mouse_position(data):
    """Record a client's mouse position and broadcast the full map to everyone."""
    print('received mouse position: ' + str(data) + ' sid:' + request.sid)
    clients[request.sid] = data
    sio.emit('all_coords', clients)
if __name__ == '__main__':
    # run under the Socket.IO server (wraps app.run)
    sio.run(app, debug=True)
| JeroenMX/LearningPython | main.py | main.py | py | 3,952 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "engineio.payload.Payload.max_decode_packets",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "engineio.payload.Payload",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 10,
"usage_type": "call"
},
... |
40243457981 | import torch.nn as nn
import torch
import torchvision
import cv2
import time
import numpy as np
import os
# Class names for the detector; class ids produced by postprocess() index
# into this tuple when labels are drawn in vis().
YOEO_CLASSES = (
    "shark",
    "coral",
    "fish",
    "turtle",
    "manta ray",
)
def preproc(img, input_size, swap=(2, 0, 1)):
    """Letterbox-resize `img` to `input_size` and reorder axes for the network.

    The image is scaled (aspect ratio preserved) to fit inside `input_size`,
    pasted into the top-left corner of a canvas filled with the constant 114,
    and transposed with `swap` (default HWC -> CHW). Returns (padded_img, r)
    where `r` is the applied scale factor.
    """
    if len(img.shape) == 3:
        padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114
    else:
        padded_img = np.ones(input_size, dtype=np.uint8) * 114
    # scale so that the image fits entirely within the target canvas
    r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
    resized_img = cv2.resize(
        img,
        (int(img.shape[1] * r), int(img.shape[0] * r)),
        interpolation=cv2.INTER_LINEAR,
    ).astype(np.uint8)
    # paste the resized image into the top-left corner; the rest stays padding
    padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
    padded_img = padded_img.transpose(swap)
    # contiguous float32 layout as expected by torch.from_numpy downstream
    padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
    return padded_img, r
class ValTransform:
    """
    Validation/test-time image transform.

    Resizes and pads the image via `preproc`; when `legacy` is True it also
    reverses the channel order, scales to [0, 1], and applies the standard
    ImageNet mean/std normalization.

    Arguments:
        swap ((int,int,int)): final order of channels (default HWC -> CHW)
        legacy (bool): apply the legacy normalization described above

    Returns (from __call__):
        (img, labels): the transformed image and a dummy (1, 5) label array,
        kept only so the transform interface matches training-time transforms.
    """

    def __init__(self, swap=(2, 0, 1), legacy=False):
        self.swap = swap
        self.legacy = legacy

    # assume input is cv2 img for now
    def __call__(self, img, res, input_size):
        # `res` (ground-truth labels) is ignored at inference time
        img, _ = preproc(img, input_size, self.swap)
        if self.legacy:
            # reverse channel order (image is channel-first at this point;
            # presumably BGR -> RGB for cv2 inputs - confirm)
            img = img[::-1, :, :].copy()
            img /= 255.0
            img -= np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
            img /= np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
        return img, np.zeros((1, 5))
def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
    """Draw boxes with '<class>:<score>%' labels onto `img` (in place).

    Detections scoring below `conf` are skipped; box and label colors come
    from the per-class _COLORS table. Returns the annotated image.
    """
    for i in range(len(boxes)):
        box = boxes[i]
        cls_id = int(cls_ids[i])
        score = scores[i]
        if score < conf:
            continue
        x0 = int(box[0])
        y0 = int(box[1])
        x1 = int(box[2])
        y1 = int(box[3])
        color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
        text = "{}:{:.1f}%".format(class_names[cls_id], score * 100)
        # pick black or white text depending on the class color's brightness
        txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
        font = cv2.FONT_HERSHEY_SIMPLEX
        txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
        cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
        # filled, darkened background behind the label for readability
        txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
        cv2.rectangle(
            img,
            (x0, y0 + 1),
            (x0 + txt_size[0] + 1, y0 + int(1.5 * txt_size[1])),
            txt_bk_color,
            -1,
        )
        cv2.putText(
            img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1
        )
    return img
_COLORS = (
np.array(
[
0.000,
0.000,
1.000,
0.000,
0.447,
0.880,
0.929,
0.694,
0.125,
0.466,
0.674,
0.188,
0.494,
0.184,
0.556,
0.301,
0.745,
0.933,
0.635,
0.078,
0.184,
0.300,
0.300,
0.300,
0.600,
0.600,
0.600,
1.000,
0.000,
0.000,
1.000,
0.500,
0.000,
0.749,
0.749,
0.000,
0.000,
1.000,
0.000,
0.850,
0.325,
0.098,
0.667,
0.000,
1.000,
0.333,
0.333,
0.000,
0.333,
0.667,
0.000,
0.333,
1.000,
0.000,
0.667,
0.333,
0.000,
0.667,
0.667,
0.000,
0.667,
1.000,
0.000,
1.000,
0.333,
0.000,
1.000,
0.667,
0.000,
1.000,
1.000,
0.000,
0.000,
0.333,
0.500,
0.000,
0.667,
0.500,
0.000,
1.000,
0.500,
0.333,
0.000,
0.500,
0.333,
0.333,
0.500,
0.333,
0.667,
0.500,
0.333,
1.000,
0.500,
0.667,
0.000,
0.500,
0.667,
0.333,
0.500,
0.667,
0.667,
0.500,
0.667,
1.000,
0.500,
1.000,
0.000,
0.500,
1.000,
0.333,
0.500,
1.000,
0.667,
0.500,
1.000,
1.000,
0.500,
0.000,
0.333,
1.000,
0.000,
0.667,
1.000,
0.000,
1.000,
1.000,
0.333,
0.000,
1.000,
0.333,
0.333,
1.000,
0.333,
0.667,
1.000,
0.333,
1.000,
1.000,
0.667,
0.000,
1.000,
0.667,
0.333,
1.000,
0.667,
0.667,
1.000,
0.667,
1.000,
1.000,
1.000,
0.000,
1.000,
1.000,
0.333,
1.000,
1.000,
0.667,
1.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.143,
0.143,
0.143,
0.286,
0.286,
0.286,
0.429,
0.429,
0.429,
0.571,
0.571,
0.571,
0.714,
0.714,
0.714,
0.857,
0.857,
0.857,
0.000,
0.447,
0.741,
0.314,
0.717,
0.741,
0.50,
0.5,
0,
]
)
.astype(np.float32)
.reshape(-1, 3)
)
def postprocess(
    prediction, num_classes, conf_thre=0.7, nms_thre=0.45, class_agnostic=False
):
    """Filter raw detector output down to confident, NMS-deduplicated boxes.

    prediction: tensor shaped (batch, n_boxes, 5 + num_classes) with rows
    (cx, cy, w, h, objectness, per-class scores...). NOTE: `prediction` is
    modified in place (box coordinates are rewritten to corner format).
    Returns a list with one entry per image: either None (nothing passed the
    confidence threshold) or a tensor of rows
    (x1, y1, x2, y2, obj_conf, class_conf, class_pred).
    """
    # convert (cx, cy, w, h) center format to (x1, y1, x2, y2) corner format
    box_corner = prediction.new(prediction.shape)
    box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
    box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
    box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
    box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
    prediction[:, :, :4] = box_corner[:, :, :4]
    output = [None for _ in range(len(prediction))]
    for i, image_pred in enumerate(prediction):
        # If none are remaining => process next image
        if not image_pred.size(0):
            continue
        # Get score and class with highest confidence
        class_conf, class_pred = torch.max(
            image_pred[:, 5 : 5 + num_classes], 1, keepdim=True
        )
        # keep boxes whose combined (objectness * class) confidence passes the threshold
        conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= conf_thre).squeeze()
        # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
        detections = torch.cat((image_pred[:, :5], class_conf, class_pred.float()), 1)
        detections = detections[conf_mask]
        if not detections.size(0):
            continue
        if class_agnostic:
            # single NMS pass across all classes together
            nms_out_index = torchvision.ops.nms(
                detections[:, :4],
                detections[:, 4] * detections[:, 5],
                nms_thre,
            )
        else:
            # per-class NMS: boxes of different classes never suppress each other
            nms_out_index = torchvision.ops.batched_nms(
                detections[:, :4],
                detections[:, 4] * detections[:, 5],
                detections[:, 6],
                nms_thre,
            )
        detections = detections[nms_out_index]
        if output[i] is None:
            output[i] = detections
        else:
            output[i] = torch.cat((output[i], detections))
    return output
class Predictor(object):
    """Thin single-image inference wrapper around a YOLOX-style model.

    Handles preprocessing (ValTransform), the forward pass, NMS
    postprocessing, and drawing of results.
    """
    def __init__(
        self,
        model,
        num_classes,
        confthre,
        nmsthre,
        test_size,
        cls_names=YOEO_CLASSES,
    ):
        # model: callable taking a (1, C, H, W) float tensor, returning raw detections
        self.model = model
        self.cls_names = cls_names
        self.num_classes = num_classes
        self.confthre = confthre  # confidence threshold passed to postprocess()
        self.nmsthre = nmsthre    # NMS IoU threshold passed to postprocess()
        self.test_size = test_size  # (height, width) fed to the network
        self.preproc = ValTransform(legacy=False)
    def inference(self, img):
        """Run the model on `img` (file path or decoded image array).

        Returns (outputs, img_info): `outputs` is the postprocess() result and
        `img_info` records the original size, resize ratio, and raw image.
        """
        img_info = {"id": 0}
        if isinstance(img, str):
            img_info["file_name"] = os.path.basename(img)
            img = cv2.imread(img)
        else:
            img_info["file_name"] = None
        height, width = img.shape[:2]
        img_info["height"] = height
        img_info["width"] = width
        img_info["raw_img"] = img
        # same scale factor preproc() applies; kept to map boxes back later
        ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])
        img_info["ratio"] = ratio
        img, _ = self.preproc(img, None, self.test_size)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.float()
        # print(img.shape)
        with torch.no_grad():
            t0 = time.time()
            outputs = self.model(img)
            # if self.decoder is not None:
            #     outputs = self.decoder(outputs, dtype=outputs.type())
            outputs = postprocess(
                outputs,
                self.num_classes,
                self.confthre,
                self.nmsthre,
                class_agnostic=True,
            )
        return outputs, img_info
    def visual(self, output, img_info, cls_conf=0.35):
        """Draw one image's detections; returns (annotated_img, (bboxes, classes, scores)).

        `output` is one entry of the postprocess() list (a tensor or None);
        empty tensors are returned when there were no detections.
        """
        ratio = img_info["ratio"]
        img = img_info["raw_img"]
        if output is None:
            return img, (torch.empty(0), torch.empty(0), torch.empty(0))
        output = output.cpu()
        bboxes = output[:, 0:4]
        # map boxes from network input scale back to the original image scale
        bboxes /= ratio
        cls = output[:, 6]
        scores = output[:, 4] * output[:, 5]
        vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
        return vis_res, (bboxes, cls, scores)
def video_predict(
    video_file,
    out_path,
    model,
    num_classes,
    confthre,
    nmsthre,
    test_size,
    YOEO_CLASSES,
    ifps,
    verbose=False,
):
    """Run detection over a video, writing an annotated copy to `out_path`.

    Only a subset of frames (controlled by `ifps`) is run through the model;
    the rest are written through unchanged. Returns
    (bbox_class_score, origi_shape, fps, num_frames) where bbox_class_score
    is a list of (bboxes, classes, scores) tuples, one per inferred frame.

    NOTE(review): `index % (fps / ifps) == 0` uses a float modulus, so frames
    are only sampled exactly when fps is divisible by ifps - confirm intended.
    """
    predictor = Predictor(
        model, num_classes, confthre, nmsthre, test_size, YOEO_CLASSES
    )
    cap = cv2.VideoCapture(video_file)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
    origi_shape = (width, height)
    fps = round(cap.get(cv2.CAP_PROP_FPS))
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if verbose:
        print(num_frames, "frames detected!")
    bbox_class_score = []
    vid_writer = cv2.VideoWriter(
        out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
    )
    index = 0
    start_time = time.time()
    while True:
        success, img = cap.read()
        index += 1
        if success:
            if index % (fps / ifps) == 0:  # inference optimization
                frame_start_time = time.time()
                outputs, img_info = predictor.inference(img.copy())
                result_frame, results = predictor.visual(
                    outputs[0], img_info, predictor.confthre
                )
                vid_writer.write(result_frame)
                ch = cv2.waitKey(1)
                if ch == 27 or ch == ord("q") or ch == ord("Q"):
                    break
                if verbose:
                    print(
                        "--- Frame inferred in %0.2f seconds ---"
                        % (time.time() - frame_start_time)
                    )
                bbox_class_score.append(results)
            else:
                # non-sampled frame: copy through without inference
                vid_writer.write(img)
        if index > num_frames:
            break
    print("--- Completed in %0.2f seconds ---" % (time.time() - start_time))
    return (
        bbox_class_score,
        origi_shape,
        fps,
        num_frames,
    )
| teyang-lau/you-only-edit-once | src/utils/yolox_process.py | yolox_process.py | py | 13,535 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number... |
36956278049 | from suite_subprocess import suite_subprocess
from contextlib import contextmanager
from wtscenario import make_scenarios
import json, re, wiredtiger, wttest
# Shared base class used by verbose tests.
class test_verbose_base(wttest.WiredTigerTestCase, suite_subprocess):
    """Shared helpers for the verbose-messaging tests: JSON schema/category
    validation of event-handler messages, plus a context manager that opens a
    connection with a given verbose configuration and checks its stdout."""
    # The maximum number of lines we will read from stdout in any given context.
    nlines = 50000
    # The JSON schema we expect all messages to follow. Captures all possible fields, detailing
    # each field's name, associated type and whether we always expect for that field to be
    # present.
    expected_json_schema = {
        'category': {'type': str, 'always_expected': True },
        'category_id': {'type': int, 'always_expected': True },
        'error_str': {'type': str, 'always_expected': False },
        'error_code': {'type': int, 'always_expected': False },
        'msg': {'type': str, 'always_expected': True },
        'session_dhandle_name': {'type': str, 'always_expected': False },
        'session_err_prefix': {'type': str, 'always_expected': False },
        'session_name': {'type': str, 'always_expected': False },
        'thread': {'type': str, 'always_expected': True },
        'ts_sec': {'type': int, 'always_expected': True },
        'ts_usec': {'type': int, 'always_expected': True },
        'verbose_level': {'type': str, 'always_expected': True },
        'verbose_level_id': {'type': int, 'always_expected': True },
    }
    # Validates the JSON schema of a given event handler message, ensuring the schema is consistent and expected.
    def validate_json_schema(self, json_msg):
        """Assert every field in json_msg is known and correctly typed, and
        that no always-expected field is missing."""
        expected_schema = dict(self.expected_json_schema)
        for field in json_msg:
            # Assert the JSON field is valid and expected.
            self.assertTrue(field in expected_schema, 'Unexpected field "%s" in JSON message: %s' % (field, str(json_msg)))
            # Assert the type of the JSON field is expected.
            self.assertEqual(type(json_msg[field]), expected_schema[field]['type'],
                    'Unexpected type of field "%s" in JSON message, expected "%s" but got "%s": %s' % (field,
                        str(expected_schema[field]['type']), str(type(json_msg[field])), str(json_msg)))
            expected_schema.pop(field, None)
        # Go through the remaining fields in the schema and ensure we've seen all the fields that are always expected be present
        # in the JSON message
        for field in expected_schema:
            self.assertFalse(expected_schema[field]['always_expected'], 'Expected field "%s" in JSON message, but not found: %s' %
                    (field, str(json_msg)))
    # Validates the verbose category (and ID) in a JSON message is expected.
    def validate_json_category(self, json_msg, expected_categories):
        """Assert the message's category name and numeric id are both present
        and consistent with the expected_categories mapping."""
        # Assert the category field is in the JSON message.
        self.assertTrue('category' in json_msg, 'JSON message missing "category" field')
        self.assertTrue('category_id' in json_msg, 'JSON message missing "category_id" field')
        # Assert the category field values in the JSON message are expected.
        self.assertTrue(json_msg['category'] in expected_categories, 'Unexpected verbose category "%s"' % json_msg['category'])
        self.assertTrue(json_msg['category_id'] == expected_categories[json_msg['category']],
            'The category ID received in the message "%d" does not match its expected definition "%d"' % (json_msg['category_id'], expected_categories[json_msg['category']]))
    def create_verbose_configuration(self, categories):
        """Build a 'verbose=[...]' connection config string, or '' for no categories."""
        if len(categories) == 0:
            return ''
        return 'verbose=[' + ','.join(categories) + ']'
    @contextmanager
    def expect_verbose(self, categories, patterns, expect_json, expect_output = True):
        """Open a connection with the given verbose categories, yield it, then
        assert every captured stdout line matches one of `patterns`."""
        # Clean the stdout resource before yielding the context to the execution block. We only want to
        # capture the verbose output of the using context (ignoring any previous output up to this point).
        self.cleanStdout()
        # Create a new connection with the given verbose categories.
        verbose_config = self.create_verbose_configuration(categories)
        # Enable JSON output if required.
        if expect_json:
            verbose_config += ",json_output=[message]"
        conn = self.wiredtiger_open(self.home, verbose_config)
        # Yield the connection resource to the execution context, allowing it to perform any necessary
        # operations on the connection (for generating the expected verbose output).
        yield conn
        # Read the contents of stdout to extract our verbose messages.
        output = self.readStdout(self.nlines)
        # Split the output into their individual messages. We want validate the contents of each message
        # to ensure we've only generated verbose messages for the expected categories.
        verbose_messages = output.splitlines()
        if expect_output:
            self.assertGreater(len(verbose_messages), 0)
        else:
            self.assertEqual(len(verbose_messages), 0)
        if len(output) >= self.nlines:
            # If we've read the maximum number of characters, its likely that the last line is truncated ('...'). In this
            # case, trim the last message as we can't parse it.
            verbose_messages = verbose_messages[:-1]
        # Test the contents of each verbose message, ensuring it satisfies the expected pattern.
        verb_pattern = re.compile('|'.join(patterns))
        # To avoid truncated messages, slice out the last message string in the output.
        for line in verbose_messages:
            # Check JSON validity
            if expect_json:
                try:
                    json.loads(line)
                except Exception as e:
                    self.prout('Unable to parse JSON message: %s' % line)
                    raise e
            self.assertTrue(verb_pattern.search(line) != None, 'Unexpected verbose message: ' + line)
        # Close the connection resource and clean up the contents of the stdout file, flushing out the
        # verbose output that occurred during the execution of this context.
        conn.close()
        self.cleanStdout()
# test_verbose01.py
# Verify basic uses of the verbose configuration API work as intended i.e. passing
# single & multiple valid and invalid verbose categories. These tests are mainly focused on uses
# of the interface prior to the introduction of verbosity levels, ensuring 'legacy'-style
# uses of the interface are still supported.
class test_verbose01(test_verbose_base):
    """Basic ('legacy'-style, pre-verbosity-level) uses of the verbose
    configuration API: single, multiple, empty, and invalid category lists."""
    format = [
        ('flat', dict(is_json=False)),
        ('json', dict(is_json=True)),
    ]
    scenarios = make_scenarios(format)
    collection_cfg = 'key_format=S,value_format=S'
    # Test use cases passing single verbose categories, ensuring we only produce verbose output for the single category.
    @wttest.skip_for_hook("tiered", "FIXME-WT-9809 - Fails for tiered")
    def test_verbose_single(self):
        """Each single category ('api', then 'compact') only produces messages of that category."""
        # Close the initial connection. We will be opening new connections with different verbosity settings throughout
        # this test.
        self.close_conn()
        # Test passing a single verbose category, 'api'. Ensuring the only verbose output generated is related to
        # the 'api' category.
        with self.expect_verbose(['api'], ['WT_VERB_API'], self.is_json) as conn:
            # Perform a set of simple API operations (table creations and cursor operations) to generate verbose API
            # messages.
            uri = 'table:test_verbose01_api'
            session = conn.open_session()
            session.create(uri, self.collection_cfg)
            c = session.open_cursor(uri)
            c['api'] = 'api'
            c.close()
            session.close()
        # Test passing another single verbose category, 'compact'. Ensuring the only verbose output generated is related to
        # the 'compact' category.
        with self.expect_verbose(['compact'], ['WT_VERB_COMPACT'], self.is_json) as conn:
            # Create a simple table to invoke compaction on. We aren't doing anything interesting with the table
            # such that the data source will be compacted. Rather we want to simply invoke a compaction pass to
            # generate verbose messages.
            uri = 'table:test_verbose01_compact'
            session = conn.open_session()
            session.create(uri, self.collection_cfg)
            session.compact(uri)
            session.close()
    # Test use cases passing multiple verbose categories, ensuring we only produce verbose output for specified categories.
    def test_verbose_multiple(self):
        """Passing ['api','version'] only produces messages of those two categories."""
        self.close_conn()
        # Test passing multiple verbose categories, being 'api' & 'version'. Ensuring the only verbose output generated
        # is related to those two categories.
        with self.expect_verbose(['api','version'], ['WT_VERB_API', 'WT_VERB_VERSION'], self.is_json) as conn:
            # Perform a set of simple API operations (table creations and cursor operations) to generate verbose API
            # messages. Beyond opening the connection resource, we shouldn't need to do anything special for the version
            # category.
            uri = 'table:test_verbose01_multiple'
            session = conn.open_session()
            session.create(uri, self.collection_cfg)
            c = session.open_cursor(uri)
            c['multiple'] = 'multiple'
            c.close()
    # Test use cases passing no verbose categories, ensuring we don't produce unexpected verbose output.
    def test_verbose_none(self):
        """An empty category list produces no verbose output at all."""
        self.close_conn()
        # Testing passing an empty set of categories. Ensuring no verbose output is generated.
        with self.expect_verbose([], [], self.is_json, False) as conn:
            # Perform a set of simple API operations (table creations and cursor operations). Ensuring no verbose messages
            # are generated.
            uri = 'table:test_verbose01_none'
            session = conn.open_session()
            session.create(uri, self.collection_cfg)
            c = session.open_cursor(uri)
            c['none'] = 'none'
            c.close()
    # Test use cases passing invalid verbose categories, ensuring the appropriate error message is
    # raised.
    def test_verbose_invalid(self):
        """An unknown category name makes wiredtiger_open raise a descriptive error."""
        self.close_conn()
        self.assertRaisesHavingMessage(wiredtiger.WiredTigerError,
            lambda:self.wiredtiger_open(self.home, 'verbose=[test_verbose_invalid]'),
            '/\'test_verbose_invalid\' not a permitted choice for key \'verbose\'/')
if __name__ == '__main__':
    # standard WiredTiger python test-suite entry point
    wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_verbose01.py | test_verbose01.py | py | 10,688 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "wttest.WiredTigerTestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "suite_subprocess.suite_subprocess",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 97,
"usage_type": "call"
},
{
"api_nam... |
12782770518 | from estimate_explosion_time.shared import get_custom_logger, main_logger_name, pickle_dir
import logging
logger = get_custom_logger(main_logger_name)
logger.setLevel(logging.INFO)
logger.debug('logging level is DEBUG')
from estimate_explosion_time.analyses.rappid_simulations import rappidDH
from estimate_explosion_time.core.fit_data.fitlauncher.fitlauncher import Fitter
from estimate_explosion_time.analyses.rappid_simulations.convert_to_pickle_files import \
    rappid_pkl_name, write_model_to_pickle, rappid_original_data
import os
# only include lightcurves with a peak magnitude brighter than this
peak_mag = 19
# take the original simulated data and convert it into pickles in the right format
# the path to the original data is to be specified in convert_to_pickle_files.py
for model_number in [3, 13]:
    if not os.path.isfile(rappid_pkl_name(model_number, peak_mag)):
        write_model_to_pickle(model_number, peak_mag)
# specify where to look for the SED files that were used in the simulation.
# That's necessary for getting the explosion time from the template
sed_directory = rappid_original_data + '/SEDs'
# get the lightcurves either generated using MOSFiT type 'mosfit'
# or using spectral templates type 'templates'
generated_with = 'mosfit'
# get the DataHandler object which takes care of all the book keeping
thisDH = rappidDH.get_dhandler(generated_with, sed_directory=sed_directory)
# get the explosion times for the simulations
thisDH.get_explosion_times_from_template(ncpu=45)
# fit the lightcurves with the desired method (only 'mosfit' is good!)
method = 'mosfit'
fitter = Fitter.get_fitter(method)
logger.debug(
    f'fitter method {fitter.method_name} \n'
    f'job-id {fitter.job_id}'
)
missing_indice_file = f'{pickle_dir}/{thisDH.name}/{fitter.method_name}/missing_indices.txt'
fitter.fit_lcs(thisDH,
               tasks_in_group=100,
               # missing_indice_file=missing_indice_file # to be used when repeating the fit
               )
# make a selection of lightcurves based on the available photometry
thisDH.select_and_adjust_selection_string()
# get the results
thisDH.results('mosfit')
| JannisNe/ztf_SN-LCs-explosion_time_estimation | estimate_explosion_time/analyses/rappid_simulations/complete_analyses.py | complete_analyses.py | py | 2,150 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "estimate_explosion_time.shared.get_custom_logger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "estimate_explosion_time.shared.main_logger_name",
"line_number": 4,
"usage_type": "argument"
},
{
"api_name": "logging.INFO",
"line_number": 5,
"usag... |
74205781223 | from typing import Iterable, Iterator
from PIL import Image # type: ignore
def resize_image_to_height(image: Image.Image, height: int) -> Image.Image:
    """Scale `image` to the given pixel height, preserving its aspect ratio."""
    scale = height / image.height
    return image.resize(size=(int(image.width * scale), height))
def concat_paired_images(
    left_image: Image.Image, right_image: Image.Image, mode: str
) -> Image.Image:
    """Place two images side by side on a new canvas of the given PIL mode.

    The canvas is as wide as both images combined and as tall as the taller
    one; any uncovered area keeps the mode's default (zero) fill.
    """
    dest_image_width = left_image.width + right_image.width
    dest_image_height = max(left_image.height, right_image.height)
    dest_image = Image.new(mode=mode, size=(dest_image_width, dest_image_height))
    dest_image.paste(im=left_image, box=(0, 0))
    dest_image.paste(im=right_image, box=(left_image.width, 0))
    return dest_image
def concat_images(images: Iterable[Image.Image], mode: str) -> Image.Image:
    """Concatenate all images horizontally, left to right.

    Raises StopIteration when `images` is empty (next() on an empty iterator).
    """
    images_itr = iter(images)
    return _concat_images(next(images_itr), images_itr, mode=mode)
def _concat_images(
    first_image: Image.Image, rest_images_itr: Iterator[Image.Image], mode: str
) -> Image.Image:
    """Recursively fold the iterator into one image, pairing left-to-right.

    NOTE(review): recursion depth grows with the number of images; very long
    sequences could hit the interpreter recursion limit - confirm acceptable.
    """
    try:
        return concat_paired_images(
            left_image=first_image,
            right_image=_concat_images(
                first_image=next(rest_images_itr),
                rest_images_itr=rest_images_itr,
                mode=mode,
            ),
            mode=mode,
        )
    except StopIteration:
        # iterator exhausted: the remaining tail is just `first_image`
        return first_image
| yskuniv/python-simple-web-counter | simple_web_counter/utils/image/image.py | image.py | py | 1,340 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.Image",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"li... |
6753606270 | from flask import Flask
# create Flask app object and init all modules
def create_app(config_object):
    """Application factory: build the Flask app and register all modules.

    config_object: import path or object accepted by Flask's config.from_object.
    Returns the configured Flask application.
    """
    from .main import create_module as main_create_module
    from app.api.v1 import create_module as api_v1_create_module
    # Init APP
    app = Flask(__name__)
    app.config.from_object(config_object)
    # Init modules
    main_create_module(app)
    app.logger.info("Init Main module")
    app.logger.info('Initializing API v1 module')
    api_v1_create_module(app)
    app.logger.info('API v1 module initialized')
    return app
| artem-shestakov/PIN_and_Hash | app/__init__.py | __init__.py | py | 545 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.api.v1",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.api.v1.config.from_object",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.api.v1.confi... |
40892450132 | import io
from pathlib import Path
import magic
from django.conf import settings
from smb.smb_structs import OperationFailure
from smb.SMBConnection import SMBConnection
def factory():
    """Create and connect an SMB session using credentials from settings.SAMBA."""
    config = settings.SAMBA
    connection = SMBConnection(
        config["user"],
        config["password"],
        # NOTE(review): "abcd" appears to be a placeholder client-machine name - confirm
        "abcd",
        config["server_name"],
        use_ntlm_v2=True,
    )
    connection.connect(config["server_ip"], int(config["port"]))
    return connection
def store_file_and_create_folders(conn, file_path, file_binary):
    """Write `file_binary` to `file_path` on the share, creating parent folders.

    `file_path` has the form /<share>/<dir>/.../<name>. createDirectory raises
    OperationFailure for folders that already exist, which is ignored.
    """
    parts = Path(file_path).parts
    share = parts[1]
    current = "/"
    for folder in parts[2:-1]:
        current += f"{folder}/"
        try:
            conn.createDirectory(share, current)
        except OperationFailure:
            pass
    conn.storeFile(share, f'/{"/".join(parts[2:])}', file_binary)
def list_path(conn, path):
    """List the contents of `path` (/<share>/<folders...>) on the SMB share."""
    parts = Path(path).parts
    return conn.listPath(parts[1], "/".join(parts[2:]))
def find_file_mime_type(conn, path):
    """Guess the MIME type of a remote file by sniffing its first 2048 bytes."""
    path_parts = Path(path).parts
    share = path_parts[1]
    file_path = "/".join(path_parts[2:])
    # Read first 2048 bytes
    file_buffer = io.BytesIO()
    conn.retrieveFileFromOffset(share, file_path, file_buffer, max_length=2048)
    file_buffer.seek(0)
    # libmagic only needs the leading bytes to identify the content type
    return magic.from_buffer(file_buffer.read(), mime=True)
def retrieve_file(conn, path):
    """Download the remote file at /<share>/<path...> into a rewound BytesIO."""
    parts = Path(path).parts
    buffer = io.BytesIO()
    conn.retrieveFile(parts[1], "/".join(parts[2:]), buffer)
    buffer.seek(0)
    return buffer
| pierrotlemekcho/exaged | sifapi/planning/samba.py | samba.py | py | 1,707 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.settings.SAMBA",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "smb.SMBConnection.SMBConnection",
"line_number": 12,
"usage_type": "call"
},
{
... |
import cv2
from keras.models import load_model
import numpy as np

# Live webcam demo: detect faces, classify gender and emotion, and draw
# the results on the preview window.  Press 'q' to quit.
video_capture = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX
# Haar cascade used for face detection.
face_detection = cv2.CascadeClassifier('model/face_detection/haarcascade_frontalface_default.xml')
# CNN that classifies gender from a 48x48 color crop.
gender_classifier = load_model('model/gender/simple_CNN.81-0.96.hdf5')
# CNN that classifies emotion from a 48x48 grayscale crop.
emotion_classifier = load_model('model/emotion/simple_CNN.530-0.65.hdf5')
gender_labels = {0: 'woman', 1: 'man'}  # fixed typo: was 'womam'
emotion_labels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
                  4: 'sad', 5: 'surprise', 6: 'neutral'}
while True:
    # Read one frame from the webcam; stop if the stream ends.
    ret, frame = video_capture.read()
    if not ret:
        break
    # The cascade operates on grayscale images.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces; each hit is an (x, y, w, h) bounding box.
    faces = face_detection.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        # Padded crops for the two models.  Clamp the start indices to 0:
        # negative values would wrap around in numpy slicing and produce
        # empty crops that crash cv2.resize for faces near the frame edge.
        face = frame[max(y - 60, 0):(y + h + 60), max(x - 30, 0):(x + w + 30)]
        gray_face = gray[max(y - 40, 0):(y + h + 40), max(x - 20, 0):(x + w + 20)]
        if face.size == 0 or gray_face.size == 0:
            continue
        # Gender prediction on the normalized color crop.
        face = cv2.resize(face, (48, 48))
        face = np.expand_dims(face, 0)
        face = face / 255.0
        gender_label_arg = np.argmax(gender_classifier.predict(face))
        gender = gender_labels[gender_label_arg]
        # Blue box/text for the first label, green for the second.
        if gender == gender_labels[0]:
            gender_color = (255, 0, 0)
        else:
            gender_color = (0, 255, 0)
        # Emotion prediction on the normalized grayscale crop
        # (extra trailing axis = single channel).
        gray_face = cv2.resize(gray_face, (48, 48))
        gray_face = gray_face / 255.0
        gray_face = np.expand_dims(gray_face, 0)
        gray_face = np.expand_dims(gray_face, -1)
        emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
        emotion = emotion_labels[emotion_label_arg]
        cv2.rectangle(frame, (x, y), (x + w, y + h), gender_color, 2)
        cv2.putText(frame, gender, (x, y - 30), font, .7, gender_color, 1, cv2.LINE_AA)
        cv2.putText(frame, emotion, (x + 90, y - 30), font, .7, gender_color, 1, cv2.LINE_AA)
    cv2.imshow('face', frame)
    # BUG FIX: the original test `cv2.waitKey(30) & ord('q') == 0xFF`
    # parses as `(waitKey(30) & ord('q')) == 0xFF`, which can never be
    # true, so pressing 'q' never exited the loop.
    if cv2.waitKey(30) & 0xFF == ord('q'):
        break
# Release the camera and close the preview window.
video_capture.release()
cv2.destroyAllWindows()
| HadXu/machine-learning | face_detection_and_emotion/video_test.py | video_test.py | py | 2,250 | python | en | code | 287 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.FONT_HERSHEY_SIMPLEX",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "ker... |
42600269199 | # Script to mess around with User authenticated spotify API
# For some reason, cannot authenticate with Google Chrome, so instead use Firefox
# http://spotipy.readthedocs.io/en/latest/
from pathlib import Path
from spotipy.oauth2 import SpotifyClientCredentials
import json
import spotipy
import time
import sys
import spotipy.util as util
import pandas as pd
# Set up directory and read credentials (first line of credentials.txt is
# the Spotify client secret).
directory = Path(__file__).resolve().parent
infile = open(directory / "credentials.txt", 'r').readlines()
username = "liltkrookie"
scope = "user-top-read"
sort = "tempo"  # NOTE(review): unused in this script
# NOTE(review): this prints the client secret to stdout -- looks like
# leftover debug output; consider removing.
print(infile[0])
# Send credentials and ping Spotify API.
# NOTE(review): readlines() keeps the trailing newline on infile[0];
# verify the secret does not need .strip() before being sent.
token = util.prompt_for_user_token(username,scope,client_id='8d3383fc5c434af5bf40fb7b2915c618',client_secret=infile[0],redirect_uri='http://localhost:8888/callback')
client_credentials_manager = SpotifyClientCredentials(client_id='8d3383fc5c434af5bf40fb7b2915c618', client_secret=infile[0])
sp = spotipy.Spotify(auth=token)
# Fetch the user's top 50 tracks over the medium term and collect URIs.
playlistdata = sp.current_user_top_tracks(limit=50, offset=0, time_range='medium_term')
playlist_json = json.dumps(playlistdata,indent=4)
track_list = json.loads(playlist_json)
num = len(track_list['items'])
tid=[]
for i in range(0, num):
    uri =track_list['items'][i]['uri']
    tid.append(uri)
# Song audio features (tempo, energy, ...) -> DataFrame keyed by uri.
analysis = sp.audio_features(tid)
sample_json = json.dumps(analysis)
data = pd.read_json(sample_json)
# print(data)
# Song metadata: name, uri, and primary artist per track.
analysis2 = sp.tracks(tid)
sample_json2 = json.dumps(analysis2)
data2 = json.loads(sample_json2)
songdata=[]
songlabels=['song','uri','artist']
for i in range(0, num):
    name=data2['tracks'][i]['name']
    uri =data2['tracks'][i]['uri']
    artist =data2['tracks'][i]['album']['artists'][0]['name']
    songdata.append([name, uri, artist])
song_metadata = pd.DataFrame.from_records(songdata, columns=songlabels)
# print(song_metadata)
# Join metadata with audio features on uri and export to Excel.
export = pd.merge(song_metadata,data, how = 'outer', on =['uri'])
writer = pd.ExcelWriter(directory / 'top_played_songs.xlsx')
export.to_excel(writer,'Sheet1')
# NOTE(review): ExcelWriter.save() is deprecated in newer pandas in
# favor of close() -- confirm against the pinned pandas version.
writer.save()
dfList = export['uri'].tolist()
print ("Completed download and export of top played songs")
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "spotipy.util.prompt_for_user_token",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "spotipy.util",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "spotipy.... |
10350771272 | import pandas as pd
from astroquery.simbad import Simbad
import astropy.units as u
from astropy.coordinates import SkyCoord
from astroquery.gaia import Gaia
import numpy as np
import argparse
import sys
from time import sleep
parser = argparse.ArgumentParser(description='SIXTH: get information from Simbad and GAIA TAP (DR1)')
parser.add_argument('-i', nargs=1, default=[0], help='start index', type=int)
parser.add_argument('-f', nargs=1, help='file', type=str)
args = parser.parse_args()
istart = args.i[0]
datx=pd.read_csv("../siwiyn/siwiyn.tsv",delimiter="|",comment="#")
#_RAJ2000|_DEJ2000|WDS|Name|HD|HIP|Date|PA|Sep|Dmag|Wave|FWHM|f_FWHM|SimbadName|_RA|_DE
mask=~datx.duplicated(subset='Name')
dat=datx[mask]
#dat=dat.set_index("Name")
#print(dat["Name"].values[0])
print(len(datx),len(dat))
#sys.exit()
#GAIA distance
#f=open("siwiyn_position.txt","a")
#f.write("System Number|name|RA(2000)|DEC(2000)|Simbad plx|GAIA plx|V|R|J|H|K"+"\n")
#f.close()
if args.f:
namelist=np.loadtxt(args.f[0],dtype=int)
else:
namelist=range(istart,len(dat))
for i,sysi in enumerate(namelist):
f=open("siwiyn_position.txt","a")
name=dat["Name"].values[sysi]
print(i,name)
sleep(1)
# if True:
try:
ra=dat["_RAJ2000"][sysi]
dec=dat["_DEJ2000"][sysi]
c = SkyCoord(ra+" "+dec, unit=(u.hourangle, u.deg))
width = u.Quantity(5, u.arcsec)
height = u.Quantity(5, u.arcsec)
#GAIA
r = Gaia.query_object_async(coordinate=c, width=width, height=height)
plx=None
if len(r["parallax"]) == 0:
sw = False
elif type(r["parallax"][0]) == np.float64:
plx=r["parallax"][0]
sw = True
else:
sw = False
print("GAIA",plx)
Simbad.SIMBAD_URL = "http://simbad.u-strasbg.fr/simbad/sim-script"
Simbad.add_votable_fields("parallax","flux(V)","flux(R)","flux(J)","flux(H)","flux(K)")
result_table = Simbad.query_region(c, radius='0d0m5s')
print(result_table)
if result_table is None:
plxs=np.nan
magcom="|||||"
elif len(result_table) == 1:
plxs=result_table["PLX_VALUE"].item()
V=result_table["FLUX_V"].item()
R=result_table["FLUX_R"].item()
J=result_table["FLUX_J"].item()
H=result_table["FLUX_H"].item()
K=result_table["FLUX_K"].item()
magcom="|"+str(V)+"|"+str(R)+"|"+str(J)+"|"+str(H)+"|"+str(K)
else:
plxs=result_table["PLX_VALUE"][0]
V=result_table["FLUX_V"][0]
R=result_table["FLUX_R"][0]
J=result_table["FLUX_J"][0]
H=result_table["FLUX_H"][0]
K=result_table["FLUX_K"][0]
magcom="|"+str(V)+"|"+str(R)+"|"+str(J)+"|"+str(H)+"|"+str(K)
#eplx=result_table["PLX_ERROR"].item()
if plxs == plxs:
f.write(str(sysi)+"|"+name+"|"+str(ra)+"|"+str(dec)+"|"+str(plxs)+"|"+str(plx)+magcom+"\n")
else:
f.write(str(sysi)+"|"+name+"|"+str(ra)+"|"+str(dec)+"|None|"+str(plx)+magcom+"\n")
except:
try:
ra=dat["_RAJ2000"][sysi]
dec=dat["_DEJ2000"][sysi]
c = SkyCoord(ra+" "+dec, unit=(u.hourangle, u.deg))
f.write(str(sysi)+"|"+str(ra)+"|"+str(dec)+"|||||||"+"\n")
except:
f.write(str(sysi)+"|||||||||"+"\n")
f.close()
| HajimeKawahara/LookAtThis | database/python/siwiyn_parallax.py | siwiyn_parallax.py | py | 3,494 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
42119246092 | import pygame
from settings import Settings
from pygame.sprite import Sprite
class Ship(Sprite):
    """The player's ship sprite: loads its image, tracks horizontal
    position as a float, and draws itself at the bottom of the screen."""
    def __init__(self, ai_game):
        """Initialize the ship and set its starting position.

        ai_game: the main game instance; provides the screen and settings.
        """
        super().__init__()
        self.screen = ai_game.screen
        self.settings = ai_game.settings
        self.screen_rect = ai_game.screen.get_rect()
        # Load the ship image and get its bounding rect.
        self.image = pygame.image.load('images/ship.bmp')
        self.rect = self.image.get_rect()
        # Start each new ship at the bottom center of the screen.
        self.rect.midbottom = self.screen_rect.midbottom
        # Store a float for the ship's exact horizontal position, since
        # rect coordinates are integers and ship_speed may be fractional.
        self.x = float(self.rect.x)
        # Movement flags, toggled by the keyboard event handlers.
        self.moving_right = False
        self.moving_left = False
    def update(self):
        """Move the ship per the movement flags, clamped to the screen edges."""
        if self.moving_right and self.rect.right < self.screen_rect.right:
            self.x += self.settings.ship_speed
        if self.moving_left and self.rect.left > 0:
            self.x -= self.settings.ship_speed
        # Propagate the float position back to the integer rect.
        self.rect.x = self.x
    def blitme(self):
        """Draw the ship at its current location."""
        self.screen.blit(self.image, self.rect)
    def center_ship(self):
        """Re-center the ship at the bottom of the screen."""
        self.rect.midbottom = self.screen_rect.midbottom
        self.x = float(self.rect.x)
{
"api_name": "pygame.sprite.Sprite",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 16,
"usage_type": "attribute"
}
] |
16173542417 | # pylint: disable=missing-docstring
"""This is a script to test the RecurrentEncoder module."""
import pickle
import pytest
import torch
import torch.nn as nn
from metarl.torch.embeddings import RecurrentEncoder
class TestRecurrentEncoder:
    """Test for RecurrentEncoder."""
    # yapf: disable
    @pytest.mark.parametrize(
        'input_dim, output_dim, hidden_sizes, num_tasks, num_seq', [
            (1, 1, (1, ), 1, 3),
            (3, 3, (3, ), 1, 5),
            (5, 5, (5, 5), 2, 4),
            (7, 7, (7, 5, 7), 2, 5),
            (9, 9, (9, 7, 5, 9), 3, 10),
        ])
    # yapf: enable
    def test_module(self, input_dim, output_dim, hidden_sizes, num_tasks,
                    num_seq):
        """Test forward method."""
        # All-ones input of shape (task, seq, input_dim); constant weights
        # (ones init) make the forward pass deterministic.
        input_val = torch.ones((num_tasks, num_seq, input_dim),
                               dtype=torch.float32)
        # last hidden size should match output size
        # output_dim is latent_dim
        module = RecurrentEncoder(input_dim=input_dim,
                                  output_dim=output_dim,
                                  hidden_nonlinearity=None,
                                  hidden_sizes=hidden_sizes,
                                  hidden_w_init=nn.init.ones_,
                                  output_w_init=nn.init.ones_)
        # reset() initializes the per-task hidden state before a forward pass.
        module.reset(num_tasks=num_tasks)
        output = module(input_val)
        # maps input of shape (task, seq, input_dim) to (task, 1, output_dim)
        expected_shape = [num_tasks, 1, output_dim]
        assert all([a == b for a, b in zip(output.shape, expected_shape)])
    # yapf: disable
    @pytest.mark.parametrize(
        'input_dim, output_dim, hidden_sizes, num_tasks, num_seq', [
            (1, 1, (1, ), 1, 3),
            (3, 3, (3, ), 1, 5),
            (5, 5, (5, 5), 2, 4),
            (7, 7, (7, 5, 7), 2, 5),
            (9, 9, (9, 7, 5, 9), 3, 10),
        ])
    # yapf: enable
    def test_is_pickleable(self, input_dim, output_dim, hidden_sizes,
                           num_tasks, num_seq):
        """Test is_pickeable."""
        input_val = torch.ones((num_tasks, num_seq, input_dim),
                               dtype=torch.float32)
        module = RecurrentEncoder(input_dim=input_dim,
                                  output_dim=output_dim,
                                  hidden_nonlinearity=None,
                                  hidden_sizes=hidden_sizes,
                                  hidden_w_init=nn.init.ones_,
                                  output_w_init=nn.init.ones_)
        module.reset(num_tasks=num_tasks)
        output1 = module(input_val)
        # Round-trip through pickle; the restored module (after a hidden
        # state reset) must produce identical outputs.
        h = pickle.dumps(module)
        module_pickled = pickle.loads(h)
        module_pickled.reset(num_tasks=num_tasks)
        output2 = module_pickled(input_val)
        assert torch.all(torch.eq(output1, output2))
| icml2020submission6857/metarl | tests/metarl/torch/embeddings/test_recurrent_encoder.py | test_recurrent_encoder.py | py | 2,849 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.ones",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "metarl.torch.embeddings.RecurrentEncoder",
"line_number": 32,
"usage_type": "call"
},
{
"api_name":... |
12482854412 | #!/usr/bin/env python
# coding: utf-8
# !rm -r inference
# !pip install -r requirements.txt
import json
import sys, os
import requests
import datetime
import numpy as np
import pickle
import time
import random
import zstandard as zstd
import tarfile
import pandas as pd
import boto3
import botocore
from botocore.client import Config
import pygrib
import pydap
import xarray as xr
import h5py
from collections import defaultdict
from joblib import Parallel, delayed, parallel_backend
import subprocess
import cv2
secure = dict([e.split('=') for e in open('secure.txt', 'r').read().split('\n')])
infer = dict([e.split('=') for e in open('infer.txt', 'r').read().split('\n')])
# infer = {k: v.split(',') for k, v in infer.items()}
dataset = 'tg'
labels = pd.read_csv('data_{}/train_labels.csv'.format(dataset))
grid = pd.concat( (
pd.read_csv('data_tg/grid_metadata.csv'),
) ).drop_duplicates().reset_index(drop = True)
submission = pd.read_csv('data_{}/submission_format.csv'.format(dataset))
files = pd.read_csv('data_{}/{}_satellite_metadata{}.csv'.format(
dataset, *(('pm25', '') if dataset == 'pm'
else ('no2', '_0AF3h09'))))
labels['location'] = grid.set_index('grid_id')['location'].reindex(labels.grid_id).values
labels['datetime'] = pd.to_datetime(labels.datetime)
submission['location'] = grid.set_index('grid_id').location.reindex(submission.grid_id).values
files.time_end = pd.to_datetime(files.time_end)
cities = {
'Taipei': ( (121.5, 121.5), (25.0, 25) ),
'Delhi': ( (77.0, 77.25), (28.75, 28.5) ),
'LA': ((360-118.25, 360-117.75), (34.0, 34.0) )
}
feats = [
# (6, 'Maximum/Composite radar reflectivity:dB (instant):regular_ll:atmosphere:level 0', ),
# (7, 'Visibility:m (instant):regular_ll:surface:level 0', ),
(11, 'Wind speed (gust):m s**-1 (instant):regular_ll:surface:level 0', ),
(402, 'Surface pressure:Pa (instant):regular_ll:surface:level 0'),
# (404, 'Temperature:K (instant):regular_ll:surface:level 0'),
# (405, 'Soil Temperature:K (instant):regular_ll:depthBelowLandLayer:levels 0.0-0.1 m'),
(406, 'Volumetric soil moisture content:Proportion (instant):regular_ll:depthBelowLandLayer:levels 0.0-0.1 m'),
(415, '2 metre temperature:K (instant):regular_ll:heightAboveGround:level 2 m'),
(416, '2 metre specific humidity:kg kg**-1 (instant):regular_ll:heightAboveGround:level 2 m'),
# (417, '2 metre dewpoint temperature:K (instant):regular_ll:heightAboveGround:level 2 m:'),#fcst time 0 hrs:from 202001010000
(418, '2 metre relative humidity:% (instant):regular_ll:heightAboveGround:level 2 m:'), #fcst time 0 hrs:from 202001010000
(419, 'Apparent temperature:K (instant):regular_ll:heightAboveGround:level 2 m:'),#fcst time 0 hrs:from 202001010000
(420, '10 metre U wind component:m s**-1 (instant):regular_ll:heightAboveGround:level 10 m:'),#fcst time 0 hrs:from 202001010000
(421, '10 metre V wind component:m s**-1 (instant):regular_ll:heightAboveGround:level 10 m:'),#fcst time 0 hrs:from 202001010000
# (435, 'Precipitable water:kg m**-2 (instant):regular_ll:atmosphereSingleLayer:level 0 considered as a single layer'),#:fcst time 0 hrs:from 202001010000
(436, 'Cloud water:kg m**-2 (instant):regular_ll:atmosphereSingleLayer:level 0 considered as a single layer:'),#fcst time 0 hrs:from 202001010000
(437, 'Relative humidity:% (instant):regular_ll:atmosphereSingleLayer:level 0 considered as a single layer:'),#fcst time 0 hrs:from 202001010000
(438, 'Total ozone:DU (instant):regular_ll:atmosphereSingleLayer:level 0 considered as a single layer:'),#fcst time 0 hrs:from 202001010000
# (424, 'Precipitation rate:kg m**-2 s**-1 (instant):regular_ll:surface:level 0'),
# (484, 'Temperature:K (instant):regular_ll:pressureFromGroundLayer', ),
# (485, 'Relative humidity:% (instant):regular_ll:pressureFromGroundLayer:levels 3000-0 Pa', ),
# (486, 'Specific humidity:kg kg**-1 (instant):regular_ll:pressureFromGroundLayer:levels 3000-0 Pa', ),
# (487, 'U component of wind:m s**-1 (instant):regular_ll:pressureFromGroundLayer:levels 3000-0 Pa', ),
# (488, 'V component of wind:m s**-1 (instant):regular_ll:pressureFromGroundLayer:levels 3000-0 Pa', ),
# (520, 'Pressure reduced to MSL:Pa (instant):regular_ll:meanSea:level 0:', ),
]
cities2 = {
'tpe': ( 121.5, 25 ),
'dl': ( 77.0, 28.5 ),
'la': (-118.25, 34.0 )
}
coords = {'la': [('3A3IE', -117.9114, 34.1494),
('3S31A', -117.9563, 33.8142),
('7II4T', -118.0461, 34.0006),
('8BOQH', -118.4504, 34.0379),
('A2FBI', -117.4173, 34.0006),
('A5WJI', -117.9563, 33.9261),
('B5FKJ', -117.5071, 34.1123),
('C8HH7', -116.519, 33.8516),
('DHO4M', -118.3605, 34.1866),
('DJN0F', -117.6419, 34.1123),
('E5P9N', -117.5071, 34.0006),
('FRITQ', -118.1809, 33.8516),
('H96P6', -118.5402, 34.1866),
('HUZ29', -117.2825, 34.1123),
('I677K', -117.5071, 34.0751),
('IUON3', -117.7317, 34.0751),
('JNUQF', -118.2258, 33.8142),
('PG3MI', -118.2258, 34.0751),
('QH45V', -118.4504, 33.9634),
('QJHW4', -118.5402, 34.3722),
('QWDU8', -118.1359, 34.1494),
('VBLD0', -118.2258, 33.8888),
('VDUTN', -117.9114, 33.8142),
('WT52R', -116.8783, 33.9261),
('X5DKW', -117.597, 34.0379),
('Z0VWC', -118.1809, 33.7769),
('ZP1FZ', -117.8665, 34.1494),
('ZZ8JF', -117.3275, 33.6648)],
'tpe': [('1X116', 121.5033, 24.998),
('90BZ1', 121.5482, 25.0387),
('9Q6TA', 121.5482, 25.0794),
('KW43U', 121.5931, 25.0387),
('VR4WG', 121.5033, 25.0794),
('XJF9O', 121.5033, 25.0387),
('XNLVD', 121.5033, 25.1201)],
'dl': [('1Z2W7', 77.2821, 28.5664),
('6EIL6', 77.0575, 28.5664),
('7334C', 77.1024, 28.5664),
('78V83', 76.9227, 28.5664),
('7F1D1', 77.1024, 28.6058),
('8KNI6', 77.2821, 28.4874),
('90S79', 77.1922, 28.6452),
('A7UCQ', 77.2372, 28.6058),
('AZJ0Z', 77.2372, 28.724),
('C7PGV', 77.1922, 28.5269),
('CPR0W', 77.2821, 28.6846),
('D72OT', 77.1473, 28.724),
('D7S1G', 77.327, 28.6846),
('E2AUK', 77.0126, 28.6058),
('GAC6R', 77.1024, 28.7634),
('GJLB2', 77.1024, 28.4874),
('GVQXS', 77.1922, 28.6846),
('HANW9', 77.1922, 28.5664),
('HM74A', 77.1024, 28.6846),
('IUMEZ', 77.2372, 28.6452),
('KZ9W9', 77.1473, 28.6452),
('NE7BV', 77.1024, 28.8421),
('P8JA5', 77.2372, 28.5664),
('PJNW1', 77.1922, 28.724),
('PW0JT', 76.9227, 28.6846),
('S77YN', 77.0575, 28.724),
('SZLMT', 77.1473, 28.6846),
('UC74Z', 77.2821, 28.5269),
('VXNN3', 77.1473, 28.8028),
('VYH7U', 77.0575, 28.7634),
('WZNCR', 77.1473, 28.5664),
('YHOPV', 77.2821, 28.6452),
('ZF3ZW', 77.0575, 28.6846)]}
def cleanDict(d):
    """Recursively convert a defaultdict tree into plain dicts.

    Needed before pickling results, since the defaultdicts here are built
    with unpicklable lambda factories.  Non-defaultdict values (including
    plain dicts) are returned unchanged.
    """
    if not isinstance(d, defaultdict):
        return d
    return {key: cleanDict(value) for key, value in d.items()}
def processGFS(file, d):
p = pygrib.open(file)
lat, lon = p[1].latlons()
spots = {}
for city, ( (lonmin, lonmax) , (latmin, latmax) ) in cities.items():
xmin = np.argmax( (lat == latmin).sum(axis = 1) )#[0]
xmax = np.argmax( (lat == latmax).sum(axis = 1) )#[0]
ymin = np.argmax( (lon == lonmin).sum(axis = 0) )#[0]
ymax = np.argmax( (lon == lonmax).sum(axis = 0) )#[0]
spots[city] = ((xmin, xmax), (ymin, ymax))
data = []
for e in p:
if any(z in str(e) for i, z in feats):
arr = e.values
assert arr.shape == lat.shape
for spot, ((xmin, xmax), (ymin, ymax)) in spots.items():
data.append( (str(e),
spot,
((lat[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].min(),
lat[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].max()),
(lon[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].min(),
lon[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].max())),
arr[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].astype(np.float32),
arr[xmin:xmax + 1, ymin:ymax + 1].mean() ) );
# if len(data) == 1: print(data)
# print(data); return data
return data
# break;
def pullGFS(files):
results = []
for i in range(10):
try:
pswd = secure['password']
values = {'email' : secure['username'], 'passwd' : pswd, 'action' : 'login'}
login_url = 'https://rda.ucar.edu/cgi-bin/login'
ret = requests.post(login_url, data=values)
if ret.status_code != 200:
print('Bad Authentication'); time.sleep(i); continue;
except Exception as e:
print(e)
time.sleep(i)
print('Downloading {} gfs files'.format(len(files)))
# print(filelist); return;
dspath = 'https://rda.ucar.edu/data/ds084.1/'
save_dir = '/tmp/'
zc = zstd.ZstdCompressor(level = 9)
for file in files:
start = time.time()
for i in range(10):
try:
filename = dspath + file
outfile = save_dir + os.path.basename(filename)
print('Downloading', file)
with requests.get(filename, cookies = ret.cookies,
allow_redirects = True, stream=True) as r:
r.raise_for_status()
with open(outfile, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024*1024):
f.write(chunk)
s = os.path.getsize(outfile);
data = processGFS(outfile, 5)
os.remove(outfile)
pkl = pickle.dumps(data)
compr = zc.compress(pkl)
os.makedirs('inference/gfs-5/', exist_ok = True)
with open('inference/gfs-5/{}'.format(os.path.basename(filename)), 'wb') as f:
f.write(compr)
results.append({
# 'statusCode': 200,
'file': os.path.basename(filename),
'body': s/1e6, #os.path.getsize(outfile), #json.dumps('Hello from Lambda!'),
'outlen': len(pkl),#len(pickle.dumps(data)),
'outlen-compr': len(compr),#zc.compress(pickle.dumps(data))),
'elaspsed_time': round(time.time() - start, 1)
# 'data': json.dumps(data),
}); break;
except Exception as e:
print(e)
time.sleep(i)
try: os.remove(outfile)
except: pass;
return results
ifs_tags = ['128_057_uvb',
'128_134_sp',
'128_136_tcw',
'128_137_tcwv',
'128_146_sshf',
'128_147_slhf',
'128_164_tcc',
'128_165_10u',
'128_166_10v',
'128_167_2t',
'128_168_2d',
'128_169_ssrd',
'128_175_strd',
'128_176_ssr',
'128_177_str',
'128_189_sund',
'128_206_tco3',
'128_228_tp',
'128_243_fal',
'128_244_fsr',
'128_245_flsr',
'228_246_100u',
'228_247_100v']
def processIFS(file):
dataset = xr.open_dataset(file)
vars = list(dataset.variables)
assert len(vars) == 5 if 'oper.an' in file else 6 if 'oper.fc' in file else -1;
# assert vars[-4:] == ['latitude', 'longitude', 'time', 'utc_date']
field = vars[0]
name = dataset.variables[field].attrs['long_name']
# print(name)
clean_name = name.lower().replace(' ', '_').replace('-', '_')
# print(clean_name)
sat_data = defaultdict(lambda: defaultdict(dict))
for location, (clon, clat) in cities2.items():
minimum_latitude = clat + 8
minimum_longitude = (clon - 10 ) % 360
maximum_latitude = clat - 8
maximum_longitude = (clon + 10) % 360
data = dataset[field].loc[{
'latitude':slice(minimum_latitude,maximum_latitude),
'longitude':slice(minimum_longitude,maximum_longitude)}]
# print(data.shape)
a = data
v = a.values
lat = np.tile( np.stack([a['latitude']], axis = 1), ( 1, v.shape[-1]))
lon = np.tile( np.stack([a['longitude']], axis = 0), ( v.shape[-2], 1))
assert v.shape == (4, 227, 285) if 'oper.an' in file else (2, 2, 227, 285) if 'oper.fc' in file else None
if 'oper.an' in file:
times = a.time.values.astype('datetime64[s]')
assert len(times) == 4
assert v.shape[0] == len(times)
elif 'oper.fc' in file:
start_times = np.repeat(a.forecast_initial_time.values.astype('datetime64[s]'), 2)
deltas = np.tile([np.timedelta64(int(h), 'h') for h in a.forecast_hour.values], 2)
times = list(zip(start_times, deltas))
# print(times)
v = v.reshape(4, v.shape[-2], v.shape[-1])
# print(times); print(deltas)
assert v.shape[1:] == lat.shape
assert v.shape[1:] == lon.shape
zones = {}# defaultdict(dict)
for tidx, t in enumerate(times):
for grid_id, plon, plat in coords[location]:
for r in [ 0.05, 0.1, 0.2, 0.5, 1, 2, 5]:
if (grid_id, r) not in zones:
zones[(grid_id, r)] = (lat - plat) ** 2 + (lon - plon%360) ** 2 < r ** 2
zone = zones[(grid_id, r)]
# ct = len(v[tidx][zone])#.count()
sat_data[t][grid_id][clean_name + '_mean{}'.format(r)] = v[tidx][zone].mean() #if ct > 3 else np.nan
# for k, v in sat_data.items():
# print(k, len(v))
# print(v['1X116'])
def clean(d):
if isinstance(d, defaultdict):
d = {k: clean(v) for k, v in d.items()}
return d
return clean(sat_data)
def pullIFS(files):
results = []
for i in range(10):
try:
pswd = secure['password']
values = {'email' : secure['username'], 'passwd' : pswd, 'action' : 'login'}
login_url = 'https://rda.ucar.edu/cgi-bin/login'
ret = requests.post(login_url, data=values)
if ret.status_code != 200:
print('Bad Authentication'); time.sleep(i); continue;
except Exception as e:
print(e)
time.sleep(i)
save_dir = '/tmp/'
dspath = 'https://rda.ucar.edu/data/ds113.1/'
print('Downloading {} ifs files'.format(len(files)))
zc = zstd.ZstdCompressor(level = 9)
for file in files:
start = time.time()
for i in range(10):
try:
filename = dspath + file
outfile = save_dir + os.path.basename(filename)
print('Downloading', file)
with requests.get(filename, cookies = ret.cookies,
allow_redirects = True, stream=True) as r:
r.raise_for_status()
with open(outfile, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024*1024):
f.write(chunk)
s = os.path.getsize(outfile);
data = processIFS(outfile)
os.remove(outfile)
pkl = pickle.dumps(data)
compr = zc.compress(pkl)
os.makedirs('inference/ifs/', exist_ok = True)
with open('inference/ifs/{}'.format(os.path.basename(filename)), 'wb') as f:
f.write(compr)
results.append({
# 'statusCode': 200,
'file': os.path.basename(filename),
'body': s/1e6, #os.path.getsize(outfile), #json.dumps('Hello from Lambda!'),
'outlen': len(pkl),#len(pickle.dumps(data)),
'outlen-compr': len(compr),#zc.compress(pickle.dumps(data))),
'elaspsed_time': round(time.time() - start, 1)
}); break;
except Exception as e:
print(e)
time.sleep(i)
try: os.remove(outfile)
except: pass
return results
tropomi_fields = ['nitrogendioxide_tropospheric_column',
'nitrogendioxide_tropospheric_column_precision',
'air_mass_factor_troposphere',
'air_mass_factor_total']
def loadFileS3(row):
    """Download one satellite granule from its public S3 bucket to /tmp.

    *row* is a metadata record with granule_id, us_url (s3://bucket/key),
    cksum and granule_size.  The download is verified against the POSIX
    ``cksum`` checksum and size before the local path is returned.
    """
    # Unsigned config = anonymous access to the public bucket.
    my_config = Config(signature_version = botocore.UNSIGNED)
    s3a = boto3.client('s3', config = my_config)
    filename, url, cksum, sz = [row[k] for k in ['granule_id', 'us_url', 'cksum', 'granule_size']]
    print(filename, url, cksum, sz)
    file = '/tmp/' + filename
    # Split s3://bucket/key into bucket and key.
    bucket = url.split('//')[-1].split('/')[0]
    key = '/'.join(url.split('//')[-1].split('/')[1:])
    s = s3a.download_file(bucket, key, file)
    # cksum prints "<crc> <size> <file>"; compare the first two fields.
    assert ( subprocess.check_output(['cksum',file])
        .decode('utf-8').split(' ')[:2] == [str(cksum), str(sz)])
    return file
def processTropomi(hdf, location, fine = True):
    """Aggregate TROPOMI product fields around each grid cell of *location*.

    For every field in the module-level ``tropomi_fields`` and every grid
    point in ``coords[location]``, computes mean/stdev/count of valid
    pixels inside circles of several radii (a finer radius ladder when
    *fine* is true).  Returns a nested defaultdict:
    grid_id -> feature name -> value.

    Bug fix: the mean's minimum-pixel-count threshold previously tested
    the truthy string literal ``'fine'`` instead of the *fine* argument
    (``ct > (0 if 'fine' else 3)``), so the coarse mode's intended
    >3-pixel cutoff never applied.
    """
    zones = {}  # cache of boolean masks keyed by (grid_id, radius)
    sat_data = defaultdict(lambda: defaultdict(dict))
    hp = hdf['PRODUCT']
    lat = hp['latitude'][:][0]
    lon = hp['longitude'][:][0]
    for field in tropomi_fields:
        v = hp[field][:][0]
        # Mask the fill values (the extreme min/max sentinels in the
        # granule) and clamp negative retrievals to zero.
        data = np.ma.masked_array(v, (v == v.max()) | (v == v.min())).clip(0, None)
        assert data.shape == lat.shape
        for grid_id, plon, plat in coords[location]:
            for r in ([0.07, 0.1, 0.14, 0.2, 0.3, 0.5, 1, 2] if fine
                      else [0.1, 0.25, 0.5, 1, 2]):
                if (grid_id, r) not in zones:
                    zones[(grid_id, r)] = (lat - plat) ** 2 + (lon - plon) ** 2 < r ** 2
                zone = zones[(grid_id, r)]
                ct = data[zone].count()
                # Coarse mode requires more than 3 valid pixels for a mean;
                # fine mode accepts any non-empty zone.
                m = data[zone].mean() if ct > (0 if fine else 3) else np.nan
                s = data[zone].std() if ct >= 3 else np.nan
                sat_data[grid_id][field + '_mean{}'.format(r)] = m
                sat_data[grid_id][field + '_stdev{}'.format(r)] = s
                sat_data[grid_id][field + '_count{}'.format(r)] = ct
    return sat_data
def pullTropomi(row, fine = True):
results = []
start = time.time()
assert row['product'].startswith('tropomi')
file = loadFileS3(row)
hdf = h5py.File(file, 'r')
s = os.path.getsize(file);
sat_data = processTropomi(hdf, row['location'], fine)
output = row.copy()
output['d1'] = cleanDict(sat_data)
s3 = boto3.client('s3')
zc = zstd.ZstdCompressor(level = 15)
pkl = pickle.dumps(output)
compr = zc.compress(pkl)
filename = file.split('/')[-1]
os.makedirs('inference/tropomi-fine/', exist_ok = True)
with open('inference/tropomi-fine/{}'.format(filename), 'wb') as f:
f.write(compr)
try: os.remove(file)
except Exception as e: print(e); pass
return {
# 'statusCode': 200,
'file': os.path.basename(filename),
'body': s/1e6, #os.path.getsize(outfile), #json.dumps('Hello from Lambda!'),
'outlen': len(pkl),#len(pickle.dumps(data)),
'outlen-compr': len(compr),#zc.compress(pickle.dumps(data))),
'elaspsed_time': round(time.time() - start, 1)
};
def loadAssim(field, location, year, month, min_day, max_day):
url = 'https://opendap.nccs.nasa.gov/dods/gmao/geos-cf/assim/aqc_tavg_1hr_g1440x721_v1'
DATASET = xr.open_dataset(url)
start_time = np.datetime64('{}-{:02d}-{:02d} 00:00:00'.format(year, month, min_day))
end_time = np.datetime64('{}-{:02d}-{:02d} 23:59:00'.format(year, month, max_day))
# end_time = np.datetime64('{}-01-01 00:00:00'.format(year + 1))
minimum_latitude = min([e[-1] for e in coords[location]]) - 3
minumum_longitude = min([e[-2] for e in coords[location]]) - 3
maximum_latitude = max([e[-1] for e in coords[location]]) + 3
maximum_longitude = max([e[-2] for e in coords[location]]) + 3
data = DATASET[field].loc[{'time':slice(start_time,end_time),
'lat':slice(minimum_latitude,maximum_latitude),
'lon':slice(minumum_longitude,maximum_longitude)}]
return data
def processAssim(a, location, field):
    """Aggregate one hourly GEOS-CF assimilation slice around each grid cell.

    *a* is one time-slice from ``loadAssim`` -- assumes shape
    (1, lat, lon); TODO confirm.  Returns a record dict with the per-grid
    circular-zone means at several radii (empty when the slice contains
    fill values).
    """
    t = a.time.values.astype('datetime64[s]')
    sat_data = defaultdict(dict)
    v = a.values[0]
    # 1.0e15 is the dataset's fill value; skip slices containing any.
    if (v == 1.0e15).sum() > 0:
        return {'location': location, 'time_end': t, 'd1': cleanDict(sat_data)}
    # Broadcast the 1-D lat/lon axes into full 2-D coordinate grids.
    lat = np.tile( np.stack([a['lat']], axis = 1), ( 1, v.shape[1]))
    lon = np.tile( np.stack([a['lon']], axis = 0), ( v.shape[0], 1))
    # Upsample 5x so small-radius circles capture enough pixels.
    lat = cv2.resize(lat, None, fx = 5, fy = 5)
    lon = cv2.resize(lon, None, fx = 5, fy = 5)
    v2 = cv2.resize(v, None, fx = 5, fy = 5)
    zones = {}  # cache of boolean masks keyed by (grid_id, radius)
    for grid_id, plon, plat in coords[location]:
        for r in [ 0.1, 0.25, 0.5, 1, 2, ]:
            if (grid_id, r) not in zones:
                z = (lat - plat) ** 2 + (lon - plon) ** 2 < r ** 2
                zones[(grid_id, r)] = z#, z.sum())
            zone = zones[(grid_id, r)]
            m = v2[zone].mean()#, 1#zone.sum()
            sat_data[grid_id][field + '_mean{}'.format(r)] = m #data[zone].mean()# if ct > 3 else np.nan
    return {'location': location, 'time_end': t, 'd1': cleanDict(sat_data)}
def pullAssim(year, month, min_day, max_day):
for field in ['no2', 'so2', 'co', 'o3', 'pm25_rh35_gcc']:
for location in coords.keys():
start = time.time()
for i in range(10):
try:
data = loadAssim(field, location, year, month, min_day, max_day)
print('{}-{:02d} {} {} {}'.format(
year, month, field, location, len(data)))
# assert len(data) == 24
with parallel_backend('threading'):
r = Parallel(os.cpu_count())(
delayed(processAssim)(a, location, field) for a in data)
zc = zstd.ZstdCompressor(level = 9)
out = pickle.dumps(r)
compr = zc.compress(out)
filename = '{}_{}_{}_{:02}.pkl'.format(
field, location, year, month, )
os.makedirs('inference/assim/', exist_ok = True)
with open('inference/assim/{}'.format(filename), 'wb') as f:
f.write(compr)
print({
# 'statusCode': 200,
'file': filename.split('.')[0],
# 'body': s/1e6, #os.path.getsize(outfile), #json.dumps('Hello from Lambda!'),
'outlen': len(out),#len(pickle.dumps(data)),
'outlen-compr': len(compr),#zc.compress(pickle.dumps(data))),
'elaspsed_time': round(time.time() - start, 1)
}); break;
except Exception as e:
print(e); time.sleep(i)
def listAssimDates(dates):
    """Collapse a list of datetimes into per-month day ranges.

    Returns one tuple ``(year, month, min_day, max_day)`` per calendar
    month present in *dates*, in first-seen month order.

    Bug fix: the previous version updated the running maximum with
    ``max(prior[0], t.day)`` -- comparing against the stored *minimum*
    day -- which dropped the true maximum whenever days arrived out of
    order.
    """
    months = {}
    for t in dates:
        key = (t.year, t.month)
        if key in months:
            lo, hi = months[key]
            months[key] = (min(lo, t.day), max(hi, t.day))
        else:
            months[key] = (t.day, t.day)
    return [(*k, *v) for k, v in months.items()]
start = datetime.datetime(*[int(i) for i in infer['start'].split(',')])
end = datetime.datetime(*[int(i) for i in infer['end'].split(',')])
dt = start - datetime.timedelta(days = 10)
dates = []
while dt <= end + datetime.timedelta(days = 1):
dates.append(dt);
dt += datetime.timedelta(days = 1)
print(len(dates))
print(dates[0]); print(dates[-1])
def listGFSFiles(dates):
    """Build rda.ucar.edu ds084.1 paths for the 0-hour GFS analysis.

    One file per date and synoptic cycle (00/06/12/18Z), in
    ``YYYY/YYYYMMDD/gfs.0p25.YYYYMMDDHH.f000.grib2`` form.
    """
    forecast_hour = 0
    names = []
    for t in dates:
        stamp = t.strftime('%Y%m%d')
        for cycle in (0, 6, 12, 18):
            names.append('{}/{}/gfs.0p25.{}{:02d}.f{:03d}.grib2'.format(
                stamp[:4], stamp, stamp, cycle, forecast_hour))
    return names
def listIFSFiles(dates):
    """Build rda.ucar.edu ds113.1 surface-forecast file paths.

    One path per (date, variable tag) pair; tags come from the
    module-level ``ifs_tags`` list.
    """
    domain = 'ec.oper.fc.sfc'
    return [
        '{}/{}/{}.{}.regn1280sc.{}.nc'.format(
            domain, t.strftime('%Y%m'), domain, tag, t.strftime('%Y%m%d'))
        for t in dates
        for tag in ifs_tags
    ]
def listTropomiRows(dates):
    """Select rows of the global ``files`` table for tropomi products whose
    end time falls within [min(dates), max(dates) + 1 day], as dicts."""
    lo = min(dates)
    hi = max(dates) + datetime.timedelta(days = 1)
    # Drop the timezone so the comparison against naive datetimes is valid.
    end_naive = files.time_end.dt.tz_localize(None)
    mask = (files['product'].str.startswith('tropomi')
            & (end_naive >= lo)
            & (end_naive <= hi))
    return [row.to_dict() for _, row in files[mask].iterrows()]
# %%time
# Download the IFS files with up to 10 threads; worker i takes every
# N_THREADS-th path (striped split of the full file list).
N_THREADS = min(10, os.cpu_count() )
Parallel(N_THREADS)(delayed(pullIFS)(
    listIFSFiles(dates)[i::N_THREADS])
    for i in range(N_THREADS))
# %%time
# GFS downloads: same striped fan-out, capped at 4 threads.
N_THREADS = min(4, os.cpu_count() )
Parallel(N_THREADS)(delayed(pullGFS)(
    listGFSFiles(dates)[:][i::N_THREADS])
    for i in range(N_THREADS))
# %%time
# Tropomi: one task per catalog row, up to 5 threads.
N_THREADS = min(5, os.cpu_count())
Parallel(N_THREADS)(delayed(pullTropomi)(row)
    for row in listTropomiRows(dates))
# %%time
# Assimilation data: one call per (year, month, min_day, max_day) range,
# run serially here — pullAssim parallelizes internally.
[pullAssim(*d) for d in listAssimDates(dates)]
if start.year <= 2018 and end.year >= 2021:
    # Only archive when the run spans the full training window: pack each
    # inference/<product>/ directory into cache/<product>.tar (flat members).
    os.makedirs('cache', exist_ok = True)
    for product in os.listdir('inference'):
        archive_path = 'cache/{}.tar'.format(product)
        with tarfile.open(archive_path, 'w') as archive:
            for member in os.listdir('inference/{}'.format(product)):
                archive.add('inference/{}/{}'.format(product, member),
                    arcname = member)
# !jupyter nbconvert --no-prompt --to script 'RunFeatures.ipynb'
| drivendataorg/nasa-airathon | no2/1st Place/RunFeatures.py | RunFeatures.py | py | 25,953 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"... |
33571337998 | #!/usr/bin/env python3
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from time import sleep
import logging
import os
import sys
# Very simple tee logic implementation. You can specify shell command, output
# logfile and env variables. After TeePopen is created you can only wait until
# it finishes. stderr and stdout will be redirected both to specified file and
# stdout.
class TeePopen:
    """Run a shell command while mirroring its combined stdout/stderr to
    both a log file and this process's stdout (a simple ``tee``).

    Usage::

        with TeePopen(cmd, log_path, timeout=...) as proc:
            retcode = proc.wait()
    """

    def __init__(self, command, log_file, env=None, timeout=None):
        self.command = command
        # Holds the log *path* until __enter__, where it is replaced by the
        # open file object.
        self.log_file = log_file
        # Snapshot the environment per instance. The original default,
        # `env=os.environ.copy()`, was evaluated once at import time (the
        # classic mutable-default pitfall pylint W0102 flagged), so changes
        # to os.environ made after import were never picked up.
        self.env = env if env is not None else os.environ.copy()
        self.process = None
        self.timeout = timeout

    def _check_timeout(self):
        # Watchdog thread: after `timeout` seconds, keep SIGKILL-ing the
        # child's process group until it actually exits.
        sleep(self.timeout)
        while self.process.poll() is None:
            logging.warning(
                "Killing process %s, timeout %s exceeded",
                self.process.pid,
                self.timeout,
            )
            # start_new_session=True made the child a process-group leader,
            # so its pid doubles as the pgid and the whole tree is killed.
            os.killpg(self.process.pid, 9)
            sleep(10)

    def __enter__(self):
        self.process = Popen(
            self.command,
            shell=True,
            universal_newlines=True,
            env=self.env,
            start_new_session=True,  # signall will be sent to all children
            stderr=STDOUT,  # merge stderr into stdout so one stream is tee'd
            stdout=PIPE,
            bufsize=1,  # line-buffered: lines reach the log as produced
        )
        self.log_file = open(self.log_file, "w", encoding="utf-8")
        if self.timeout is not None and self.timeout > 0:
            t = Thread(target=self._check_timeout)
            t.daemon = True  # does not block the program from exit
            t.start()
        return self

    def __exit__(self, t, value, traceback):
        # Drain any remaining output, then reap the child and close the log.
        for line in self.process.stdout:
            sys.stdout.write(line)
            self.log_file.write(line)
        self.process.wait()
        self.log_file.close()

    def wait(self):
        """Stream the child's output to stdout and the log until EOF, then
        return the child's exit code."""
        for line in self.process.stdout:
            sys.stdout.write(line)
            self.log_file.write(line)
        return self.process.wait()
| ByConity/ByConity | tests/ci/tee_popen.py | tee_popen.py | py | 2,012 | python | en | code | 1,352 | github-code | 36 | [
{
"api_name": "os.environ.copy",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.warning",
"lin... |
34076302112 | from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
import sys
'''
The key concept in CSP is to find a set of spatial filters (components) that optimally discriminate
between the two classes. These filters are represented by the eigenvectors obtained in the 'fit' method.
When you apply the CSP transformation to a new data sample in the 'transform' method, it projects the data
onto these filters. The result is that the transformed data has enhanced features that maximize the
differences in variances between the two classes. This makes it easier to classify the data based on
the most discriminative spatial patterns.
'''
class CSPTransformer(BaseEstimator, TransformerMixin):
    """Common Spatial Patterns (CSP) dimensionality reduction for binary
    classification.

    Learns spatial filters — eigenvectors of pinv(cov1 + cov2) @ cov1 —
    that maximize the variance of one class while minimizing the other's,
    then projects samples onto the ``nb_components`` most discriminative
    filters.
    """

    def __init__(self, nb_components=4):
        # Number of spatial filters to keep. Common practice is to try a
        # range (e.g. 2-10) and pick by classification accuracy: more
        # components add discriminative power at extra computational cost.
        self.nb_components = nb_components
        self.filters = np.array([])  # (n_features, nb_components) projection, set by fit()
        self.x = np.array([])        # accumulated training samples (for partial_fit)
        self.y = np.array([])        # accumulated training labels

    def fit(self, x, y):
        """Learn CSP filters from ``x`` (n_samples, n_features) and binary
        labels ``y``.

        NOTE(review): on a non-binary ``y`` this prints to stderr and calls
        exit(), aborting the whole process — kept for backward
        compatibility, but raising ValueError would be friendlier.
        """
        if self.x.size == 0:
            # Remember the first training batch so partial_fit can extend it.
            self.x = x
            self.y = y
        class_labels = np.unique(y)
        if len(class_labels) != 2:
            print("CSPTransformer: Error: CSP is a binary classification method: there should be two class labels.",
                    file=sys.stderr)
            exit()
        x_class1 = x[y == class_labels[0]]  # samples belonging to the first class
        x_class2 = x[y == class_labels[1]]
        # Per-class covariance matrices, features as variables.
        cov1 = np.cov(x_class1, rowvar=False)
        cov2 = np.cov(x_class2, rowvar=False)
        # Generalized eigenvalue problem cov1 * v = lambda * (cov1 + cov2) * v,
        # solved via the pseudo-inverse. The eigenvectors are the CSP filters.
        eigenvalues, eigenvectors = np.linalg.eig(np.dot(np.linalg.pinv(np.add(cov1, cov2)), cov1))
        # Order filters by descending eigenvalue so the most discriminative
        # ones come first, then keep the top nb_components (real part only:
        # eig may return a complex dtype with ~zero imaginary parts).
        descending_indices = np.flip(np.argsort(eigenvalues))
        eigenvectors = eigenvectors[:, descending_indices]
        self.filters = eigenvectors[:, :self.nb_components].real.astype(np.float32)
        return self

    def partial_fit(self, x, y):
        """Accumulate additional samples and refit on everything seen so far.

        Bug fix: previously a first call (before any fit) crashed, because
        the empty 1-D placeholder arrays cannot be concatenated with 2-D
        training data along axis 0.
        """
        if self.x.size == 0:
            self.x = np.asarray(x)
            self.y = np.asarray(y)
        else:
            self.x = np.concatenate((self.x, x), axis=0)
            self.y = np.concatenate((self.y, y), axis=0)
        return self.fit(self.x, self.y)

    def transform(self, x):
        """Project ``x`` onto the learned filters -> (n_samples, nb_components).

        Prints an error and exits if fit() has not been called yet (kept
        from the original for backward compatibility).
        """
        if self.filters.size == 0:
            print("CSPTransformer: Error: use the 'fit' method to find the filters before using 'transform' method",
                    file=sys.stderr)
            exit()
        x_csp = np.dot(x, self.filters)
        return x_csp
| artainmo/total_perspective_vortex | processing_EEGs_lib/dimensionality_reduction_algorithm.py | dimensionality_reduction_algorithm.py | py | 4,957 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.