Dataset schema (113 columns), reconstructed from the flattened header:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
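A short sketch may help when working with rows of this shape. The following, a minimal sketch assuming the rows are stored as a Parquet shard readable by pandas (the file name `shard.parquet` is hypothetical), loads the table and inspects a few of the quality-signal columns:

```python
# Minimal sketch, assuming the rows above live in a Parquet shard;
# "shard.parquet" is a hypothetical file name.
import pandas as pd

df = pd.read_parquet("shard.parquet")

# File-level statistics from the schema above.
print(df[["lang", "size", "avg_line_length", "alphanum_fraction"]].head())

# Python-specific signals, e.g. the fraction of lines that are imports
# and whether the file parses into an AST (1) or not (0).
cols = [
    "qsc_codepython_frac_lines_import_quality_signal",
    "qsc_codepython_cate_ast_quality_signal",
]
print(df[cols].describe())
```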
Record 1: `pdb_profiling/processors/uniprot/api.py` from `NatureGeorge/pdb-profiling`

| Field | Value |
|---|---|
| hexsha | 9a417a0a839c157704c0bb9c7d9a86e16b358f3e |
| size | 22,087 |
| ext | py |
| lang | Python |
| repo_path (all groups) | pdb_profiling/processors/uniprot/api.py |
| repo_name (all groups) | NatureGeorge/pdb-profiling |
| repo_head_hexsha (all groups) | b29f93f90fccf03869a7a294932f61d8e0b3470c |
| licenses (all groups) | ["MIT"] |

| Group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 5 | 2020-10-27T12:02:00.000Z | 2021-11-05T06:51:59.000Z |
| max_issues | 9 | 2021-01-07T04:47:58.000Z | 2021-09-22T13:20:35.000Z |
| max_forks | null | null | null |

content:

```python
# @Created Date: 2019-12-08 06:46:49 pm
# @Filename: api.py
# @Email: 1730416009@stu.suda.edu.cn
# @Author: ZeFeng Zhu
# @Last Modified: 2020-02-16 10:54:32 am
# @Copyright (c) 2020 MinghuiGroup, Soochow University
from typing import Iterable, Iterator, Optional, Union, Generator, Dict, List
from time import perf_counter
from numpy import nan, array
from pathlib import Path
from unsync import unsync, Unfuture
from copy import deepcopy
from pdb_profiling.log import Abclog
from pdb_profiling.utils import init_semaphore, init_folder_from_suffix, init_folder_from_suffixes, a_read_csv
from pdb_profiling.fetcher.webfetch import UnsyncFetch
from uuid import uuid4
from pdb_profiling.cif_gz_stream import iter_index
from aiohttp import ClientSession
from aiofiles import open as aiofiles_open
from pdb_profiling.ensure import EnsureBase
from tenacity import wait_random, stop_after_attempt
ensure = EnsureBase()
rt_kw = dict(wait=wait_random(max=20), stop=stop_after_attempt(6))
"""QUERY_COLUMNS: List[str] = [
'id', 'length', 'reviewed',
'comment(ALTERNATIVE%20PRODUCTS)',
'feature(ALTERNATIVE%20SEQUENCE)',
'genes', 'organism', 'protein%20names']
RESULT_COLUMNS: List[str] = [
'Entry', 'Length', 'Status',
'Alternative products (isoforms)',
'Alternative sequence',
'Gene names', 'Organism', 'Protein names']
COLUMNS_DICT: Dict = dict(zip(QUERY_COLUMNS, RESULT_COLUMNS))
RESULT_NEW_COLUMN: List[str] = ['yourlist', 'isomap']"""
BASE_URL: str = 'https://www.uniprot.org'
"""PARAMS: Dict = {
# 'fil': 'organism%3A"Homo+sapiens+(Human)+[9606]"+AND+reviewed%3Ayes',
# reviewed:yes+AND+organism:9606
'columns': None,
'query': None,
'from': None,
'to': 'ACC',
'format': 'tab'}"""
"""
class MapUniProtID(Abclog):
'''
Implement UniProt Retrieve/ID Mapping API
'''
def __init__(self, id_col: str, id_type: str,
dfrm: Optional[DataFrame],
ids: Optional[Iterable] = None,
sites: Optional[Iterable] = None,
genes: Optional[Iterable] = None,
usecols: Optional[Iterable] = QUERY_COLUMNS,
site_col: Optional[str] = None,
gene_col: Optional[str] = None,
logger: Optional[logging.Logger] = None,
loggingPath: Optional[str] = None):
self.init_logger(self.__class__.__name__, logger)
if dfrm is not None:
self.dfrm = dfrm.drop_duplicates().reset_index(drop=True)
else:
'''
the length of dataframe is based on:
* the num of `ids` if there is more than one id
* the num of `sites` if there is just one id with specified `sites`
'''
if isinstance(ids, str):
if sites is not None and not isinstance(sites, str):
index_len = len(sites)
else:
index_len = 1
else:
index_len = len(ids)
self.dfrm = DataFrame(dict(zip(
(col for col in (id_col, site_col, gene_col) if col is not None),
(value for value in (ids, sites, genes) if value is not None))),
index=list(range(index_len)))
self.index = dfrm.index
self.id_col = id_col
self.id_type = id_type
self.site_col = site_col
self.gene_col = gene_col
self.loggingPath = loggingPath
if isinstance(usecols, str):
PARAMS['columns'] = usecols
usecols = usecols.split(',')
elif isinstance(usecols, (Iterable, Iterator)):
PARAMS['columns'] = ','.join(usecols)
else:
raise ValueError('Invalid usecols')
self.usecols = usecols
PARAMS['from'] = id_type
if isinstance(loggingPath, (str, Path)):
self.set_logging_fileHandler(loggingPath)
@property
def sites(self) -> Generator:
if self.site_col is not None:
for name, group in self.dfrm.groupby(by=self.id_col, sort=False):
yield name, group[self.site_col]
else:
yield None
@staticmethod
def split_df(dfrm, colName, sep):
'''Split DataFrame'''
df = dfrm.copy()
return df.drop([colName], axis=1).join(df[colName].str.split(sep, expand=True).stack().reset_index(level=1, drop=True).rename(colName))
def yieldTasks(self, lyst: Iterable, chunksize: int = 100, sep: str = ',') -> Generator:
fileName = self.outputPath.stem
for i in range(0, len(lyst), chunksize):
cur_fileName = f'{fileName}+{i}'
cur_params = deepcopy(PARAMS)
cur_params['query'] = sep.join(lyst[i:i+chunksize]) # self.outputPath.suffix
yield ('get', {'url': f'{BASE_URL}/uploadlists/', 'params': cur_params}, str(Path(self.outputPath.parent, cur_fileName+'.tsv')))
def retrieve(self, outputPath: Union[str, Path],
finishedPath: Optional[str] = None,
sep: str = '\t',
chunksize: int = 100,
concur_req: int = 20,
rate: float = 1.5,
ret_res: bool = True,
semaphore = None):
finish_id = list()
self.outputPath = Path(outputPath)
self.result_cols = [COLUMNS_DICT.get(
i, i) for i in self.usecols] + RESULT_NEW_COLUMN
if finishedPath is not None:
try:
target_col = RESULT_NEW_COLUMN[0]
finish: Series = read_csv(
finishedPath,
sep=sep,
usecols=[target_col],
names=self.result_cols,
skiprows=1,
header=None)[target_col]
except Exception as e:
col_to_add = RESULT_NEW_COLUMN[1]
self.logger.warning(
f"{e}\nSomething wrong with finished raw file, probably without '{col_to_add}' column.")
finish_df = read_csv(
finishedPath, sep=sep, names=self.result_cols[:-1], skiprows=1, header=None)
finish_df[col_to_add] = nan
finish_df.to_csv(finishedPath, sep=sep, index=False)
finish: Series = finish_df[target_col]
for query_id in finish:
if ',' in query_id:
finish_id.extend(query_id.split(','))
else:
finish_id.append(query_id)
query_id: Series = self.dfrm[self.id_col]
if finish_id:
rest_id = list(set(query_id) - set(finish_id))
else:
rest_id = query_id.unique()
self.logger.info(
f"Have finished {len(finish_id)} ids, {len(rest_id)} ids left.")
res = UnsyncFetch.multi_tasks(
tasks=self.yieldTasks(rest_id, chunksize),
to_do_func=self.process,
concur_req=concur_req,
rate=rate,
ret_res=ret_res,
semaphore=semaphore)
return res
def getCanonicalInfo(self, dfrm: DataFrame):
'''
Will Change the dfrm
* Add new column (canonical_isoform)
* Change the content of column (UniProt)
'''
# Get info from Alt Product file
if self.altProPath is None:
dfrm['canonical_isoform'] = nan
return dfrm
else:
usecols = ["IsoId", "Sequence", "Entry", "UniProt"]
altPro_df = read_csv(self.altProPath, sep="\t", usecols=usecols)
altPro_df = altPro_df[altPro_df["Sequence"]
== "Displayed"].reset_index(drop=True)
altPro_df.rename(
columns={"IsoId": "canonical_isoform"}, inplace=True)
# Modify dfrm
dfrm = merge(
dfrm, altPro_df[["canonical_isoform", "Entry"]], how="left")
return dfrm
def getGeneStatus(self, handled_df: DataFrame, colName: str = 'GENE_status'):
'''
Will Change the dfrm, add Gene Status
* Add new column (GENE) # if id_col != gene_col
* Add new column (GENE_status)
**About GENE_status**
* ``False`` : First element of Gene names is not correspond with refSeq's GENE (e.g)
* others(corresponding GENE)
'''
self.gene_status_col = colName
if self.id_type != 'GENENAME':
if self.gene_col is None:
handled_df[colName] = True
return None
gene_map = self.dfrm[[self.id_col,
self.gene_col]].drop_duplicates()
gene_map = gene_map.groupby(self.id_col)[self.gene_col].apply(
lambda x: array(x) if len(x) > 1 else list(x)[0])
handled_df['GENE'] = handled_df.apply(
lambda z: gene_map[z['yourlist']], axis=1)
handled_df[colName] = handled_df.apply(lambda x: x['GENE'] == x['Gene names'].split(
' ')[0] if not isinstance(x['Gene names'], float) else False, axis=1)
handled_df['GENE'] = handled_df['GENE'].apply(
lambda x: ','.join(x) if not isinstance(x, str) else x)
else:
handled_df[colName] = handled_df.apply(lambda x: x['yourlist'] == x['Gene names'].split(
' ')[0] if not isinstance(x['Gene names'], float) else False, axis=1)
def label_mapping_status(self, dfrm: DataFrame, colName: str = 'Mapping_status'):
self.mapping_status_col = colName
gene_status_col = self.gene_status_col
dfrm[colName] = 'No'
dfrm[gene_status_col] = dfrm[gene_status_col].apply(
lambda x: x.any() if isinstance(x, Iterable) else x)
if self.id_col == 'GENENAME':
pass_df = dfrm[
(dfrm[gene_status_col] == True) &
(dfrm['Status'] == 'reviewed') &
(dfrm['unp_map_tage'] != 'Untrusted & No Isoform')]
else:
pass_df = dfrm[
(dfrm['Status'] == 'reviewed') &
(dfrm['unp_map_tage'] != 'Untrusted & No Isoform')]
pass_index = pass_df.index
dfrm.loc[pass_index, colName] = 'Yes'
# Deal with 'one to many' situation
multipleCounter = Counter(dfrm.loc[pass_index, 'yourlist'])
err_li = [i for i, j in multipleCounter.items() if j > 1]
err_index = pass_df[pass_df['yourlist'].isin(err_li)].index
dfrm.loc[err_index, colName] = 'Error'
@unsync
async def process(self, path: Union[str, Path, Unfuture], sep: str = '\t'):
self.logger.debug("Start to handle id mapping result")
if not isinstance(path, (Path, str)):
path = await path # .result()
if not Path(path).stat().st_size:
return None
self.altSeqPath, self.altProPath = ExtractIsoAlt.main(path=path)
try:
df = read_csv(
path, sep='\t', names=self.result_cols, skiprows=1, header=None)
except ValueError:
df = read_csv(
path, sep='\t', names=self.result_cols[:-1], skiprows=1, header=None)
# Add New Column: canonical_isoform
df = self.getCanonicalInfo(df)
# Add New Column: unp_map_tage
df['unp_map_tage'] = nan
# Classification
df_with_no_isomap = df[df['isomap'].isnull()] # Class A
df_with_isomap = df[df['isomap'].notnull()] # Class B
# ----------------------------------------------------------------------
# In Class A
# ----------------------------------------------------------------------
if len(df_with_no_isomap) > 0:
df_wni_split = self.split_df(df_with_no_isomap, 'yourlist', ',')
df_wni_split.drop(columns=['isomap'], inplace=True)
# [yourlist <-> UniProt]
df_wni_split['UniProt'] = df_wni_split['Entry']
df_wni_split['unp_map_tage'] = 'Trusted & No Isoform'
# Find out special cases 1
df_wni_split_warn = df_wni_split[df_wni_split['Alternative products (isoforms)'].notnull(
)].index
df_wni_split.loc[df_wni_split_warn,
'unp_map_tage'] = 'Untrusted & No Isoform'
# 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt'
# ----------------------------------------------------------------------
# In Class B
# ----------------------------------------------------------------------
if len(df_with_isomap) > 0:
wi_yourlist_count = df_with_isomap.apply(
lambda x: x['yourlist'].count(','), axis=1)
wi_isomap_count = df_with_isomap.apply(
lambda x: x['isomap'].count(','), axis=1)
# In subClass 1
df_wi_eq = df_with_isomap.loc[wi_yourlist_count[wi_yourlist_count ==
wi_isomap_count].index]
if len(df_wi_eq) > 0:
df_wi_eq_split = self.split_df(
df_wi_eq.drop(columns=['yourlist']), 'isomap', ',')
df_wi_eq_split[['yourlist', 'UniProt']] = df_wi_eq_split['isomap'].str.split(
' -> ', expand=True)
# [yourlist <-> UniProt]
df_wi_eq_split.drop(columns=['isomap'], inplace=True)
df_wi_eq_split['unp_map_tage'] = 'Trusted & Isoform'
# # 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt'
# In subClass 2
df_wi_ne = df_with_isomap.loc[wi_yourlist_count[wi_yourlist_count !=
wi_isomap_count].index]
if len(df_wi_ne) > 0:
df_wi_ne_split = self.split_df(df_wi_ne, 'isomap', ',')
df_wi_ne_split.rename(
columns={'yourlist': 'checkinglist'}, inplace=True)
df_wi_ne_split[['yourlist', 'UniProt']] = df_wi_ne_split['isomap'].str.split(
' -> ', expand=True)
df_wi_ne_split.drop(columns=['isomap'], inplace=True)
df_wi_ne_split['unp_map_tage'] = 'Trusted & Isoform & Contain Warnings'
# 'Entry', 'Gene names', 'Status', 'Alternative products (isoforms)', 'Organism', 'yourlist', 'UniProt', 'checkinglist'
# Find out special cases 2
usecols = Index(set(df_wi_ne_split.columns) -
{'yourlist', 'UniProt'})
df_wi_ne_warn = self.split_df(
df_wi_ne_split[usecols].drop_duplicates(), 'checkinglist', ',')
df_wi_ne_warn = df_wi_ne_warn[~df_wi_ne_warn['checkinglist'].isin(
df_wi_ne_split['yourlist'])].rename(columns={'checkinglist': 'yourlist'})
df_wi_ne_warn['UniProt'] = df_wi_ne_warn['Entry']
# sequence conflict
df_wi_ne_warn['unp_map_tage'] = 'Untrusted & No Isoform'
df_wi_ne_split.drop(columns=['checkinglist'], inplace=True)
# Concat Dfrm
variables = ["df_wni_split", "df_wi_eq_split",
"df_wi_ne_split", "df_wi_ne_warn"]
lvs = locals()
varLyst = [lvs[variable] for variable in variables if variable in lvs]
final_df = concat(varLyst, sort=False).reset_index(drop=True)
cano_index = final_df[final_df["canonical_isoform"].notnull()].index
if len(cano_index) > 0:
final_df.loc[cano_index, "UniProt"] = final_df.loc[cano_index, ].apply(
lambda x: x["Entry"] if x["UniProt"] in x["canonical_isoform"] else x["UniProt"], axis=1)
# Add Gene Status
self.getGeneStatus(final_df)
# Label Mapping Status
self.label_mapping_status(final_df)
pathOb = Path(path)
edPath = str(Path(pathOb.parent, f'{pathOb.stem}_ed.tsv')) # {pathOb.suffix}
final_df.to_csv(edPath, sep=sep, index=False)
self.logger.debug(f"Handled id mapping result saved in {edPath}")
return edPath
"""
class UniProtAPI(Abclog):
'''
Implement UniProt Retrieve/ID Mapping API
* focus on tabular format
* <https://www.uniprot.org/help/uploadlists>
* <https://www.uniprot.org/help/api_idmapping>
'''
headers = {'Cache-Control': 'no-cache'}
params = {
'columns': 'id,feature(ALTERNATIVE%20SEQUENCE)',
'query': None,
'from': 'ACC+ID',
'to': 'ACC',
'format': 'tab'}
with_name_suffix = True
@classmethod
def task_unit(cls, chunk, i, folder, name, sep):
cur_params = deepcopy(cls.params)
cur_params['query'] = sep.join(chunk)
return ('get', {'url': f'{BASE_URL}/uploadlists/', 'params': cur_params, 'headers': cls.headers}, folder/f'{name}+{i}.tsv')
@classmethod
def yieldTasks(cls, lyst: Iterable, chunksize: int, folder, name: str, sep: str = ',') -> Generator:
name_with_suffix = f'{name}+{uuid4().hex}' if cls.with_name_suffix else name
for i in range(0, len(lyst), chunksize):
yield cls.task_unit(lyst[i:i+chunksize], i, folder, name_with_suffix, sep)
@classmethod
@unsync
async def set_web_semaphore(cls, web_semaphore_value: int):
cls.web_semaphore = await init_semaphore(web_semaphore_value)
@classmethod
def set_folder(cls, folder: Union[Path, str]):
cls.folder = init_folder_from_suffix(folder, 'UniProt/uploadlists/')
@classmethod
def retrieve(cls, lyst: Iterable, name: str, sep: str = ',', chunksize: int = 100, rate: float = 1.5, semaphore=None, **kwargs):
return [UnsyncFetch.single_task(
task,
semaphore=cls.web_semaphore if semaphore is None else semaphore,
rate=rate) for task in cls.yieldTasks(lyst, chunksize, cls.folder, name, sep)]
class UniProtINFO(Abclog):
'''
* Download UniProt Fasta Sequences
* Download UniProt Features
'''
@classmethod
@unsync
async def set_web_semaphore(cls, web_semaphore_value:int):
cls.web_semaphore = await init_semaphore(web_semaphore_value)
@classmethod
def set_folder(cls, folder: Union[Path, str]):
cls.fasta_folder, cls.txt_folder = tuple(init_folder_from_suffixes(folder, ('UniProt/fasta', 'UniProt/txt')))
@classmethod
def get_fasta_folder(cls):
return cls.fasta_folder
@classmethod
def get_txt_folder(cls):
return cls.txt_folder
def __init__(self, api_suffix):
if api_suffix == 'fasta':
self.get_cur_folder = self.get_fasta_folder
self.params = {'include': 'no'}
elif api_suffix == 'txt':
self.get_cur_folder = self.get_txt_folder
self.params = {}
else:
raise AssertionError(f'Invalid api_suffix: {api_suffix} for UniProt')
self.suffix = api_suffix
def task_unit(self, unp:str):
cur_fileName = f'{unp}.{self.suffix}'
return ('get', {'url': f'{BASE_URL}/uniprot/{cur_fileName}', 'params': self.params}, self.get_cur_folder()/cur_fileName)
def single_retrieve(self, identifier: str, rate: float = 1.5):
return UnsyncFetch.single_task(
task=self.task_unit(identifier),
semaphore=self.web_semaphore,
rate=rate)
@classmethod
async def txt_reader(cls, url):
remain_part = b''
async with cls.web_semaphore:
async with ClientSession() as session:
async with session.get(url=url, timeout=3600) as resp:
if resp.status == 200:
async for rv in resp.content.iter_any():
if rv:
index = (None, *iter_index(rv, b'\n', 1), None)
if len(index) == 2:
remain_part += rv
continue
if remain_part:
yield remain_part + rv[:index[1]]
remain_part = b''
for start, end in zip(index[1:-1], index[2:-1]):
yield rv[start:end]
else:
for start, end in zip(index[:-1], index[1:-1]):
yield rv[start:end]
if index[-2] != len(rv):
remain_part = rv[index[-2]:]
if remain_part:
yield remain_part
else:
raise Exception(
"code={resp.status}, message={resp.reason}, headers={resp.headers}".format(resp=resp) +
f"\nurl={url}")
@staticmethod
@unsync
@ensure.make_sure_complete(**rt_kw)
async def txt_writer(handle, path, header: bytes = b'', start_key: bytes = b'FT VAR_SEQ', content_key: bytes = b'FT '):
start = False
async with aiofiles_open(path, 'wb') as fileOb:
if header:
await fileOb.write(header)
async for line in handle:
if line.startswith(start_key):
start = True
elif start and not line.startswith(content_key):
return path
if start:
await fileOb.write(line)
def stream_retrieve_txt(self, identifier, name_suffix='VAR_SEQ', **kwargs):
assert self.suffix == 'txt'
return self.txt_writer(handle=self.txt_reader(f'{BASE_URL}/uniprot/{identifier}.{self.suffix}'), path=self.get_cur_folder()/f'{identifier}+{name_suffix}.{self.suffix}', **kwargs)
```

Derived statistics and quality signals for record 1. Signal names are abbreviated: `num_words` stands for `qsc_code_num_words_quality_signal`, and the `py:` prefix marks the `qsc_codepython_*` signals; the order matches the schema above. The duplicated raw `qsc_*` columns (those without the `_quality_signal` suffix) are all 0 for this record, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null.

| Signal | Value |
|---|---|
| avg_line_length | 42.55684 |
| max_line_length | 186 |
| alphanum_fraction | 0.55467 |
| num_words | 2,578 |
| num_chars | 22,087 |
| mean_word_length | 4.553918 |
| frac_words_unique | 0.165632 |
| frac_chars_top_2grams | 0.010562 |
| frac_chars_top_3grams | 0.011244 |
| frac_chars_top_4grams | 0.010307 |
| frac_chars_dupe_5grams | 0.230579 |
| frac_chars_dupe_6grams | 0.185349 |
| frac_chars_dupe_7grams | 0.141482 |
| frac_chars_dupe_8grams | 0.125724 |
| frac_chars_dupe_9grams | 0.102129 |
| frac_chars_dupe_10grams | 0.083816 |
| frac_chars_replacement_symbols | 0 |
| frac_chars_digital | 0.008874 |
| frac_chars_whitespace | 0.316295 |
| size_file_byte | 22,087 |
| num_lines | 518 |
| num_chars_line_max | 187 |
| num_chars_line_mean | 42.638996 |
| frac_chars_alphabet | 0.768558 |
| frac_chars_comments | 0.019378 |
| cate_xml_start | 0 |
| frac_lines_dupe_lines | 0.217054 |
| cate_autogen | 0 |
| frac_lines_long_string | 0 |
| frac_chars_string_length | 0.089272 |
| frac_chars_long_word_length | 0.034603 |
| frac_lines_string_concat | 0 |
| cate_encoded_data | 0 |
| frac_chars_hex_words | 0 |
| frac_lines_prompt_comments | 0 |
| frac_lines_assert | 0.015504 |
| py:cate_ast | 1 |
| py:frac_lines_func_ratio | 0.085271 |
| py:cate_var_zero | false |
| py:frac_lines_pass | 0 |
| py:frac_lines_import | 0.116279 |
| py:frac_lines_simplefunc | 0.031008 |
| py:score_lines_no_logic | 0.302326 |
| py:frac_lines_print | 0 |
| effective | 1 |
| hits | 0 |
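The simpler signal names read as plain character- and line-level statistics. A hedged sketch of how a few of them could plausibly be computed from `content`; these are interpretations of the column names, not the pipeline's actual implementation:

```python
# Plausible re-implementations of a few simple quality signals
# (interpretations of the column names, not the dataset's own code).

def num_lines(content: str) -> int:
    return len(content.splitlines())

def avg_line_length(content: str) -> float:
    lines = content.splitlines()
    return sum(len(line) for line in lines) / len(lines) if lines else 0.0

def frac_chars_whitespace(content: str) -> float:
    return sum(c.isspace() for c in content) / len(content) if content else 0.0

def frac_words_unique(content: str) -> float:
    words = content.split()
    return len(set(words)) / len(words) if words else 0.0

demo = "import os\nimport os\n\nprint(os.name)\n"
print(num_lines(demo), avg_line_length(demo))  # 4 8.0
```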
Record 2: `hep_spt/stats/poisson.py` from `mramospe/hepspt`

| Field | Value |
|---|---|
| hexsha | 9a41e415317ae7c881f36ab4cbf51cbe613df940 |
| size | 9,409 |
| ext | py |
| lang | Python |
| repo_path (all groups) | hep_spt/stats/poisson.py |
| repo_name (all groups) | mramospe/hepspt |
| repo_head_hexsha (all groups) | 11f74978a582ebc20e0a7765dafc78f0d1f1d5d5 |
| licenses (all groups) | ["MIT"] |

| Group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | null | null | null |
| max_issues | null | null | null |
| max_forks | 1 | 2021-11-03T03:36:15.000Z | 2021-11-03T03:36:15.000Z |

content:

```python
'''
Function and classes representing statistical tools.
'''
__author__ = ['Miguel Ramos Pernas']
__email__ = ['miguel.ramos.pernas@cern.ch']
from hep_spt.stats.core import chi2_one_dof, one_sigma
from hep_spt.core import decorate, taking_ndarray
from hep_spt import PACKAGE_PATH
import numpy as np
import os
from scipy.stats import poisson
from scipy.optimize import fsolve
import warnings
__all__ = ['calc_poisson_fu',
'calc_poisson_llu',
'gauss_unc',
'poisson_fu',
'poisson_llu',
'sw2_unc'
]
# Number after which the poisson uncertainty is considered to
# be the same as that of a gaussian with "std = sqrt(lambda)".
__poisson_to_gauss__ = 200
def _access_db(name):
'''
Access a database table under 'data/'.
:param name: name of the file holding the data.
:type name: str
:returns: Array holding the data.
:rtype: numpy.ndarray
'''
ifile = os.path.join(PACKAGE_PATH, 'data', name)
table = np.loadtxt(ifile)
return table
@decorate(np.vectorize)
def calc_poisson_fu(m, cl=one_sigma):
'''
Return the lower and upper frequentist uncertainties for
a poisson distribution with mean "m".
:param m: mean of the Poisson distribution.
:type m: float or np.ndarray(float)
:param cl: confidence level (between 0 and 1).
:type cl: float or np.ndarray(float)
:returns: Lower and upper uncertainties.
:rtype: (float, float) or np.ndarray(float, float)
.. note:: This function might turn very time consuming. Consider using :func:`poisson_fu` instead.
'''
sm = np.sqrt(m)
alpha = (1. - cl)/2.
il, ir = _poisson_initials(m)
if m < 1:
# In this case there is only an upper uncertainty, so
# the coverage is reset so it covers the whole "cl"
lw = m
alpha *= 2.
else:
def fleft(l): return 1. - \
(poisson.cdf(m, l) - poisson.pmf(m, l)) - alpha
lw = fsolve(fleft, il)[0]
def fright(l): return poisson.cdf(m, l) - alpha
up = fsolve(fright, ir)[0]
return _process_poisson_unc(m, lw, up)
@decorate(np.vectorize)
def calc_poisson_llu(m, cl=one_sigma):
'''
Calculate poisson uncertainties based on the logarithm of likelihood.
:param m: mean of the Poisson distribution.
:type m: float or numpy.ndarray(float)
:param cl: confidence level (between 0 and 1).
:type cl: float or numpy.ndarray(float)
:returns: Lower and upper uncertainties.
:rtype: (float, float) or numpy.ndarray(float, float)
.. note:: This function might turn very time consuming. Consider using :func:`poisson_llu` instead.
'''
ns = np.sqrt(chi2_one_dof.ppf(cl))
def nll(x): return -2.*np.log(poisson.pmf(m, x))
ref = nll(m)
def func(x): return nll(x) - ref - ns
il, ir = _poisson_initials(m)
if m < 1:
lw = m
else:
lw = fsolve(func, il)[0]
up = fsolve(func, ir)[0]
return _process_poisson_unc(m, lw, up)
def gauss_unc(s, cl=one_sigma):
'''
Calculate the gaussian uncertainty for a given confidence level.
:param s: standard deviation of the gaussian.
:type s: float or numpy.ndarray(float)
:param cl: confidence level.
:type cl: float
:returns: Gaussian uncertainty.
:rtype: float or numpy.ndarray(float)
.. seealso:: :func:`poisson_fu`, :func:`poisson_llu`, :func:`sw2_unc`
'''
n = np.sqrt(chi2_one_dof.ppf(cl))
return n*s
def poisson_fu(m):
'''
Return the poisson frequentist uncertainty at one standard
deviation of confidence level.
:param m: measured value(s).
:type m: int or numpy.ndarray(int)
:returns: Lower and upper frequentist uncertainties.
:rtype: numpy.ndarray(float, float)
.. seealso:: :func:`gauss_unc`, :func:`poisson_llu`, :func:`sw2_unc`
'''
return _poisson_unc_from_db(m, 'poisson_fu.dat')
def poisson_llu(m):
'''
Return the poisson uncertainty at one standard deviation of
confidence level. The lower and upper uncertainties are defined
by those two points with a variation of one in the value of the
negative logarithm of the likelihood multiplied by two:
.. math::
\\sigma_\\text{low} = n_\\text{obs} - \\lambda_\\text{low}
.. math::
\\alpha - 2\\log P(n_\\text{obs}|\\lambda_\\text{low}) = 1
.. math::
\\sigma_\\text{up} = \\lambda_\\text{up} - n_\\text{obs}
.. math::
\\alpha - 2\\log P(n_\\text{obs}|\\lambda_\\text{up}) = 1
where :math:`\\alpha = 2\\log P(n_\\text{obs}|n_\\text{obs})`.
:param m: measured value(s).
:type m: int or numpy.ndarray(int)
:returns: Lower and upper frequentist uncertainties.
:rtype: numpy.ndarray(float, float)
.. seealso:: :func:`gauss_unc`, :func:`poisson_fu`, :func:`sw2_unc`
'''
return _poisson_unc_from_db(m, 'poisson_llu.dat')
@taking_ndarray
def _poisson_initials(m):
'''
Return the boundaries to use as initial values in
scipy.optimize.fsolve when calculating poissonian
uncertainties.
:param m: mean of the Poisson distribution.
:type m: float or numpy.ndarray(float)
:returns: Upper and lower boundaries.
:rtype: (float, float) or numpy.ndarray(float, float)
'''
sm = np.sqrt(m)
il = m - sm
ir = m + sm
# Needed by "calc_poisson_llu"
if il.ndim == 0:
if il <= 0:
il = 0.1
else:
il[il <= 0] = 0.1
return il, ir
def _poisson_unc_from_db(m, database):
'''
Used in functions to calculate poissonian uncertainties,
which are partially stored on databases. If "m" is above the
maximum number stored in the database, the gaussian approximation
is taken instead.
:param m: measured value(s).
:type m: int or numpy.ndarray(int)
:param database: name of the database.
:type database: str
:returns: Lower and upper frequentist uncertainties.
:rtype: (float, float) or numpy.ndarray(float, float)
:raises TypeError: if the input is a (has) non-integer value(s).
:raises ValueError: if the input value(s) is(are) not positive.
'''
m = np.array(m)
if not np.issubdtype(m.dtype, np.integer):
raise TypeError('Calling function with a non-integer value')
if np.any(m < 0):
raise ValueError('Values must be positive')
scalar_input = False
if m.ndim == 0:
m = m[None]
scalar_input = True
no_app = (m < __poisson_to_gauss__)
if np.count_nonzero(no_app) == 0:
# We can use the gaussian approximation in all
out = np.array(2*[np.sqrt(m)]).T
else:
# Non-approximated uncertainties
table = _access_db(database)
out = np.zeros((len(m), 2), dtype=np.float64)
out[no_app] = table[m[no_app]]
mk_app = np.logical_not(no_app)
if mk_app.any():
# Use the gaussian approximation for the rest
out[mk_app] = np.array(2*[np.sqrt(m[mk_app])]).T
if scalar_input:
return np.squeeze(out)
return out
def _process_poisson_unc(m, lw, up):
'''
Calculate the uncertainties and display an error if they
have been incorrectly calculated.
:param m: mean value.
:type m: float
:param lw: lower bound.
:type lw: float
:param up: upper bound.
:type up: float
:returns: Lower and upper uncertainties.
:type: numpy.ndarray(float, float)
'''
s_lw = m - lw
s_up = up - m
if any(s < 0 for s in (s_lw, s_up)):
warnings.warn('Poisson uncertainties have been '
'incorrectly calculated')
# numpy.vectorize needs to know the exact type of the output
return float(s_lw), float(s_up)
def sw2_unc(arr, bins=20, range=None, weights=None):
'''
Calculate the errors using the sum of squares of weights.
The uncertainty is calculated as follows:
.. math::
\\sigma_i = \\sqrt{\\sum_{j = 0}^{n - 1} \\omega_{i,j}^2}
where *i* refers to the i-th bin and :math:`j \\in [0, n)` refers to
each entry in that bin with weight :math:`\\omega_{i,j}`. If "weights" is
None, then this coincides with the square root of the number of entries
in each bin.
:param arr: input array of data to process.
:param bins: see :func:`numpy.histogram`.
:type bins: int, sequence of scalars or str
:param range: range to process in the input array.
:type range: None or tuple(float, float)
:param weights: possible weights for the histogram.
:type weights: None or numpy.ndarray(value-type)
:returns: Symmetric uncertainty.
:rtype: numpy.ndarray
.. seealso:: :func:`gauss_unc`, :func:`poisson_fu`, :func:`poisson_llu`
'''
if weights is not None:
values = np.histogram(arr, bins, range, weights=weights*weights)[0]
else:
values = np.histogram(arr, bins, range)[0]
return np.sqrt(values)
if __name__ == '__main__':
'''
Generate the tables to store the pre-calculated values of
some uncertainties.
'''
m = np.arange(__poisson_to_gauss__)
print('Creating databases:')
for func in (calc_poisson_fu, calc_poisson_llu):
ucts = np.array(func(m, one_sigma)).T
name = func.__name__.replace('calc_', r'') + '.dat'
fpath = os.path.join('data', name)
print('- {}'.format(fpath))
np.savetxt(fpath, ucts)
```

Derived statistics and quality signals for record 2 (names abbreviated as in record 1; raw `qsc_*` columns: all 0, with `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null):

| Signal | Value |
|---|---|
| avg_line_length | 27.755162 |
| max_line_length | 103 |
| alphanum_fraction | 0.636199 |
| num_words | 1,359 |
| num_chars | 9,409 |
| mean_word_length | 4.282561 |
| frac_words_unique | 0.210449 |
| frac_chars_top_2grams | 0.035052 |
| frac_chars_top_3grams | 0.028866 |
| frac_chars_top_4grams | 0.026117 |
| frac_chars_dupe_5grams | 0.338832 |
| frac_chars_dupe_6grams | 0.324399 |
| frac_chars_dupe_7grams | 0.263574 |
| frac_chars_dupe_8grams | 0.251031 |
| frac_chars_dupe_9grams | 0.208591 |
| frac_chars_dupe_10grams | 0.180241 |
| frac_chars_replacement_symbols | 0 |
| frac_chars_digital | 0.007745 |
| frac_chars_whitespace | 0.245297 |
| size_file_byte | 9,409 |
| num_lines | 338 |
| num_chars_line_max | 104 |
| num_chars_line_mean | 27.837278 |
| frac_chars_alphabet | 0.811857 |
| frac_chars_comments | 0.527793 |
| cate_xml_start | 0 |
| frac_lines_dupe_lines | 0.150442 |
| cate_autogen | 0 |
| frac_lines_long_string | 0 |
| frac_chars_string_length | 0.081703 |
| frac_chars_long_word_length | 0.007139 |
| frac_lines_string_concat | 0 |
| cate_encoded_data | 0 |
| frac_chars_hex_words | 0 |
| frac_lines_prompt_comments | 0 |
| frac_lines_assert | 0 |
| py:cate_ast | 1 |
| py:frac_lines_func_ratio | 0.123894 |
| py:cate_var_zero | false |
| py:frac_lines_pass | 0 |
| py:frac_lines_import | 0.070796 |
| py:frac_lines_simplefunc | 0.035398 |
| py:score_lines_no_logic | 0.292035 |
| py:frac_lines_print | 0.017699 |
| effective | 1 |
| hits | 0 |
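As a quick orientation for the `hep_spt/stats/poisson.py` content above, a hedged usage sketch, assuming `hep_spt` is installed together with the `poisson_fu.dat`/`poisson_llu.dat` tables the module loads:

```python
import numpy as np
from hep_spt.stats.poisson import poisson_fu, poisson_llu, sw2_unc

# Lower/upper Poisson uncertainties for observed counts; inputs must be
# integers, and values >= 200 fall back to the Gaussian sqrt(n).
counts = np.array([0, 3, 10, 250])
print(poisson_fu(counts))
print(poisson_llu(counts))

# Per-bin sum-of-weights-squared uncertainty of a histogram.
data = np.random.normal(size=1000)
print(sw2_unc(data, bins=10))
```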
Record 3: `augmentation.py` from `Harlequln/C1M18X-Behavioural_Cloning`

| Field | Value |
|---|---|
| hexsha | 9a47729e5dc9d9a2649d73a1b1f6d29309683f2b |
| size | 7,871 |
| ext | py |
| lang | Python |
| repo_path (all groups) | augmentation.py |
| repo_name (all groups) | Harlequln/C1M18X-Behavioural_Cloning |
| repo_head_hexsha (all groups) | 0c49ad2432b2694848a7b83fddeea04c3306aa80 |
| licenses (all groups) | ["MIT"] |

| Group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | null | null | null |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

```python
import cv2
import numpy as np
import matplotlib.image as mpimg
from pathlib import Path
from model import *
CAMERA_STEERING_CORRECTION = 0.2
def image_path(sample, camera="center"):
""" Transform the sample path to the repository structure.
Args:
sample: a sample (row) of the data dataframe. Usually drawn of a batch
by the generator
camera: the camera to extract the path for
Returns:
the converted image path string
"""
return str(Path(f"./data/{sample[camera].split('data')[-1]}"))
def crop_image(image, top=60, bot=25):
""" Crop the upper and lower borders of the given image.
Args:
image: the image to crop
top: the pixels to crop from the upper part
bot: the pixels to crop from the bottom part
Returns:
the cropped image
"""
return image[top:-bot, :, :]
def resize_image(image, shape=NVIDIA_SHAPE[0:2]):
""" Resize the image to shape.
Args:
image: input image
shape: (height, width) tuple, defaults to Nvidia input shape (66, 200)
Returns:
the resized image
"""
h, w = shape
return cv2.resize(image, dsize=(w, h), interpolation=cv2.INTER_AREA)
def rgb2yuv(rgb_image):
""" Convert the RGB image to YUV space. """
return cv2.cvtColor(rgb_image, cv2.COLOR_RGB2YUV)
def rgb2hsv(rgb_image):
""" Convert the RGB image to HSV space. """
return cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)
def hsv2rgb(hsv_image):
""" Convert the HSV image to RGB space. """
return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2RGB)
def choose_camera(sample, camera='random', probs=None):
"""
Choose an image for a specific camera and eventually adjust the steering.
The steering of the left and right cameras is adjusted according to the
defined constant CAMERA_STEERING_CONSTANT
Args:
sample: a sample (row) of the data dataframe. Usually drawn of a batch
by the generator
camera: 'random', 'left', 'center' or 'right'. If 'random' choose the
camera with the given probabilities.
probs: the probabilities to choose the left, center or right cameras. If
None, the probabilities are uniform.
Returns:
a (image, steering) tuple
"""
if camera == 'random':
camera = np.random.choice(["left", "center", "right"], p=probs)
image = mpimg.imread(image_path(sample, camera=camera))
steering = sample["steer"]
if camera == "left":
steering += CAMERA_STEERING_CORRECTION
elif camera == "right":
steering -= CAMERA_STEERING_CORRECTION
return image, steering
def flip(image, steering, prob=0.5):
""" Flip the image and steering with the given probability.
Args:
image: the image to flip
steering: the steering corresponding to the image
prob: the flip probability
Returns:
the augmented image
"""
if np.random.random() < prob:
image = cv2.flip(image, 1)
steering *= -1
return image, steering
def shadow(rgb_image, prob=0.5):
""" Add a shadow to the rgb image with the given probability.
The shadow is created by converting the RGB image into HSV space and
modifying the value channel in a random range. The area in which the value
is modified is defined by a convex hull created for 6 randomly chosen points
in the lower half of the image.
Args:
rgb_image: the image to add the shadow to. Has to be in RGB space.
prob: the probability to add the shadow
Returns:
the augmented image
"""
if np.random.random() < prob:
width, height = rgb_image.shape[1], rgb_image.shape[0]
# Get 6 random vertices in the lower half of the image
x = np.random.randint(-0.1 * width, 1.1 * width, 6)
y = np.random.randint(height * 0.5, 1.1 * height, 6)
vertices = np.column_stack((x, y)).astype(np.int32)
vertices = cv2.convexHull(vertices).squeeze()
# Intilialize mask
mask = np.zeros((height, width), dtype=np.int32)
# Create the polygon mask
cv2.fillPoly(mask, [vertices], 1)
# Adjust value
hsv = rgb2hsv(rgb_image)
v = hsv[:, :, 2]
hsv[:, :, 2] = np.where(mask, v * np.random.uniform(0.5, 0.8), v)
rgb_image = hsv2rgb(hsv)
return rgb_image
def brightness(rgb_image, low=0.6, high=1.4, prob=0.5):
""" Modify the brighntess of the rgb image with the given probability.
The brightness is modified by converting the RGB image into HSV space and
adusting the value channel in a random range between the low and high
bounds.
Args:
rgb_image: the image to modify the brightness. Has to be in RGB space.
low: lower value bound
high: upper value bound
prob: the probability to modify the brightness
Returns:
the augmented image
"""
if np.random.random() < prob:
hsv = rgb2hsv(rgb_image)
value = hsv[:, :, 2]
hsv[:, :, 2] = np.clip(value * np.random.uniform(low, high), 0, 255)
rgb_image = hsv2rgb(hsv)
return rgb_image
def shift(image, steering, shiftx=60, shifty=20, prob=0.5):
""" Shift the image and adjust the steering with the given probability.
The steering of the shifted image is adjusted depending on the amount of
pixels shifted in the width direction.
Args:
image: the image to shift.
steering: the corresponding steering.
shiftx: the upper bound of pixels to shift in the width direction
shifty: the upper bound of pixels to shift in the height direction
prob: the probability to shift the image
Returns:
the augmented image
"""
if np.random.random() < prob:
# The angle correction per pixel is derived from the angle correction
# specified for the side cameras. It is estimated that the images of two
# adjacent cameras are shifted by 80 pixels (at the bottom of the image)
angle_correction_per_pixel = CAMERA_STEERING_CORRECTION / 80
# Draw translations in x and y directions from a uniform distribution
tx = int(np.random.uniform(-shiftx, shiftx))
ty = int(np.random.uniform(-shifty, shifty))
# Transformation matrix
mat = np.float32([[1, 0, tx],
[0, 1, ty]])
# Transform image and correct steering angle
height, width, _ = image.shape
image = cv2.warpAffine(image, mat, (width, height),
borderMode=cv2.BORDER_REPLICATE)
steering += tx * angle_correction_per_pixel
return image, steering
def augment(sample, camera_probs=None, flip_prob=0.5, shadow_prob=0.5,
bright_prob=0.5, shift_prob=0.5, ):
""" Augment the sample with the given probabilities.
Args:
sample: a sample (row) of the data dataframe. Usually drawn of a batch
by the generator
camera_probs: the probabilities to draw left, center or right camera
images
flip_prob: probability for an image to be flipped
shadow_prob: probability of shadow additon to the image
bright_prob: probability to modify the brightness of the image
shift_prob: probability for and image to be shifed
"""
image, steering = choose_camera(sample, probs=camera_probs)
image, steering = flip(image, steering, prob=flip_prob)
image = shadow(image, prob=shadow_prob)
image = brightness(image, prob=bright_prob)
image, steering = shift(image, steering, prob=shift_prob)
return image, steering
```

Derived statistics and quality signals for record 3 (names abbreviated as in record 1; raw `qsc_*` columns: all 0, with `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null):

| Signal | Value |
|---|---|
| avg_line_length | 35.138393 |
| max_line_length | 81 |
| alphanum_fraction | 0.632575 |
| num_words | 1,072 |
| num_chars | 7,871 |
| mean_word_length | 4.585821 |
| frac_words_unique | 0.203358 |
| frac_chars_top_2grams | 0.035801 |
| frac_chars_top_3grams | 0.009764 |
| frac_chars_top_4grams | 0.015256 |
| frac_chars_dupe_5grams | 0.256713 |
| frac_chars_dupe_6grams | 0.214809 |
| frac_chars_dupe_7grams | 0.189992 |
| frac_chars_dupe_8grams | 0.157038 |
| frac_chars_dupe_9grams | 0.112286 |
| frac_chars_dupe_10grams | 0.0476 |
| frac_chars_replacement_symbols | 0 |
| frac_chars_digital | 0.018663 |
| frac_chars_whitespace | 0.285224 |
| size_file_byte | 7,871 |
| num_lines | 223 |
| num_chars_line_max | 82 |
| num_chars_line_mean | 35.295964 |
| frac_chars_alphabet | 0.855137 |
| frac_chars_comments | 0.48685 |
| cate_xml_start | 0 |
| frac_lines_dupe_lines | 0.186667 |
| cate_autogen | 0 |
| frac_lines_long_string | 0 |
| frac_chars_string_length | 0.025905 |
| frac_chars_long_word_length | 0.012069 |
| frac_lines_string_concat | 0 |
| cate_encoded_data | 0 |
| frac_chars_hex_words | 0 |
| frac_lines_prompt_comments | 0 |
| frac_lines_assert | 0 |
| py:cate_ast | 1 |
| py:frac_lines_func_ratio | 0.16 |
| py:cate_var_zero | false |
| py:frac_lines_pass | 0 |
| py:frac_lines_import | 0.066667 |
| py:frac_lines_simplefunc | 0 |
| py:score_lines_no_logic | 0.386667 |
| py:frac_lines_print | 0 |
| effective | 1 |
| hits | 0 |
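A hedged usage sketch for the augmentation pipeline above, assuming the file is importable as `augmentation`, that its `model` dependency is available, and that the referenced driving-log images exist on disk; the sample row below is invented to match the layout `image_path` and `choose_camera` expect:

```python
import pandas as pd
from augmentation import augment  # the module above; needs its `model` import

# Hypothetical driving-log row with the columns image_path() reads.
sample = pd.Series({
    "left": "data/IMG/left_0001.jpg",
    "center": "data/IMG/center_0001.jpg",
    "right": "data/IMG/right_0001.jpg",
    "steer": 0.05,
})

# Favor the center camera, then run the full augmentation chain.
image, steering = augment(
    sample,
    camera_probs=[0.25, 0.5, 0.25],
    flip_prob=0.5, shadow_prob=0.5, bright_prob=0.5, shift_prob=0.5,
)
print(image.shape, steering)
```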
Record 4: `test_zeroshot.py` from `airbert-vln/airbert`

| Field | Value |
|---|---|
| hexsha | 9a483acc0e1727f56a550dc2b790cfba50c01c45 |
| size | 4,848 |
| ext | py |
| lang | Python |
| repo_path (all groups) | test_zeroshot.py |
| repo_name (all groups) | airbert-vln/airbert |
| repo_head_hexsha (all groups) | a4f667db9fb4021094c738dd8d23739aee3785a5 |
| licenses (all groups) | ["MIT"] |

| Group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 17 | 2021-07-30T14:08:24.000Z | 2022-03-30T13:57:02.000Z |
| max_issues | 4 | 2021-09-09T03:02:18.000Z | 2022-03-24T13:55:55.000Z |
| max_forks | 2 | 2021-08-30T11:51:16.000Z | 2021-09-03T09:18:50.000Z |

content:

```python
import json
import logging
from typing import List
import os
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer, BertTokenizer
from vilbert.vilbert import BertConfig
from utils.cli import get_parser
from utils.dataset.common import pad_packed, load_json_data
from utils.dataset.zero_shot_dataset import ZeroShotDataset
from utils.dataset import PanoFeaturesReader
from airbert import Airbert
from train import get_model_input, get_mask_options
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
def main():
# ----- #
# setup #
# ----- #
# command line parsing
parser = get_parser(training=False)
parser.add_argument(
"--split",
choices=["train", "val_seen", "val_unseen", "test"],
required=True,
help="Dataset split for evaluation",
)
args = parser.parse_args()
# force arguments
args.num_beams = 1
args.batch_size = 1
print(args)
# create output directory
save_folder = os.path.join(args.output_dir, f"run-{args.save_name}")
if not os.path.exists(save_folder):
os.makedirs(save_folder)
# ------------ #
# data loaders #
# ------------ #
# load a dataset
# tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer, do_lower_case=True)
tokenizer = AutoTokenizer.from_pretrained(args.bert_tokenizer)
if not isinstance(tokenizer, BertTokenizer):
raise ValueError("fix mypy")
features_reader = PanoFeaturesReader(args.img_feature, args.in_memory)
vln_data = f"data/task/{args.prefix}R2R_{args.split}.json"
print(vln_data)
dataset = ZeroShotDataset(
vln_path=vln_data,
tokenizer=tokenizer,
features_reader=features_reader,
max_instruction_length=args.max_instruction_length,
max_path_length=args.max_path_length,
max_num_boxes=args.max_num_boxes,
default_gpu=True,
highlighted_language=args.highlighted_language,
)
data_loader = DataLoader(
dataset,
shuffle=False,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=True,
)
# ----- #
# model #
# ----- #
config = BertConfig.from_json_file(args.config_file)
config.cat_highlight = args.cat_highlight
model = Airbert.from_pretrained(args.from_pretrained, config, default_gpu=True)
model.cuda()
logger.info(f"number of parameters: {sum(p.numel() for p in model.parameters()):,}")
# ---------- #
# evaluation #
# ---------- #
with torch.no_grad():
all_scores = eval_epoch(model, data_loader, args)
# save scores
scores_path = os.path.join(save_folder, f"{args.prefix}_scores_{args.split}.json")
json.dump(all_scores, open(scores_path, "w"))
logger.info(f"saving scores: {scores_path}")
# convert scores into results format
vln_data = load_json_data(vln_data)
instr_id_to_beams = {
f"{item['path_id']}_{i}": item["beams"]
for item in vln_data
for i in range(len(item["instructions"]))
}
all_results = convert_scores(all_scores, instr_id_to_beams)
# save results
results_path = os.path.join(save_folder, f"{args.prefix}_results_{args.split}.json")
json.dump(all_results, open(results_path, "w"))
logger.info(f"saving results: {results_path}")
def eval_epoch(model, data_loader, args):
device = next(model.parameters()).device
model.eval()
all_scores = []
for batch in tqdm(data_loader):
# load batch on gpu
batch = tuple(t.cuda(device=device, non_blocking=True) for t in batch)
instr_ids = get_instr_ids(batch)
# get the model output
output = model(*get_model_input(batch))
opt_mask = get_mask_options(batch)
vil_logit = pad_packed(output[0].squeeze(1), opt_mask)
for instr_id, logit in zip(instr_ids, vil_logit):
all_scores.append((instr_id, logit.tolist()))
return all_scores
def convert_scores(all_scores, instr_id_to_beams):
output = []
for instr_id, scores in all_scores:
idx = np.argmax(scores)
beams = instr_id_to_beams[instr_id]
trajectory = []
trajectory += [beams[idx], 0, 0]
output.append({"instr_id": instr_id, "trajectory": trajectory})
# assert len(output) == len(beam_data)
return output
# ------------- #
# batch parsing #
# ------------- #
def get_instr_ids(batch) -> List[str]:
instr_ids = batch[12]
return [str(item[0].item()) + "_" + str(item[1].item()) for item in instr_ids]
if __name__ == "__main__":
main()
```

Derived statistics and quality signals for record 4 (names abbreviated as in record 1; raw `qsc_*` columns: all 0, with `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null):

| Signal | Value |
|---|---|
| avg_line_length | 27.545455 |
| max_line_length | 88 |
| alphanum_fraction | 0.65821 |
| num_words | 629 |
| num_chars | 4,848 |
| mean_word_length | 4.828299 |
| frac_words_unique | 0.310016 |
| frac_chars_top_2grams | 0.023049 |
| frac_chars_top_3grams | 0.011854 |
| frac_chars_top_4grams | 0.018439 |
| frac_chars_dupe_5grams | 0.115904 |
| frac_chars_dupe_6grams | 0.095489 |
| frac_chars_dupe_7grams | 0.046757 |
| frac_chars_dupe_8grams | 0.046757 |
| frac_chars_dupe_9grams | 0.023049 |
| frac_chars_dupe_10grams | 0 |
| frac_chars_replacement_symbols | 0 |
| frac_chars_digital | 0.002884 |
| frac_chars_whitespace | 0.213284 |
| size_file_byte | 4,848 |
| num_lines | 175 |
| num_chars_line_max | 89 |
| num_chars_line_mean | 27.702857 |
| frac_chars_alphabet | 0.793393 |
| frac_chars_comments | 0.094472 |
| cate_xml_start | 0 |
| frac_lines_dupe_lines | 0 |
| cate_autogen | 0 |
| frac_lines_long_string | 0 |
| frac_chars_string_length | 0.109271 |
| frac_chars_long_word_length | 0.037727 |
| frac_lines_string_concat | 0 |
| cate_encoded_data | 0 |
| frac_chars_hex_words | 0 |
| frac_lines_prompt_comments | 0 |
| frac_lines_assert | 0 |
| py:cate_ast | 1 |
| py:frac_lines_func_ratio | 0.036697 |
| py:cate_var_zero | false |
| py:frac_lines_pass | 0 |
| py:frac_lines_import | 0.155963 |
| py:frac_lines_simplefunc | 0 |
| py:score_lines_no_logic | 0.220183 |
| py:frac_lines_print | 0.018349 |
| effective | 1 |
| hits | 0 |
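To make the beam bookkeeping in `convert_scores` above concrete, a small hedged example with invented identifiers, assuming the file's dependencies are installed so it can be imported:

```python
from test_zeroshot import convert_scores  # module above; heavy imports apply

# One instruction with two candidate beams and their scores.
all_scores = [("4237_0", [0.1, 0.9])]
instr_id_to_beams = {"4237_0": [["viewpoint_a"], ["viewpoint_b"]]}

# argmax selects beam 1, so the winning beam heads the trajectory:
# [{'instr_id': '4237_0', 'trajectory': [['viewpoint_b'], 0, 0]}]
print(convert_scores(all_scores, instr_id_to_beams))
```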
Record 5: `refp.py` from `jon2718/ipycool_2.0`

| Field | Value |
|---|---|
| hexsha | 9a49459be97466ed19cf1a661276df8eb41c082e |
| size | 3,184 |
| ext | py |
| lang | Python |
| repo_path (all groups) | refp.py |
| repo_name (all groups) | jon2718/ipycool_2.0 |
| repo_head_hexsha (all groups) | 34cf74ee99f4a725b997c50a7742ba788ac2dacd |
| licenses (all groups) | ["MIT"] |

| Group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | null | null | null |
| max_issues | null | null | null |
| max_forks | null | null | null |

content:

```python
from modeledcommandparameter import *
from pseudoregion import *
class Refp(ModeledCommandParameter, PseudoRegion):
"""
Reference particle
"""
begtag = 'REFP'
endtag = ''
models = {
'model_descriptor': {'desc': 'Phase model',
'name': 'phmodref',
'num_parms': 5,
'for001_format': {'line_splits': [5]}},
'0_crossing':
{'desc': '0-crossing phase iterative procedure',
'doc': 'Uses iterative procedure to find 0-crossing phase; tracks through all regions. Only works with ACCEL modesl 1,2 and 13.',
'icool_model_name': 2,
'parms':
{'phmodref': {'pos': 5, 'type': 'String', 'doc': ''},
'bmtype': {'pos': 1, 'type': 'Int', 'doc': ''}}},
'const_v':
{'desc': 'Assumes constant reference particle velocity',
'doc': 'Applies to any region',
'icool_model_name': 3,
'parms':
{'phmodref': {'pos': 5, 'type': 'String', 'doc': ''},
'bmtype': {'pos': 1, 'type': 'Int', 'doc': ''},
'pz0': {'pos': 2, 'type': 'Real', 'doc': ''},
't0': {'pos': 3, 'type': 'Real', 'doc': ''}}},
'en_loss':
{'desc': 'Assumes constant reference particle velocity',
'doc': 'Applies to any region',
'icool_model_name': 4,
'parms':
{'phmodref': {'pos': 5, 'type': 'String', 'doc': ''},
'bmtype': {'pos': 1, 'type': 'Int', 'doc': ''},
'pz0': {'pos': 2, 'type': 'Real', 'doc': ''},
't0': {'pos': 3, 'type': 'Real', 'doc': ''},
'dedz': {'pos': 4, 'type': 'Real', 'doc': ''}}},
'delta_quad_cav':
{'desc': 'Assumes constant reference particle velocity',
'doc': 'Applies to any region',
'icool_model_name': 5,
'parms':
{'phmodref': {'pos': 5, 'type': 'String', 'doc': ''},
'bmtype': {'pos': 1, 'type': 'Int', 'doc': ''},
'e0': {'pos': 2, 'type': 'Real', 'doc': ''},
'dedz': {'pos': 3, 'type': 'Real', 'doc': ''},
'd2edz2': {'pos': 4, 'type': 'Real', 'doc': ''}}},
'delta_quad_any':
{'desc': 'Assumes constant reference particle velocity',
'doc': 'Applies to any region',
'icool_model_name': 6,
'parms':
{'phmodref': {'pos': 5, 'type': 'String', 'doc': ''},
'bmtype': {'pos': 1, 'type': 'Int', 'doc': ''},
'e0': {'pos': 2, 'type': 'Real', 'doc': ''},
'dedz': {'pos': 3, 'type': 'Real', 'doc': ''},
'd2edz2': {'pos': 4, 'type': 'Real', 'doc': ''}}},
}
def __init__(self, **kwargs):
if ModeledCommandParameter.check_command_params_init(self, Refp.models, **kwargs) is False:
sys.exit(0)
def __call__(self, **kwargs):
pass
def __setattr__(self, name, value):
self.__modeled_command_parameter_setattr__(name, value, Refp.models)
def __str__(self):
pass
```

Derived statistics and quality signals for record 5 (names abbreviated as in record 1; raw `qsc_*` columns: all 0, with `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null):

| Signal | Value |
|---|---|
| avg_line_length | 38.829268 |
| max_line_length | 139 |
| alphanum_fraction | 0.451005 |
| num_words | 308 |
| num_chars | 3,184 |
| mean_word_length | 4.50974 |
| frac_words_unique | 0.298701 |
| frac_chars_top_2grams | 0.063355 |
| frac_chars_top_3grams | 0.087113 |
| frac_chars_top_4grams | 0.061195 |
| frac_chars_dupe_5grams | 0.566595 |
| frac_chars_dupe_6grams | 0.563715 |
| frac_chars_dupe_7grams | 0.563715 |
| frac_chars_dupe_8grams | 0.539957 |
| frac_chars_dupe_9grams | 0.539957 |
| frac_chars_dupe_10grams | 0.539957 |
| frac_chars_replacement_symbols | 0 |
| frac_chars_digital | 0.023289 |
| frac_chars_whitespace | 0.339196 |
| size_file_byte | 3,184 |
| num_lines | 82 |
| num_chars_line_max | 140 |
| num_chars_line_mean | 38.829268 |
| frac_chars_alphabet | 0.636882 |
| frac_chars_comments | 0.005653 |
| cate_xml_start | 0 |
| frac_lines_dupe_lines | 0.484848 |
| cate_autogen | 0 |
| frac_lines_long_string | 0.015152 |
| frac_chars_string_length | 0.347509 |
| frac_chars_long_word_length | 0 |
| frac_lines_string_concat | 0 |
| cate_encoded_data | 0 |
| frac_chars_hex_words | 0 |
| frac_lines_prompt_comments | 0 |
| frac_lines_assert | 0 |
| py:cate_ast | 1 |
| py:frac_lines_func_ratio | 0.060606 |
| py:cate_var_zero | false |
| py:frac_lines_pass | 0.030303 |
| py:frac_lines_import | 0.030303 |
| py:frac_lines_simplefunc | 0 |
| py:score_lines_no_logic | 0.151515 |
| py:frac_lines_print | 0 |
| effective | 1 |
| hits | 0 |
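The `models` dictionary in `Refp` above is a registry mapping model names to ICOOL parameter layouts. A hedged helper, separate from ipycool's own machinery, that only reads that registry to show which positional slots a given model fills:

```python
from refp import Refp  # assumes ipycool's modules are on the path

def describe_model(models: dict, name: str) -> None:
    """Print the parameter layout recorded for one REFP model."""
    entry = models[name]
    print(f"{name}: icool_model_name={entry['icool_model_name']} ({entry['desc']})")
    for parm, spec in sorted(entry["parms"].items(), key=lambda kv: kv[1]["pos"]):
        print(f"  pos {spec['pos']}: {parm} ({spec['type']})")

describe_model(Refp.models, "const_v")
# pos 1: bmtype (Int), pos 2: pz0 (Real), pos 3: t0 (Real), pos 5: phmodref (String)
```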
Record 6: `utils/dataloaders.py` from `sinahmr/parted-vae`

| Field | Value |
|---|---|
| hexsha | 9a4a243b2c4f9a84354c254f16486d8c603e8178 |
| size | 10,620 |
| ext | py |
| lang | Python |
| repo_path (all groups) | utils/dataloaders.py |
| repo_name (all groups) | sinahmr/parted-vae |
| repo_head_hexsha (all groups) | 261f0654de605c6a260784e47e9a17a737a1a985 |
| licenses (all groups) | ["MIT"] |

| Group | count | event_min_datetime | event_max_datetime |
|---|---|---|---|
| max_stars | 5 | 2021-06-26T07:45:50.000Z | 2022-03-31T11:41:29.000Z |
| max_issues | null | null | null |
| max_forks | 1 | 2021-11-26T09:14:03.000Z | 2021-11-26T09:14:03.000Z |

content:

```python
import numpy as np
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms
from torchvision.utils import save_image
from utils.fast_tensor_dataloader import FastTensorDataLoader
def get_mnist_dataloaders(batch_size=128, path_to_data='../data', warm_up=True, device=None):
data_transforms = transforms.Compose([
transforms.Resize(32),
transforms.ToTensor()
])
target_transform = lambda x: F.one_hot(torch.tensor(x), num_classes=10)
train_data = datasets.MNIST(path_to_data, train=True, download=True, transform=data_transforms, target_transform=target_transform)
test_data = datasets.MNIST(path_to_data, train=False, transform=data_transforms, target_transform=target_transform)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
warm_up_loader = None
if warm_up:
warm_up_x, warm_up_y = WarmUpMNISTDataset(path_to_data, count=256, transform=data_transforms, target_transform=target_transform, device=device).get_tensors()
warm_up_loader = FastTensorDataLoader(warm_up_x, warm_up_y, batch_size=batch_size, shuffle=True)
return train_loader, test_loader, warm_up_loader
class WarmUpMNISTDataset(datasets.MNIST):
def __init__(self, root, transform=None, target_transform=None, download=False, count=256, device=None):
self.__class__.__name__ = 'MNIST' # This is used in directory structure of datasets.MNIST
super(WarmUpMNISTDataset, self).__init__(root, train=True, transform=transform, target_transform=target_transform, download=download)
self.device = device
self.count = count
self.delete = set()
self.mapping = list(set(range(count + len(self.delete))) - self.delete)
self.save_all_images()
def __len__(self):
return self.count
def __getitem__(self, index):
translated_index = self.mapping[index]
return super().__getitem__(translated_index)
def get_tensors(self):
x_shape, y_shape = self[0][0].shape, self[0][1].shape
x, y = torch.zeros(self.count, *x_shape, device=self.device), torch.zeros(self.count, *y_shape, device=self.device)
for i, (data, label) in enumerate(self):
x[i], y[i] = data.to(self.device), label.to(self.device)
return x, y
def save_all_images(self):
x_shape = self[0][0].shape
all_images = torch.zeros(self.count, *x_shape)
for i, (data, label) in enumerate(self):
all_images[i] = data
save_image(all_images, 'warm_up.png', nrow=(len(self) // 16))
def get_celeba_dataloader(batch_size=128, path_to_data='../celeba_64', device=None, warm_up=True):
data_transforms = transforms.Compose([
# transforms.Resize(64),
# transforms.CenterCrop(64),
transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
dataset_kwargs = {
'target_type': 'attr',
'transform': data_transforms,
}
train_data = datasets.CelebA(path_to_data, split='train', download=True, **dataset_kwargs)
test_data = datasets.CelebA(path_to_data, split='test', **dataset_kwargs)
# warm_up_data = WarmUpCelebADataset(path_to_data, split='train', target_transform=target_transforms, **dataset_kwargs)
dataloader_kwargs = {
'batch_size': batch_size,
'shuffle': True,
'pin_memory': device.type != 'cpu',
# 'pin_memory': False,
'num_workers': 0 if device.type == 'cpu' else 4,
}
train_loader = DataLoader(train_data, **dataloader_kwargs)
test_loader = DataLoader(test_data, **dataloader_kwargs)
# warm_up_loader = DataLoader(warm_up_data, **dataloader_kwargs)
warm_up_loader = None
if warm_up:
# target_transforms = transforms.Compose([
# lambda x: x[celeba_good_columns],
# # lambda x: torch.flatten(F.one_hot(x, num_classes=2))
# my_celeba_target_transfrom
# ])
warm_up_x, warm_up_y = WarmUpCelebADataset(path_to_data, count=800, device=device, **dataset_kwargs).get_tensors() # TODO: If it is good, make the class simpler
warm_up_loader = FastTensorDataLoader(warm_up_x, warm_up_y, batch_size=batch_size, shuffle=True)
return train_loader, test_loader, warm_up_loader
class WarmUpCelebADataset(datasets.CelebA):
def __init__(self, root, split="train", target_type="attr", transform=None, target_transform=None, download=False,
count=256, device=None):
super().__init__(root, split, target_type, transform, target_transform, download)
self.count = count
self.device = device
# self.delete = {2, 36, 43, 66, 74, 96, 119, 148, 149, 162, 166, 168, 183, 188, 198} # From 0 to 255+15
# self.delete = {43, 74, 162, 183} # From 0 to 299
self.delete = set()
self.mapping = list(set(range(count + len(self.delete))) - self.delete)
self.labels = torch.tensor(np.genfromtxt('warm_up_labels.csv', delimiter=','), dtype=torch.float)
# self.save_all_images()
def __len__(self):
return self.count
def __getitem__(self, index):
# return super().__getitem__(index)
translated_index = self.mapping[index]
x, _ = super().__getitem__(translated_index)
return x, self.labels[translated_index]
def get_tensors(self):
x_shape, y_shape = self[0][0].shape, self[0][1].shape
x, y = torch.zeros(self.count, *x_shape, device=self.device), torch.zeros(self.count, *y_shape, device=self.device)
for i, (data, label) in enumerate(self):
x[i], y[i] = data.to(self.device), label.to(self.device)
return x, y
def save_all_images(self):
x_shape = self[0][0].shape
all_images = torch.zeros(self.count, *x_shape)
for i, (data, label) in enumerate(self):
all_images[i] = data
save_image(all_images, 'warm_up.png', nrow=(len(self) // 16))
def get_dsprites_dataloader(batch_size=128, path_to_data='../dsprites/ndarray.npz', fraction=1., device=None, warm_up=False):
dsprites_data = DSpritesDataset(path_to_data, fraction=fraction, device=device)
# dsprites_loader = FastTensorDataLoader(*dsprites_data.get_tensors(), batch_size=batch_size, shuffle=True) # Comment if you have memory limits, and uncomment the next line
dataloader_kwargs = {
'batch_size': batch_size,
'shuffle': True,
'pin_memory': device.type != 'cpu',
'num_workers': 0 if device.type == 'cpu' else 4,
}
dsprites_loader = DataLoader(dsprites_data, **dataloader_kwargs)
warm_up_loader = None
if warm_up:
warm_up_data = DSpritesWarmUpDataset(path_to_data, device=device)
warm_up_loader = FastTensorDataLoader(*warm_up_data.get_tensors(), batch_size=batch_size, shuffle=True)
return dsprites_loader, warm_up_loader
class DSpritesWarmUpDataset(Dataset):
# Color[1], Shape[3], Scale, Orientation, PosX, PosY
def __init__(self, path_to_data, size=10000, device=None): # was 100, 737, 1000, 3686, 10000
self.device = device
data = np.load(path_to_data)
indices = self.good_indices(size)
self.imgs = np.expand_dims(data['imgs'][indices], axis=1)
shape_value = data['latents_classes'][indices, 1]
self.classes = np.zeros((size, 3))
self.classes[np.arange(size), shape_value] = 1
print(np.mean(self.classes, axis=0))
def good_indices(self, size):
# if size < 3 * 6 * 2 * 2 * 2:
# raise Exception('Too small!')
indices = np.zeros(size, dtype=np.long)
# [1, 3, 6, 40, 32, 32]
module = np.array([737280, 245760, 40960, 1024, 32, 1])
i = 0
while True:
for y_span in range(2):
for x_span in range(2):
for orientation_span in range(2):
for scale in range(6):
for shape in range(3):
orientation = int(np.random.randint(0, 20, 1) + orientation_span * 20)
x = int(np.random.randint(0, 16, 1) + x_span * 16)
y = int(np.random.randint(0, 16, 1) + y_span * 16)
sample = np.array([0, shape, scale, orientation, x, y])
indices[i] = np.sum(sample * module)
i += 1
if i >= size:
return indices
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
return self.imgs[idx], self.classes[idx]
def get_tensors(self):
return torch.tensor(self.imgs, dtype=torch.float, device=self.device), torch.tensor(self.classes, device=self.device)
class DSpritesDataset(Dataset):
# Color[1], Shape[3], Scale, Orientation, PosX, PosY
def __init__(self, path_to_data, fraction=1., device=None):
self.device = device
data = np.load(path_to_data)
self.imgs = data['imgs']
self.imgs = np.expand_dims(self.imgs, axis=1)
self.classes = data['latents_classes']
if fraction < 1:
indices = np.random.choice(737280, size=int(fraction * 737280), replace=False)
self.imgs = self.imgs[indices]
self.classes = self.classes[indices]
# self.attrs = data['latents_values'][indices]
# self.transform = transform
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
# # Each image in the dataset has binary values so multiply by 255 to get
# # pixel values
# sample = self.imgs[idx] * 255
# # Add extra dimension to turn shape into (H, W) -> (H, W, C)
# sample = sample.reshape(sample.shape + (1,))
# if self.transform:
# sample = self.transform(sample)
# Since there are no labels, we just return 0 for the "label" here
# return sample, (self.classes[idx], self.attrs[idx])
# return torch.tensor(self.imgs[idx], dtype=torch.float, device=self.device), torch.tensor(self.classes[idx], device=self.device)
return torch.tensor(self.imgs[idx], dtype=torch.float), torch.tensor(self.classes[idx])
def get_tensors(self):
return torch.tensor(self.imgs, dtype=torch.float, device=self.device), torch.tensor(self.classes, device=self.device)
| 44.06639
| 177
| 0.645104
| 1,409
| 10,620
| 4.635912
| 0.165366
| 0.028475
| 0.024495
| 0.022045
| 0.532149
| 0.490814
| 0.448714
| 0.376914
| 0.376914
| 0.326852
| 0
| 0.029893
| 0.234557
| 10,620
| 240
| 178
| 44.25
| 0.77365
| 0.166196
| 0
| 0.478528
| 0
| 0
| 0.029055
| 0.00261
| 0
| 0
| 0
| 0.004167
| 0
| 1
| 0.134969
| false
| 0
| 0.042945
| 0.04908
| 0.300614
| 0.006135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a4a26f9a634d7ab72a8a79970898804d2a1b1c4
| 1,780
|
py
|
Python
|
posts.py
|
girish97115/anonymail
|
f2eb741464ce7b780e4de6de6043c6eed1e13b9a
|
[
"MIT"
] | null | null | null |
posts.py
|
girish97115/anonymail
|
f2eb741464ce7b780e4de6de6043c6eed1e13b9a
|
[
"MIT"
] | null | null | null |
posts.py
|
girish97115/anonymail
|
f2eb741464ce7b780e4de6de6043c6eed1e13b9a
|
[
"MIT"
] | null | null | null |
from flask import (
Blueprint, session, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from anonymail.auth import login_required
from anonymail.db import get_db
import datetime
now = datetime.datetime.now()
current_year = now.year
bp = Blueprint('posts', __name__)
@bp.route('/')
@login_required
def index():
user_id = session.get('user_id')
db = get_db()
posts = db.execute(
'SELECT p.id, body, created'
' FROM post p JOIN user u ON p.dest = u.id'
' WHERE u.id = ?'
' ORDER BY created DESC', (user_id,)
).fetchall()
return render_template('posts/index.html', posts=posts, year=current_year)
@bp.route('/<string:username>/send', methods=('GET', 'POST'))
def create(username):
db = get_db()
if request.method == 'POST':
body = request.form['body']
row = db.execute(
'SELECT id from user where username= ?', (username,)
).fetchone()
error = None
if row is None:
error = "User doesn't exist"
else:
dest = row['id']
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'INSERT INTO post (body, dest)'
' VALUES (?, ?)',
(body, dest)
)
db.commit()
return render_template('posts/sent.html', user=username)
if request.method == 'GET':
dest = db.execute(
'SELECT id from user where username= ?', (username,)
).fetchone()
if dest is None:
abort(404)
else:
return render_template('posts/create.html', user=username)
@bp.route('/sendsomeone')
def send():
return render_template('send_a_message.html')
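# Minimal wiring sketch (hypothetical app factory; the source shows only the blueprint):
#
#   from flask import Flask
#   from anonymail import posts
#
#   def create_app():
#       app = Flask(__name__)
#       # ... db/auth setup elided ...
#       app.register_blueprint(posts.bp)
#       app.add_url_rule('/', endpoint='index')
#       return app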
| 28.253968
| 78
| 0.580337
| 218
| 1,780
| 4.633028
| 0.357798
| 0.069307
| 0.079208
| 0.074257
| 0.114851
| 0.114851
| 0.114851
| 0.114851
| 0.114851
| 0.114851
| 0
| 0.002364
| 0.287079
| 1,780
| 63
| 79
| 28.253968
| 0.793538
| 0
| 0
| 0.163636
| 0
| 0
| 0.206625
| 0.012914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.090909
| 0.018182
| 0.218182
| 0.036364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a4bcff10fc3fa7d7e56bb3812a166c957678a62
| 2,579
|
py
|
Python
|
src/subroutines/array_subroutine.py
|
cyrilico/aoco-code-correction
|
3a780df31eea6caaa37213f6347fb71565ce11e8
|
[
"MIT"
] | 4
|
2020-08-30T08:56:57.000Z
|
2020-08-31T21:32:03.000Z
|
src/subroutines/array_subroutine.py
|
cyrilico/aoco-code-correction
|
3a780df31eea6caaa37213f6347fb71565ce11e8
|
[
"MIT"
] | null | null | null |
src/subroutines/array_subroutine.py
|
cyrilico/aoco-code-correction
|
3a780df31eea6caaa37213f6347fb71565ce11e8
|
[
"MIT"
] | 1
|
2020-10-01T22:15:33.000Z
|
2020-10-01T22:15:33.000Z
|
from .subroutine import subroutine
from parameters.string_parameter import string_parameter as String
from parameters.numeric_parameter import numeric_parameter as Numeric
from parameters.array_parameter import array_parameter as Array
from ast import literal_eval
class array_subroutine(subroutine):
"""Subroutine that returns one or more arrays"""
def __init__(self, name, parameters, outputs):
super().__init__(name, parameters)
self.outputs = outputs
def get_nr_outputs(self):
return len(self.outputs)
def build_test_call(self):
# Declare output variables beforehand, so we have access to them after the subroutine call
declarations = ''.join(parameter.get_test_declaration_representation() for parameter in self.parameters)
# Actually make the subroutine call
call = '{}({});'.format(self.name, ','.join(parameter.get_test_call_representation() for parameter in self.parameters))
# Access the previously declared variables to print their final values
outputs = 'printf("\\n");'.join(filter(None, [parameter.get_test_call_output_representation() for parameter in self.parameters]))
return '{} {} {} printf("\\n");'.format(declarations, call, outputs)
def process_parameters(self, parameters):
for idx, parameter in enumerate(parameters):
is_output = idx >= (len(parameters) - len(self.outputs))
if parameter == 'string':
self.parameters.append(String(idx, is_output))
elif 'array' in parameter:
self.parameters.append(Array(idx, parameter.replace('array', '').strip(), is_output))
else: #numeric
self.parameters.append(Numeric(idx, parameter))
def compare_outputs(self, expected, real, precision):
if len(expected) != len(real):
return False
for out_type, exp, re in zip(self.outputs, expected, real):
if out_type == 'string':
if exp != re:  # compare against the matching element, not the whole list
return False
else: #Array
arr_type = out_type.replace('array', '').strip()
re_arr = literal_eval(re)
if len(exp) != len(re_arr):
return False
for exp_el, re_el in zip(exp, re_arr):
if arr_type == 'int' and exp_el != re_el:
return False
elif abs(exp_el - re_el) > precision:
return False
return True
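# Behaviour sketch for compare_outputs (illustrative values; building a full instance
# needs the subroutine base class, so only the comparison call is shown):
#
#   sub.outputs = ['array int']
#   sub.compare_outputs([[1, 2, 3]], ['[1, 2, 3]'], precision=1e-3)  # -> True
#   sub.compare_outputs([[1, 2, 3]], ['[1, 2, 4]'], precision=1e-3)  # -> False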
| 47.759259
| 154
| 0.606437
| 295
| 2,579
| 5.142373
| 0.294915
| 0.064601
| 0.027686
| 0.055372
| 0.142386
| 0.142386
| 0.059328
| 0.059328
| 0.059328
| 0.059328
| 0
| 0
| 0.288872
| 2,579
| 53
| 155
| 48.660377
| 0.827154
| 0.089957
| 0
| 0.170732
| 0
| 0
| 0.032092
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0
| 0.121951
| 0.04878
| 0.463415
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a4cab617527bcae29b76af4b2c39e67572e4127
| 1,164
|
py
|
Python
|
auth.py
|
nivw/onna_test
|
518c726a656493a5efd7ed6f548f68b2f5350260
|
[
"BSD-2-Clause"
] | null | null | null |
auth.py
|
nivw/onna_test
|
518c726a656493a5efd7ed6f548f68b2f5350260
|
[
"BSD-2-Clause"
] | null | null | null |
auth.py
|
nivw/onna_test
|
518c726a656493a5efd7ed6f548f68b2f5350260
|
[
"BSD-2-Clause"
] | 1
|
2020-06-24T16:52:59.000Z
|
2020-06-24T16:52:59.000Z
|
import requests
import json
from config import config
from logbook import Logger, StreamHandler
import sys
StreamHandler(sys.stdout).push_application()
log = Logger('auth')
class Auth(object):
def __init__(self):
self.config = config
self.auth_code = self.token = None
def get_auth_code(self):
if self.auth_code is not None:
return self.auth_code
response = requests.get(self.config.oauth_url, headers=self.config.headers)
self.auth_code = response.json()['auth_code']
log.info(f"Using auth_code: {self.auth_code}")
return self.auth_code
def get_token(self):
if self.token is not None:
return self.token
data = self.config.auth.login_data_request
data.update({"code": self.get_auth_code()})
response = requests.post(config.auth.auth_url,
data=json.dumps(data),
headers=config.headers)
self.token = response.content.decode("utf-8")
return self.token
def get_login_token(self):
return {'authorization': ' '.join(('Bearer', self.get_token()))}
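# Minimal usage sketch (assumes config.oauth_url and config.auth.* are populated;
# `protected_url` is illustrative):
#
#   auth = Auth()
#   headers = auth.get_login_token()   # {'authorization': 'Bearer <token>'}
#   requests.get(protected_url, headers=headers)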
| 31.459459
| 83
| 0.629725
| 148
| 1,164
| 4.783784
| 0.317568
| 0.112994
| 0.101695
| 0.042373
| 0.053672
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001166
| 0.262887
| 1,164
| 36
| 84
| 32.333333
| 0.824009
| 0
| 0
| 0.133333
| 0
| 0
| 0.064433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.166667
| 0.033333
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a51a2dfb9ee0eb5c3e19b169561bb01b5b7ae90
| 4,063
|
py
|
Python
|
application/api/generate_label.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
application/api/generate_label.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
application/api/generate_label.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
import numpy as np
import dnnlib.tflib as tflib
from training import dataset
tflib.init_tf()
class LabelGenerator:
def __init__(self, tfrecord_dir: str = None):
if tfrecord_dir:
self.training_set = dataset.TFRecordDataset(tfrecord_dir, shuffle_mb=0)
self.labels_available = True
else:
self.labels_available = False
self.label_names = [
'Model',
'Color',
'Manufacturer',
'Body',
'Rotation',
'Ratio',
'Background'
]
def payload_to_label_vector_rotation(
self,
payload: dict,
label_version: str,
num_interpolation_steps: int,
interpolation_type: str
) -> np.ndarray:
if label_version in ['v6', 'v7']:
if interpolation_type == 'spherical':
total_steps = num_interpolation_steps * 8
label = np.zeros((total_steps, 1, 121), dtype=np.float32)
for i in range(total_steps):
payload['Rotation'] = i * 360 / (total_steps - 1)
label[i] = self.payload_to_label_vector(payload, label_version)
return label
else:
label = np.zeros((8, 1, 121), dtype=np.float32)
for i in range(8):
payload['Rotation'] = i * 45
label[i] = self.payload_to_label_vector(payload, label_version)
return label
label = np.zeros((8, 1, 127), dtype=np.float32)
if label_version in ['v4']:
rotation_order = [0, 1, 3, 6, 5, 7, 4, 2]
else:
rotation_order = [0, 1, 2, 3, 4, 5, 6, 7]
for i in range(8):
payload['Rotation'] = rotation_order[i]
label[i] = self.payload_to_label_vector(payload, label_version)
return label
def payload_to_label_vector(
self,
payload: dict,
label_version: str
) -> np.ndarray:
if label_version in ['v4']:
rotation_order = [0, 1, 3, 6, 5, 7, 4, 2]
else:
rotation_order = [0, 1, 2, 3, 4, 5, 6, 7]
if label_version in ['v6', 'v7']:
offsets = [1, 67, 12, 18, 10, 2, 5, 6]
else:
offsets = [1, 67, 12, 18, 10, 8, 5, 6]
onehot = np.zeros((1, sum(offsets)), dtype=np.float32)
onehot[0, 0] = 1
offset = 0
for i, label_name in enumerate(self.label_names):
current_label = int(payload[label_name])
offset += offsets[i]
if label_name == 'Rotation':  # 'is' on string literals is not a reliable equality test
if label_version in ['v6', 'v7']:
rotation = (current_label / 360) * 2 * np.pi
onehot[0, offset] = np.cos(rotation)
onehot[0, offset + 1] = np.sin(rotation)
else:
onehot[0, offset + rotation_order[current_label]] = 1
elif current_label >= 0:
onehot[0, offset + current_label] = 1
return onehot
def get_real_label_dict(self, version: str) -> dict:
if not self.labels_available:
label_dict = {}
for i in range(len(self.label_names)):
label_dict[self.label_names[i]] = -1
return label_dict
label = self.training_set.get_random_labels_np(1)
label = label[0]
if version in ['v6', 'v7']:
offsets = [67, 12, 18, 10, 2, 5, 6]
else:
offsets = [67, 12, 18, 10, 8, 5, 6]
offset = 1
label_dict = {}
for i in range(len(self.label_names)):
if np.max(label[offset:offset + offsets[i]]) < 1:
label_dict[self.label_names[i]] = -1
else:
label_dict[self.label_names[i]] = int(np.argmax(label[offset:offset + offsets[i]]))
offset += offsets[i]
return label_dict
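# One-hot layout sketch for v6/v7 (offsets [1, 67, 12, 18, 10, 2, 5, 6] sum to 121):
# index 0 is a constant 1, then Model (67), Color (12), Manufacturer (18), Body (10),
# Rotation encoded as (cos, sin) (2), Ratio (5), Background (6). Illustrative call:
#
#   gen = LabelGenerator()              # no tfrecord_dir -> labels_available = False
#   payload = {'Model': 3, 'Color': 0, 'Manufacturer': 1, 'Body': 2,
#              'Rotation': 90, 'Ratio': 1, 'Background': 4}
#   vec = gen.payload_to_label_vector(payload, 'v6')   # np.ndarray of shape (1, 121)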
| 35.330435
| 99
| 0.502092
| 483
| 4,063
| 4.049689
| 0.204969
| 0.06135
| 0.050102
| 0.051125
| 0.468303
| 0.395194
| 0.326687
| 0.247444
| 0.247444
| 0.195297
| 0
| 0.057545
| 0.388383
| 4,063
| 114
| 100
| 35.640351
| 0.729577
| 0
| 0
| 0.41
| 0
| 0
| 0.027074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.03
| 0
| 0.14
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a51f5406e8b8b4afa3d8bc309049e92a8011b92
| 3,333
|
py
|
Python
|
tests/test_urls.py
|
LaudateCorpus1/apostello
|
1ace89d0d9e1f7a1760f6247d90a60a9787a4f12
|
[
"MIT"
] | 69
|
2015-10-03T20:27:53.000Z
|
2021-04-06T05:26:18.000Z
|
tests/test_urls.py
|
LaudateCorpus1/apostello
|
1ace89d0d9e1f7a1760f6247d90a60a9787a4f12
|
[
"MIT"
] | 73
|
2015-10-03T17:53:47.000Z
|
2020-10-01T03:08:01.000Z
|
tests/test_urls.py
|
LaudateCorpus1/apostello
|
1ace89d0d9e1f7a1760f6247d90a60a9787a4f12
|
[
"MIT"
] | 29
|
2015-10-23T22:00:13.000Z
|
2021-11-30T04:48:06.000Z
|
from collections import namedtuple
import pytest
from rest_framework.authtoken.models import Token
from tests.conftest import twilio_vcr
from apostello import models
StatusCode = namedtuple("StatusCode", "anon, user, staff")
@pytest.mark.slow
@pytest.mark.parametrize(
"url,status_code",
[
("/", StatusCode(302, 200, 200)),
("/api/v2/config/", StatusCode(403, 403, 200)),
("/api/v2/elvanto/groups/", StatusCode(403, 403, 200)),
("/api/v2/groups/", StatusCode(403, 200, 200)),
("/api/v2/keywords/", StatusCode(403, 200, 200)),
("/api/v2/queued/sms/", StatusCode(403, 403, 200)),
("/api/v2/recipients/", StatusCode(403, 200, 200)),
("/api/v2/responses/", StatusCode(403, 403, 200)),
("/api/v2/setup/", StatusCode(403, 403, 200)),
("/api/v2/sms/in/", StatusCode(403, 200, 200)),
("/api/v2/sms/out/", StatusCode(403, 200, 200)),
("/api/v2/users/", StatusCode(403, 200, 200)),
("/api/v2/users/profiles/", StatusCode(403, 403, 200)),
("/config/first_run/", StatusCode(302, 302, 302)),
("/graphs/contacts/", StatusCode(302, 302, 200)),
("/graphs/groups/", StatusCode(302, 302, 200)),
("/graphs/keywords/", StatusCode(302, 302, 200)),
("/graphs/recent/", StatusCode(302, 200, 200)),
("/graphs/sms/in/bycontact/", StatusCode(302, 302, 200)),
("/graphs/sms/out/bycontact/", StatusCode(302, 302, 200)),
("/graphs/sms/totals/", StatusCode(302, 302, 200)),
("/keyword/responses/csv/test/", StatusCode(302, 302, 200)),
("/not_approved/", StatusCode(200, 200, 200)),
("/recipient/new/", StatusCode(302, 200, 200)),
],
)
@pytest.mark.django_db
class TestUrls:
"""Test urls and access."""
def test_not_logged_in(self, url, status_code, users):
"""Test not logged in."""
assert users["c_out"].get(url).status_code == status_code.anon
def test_in(self, url, status_code, users):
"""Test site urls when logged in a normal user"""
assert users["c_in"].get(url).status_code == status_code.user
def test_staff(self, url, status_code, users):
"""Test logged in as staff"""
assert users["c_staff"].get(url).status_code == status_code.staff
@pytest.mark.slow
@pytest.mark.django_db
class TestAPITokens:
"""Test Auth Token Access to API."""
def test_no_access(self, users):
assert users["c_out"].get("/api/v2/recipients/").status_code == 403
def test_good_token_staff(self, users, recipients):
t = Token.objects.create(user=users["staff"])
r = users["c_out"].get("/api/v2/recipients/", **{"HTTP_AUTHORIZATION": "Token {}".format(t.key)})
assert r.status_code == 200
data = r.json()
assert data["count"] == len(data["results"])
assert data["count"] == models.Recipient.objects.count()
def test_good_token_not_staff(self, users, recipients):
t = Token.objects.create(user=users["notstaff"])
r = users["c_out"].get("/api/v2/recipients/", **{"HTTP_AUTHORIZATION": "Token {}".format(t.key)})
assert r.status_code == 200
data = r.json()
assert data["count"] == len(data["results"])
assert data["count"] == models.Recipient.objects.filter(is_archived=False).count()
| 40.646341
| 105
| 0.615362
| 429
| 3,333
| 4.682984
| 0.228438
| 0.037332
| 0.047785
| 0.038328
| 0.566949
| 0.501244
| 0.313091
| 0.206073
| 0.206073
| 0.206073
| 0
| 0.089519
| 0.19562
| 3,333
| 81
| 106
| 41.148148
| 0.659828
| 0.042004
| 0
| 0.190476
| 0
| 0
| 0.204294
| 0.03947
| 0
| 0
| 0
| 0
| 0.15873
| 1
| 0.095238
| false
| 0
| 0.079365
| 0
| 0.206349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a52f446636c4417f93211b5960e9ec09c902310
| 2,491
|
py
|
Python
|
guestbook/main.py
|
bradmontgomery/mempy-flask-tutorial
|
8113562460cfa837e7b26df29998e0b6950dd46f
|
[
"MIT"
] | 1
|
2018-01-10T17:54:18.000Z
|
2018-01-10T17:54:18.000Z
|
guestbook/main.py
|
bradmontgomery/mempy-flask-tutorial
|
8113562460cfa837e7b26df29998e0b6950dd46f
|
[
"MIT"
] | null | null | null |
guestbook/main.py
|
bradmontgomery/mempy-flask-tutorial
|
8113562460cfa837e7b26df29998e0b6950dd46f
|
[
"MIT"
] | null | null | null |
"""
A *really* simple guestbook flask app. Data is stored in a SQLite database that
looks something like the following:
+------------+------------------+------------+
| Name | Email | signed_on |
+============+==================+============+
| John Doe | jdoe@example.com | 2012-05-28 |
+------------+------------------+------------+
| Jane Doe | jane@example.com | 2012-05-28 |
+------------+------------------+------------+
This can be created with the following SQL (see bottom of this file):
create table guestbook (name text, email text, signed_on date);
Related Docs:
* `sqlite3 <http://docs.python.org/library/sqlite3.html>`_
* `datetime <http://docs.python.org/library/datetime.html>`_
* `Flask <http://flask.pocoo.org/docs/>`_
"""
from datetime import date
from flask import Flask, redirect, request, url_for, render_template
import sqlite3
app = Flask(__name__) # our Flask app
DB_FILE = 'guestbook.db' # file for our Database
def _select():
"""
just pull all the results from the database
"""
connection = sqlite3.connect(DB_FILE)
cursor = connection.cursor()
cursor.execute("SELECT * FROM guestbook")
return cursor.fetchall()
def _insert(name, email):
"""
put a new entry in the database
"""
params = {'name':name, 'email':email, 'date':date.today()}
connection = sqlite3.connect(DB_FILE)
cursor = connection.cursor()
cursor.execute("insert into guestbook (name, email, signed_on) VALUES (:name, :email, :date)", params)
connection.commit()
cursor.close()
@app.route('/')
def index():
"""
List everyone who's signed the guestbook
"""
entries = [dict(name=row[0], email=row[1], signed_on=row[2]) for row in _select()]
return render_template('index.html', entries=entries)
@app.route('/sign', methods=['POST'])
def sign():
"""
Accepts POST requests, and processes the form;
Redirect to index when completed.
"""
_insert(request.form['name'], request.form['email'])
return redirect(url_for('index'))
if __name__ == '__main__':
# Make sure our database exists
connection = sqlite3.connect(DB_FILE)
cursor = connection.cursor()
try:
cursor.execute("select count(rowid) from guestbook")
except sqlite3.OperationalError:
cursor.execute("create table guestbook (name text, email text, signed_on date)")
cursor.close()
app.run(host='0.0.0.0', debug=True)
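# Quick smoke test once `python main.py` is running (illustrative host/port; Flask
# defaults to port 5000):
#
#   curl http://localhost:5000/
#   curl -X POST -d 'name=Jane Doe' -d 'email=jane@example.com' http://localhost:5000/sign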
| 29.654762
| 106
| 0.609394
| 302
| 2,491
| 4.92053
| 0.410596
| 0.030283
| 0.048452
| 0.05249
| 0.244953
| 0.188425
| 0.188425
| 0.188425
| 0.153432
| 0.153432
| 0
| 0.014933
| 0.193497
| 2,491
| 83
| 107
| 30.012048
| 0.724739
| 0.415496
| 0
| 0.235294
| 0
| 0
| 0.195636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.088235
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a555159031db4d7f16f4b7224046ffb7dcc0810
| 25,673
|
py
|
Python
|
lingvodoc/scripts/lingvodoc_converter.py
|
SegFaulti4/lingvodoc
|
8b296b43453a46b814d3cd381f94382ebcb9c6a6
|
[
"Apache-2.0"
] | 5
|
2017-03-30T18:02:11.000Z
|
2021-07-20T16:02:34.000Z
|
lingvodoc/scripts/lingvodoc_converter.py
|
SegFaulti4/lingvodoc
|
8b296b43453a46b814d3cd381f94382ebcb9c6a6
|
[
"Apache-2.0"
] | 15
|
2016-02-24T13:16:59.000Z
|
2021-09-03T11:47:15.000Z
|
lingvodoc/scripts/lingvodoc_converter.py
|
Winking-maniac/lingvodoc
|
f037bf0e91ccdf020469037220a43e63849aa24a
|
[
"Apache-2.0"
] | 22
|
2015-09-25T07:13:40.000Z
|
2021-08-04T18:08:26.000Z
|
import sqlite3
import base64
import requests
import json
import hashlib
import logging
from lingvodoc.queue.client import QueueClient
def get_dict_attributes(sqconn):
dict_trav = sqconn.cursor()
dict_trav.execute("""SELECT
dict_name,
dict_identificator,
dict_description
FROM
dict_attributes
WHERE
id = 1;""")
req = dict()
for dictionary in dict_trav:
req['dictionary_name'] = dictionary[0]
req['dialeqt_id'] = dictionary[1]
return req
def upload_audio(upload_url, audio_sequence, markup_sequence, session):
log = logging.getLogger(__name__)
status = session.post(upload_url, json=audio_sequence)
log.debug(status.text)
audio_ids_list = json.loads(status.text)
if markup_sequence:
for k in range(0, len(audio_ids_list)):
parent_client_id = audio_ids_list[k]['client_id']
parent_object_id = audio_ids_list[k]['object_id']
markup_sequence[k]["parent_client_id"] = parent_client_id
markup_sequence[k]["parent_object_id"] = parent_object_id
status = session.post(upload_url, json=markup_sequence)
log.debug(status.text)
def upload_markup(upload_url, search_url, markup_sequence, session):
log = logging.getLogger(__name__)
for entry in markup_sequence:
audio_hash = entry[0]
markup_element = entry[1]
entity_metadata_search = search_url + '&searchstring=%s&searchtype=ound'% audio_hash # TODO: change ound to sound, when found how to do lowercase like
status = session.get(entity_metadata_search)
ents = json.loads(status.text)
if ents:
existing_entity = ents[0]
if existing_entity:
parent_client_id = existing_entity['client_id']
parent_object_id = existing_entity['object_id']
markup_element["parent_client_id"] = parent_client_id
markup_element["parent_object_id"] = parent_object_id
new_markup_sequence = [o[1] for o in markup_sequence if o[1].get("parent_client_id")]
result = [o for o in markup_sequence if o[1].get("parent_client_id") is None]
status = session.post(upload_url, json=new_markup_sequence)
log.debug(status.text)
return result
def upload_audio_simple(session, ids_mapping, sound_and_markup_cursor, upload_url, audio_hashes, entity_types,
client_id, is_a_regular_form, locale_id=1):
audio_sequence = []
for cursor in sound_and_markup_cursor:
blob_id = cursor[0]
audio = cursor[1]
filename = cursor[2]
word_id = cursor[3]
audio_hash = hashlib.sha224(audio).hexdigest()
if audio_hash not in audio_hashes:
audio_hashes.add(audio_hash)
audio_element = {"locale_id": locale_id,
"level": "leveloneentity",
"data_type": "sound",
"filename": filename,
"entity_type": entity_types[0],
"parent_client_id": ids_mapping[int(word_id)][0],
"parent_object_id": ids_mapping[int(word_id)][1],
"content": base64.urlsafe_b64encode(audio).decode()}
if not is_a_regular_form:
audio_element['additional_metadata'] = json.dumps({"hash": audio_hash,
"client_id": client_id,
"row_id": cursor[4]})
else:
audio_element['additional_metadata'] = json.dumps({"hash": audio_hash })
audio_sequence.append(audio_element)
if len(audio_sequence) > 50:
upload_audio(upload_url, audio_sequence, None, session)
audio_sequence = []
if len(audio_sequence) != 0:
upload_audio(upload_url, audio_sequence, None, session)
audio_sequence = []
def upload_audio_with_markup(session, ids_mapping, sound_and_markup_cursor, upload_url, search_url, audio_hashes, markup_hashes,
entity_types, client_id, is_a_regular_form, locale_id=1):
audio_sequence = []
markup_sequence = []
markup__without_audio_sequence = []
for cursor in sound_and_markup_cursor:
blob_id = cursor[0]
audio = cursor[1]
markup = cursor[2]
common_name = cursor[3]
word_id = cursor[4]
if not audio or not markup:
continue
audio_hash = hashlib.sha224(audio).hexdigest()
markup_hash = hashlib.sha224(markup).hexdigest()
if audio_hash not in audio_hashes:
audio_hashes.add(audio_hash)
audio_element = {"locale_id": locale_id,
"level": "leveloneentity",
"data_type": "sound",
"filename": common_name + ".wav",
"entity_type": entity_types[0],
"parent_client_id": ids_mapping[int(word_id)][0],
"parent_object_id": ids_mapping[int(word_id)][1],
"content": base64.urlsafe_b64encode(audio).decode()}
if not is_a_regular_form:
audio_element['additional_metadata'] = json.dumps({"hash": audio_hash,
"client_id": client_id,
"row_id": cursor[5]})
else:
audio_element['additional_metadata'] = json.dumps({"hash": audio_hash })
audio_sequence.append(audio_element)
markup_hashes.add(markup_hash)
markup_element = {
"locale_id": locale_id,
"level": "leveltwoentity",
"data_type": "markup",
"filename": common_name + ".TextGrid",
"entity_type": entity_types[1],
# need to set after push "parent_client_id": ids_mapping[int(word_id)][0],
# need to set after push "parent_object_id": ids_mapping[int(word_id)][1],
"content": base64.urlsafe_b64encode(markup).decode(),
"additional_metadata": json.dumps({"hash": markup_hash})}
markup_sequence.append(markup_element)
else:
if markup_hash not in markup_hashes:
markup_hashes.add(markup_hash)
markup_element = {
"locale_id": locale_id,
"level": "leveltwoentity",
"data_type": "markup",
"filename": common_name + ".TextGrid",
"entity_type": entity_types[1],
"content": base64.urlsafe_b64encode(markup).decode(),
"additional_metadata": json.dumps({"hash": markup_hash})}
markup__without_audio_sequence.append((audio_hash, markup_element))
if len(markup__without_audio_sequence) > 50:
markup__without_audio_sequence = upload_markup(upload_url, search_url,
markup__without_audio_sequence, session)
if len(audio_sequence) > 50:
upload_audio(upload_url, audio_sequence, markup_sequence, session)
audio_sequence = []
markup_sequence = []
if len(markup__without_audio_sequence) > 50:
markup__without_audio_sequence = upload_markup(upload_url, search_url,
markup__without_audio_sequence, session)
if len(audio_sequence) != 0:
upload_audio(upload_url, audio_sequence, markup_sequence, session)
audio_sequence = []
markup_sequence = []
if len(markup__without_audio_sequence) != 0:
markup__without_audio_sequence = upload_markup(upload_url, search_url, markup__without_audio_sequence, session)
#def change_dict_status(session, converting_status_url, status, task_id, progress):
# def change_dict_status(task_id, progress):
# #session.put(converting_status_url, json={'status': status})
# QueueClient.update_progress(task_id, progress)
def convert_db_new(sqconn, session, language_client_id, language_object_id, server_url,
dictionary_client_id, dictionary_object_id, perspective_client_id, perspective_object_id,
locale_id=1, task_id=None):
log = logging.getLogger(__name__)
dict_attributes = get_dict_attributes(sqconn)
if not dictionary_client_id or not dictionary_object_id:
create_dictionary_request = {"parent_client_id": language_client_id,
"parent_object_id": language_object_id,
"translation": dict_attributes['dictionary_name'],
"translation_string": dict_attributes['dictionary_name']}
status = session.post(server_url + 'dictionary', json=create_dictionary_request)
dictionary = json.loads(status.text)
else:
dictionary = {'client_id': dictionary_client_id, 'object_id': dictionary_object_id}
client_id = dictionary['client_id']
converting_status_url = server_url + 'dictionary/%s/%s/state' % (dictionary['client_id'], dictionary['object_id'])
# There is no way to move this redefinition because single-task version uses `converting_status_url`
# which is assigned here
def async_progress_bar(progress):
QueueClient.update_progress(task_id, progress)
def single_progress_bar(progress):
session.put(converting_status_url, json={'status': 'Converting {0}%'.format(str(progress))})
change_dict_status = single_progress_bar if task_id is None else async_progress_bar
change_dict_status(5)
perspective_create_url = server_url + 'dictionary/%s/%s/perspective' % (
dictionary['client_id'], dictionary['object_id'])
if not perspective_client_id or not perspective_object_id:
create_perspective_request = {"translation": "Этимологический словарь из Lingvodoc 0.98",
"translation_string": "Lingvodoc 0.98 etymology dictionary",
"import_source": "Lingvodoc-0.98",
"import_hash": dict_attributes['dialeqt_id']}
status = session.post(perspective_create_url, json=create_perspective_request)
perspective = json.loads(status.text)
else:
perspective = {'client_id': perspective_client_id, 'object_id': perspective_object_id}
converting_perspective_status_url = server_url + 'dictionary/%s/%s/perspective/%s/%s/state' % \
(dictionary['client_id'], dictionary['object_id'],
perspective['client_id'], perspective['object_id'])
change_dict_status(10)
create_perspective_fields_request = session.get(server_url + 'dictionary/1/6/perspective/1/7/fields')
perspective_fields_create_url = perspective_create_url + '/%s/%s/fields' % (perspective['client_id'],
perspective['object_id'])
status = session.post(perspective_fields_create_url, json=create_perspective_fields_request.text)
get_all_ids = sqconn.cursor()
get_all_ids.execute("select id from dictionary where is_a_regular_form=1")
create_lexical_entries_url = perspective_create_url + '/%s/%s/lexical_entries' % (
perspective['client_id'], perspective['object_id'])
count_cursor = sqconn.cursor()
count_cursor.execute("select count(*) from dictionary where is_a_regular_form=1")
words_count = count_cursor.fetchone()[0]
lexical_entries_create_request = {"count": words_count}
status = session.post(create_lexical_entries_url, json=lexical_entries_create_request)
ids_dict = json.loads(status.text)
ids_mapping = dict()
i = 0
for id_cursor in get_all_ids:
id = id_cursor[0]
client_id = ids_dict[i]['client_id']
object_id = ids_dict[i]['object_id']
ids_mapping[id] = (client_id, object_id)
i += 1
create_entities_url = server_url + 'dictionary/%s/%s/perspective/%s/%s/entities' % (dictionary['client_id'],
dictionary['object_id'],
perspective['client_id'],
perspective['object_id'])
change_dict_status(15)
def create_entity_list(mapping, cursor, level, data_type, entity_type, is_a_regular_form, locale_id=1):
push_list = []
for ld_cursor in cursor:
ld_id = int(ld_cursor[0])
content = ld_cursor[1]
parent_client_id = mapping[ld_id][0]
parent_object_id = mapping[ld_id][1]
element = {"locale_id": locale_id,
"level": level,
"data_type": data_type,
"entity_type": entity_type,
"parent_client_id": parent_client_id,
"parent_object_id": parent_object_id,
"content": content}
if not is_a_regular_form:
element['additional_metadata'] = json.dumps({"client_id": client_id, "row_id": ld_cursor[2]})
push_list.append(element)
return push_list
def prepare_and_upload_text_entities(id_column, is_a_regular_form, text_column, entity_type):
sqcursor = sqconn.cursor()
if is_a_regular_form:
sqcursor.execute("select %s,%s from dictionary where is_a_regular_form=1" % (id_column, text_column))
else:
sqcursor.execute("select %s,%s,id from dictionary where is_a_regular_form=0" % (id_column, text_column))
push_list = create_entity_list(ids_mapping, sqcursor, "leveloneentity", 'text', entity_type, is_a_regular_form)
return session.post(create_entities_url, json=push_list)
for column_and_type in [("word", "Word"),
("transcription", "Transcription"),
("translation", "Translation")]:
status = prepare_and_upload_text_entities("id", True, column_and_type[0], column_and_type[1])
log.debug(status.text)
for column_and_type in [("word", "Paradigm word"),
("transcription", "Paradigm transcription"),
("translation", "Paradigm translation")]:
status = prepare_and_upload_text_entities("regular_form", False, column_and_type[0], column_and_type[1])
log.debug(status.text)
change_dict_status(35)
sound_and_markup_word_cursor = sqconn.cursor()
sound_and_markup_word_cursor.execute("""select blobs.id,
blobs.secblob,
blobs.mainblob,
dict_blobs_description.name,
dictionary.id
from blobs, dict_blobs_description, dictionary
where dict_blobs_description.blobid=blobs.id
and dict_blobs_description.wordid=dictionary.id
and dict_blobs_description.type=2
and dictionary.is_a_regular_form=1;""")
audio_hashes = set()
markup_hashes = set()
perspective_search = server_url + 'dictionary/%s/%s/perspective/%s/%s/all' % (dictionary['client_id'],
dictionary['object_id'],
perspective['client_id'],
perspective['object_id'])
search_url = server_url + 'meta_search' \
'?perspective_client_id=%d&perspective_object_id=%d' % (perspective['client_id'],
perspective['object_id'])
status = session.get(perspective_search)
lexes = json.loads(status.text)['lexical_entries']
sound_types = ['Sound', 'Paradigm sound']
markup_types = ['Praat markup', "Paradigm Praat markup"]
for lex in lexes:
for entry in lex['contains']:
meta = entry.get('additional_metadata')
if meta:
hsh = meta.get('hash')
if hsh:
if entry['entity_type'] in sound_types:
audio_hashes.add(hsh)
if entry.get('contains'):
for ent in entry['contains']:
meta = entry.get('additional_metadata')
if meta:
hsh = meta.get('hash')
if hsh:
if ent['entity_type'] in markup_types:
markup_hashes.add(hsh)
entity_types = ['Sound', 'Praat markup']
upload_audio_with_markup(session, ids_mapping, sound_and_markup_word_cursor, create_entities_url, search_url,
audio_hashes, markup_hashes, entity_types, client_id, True, locale_id)
log.debug(audio_hashes)
change_dict_status(45)
paradigm_sound_and_markup_cursor = sqconn.cursor()
paradigm_sound_and_markup_cursor.execute("""select blobs.id,
blobs.secblob,
blobs.mainblob,
dict_blobs_description.name,
dictionary.regular_form,
dictionary.id
from blobs, dict_blobs_description, dictionary
where dict_blobs_description.blobid=blobs.id
and dict_blobs_description.wordid=dictionary.id
and dict_blobs_description.type=2
and dictionary.is_a_regular_form=0;""")
entity_types = ['Paradigm sound', "Paradigm Praat markup"]
upload_audio_with_markup(session, ids_mapping, paradigm_sound_and_markup_cursor, create_entities_url, search_url,
audio_hashes, markup_hashes, entity_types, client_id, False, locale_id)
log.debug(audio_hashes)
change_dict_status(60)
simple_word_sound_cursor = sqconn.cursor()
simple_word_sound_cursor.execute("""select blobs.id,
blobs.mainblob,
dict_blobs_description.name,
dictionary.id
from blobs, dict_blobs_description, dictionary
where dict_blobs_description.blobid=blobs.id
and dict_blobs_description.wordid=dictionary.id
and dict_blobs_description.type=1
and dictionary.is_a_regular_form=1;""")
entity_types = ['Sound']
upload_audio_simple(session, ids_mapping, simple_word_sound_cursor, create_entities_url, audio_hashes, entity_types,
client_id, True, locale_id)
change_dict_status(70)
simple_paradigm_sound_cursor = sqconn.cursor()
simple_paradigm_sound_cursor.execute("""select blobs.id,
blobs.mainblob,
dict_blobs_description.name,
dictionary.regular_form,
dictionary.id
from blobs, dict_blobs_description, dictionary
where dict_blobs_description.blobid=blobs.id
and dict_blobs_description.wordid=dictionary.id
and dict_blobs_description.type=1
and dictionary.is_a_regular_form=0;""")
entity_types = ['Paradigm sound']
upload_audio_simple(session, ids_mapping, simple_paradigm_sound_cursor, create_entities_url, audio_hashes,
entity_types, client_id, False, locale_id)
change_dict_status(80)
connect_url = server_url + 'dictionary/%s/%s/perspective/%s/%s/lexical_entry/connect' % (dictionary['client_id'],
dictionary['object_id'],
perspective['client_id'],
perspective['object_id'])
etymology_cursor = sqconn.cursor()
etymology_cursor.execute("""select id, etimology_tag
FROM dictionary
WHERE etimology_tag NOT NULL
and dictionary.is_a_regular_form=1; """)
for cursor in etymology_cursor:
id = int(cursor[0])
client_id = ids_mapping[id][0]
object_id = ids_mapping[id][1]
item = {"entity_type": "Etymology", "tag": cursor[1],
"connections": [{"client_id": client_id, "object_id": object_id}]}
status = session.post(connect_url, json=item)
log.debug(status.text)
suggestions_url = server_url + 'merge/suggestions'
suggestions_params = {'threshold': 1.0,
'levenstein': 0,
'client_id': perspective['client_id'],
'object_id': perspective['object_id']}
status = session.post(suggestions_url, json=suggestions_params)
for entry in json.loads(status.text):
if entry['confidence'] >= 1.0:
first_entry = entry['suggestion'][0]
second_entry = entry['suggestion'][1]
lex_move_url = server_url + 'lexical_entry/%d/%d/move' % (second_entry['lexical_entry_client_id'],
second_entry['lexical_entry_object_id'])
move_params = {'client_id': first_entry['lexical_entry_client_id'],
'object_id': first_entry['lexical_entry_object_id'],
'real_delete': True}
status = session.patch(lex_move_url, json=move_params)
else:
break
change_dict_status(95)
change_dict_status(100)
return dictionary
def convert_one(filename, login, password_hash, language_client_id, language_object_id,
dictionary_client_id, dictionary_object_id, perspective_client_id, perspective_object_id,
server_url="http://localhost:6543/", task_id=None):
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.debug("Starting convert_one")
log.debug("Creating session")
session = requests.Session()
session.headers.update({'Connection': 'Keep-Alive'})
adapter = requests.adapters.HTTPAdapter(pool_connections=1, pool_maxsize=1, max_retries=10)
session.mount('http://', adapter)
log.debug("Going to login")
login_data = {"login": login, "passwordhash": password_hash}
log.debug("Login data: " + login_data['login'] + login_data['passwordhash'])
cookie_set = session.post(server_url + 'cheatlogin', json=login_data)
log.debug("Login status:" + str(cookie_set.status_code))
if cookie_set.status_code != 200:
log.error("Cheat login for conversion was unsuccessful")
raise SystemExit(-1)  # exit() is intended for interactive sessions
sqconn = sqlite3.connect(filename)
log.debug("Connected to sqlite3 database")
try:
status = convert_db_new(sqconn, session, language_client_id, language_object_id, server_url,
dictionary_client_id, dictionary_object_id, perspective_client_id,
perspective_object_id, task_id=task_id)
except Exception:
log.exception("Converting failed")  # logs the message together with the full traceback
raise
log.debug(status)
return status
if __name__ == "__main__":
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s\t%(levelname)s\t[%(name)s]\t%(message)s')
log.debug("!!!!!!!!!! YOU SHOULD NOT SEE IT !!!!!!!!")
convert_one(filename="/home/student/dicts-current/nenets_kaninski.sqlite", login="Test",
password_hash="$2a$12$zBMnhV9oUfKehlHJCHnsPuGM98Wwq/g9hlWWNqg8ZGDuLNyUSfxza",
language_client_id=1, language_object_id=1,
dictionary_client_id=None, dictionary_object_id=None,
perspective_client_id=None, perspective_object_id=None,
server_url="http://lingvodoc.ispras.ru/")
| 51.346
| 159
| 0.569158
| 2,677
| 25,673
| 5.100859
| 0.109824
| 0.04394
| 0.029293
| 0.018455
| 0.598902
| 0.528378
| 0.475943
| 0.437129
| 0.394654
| 0.361626
| 0
| 0.009986
| 0.340825
| 25,673
| 499
| 160
| 51.448898
| 0.796904
| 0.022241
| 0
| 0.37296
| 0
| 0.002331
| 0.257024
| 0.059464
| 0
| 0
| 0
| 0.002004
| 0
| 1
| 0.025641
| false
| 0.009324
| 0.020979
| 0
| 0.060606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a56a9cb8a9973d77c62dc8bff13ecc6a5a858c1
| 1,550
|
py
|
Python
|
tests/test_all.py
|
euranova/DAEMA
|
29fec157c34afcc9abe95bc602a3012615b3c36b
|
[
"MIT"
] | 6
|
2021-09-17T02:09:29.000Z
|
2022-03-20T04:15:15.000Z
|
tests/test_all.py
|
Jason-Xu-Ncepu/DAEMA
|
29fec157c34afcc9abe95bc602a3012615b3c36b
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
Jason-Xu-Ncepu/DAEMA
|
29fec157c34afcc9abe95bc602a3012615b3c36b
|
[
"MIT"
] | 4
|
2021-06-29T22:57:18.000Z
|
2022-03-09T09:19:17.000Z
|
""" Tests the code. """
from torch.utils.data import DataLoader
from models import MODELS
from pipeline import argument_parser
from pipeline.datasets import DATASETS, get_dataset
from run import main
def test_datasets():
""" Tests all the datasets defined in pipeline.datasets.DATASETS. """
for ds_name in DATASETS:
train_set, test_set, _ = get_dataset(ds_name, seed=42)
for set_ in (train_set, test_set):
dl = DataLoader(list(zip(*set_)), batch_size=5)
for data, missing_data, mask in dl:
assert len(data) == 5, f"The {ds_name} dataset has fewer than 5 samples."
assert data.shape[1] > 1, f"The {ds_name} dataset has at most one column."
print("data:", data, "missing_data:", missing_data, "mask:", mask, sep="\n")
break
def test_general(capsys):
""" Tests most of the code by checking it produces the expected result. """
main(argument_parser.get_args(["--metric_steps", "50", "--datasets", "Boston", "--seeds", "0", "1"]))
captured = capsys.readouterr()
with open("tests/current_output.txt", "w") as f:
assert f.write(captured.out)
with open("tests/gold_output.txt", "r") as f:
assert captured.out == f.read()
def test_models():
""" Tests all the models (only checks if these run). """
for model in MODELS:
main(argument_parser.get_args(["--model", model, "--metric_steps", "0", "1", "5", "--datasets", "Boston",
"--seeds", "0"]))
| 38.75
| 113
| 0.614839
| 212
| 1,550
| 4.358491
| 0.429245
| 0.025974
| 0.048701
| 0.032468
| 0.097403
| 0.04329
| 0
| 0
| 0
| 0
| 0
| 0.012777
| 0.242581
| 1,550
| 39
| 114
| 39.74359
| 0.774276
| 0.127097
| 0
| 0
| 0
| 0
| 0.19367
| 0.033911
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.115385
| false
| 0
| 0.192308
| 0
| 0.307692
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a586ac04d9d83458edb9f23d9cb90fb787462de
| 2,185
|
py
|
Python
|
src/preprocessing.py
|
Wisteria30/GIM-RL
|
085ba3b8c10590f82226cd1675ba96c5f90740f3
|
[
"Apache-2.0"
] | 3
|
2021-10-15T00:57:05.000Z
|
2021-12-16T13:00:05.000Z
|
src/preprocessing.py
|
Wisteria30/GIM-RL
|
085ba3b8c10590f82226cd1675ba96c5f90740f3
|
[
"Apache-2.0"
] | null | null | null |
src/preprocessing.py
|
Wisteria30/GIM-RL
|
085ba3b8c10590f82226cd1675ba96c5f90740f3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import random
import os
import sys
import torch
from src.agent import (
EpsilonGreedyAgent,
MaxAgent,
RandomAgent,
RandomCreateBVAgent,
ProbabilityAgent,
QAgent,
QAndUtilityAgent,
MultiEpsilonGreedyAgent,
MultiMaxAgent,
MultiProbabilityAgent,
MultiQAgent,
MultiQAndUtilityAgent,
)
def set_seed(seed=0):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def set_agent(env, cfg):
if cfg.agent.name == "q":
agent = QAgent(env, cfg)
elif cfg.agent.name == "qandutility":
agent = QAndUtilityAgent(env, cfg)
elif cfg.agent.name == "max":
agent = MaxAgent(env, cfg)
elif cfg.agent.name == "probability":
agent = ProbabilityAgent(env, cfg)
elif cfg.agent.name == "random":
agent = RandomAgent(env, cfg)
elif cfg.agent.name == "randombv":
agent = RandomCreateBVAgent(env, cfg)
elif cfg.agent.name == "egreedy":
agent = EpsilonGreedyAgent(env, cfg)
else:
print("illegal agent name.")
sys.exit(1)
return agent
def set_multi_agent(env, cfg):
if cfg.agent.name == "q":
agent = MultiQAgent(env, cfg)
elif cfg.agent.name == "qandutility":
agent = MultiQAndUtilityAgent(env, cfg)
elif cfg.agent.name == "max":
agent = MultiMaxAgent(env, cfg)
elif cfg.agent.name == "probability":
agent = MultiProbabilityAgent(env, cfg)
elif cfg.agent.name == "random":
agent = RandomAgent(env, cfg)
elif cfg.agent.name == "randombv":
agent = RandomCreateBVAgent(env, cfg)
elif cfg.agent.name == "egreedy":
agent = MultiEpsilonGreedyAgent(env, cfg)
else:
print("illegal agent name.")
sys.exit(1)
return agent
def set_common_tag(writer, cfg):
writer.set_runname(os.path.join(os.getcwd(), ".hydra/overrides.yaml"))
writer.set_tag("mlflow.note.content", cfg.content)
writer.set_tag("mlflow.user", cfg.user)
writer.set_tag("mlflow.source.name", os.path.abspath(__file__))
writer.set_tag("mlflow.source.git.commit", cfg.commit)
| 27.3125
| 74
| 0.644851
| 260
| 2,185
| 5.353846
| 0.265385
| 0.068966
| 0.12069
| 0.112069
| 0.501437
| 0.466954
| 0.466954
| 0.466954
| 0.314655
| 0.270115
| 0
| 0.002367
| 0.226545
| 2,185
| 79
| 75
| 27.658228
| 0.821302
| 0.009611
| 0
| 0.382353
| 0
| 0
| 0.10407
| 0.020814
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.088235
| 0
| 0.176471
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a5ad370a80119a4cd36243d371bcf4ccf37a3ae
| 1,439
|
py
|
Python
|
src/leaf/file_tools.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
src/leaf/file_tools.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
src/leaf/file_tools.py
|
Pix-00/olea-v2_flask_1_
|
7ddfa83a7a2a7dfbe55b78da002c1193f38781c0
|
[
"Apache-2.0"
] | null | null | null |
from hashlib import sha3_256
import magic
from enums import Dep, MangoType
MIME_MTYPE = {
'text/plain': MangoType.text,
'audio/flac': MangoType.audio_flac,
'audio/wav': MangoType.audio_wav,
'image/png': MangoType.picture_png,
'image/jpeg': MangoType.picture_jpg,
'video/x-matroska': MangoType.video_mkv,
'video/mp4': MangoType.video_mp4
}
def special_save(f, path: str) -> (bytes, MangoType):
hasher = sha3_256()
with open(path, 'wb') as tf:
chunk = f.read(4096)
tf.write(chunk)
hasher.update(chunk)
mime: str = magic.from_buffer(chunk, mime=True)
while True:
chunk = f.read(4096)
if not chunk:
break
hasher.update(chunk)
tf.write(chunk)
fp = hasher.digest()
mtype = MIME_MTYPE.get(mime, MangoType.unknown)
return fp, mtype
TYPE_ALLOWED = {
Dep.d51: (MangoType.audio_flac, ),
Dep.d59: (MangoType.audio_flac, ),
Dep.d60: (MangoType.picture_png, ),
Dep.d71: (MangoType.audio_flac, ),
Dep.d72: (MangoType.text, ),
Dep.d73: (MangoType.video_mkv, MangoType.video_mp4)
}
def is_allowed_type(dep: Dep, mtype: MangoType) -> bool:
return mtype in TYPE_ALLOWED[dep]
EXTS = {
MangoType.audio_flac: 'flac',
MangoType.picture_png: 'png',
MangoType.text: 'txt',
MangoType.video_mkv: 'mkv',
MangoType.video_mp4: 'mp4'
}
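# Minimal usage sketch (paths and the Dep member are illustrative):
#
#   with open('upload.bin', 'rb') as f:
#       fp, mtype = special_save(f, '/tmp/stored')
#   if is_allowed_type(Dep.d51, mtype):
#       final_name = fp.hex() + '.' + EXTS.get(mtype, 'bin')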
| 24.810345
| 73
| 0.635858
| 187
| 1,439
| 4.748663
| 0.363636
| 0.060811
| 0.101351
| 0.070946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03
| 0.23558
| 1,439
| 57
| 74
| 25.245614
| 0.777273
| 0
| 0
| 0.133333
| 0
| 0
| 0.063238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.066667
| 0.022222
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a61264c94a41a473e6cc008dcf849ae78b0596c
| 898
|
py
|
Python
|
akamai/cache_buster/bust_cache.py
|
famartinrh/cloud-services-config
|
7dd4fe24fc09a62f360e3407629b1c2567a10260
|
[
"MIT"
] | 11
|
2019-06-25T17:01:12.000Z
|
2022-01-21T18:53:13.000Z
|
akamai/cache_buster/bust_cache.py
|
famartinrh/cloud-services-config
|
7dd4fe24fc09a62f360e3407629b1c2567a10260
|
[
"MIT"
] | 253
|
2019-05-24T12:48:32.000Z
|
2022-03-29T11:00:25.000Z
|
akamai/cache_buster/bust_cache.py
|
famartinrh/cloud-services-config
|
7dd4fe24fc09a62f360e3407629b1c2567a10260
|
[
"MIT"
] | 93
|
2019-04-17T09:22:43.000Z
|
2022-03-21T18:53:28.000Z
|
import sys
import subprocess
def main():
edgeRcPath = sys.argv[1]
branch = sys.argv[2]
navlist = sys.argv[3:]
domain = 'https://console.stage.redhat.com'
if 'prod' in branch:
domain = 'https://console.redhat.com'
if 'beta' in branch:
domain += '/beta'
purgeAssets = ['fed-modules.json']
for nav in navlist:
purgeAssets.append(f'{nav}-navigation.json')
purgeUrls = [f'{domain}/config/main.yml']
for assetPath in purgeAssets:
purgeUrls.append(f'{domain}/config/chrome/{assetPath}')
for endpoint in purgeUrls:
print(f'Purging endpoint cache: {endpoint}')
try:
subprocess.check_output(['akamai', 'purge', '--edgerc', edgeRcPath , 'invalidate', endpoint])
except subprocess.CalledProcessError as e:
print(e.output)
sys.exit(1)
if __name__ == "__main__":
main()
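# Illustrative invocation (positional args: edgerc path, branch, then nav names):
#
#   python bust_cache.py ~/.edgerc prod-beta rhel ansible
#
# which purges {domain}/config/main.yml plus fed-modules.json and each
# {nav}-navigation.json under /config/chrome/.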
| 30.965517
| 105
| 0.615813
| 105
| 898
| 5.180952
| 0.504762
| 0.038603
| 0.066176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005874
| 0.241648
| 898
| 28
| 106
| 32.071429
| 0.792952
| 0
| 0
| 0
| 0
| 0
| 0.26392
| 0.087973
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.076923
| 0
| 0.115385
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a620af02d14a583cea144484597abc9077f8497
| 6,300
|
py
|
Python
|
gryphon/dashboards/handlers/status.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 1,109
|
2019-06-20T19:23:27.000Z
|
2022-03-20T14:03:43.000Z
|
gryphon/dashboards/handlers/status.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 63
|
2019-06-21T05:36:17.000Z
|
2021-05-26T21:08:15.000Z
|
gryphon/dashboards/handlers/status.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 181
|
2019-06-20T19:42:05.000Z
|
2022-03-21T13:05:13.000Z
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import logging
from delorean import Delorean
import tornado.web
from gryphon.dashboards.handlers.admin_base import AdminBaseHandler
from gryphon.lib.exchange import exchange_factory
from gryphon.lib.models.order import Order
from gryphon.lib.models.exchange import Exchange as ExchangeData
from gryphon.lib.models.exchange import Balance
from gryphon.lib.models.transaction import Transaction
from gryphon.lib.money import Money
logger = logging.getLogger(__name__)
BANK_ACCOUNT_HIGHLIGHT_THRESHOLD = 30000
class GryphonStatusHandler(AdminBaseHandler):
@tornado.web.authenticated
def get(self):
exchanges = exchange_factory.get_all_initialized_exchange_wrappers(
self.trading_db,
)
exchange_info = self.get_exchange_info(exchanges)
system_balances, total_fiat = self.get_system_balances(exchanges)
bank_accounts = self.get_trading_bank_accounts()
in_transit_fiat_txs, in_transit_btc_txs = self.get_in_transit_transactions()
recent_transactions = self.get_recent_transactions()
net_flows = self.get_daily_net_transaction_flows(exchanges)
self.render_template(
'status.html',
args={
'all_exchanges': exchange_info,
'system_balances': system_balances,
'bank_accounts': bank_accounts,
'total_fiat': total_fiat,
'in_transit_fiat_txs': in_transit_fiat_txs,
'in_transit_btc_txs': in_transit_btc_txs,
'recent_transactions': recent_transactions,
'net_flows': net_flows,
},
)
def get_daily_net_transaction_flows(self, exchanges):
flows = []
one_day_ago = Delorean().last_day(1).naive
for exchange in exchanges:
exchange_data = exchange.exchange_account_db_object(self.trading_db)
exchange_txs = exchange_data.transactions\
.filter(Transaction._amount_currency == 'BTC')\
.filter(Transaction.time_created > one_day_ago)\
.filter(Transaction.transaction_status == 'COMPLETED')\
.order_by(Transaction.time_created.desc())\
.all()
deposit_total = sum([
round(tx.amount.amount, 0)
for tx in exchange_txs if tx.transaction_type == Transaction.DEPOSIT
])
withdrawal_total = sum([
round(tx.amount.amount, 0)
for tx in exchange_txs if tx.transaction_type == Transaction.WITHDRAWL
])
flows.append({
'exchange_name': exchange.name,
'withdrawals': withdrawal_total,
'deposits': deposit_total,
})
flows = sorted(
flows,
key=lambda flow: flow.get('withdrawals') + flow.get('deposits'),
reverse=True
)
return flows
def get_recent_transactions(self):
five_hours_ago = Delorean().naive - timedelta(hours=5)
txs = self.trading_db.query(Transaction)\
.filter(Transaction.time_created > five_hours_ago)\
.filter_by(transaction_status='COMPLETED')\
.order_by(Transaction.time_created.desc())\
.join(ExchangeData)\
.all()
return txs
def get_in_transit_transactions(self):
"""
Query all IN_TRANSIT transactions from the database.
Returns the fiat tx results, and then the btc ones
"""
all_txs_query = self.trading_db.query(Transaction)\
.filter_by(transaction_status='IN_TRANSIT')\
.join(ExchangeData)\
.order_by(Transaction.time_created)
fiat_txs = all_txs_query.filter(Transaction._amount_currency != "BTC").all()
btc_txs = all_txs_query.filter(Transaction._amount_currency == "BTC").all()
return fiat_txs, btc_txs
def get_exchange_info(self, exchanges):
up_exchanges = self.get_up_exchanges()
exchange_info = []
for exchange in exchanges:
exchange_data = exchange.exchange_account_db_object(self.trading_db)
exchange_dict = {
'name': exchange.name,
'balance': exchange_data.balance,
'is_up': exchange.name in up_exchanges.keys(),
'up_since': up_exchanges.get(exchange.name),
}
exchange_info.append(exchange_dict)
exchange_info = sorted(exchange_info, key=lambda e: e.get('is_up') is False)
return exchange_info
def get_system_balances(self, exchanges):
system_balance = Balance()
for e in exchanges:
system_balance += e.exchange_account_db_object(self.trading_db).balance
total_fiat = sum([
balance.to("USD") for currency, balance in system_balance.iteritems()
if currency not in Money.CRYPTO_CURRENCIES
])
return system_balance, total_fiat
def get_up_exchanges(self):
now = Delorean().naive
ten_minutes_ago = now - timedelta(minutes=10)
orders = self.trading_db\
.query(Order)\
.filter(Order.time_created > ten_minutes_ago)\
.order_by(Order.time_created.asc())\
.all()
up_exchanges = {}
for order in orders:
up_exchanges[order._exchange_name] = order.time_created
# TODO: convert these into "minutes ago".
for key in up_exchanges.keys():
up_exchanges[key] = now - up_exchanges[key]
return up_exchanges
def get_trading_bank_accounts(self):
trading_bank_acount_keys = ['BMO_USD', 'BMO_CAD']
bank_account_infos = []
for key in trading_bank_acount_keys:
account = exchange_factory.make_exchange_data_from_key(
key,
self.trading_db,
)
account_info = {
'name': account.name,
'balance': account.balance.fiat(),
'highlight': account.balance.fiat() > BANK_ACCOUNT_HIGHLIGHT_THRESHOLD,
}
bank_account_infos.append(account_info)
return bank_account_infos
| 33.157895
| 87
| 0.623968
| 690
| 6,300
| 5.381159
| 0.207246
| 0.032588
| 0.02801
| 0.021546
| 0.260167
| 0.214382
| 0.172906
| 0.16321
| 0.146512
| 0.114732
| 0
| 0.002682
| 0.289683
| 6,300
| 189
| 88
| 33.333333
| 0.827039
| 0.026508
| 0
| 0.131387
| 0
| 0
| 0.046073
| 0
| 0
| 0
| 0
| 0.005291
| 0
| 1
| 0.058394
| false
| 0
| 0.080292
| 0
| 0.19708
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a63239cdeadf5547e515d79f10a494c6c3288e7
| 4,897
|
py
|
Python
|
setup.py
|
Hydar-Zartash/TF_regression
|
ac7cef4c1f248664b57139ae40c582ec80b2355f
|
[
"MIT"
] | null | null | null |
setup.py
|
Hydar-Zartash/TF_regression
|
ac7cef4c1f248664b57139ae40c582ec80b2355f
|
[
"MIT"
] | null | null | null |
setup.py
|
Hydar-Zartash/TF_regression
|
ac7cef4c1f248664b57139ae40c582ec80b2355f
|
[
"MIT"
] | null | null | null |
import yfinance as yf
import numpy as np
import pandas as pd
class StockSetup():
"""
The object of this class includes a dataframe, a classifier trained on it
and some associated test and prediction stats
"""
def __init__(self, ticker: str, target:int) -> None:
"""Initialize the object by downloading stock data and performing several methods
Args:
ticker (string): the ticker to be downloaded and used
target (int): the required next month growth percentage
"""
self.data = yf.download(ticker, period="max")
self.target = target/100 + 1 #returns decimal value of int input (8% -> 1.08)
self.RSI14()
self.STOCHRSI()
self.MACD()
self.AROON()
self.Williams()
self.BULL()
self.data = self.setup()
self.data = self.data.dropna()
def RSI14(self) -> None:
"""The formula for the relative strength index is
RSI = 100 - 100/(1 + avg gain/|avg loss|)
(avg_loss is kept negative below, so the code writes this as 100 - 100/(1 - avg gain/avg loss)).
In general practice, > 70 indicates a selling opportunity and < 30 indicates a buy.
All averages used are simple moving averages.
"""
self.data['pct_change'] = 100*(self.data['Close']-self.data['Open'])/self.data['Open'] #daily percent change
self.data['day_gain'] = self.data['pct_change'].where(self.data['pct_change'] > 0, other = 0) #filters the percent changes for only the gains
self.data['avg_gain'] = self.data['day_gain'].rolling(window=14).mean() #takes rolling avg of the gains
self.data['day_loss'] = self.data['pct_change'].where(self.data['pct_change'] < 0,other = 0) #filters only percent changes of a loss
self.data['avg_loss'] = self.data['day_loss'].rolling(window=14).mean() #takes rolling avg
self.data['RSI-14'] = 100 -(100/ (1 - (self.data['avg_gain']/self.data['avg_loss']))) #unsmoothed RSI
def STOCHRSI(self) -> None:
"""stochastic RSI
calculate a stochastic oscillation for the RSI 14
stoch = (current - recent min) / (recent max - recent min)
"""
self.data['STOCH-RSI'] = (self.data['RSI-14'] - self.data['RSI-14'].rolling(window=14).min())/(self.data['RSI-14'].rolling(window=14).max() - self.data['RSI-14'].rolling(window=14).min())
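# Values near 1 put RSI at the top of its recent 14-day range; values near 0, at the bottom.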
def MACD(self) -> None:
"""Moving average convergence divergence (MACD) is another indicator, calculated as short-term EMA - long-term EMA.
EMA is provided by the pandas .ewm().mean() method.
"""
self.data['MACD'] = self.data['Close'].ewm(span = 12, adjust=False).mean() - self.data['Close'].ewm(span = 24, adjust=False).mean()
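# Positive MACD: the 12-day EMA sits above the 24-day EMA, indicating upward momentum.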
def AROON(self) -> None:
"""The Aroon oscillator is Aroon up - Aroon down and measures momentum.
Aroon up = 100*(interval length - days since rolling max on interval)/interval
Aroon down = 100*(interval length - days since rolling min on interval)/interval
We will be doing a 25 day interval.
"""
self.data['AROON'] = (100 * (25 - self.data['Close'].rolling(window=25).apply(np.argmax)) / 25) - (100 * (25 - self.data['Close'].rolling(window=25).apply(np.argmin)) / 25)
def Williams(self) -> None:
"""Williams R% is the (Highest high - Current close)/(Highest High - Lowest low)*-100%
for any given range (14 in this case)
"""
self.data['R%'] = (self.data['High'].rolling(window=14).max() - self.data['Close']) / (self.data['High'].rolling(window=14).max() - self.data['Low'].rolling(window=14).min()) *-100
def BULL(self) -> None:
"""Bull power is the formula of high - exponential weight average of the close
"""
self.data['Bull'] = self.data['High'] - self.data['Close'].ewm(span = 14, adjust=False).mean()
def setup(self) -> pd.DataFrame:
""" Adds a column to see if stock goes up by 8% in 30 days
(1 for True, 0 for false)
Returns:
pd.DataFrame: returns df of cols:
Close Values
RSI 14
Stochastic oscillator of RSI 14
MACD
AROON
Williams R%
Bull-Bear indicator
boolean whether stock grew X% in the next thirty days
"""
self.data['Shift'] = self.data['Adj Close'].shift(-30)
self.data['Shift'] = self.data['Shift'].rolling(window=30).max()
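# shift(-30) followed by rolling(30).max() yields the highest 'Adj Close' over the next 30 days.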
self.data['Growth X%'] = self.data['Adj Close']*self.target <= self.data['Shift']
self.data['Growth X%'] = np.where(self.data['Growth X%']==True, 1, 0)
final = self.data[['Adj Close', 'RSI-14', 'STOCH-RSI', 'MACD', 'AROON', 'R%', "Bull", 'Growth X%']]
return final
if __name__ == "__main__":
stock = StockSetup('SPY', 3)
print(stock.data.tail())
print(stock.data.isna().sum())
| 44.926606
| 195
| 0.596488
| 669
| 4,897
| 4.328849
| 0.301943
| 0.140884
| 0.041436
| 0.029351
| 0.217541
| 0.166091
| 0.149171
| 0.139503
| 0.09047
| 0.064227
| 0
| 0.033791
| 0.256688
| 4,897
| 109
| 196
| 44.926606
| 0.761813
| 0.37329
| 0
| 0
| 0
| 0
| 0.128457
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.186047
| false
| 0
| 0.069767
| 0
| 0.302326
| 0.046512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a636c8c285701e4e227ff48aaa2926973c39b10
| 1,893
|
py
|
Python
|
netsuitesdk/api/custom_records.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 47
|
2019-08-15T21:36:36.000Z
|
2022-03-18T23:44:59.000Z
|
netsuitesdk/api/custom_records.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 52
|
2019-06-17T09:43:04.000Z
|
2022-03-22T05:00:53.000Z
|
netsuitesdk/api/custom_records.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 55
|
2019-06-02T22:18:01.000Z
|
2022-03-29T07:20:31.000Z
|
from collections import OrderedDict
from .base import ApiBase
import logging
logger = logging.getLogger(__name__)
class CustomRecords(ApiBase):
SIMPLE_FIELDS = [
'allowAttachments',
'allowInlineEditing',
'allowNumberingOverride',
'allowQuickSearch',
'altName',
'autoName',
'created',
'customFieldList',
'customRecordId',
'description',
'disclaimer',
'enablEmailMerge',
'enableNumbering',
'includeName',
'internalId',
'isAvailableOffline',
'isInactive',
'isNumberingUpdateable',
'isOrdered',
'lastModified',
'name',
'numberingCurrentNumber',
'numberingInit',
'numberingMinDigits',
'numberingPrefix',
'numberingSuffix',
'recordName',
'scriptId',
'showCreationDate',
'showCreationDateOnList',
'showId',
'showLastModified',
'showLastModifiedOnList',
'showNotes',
'showOwner',
'showOwnerAllowChange',
'showOwnerOnList',
'translationsList',
'usePermissions',
'nullFieldList',
]
RECORD_REF_FIELDS = [
'customForm',
'owner',
'parent',
'recType',
]
def __init__(self, ns_client):
ApiBase.__init__(self, ns_client=ns_client, type_name='CustomRecord')
def post(self, data) -> OrderedDict:
assert data['externalId'], 'missing external id'
record = self.ns_client.CustomRecord(externalId=data['externalId'])
self.build_simple_fields(self.SIMPLE_FIELDS, data, record)
self.build_record_ref_fields(self.RECORD_REF_FIELDS, data, record)
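# upsert adds the record if its externalId is new, otherwise updates the existing one.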
logger.debug('able to create custom record = %s', record)
res = self.ns_client.upsert(record)
return self._serialize(res)
| 25.581081
| 77
| 0.59588
| 139
| 1,893
| 7.899281
| 0.604317
| 0.03643
| 0.043716
| 0.029144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.293185
| 1,893
| 73
| 78
| 25.931507
| 0.820628
| 0
| 0
| 0
| 0
| 0
| 0.348653
| 0.057581
| 0
| 0
| 0
| 0
| 0.015873
| 1
| 0.031746
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9a67d0c9f6bb396b9d590ca653e1ee83e64bff97
| 3,421
|
py
|
Python
|
ava/actives/shell_injection.py
|
indeedsecurity/ava-ce
|
4483b301034a096b716646a470a6642b3df8ce61
|
[
"Apache-2.0"
] | 2
|
2019-03-26T15:37:48.000Z
|
2020-01-03T03:47:30.000Z
|
ava/actives/shell_injection.py
|
indeedsecurity/ava-ce
|
4483b301034a096b716646a470a6642b3df8ce61
|
[
"Apache-2.0"
] | 2
|
2021-03-25T21:27:09.000Z
|
2021-06-01T21:20:04.000Z
|
ava/actives/shell_injection.py
|
indeedsecurity/ava-ce
|
4483b301034a096b716646a470a6642b3df8ce61
|
[
"Apache-2.0"
] | null | null | null |
import re
from ava.common.check import _ValueCheck, _TimingCheck
from ava.common.exception import InvalidFormatException
# metadata
name = __name__
description = "checks for shell injection"
class ShellInjectionCheck(_ValueCheck):
"""
Checks for Shell Injection by executing the 'id' command. The payload uses shell separators to inject 'id', such
as ;, &&, ||, \n, and backticks.
"""
key = "shell.value.command"
name = "Shell Injection"
description = "checks for shell injection by executing commands"
example = "; id #"
def __init__(self):
"""Define static payloads"""
self._payloads = [
# no quotes
'; id #',
'| id #',
'&& id #',
'|| id #',
# single quotes
"' ; id #",
"' | id #",
"' && id #",
"' || id #",
# double quotes
'" ; id #',
'" | id #',
'" && id #',
'" || id #',
# inside quotes
'`id`',
'$(id)'
]
def check(self, response, payload):
"""
Checks for Shell Injection by looking for the output of 'id' in the response's body.
:param response: response object from server
:param payload: payload value
:return: true if vulnerable, false otherwise
"""
# check response body
if not response.text:
return False
# check for output
# uid=user gid=group groups=groups
regex = r"(uid=\d+[\(\)\w\-]+)(\s+gid=\d+[\(\)\w\-]+)(\s+groups=\d+[\(\)\w\-,]+)?"
if re.search(regex, response.text):
return True
else:
return False
def _check_payloads(self, payloads):
"""
Checks whether the payloads are usable by this class and modifies them to fit the check function.
Raises InvalidFormatException if a payload is not usable.
Children can override.
:param payloads: list of payloads
:return: list of modified payloads
"""
for payload in payloads:
if 'id' not in payload:
raise InvalidFormatException("Payload of {} must include 'id'".format(self.key))
return payloads
class ShellInjectionTimingCheck(_TimingCheck):
"""
Checks for Shell Injection by executing the 'sleep' command. The payload uses shell separators to inject 'sleep',
such as ;, &&, ||, \n, and backticks.
"""
key = "shell.timing.sleep"
name = "Shell Injection Timing"
description = "checks for shell injection by executing delays"
example = "; sleep 9 #"
def __init__(self):
"""Define static payloads"""
self._payloads = [
# no quotes
('; sleep 9 #', 9.00),
('| sleep 9 #', 9.00),
('&& sleep 9 #', 9.00),
('|| sleep 9 #', 9.00),
# single quotes
("' ; sleep 9 #", 9.00),
("' | sleep 9 #", 9.00),
("' && sleep 9 #", 9.00),
("' || sleep 9 #", 9.00),
# double quotes
('" ; sleep 9 #', 9.00),
('" | sleep 9 #', 9.00),
('" && sleep 9 #', 9.00),
('" || sleep 9 #', 9.00),
# inside quotes
('`sleep 9`', 9.00),
('$(sleep 9)', 9.00)
]
| 31.385321
| 117
| 0.501315
| 358
| 3,421
| 4.734637
| 0.290503
| 0.053097
| 0.057817
| 0.074336
| 0.381711
| 0.325664
| 0.325664
| 0.19705
| 0.145133
| 0.130973
| 0
| 0.025968
| 0.358375
| 3,421
| 108
| 118
| 31.675926
| 0.746241
| 0.296405
| 0
| 0.096774
| 0
| 0.016129
| 0.260927
| 0.031347
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.048387
| 0
| 0.33871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bd4c7d5599bd575e062c27d1c3e19928097f821
| 5,967
|
py
|
Python
|
train.py
|
ProfessorHuang/2D-UNet-Pytorch
|
b3941e8dc0ac3e76b6eedb656f943f1bd66fa799
|
[
"MIT"
] | 11
|
2020-12-09T10:38:47.000Z
|
2022-03-07T13:12:48.000Z
|
train.py
|
lllllllllllll-llll/2D-UNet-Pytorch
|
b3941e8dc0ac3e76b6eedb656f943f1bd66fa799
|
[
"MIT"
] | 3
|
2020-11-24T02:23:02.000Z
|
2021-04-18T15:31:51.000Z
|
train.py
|
ProfessorHuang/2D-UNet-Pytorch
|
b3941e8dc0ac3e76b6eedb656f943f1bd66fa799
|
[
"MIT"
] | 2
|
2021-04-07T06:17:46.000Z
|
2021-11-11T07:41:46.000Z
|
import argparse
import logging
import os
import sys
import numpy as np
from tqdm import tqdm
import time
import torch
import torch.nn as nn
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from models.unet import UNet
from models.nested_unet import NestedUNet
from datasets.promise12 import Promise12
from datasets.chaos import Chaos
from dice_loss import DiceBCELoss, dice_coeff
from eval import eval_net
torch.manual_seed(2020)
def train_net(net, trainset, valset, device, epochs, batch_size, lr, weight_decay, log_save_path):
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True)
val_loader = DataLoader(valset, batch_size=batch_size, shuffle=False, num_workers=2, pin_memory=True, drop_last=True)
writer = SummaryWriter(log_dir=log_save_path)
optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.95)
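# Multiplies the learning rate by 0.95 after every epoch (scheduler.step() runs once per epoch).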
criterion = DiceBCELoss()
best_DSC = 0.0
for epoch in range(epochs):
logging.info(f'Epoch {epoch + 1}')
epoch_loss = 0
epoch_dice = 0
with tqdm(total=len(trainset), desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
net.train()
imgs = batch['image']
true_masks = batch['mask']
imgs = imgs.to(device=device, dtype=torch.float32)
true_masks = true_masks.to(device=device, dtype=torch.float32)
masks_pred = net(imgs)
pred = torch.sigmoid(masks_pred)
pred = (pred>0.5).float()
loss = criterion(masks_pred, true_masks)
epoch_loss += loss.item()
epoch_dice += dice_coeff(pred, true_masks).item()
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_value_(net.parameters(), 5)
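# Clamp each gradient element to [-5, 5] to guard against exploding gradients.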
optimizer.step()
pbar.set_postfix(**{'loss (batch)': loss.item()})
pbar.update(imgs.shape[0])
scheduler.step()
logging.info('Training loss: {}'.format(epoch_loss/len(train_loader)))
writer.add_scalar('Train/loss', epoch_loss/len(train_loader), epoch)
logging.info('Training DSC: {}'.format(epoch_dice/len(train_loader)))
writer.add_scalar('Train/dice', epoch_dice/len(train_loader), epoch)
val_dice, val_loss = eval_net(net, val_loader, device, criterion)
logging.info('Validation Loss: {}'.format(val_loss))
writer.add_scalar('Val/loss', val_loss, epoch)
logging.info('Validation DSC: {}'.format(val_dice))
writer.add_scalar('Val/dice', val_dice, epoch)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
# writer.add_images('images', imgs, epoch)
writer.add_images('masks/true', true_masks, epoch)
writer.add_images('masks/pred', torch.sigmoid(masks_pred) > 0.5, epoch)
writer.close()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100, help='Number of epochs')
parser.add_argument('--batch_size', metavar='B', type=int, nargs='?', default=8, help='Batch size')
parser.add_argument('--lr', metavar='LR', type=float, nargs='?', default=1e-3, help='Learning rate')
parser.add_argument('--weight_decay', type=float, nargs='?', default=1e-5, help='Weight decay')
parser.add_argument('--model', type=str, default='unet', help='Model name')
parser.add_argument('--dataset', type=str, default='promise12', help='Dataset name')
parser.add_argument('--gpu', type=int, default=0, help='GPU number')
parser.add_argument('--save', type=str, default='EXP', help='Experiment name')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
args.save = 'logs_train/{}-{}-{}'.format(args.model, args.dataset, time.strftime("%Y%m%d-%H%M%S"))
if not os.path.exists(args.save):
os.makedirs(args.save)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info(f'''
Model: {args.model}
Dataset: {args.dataset}
Total Epochs: {args.epochs}
Batch size: {args.batch_size}
Learning rate: {args.lr}
Weight decay: {args.weight_decay}
Device: GPU{args.gpu}
Log name: {args.save}
''')
torch.cuda.set_device(args.gpu)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# choose a model
if args.model == 'unet':
net = UNet()
elif args.model == 'nestedunet':
net = NestedUNet()
else:
sys.exit('Unknown model: {}'.format(args.model))
net.to(device=device)
# choose a dataset
if args.dataset == 'promise12':
dir_data = '../data/promise12'
trainset = Promise12(dir_data, mode='train')
valset = Promise12(dir_data, mode='val')
elif args.dataset == 'chaos':
dir_data = '../data/chaos'
trainset = Chaos(dir_data, mode='train')
valset = Chaos(dir_data, mode='val')
else:
sys.exit('Unknown dataset: {}'.format(args.dataset))
try:
train_net(net=net,
trainset=trainset,
valset=valset,
epochs=args.epochs,
batch_size=args.batch_size,
lr=args.lr,
weight_decay=args.weight_decay,
device=device,
log_save_path=args.save)
except KeyboardInterrupt:
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 37.062112
| 121
| 0.622256
| 755
| 5,967
| 4.761589
| 0.25298
| 0.027538
| 0.03783
| 0.01669
| 0.176356
| 0.091794
| 0.061196
| 0.042281
| 0
| 0
| 0
| 0.011316
| 0.244679
| 5,967
| 160
| 122
| 37.29375
| 0.786332
| 0.012066
| 0
| 0.016
| 0
| 0
| 0.153794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016
| false
| 0
| 0.144
| 0
| 0.168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bd7513f32c35775cd41faee3dba10cf9bfca50a
| 882
|
py
|
Python
|
app/mod_tweepy/controllers.py
|
cbll/SocialDigger
|
177a7b5bb1b295722e8d281a8f33678a02bd5ab0
|
[
"Apache-2.0"
] | 3
|
2016-01-28T20:35:46.000Z
|
2020-03-08T08:49:07.000Z
|
app/mod_tweepy/controllers.py
|
cbll/SocialDigger
|
177a7b5bb1b295722e8d281a8f33678a02bd5ab0
|
[
"Apache-2.0"
] | null | null | null |
app/mod_tweepy/controllers.py
|
cbll/SocialDigger
|
177a7b5bb1b295722e8d281a8f33678a02bd5ab0
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template
from flask_tweepy import Tweepy
app = Flask(__name__)
app.config.setdefault('TWEEPY_CONSUMER_KEY', 'sve32G2LtUhvgyj64J0aaEPNk')
app.config.setdefault('TWEEPY_CONSUMER_SECRET', '0z4NmfjET4BrLiOGsspTkVKxzDK1Qv6Yb2oiHpZC9Vi0T9cY2X')
app.config.setdefault('TWEEPY_ACCESS_TOKEN_KEY', '1425531373-dvjiA55ApSFEnTAWPzzZAZLRoGDo3OTTtt4ER1W')
app.config.setdefault('TWEEPY_ACCESS_TOKEN_SECRET', '357nVGYtynDtDBmqAZw2vxeXE3F8GbqBSqWInwStDluDX')
tweepy = Tweepy(app)
@app.route('/tweets')
def show_tweets():
tweets = tweepy.api.public_timeline()
return render_template('tweets.html', tweets=tweets)
@app.route('/say-something')
def say_something():
status = tweepy.api.update_status('Hello, world!')
status_link = 'http://twitter.com/#!/YourUserName/status/%s' % status.id
return render_template('what_i_said.html', status_link=status_link)
| 38.347826
| 102
| 0.794785
| 100
| 882
| 6.76
| 0.46
| 0.053254
| 0.112426
| 0.147929
| 0.204142
| 0.106509
| 0
| 0
| 0
| 0
| 0
| 0.045623
| 0.080499
| 882
| 22
| 103
| 40.090909
| 0.787916
| 0
| 0
| 0
| 0
| 0
| 0.414302
| 0.273553
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bd8f52d214214860defef756924562c2d718956
| 2,135
|
py
|
Python
|
speed/__init__.py
|
Astrochamp/speed
|
e17b2d1de6590d08e5cfddf875b4445f20c1e08a
|
[
"MIT"
] | 1
|
2022-02-12T18:43:43.000Z
|
2022-02-12T18:43:43.000Z
|
speed/__init__.py
|
Astrochamp/speed
|
e17b2d1de6590d08e5cfddf875b4445f20c1e08a
|
[
"MIT"
] | null | null | null |
speed/__init__.py
|
Astrochamp/speed
|
e17b2d1de6590d08e5cfddf875b4445f20c1e08a
|
[
"MIT"
] | null | null | null |
def showSpeed(func, r, *args):
'''Usage: showSpeed(function, runs)
You can also pass arguments into <function> like so:
showSpeed(function, runs, <other>, <args>, <here> ...)
showSpeed() prints the average execution time of <function> over <runs> runs
'''
def formatted(f):
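# Render a float's scientific-notation repr (e.g. 1.5e-07) as a plain decimal string.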
import re
s = str(f)
m = re.fullmatch(r'(-?)(\d)(?:\.(\d+))?e([+-]\d+)', s)
if not m:
return s
sign, intpart, fractpart, exponent = m.groups('')
exponent = int(exponent) + 1
digits = intpart + fractpart
if exponent < 0:
return sign + '0.' + '0'*(-exponent) + digits
exponent -= len(digits)
return sign + digits + '0'*exponent + '.0'
import os, sys, gc
class noPrint:
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
from time import perf_counter as pf
garbage = gc.isenabled()
gc.disable()
start = pf()
with noPrint():
for _ in range(r):
func(*args)
end = pf()
if garbage:
gc.enable()
print(formatted((end-start)/r))
def getSpeed(func, r, *args):
'''Usage: getSpeed(function, runs)
You can also pass arguments into <function> like so:
getSpeed(function, runs, <other>, <args>, <here> ...)
getSpeed() returns the average execution time of <function> over <runs> runs, as a float
'''
import os, sys, gc
class noPrint:
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
from time import perf_counter as pf
garbage = gc.isenabled()
gc.disable()
start = pf()
with noPrint():
for _ in range(r):
func(*args)
end = pf()
if garbage:
gc.enable()
return (end-start)/r
| 31.865672
| 92
| 0.562061
| 266
| 2,135
| 4.383459
| 0.323308
| 0.06175
| 0.06175
| 0.024014
| 0.67753
| 0.634648
| 0.634648
| 0.634648
| 0.634648
| 0.557461
| 0
| 0.004024
| 0.301639
| 2,135
| 66
| 93
| 32.348485
| 0.778001
| 0.209368
| 0
| 0.679245
| 0
| 0
| 0.038953
| 0.034084
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132075
| false
| 0
| 0.09434
| 0
| 0.339623
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bd9a84e5c6f84dbd90d1bc72cc33fccf0f2c06c
| 9,106
|
py
|
Python
|
polygonize.py
|
yaramohajerani/GL_learning
|
aa8d644024e48ba3e68398050f259b61d0660a2e
|
[
"MIT"
] | 7
|
2021-03-04T15:43:21.000Z
|
2021-07-08T08:42:23.000Z
|
polygonize.py
|
yaramohajerani/GL_learning
|
aa8d644024e48ba3e68398050f259b61d0660a2e
|
[
"MIT"
] | null | null | null |
polygonize.py
|
yaramohajerani/GL_learning
|
aa8d644024e48ba3e68398050f259b61d0660a2e
|
[
"MIT"
] | 2
|
2021-03-11T12:04:42.000Z
|
2021-04-20T16:33:31.000Z
|
#!/usr/bin/env python
u"""
polygonize.py
Yara Mohajerani (Last update 09/2020)
Read output predictions and convert to shapefile lines
"""
import os
import sys
import rasterio
import numpy as np
import getopt
import shapefile
from skimage.measure import find_contours
from shapely.geometry import Polygon,LineString,Point
#-- main function
def main():
#-- Read the system arguments listed after the program
long_options=['DIR=','FILTER=','OUT_BASE=','CODE_BASE=','IN_BASE=','noMASK']
optlist,arglist = getopt.getopt(sys.argv[1:],'D:F:O:C:I:M',long_options)
#-- Set default settings
subdir = os.path.join('geocoded_v1','stitched.dir',\
'atrous_32init_drop0.2_customLossR727.dir')
FILTER = 0.
code_base = '/DFS-L/DATA/isabella/ymohajer/GL_learning'
out_base = '/DFS-L/DATA/gl_ml'
make_mask = True
in_base = os.path.expanduser('~/GL_learning_data')
for opt, arg in optlist:
if opt in ("-D","--DIR"):
subdir = arg
elif opt in ("-F","--FILTER"):
if arg not in ['NONE','none','None','N','n',0]:
FILTER = float(arg)
elif opt in ("O","--OUT_BASE"):
out_base = os.path.expanduser(arg)
elif opt in ("I","--IN_BASE"):
in_base = os.path.expanduser(arg)
elif opt in ("C","--CODE_BASE"):
code_base = os.path.expanduser(arg)
elif opt in ("M","--noMASK"):
make_mask = False
flt_str = '_%.1fkm'%(FILTER/1000)
#-- make sure out directory doesn't end with '/' so we can get parent directory
if out_base.endswith('/'):
out_base = out_base[:-1]
indir = os.path.join(in_base,subdir)
#-- Get list of files
fileList = os.listdir(indir)
pred_list = [f for f in fileList if (f.endswith('.tif') and ('mask' not in f))]
#-- LOCAL output directory
local_output_dir = os.path.join(indir,'shapefiles.dir')
#-- make output directory if it doesn't exist
if not os.path.exists(local_output_dir):
os.mkdir(local_output_dir)
#-- slurm directory
slurm_dir = os.path.join(indir,'slurm.dir')
#-- make slurm directory if it doesn't exist
if not os.path.exists(slurm_dir):
os.mkdir(slurm_dir)
print('# of files: ', len(pred_list))
#-- threshold for getting contours and centerlines
eps = 0.3
#-- open file for list of polygons to run through centerline routine
list_fid = open(os.path.join(slurm_dir,'total_job_list%s.sh'%flt_str),'w')
#-- loop through prediction files
#-- get contours and save each as a line in shapefile format
for pcount,f in enumerate(pred_list):
#-- open job list for this file
sub_list_fid = open(os.path.join(slurm_dir,f.replace('.tif','%s.sh'%flt_str)),'w')
#-- read file
raster = rasterio.open(os.path.join(indir,f),'r')
im = raster.read(1)
#-- get transformation matrix
trans = raster.transform
if make_mask:
#-- also read the corresponding mask file
mask_file = os.path.join(indir,f.replace('.tif','_mask.tif'))
mask_raster = rasterio.open(mask_file,'r')
mask = mask_raster.read(1)
mask_raster.close()
#-- get contours of prediction
#-- close contour ends to make polygons
im[np.nonzero(im[:,0] > eps),0] = eps
im[np.nonzero(im[:,-1] > eps),-1] = eps
im[0,np.nonzero(im[0,:] > eps)] = eps
im[-1,np.nonzero(im[-1,:] > eps)] = eps
contours = find_contours(im, eps)
#-- make contours into closed polygons to find pinning points
#-- also apply noise filter and append to noise list
x = {}
y = {}
noise = []
none_list = []
pols = [None]*len(contours)
pol_type = [None]*len(contours)
for n,contour in enumerate(contours):
#-- convert to coordinates
x[n],y[n] = rasterio.transform.xy(trans, contour[:,0], contour[:,1])
if len(x[n]) < 3:
pols[n] = None
none_list.append(n)
else:
pols[n] = Polygon(zip(x[n],y[n]))
if make_mask:
#-- get elements of mask the contour is on
submask = mask[np.round(contour[:, 0]).astype('int'), np.round(contour[:, 1]).astype('int')]
#-- if more than half of the elements are from test tile, count contour as test type
if np.count_nonzero(submask) > submask.size/2.:
pol_type[n] = 'Test'
else:
pol_type[n] = 'Train'
else:
pol_type[n] = 'Test'
#-- loop through remaining polygons and determine which ones are
#-- pinning points based on the width and length of the bounding box
pin_list = []
box_ll = [None]*len(contours)
box_ww = [None]*len(contours)
for n in range(len(pols)):
if n not in none_list:
box_ll[n] = pols[n].length
box_ww[n] = pols[n].area/box_ll[n]
#-- if the width is larger than 1/25 of the length, it's a pinning point
if box_ww[n] > box_ll[n]/25:
pin_list.append(n)
#-- Loop through all the polygons and take any overlapping areas out
#-- of the enclosing polygon and ignore the inside polygon
ignore_list = []
for i in range(len(pols)):
for j in range(len(pols)):
if (i != j) and (i not in none_list) and (j not in none_list) and pols[i].contains(pols[j]):
# pols[i] = pols[i].difference(pols[j])
if (i in pin_list) and (j in pin_list):
#-- if it's a pinning point, ignore outer loop
ignore_list.append(i)
else:
#-- if not, add inner loop to ignore list
ignore_list.append(j)
#-- get rid of duplicates in ignore list
ignore_list = list(set(ignore_list))
#-- loop through and apply noise filter
for n in range(len(contours)):
#-- apply filter
if (n not in none_list) and (n not in ignore_list) and (len(x[n]) < 2 or LineString(zip(x[n],y[n])).length <= FILTER):
noise.append(n)
#-- find overlap between ignore list and noise list
if len(list(set(noise) & set(ignore_list))) != 0:
sys.exit('Overlap not empty: {}'.format(list(set(noise) & set(ignore_list))))
#-- find overlap between ignore list and none list
if len(list(set(none_list) & set(ignore_list))) != 0:
sys.exit('Overlap not empty: {}'.format(list(set(none_list) & set(ignore_list))))
#-- initialize list of contour linestrings
er = [None]*len(contours)
n = 0 # total center line counter
er_type = [None]*len(er)
er_class = [None]*len(er)
er_lbl = [None]*len(er)
count = 1 #-- file count
pc = 1 # pinning point counter
lc = 1 # line counter
#-- loop through polygons and save to separate files
for idx,p in enumerate(pols):
er[idx] = [list(a) for a in zip(x[idx],y[idx])]
er_type[idx] = pol_type[idx]
if (idx in noise) or (idx in none_list):
er_class[idx] = 'Noise'
elif idx in ignore_list:
er_class[idx] = 'Ignored Contour'
else:
if idx in pin_list:
er_class[idx] = 'Pinning Contour'
er_lbl[idx] = 'pin_err%i'%pc
pc += 1 #- increment pinning point counter
else:
er_class[idx] = 'GL Uncertainty'
#-- set label
er_lbl[idx] = 'err%i'%lc
lc += 1 #- increment line counter
#-- write individual polygon to file
out_name = f.replace('.tif','%s_ERR_%i'%(flt_str,count))
er_file = os.path.join(local_output_dir,'%s.shp'%out_name)
w = shapefile.Writer(er_file)
w.field('ID', 'C')
w.field('Type','C')
w.field('Class','C')
w.field('Length','C')
w.field('Width','C')
w.line([er[idx]])
w.record(er_lbl[idx] , er_type[idx], er_class[idx], box_ll[idx], box_ww[idx])
w.close()
# create the .prj file
prj = open(er_file.replace('.shp','.prj'), "w")
prj.write(raster.crs.to_wkt())
prj.close()
#-- write corresponding slurm file
#-- calculate run time
run_time = int(p.length/300)+10
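#-- (rough heuristic: allotted job time grows with contour length, plus a fixed buffer)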
outfile = os.path.join(slurm_dir,'%s.sh'%out_name)
fid = open(outfile,'w')
fid.write("#!/bin/bash\n")
fid.write("#SBATCH -N1\n")
fid.write("#SBATCH -n1\n")
fid.write("#SBATCH --mem=10G\n")
fid.write("#SBATCH -t %i\n"%run_time)
fid.write("#SBATCH -p sib2.9,nes2.8,has2.5,brd2.4,ilg2.3\n")
fid.write("#SBATCH --job-name=gl_%i_%i_%i\n"%(pcount,idx,count))
fid.write("#SBATCH --mail-user=ymohajer@uci.edu\n")
fid.write("#SBATCH --mail-type=FAIL\n\n")
fid.write('source ~/miniconda3/bin/activate gl_env\n')
fid.write('python %s %s\n'%\
(os.path.join(code_base,'run_centerline.py'),\
os.path.join(out_base,subdir,'shapefiles.dir','%s.shp'%out_name)))
fid.close()
#-- add job to list
sub_list_fid.write('nohup sbatch %s\n'%os.path.join(out_base,subdir,'slurm.dir','%s.sh'%out_name))
count += 1
sub_list_fid.close()
#-- add sub list fid to total job list
list_fid.write('sh %s\n'%os.path.join(out_base,subdir,'slurm.dir',f.replace('.tif','%s.sh'%flt_str)))
#-- save all contours to file
er_file = os.path.join(local_output_dir,f.replace('.tif','%s_ERR.shp'%flt_str))
w = shapefile.Writer(er_file)
w.field('ID', 'C')
w.field('Type','C')
w.field('Class','C')
w.field('Length','C')
w.field('Width','C')
#-- loop over contours and write them
for n in range(len(er)):
w.line([er[n]])
w.record(er_lbl[n] , er_type[n], er_class[n], box_ll[n], box_ww[n])
w.close()
# create the .prj file
prj = open(er_file.replace('.shp','.prj'), "w")
prj.write(raster.crs.to_wkt())
prj.close()
#-- close input file
raster.close()
#-- close master list fid
list_fid.close()
#-- run main program
if __name__ == '__main__':
main()
| 32.992754
| 121
| 0.647595
| 1,509
| 9,106
| 3.795891
| 0.206759
| 0.021997
| 0.026187
| 0.015712
| 0.261697
| 0.185754
| 0.155203
| 0.148918
| 0.112954
| 0.09602
| 0
| 0.01023
| 0.184164
| 9,106
| 275
| 122
| 33.112727
| 0.760802
| 0.250494
| 0
| 0.16129
| 0
| 0.005376
| 0.143216
| 0.028998
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005376
| false
| 0
| 0.043011
| 0
| 0.048387
| 0.005376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bdb2f5c5a190e7161ceacb56d31dd8753fd3925
| 4,573
|
py
|
Python
|
test_autofit/graphical/regression/test_linear_regression.py
|
rhayes777/AutoFit
|
f5d769755b85a6188ec1736d0d754f27321c2f06
|
[
"MIT"
] | null | null | null |
test_autofit/graphical/regression/test_linear_regression.py
|
rhayes777/AutoFit
|
f5d769755b85a6188ec1736d0d754f27321c2f06
|
[
"MIT"
] | null | null | null |
test_autofit/graphical/regression/test_linear_regression.py
|
rhayes777/AutoFit
|
f5d769755b85a6188ec1736d0d754f27321c2f06
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from autofit.graphical import (
EPMeanField,
LaplaceOptimiser,
EPOptimiser,
Factor,
)
from autofit.messages import FixedMessage, NormalMessage
np.random.seed(1)
prior_std = 10.
error_std = 1.
a = np.array([[-1.3], [0.7]])
b = np.array([-0.5])
n_obs = 100
n_features, n_dims = a.shape
x = 5 * np.random.randn(n_obs, n_features)
y = x.dot(a) + b + np.random.randn(n_obs, n_dims)
@pytest.fixture(name="likelihood")
def make_likelihood(norm):
def likelihood(z, y):
return norm.logpdf(z - y).sum()
return likelihood
@pytest.fixture(name="model")
def make_model(likelihood_factor, linear_factor, prior_a, prior_b):
return likelihood_factor * linear_factor * prior_a * prior_b
@pytest.fixture(name="approx0")
def make_approx0(a_, b_, z_, x_, y_):
return {
a_: NormalMessage.from_mode(np.zeros((n_features, n_dims)), 100),
b_: NormalMessage.from_mode(np.zeros(n_dims), 100),
z_: NormalMessage.from_mode(np.zeros((n_obs, n_dims)), 100),
x_: FixedMessage(x),
y_: FixedMessage(y),
}
@pytest.fixture(name="model_approx")
def make_model_approx(model, approx0):
return EPMeanField.from_approx_dists(model, approx0)
def check_model_approx(mean_field, a_, b_, z_, x_, y_):
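# Analytic check: with a Gaussian prior and Gaussian noise, the exact posterior is the closed-form ridge-regression solution below.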
X = np.c_[x, np.ones(len(x))]
XTX = X.T.dot(X) + np.eye(3) * (error_std / prior_std)**2
cov = np.linalg.inv(XTX) * error_std**2
cov_a = cov[:2, :]
cov_b = cov[2, :]
# Analytic results
mean_a = cov_a.dot(X.T.dot(y))
mean_b = cov_b.dot(X.T.dot(y))
a_std = cov_a.diagonal()[:, None] ** 0.5
b_std = cov_b[[-1]] ** 0.5
assert mean_field[a_].mean == pytest.approx(mean_a, rel=1e-2)
assert mean_field[b_].mean == pytest.approx(mean_b, rel=1e-2)
assert mean_field[a_].sigma == pytest.approx(a_std, rel=0.5)
assert mean_field[b_].sigma == pytest.approx(b_std, rel=0.5)
@pytest.fixture(name="model_jac_approx")
def make_model_jac_approx(
model, approx0, likelihood_factor, linear_factor_jac, prior_a, prior_b
):
model = likelihood_factor * linear_factor_jac * prior_a * prior_b
model_jac_approx = EPMeanField.from_approx_dists(model, approx0)
return model_jac_approx
def test_jacobian(
a_,
b_,
x_,
z_,
linear_factor,
linear_factor_jac,
):
n, m, d = 5, 4, 3
x = np.random.rand(n, d)
a = np.random.rand(d, m)
b = np.random.rand(m)
values = {x_: x, a_: a, b_: b}
g0 = {z_: np.random.rand(n, m)}
fval0, fjac0 = linear_factor.func_jacobian(values)
fval1, fjac1 = linear_factor_jac.func_jacobian(values)
fgrad0 = fjac0.grad(g0)
fgrad1 = fjac1.grad(g0)
assert fval0 == fval1
assert (fgrad0 - fgrad1).norm() < 1e-6
assert (fval0.deterministic_values - fval1.deterministic_values).norm() < 1e-6
def test_laplace(
model_approx,
a_,
b_,
x_,
y_,
z_,
):
laplace = LaplaceOptimiser()
opt = EPOptimiser(model_approx.factor_graph, default_optimiser=laplace)
model_approx = opt.run(model_approx)
mean_field = model_approx.mean_field
check_model_approx(mean_field, a_, b_, z_, x_, y_)
def _test_laplace_jac(
model_jac_approx,
a_,
b_,
x_,
y_,
z_,
):
laplace = LaplaceOptimiser()
opt = EPOptimiser(model_jac_approx.factor_graph, default_optimiser=laplace)
model_approx = opt.run(model_jac_approx)
mean_field = model_approx.mean_field
check_model_approx(mean_field, a_, b_, z_, x_, y_)
@pytest.fixture(name="normal_model_approx")
def make_normal_model_approx(
model_approx, approx0, linear_factor, a_, b_, y_, z_,
):
y = model_approx.mean_field[y_].mean
normal_factor = NormalMessage(y, np.full_like(y, error_std)).as_factor(z_)
prior_a = NormalMessage(
np.zeros_like(a), np.full_like(a, prior_std)
).as_factor(a_, 'prior_a')
prior_b = NormalMessage(
np.zeros_like(b), np.full_like(b, prior_std)
).as_factor(b_, 'prior_b')
new_model = normal_factor * linear_factor * prior_a * prior_b
return EPMeanField.from_approx_dists(new_model, approx0)
def test_exact_updates(
normal_model_approx,
a_,
b_,
x_,
y_,
z_,
):
laplace = LaplaceOptimiser()
opt = EPOptimiser.from_meanfield(normal_model_approx, default_optimiser=laplace)
new_approx = opt.run(normal_model_approx)
mean_field = new_approx.mean_field
check_model_approx(mean_field, a_, b_, z_, x_, y_)
| 26.9
| 84
| 0.659086
| 682
| 4,573
| 4.073314
| 0.159824
| 0.079194
| 0.059395
| 0.064795
| 0.37653
| 0.342693
| 0.25594
| 0.25594
| 0.212023
| 0.212023
| 0
| 0.020306
| 0.213864
| 4,573
| 170
| 85
| 26.9
| 0.752434
| 0.003499
| 0
| 0.24812
| 0
| 0
| 0.018218
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.090226
| false
| 0
| 0.030075
| 0.030075
| 0.172932
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bdbfbdb118df696ee04cd30b0904cea6a77354a
| 1,716
|
py
|
Python
|
src/linear/linear.py
|
RaulMurillo/cpp-torch
|
30d0ee38c20f389e4b996d821952a48cccc70789
|
[
"MIT"
] | null | null | null |
src/linear/linear.py
|
RaulMurillo/cpp-torch
|
30d0ee38c20f389e4b996d821952a48cccc70789
|
[
"MIT"
] | null | null | null |
src/linear/linear.py
|
RaulMurillo/cpp-torch
|
30d0ee38c20f389e4b996d821952a48cccc70789
|
[
"MIT"
] | null | null | null |
import math
from torch import nn
import torch
import torch.nn.functional as F
import linear_cpu as linear
class LinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, weights, bias, params):
is_bias = int(params[0])
outputs = linear.forward(input, weights, bias, is_bias)[0]
variables = [input, weights, bias, params]
ctx.save_for_backward(*variables)
return outputs
@staticmethod
def backward(ctx, gradOutput):
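# backward must return one gradient per forward() argument; `_` is a dummy gradient for the non-learnable `params` tensor.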
_ = torch.autograd.Variable(torch.zeros(5))
input, weights, bias, params = ctx.saved_tensors
is_bias = int(params[0])
gradInput, gradWeight, gradBias = linear.backward(
input, gradOutput, weights, is_bias)
return gradInput, gradWeight, gradBias, _
class Linear(nn.Module):
def __init__(self, in_features, out_features, is_bias=True):
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.is_bias = is_bias
self.params = torch.autograd.Variable(torch.Tensor([is_bias]))
self.weight = nn.Parameter(torch.empty(out_features, in_features))
self.bias = nn.Parameter(torch.empty(out_features))
self._initialize_weights()
def _initialize_weights(self):
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.is_bias:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input):
return LinearFunction.apply(input, self.weight, self.bias, self.params)
| 29.586207
| 79
| 0.666084
| 221
| 1,716
| 4.950226
| 0.294118
| 0.04936
| 0.058501
| 0.060329
| 0.133455
| 0.058501
| 0
| 0
| 0
| 0
| 0
| 0.006061
| 0.230769
| 1,716
| 57
| 80
| 30.105263
| 0.822727
| 0
| 0
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.128205
| 0.025641
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bdf6ec04e7754ae150125e027e057b6d43b24d9
| 11,907
|
py
|
Python
|
object_files_api/files_api.py
|
ndlib/mellon-manifest-pipeline
|
aa90494e73fbc30ce701771ac653d28d533217db
|
[
"Apache-2.0"
] | 1
|
2021-06-27T15:16:13.000Z
|
2021-06-27T15:16:13.000Z
|
object_files_api/files_api.py
|
ndlib/marble-manifest-pipeline
|
abc036e4c81a8a5e938373a43153e2492a17cbf8
|
[
"Apache-2.0"
] | 8
|
2019-11-05T18:58:23.000Z
|
2021-09-03T14:54:42.000Z
|
object_files_api/files_api.py
|
ndlib/mellon-manifest-pipeline
|
aa90494e73fbc30ce701771ac653d28d533217db
|
[
"Apache-2.0"
] | null | null | null |
""" Files API """
import boto3
import os
import io
from datetime import datetime, timedelta
import json
import time
from s3_helpers import write_s3_json, read_s3_json, delete_s3_key
from api_helpers import json_serial
from search_files import crawl_available_files, update_pdf_fields
from dynamo_helpers import add_file_to_process_keys, add_file_group_keys, get_iso_date_as_string, add_image_group_keys, add_media_group_keys, add_media_keys, add_image_keys
from dynamo_save_functions import save_file_system_record
from add_files_to_json_object import change_file_extensions_to_tif
from pipelineutilities.dynamo_query_functions import get_all_file_to_process_records_by_storage_system
class FilesApi():
def __init__(self, event, config):
self.event = event
self.event['local'] = self.event.get('local', False)
self.config = config
self.local_folder = os.path.dirname(os.path.realpath(__file__)) + "/"
self.time_to_break = datetime.now() + timedelta(seconds=config['seconds-to-allow-for-processing'])
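# Deadline after which processing stops so progress can be checkpointed and resumed on a later invocation.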
if self.config.get('test', False):
self.directory = os.path.join(os.path.dirname(__file__), 'test')
else:
self.directory = os.path.join(os.path.dirname(__file__), 'cache')
self.start_time = time.time()
self.table_name = self.config.get('website-metadata-tablename', '')
self.resumption_filename = 'file_objects_list_partially_processed.json'
if not self.event['local']:
save_file_system_record(self.table_name, 'S3', 'Marble content bucket')
save_file_system_record(self.table_name, 'S3', 'Multimedia bucket')
self.file_to_process_records_in_dynamo = {}
if not self.config.get('local', True):
print("pulling file_to_process_records from dynamo")
self.file_to_process_records_in_dynamo = get_all_file_to_process_records_by_storage_system(self.config.get('website-metadata-tablename', ''), 'S3')
self.table = boto3.resource('dynamodb').Table(self.table_name)
self.config['forceSaveCrawlAvailableFiles'] = False
def save_files_details(self):
""" This will crawl available files, then loop through the file listing, saving each to dynamo """
if self.event['objectFilesApi_execution_count'] == 1:
marble_files = self._crawl_available_files_from_s3_or_cache(self.config['marble-content-bucket'], True)
# rbsc_files = self._crawl_available_files_from_s3_or_cache(self.config['rbsc-image-bucket'], True) # save in case we need to crawl the RBSC bucket ever again
# all_files_listing = {**rbsc_files, **marble_files}
all_files_listing = {**marble_files}
else:
all_files_listing = self._resume_execution()
file_objects = []
processing_complete = True
for key, value in all_files_listing.items():
if not value.get('recordProcessedFlag', False):
file_objects.extend(self._save_file_objects_per_collection(value))
value['recordProcessedFlag'] = True
print("saved", len(value.get('files', [])), "files for collection: ", key, int(time.time() - self.start_time), 'seconds.')
if datetime.now() >= self.time_to_break:
self._save_progress(all_files_listing)
processing_complete = False
break
if processing_complete:
self._clean_up_when_done()
self.event['objectFilesApiComplete'] = True
if self.event['local']:
self._cache_s3_call(os.path.join(self.directory, "file_objects.json"), file_objects)
else:
write_s3_json(self.config['manifest-server-bucket'], 'objectFiles/all/index.json', file_objects)
return file_objects
def _save_progress(self, all_files_listing: dict):
""" This is used to save progress in order to resume execution later """
if self.event['local']:
cache_file_name = os.path.join(self.directory, self.resumption_filename)
self._cache_s3_call(cache_file_name, all_files_listing)
else:
s3_key = os.path.join(self.config['pipeline-control-folder'], self.resumption_filename)
write_s3_json(self.config['process-bucket'], s3_key, all_files_listing)
def _resume_execution(self) -> dict:
""" This re-loads the all_files_listing saved as part of _save_progress in order to resume execution """
all_files_listing = {}
if self.event['local']:
cache_file_name = os.path.join(self.directory, self.resumption_filename)
if os.path.exists(cache_file_name):
with io.open(cache_file_name, 'r', encoding='utf-8') as json_file:
all_files_listing = json.load(json_file)
else:
s3_key = os.path.join(self.config['pipeline-control-folder'], self.resumption_filename)
all_files_listing = read_s3_json(self.config['process-bucket'], s3_key)
return all_files_listing
def _clean_up_when_done(self):
""" This deletes work-in-progress files """
if self.event['local']:
cache_file_name = os.path.join(self.directory, self.resumption_filename)
if os.path.exists(cache_file_name):
os.remove(cache_file_name)
else:
s3_key = os.path.join(self.config['pipeline-control-folder'], self.resumption_filename)
delete_s3_key(self.config['process-bucket'], s3_key)
def _save_file_objects_per_collection(self, collection_json: dict) -> list: # noqa: C901
""" Loop through every file in a collection and save each record into dynamo """
i = 0
collection_list = []
object_group_ids = []
image_group_ids = []
media_group_ids = []
for file_info in collection_json.get('files', []):
i += 1
my_json = dict(file_info)
my_json['sequence'] = i
my_json['id'] = my_json.get('key', '')
my_json['copyrightStatus'] = "Public domain"
if 'copyright' in my_json.get('key', '').lower(): # If the word "copyright" exists in the folder structure, this file is Copyrighted
my_json['copyrightStatus'] = "Copyright"
if 'mediaGroupId' not in my_json and 'imageGroupId' not in my_json:
continue # We must have either mediaGroupId or imageGroupId or we can't process this record.
if 'mediaGroupId' in my_json: # is_media_file(self.config.get('media-file-extensions', []), my_json.get('id')):
my_json = add_media_keys(my_json, self.config.get('media-server-base-url', ''))
else:
update_pdf_fields(my_json)
my_json = change_file_extensions_to_tif(my_json, self.config.get("file-extensions-to-protect-from-changing-to-tif", []))
my_json = add_image_keys(my_json, self.config.get('image-server-base-url', ''))
my_json['typeOfData'] = my_json.get('typeOfData', 'Marble content bucket')
collection_list.append(my_json)
my_json['storageSystem'] = my_json.get('storageSystem', 'S3')
if 'sourceFilePath' not in my_json:
my_json['sourceFilePath'] = my_json.get('path', '') # only add if this does not already exist
if not self.config.get('local', False):
with self.table.batch_writer() as batch:
batch.put_item(Item=my_json)
item_id = my_json.get('id')
if self.config.get('folder_exposed_through_cdn') not in my_json.get('id'): # nothing in the folder exposed through cdn should have image processing done for it.
if self.event.get('exportAllFilesFlag', False) or item_id not in self.file_to_process_records_in_dynamo or my_json.get('modifiedDate', '') > self.file_to_process_records_in_dynamo[item_id].get('dateModifiedInDynamo', ''): # noqa: #501
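# Re-queue the file when a full export is forced, the file is new to Dynamo, or it changed since its last recorded modification.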
file_to_process_json = dict(my_json)
file_to_process_json = add_file_to_process_keys(file_to_process_json)
batch.put_item(Item=file_to_process_json)
collection_list.append(file_to_process_json)
# Only insert Group records once
object_group_id = my_json.get('objectFileGroupId')
if object_group_id and object_group_id not in object_group_ids: # This will be removed once we transition to imageGroupId and mediaGroupId
file_group_record = {'objectFileGroupId': object_group_id}
file_group_record['storageSystem'] = my_json.get('storageSystem')
file_group_record['typeOfData'] = my_json.get('typeOfData')
file_group_record['dateAddedToDynamo'] = get_iso_date_as_string()
file_group_record = add_file_group_keys(file_group_record)
batch.put_item(Item=file_group_record)
collection_list.append(file_group_record)
object_group_ids.append(object_group_id)
image_group_id = my_json.get('imageGroupId')
if image_group_id and image_group_id not in image_group_ids:
image_group_record = {'imageGroupId': image_group_id}
image_group_record['storageSystem'] = my_json.get('storageSystem')
image_group_record['typeOfData'] = my_json.get('typeOfData')
image_group_record['dateAddedToDynamo'] = get_iso_date_as_string()
image_group_record = add_image_group_keys(image_group_record)
batch.put_item(Item=image_group_record)
collection_list.append(image_group_record)
image_group_ids.append(image_group_id)
media_group_id = my_json.get('mediaGroupId')
if media_group_id and media_group_id not in media_group_ids:
media_group_record = {'mediaGroupId': media_group_id}
media_group_record['storageSystem'] = my_json.get('storageSystem')
media_group_record['typeOfData'] = my_json.get('typeOfData')
media_group_record['dateAddedToDynamo'] = get_iso_date_as_string()
media_group_record = add_media_group_keys(media_group_record)
batch.put_item(Item=media_group_record)
collection_list.append(media_group_record)
media_group_ids.append(media_group_id)
return collection_list
def _cache_s3_call(self, file_name: str, objects: dict):
""" Save json file locally """
with open(file_name, 'w') as outfile:
json.dump(objects, outfile, default=json_serial, sort_keys=True, indent=2)
def _crawl_available_files_from_s3_or_cache(self, bucket: str, force_use_s3: bool = False) -> dict:
""" Find all related files, whether from querying S3 or loading from a local json file. """
cache_file = os.path.join(self.directory, 'crawl_available_files_cache.json')
if force_use_s3 or (not self.config.get("test", False) and not self.config.get('local', False)):
objects = crawl_available_files(self.config, bucket)
if self.config.get('local', False) or self.config.get('forceSaveCrawlAvailableFiles', False):
self._cache_s3_call(cache_file, objects)
return objects
elif os.path.exists(cache_file):
with io.open(cache_file, 'r', encoding='utf-8') as json_file:
return json.load(json_file)
else:
return {}
| 62.340314
| 259
| 0.646342
| 1,505
| 11,907
| 4.780731
| 0.163455
| 0.033357
| 0.022516
| 0.015566
| 0.361084
| 0.261015
| 0.218485
| 0.148436
| 0.105073
| 0.082557
| 0
| 0.004741
| 0.256068
| 11,907
| 190
| 260
| 62.668421
| 0.807519
| 0.099773
| 0
| 0.117647
| 0
| 0
| 0.12948
| 0.05076
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047059
| false
| 0
| 0.076471
| 0
| 0.164706
| 0.011765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7be095f1c9c4b3f5f33d92d1c96cc497d62846c5
| 40,240
|
py
|
Python
|
sampledb/frontend/projects.py
|
NicolasCARPi/sampledb
|
d6fd0f4d28d05010d7e0c022fbf2576e25435077
|
[
"MIT"
] | null | null | null |
sampledb/frontend/projects.py
|
NicolasCARPi/sampledb
|
d6fd0f4d28d05010d7e0c022fbf2576e25435077
|
[
"MIT"
] | null | null | null |
sampledb/frontend/projects.py
|
NicolasCARPi/sampledb
|
d6fd0f4d28d05010d7e0c022fbf2576e25435077
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
"""
import flask
import flask_login
import json
from flask_babel import _
from . import frontend
from .. import logic
from ..logic.object_permissions import Permissions
from ..logic.security_tokens import verify_token
from ..logic.languages import get_languages, get_language, get_language_by_lang_code
from ..models.languages import Language
from .projects_forms import CreateProjectForm, EditProjectForm, LeaveProjectForm, InviteUserToProjectForm, InviteGroupToProjectForm, AddSubprojectForm, RemoveSubprojectForm, DeleteProjectForm, RemoveProjectMemberForm, RemoveProjectGroupForm, ObjectLinkForm
from .permission_forms import PermissionsForm
from .utils import check_current_user_is_not_readonly
from ..logic.utils import get_translated_text
@frontend.route('/projects/<int:project_id>', methods=['GET', 'POST'])
@flask_login.login_required
def project(project_id):
if 'token' in flask.request.args:
token = flask.request.args.get('token')
expiration_time_limit = flask.current_app.config['INVITATION_TIME_LIMIT']
token_data = verify_token(token, salt='invite_to_project', secret_key=flask.current_app.config['SECRET_KEY'], expiration=expiration_time_limit)
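# Invitation tokens are signed with the application's secret key and rejected once the configured time limit has expired.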
if token_data is None:
flask.flash(_('Invalid project group invitation token. Please request a new invitation.'), 'error')
return flask.abort(403)
if 'invitation_id' in token_data:
if logic.projects.get_project_invitation(token_data['invitation_id']).accepted:
flask.flash(_('This invitation token has already been used. Please request a new invitation.'), 'error')
return flask.abort(403)
if token_data.get('project_id', None) != project_id:
return flask.abort(403)
permissions = Permissions.from_value(token_data.get('permissions', Permissions.READ.value))
if permissions == Permissions.NONE:
flask.flash(_('Invalid permissions in project group invitation token. Please request a new invitation.'), 'error')
return flask.abort(403)
user_id = token_data.get('user_id', None)
if user_id != flask_login.current_user.id:
if user_id is not None:
try:
invited_user = logic.users.get_user(user_id)
flask.flash(_('Please sign in as user "%(user_name)s" to accept this invitation.', user_name=invited_user.name), 'error')
except logic.errors.UserDoesNotExistError:
pass
return flask.abort(403)
if not flask.current_app.config['DISABLE_SUBPROJECTS']:
other_project_ids = token_data.get('other_project_ids', [])
for notification in logic.notifications.get_notifications(user_id, unread_only=True):
if notification.type == logic.notifications.NotificationType.INVITED_TO_PROJECT:
if notification.data['project_id'] == project_id:
logic.notifications.mark_notification_as_read(notification.id)
else:
other_project_ids = []
try:
logic.projects.add_user_to_project(project_id, user_id, permissions, other_project_ids=other_project_ids)
except logic.errors.UserAlreadyMemberOfProjectError:
flask.flash(_('You are already a member of this project group.'), 'error')
except logic.errors.ProjectDoesNotExistError:
pass
allowed_language_ids = [
language.id
for language in get_languages(only_enabled_for_input=True)
]
user_id = flask_login.current_user.id
try:
project = logic.projects.get_project(project_id)
except logic.errors.ProjectDoesNotExistError:
return flask.abort(404)
user_permissions = logic.projects.get_user_project_permissions(project_id=project_id, user_id=user_id, include_groups=True)
if Permissions.READ in user_permissions:
show_objects_link = True
else:
show_objects_link = False
if Permissions.READ in user_permissions:
leave_project_form = LeaveProjectForm()
else:
leave_project_form = None
translations = []
name_language_ids = []
description_language_ids = []
if Permissions.WRITE in user_permissions:
edit_project_form = EditProjectForm()
for name in project.name.items():
lang_id = get_language_by_lang_code(name[0]).id
name_language_ids.append(lang_id)
item = {
'language_id': lang_id,
'lang_name': get_translated_text(get_language(lang_id).names),
'name': name[1],
'description': ''
}
translations.append(item)
for description in project.description.items():
if description[0] == '':
continue
lang_id = get_language_by_lang_code(description[0]).id
description_language_ids.append(lang_id)
for translation in translations:
if lang_id == translation['language_id']:
translation['description'] = description[1]
break
else:
item = {
'language_id': lang_id,
'lang_name': get_translated_text(get_language(lang_id).names),
'name': '',
'description': description[1]
}
translations.append(item)
else:
edit_project_form = None
show_edit_form = False
english = get_language(Language.ENGLISH)
project_member_user_ids_and_permissions = logic.projects.get_project_member_user_ids_and_permissions(project_id=project_id, include_groups=False)
project_member_group_ids_and_permissions = logic.projects.get_project_member_group_ids_and_permissions(project_id=project_id)
project_member_user_ids = list(project_member_user_ids_and_permissions.keys())
project_member_user_ids.sort(key=lambda user_id: logic.users.get_user(user_id).name.lower())
project_member_group_ids = list(project_member_group_ids_and_permissions.keys())
project_member_group_ids.sort(key=lambda group_id: get_translated_text(logic.groups.get_group(group_id).name).lower())
if Permissions.GRANT in user_permissions:
invitable_user_list = []
for user in logic.users.get_users(exclude_hidden=True, exclude_fed=True):
if user.id not in project_member_user_ids_and_permissions:
invitable_user_list.append(user)
parent_projects_with_add_permissions = logic.projects.get_ancestor_project_ids(project_id, only_if_child_can_add_users_to_ancestor=True)
else:
invitable_user_list = []
parent_projects_with_add_permissions = []
if invitable_user_list:
other_project_ids_data = []
for parent_project_id in parent_projects_with_add_permissions:
other_project_ids_data.append({'project_id': parent_project_id})
invite_user_form = InviteUserToProjectForm(other_project_ids=other_project_ids_data)
else:
invite_user_form = None
if Permissions.GRANT in user_permissions:
invitable_group_list = []
for group in logic.groups.get_user_groups(user_id):
if group.id not in project_member_group_ids_and_permissions:
invitable_group_list.append(group)
else:
invitable_group_list = []
if invitable_group_list:
other_project_ids_data = []
for parent_project_id in parent_projects_with_add_permissions:
other_project_ids_data.append({'project_id': parent_project_id})
invite_group_form = InviteGroupToProjectForm(other_project_ids=other_project_ids_data)
else:
invite_group_form = None
child_project_ids = logic.projects.get_child_project_ids(project_id)
child_project_ids_can_add_to_parent = {child_project_id: logic.projects.can_child_add_users_to_parent_project(parent_project_id=project_id, child_project_id=child_project_id) for child_project_id in child_project_ids}
parent_project_ids = logic.projects.get_parent_project_ids(project_id)
add_subproject_form = None
remove_subproject_form = None
delete_project_form = None
remove_project_member_form = None
remove_project_group_form = None
addable_projects = []
addable_project_ids = []
if Permissions.GRANT in user_permissions:
for other_project in logic.projects.get_user_projects(flask_login.current_user.id, include_groups=True):
if other_project.id == project.id:
continue
if Permissions.GRANT in logic.projects.get_user_project_permissions(other_project.id, flask_login.current_user.id, include_groups=True):
addable_projects.append(other_project)
addable_project_ids = logic.projects.filter_child_project_candidates(project_id, [child_project.id for child_project in addable_projects])
addable_projects = [logic.projects.get_project(child_project_id) for child_project_id in addable_project_ids]
if addable_projects:
add_subproject_form = AddSubprojectForm()
if child_project_ids:
remove_subproject_form = RemoveSubprojectForm()
delete_project_form = DeleteProjectForm()
remove_project_member_form = RemoveProjectMemberForm()
remove_project_group_form = RemoveProjectGroupForm()
project_invitations = None
show_invitation_log = flask_login.current_user.is_admin and logic.settings.get_user_settings(flask_login.current_user.id)['SHOW_INVITATION_LOG']
if Permissions.GRANT in user_permissions or flask_login.current_user.is_admin:
project_invitations = logic.projects.get_project_invitations(
project_id=project_id,
include_accepted_invitations=show_invitation_log,
include_expired_invitations=show_invitation_log
)
object = logic.projects.get_object_linked_to_project(project_id)
if 'leave' in flask.request.form and Permissions.READ in user_permissions:
if leave_project_form.validate_on_submit():
try:
logic.projects.remove_user_from_project(project_id=project_id, user_id=user_id)
except logic.errors.ProjectDoesNotExistError:
flask.flash(_('This project group does not exist.'), 'error')
return flask.redirect(flask.url_for('.projects'))
except logic.errors.UserDoesNotExistError:
return flask.abort(500)
except logic.errors.UserNotMemberOfProjectError:
flask.flash(_('You have already left the project group.'), 'error')
return flask.redirect(flask.url_for('.projects'))
except logic.errors.NoMemberWithGrantPermissionsForProjectError:
flask.flash(_('You cannot leave this project group, because you are the only user with GRANT permissions.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
else:
flask.flash(_('You have successfully left the project group.'), 'success')
# create object log entry if this caused the deletion of a project linked to an object
try:
logic.projects.get_project(project_id)
except logic.errors.ProjectDoesNotExistError:
if object is not None:
logic.object_log.unlink_project(
flask_login.current_user.id,
object.id,
project_id,
project_deleted=True
)
return flask.redirect(flask.url_for('.projects'))
if 'delete' in flask.request.form and Permissions.GRANT in user_permissions:
if delete_project_form.validate_on_submit():
check_current_user_is_not_readonly()
# create object log entry if deleting a project linked to an object
if object is not None:
logic.object_log.unlink_project(
flask_login.current_user.id,
object.id,
project_id,
project_deleted=True
)
try:
logic.projects.delete_project(project_id=project_id)
except logic.errors.ProjectDoesNotExistError:
flask.flash(_('This project group has already been deleted.'), 'success')
return flask.redirect(flask.url_for('.projects'))
else:
flask.flash(_('You have successfully deleted the project group.'), 'success')
return flask.redirect(flask.url_for('.projects'))
if 'remove_member' in flask.request.form and Permissions.GRANT in user_permissions:
if remove_project_member_form.validate_on_submit():
check_current_user_is_not_readonly()
member_id_str = flask.request.form['remove_member']
try:
member_id = int(member_id_str)
except ValueError:
flask.flash(_('The member ID was invalid. Please contact an administrator.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
if member_id == flask_login.current_user.id:
flask.flash(_('You cannot remove yourself from a project group. Please press "Leave Project Group" instead.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
try:
logic.projects.remove_user_from_project(project_id=project_id, user_id=member_id)
except logic.errors.ProjectDoesNotExistError:
flask.flash(_('This project group does not exist.'), 'error')
return flask.redirect(flask.url_for('.projects'))
except logic.errors.UserDoesNotExistError:
flask.flash(_('This user does not exist.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
except logic.errors.UserNotMemberOfProjectError:
flask.flash(_('This user is not a member of this project group.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
except logic.errors.NoMemberWithGrantPermissionsForProjectError:
flask.flash(_('You cannot remove this user from this project group, because they are the only user with GRANT permissions.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
else:
flask.flash(_('You have successfully removed this user from the project group.'), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
if 'remove_group' in flask.request.form and Permissions.GRANT in user_permissions:
if remove_project_group_form.validate_on_submit():
check_current_user_is_not_readonly()
group_id_str = flask.request.form['remove_group']
try:
group_id = int(group_id_str)
except ValueError:
flask.flash(_('The basic group ID was invalid. Please contact an administrator.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
try:
logic.projects.remove_group_from_project(project_id=project_id, group_id=group_id)
except logic.errors.ProjectDoesNotExistError:
flask.flash(_('This project group does not exist.'), 'error')
return flask.redirect(flask.url_for('.projects'))
except logic.errors.GroupDoesNotExistError:
flask.flash(_('This basic group does not exist.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
except logic.errors.GroupNotMemberOfProjectError:
flask.flash(_('This basic group is not a member of this project group.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
except logic.errors.NoMemberWithGrantPermissionsForProjectError:
flask.flash(_('You cannot remove this basic group from this project group, because they are the only basic group with GRANT permissions.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
else:
flask.flash(_('You have successfully removed this basic group from the project group.'), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
if 'edit' in flask.request.form and Permissions.WRITE in user_permissions:
show_edit_form = True
if edit_project_form.validate_on_submit():
check_current_user_is_not_readonly()
try:
translations = json.loads(edit_project_form.translations.data)
if not translations:
raise ValueError(_('Please enter at least an English name.'))
names = {}
descriptions = {}
for translation in translations:
name = translation['name'].strip()
description = translation['description'].strip()
language_id = int(translation['language_id'])
if language_id not in allowed_language_ids:
continue
if language_id == Language.ENGLISH:
if name == '':
raise ValueError(_('Please enter at least an English name.'))
elif name == '' and description == '':
continue
lang_code = get_language(language_id).lang_code
names[lang_code] = name
if description != '':
descriptions[lang_code] = description
else:
descriptions[lang_code] = ''
logic.projects.update_project(project_id, names, descriptions)
except ValueError as e:
flask.flash(str(e), 'error')
edit_project_form.translations.errors.append(str(e))
except logic.errors.ProjectDoesNotExistError:
flask.flash(_('This project group does not exist.'), 'error')
return flask.redirect(flask.url_for('.projects'))
except logic.errors.ProjectAlreadyExistsError:
edit_project_form.name.errors.append(_('A project group with this name already exists.'))
except logic.errors.InvalidProjectNameError:
edit_project_form.name.errors.append(_('This project group name is invalid.'))
else:
flask.flash(_('Project group information updated successfully.'), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
if 'add_user' in flask.request.form and Permissions.GRANT in user_permissions:
if invite_user_form.validate_on_submit():
check_current_user_is_not_readonly()
if not any(user.id == invite_user_form.user_id.data for user in invitable_user_list):
flask.flash(_('You cannot add this user.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
permissions = Permissions.from_value(invite_user_form.permissions.data)
if Permissions.READ not in permissions:
flask.flash(_('Please select read permissions (or higher) for the invitation.'), 'error')
return flask.redirect(flask.url_for('.projects'))
try:
other_project_ids = []
for other_project_id_form in invite_user_form.other_project_ids:
try:
if other_project_id_form.add_user.data:
other_project_ids.append(int(other_project_id_form.project_id.data))
except (KeyError, ValueError):
pass
logic.projects.invite_user_to_project(project_id, invite_user_form.user_id.data, flask_login.current_user.id, other_project_ids, permissions)
except logic.errors.ProjectDoesNotExistError:
flask.flash(_('This project group does not exist.'), 'error')
return flask.redirect(flask.url_for('.projects'))
except logic.errors.UserDoesNotExistError:
flask.flash(_('This user does not exist.'), 'error')
except logic.errors.UserAlreadyMemberOfProjectError:
flask.flash(_('This user is already a member of this project group.'), 'error')
else:
flask.flash(_('The user was successfully invited to the project group.'), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
if 'add_group' in flask.request.form and Permissions.GRANT in user_permissions:
if invite_group_form.validate_on_submit():
check_current_user_is_not_readonly()
if not any(group.id == invite_group_form.group_id.data for group in invitable_group_list):
flask.flash(_('You cannot add this basic group.'), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
try:
logic.projects.add_group_to_project(project_id, invite_group_form.group_id.data, permissions=Permissions.READ)
except logic.errors.ProjectDoesNotExistError:
flask.flash(_('This project group does not exist.'), 'error')
return flask.redirect(flask.url_for('.projects'))
except logic.errors.GroupDoesNotExistError:
flask.flash(_('This basic group does not exist.'), 'error')
except logic.errors.GroupAlreadyMemberOfProjectError:
flask.flash(_('This basic group is already a member of this project group.'), 'error')
else:
flask.flash(_('The basic group was successfully added to the project group.'), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
if not flask.current_app.config['DISABLE_SUBPROJECTS']:
if 'remove_subproject' in flask.request.form and Permissions.GRANT in user_permissions:
if remove_subproject_form is not None and remove_subproject_form.validate_on_submit():
check_current_user_is_not_readonly()
child_project_id = remove_subproject_form.child_project_id.data
if child_project_id not in child_project_ids:
flask.flash(_('Project group #%(child_project_id)s is not a child of this project group.', child_project_id=int(child_project_id)), 'error')
else:
logic.projects.delete_subproject_relationship(project_id, child_project_id)
flask.flash(_('The child project group was successfully removed from this project group.'), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
if 'add_subproject' in flask.request.form and Permissions.GRANT in user_permissions:
if add_subproject_form is not None and add_subproject_form.validate_on_submit():
check_current_user_is_not_readonly()
child_project_id = add_subproject_form.child_project_id.data
if child_project_id not in addable_project_ids:
flask.flash(_('Project group #%(child_project_id)s cannot become a child of this project group.', child_project_id=int(child_project_id)), 'error')
else:
logic.projects.create_subproject_relationship(project_id, child_project_id, child_can_add_users_to_parent=add_subproject_form.child_can_add_users_to_parent.data)
flask.flash(_('The child project group was successfully added to this project group.'), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
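# prepare the data needed for linking this project group to an object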
object_id = object.id if object is not None else None
object_action = None
object_link_form = None
linkable_objects = []
linkable_action_ids = []
already_linked_object_ids = []
if Permissions.GRANT in user_permissions and not flask_login.current_user.is_readonly:
object_link_form = ObjectLinkForm()
if object is None:
already_linked_object_ids = [link[1] for link in logic.projects.get_project_object_links()]
for action_type in logic.actions.get_action_types():
if action_type.enable_project_link:
linkable_action_ids.extend([
action.id
for action in logic.actions.get_actions(action_type.id)
])
if not flask.current_app.config['LOAD_OBJECTS_IN_BACKGROUND']:
for object_info in logic.object_permissions.get_object_info_with_permissions(user_id, Permissions.GRANT, action_type_id=action_type.id):
if object_info.object_id not in already_linked_object_ids:
linkable_objects.append((object_info.object_id, get_translated_text(object_info.name_json)))
if object is not None:
object_permissions = logic.object_permissions.get_user_object_permissions(object.object_id, flask_login.current_user.id)
if Permissions.READ in object_permissions:
object_action = logic.actions.get_action(object.action_id)
else:
object = None
return flask.render_template(
'projects/project.html',
ENGLISH=english,
translations=translations,
languages=get_languages(only_enabled_for_input=True),
name_language_ids=name_language_ids,
description_language_ids=description_language_ids,
get_user=logic.users.get_user,
get_group=logic.groups.get_group,
get_project=logic.projects.get_project,
project=project,
project_member_user_ids=project_member_user_ids,
project_member_group_ids=project_member_group_ids,
project_member_user_ids_and_permissions=project_member_user_ids_and_permissions,
project_member_group_ids_and_permissions=project_member_group_ids_and_permissions,
project_invitations=project_invitations,
show_invitation_log=show_invitation_log,
object=object,
object_id=object_id,
object_link_form=object_link_form,
linkable_action_ids=linkable_action_ids,
already_linked_object_ids=already_linked_object_ids,
linkable_objects=linkable_objects,
object_action=object_action,
leave_project_form=leave_project_form,
delete_project_form=delete_project_form,
remove_project_member_form=remove_project_member_form,
remove_project_group_form=remove_project_group_form,
edit_project_form=edit_project_form,
show_edit_form=show_edit_form,
invite_user_form=invite_user_form,
invitable_user_list=invitable_user_list,
invite_group_form=invite_group_form,
invitable_group_list=invitable_group_list,
show_objects_link=show_objects_link,
child_project_ids=child_project_ids,
child_project_ids_can_add_to_parent=child_project_ids_can_add_to_parent,
parent_project_ids=parent_project_ids,
add_subproject_form=add_subproject_form,
addable_projects=addable_projects,
remove_subproject_form=remove_subproject_form,
user_may_edit_permissions=Permissions.GRANT in user_permissions,
)
@frontend.route('/projects/', methods=['GET', 'POST'])
@flask_login.login_required
def projects():
user_id = None
if 'user_id' in flask.request.args:
try:
user_id = int(flask.request.args['user_id'])
except ValueError:
pass
if user_id is not None:
if user_id != flask_login.current_user.id and not flask_login.current_user.is_admin:
return flask.abort(403)
projects = logic.projects.get_user_projects(user_id)
else:
projects = logic.projects.get_projects()
for project in projects:
project.permissions = logic.projects.get_user_project_permissions(project_id=project.id, user_id=flask_login.current_user.id, include_groups=True)
create_project_form = CreateProjectForm()
show_create_form = False
if 'create' in flask.request.form:
allowed_language_ids = [
language.id
for language in get_languages(only_enabled_for_input=True)
]
show_create_form = True
if create_project_form.validate_on_submit():
check_current_user_is_not_readonly()
if flask_login.current_user.is_admin or not flask.current_app.config['ONLY_ADMINS_CAN_CREATE_PROJECTS']:
try:
translations = json.loads(create_project_form.translations.data)
if not translations:
raise ValueError(_('Please enter at least an English name.'))
names = {}
descriptions = {}
for translation in translations:
name = translation['name'].strip()
description = translation['description'].strip()
language_id = int(translation['language_id'])
if language_id not in allowed_language_ids:
continue
if language_id == Language.ENGLISH:
if name == '':
raise ValueError(_('Please enter at least an English name.'))
lang_code = get_language(language_id).lang_code
names[lang_code] = name
if description != '':
descriptions[lang_code] = description
else:
descriptions[lang_code] = ''
project_id = logic.projects.create_project(names, descriptions, flask_login.current_user.id).id
except ValueError as e:
flask.flash(str(e), 'error')
create_project_form.translations.errors.append(str(e))
except logic.errors.ProjectAlreadyExistsError:
create_project_form.translations.errors.append(_('A project group with this name already exists.'))
except logic.errors.InvalidProjectNameError:
create_project_form.translations.errors.append(_('This project group name is invalid.'))
else:
flask.flash(_('The project group has been created successfully.'), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
else:
create_project_form.translations.errors.append(_('Only administrators can create project groups.'))
projects_by_id = {
project.id: project
for project in projects
}
project_id_hierarchy_list = logic.projects.get_project_id_hierarchy_list(list(projects_by_id))
english = get_language(Language.ENGLISH)
return flask.render_template(
"projects/projects.html",
create_project_form=create_project_form,
show_create_form=show_create_form,
Permissions=logic.projects.Permissions,
projects_by_id=projects_by_id,
project_id_hierarchy_list=project_id_hierarchy_list,
languages=get_languages(only_enabled_for_input=True),
ENGLISH=english
)
@frontend.route('/projects/<int:project_id>/permissions')
@flask_login.login_required
def project_permissions(project_id):
try:
project = logic.projects.get_project(project_id)
except logic.errors.ProjectDoesNotExistError:
return flask.abort(404)
user_permissions = logic.projects.get_project_member_user_ids_and_permissions(project_id, include_groups=False)
group_permissions = logic.projects.get_project_member_group_ids_and_permissions(project_id)
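# only users with GRANT permissions get the permission editing and deletion forms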
if Permissions.GRANT in logic.projects.get_user_project_permissions(project_id=project_id, user_id=flask_login.current_user.id, include_groups=True):
delete_project_form = DeleteProjectForm()
user_permission_form_data = []
for user_id, permissions in sorted(user_permissions.items()):
if user_id is None:
continue
user_permission_form_data.append({'user_id': user_id, 'permissions': permissions.name.lower()})
group_permission_form_data = []
for group_id, permissions in sorted(group_permissions.items()):
if group_id is None:
continue
group_permission_form_data.append({'group_id': group_id, 'permissions': permissions.name.lower()})
permissions_form = PermissionsForm(user_permissions=user_permission_form_data, group_permissions=group_permission_form_data)
# disable permissions for all users and other projects
permissions_form.all_user_permissions.choices = [('none', Permissions.NONE)]
permissions_form.project_permissions.max_entries = 0
else:
delete_project_form = None
permissions_form = None
return flask.render_template(
'projects/project_permissions.html',
project=project,
delete_project_form=delete_project_form,
user_permissions=user_permissions,
group_permissions=group_permissions,
get_user=logic.users.get_user,
get_group=logic.groups.get_group,
Permissions=Permissions,
permissions_form=permissions_form
)
@frontend.route('/projects/<int:project_id>/permissions', methods=['POST'])
@flask_login.login_required
def update_project_permissions(project_id):
check_current_user_is_not_readonly()
try:
if Permissions.GRANT not in logic.projects.get_user_project_permissions(project_id, flask_login.current_user.id, include_groups=True):
return flask.abort(403)
except logic.errors.ProjectDoesNotExistError:
return flask.abort(404)
permissions_form = PermissionsForm()
# disable permissions for all users and other projects
permissions_form.all_user_permissions.choices = [('none', Permissions.NONE)]
permissions_form.project_permissions.max_entries = 0
if 'edit_permissions' in flask.request.form and permissions_form.validate_on_submit():
# First handle GRANT updates, then others (to prevent temporarily not having a GRANT user)
for user_permissions_data in sorted(permissions_form.user_permissions.data, key=lambda upd: upd['permissions'] != 'grant'):
user_id = user_permissions_data['user_id']
try:
logic.users.get_user(user_id)
except logic.errors.UserDoesNotExistError:
continue
permissions = Permissions.from_name(user_permissions_data['permissions'])
try:
logic.projects.update_user_project_permissions(project_id=project_id, user_id=user_id, permissions=permissions)
except logic.errors.NoMemberWithGrantPermissionsForProjectError:
continue
for group_permissions_data in permissions_form.group_permissions.data:
group_id = group_permissions_data['group_id']
try:
logic.groups.get_group(group_id)
except logic.errors.GroupDoesNotExistError:
continue
permissions = Permissions.from_name(group_permissions_data['permissions'])
logic.projects.update_group_project_permissions(project_id=project_id, group_id=group_id, permissions=permissions)
flask.flash(_("Successfully updated project group permissions."), 'success')
else:
flask.flash(_("A problem occurred while changing the project group permissions. Please try again."), 'error')
try:
logic.projects.get_project(project_id)
except logic.errors.ProjectDoesNotExistError:
return flask.redirect(flask.url_for('.projects'))
return flask.redirect(flask.url_for('.project_permissions', project_id=project_id))
@frontend.route('/projects/<int:project_id>/object_link', methods=['POST'])
@flask_login.login_required
def link_object(project_id):
check_current_user_is_not_readonly()
object_link_form = ObjectLinkForm()
if not object_link_form.validate_on_submit():
flask.flash(_("Missing or invalid object ID."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
object_id = object_link_form.object_id.data
try:
if Permissions.GRANT not in logic.projects.get_user_project_permissions(project_id, flask_login.current_user.id, True):
flask.flash(_("You do not have GRANT permissions for this project group."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
if Permissions.GRANT not in logic.object_permissions.get_user_object_permissions(object_id, flask_login.current_user.id):
flask.flash(_("You do not have GRANT permissions for this object."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
logic.projects.link_project_and_object(project_id, object_id, flask_login.current_user.id)
except logic.errors.ProjectObjectLinkAlreadyExistsError:
flask.flash(_("Project group is already linked to an object or object is already linked to a project group."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
except logic.errors.ProjectDoesNotExistError:
flask.flash(_("Project group does not exist."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
except logic.errors.ObjectDoesNotExistError:
flask.flash(_("Object does not exist."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
flask.flash(_("Successfully linked the object to a project group."), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
@frontend.route('/projects/<int:project_id>/object_unlink', methods=['POST'])
@flask_login.login_required
def unlink_object(project_id):
check_current_user_is_not_readonly()
object_link_form = ObjectLinkForm()
if not object_link_form.validate_on_submit():
flask.flash(_("Missing or invalid object ID."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
object_id = object_link_form.object_id.data
try:
if Permissions.GRANT not in logic.projects.get_user_project_permissions(project_id, flask_login.current_user.id, True):
flask.flash(_("You do not have GRANT permissions for this project group."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
logic.projects.unlink_project_and_object(project_id, object_id, flask_login.current_user.id)
except logic.errors.ProjectObjectLinkDoesNotExistsError:
flask.flash(_("No link exists between this object and project group."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
except logic.errors.ProjectDoesNotExistError:
flask.flash(_("Project group does not exist."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
except logic.errors.ObjectDoesNotExistError:
flask.flash(_("Object does not exist."), 'error')
return flask.redirect(flask.url_for('.project', project_id=project_id))
flask.flash(_("Successfully unlinked the object and project group."), 'success')
return flask.redirect(flask.url_for('.project', project_id=project_id))
[record footer: per-file quality-signal metrics omitted]
[next record: setup.py (repo caalle/caaalle @ 3653155338fefde73579508ee83905a8ad8e3924, hexsha 7be58215b629ccdaed1b12b4ee8ac016d5bf374b, 1,474 bytes, Python, license Apache-2.0, 4 stars, 2021-04-26)]
#!/usr/bin/env python3
import codecs
import os
import re
from setuptools import setup
with open('README.md', 'r') as f:
readme = f.read()
here = os.path.abspath(os.path.dirname(__file__))
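# helpers to read files relative to this setup.py and to pull __version__
# out of the package source without importing it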
def read(*parts):
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
_title = 'caaalle'
_description = 'caaalle'
_author = 'Carl Larsson'
_author_email = 'example@gmail.com'
_license = 'Apache 2.0'
_url = 'https://github.com/caalle/caaalle'
setup(
name=_title,
description=_description,
long_description=readme,
long_description_content_type='text/markdown',
version=find_version("caaalle", "__init__.py"),
author=_author,
author_email=_author_email,
url=_url,
packages=['caaalle'],
include_package_data=True,
python_requires=">=3.5",  # '>=3.5.*' is invalid under PEP 440; wildcards are only allowed with == or !=
install_requires=[],
license=_license,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.5'
],
keywords='caaalle'
)
[record footer: per-file quality-signal metrics omitted]
[next record: kevlar/tests/test_novel.py (repo johnsmith2077/kevlar @ 3ed06dae62479e89ccd200391728c416d4df8052, fork source standage/kevlar @ 622d1869266550422e91a60119ddc7261eea434a, hexsha 7be827f0693117abffb3e3ef853dcd8e6d5807a0, 10,522 bytes, Python, license MIT, 24 stars, 2016-12-07 to 2019-03-11)]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
import filecmp
import glob
import json
import pytest
import re
from tempfile import NamedTemporaryFile, mkdtemp
import screed
from shutil import rmtree
import sys
import kevlar
from kevlar.tests import data_file, data_glob
from khmer import Counttable
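# numbands and band must be passed together, and band must be in range(numbands)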
def test_novel_banding_args():
errormsg = r'Must specify `numbands` and `band` together'
with pytest.raises(ValueError, match=errormsg):
reads = list(kevlar.novel.novel(None, [], [], numbands=4))
with pytest.raises(ValueError, match=errormsg):
reads = list(kevlar.novel.novel(None, [], [], band=0))
errormsg = r'`band` must be a value between 0 and 3'
with pytest.raises(ValueError, match=errormsg):
reads = list(kevlar.novel.novel(None, [], [], numbands=4, band=-1))
def test_cli():
args = kevlar.cli.parser().parse_args([
'novel', '--case', 'case1.fq', '--control', 'cntl1.fq', '--control',
'cntl2.fq', '-k', '17',
])
assert args.ksize == 17
assert args.case_min == 6
assert args.ctrl_max == 1
assert args.num_bands is None
assert args.band is None
args = kevlar.cli.parser().parse_args([
'novel', '--num-bands', '8', '--band', '1', '--case', 'case1.fq',
'--control', 'cntl1.fq', '--control', 'cntl2.fq',
])
assert args.ksize == 31
assert args.case_min == 6
assert args.ctrl_max == 1
assert args.num_bands == 8
assert args.band == 1
errormsg = r'Must specify --num-bands and --band together'
with pytest.raises(ValueError, match=errormsg):
args = kevlar.cli.parser().parse_args([
'novel', '--case', 'case1.fq', '--control', 'cntl1.fq',
'--band', '1'
])
kevlar.novel.main(args)
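# the Counttable hash must be strand-agnostic: a k-mer and its reverse complement share a hash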
@pytest.mark.parametrize('kmer', [
('ACCGTACAA' * 3),
('TTATAATAG' * 3),
('CGAAAAATT' * 3),
])
def test_assumptions(kmer):
ct = Counttable(27, 1e5, 2)
kmer_rc = kevlar.revcom(kmer)
assert ct.hash(kmer) == ct.hash(kmer_rc)
assert ct.get_kmer_hashes(kmer)[0] == ct.get_kmer_hashes(kmer_rc)[0]
@pytest.mark.parametrize('case,ctrl', [
('microtrios/trio-li-proband.fq.gz', 'microtrios/trio-li-??ther.fq.gz'),
('microtrios/trio-na-proband.fq.gz', 'microtrios/trio-na-??ther.fq.gz'),
('microtrios/trio-k-proband.fq.gz', 'microtrios/trio-k-??ther.fq.gz'),
])
def test_novel_single_mutation(case, ctrl, capsys):
casestr = data_file(case)
ctrls = kevlar.tests.data_glob(ctrl)
arglist = ['novel', '--case', casestr, '--ksize', '25', '--case-min', '7',
'--control', ctrls[0], '--control', ctrls[1],
'--num-bands', '2', '--band', '2',
'--ctrl-max', '0', '--memory', '500K']
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
out, err = capsys.readouterr()
for line in out.split('\n'):
if not line.endswith('#') or line.startswith('#mateseq'):
continue
abundmatch = re.search(r'(\d+) (\d+) (\d+)#$', line)
assert abundmatch, line
case = int(abundmatch.group(1))
ctl1 = int(abundmatch.group(2))
ctl2 = int(abundmatch.group(3))
assert case >= 7, line
assert ctl1 == 0 and ctl2 == 0, line
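# build count tables first with 'kevlar count', then run 'kevlar novel' against the precomputed counts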
def test_novel_two_cases(capsys):
cases = kevlar.tests.data_glob('trio1/case6*.fq')
controls = kevlar.tests.data_glob('trio1/ctrl[5,6].fq')
with NamedTemporaryFile(suffix='.ct') as case1ct, \
NamedTemporaryFile(suffix='.ct') as case2ct, \
NamedTemporaryFile(suffix='.ct') as ctrl1ct, \
NamedTemporaryFile(suffix='.ct') as ctrl2ct:
counttables = [case1ct, case2ct, ctrl1ct, ctrl2ct]
seqfiles = cases + controls
for ct, seqfile in zip(counttables, seqfiles):
arglist = ['count', '--ksize', '19', '--memory', '1e7', ct.name,
seqfile]
print(arglist)
args = kevlar.cli.parser().parse_args(arglist)
kevlar.count.main(args)
arglist = ['novel', '--ksize', '19', '--memory', '1e7',
'--ctrl-max', '1', '--case-min', '7',
'--case', cases[0], '--case', cases[1],
'--case-counts', case1ct.name, case2ct.name,
'--control-counts', ctrl1ct.name, ctrl2ct.name]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
out, err = capsys.readouterr()
assert out.strip() != ''
for line in out.split('\n'):
if not line.endswith('#') or line.startswith('#mateseq'):
continue
abundmatch = re.search(r'(\d+) (\d+) (\d+) (\d+)#$', line)
assert abundmatch, line
case1 = int(abundmatch.group(1))
case2 = int(abundmatch.group(2))
ctl1 = int(abundmatch.group(3))
ctl2 = int(abundmatch.group(4))
assert case1 >= 7 and case2 >= 7
assert ctl1 <= 1 and ctl2 <= 1
def test_kmer_rep_in_read(capsys):
from sys import stdout
read = ('AGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGATGAGGAT'
'GAGGATGAGGATGAGGAT')
record = kevlar.sequence.Record(name='reqseq', sequence=read)
record.annotate('GATGAGGATGAGGATGAGGATGAGG', 2, (11, 1, 0))
record.annotate('GATGAGGATGAGGATGAGGATGAGG', 8, (11, 1, 0))
kevlar.print_augmented_fastx(record, stdout)
out, err = capsys.readouterr()
assert read in out
def test_iter_read_multi_file():
infiles = kevlar.tests.data_glob('bogus-genome/mask-chr[1,2].fa')
print(infiles)
records = [r for r in kevlar.multi_file_iter_khmer(infiles)]
assert len(records) == 4
def test_novel_abund_screen(capsys):
case = data_file('screen-case.fa')
ctrl = data_file('screen-ctrl.fa')
arglist = ['novel', '--ksize', '25', '--ctrl-max', '1', '--case-min', '8',
'--case', case, '--control', ctrl, '--abund-screen', '3']
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
out, err = capsys.readouterr()
assert '>seq_error' not in out
def test_skip_until(capsys):
readname = 'bogus-genome-chr1_115_449_0:0:0_0:0:0_1f4/1'
case = data_file('trio1/case1.fq')
ctrls = kevlar.tests.data_glob('trio1/ctrl[1,2].fq')
arglist = [
'novel', '--ctrl-max', '0', '--case-min', '6', '--case', case,
'--control', ctrls[0], '--control', ctrls[1], '--skip-until', readname
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.logstream, logstream = sys.stderr, kevlar.logstream
kevlar.novel.main(args)
out, err = capsys.readouterr()
message = ('Found read bogus-genome-chr1_115_449_0:0:0_0:0:0_1f4/1 '
'(skipped 1001 reads)')
assert message in err
assert '29 unique novel kmers in 14 reads' in err
readname = 'BOGUSREADNAME'
arglist = [
'novel', '--ctrl-max', '0', '--case-min', '6', '--case', case,
'--control', ctrls[0], '--control', ctrls[1], '--skip-until', readname
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
kevlar.logstream = logstream
out, err = capsys.readouterr()
assert 'Found read' not in err
assert '(skipped ' not in err
assert 'Found 0 instances of 0 unique novel kmers in 0 reads' in err
def test_novel_save_counts():
outdir = mkdtemp()
try:
for ind in ('father', 'mother', 'proband'):
outfile = '{:s}/{:s}.ct'.format(outdir, ind)
infile = data_file('microtrios/trio-na-{:s}.fq.gz'.format(ind))
arglist = ['count', '--ksize', '27', '--memory', '500K', outfile,
infile]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.count.main(args)
arglist = [
'novel', '--ksize', '27', '--out', outdir + '/novel.augfastq.gz',
'--save-case-counts', outdir + '/kid.ct', '--save-ctrl-counts',
outdir + '/mom.ct', outdir + '/dad.ct', '--case',
data_file('microtrios/trio-na-proband.fq.gz'),
'--control', data_file('microtrios/trio-na-mother.fq.gz'),
'--control', data_file('microtrios/trio-na-father.fq.gz'),
'--memory', '500K'
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.novel.main(args)
counts = ('father', 'mother', 'proband')
testcounts = ('dad', 'mom', 'kid')
for c1, c2 in zip(counts, testcounts):
f1 = '{:s}/{:s}.ct'.format(outdir, c1)
f2 = '{:s}/{:s}.ct'.format(outdir, c2)
assert filecmp.cmp(f1, f2)
finally:
rmtree(outdir)
def test_novel_save_counts_mismatch(capsys):
outdir = mkdtemp()
try:
arglist = [
'novel', '--ksize', '27', '--out', outdir + '/novel.augfastq.gz',
'--save-case-counts', outdir + '/kid.ct', '--save-ctrl-counts',
outdir + '/mom.ct', outdir + '/dad.ct', outdir + '/sibling.ct',
'--case', data_file('microtrios/trio-k-proband.fq.gz'),
'--control', data_file('microtrios/trio-k-mother.fq.gz'),
'--control', data_file('microtrios/trio-k-father.fq.gz'),
'--memory', '500K'
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.logstream, logstream = sys.stderr, kevlar.logstream
kevlar.novel.main(args)
kevlar.logstream = logstream
finally:
rmtree(outdir)
out, err = capsys.readouterr()
assert 'stubbornly refusing to save k-mer counts' in err
def test_novel_load_counts(capsys):
file1 = data_file('simple-genome-case-reads.fa.gz')
file2 = data_file('ambig.fasta')
file3 = data_file('simple-genome-case.ct')
file4, file5 = data_glob('simple-genome-ctrl?.ct')
arglist = [
'novel', '-k', '25',
'--case', file1, file2, '--case-counts', file3,
'--control-counts', file4, file5
]
args = kevlar.cli.parser().parse_args(arglist)
kevlar.logstream, logstream = sys.stderr, kevlar.logstream
kevlar.novel.main(args)
kevlar.logstream = logstream
out, err = capsys.readouterr()
assert 'counttables for 2 sample(s) provided' in err
[record footer: per-file quality-signal metrics omitted]
[next record: bot.py (repo egor5q/pvp-combat @ 42d0f9df14e35c408deb7a360a9f7544ceae7dd7, hexsha 7bea7db6a9ed79dea66853c2fd9ed8df8241cc8b, 1,353 bytes, Python, license MIT)]
# -*- coding: utf-8 -*-
import os
import telebot
import time
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import traceback
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
#client=MongoClient(os.environ['database'])
#db=client.
#users=db.users
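# mark the game as started and send every participant the stats message with an action keyboard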
def battle(game):
if game['started']==False:
game['started']=True
for ids in game['players']:
sendkb(game['players'][ids], game)
def sendkb(player, game):
if player['die']!=1 and player['stun']<=0:
kb=types.ReplyKeyboardMarkup()
kb.add(types.KeyboardButton('Атаковать'), types.KeyboardButton('Блокировать'))  # 'Attack' and 'Block' buttons
gamestats=stats(game)
bot.send_message(player['id'], gamestats, reply_markup=kb)
def createplayer(user):
# sendkb reads player['id'], player['die'] and player['stun']; 'id' and 'die'
# were missing from the original dict, so they are added here
return {user.id: {
'id': user.id,
'die': 0,
'hp': 1000,
'stamina': 15,
'stun': 0,
'status': 'rest',
'action': None
}
}
def medit(message_text,chat_id, message_id,reply_markup=None,parse_mode=None):
return bot.edit_message_text(chat_id=chat_id,message_id=message_id,text=message_text,reply_markup=reply_markup,
parse_mode=parse_mode)
print('7777')
bot.polling(none_stop=True,timeout=600)
[record footer: per-file quality-signal metrics omitted]
[next record: simleague/simleague.py (repo Kuro-Rui/flare-cogs @ f739e3a4a8c65bf0e10945d242ba0b82f96c6d3d, hexsha 7bee6b98a8502317f53e2986edd1dc16f78c2ac7, 50,039 bytes, Python, license MIT, 38 stars, 2021-03-07 to 2022-02-28)]
import asyncio
import logging
import random
import time
from abc import ABC
from typing import Literal, Optional
import aiohttp
import discord
from redbot.core import Config, bank, checks, commands
from redbot.core.utils.chat_formatting import box
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from tabulate import tabulate
from .core import SimHelper
from .functions import WEATHER
from .simset import SimsetMixin
from .stats import StatsMixin
from .teamset import TeamsetMixin
# THANKS TO https://code.sololearn.com/ci42wd5h0UQX/#py FOR THE SIMULATION AND FIXATOR/AIKATERNA/STEVY FOR THE PILLOW HELP/LEVELER
class CompositeMetaClass(type(commands.Cog), type(ABC)):
"""This allows the metaclass used for proper type detection to coexist with discord.py's
metaclass."""
class SimLeague(
SimHelper, TeamsetMixin, StatsMixin, SimsetMixin, commands.Cog, metaclass=CompositeMetaClass
):
"""SimLeague"""
__version__ = "3.1.0"
def format_help_for_context(self, ctx):
"""Thanks Sinbad."""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
def __init__(self, bot):
self.log = logging.getLogger("red.flarecogs.SimLeague")
defaults = {
"levels": {},
"teams": {},
"fixtures": [],
"standings": {},
"stats": {
"goals": {},
"yellows": {},
"reds": {},
"penalties": {},
"assists": {},
"motm": {},
"cleansheets": {},
},
"users": [],
"resultchannel": [],
"gametime": 1,
"bettime": 180,
"htbreak": 5,
"bettoggle": True,
"betmax": 10000,
"betmin": 10,
"mentions": True,
"redcardmodifier": 22,
"probability": {
"goalchance": 96,
"yellowchance": 98,
"redchance": 398,
"penaltychance": 249,
"penaltyblock": 0.6,
},
"maxplayers": 4,
"active": False,
"started": False,
"betteams": [],
"transferwindow": False,
"cupmode": False,
}
defaults_user = {"notify": True}
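# per-guild settings (teams, fixtures, stats, probabilities) and per-user settings are stored via Red's Config API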
self.config = Config.get_conf(self, identifier=4268355870, force_registration=True)
self.config.register_guild(**defaults)
self.config.register_user(**defaults_user)
self.bot = bot
self.bets = {}
self.cache = time.time()
self.session = aiohttp.ClientSession()
async def red_delete_data_for_user(
self,
*,
requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
user_id: int,
):
await self.config.user_from_id(user_id).clear()
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
@commands.command()
async def notify(self, ctx, toggle: bool):
"""Set wheter to recieve notifications of matches and results."""
await self.config.user(ctx.author).notify.set(toggle)
if toggle:
await ctx.send("You will recieve a notification on matches and results.")
else:
await ctx.send("You will no longer recieve a notification on matches and results.")
@checks.admin_or_permissions(manage_guild=True)
@commands.command()
async def register(
self,
ctx,
teamname: str,
members: commands.Greedy[discord.Member],
logo: Optional[str] = None,
*,
role: discord.Role = None,
):
"""Register a team.
Try to keep team names to one word if possible."""
maxplayers = await self.config.guild(ctx.guild).maxplayers()
if len(members) != maxplayers:
return await ctx.send(f"You must provide {maxplayers} members.")
names = {str(x.id): x.name for x in members}
memids = await self.config.guild(ctx.guild).users()
a = [uid for uid in names if uid in memids]
if a:
b = []
for ids in a:
user = self.bot.get_user(ids)
if user is None:
user = await self.bot.fetch_user(ids)
b.append(user.name)
return await ctx.send(", ".join(b) + " is/are on a team.")
async with self.config.guild(ctx.guild).teams() as teams:
if teamname in teams:
return await ctx.send("{} is already a team!".format(teamname))
a = []
teams[teamname] = {
"members": names,
"captain": {str(members[0].id): members[0].name},
"logo": logo,
"role": role.name if role is not None else None,
"cachedlevel": 0,
"fullname": None,
"kits": {"home": None, "away": None, "third": None},
"stadium": None,
"bonus": 0,
}
async with self.config.guild(ctx.guild).standings() as standings:
standings[teamname] = {
"played": 0,
"wins": 0,
"losses": 0,
"points": 0,
"gd": 0,
"gf": 0,
"ga": 0,
"draws": 0,
}
await self.config.guild(ctx.guild).users.set(memids + list(names.keys()))
for uid in list(names.keys()):
await self.addrole(ctx, uid, role)
await ctx.tick()
@commands.command(name="teams")
async def _list(self, ctx, updatecache: bool = False, mobilefriendly: bool = True):
"""List current teams."""
if updatecache:
await self.updatecacheall(ctx.guild)
teams = await self.config.guild(ctx.guild).teams()
if not teams:
return await ctx.send("No teams have been registered.")
if mobilefriendly:
embed = discord.Embed(colour=ctx.author.colour)
msg = await ctx.send(
"This may take some time depending on the amount of teams currently registered."
)
if time.time() - self.cache >= 86400:
await msg.edit(
content="Updating the level cache, please wait. This may take some time."
)
await self.updatecacheall(ctx.guild)
self.cache = time.time()
async with ctx.typing():
for team in teams:
mems = list(teams[team]["members"].values())
lvl = teams[team]["cachedlevel"]
embed.add_field(
name="Team {}".format(team),
value="{}**Members**:\n{}\n**Captain**: {}\n**Team Level**: ~{}{}{}".format(
"**Full Name**:\n{}\n".format(teams[team]["fullname"])
if teams[team]["fullname"] is not None
else "",
"\n".join(mems),
list(teams[team]["captain"].values())[0],
lvl,
"\n**Role**: {}".format(
ctx.guild.get_role(teams[team]["role"]).mention
)
if teams[team]["role"] is not None
else "",
"\n**Stadium**: {}".format(teams[team]["stadium"])
if teams[team]["stadium"] is not None
else "",
),
inline=True,
)
await msg.edit(embed=embed, content=None)
else:
teamlen = max(*[len(str(i)) for i in teams], 5) + 3
rolelen = max(*[len(str(teams[i]["role"])) for i in teams], 5) + 3
caplen = max(*[len(list(teams[i]["captain"].values())[0]) for i in teams], 5) + 3
lvllen = 6
msg = f'{"Team":{teamlen}} {"Level":{lvllen}} {"Captain":{caplen}} {"Role":{rolelen}} Members\n'
non = "None"
for team in teams:
lvl = teams[team]["cachedlevel"]
captain = list(teams[team]["captain"].values())[0]
role = teams[team]["role"]
msg += f'{team:{teamlen}} {lvl:<{lvllen}} {captain:{caplen}} {(role.name if role is not None else non):{rolelen}} {", ".join(list(teams[team]["members"].values()))}\n'
msg = await ctx.send(box(msg, lang="ini"))
@commands.command()
async def team(self, ctx, *, team: str):
"""List a team."""
teams = await self.config.guild(ctx.guild).teams()
if not teams:
return await ctx.send("No teams have been registered.")
if team not in teams:
return await ctx.send("Team does not exist, ensure that it is correctly capitilized.")
async with ctx.typing():
embeds = []
embed = discord.Embed(
title="{} {}".format(
team,
"- {}".format(teams[team]["fullname"])
if teams[team]["fullname"] is not None
else "",
),
colour=ctx.author.colour,
)
embed.add_field(
name="Members:",
value="\n".join(list(teams[team]["members"].values())),
inline=True,
)
embed.add_field(name="Captain:", value=list(teams[team]["captain"].values())[0])
embed.add_field(name="Level:", value=teams[team]["cachedlevel"], inline=True)
embed.add_field(name="Bonus %:", value=f"{teams[team]['bonus'] * 15}%", inline=True)
if teams[team]["role"] is not None:
embed.add_field(
name="Role:",
value=ctx.guild.get_role(teams[team]["role"]).mention,
inline=True,
)
if teams[team]["stadium"] is not None:
embed.add_field(name="Stadium:", value=teams[team]["stadium"], inline=True)
if teams[team]["logo"] is not None:
embed.set_thumbnail(url=teams[team]["logo"])
embeds.append(embed)
for kit in teams[team]["kits"]:
if teams[team]["kits"][kit] is not None:
embed = discord.Embed(title=f"{kit.title()} Kit", colour=ctx.author.colour)
embed.set_image(url=teams[team]["kits"][kit])
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.command()
async def fixtures(self, ctx, week: Optional[int] = None):
"""Show all fixtures."""
fixtures = await self.config.guild(ctx.guild).fixtures()
if not fixtures:
return await ctx.send("No fixtures have been made.")
if week is None:
embed = discord.Embed(color=0xFF0000)
for i, fixture in enumerate(fixtures[:25]):
a = [f"{game[0]} vs {game[1]}" for game in fixture]
embed.add_field(name="Week {}".format(i + 1), value="\n".join(a))
await ctx.send(embed=embed)
if len(fixtures) > 25:
embed = discord.Embed(color=0xFF0000)
for i, fixture in enumerate(fixtures[25:], 25):
a = [f"{game[0]} vs {game[1]}" for game in fixture]
embed.add_field(name="Week {}".format(i + 1), value="\n".join(a))
await ctx.send(embed=embed)
else:
if week == 0:
return await ctx.send("Try starting with week 1.")
try:
games = fixtures
games.reverse()
games.append("None")
games.reverse()
games = games[week]
except IndexError:
return await ctx.send("Invalid gameweek.")
a = [f"{fixture[0]} vs {fixture[1]}" for fixture in games]
await ctx.maybe_send_embed("\n".join(a))
@commands.command()
async def standings(self, ctx, verbose: bool = False):
"""Current sim standings."""
if await self.config.guild(ctx.guild).cupmode():
return await ctx.send(
"This simulation league is in cup mode, contact the maintainer of the league for the current standings."
)
standings = await self.config.guild(ctx.guild).standings()
if standings is None:
return await ctx.send("The table is empty.")
t = [] # PrettyTable(["Team", "W", "L", "D", "PL", "PO"])
if not verbose:
for x in sorted(
standings,
key=lambda x: (standings[x]["points"], standings[x]["gd"], standings[x]["gf"]),
reverse=True,
):
t.append(
[
x,
standings[x]["wins"],
standings[x]["losses"],
standings[x]["draws"],
standings[x]["played"],
standings[x]["points"],
]
)
tab = tabulate(t, headers=["Team", "Wins", "Losses", "Draws", "Played", "Points"])
else:
for x in sorted(
standings,
key=lambda x: (standings[x]["points"], standings[x]["gd"], standings[x]["gf"]),
reverse=True,
):
t.append(
[
x,
standings[x]["wins"],
standings[x]["losses"],
standings[x]["draws"],
standings[x]["played"],
standings[x]["points"],
standings[x]["gd"],
standings[x]["gf"],
standings[x]["ga"],
]
)
tab = tabulate(
t,
headers=["Team", "Wins", "Losses", "Draws", "Played", "Points", "GD", "GF", "GA"],
)
await ctx.send(box(tab))
@checks.admin_or_permissions(manage_guild=True)
@commands.cooldown(rate=1, per=30, type=commands.BucketType.guild)
@commands.max_concurrency(1, per=commands.BucketType.guild)
@commands.command(aliases=["playsim", "simulate"])
async def sim(self, ctx, team1: str, team2: str):
"""Simulate a game between two teams."""
teams = await self.config.guild(ctx.guild).teams()
if team1 not in teams or team2 not in teams:
return await ctx.send("One of those teams do not exist.")
if team1 == team2:
return await ctx.send("You can't sim two of the same teams silly.")
msg = await ctx.send("Updating cached levels...")
await self.updatecachegame(ctx.guild, team1, team2)
await msg.delete()
await asyncio.sleep(2)
teams = await self.config.guild(ctx.guild).teams()
lvl1 = teams[team1]["cachedlevel"]
lvl2 = teams[team2]["cachedlevel"]
bonuslvl1 = teams[team1]["bonus"]
bonuslvl2 = teams[team2]["bonus"]
homewin = lvl2 / lvl1
awaywin = lvl1 / lvl2
try:
draw = homewin / awaywin
except ZeroDivisionError:
draw = 0.5
await self.config.guild(ctx.guild).active.set(True)
await self.config.guild(ctx.guild).betteams.set([team1, team2])
goals = {}
assists = {}
reds = {team1: 0, team2: 0}
bettime = await self.config.guild(ctx.guild).bettime()
stadium = teams[team1]["stadium"] if teams[team1]["stadium"] is not None else None
weather = random.choice(WEATHER)
im = await self.matchinfo(ctx, [team1, team2], weather, stadium, homewin, awaywin, draw)
await ctx.send(file=im)
await self.matchnotif(ctx, team1, team2)
bet = await ctx.send(
"Betting is now open, game will commence in {} seconds.\nUsage: {}bet <amount> <team>".format(
bettime, ctx.prefix
)
)
for i in range(1, bettime):
if i % 5 == 0:
await bet.edit(
content="Betting is now open, game will commence in {} seconds.\nUsage: {}bet <amount> <team>".format(
bettime - i, ctx.prefix
)
)
await asyncio.sleep(1)
await bet.delete()
probability = await self.config.guild(ctx.guild).probability()
await self.config.guild(ctx.guild).started.set(True)
redcardmodifier = await self.config.guild(ctx.guild).redcardmodifier()
team1players = list(teams[team1]["members"].keys())
team2players = list(teams[team2]["members"].keys())
logos = ["sky", "bt", "bein", "bbc"]
yellowcards = []
logo = random.choice(logos)
motm = {}
events = False
# Team 1 stuff
yC_team1 = []
rC_team1 = []
injury_team1 = []
sub_in_team1 = []
sub_out_team1 = []
sub_count1 = 0
rc_count1 = 0
score_count1 = 0
injury_count1 = 0
team1Stats = [
team1,
yC_team1,
rC_team1,
injury_team1,
sub_in_team1,
sub_out_team1,
sub_count1,
rc_count1,
score_count1,
injury_count1,
]
# Team 2 stuff
yC_team2 = []
rC_team2 = []
injury_team2 = []
sub_in_team2 = []
sub_out_team2 = []
sub_count2 = 0
rc_count2 = 0
score_count2 = 0
injury_count2 = 0
team2Stats = [
team2,
yC_team2,
rC_team2,
injury_team2,
sub_in_team2,
sub_out_team2,
sub_count2,
rc_count2,
score_count2,
injury_count2,
]
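# weighted pick between the two teams: higher total XP and bonus raise a team's
# share of the draw pool, while red cards shrink it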
async def TeamWeightChance(
ctx, t1totalxp, t2totalxp, reds1: int, reds2: int, team1bonus: int, team2bonus: int
):
if t1totalxp < 2:
t1totalxp = 1
if t2totalxp < 2:
t2totalxp = 1
team1bonus *= 15
team2bonus *= 15
t1totalxp = t1totalxp * float(f"1.{team1bonus}")
t2totalxp = t2totalxp * float(f"1.{team2bonus}")
self.log.debug(f"Team 1: {t1totalxp} - Team 2: {t2totalxp}")
redst1 = float(f"0.{reds1 * redcardmodifier}")
redst2 = float(f"0.{reds2 * redcardmodifier}")
total = ["A"] * int(((1 - redst1) * 100) * t1totalxp) + ["B"] * int(
((1 - redst2) * 100) * t2totalxp
)
rdmint = random.choice(total)
if rdmint == "A":
return team1Stats
else:
return team2Stats
async def TeamChance():
rndint = random.randint(1, 10)
if rndint >= 5:
return team1Stats
else:
return team2Stats
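# pick the player(s) involved in an event: 0 = goal (possibly with an assister),
# 1 = yellow card (a second yellow returns a marker), 2/3 = red card or penalty taker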
async def PlayerGenerator(event, team, yc, rc):
random.shuffle(team1players)
random.shuffle(team2players)
output = []
if team == team1:
fs_players = team1players
yc = yC_team1
rc = rC_team1
elif team == team2:
fs_players = team2players
yc = yC_team2
rc = rC_team2
if event == 0:
rosterUpdate = [i for i in fs_players if i not in rc]
if not rosterUpdate:
return await ctx.send(
"Game abandoned, no score recorded due to no players remaining."
)
isassist = False
assist = random.randint(0, 100)
if assist > 20:
isassist = True
if len(rosterUpdate) < 3:
isassist = False
player = random.choice(rosterUpdate)
if not isassist:
return [team, player]
rosterUpdate.remove(player)
assister = random.choice(rosterUpdate)
return [team, player, assister]
elif event == 1:
rosterUpdate = [i for i in fs_players if i not in rc]
if len(rosterUpdate) == 1:
return None
player = random.choice(rosterUpdate)
if player in yc or player in yellowcards:
return [team, player, 2]
else:
return [team, player]
elif event in [2, 3]:
rosterUpdate = [i for i in fs_players if i not in rc]
if len(rosterUpdate) == 1 and event == 2:
return None
player_out = random.choice(rosterUpdate)
output = [team, player_out]
return output
# Start of Simulation!
im = await self.walkout(ctx, team1, "home")
im2 = await self.walkout(ctx, team2, "away")
await ctx.send("Teams:", file=im)
await ctx.send(file=im2)
timemsg = await ctx.send("Kickoff!")
gametime = await self.config.guild(ctx.guild).gametime()
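# simulate 90 minutes of play, rolling for goal, penalty, yellow and red card
# events (the events flag keeps multiple events from firing at once)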
for min in range(1, 91):
await asyncio.sleep(gametime)
if min % 5 == 0:
await timemsg.edit(content="Minute: {}".format(min))
if events is False:
gC = await self.goalChance(ctx.guild, probability)
if gC is True:
teamStats = await TeamWeightChance(
ctx, lvl1, lvl2, reds[team1], reds[team2], bonuslvl1, bonuslvl2
)
playerGoal = await PlayerGenerator(0, teamStats[0], teamStats[1], teamStats[2])
teamStats[8] += 1
async with self.config.guild(ctx.guild).stats() as stats:
if playerGoal[1] not in stats["goals"]:
stats["goals"][playerGoal[1]] = 1
else:
stats["goals"][playerGoal[1]] += 1
if len(playerGoal) == 3:
if playerGoal[2] not in stats["assists"]:
stats["assists"][playerGoal[2]] = 1
else:
stats["assists"][playerGoal[2]] += 1
events = True
if len(playerGoal) == 3:
user2 = self.bot.get_user(int(playerGoal[2]))
if user2 is None:
user2 = await self.bot.fetch_user(int(playerGoal[2]))
if user2 not in motm:
motm[user2] = 1
else:
motm[user2] += 1
if user2.id not in assists:
assists[user2.id] = 1
else:
assists[user2.id] += 1
user = self.bot.get_user(int(playerGoal[1]))
if user is None:
user = await self.bot.fetch_user(int(playerGoal[1]))
if user not in motm:
motm[user] = 2
else:
motm[user] += 2
if user.id not in goals:
goals[user.id] = 1
else:
goals[user.id] += 1
if len(playerGoal) == 3:
image = await self.simpic(
ctx,
str(min),
"goal",
user,
team1,
team2,
str(playerGoal[0]),
str(team1Stats[8]),
str(team2Stats[8]),
user2,
)
else:
image = await self.simpic(
ctx,
str(min),
"goal",
user,
team1,
team2,
str(playerGoal[0]),
str(team1Stats[8]),
str(team2Stats[8]),
)
await ctx.send(file=image)
if events is False:
pC = await self.penaltyChance(ctx.guild, probability)
if pC is True:
teamStats = await TeamWeightChance(
ctx, lvl1, lvl2, reds[team1], reds[team2], bonuslvl1, bonuslvl2
)
playerPenalty = await PlayerGenerator(
3, teamStats[0], teamStats[1], teamStats[2]
)
user = self.bot.get_user(int(playerPenalty[1]))
if user is None:
user = await self.bot.fetch_user(int(playerPenalty[1]))
image = await self.penaltyimg(ctx, str(playerPenalty[0]), str(min), user)
await ctx.send(file=image)
pB = await self.penaltyBlock(ctx.guild, probability)
if pB is True:
events = True
async with self.config.guild(ctx.guild).stats() as stats:
if playerPenalty[1] not in stats["penalties"]:
stats["penalties"][playerPenalty[1]] = {"scored": 0, "missed": 1}
else:
stats["penalties"][playerPenalty[1]]["missed"] += 1
user = self.bot.get_user(int(playerPenalty[1]))
if user is None:
user = await self.bot.fetch_user(int(playerPenalty[1]))
image = await self.simpic(
ctx,
str(min),
"penmiss",
user,
team1,
team2,
str(playerPenalty[0]),
str(team1Stats[8]),
str(team2Stats[8]),
)
await ctx.send(file=image)
else:
teamStats[8] += 1
async with self.config.guild(ctx.guild).stats() as stats:
if playerPenalty[1] not in stats["goals"]:
stats["goals"][playerPenalty[1]] = 1
else:
stats["goals"][playerPenalty[1]] += 1
if playerPenalty[1] not in stats["penalties"]:
stats["penalties"][playerPenalty[1]] = {"scored": 1, "missed": 0}
else:
stats["penalties"][playerPenalty[1]]["scored"] += 1
events = True
user = self.bot.get_user(int(playerPenalty[1]))
if user is None:
user = await self.bot.fetch_user(int(playerPenalty[1]))
if user not in motm:
motm[user] = 2
else:
motm[user] += 2
if user.id not in goals:
goals[user.id] = 1
else:
goals[user.id] += 1
image = await self.simpic(
ctx,
str(min),
"penscore",
user,
team1,
team2,
str(playerPenalty[0]),
str(team1Stats[8]),
str(team2Stats[8]),
)
await ctx.send(file=image)
if events is False:
yC = await self.yCardChance(ctx.guild, probability)
if yC is True:
teamStats = await TeamChance()
playerYellow = await PlayerGenerator(
1, teamStats[0], teamStats[1], teamStats[2]
)
if playerYellow is not None:
if len(playerYellow) == 3:
teamStats[7] += 1
teamStats[2].append(playerYellow[1])
async with self.config.guild(ctx.guild).stats() as stats:
reds[str(playerYellow[0])] += 1
if playerYellow[1] not in stats["reds"]:
stats["reds"][playerYellow[1]] = 1
stats["yellows"][playerYellow[1]] += 1
else:
stats["yellows"][playerYellow[1]] += 1
stats["reds"][playerYellow[1]] += 1
events = True
user = self.bot.get_user(int(playerYellow[1]))
if user is None:
user = await self.bot.fetch_user(int(playerYellow[1]))
if user not in motm:
motm[user] = -2
else:
motm[user] += -2
image = await self.simpic(
ctx,
str(min),
"2yellow",
user,
team1,
team2,
str(playerYellow[0]),
str(team1Stats[8]),
str(team2Stats[8]),
None,
str(
len(teams[str(str(playerYellow[0]))]["members"])
- (int(teamStats[7]))
),
)
await ctx.send(file=image)
else:
teamStats[1].append(playerYellow[1])
yellowcards.append(str(playerYellow[1]))
async with self.config.guild(ctx.guild).stats() as stats:
if playerYellow[1] not in stats["yellows"]:
stats["yellows"][playerYellow[1]] = 1
else:
stats["yellows"][playerYellow[1]] += 1
events = True
user = self.bot.get_user(int(playerYellow[1]))
if user is None:
user = await self.bot.fetch_user(int(playerYellow[1]))
if user not in motm:
motm[user] = -1
else:
motm[user] += -1
image = await self.simpic(
ctx,
str(min),
"yellow",
user,
team1,
team2,
str(playerYellow[0]),
str(team1Stats[8]),
str(team2Stats[8]),
)
await ctx.send(file=image)
if events is False:
rC = await self.rCardChance(ctx.guild, probability)
if rC is True:
teamStats = await TeamChance()
playerRed = await PlayerGenerator(2, teamStats[0], teamStats[1], teamStats[2])
if playerRed is not None:
teamStats[7] += 1
async with self.config.guild(ctx.guild).stats() as stats:
if playerRed[1] not in stats["reds"]:
stats["reds"][playerRed[1]] = 1
else:
stats["reds"][playerRed[1]] += 1
reds[str(playerRed[0])] += 1
teamStats[2].append(playerRed[1])
events = True
user = self.bot.get_user(int(playerRed[1]))
if user is None:
user = await self.bot.fetch_user(int(playerRed[1]))
if user not in motm:
motm[user] = -2
else:
motm[user] += -2
image = await self.simpic(
ctx,
str(min),
"red",
user,
team1,
team2,
str(playerRed[0]),
str(team1Stats[8]),
str(team2Stats[8]),
None,
str(
len(teams[str(str(playerRed[0]))]["members"]) - (int(teamStats[7]))
),
)
await ctx.send(file=image)
            events = False
if min == 45:
added = random.randint(1, 5)
im = await self.extratime(ctx, added)
await ctx.send(file=im)
s = 45
for i in range(added):
s += 1
gC = await self.goalChance(ctx.guild, probability)
if gC is True:
teamStats = await TeamWeightChance(
ctx, lvl1, lvl2, reds[team1], reds[team2], bonuslvl1, bonuslvl2
)
playerGoal = await PlayerGenerator(
0, teamStats[0], teamStats[1], teamStats[2]
)
teamStats[8] += 1
async with self.config.guild(ctx.guild).stats() as stats:
if playerGoal[1] not in stats["goals"]:
stats["goals"][playerGoal[1]] = 1
else:
stats["goals"][playerGoal[1]] += 1
if len(playerGoal) == 3:
if playerGoal[2] not in stats["assists"]:
stats["assists"][playerGoal[2]] = 1
else:
stats["assists"][playerGoal[2]] += 1
if len(playerGoal) == 3:
user2 = self.bot.get_user(int(playerGoal[2]))
if user2 is None:
user2 = await self.bot.fetch_user(int(playerGoal[2]))
if user2 not in motm:
motm[user2] = 1
else:
motm[user2] += 1
if user2.id not in assists:
assists[user2.id] = 1
else:
assists[user2.id] += 1
events = True
user = self.bot.get_user(int(playerGoal[1]))
if user is None:
user = await self.bot.fetch_user(int(playerGoal[1]))
if user not in motm:
motm[user] = 2
else:
motm[user] += 2
if user.id not in goals:
goals[user.id] = 1
else:
goals[user.id] += 1
if len(playerGoal) == 3:
image = await self.simpic(
ctx,
str(min) + "+" + str(i + 1),
"goal",
user,
team1,
team2,
str(playerGoal[0]),
str(team1Stats[8]),
str(team2Stats[8]),
user2,
)
else:
image = await self.simpic(
ctx,
str(min) + "+" + str(i + 1),
"goal",
user,
team1,
team2,
str(playerGoal[0]),
str(team1Stats[8]),
str(team2Stats[8]),
)
await ctx.send(file=image)
await asyncio.sleep(gametime)
events = False
ht = await self.config.guild(ctx.guild).htbreak()
im = await self.timepic(
ctx, team1, team2, str(team1Stats[8]), str(team2Stats[8]), "HT", logo
)
await ctx.send(file=im)
await asyncio.sleep(ht)
await timemsg.delete()
timemsg = await ctx.send("Second Half!")
if min == 90:
added = random.randint(1, 5)
im = await self.extratime(ctx, added)
await ctx.send(file=im)
s = 90
for i in range(added):
s += 1
gC = await self.goalChance(ctx.guild, probability)
if gC is True:
teamStats = await TeamWeightChance(
ctx, lvl1, lvl2, reds[team1], reds[team2], bonuslvl1, bonuslvl2
)
playerGoal = await PlayerGenerator(
0, teamStats[0], teamStats[1], teamStats[2]
)
teamStats[8] += 1
async with self.config.guild(ctx.guild).stats() as stats:
if playerGoal[1] not in stats["goals"]:
stats["goals"][playerGoal[1]] = 1
else:
stats["goals"][playerGoal[1]] += 1
if len(playerGoal) == 3:
if playerGoal[2] not in stats["assists"]:
stats["assists"][playerGoal[2]] = 1
else:
stats["assists"][playerGoal[2]] += 1
if len(playerGoal) == 3:
user2 = self.bot.get_user(int(playerGoal[2]))
if user2 is None:
user2 = await self.bot.fetch_user(int(playerGoal[2]))
if user2 not in motm:
motm[user2] = 1
else:
motm[user2] += 1
if user2.id not in assists:
assists[user2.id] = 1
else:
assists[user2.id] += 1
events = True
user = self.bot.get_user(int(playerGoal[1]))
if user is None:
user = await self.bot.fetch_user(int(playerGoal[1]))
if user not in motm:
motm[user] = 2
else:
motm[user] += 2
if user.id not in goals:
goals[user.id] = 1
else:
goals[user.id] += 1
if len(playerGoal) == 3:
image = await self.simpic(
ctx,
str(min) + "+" + str(i + 1),
"goal",
user,
team1,
team2,
str(playerGoal[0]),
str(team1Stats[8]),
str(team2Stats[8]),
user2,
)
else:
image = await self.simpic(
ctx,
str(min) + "+" + str(i + 1),
"goal",
user,
team1,
team2,
str(playerGoal[0]),
str(team1Stats[8]),
str(team2Stats[8]),
)
await ctx.send(file=image)
await asyncio.sleep(gametime)
events = False
im = await self.timepic(
ctx, team1, team2, str(team1Stats[8]), str(team2Stats[8]), "FT", logo
)
await timemsg.delete()
await ctx.send(file=im)
if team1Stats[8] > team2Stats[8]:
async with self.config.guild(ctx.guild).standings() as standings:
standings[team1]["wins"] += 1
standings[team1]["points"] += 3
standings[team1]["played"] += 1
standings[team2]["losses"] += 1
standings[team2]["played"] += 1
t = await self.payout(ctx.guild, team1, homewin)
if team1Stats[8] < team2Stats[8]:
async with self.config.guild(ctx.guild).standings() as standings:
standings[team2]["points"] += 3
standings[team2]["wins"] += 1
standings[team2]["played"] += 1
standings[team1]["losses"] += 1
standings[team1]["played"] += 1
t = await self.payout(ctx.guild, team2, awaywin)
if team1Stats[8] == team2Stats[8]:
async with self.config.guild(ctx.guild).standings() as standings:
standings[team1]["played"] += 1
standings[team2]["played"] += 1
standings[team1]["points"] += 1
standings[team2]["points"] += 1
standings[team2]["draws"] += 1
standings[team1]["draws"] += 1
t = await self.payout(ctx.guild, "draw", draw)
await self.cleansheets(ctx, team1, team2, team1Stats[8], team2Stats[8])
team1gd = team1Stats[8] - team2Stats[8]
team2gd = team2Stats[8] - team1Stats[8]
async with self.config.guild(ctx.guild).standings() as standings:
if team1gd != 0:
standings[team1]["gd"] += team1gd
if team2gd != 0:
standings[team2]["gd"] += team2gd
if team2Stats[8] != 0:
standings[team2]["gf"] += team2Stats[8]
standings[team1]["ga"] += team2Stats[8]
if team1Stats[8] != 0:
standings[team1]["gf"] += team1Stats[8]
standings[team2]["ga"] += team1Stats[8]
await self.postresults(ctx, team1, team2, team1Stats[8], team2Stats[8])
await self.config.guild(ctx.guild).active.set(False)
await self.config.guild(ctx.guild).started.set(False)
await self.config.guild(ctx.guild).betteams.set([])
if ctx.guild.id in self.bets:
self.bets[ctx.guild.id] = {}
if motm:
motmwinner = sorted(motm, key=motm.get, reverse=True)[0]
if motmwinner.id in goals:
motmgoals = goals[motmwinner.id]
else:
motmgoals = 0
if motmwinner.id in assists:
motmassists = assists[motmwinner.id]
else:
motmassists = 0
try:
await bank.deposit_credits(
self.bot.get_user(motmwinner.id), (75 * motmgoals) + (30 * motmassists)
)
except AttributeError:
pass
im = await self.motmpic(
ctx,
motmwinner,
team1 if str(motmwinner.id) in teams[team1]["members"].keys() else team2,
motmgoals,
motmassists,
)
async with self.config.guild(ctx.guild).stats() as stats:
if str(motmwinner.id) not in stats["motm"]:
stats["motm"][str(motmwinner.id)] = 1
else:
stats["motm"][str(motmwinner.id)] += 1
await ctx.send(file=im)
if t is not None:
await ctx.send("Bet Winners:\n" + t)
async def bet_conditions(self, ctx, bet, team):
bettoggle = await self.config.guild(ctx.guild).bettoggle()
active = await self.config.guild(ctx.guild).active()
started = await self.config.guild(ctx.guild).started()
        if not bettoggle:
            # send() returns a Message object (truthy), so returning it directly
            # would let the bet proceed; return False explicitly instead.
            await ctx.send("Betting is currently disabled.")
            return False
        if not active:
            await ctx.send("There isn't a game on right now.")
            return False
elif started:
try:
await ctx.author.send("You can't place a bet after the game has started.")
except discord.HTTPException:
await ctx.send(
"Maybe you should unblock me or turn off privacy settings if you want to bet ¯\\_(ツ)_/¯. {}".format(
ctx.author.mention
)
)
return False
if ctx.guild.id not in self.bets:
self.bets[ctx.guild.id] = {}
elif ctx.author.id in self.bets[ctx.guild.id]:
await ctx.send("You have already entered a bet for the game.")
return False
teams = await self.config.guild(ctx.guild).teams()
if team not in teams and team != "draw":
await ctx.send("That team isn't currently playing.")
return False
minbet = await self.config.guild(ctx.guild).betmin()
if bet < minbet:
await ctx.send("The minimum bet is {}".format(minbet))
return False
maxbet = await self.config.guild(ctx.guild).betmax()
if bet > maxbet:
await ctx.send("The maximum bet is {}".format(maxbet))
return False
if await bank.can_spend(ctx.author, bet):
return True
await ctx.send("You do not have enough money to cover the bet.")
return False
@commands.command(name="bet")
async def _bet(self, ctx, bet: int, *, team: str):
"""Bet on a team or a draw."""
if await self.bet_conditions(ctx, bet, team):
self.bets[ctx.guild.id][ctx.author] = {"Bets": [(team, bet)]}
currency = await bank.get_currency_name(ctx.guild)
await bank.withdraw_credits(ctx.author, bet)
await ctx.send(f"{ctx.author.mention} placed a {bet} {currency} bet on {team}.")
async def payout(self, guild, winner, odds):
if winner is None:
return None
bet_winners = []
if guild.id not in self.bets:
return None
for better in self.bets[guild.id]:
for team, bet in self.bets[guild.id][better]["Bets"]:
if team == winner:
bet_winners.append(f"{better.mention} - Winnings: {int(bet + (bet * odds))}")
await bank.deposit_credits(better, int(bet + (bet * odds)))
return "\n".join(bet_winners) if bet_winners else None
async def cleansheets(self, ctx, team1, team2, team1score, team2score):
if team1score == 0 and team2score > 0:
async with self.config.guild(ctx.guild).stats() as stats:
if team2 in stats["cleansheets"]:
stats["cleansheets"][team2] += 1
else:
stats["cleansheets"][team2] = 1
    elif team2score == 0 and team1score > 0:
        async with self.config.guild(ctx.guild).stats() as stats:
            # team1 kept the clean sheet here, so team1 must be both checked
            # and incremented (the original checked team2 by mistake).
            if team1 in stats["cleansheets"]:
                stats["cleansheets"][team1] += 1
            else:
                stats["cleansheets"][team1] = 1

# ==== 7_neural_networks/4_DeepLearning2.py | edrmonteiro/DataSciencePython | MIT | 2,118 bytes | hexsha 7befce5f0d88c105c0447661c3338248d03f3ae9 ====

"""
Deep Learning
"""
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.compose import make_column_transformer
import os
path = os.path.abspath(os.getcwd()) + r"/0_dataset/"
dataset = pd.read_csv(path + "Credit2.csv", sep=";")
dataset
# split out the variables; the first column is ignored since it has no semantic value
X = dataset.iloc[:,1:10].values
y = dataset.iloc[:, 10].values
# we now have an array rather than a data frame
X
# label-encode the checking_status column
# assigns values from 0 to 3
labelencoder = LabelEncoder()
X[:,0] = labelencoder.fit_transform(X[:,0])
X
# one-hot encode the credit_history column
# this should add 5 columns
onehotencoder = make_column_transformer((OneHotEncoder(categories='auto', sparse=False), [1]), remainder="passthrough")
X = onehotencoder.fit_transform(X)
X
# to avoid the dummy variable trap we would drop one dummy column: X = X[:, 1:]
# label-encode the target class
labelencoder_Y = LabelEncoder()
y = labelencoder_Y.fit_transform(y)
y
# train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
print(len(X_train),len(X_test),len(y_train),len(y_test))
# feature scaling: z-score standardization
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
X_test
classifier = Sequential()
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 12))
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
y_pred
# confusion matrix
cm = confusion_matrix(y_test, y_pred)
cm
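# A small follow-up sketch: overall accuracy can be read off the confusion
# matrix computed above (diagonal over total).
accuracy = cm.trace() / cm.sum()
print(accuracy)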

# ==== presta_connect.py | subteno-it/presta_connect | MIT | 3,142 bytes | hexsha 7bf5401a73cd65b2b3dab4a303b9fc867d22f877 ====

# -*- coding: utf-8 -*-
# Copyright 2019 Subteno IT
# License MIT License
import requests
import xmltodict
import string
import random
import io
import mimetypes  # used by the _get_content_type helper below
class PrestaConnectError(RuntimeError):
pass
class PrestaConnect:
_BOUNDARY_CHARS = string.digits + string.ascii_letters
_STATUSES = (200, 201)
def __init__(self, api, key):
self.api = api
self.key = key
def _get_url(self, path):
return self.api + '/' + path
def _check_response(self, res, ret):
if res.status_code not in self._STATUSES:
raise PrestaConnectError('Status %s, %s' % (res.status_code, ret))
return ret
def _encode_multipart_formdata(self, files):
"""Encode files to an http multipart/form-data.
:param files: a sequence of (type, filename, value)
elements for data to be uploaded as files.
:return: headers and body.
"""
BOUNDARY = (''.join(random.choice(self._BOUNDARY_CHARS) for i in range(30)))
CRLF = b'\r\n'
L = []
for (key, filename, value) in files:
L.append(bytes(('--' + BOUNDARY).encode('utf8')))
L.append(
bytes(('Content-Disposition: form-data; \
name="%s"; filename="%s"' % (key, filename)).encode('utf8')))
L.append(bytes(('Content-Type: %s' % self._get_content_type(filename)).encode('utf8')))
L.append(b'')
L.append(value)
L.append(bytes(('--' + BOUNDARY + '--').encode('utf8')))
L.append(b'')
body = CRLF.join(L)
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % BOUNDARY
}
        return headers, body

    def _get_content_type(self, filename):
        """Referenced by _encode_multipart_formdata but absent from the
        original file; a minimal guess based on the file name."""
        return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def add(self, path, data):
return self._request('POST', path, data=data)
def _load_image(self, file_name):
"""loads image to upload"""
fd = io.open(file_name, "rb")
content = fd.read()
fd.close()
return content, file_name
    def _request(self, method, path, params=None, data=None, files=None, headers=None):
        # Only wrap plain dict payloads in XML; multipart bodies built by
        # add_image arrive pre-encoded together with their own headers.
        if data is not None and headers is None:
            data = xmltodict.unparse({'prestashop': data}).encode('utf-8')
        res = requests.request(method, self._get_url(path),
                               auth=(self.key, ''),  # the webservice key acts as the basic-auth user
                               params=params,
                               data=data,
                               files=files,
                               headers=headers)
        return self._check_response(res, xmltodict.parse(res.text)['prestashop'] if not files and res.text else None)
def add_image(self, path, file_name, exists=False):
content, file_name = self._load_image(file_name)
files = [('image', file_name, content)]
headers, data = self._encode_multipart_formdata(files)
return self._request('POST', 'images/' + path, params={'ps_method': 'PUT'} if exists else None, data=data, headers=headers)
def get(self, path, params=None):
return self._request('GET', path, params)
def edit(self, path, data):
return self._request('PUT', path, data=data)
def delete(self, path):
return self._request('DELETE', path)
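
# Usage sketch (the shop URL and webservice key below are placeholders):
#
#   client = PrestaConnect('https://example-shop.com/api', 'YOUR_WEBSERVICE_KEY')
#   products = client.get('products')
#   client.add_image('products/1', 'front.jpg')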

# ==== examples/run_burgers.py | s274001/PINA | MIT | 1,702 bytes | 4 stars | hexsha 7bf8ba88150b609b31fa7978009e2b6cda410d96 ====

import argparse
import torch
from torch.nn import Softplus
from pina import PINN, Plotter
from pina.model import FeedForward
from problems.burgers import Burgers1D
class myFeature(torch.nn.Module):
"""
Feature: sin(pi*x)
"""
def __init__(self, idx):
super(myFeature, self).__init__()
self.idx = idx
def forward(self, x):
return torch.sin(torch.pi * x[:, self.idx])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run PINA")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-s", "-save", action="store_true")
group.add_argument("-l", "-load", action="store_true")
parser.add_argument("id_run", help="number of run", type=int)
parser.add_argument("features", help="extra features", type=int)
args = parser.parse_args()
feat = [myFeature(0)] if args.features else []
burgers_problem = Burgers1D()
model = FeedForward(
layers=[30, 20, 10, 5],
output_variables=burgers_problem.output_variables,
input_variables=burgers_problem.input_variables,
func=Softplus,
extra_features=feat,
)
pinn = PINN(
burgers_problem,
model,
lr=0.006,
error_norm='mse',
regularizer=0,
lr_accelerate=None)
if args.s:
pinn.span_pts(2000, 'latin', ['D'])
pinn.span_pts(150, 'random', ['gamma1', 'gamma2', 't0'])
pinn.train(5000, 100)
pinn.save_state('pina.burger.{}.{}'.format(args.id_run, args.features))
else:
pinn.load_state('pina.burger.{}.{}'.format(args.id_run, args.features))
plotter = Plotter()
plotter.plot(pinn)
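
# Example invocations (id_run and features are positional; -s trains and saves,
# -l loads a previously saved state):
#   python run_burgers.py -s 0 1
#   python run_burgers.py -l 0 1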

# ==== questions/cousins-in-binary-tree/Solution.py | marcus-aurelianus/leetcode-solutions | MIT | 2,072 bytes | 141 stars | hexsha 7bf92b8ac984ff1d4af8bc11028ce720f6dccb7d ====

"""
In a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.
Two nodes of a binary tree are cousins if they have the same depth, but have different parents.
We are given the root of a binary tree with unique values, and the values x and y of two different nodes in the tree.
Return true if and only if the nodes corresponding to the values x and y are cousins.
Example 1:
Input: root = [1,2,3,4], x = 4, y = 3
Output: false
Example 2:
Input: root = [1,2,3,null,4,null,5], x = 5, y = 4
Output: true
Example 3:
Input: root = [1,2,3,null,4], x = 2, y = 3
Output: false
Constraints:
The number of nodes in the tree will be between 2 and 100.
Each node has a unique integer value from 1 to 100.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
def find_vals(node, x, y, lvl, pos1, pos2):
if node.left is not None:
if node.left.val == x:
pos1[0] = lvl + 1
pos1[1] = node
if node.left.val == y:
pos2[0] = lvl + 1
pos2[1] = node
find_vals(node.left, x, y, lvl + 1, pos1, pos2)
if node.right is not None:
if node.right.val == x:
pos1[0] = lvl + 1
pos1[1] = node
if node.right.val == y:
pos2[0] = lvl + 1
pos2[1] = node
find_vals(node.right, x, y, lvl + 1, pos1, pos2)
if root is None:
return False
if root.val == x or root.val == y:
return False
pos1, pos2 = [-1, None], [-1, None]
find_vals(root, x, y, 0, pos1, pos2)
if pos1[0] == pos2[0] and pos1[0] != -1 and pos1[1] != pos2[1]:
return True
return False
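
# A quick self-check sketch (requires uncommenting the TreeNode definition
# above): Example 2's tree, root = [1,2,3,null,4,null,5], where 5 and 4 sit at
# the same depth under different parents.
#
#   root = TreeNode(1, TreeNode(2, None, TreeNode(4)), TreeNode(3, None, TreeNode(5)))
#   assert Solution().isCousins(root, 5, 4) is True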

# ==== utility_ai/models/action.py | TomasMaciulis/Utility-AI-API | MIT | 456 bytes | hexsha 7bfad01ae563f31b06389bcaffa8bf4fb786658a ====

from .configuration_entry import ConfigurationEntry
from utility_ai.traits.utility_score_trait import UtilityScoreTrait
class Action(ConfigurationEntry, UtilityScoreTrait):
def __init__(self, name: str, description: dict):
ConfigurationEntry.__init__(self, name, description)
UtilityScoreTrait.__init__(
self,
description['utility_score_formula'],
super().weight_value,
name
)

# ==== widdy/styles.py | ubunatic/widdy | MIT | 1,119 bytes | 2 stars | hexsha 7bfb0d85a9d2727156196fca82066ec05a53a3a0 ====

from collections import namedtuple
Style = namedtuple('Style', 'name fg bg')
default_pal = {
Style('inv-black', 'black', 'light gray'),
Style('green-bold', 'dark green,bold', ''),
Style('red-bold', 'dark red,bold', ''),
Style('blue-bold', 'dark blue,bold', ''),
Style('yellow-bold', 'yellow,bold', ''),
Style('magenta-bold', 'dark magenta,bold', ''),
Style('cyan-bold', 'dark cyan,bold', ''),
Style('green', 'dark green', ''),
Style('red', 'dark red', ''),
Style('blue', 'dark blue', ''),
Style('cyan', 'dark cyan', ''),
Style('magenta', 'dark magenta', ''),
Style('yellow', 'yellow', ''),
}
INV_BLACK = 'inv-black'
RED_BOLD = 'red-bold'
GREEN_BOLD = 'green-bold'
BLUE_BOLD = 'blue-bold'
MAGENTA_BOLD = 'magenta-bold'
CYAN_BOLD = 'cyan-bold'
YELLOW_BOLD = 'yellow-bold'
BLUE = 'blue'
GREEN = 'green'
RED = 'red'
MAGENTA = 'magenta'
CYAN = 'cyan'
YELLOW = 'yellow'
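
# Usage sketch: the (name, foreground, background) triples above follow urwid's
# palette format, so they can be handed to urwid directly (urwid.SolidFill is
# only a placeholder widget for illustration).
#
#   import urwid
#   palette = [tuple(style) for style in default_pal]
#   loop = urwid.MainLoop(urwid.SolidFill(), palette=palette)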

# ==== nist_tools/combine_images.py | Nepherhotep/roboarchive-broom | MIT | 3,468 bytes | hexsha 7bfc0a90c6e361e602b8b4fb5d3bb23952ab70e8 ====

import os
import random
import cv2
import numpy as np
from gen_textures import add_noise, texture, blank_image
from nist_tools.extract_nist_text import BaseMain, parse_args, display
class CombineMain(BaseMain):
SRC_DIR = 'blurred'
DST_DIR = 'combined_raw'
BG_DIR = 'backgrounds'
SMPL_DIR = 'combined_clean'
def __init__(self):
self.backgrounds = os.listdir(os.path.join(args.data_dir, self.BG_DIR))
self.backgrounds.sort()
def get_random_bg(self):
filename = random.choice(self.backgrounds)
return os.path.join(args.data_dir, self.BG_DIR, filename)
def main(self, args):
lst = self.get_sorted_files(args)
a = lst[::3]
b = lst[1::3]
c = lst[2::3]
text_files = list(zip(a, b, c))
if args.index:
text_files = text_files[args.index:args.index + 1]
for i, chunk in enumerate(text_files):
paths = [os.path.join(args.data_dir, self.SRC_DIR, p) for p in chunk]
fname = 'combined-{}.png'.format(i)
smpl_path = os.path.join(args.data_dir, self.SMPL_DIR, fname)
bg_path = self.get_random_bg()
output_path = os.path.join(args.data_dir, self.DST_DIR, fname)
print('Processing {}/{}'.format(i, len(text_files)))
self.combine_file(args, bg_path, output_path, smpl_path, *paths)
def random_bool(self):
return random.choice([True, False])
def load_text_image(self, shape, path, vert_offset, hor_offset):
layer = np.full(shape, 255)
sub_image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
h, w = sub_image.shape
layer[vert_offset:vert_offset + h, hor_offset:hor_offset + w] = sub_image
return layer
def merge_with_text(self, img, text_file_path, density, vert_offset, hor_offset=200):
a_img = 255 - self.load_text_image(img.shape, text_file_path, vert_offset, hor_offset)
img = img - (density * a_img).astype('int')
return img.clip(0, 255)
def combine_file(self, args, bg_path, output_path, smpl_path, *text_paths):
# open files and invert text
raw_image = cv2.imread(bg_path, cv2.IMREAD_GRAYSCALE).astype('int')
h, w = raw_image.shape
# generate random noise
noise = 160 - add_noise(texture(blank_image(background=125, height=4096, width=4096),
sigma=4), sigma=10).astype('float')[:h, :w]
noise = (random.random() * noise).astype('int')
raw_image = raw_image + noise
raw_image = raw_image.clip(0, 255)
# random horizontal flip
if self.random_bool():
raw_image = cv2.flip(raw_image, 0)
# random vertical flip
if self.random_bool():
raw_image = cv2.flip(raw_image, 1)
# create a clean training image
clean_image = np.full(raw_image.shape, 255)
# save reference to raw image
for i, path in enumerate(text_paths):
v_offset = 100 + i * 1250
density = 0.2 + random.random() * 0.3
raw_image = self.merge_with_text(raw_image, path, density, v_offset)
clean_image = self.merge_with_text(clean_image, path, 1, v_offset)
cv2.imwrite(output_path, raw_image)
cv2.imwrite(smpl_path, clean_image)
if __name__ == '__main__':
random.seed(123)
args = parse_args()
CombineMain().main(args)
print('done')

# ==== auto_funcs/look_for_date.py | rhysrushton/testauto | MIT | 1,205 bytes | hexsha 7bfe07fff56233f17c17498061812fd747efa684 ====

# this function looks for either the encounter date or the patient's date of birth
# so that we can avoid duplicate encounters.
import time
def look_for_date (date_string, driver):
print('looking for date')
date_present = False
for div in driver.find_elements_by_class_name('card.my-4.patient-card.assessment-reg-patient'):
if date_string in div.get_attribute('innerHTML'):
print('date here')
date_present = True
#print(div.get_attribute('innerHTML'))
break
return date_present
# this will find the patient card div matching the date and click its new-encounter button.
def find_date_click (date_string, driver):
print('getting div to add encounter to.')
for div in driver.find_elements_by_class_name('card.my-4.patient-card.assessment-reg-patient'):
if date_string in div.get_attribute('innerHTML'):
#print("We here" )
#print(div.get_attribute('innerHTML'))
#time.sleep(20)
new_encounter_button = div.find_element_by_class_name('btn.btn-primary.mr-4')
new_encounter_button.click()
#break
return
print('hey')
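
# Usage sketch (assumes an authenticated Selenium driver already on the
# patient-list page; the date string is a placeholder):
#
#   if not look_for_date('01/01/2021', driver):
#       find_date_click('01/01/2021', driver)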

# ==== app/api/apis/token_api.py | boceckts/ideahub | BSD-3-Clause | 1,047 bytes | hexsha 7bfefe9a585dfb51817f970316b20305a606310a ====

from flask import g
from flask_restplus import Resource, marshal
from app import db
from app.api.namespaces.token_namespace import token_ns, token
from app.api.security.authentication import basic_auth, token_auth
@token_ns.route('', strict_slashes=False)
@token_ns.response(401, 'Unauthenticated')
@token_ns.response(500, 'Internal Server Error')
class TokensResource(Resource):
@token_ns.response(200, 'Token successfully generated')
@token_ns.doc(security='Basic Auth')
@basic_auth.login_required
def post(self):
"""Generate a new bearer token"""
g.current_user.generate_auth_token()
db.session.commit()
token_obj = {'token': g.current_user.token,
'expires_on': g.current_user.token_expiration}
return marshal(token_obj, token), 200
@token_ns.response(204, 'Token successfully revoked')
@token_auth.login_required
def delete(self):
"""Revoke a token"""
g.current_user.revoke_token()
db.session.commit()
return '', 204
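
# Usage sketch with requests (assumes the namespace is mounted at /tokens;
# host and credentials are placeholders):
#
#   import requests
#   r = requests.post('http://localhost:5000/tokens', auth=('alice', 'secret'))
#   token = r.json()['token']
#   requests.delete('http://localhost:5000/tokens',
#                   headers={'Authorization': 'Bearer ' + token})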

# ==== docker_multiarch/tool.py | CynthiaProtector/helo (also greenpea0104/incubator-mxnet) | Apache-2.0 | 4,816 bytes | 399 stars | hexsha d0003ec058228de9777e23294e4fbffc93d7d212 ====

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Multi arch dockerized build tool.
"""
__author__ = 'Pedro Larroy'
__version__ = '0.1'
import os
import sys
import subprocess
import logging
import argparse
from subprocess import check_call
import glob
import re
class CmdResult(object):
def __init__(self, std_out, std_err, status_code):
self.std_out = std_out
self.std_err = std_err
self.status_code = status_code if status_code is not None else 0
def __str__(self):
return "%s, %s, %s" % (self.std_out, self.std_err, self.status_code)
def run(cmd, fail_on_error=True):
logging.debug("executing shell command:\n" + cmd)
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
std_out, std_err = proc.communicate()
if fail_on_error:
if proc.returncode != 0:
            logging.warning('Error running command: {}'.format(cmd))
assert proc.returncode == 0, std_err
res = CmdResult(std_out.decode('utf-8'), std_err.decode('utf-8'), proc.returncode)
return res
def mkdir_p(d):
rev_path_list = list()
head = d
while len(head) and head != os.sep:
rev_path_list.append(head)
(head, tail) = os.path.split(head)
rev_path_list.reverse()
for p in rev_path_list:
try:
os.mkdir(p)
except OSError as e:
if e.errno != 17:
raise
def get_arches():
"""Get a list of architectures given our dockerfiles"""
dockerfiles = glob.glob("Dockerfile.build.*")
dockerfiles = list(filter(lambda x: x[-1] != '~', dockerfiles))
arches = list(map(lambda x: re.sub(r"Dockerfile.build.(.*)", r"\1", x), dockerfiles))
arches.sort()
return arches
def sync_source():
logging.info("Copying sources")
check_call(["rsync","-a","--delete","--exclude=\".git/\"",'--exclude=/docker_multiarch/',"../","mxnet"])
def get_docker_tag(arch):
return "mxnet.build.{0}".format(arch)
def get_dockerfile(arch):
return "Dockerfile.build.{0}".format(arch)
def build(arch):
"""Build the given architecture in the container"""
assert arch in get_arches(), "No such architecture {0}, Dockerfile.build.{0} not found".format(arch)
logging.info("Building for target platform {0}".format(arch))
check_call(["docker", "build",
"-f", get_dockerfile(arch),
"-t", get_docker_tag(arch),
"."])
def collect_artifacts(arch):
"""Collects the artifacts built inside the docker container to the local fs"""
def artifact_path(arch):
return "{}/build/{}".format(os.getcwd(), arch)
logging.info("Collect artifacts from build in {0}".format(artifact_path(arch)))
mkdir_p("build/{}".format(arch))
# Mount artifact_path on /$arch inside the container and copy the build output so we can access
# locally from the host fs
check_call(["docker","run",
"-v", "{}:/{}".format(artifact_path(arch), arch),
get_docker_tag(arch),
"bash", "-c", "cp -r /work/build/* /{}".format(arch)])
def main():
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)-15s %(message)s')
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch",
help="Architecture",
type=str)
parser.add_argument("-l", "--list_arch",
help="List architectures",
action='store_true')
args = parser.parse_args()
if args.list_arch:
arches = get_arches()
print(arches)
elif args.arch:
sync_source()
build(args.arch)
collect_artifacts(args.arch)
else:
arches = get_arches()
logging.info("Building for all architectures: {}".format(arches))
logging.info("Artifacts will be produced in the build/ directory.")
sync_source()
for arch in arches:
build(arch)
collect_artifacts(arch)
return 0
if __name__ == '__main__':
sys.exit(main())
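
# Example invocations (architectures are discovered from Dockerfile.build.*):
#   ./tool.py -l          # list available architectures
#   ./tool.py -a <arch>   # sync sources, build one architecture, collect artifacts
#   ./tool.py             # build every architecture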

# ==== 02module/module_containers.py | mayi140611/szzy_pytorch | Apache-2.0 | 3,283 bytes | hexsha d003fb1f6605d874e72c3a666281e62431d7b2a8 ====

# -*- coding: utf-8 -*-
"""
# @file name : module_containers.py
# @author : tingsongyu
# @date : 2019-09-20 10:08:00
# @brief : model containers: Sequential, ModuleList, ModuleDict
"""
import torch
import torchvision
import torch.nn as nn
from collections import OrderedDict
# ============================ Sequential
class LeNetSequential(nn.Module):
def __init__(self, classes):
super(LeNetSequential, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 6, 5),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, 5),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),)
self.classifier = nn.Sequential(
nn.Linear(16*5*5, 120),
nn.ReLU(),
nn.Linear(120, 84),
nn.ReLU(),
nn.Linear(84, classes),)
def forward(self, x):
x = self.features(x)
x = x.view(x.size()[0], -1)
x = self.classifier(x)
return x
class LeNetSequentialOrderDict(nn.Module):
def __init__(self, classes):
super(LeNetSequentialOrderDict, self).__init__()
self.features = nn.Sequential(OrderedDict({
'conv1': nn.Conv2d(3, 6, 5),
'relu1': nn.ReLU(inplace=True),
'pool1': nn.MaxPool2d(kernel_size=2, stride=2),
'conv2': nn.Conv2d(6, 16, 5),
'relu2': nn.ReLU(inplace=True),
'pool2': nn.MaxPool2d(kernel_size=2, stride=2),
}))
self.classifier = nn.Sequential(OrderedDict({
'fc1': nn.Linear(16*5*5, 120),
'relu3': nn.ReLU(),
'fc2': nn.Linear(120, 84),
'relu4': nn.ReLU(inplace=True),
'fc3': nn.Linear(84, classes),
}))
def forward(self, x):
x = self.features(x)
x = x.view(x.size()[0], -1)
x = self.classifier(x)
return x
# net = LeNetSequential(classes=2)
# net = LeNetSequentialOrderDict(classes=2)
#
# fake_img = torch.randn((4, 3, 32, 32), dtype=torch.float32)
#
# output = net(fake_img)
#
# print(net)
# print(output)
# ============================ ModuleList
class ModuleList(nn.Module):
def __init__(self):
super(ModuleList, self).__init__()
self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(20)])
def forward(self, x):
for i, linear in enumerate(self.linears):
x = linear(x)
return x
# net = ModuleList()
#
# print(net)
#
# fake_data = torch.ones((10, 10))
#
# output = net(fake_data)
#
# print(output)
# ============================ ModuleDict
class ModuleDict(nn.Module):
def __init__(self):
super(ModuleDict, self).__init__()
self.choices = nn.ModuleDict({
'conv': nn.Conv2d(10, 10, 3),
'pool': nn.MaxPool2d(3)
})
self.activations = nn.ModuleDict({
'relu': nn.ReLU(),
'prelu': nn.PReLU()
})
def forward(self, x, choice, act):
x = self.choices[choice](x)
x = self.activations[act](x)
return x
net = ModuleDict()
fake_img = torch.randn((4, 10, 32, 32))
output = net(fake_img, 'conv', 'relu')
print(output)
# 4 AlexNet
alexnet = torchvision.models.AlexNet()
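
# A quick sketch of the practical difference between the two Sequential
# variants above: plain Sequential exposes sub-modules by index only, while
# the OrderedDict version also names them.
#
# net1 = LeNetSequential(classes=2)
# net2 = LeNetSequentialOrderDict(classes=2)
# print(net1.features[0])       # first conv layer, by position
# print(net2.features.conv1)    # the same layer, by name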

# ==== tests/test_sample_images.py | olavosamp/semiauto-video-annotation | MIT | 2,124 bytes | hexsha d00408e74248e82eceb28ea83155d9b67a8bad9f ====

import pytest
import shutil as sh
import pandas as pd
from pathlib import Path
from glob import glob
import libs.dirs as dirs
from libs.iteration_manager import SampleImages
from libs.utils import copy_files, replace_symbols
class Test_SampleImages():
def test_setup_SampleImages(self):
# Metatest if test assets are in place
sourceFolder = Path(dirs.test_assets) / "dataset_test"
setupImageList = glob(str(sourceFolder) + "/**.jpg", recursive=True)
assert len(setupImageList) == 2666
def setup_sample_from_folder(self):
self.sourceFolder = Path(dirs.test_assets) / "dataset_test"
self.sampleImagesFolder = Path(dirs.test) / "test_sample_images"
self.destFolderSFF = self.sampleImagesFolder / "test_sample_from_folder"
# Guarantee that the destination folder was created for this test only
if self.destFolderSFF.is_dir():
self.teardown_sample_from_folder()
dirs.create_folder(self.destFolderSFF)
def teardown_sample_from_folder(self):
sh.rmtree(self.destFolderSFF)
def test_sample_from_folder(self):
self.setup_sample_from_folder()
assert self.destFolderSFF.is_dir()
self.sampler = SampleImages(self.sourceFolder, self.destFolderSFF)
# Test image sampling and copying
self.sampler.sample(percentage=0.01)
globString = str(self.sampler.imageFolder) + "/**.jpg"
globString = replace_symbols(globString)
imageList = glob(globString, recursive=True)
assert len(imageList) == 26
# Test saving samples to index
self.outIndexPathSFF = self.sampler.imageFolder / "test_index_sample_from_file.csv"
print("Saving index to\n", self.outIndexPathSFF)
self.sampler.save_to_index(indexPath=self.outIndexPathSFF)
self.outIndexSFF = pd.read_csv(self.outIndexPathSFF)
assert self.outIndexPathSFF.is_file()
assert self.outIndexSFF.shape[0] == 26
self.teardown_sample_from_folder()
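
# Run from the project root with: pytest tests/test_sample_images.py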

# ==== Unit 2/2.16/2.16.5 Black and White Squares.py | shashwat73/cse | MIT | 284 bytes | 1 star | hexsha d00676794b322b39517d8082c8b83c61f4836359 ====

from turtle import *  # CodeHS exposes these drawing functions implicitly; the explicit import makes the script runnable standalone

speed(0)
def make_square(i):
if i % 2 == 0:
begin_fill()
for i in range(4):
forward(25)
left(90)
end_fill()
penup()
setposition(-100, 0)
pendown()
for i in range (6):
pendown()
make_square(i)
penup()
forward(35)

# ==== Q1/read.py | arpanmangal/Regression | MIT | 771 bytes | hexsha d0075df444476cd69e92bd3d5f61f5eff5a35b08 ====

"""
Module for reading data from 'linearX.csv' and 'linearY.csv'
"""
import numpy as np
def loadData (x_file="ass1_data/linearX.csv", y_file="ass1_data/linearY.csv"):
"""
Loads the X, Y matrices.
Splits into training, validation and test sets
"""
X = np.genfromtxt(x_file)
Y = np.genfromtxt(y_file)
Z = [X, Y]
Z = np.c_[X.reshape(len(X), -1), Y.reshape(len(Y), -1)]
np.random.shuffle(Z)
# Partition the data into three sets
size = len(Z)
training_size = int(0.8 * size)
validation_size = int(0.1 * size)
test_size = int(0.1 * size)
    training_Z = Z[0:training_size]
    validation_Z = Z[training_size:training_size+validation_size]
    test_Z = Z[training_size+validation_size:]
    # NB: the three splits above are computed but never returned; the caller
    # receives the full shuffled X and Y columns.
    return (Z[:,0], Z[:,1])
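
# Usage sketch (relies on the default csv paths above):
#   X, Y = loadData()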

# ==== utils/get_dic_question_id.py | Pxtri2156/M4C_inforgraphicsVQA | BSD-3-Clause | 1,172 bytes | 1 star | hexsha d008c5731d8fedc349d8c20f7b0bc4f197dfbb75 ====

import argparse
import json
def create_dic_question_id(path):
set_name = ['train', 'val', 'test']
dic_qid = {}
for i in range(len(set_name)):
print("Processing, ", set_name[i])
annot_path = path.replace("change", set_name[i])
annot_fi = open(annot_path)
data = json.load(annot_fi)
data = data['data']
for sample in data:
questionId = sample['questionId']
img_id = sample['image'].split('/')[-1].split('.')[0]
new_id = int(str(i+1) + img_id + '00')
while new_id in dic_qid.keys():
new_id += 1
dic_qid[questionId] = new_id
dic_qid[new_id] = questionId
annot_fi.close()
return dic_qid
def main(args):
dic_qid = create_dic_question_id(args.path)
print(dic_qid)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--path",
default="/mlcv/Databases/VN_InfographicVQA/change/VietInfographicVQA_change_v1.0.json",
type=str,
)
return parser.parse_args()
if __name__ == "__main__":
args = get_parser()
main(args)

# ==== admin_reskin/templatetags/sort_menu_items.py | cuongnb14/django-admin-reskin | MIT | 1,863 bytes | hexsha d00b60aaa781272c43b31aa8c0398a217c133f07 ====

from django import template
from django.conf import settings
from ..models import Bookmark
register = template.Library()
RESKIN_MENU_APP_ORDER = settings.RESKIN_MENU_APP_ORDER
RESKIN_MENU_MODEL_ORDER = settings.RESKIN_MENU_MODEL_ORDER
RESKIN_APP_ICON = settings.RESKIN_APP_ICON
@register.filter
def sort_apps(apps):
max_index = len(apps)
for app in apps:
if app['app_label'] == 'auth':
app['name'] = 'Groups'
if RESKIN_APP_ICON.get(app['app_label']):
app['icon'] = RESKIN_APP_ICON.get(app['app_label'])
else:
app['icon'] = 'fas fa-layer-group'
apps.sort(
key=lambda x:
RESKIN_MENU_APP_ORDER.index(x['app_label'])
if x['app_label'] in RESKIN_MENU_APP_ORDER
else max_index
)
bookmarks = Bookmark.objects.filter(is_active=True).order_by('order')
bookmarks_model = []
for bookmark in bookmarks:
item = {
'name': bookmark.name,
'object_name': bookmark.name,
'perms': {'add': False, 'change': False, 'delete': False, 'view': True},
'admin_url': bookmark.url,
'view_only': True,
}
bookmarks_model.append(item)
if bookmarks_model:
bookmark_app = {
'name': 'Bookmark',
'icon': 'fas fa-bookmark',
'app_label': 'admin_reskin_bookmark',
'app_url': '/admin/admin_reskin/bookmark',
'has_module_perms': True,
'models': bookmarks_model,
}
apps = [bookmark_app] + apps
return apps
@register.filter
def sort_models(models):
max_index = len(models)
models.sort(
key=lambda x:
RESKIN_MENU_MODEL_ORDER.index(x['object_name'])
if x['object_name'] in RESKIN_MENU_MODEL_ORDER
else max_index
)
return models
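
# Usage sketch inside an admin template (variable names follow Django's admin
# app_list convention):
#
#   {% load sort_menu_items %}
#   {% for app in app_list|sort_apps %}
#     {% for model in app.models|sort_models %} ... {% endfor %}
#   {% endfor %}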

# ==== fda/db.py | tsbischof/fda510k | BSD-2-Clause | 1,537 bytes | hexsha d00bef4cf659464b2641f10ea3856a63d0a1dab5 ====

import os
import io
import urllib.request
import zipfile
import pandas
import fda
def get_510k_db(root_dir=os.path.join(fda.root_db_dir, "510k"),
force_download=False):
if not os.path.exists(root_dir):
os.makedirs(root_dir)
db_urls = [
"http://www.accessdata.fda.gov/premarket/ftparea/pmnlstmn.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn96cur.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn9195.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn8690.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn8185.zip",
"http://www.accessdata.fda.gov/premarket/ftparea/pmn7680.zip"
]
db = pandas.concat(map(lambda url: load_510k_db(url, root_dir,
force_download=force_download),
db_urls))
db = db.drop_duplicates().reset_index().drop("index", axis=1)
return(db)
def load_510k_db(url, root_dir, force_download=False):
db_filename = os.path.join(root_dir, os.path.basename(url))
if force_download or not os.path.exists(db_filename):
urllib.request.urlretrieve(url, db_filename)
frames = list()
with zipfile.ZipFile(db_filename) as db:
for filename in db.filelist:
raw = db.read(filename).decode("iso8859_2")
data = pandas.read_csv(io.StringIO(raw), delimiter="|")
frames.append(data)
    return pandas.concat(frames)
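# Hypothetical usage, assuming fda.root_db_dir points at a writable directory:
#   db = get_510k_db()                     # downloads on first call, cached after
#   db = get_510k_db(force_download=True)  # re-fetch the archives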
| 34.155556
| 84
| 0.63175
| 200
| 1,537
| 4.71
| 0.35
| 0.044586
| 0.10828
| 0.127389
| 0.334395
| 0.334395
| 0.334395
| 0.292994
| 0
| 0
| 0
| 0.030848
| 0.240729
| 1,537
| 44
| 85
| 34.931818
| 0.77635
| 0
| 0
| 0
| 0
| 0
| 0.243982
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.176471
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d00e57b669e23409bb8d461e39ac2d007f53bbe7
| 4,657
|
py
|
Python
|
inpr/get_num_plate.py
|
patrickn699/INPR
|
737a3454a4b83e51e50937bb227ac7f8bc01d0e9
|
[
"MIT"
] | 2
|
2021-09-25T06:00:40.000Z
|
2021-10-14T13:24:43.000Z
|
inpr/get_num_plate.py
|
patrickn699/INPR
|
737a3454a4b83e51e50937bb227ac7f8bc01d0e9
|
[
"MIT"
] | null | null | null |
inpr/get_num_plate.py
|
patrickn699/INPR
|
737a3454a4b83e51e50937bb227ac7f8bc01d0e9
|
[
"MIT"
] | 1
|
2022-01-27T11:39:10.000Z
|
2022-01-27T11:39:10.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import re as r
import easyocr
#import os
#os.environ['KMP_DUPLICATE_LIB_OK']='True'
reader = easyocr.Reader(['en'])  # 'reader', not 're', to avoid confusion with the regex module (imported as r)
#pl = []
chk = []
a = ''
a1 = ''
#pl = []
#sym = ['{', ']', '[', '}']
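# Pipeline: detect plate bounding boxes -> crop -> EasyOCR -> clean the text
# and validate the two-letter prefix against Indian state/UT codes.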
class get_number_plate:
def get_bboxes_from(self, output):
""" returns list of bboxes """
return output["instances"].__dict__['_fields']['pred_boxes'].__dict__['tensor']
def crop(self, bbox, in_img):
""" bbox is a list with xmin, ymin, xmax, ymax """
xmin, ymin, xmax, ymax = bbox
cropped_im = in_img[int(ymin):int(ymax), int(xmin):int(xmax)]
return cropped_im
def ocr(self, imm):
        op = reader.readtext(imm, add_margin=0.1, canvas_size=960000)
# stt = []
for i in range(len(op)):
for j in range(len(op[i])):
if type(op[i][j]) == str:
return (op[i][j].replace(" ", ""))
def remove_un(self, arr):
pl = []
sym = ['{', ']', '[', '}','.','/','(',')','-']
        arr = list(arr)
        # filter into a new list: calling arr.remove() while iterating over
        # arr skips elements
        arr = [ch for ch in arr if ch != '.']
        arr = ''.join(arr)
#print(arr)
        if len(arr) >= 9:  # >= 9 already covers == 10
            # again filter rather than remove-in-place to avoid skipping characters
            arr = [ch for ch in list(arr) if ch not in sym]
            arr = ''.join(arr)
#print (arr)
return arr
def get_num_plate(self, lis, show_plates=False):
#print(lis)
#pl = []
sta = ['AP','AR','AS','BR','CG','GA','GJ','HR' ,'HP' ,'JK','JH','KA','KL','MP','MH','MN','ML','MZ','NL' ,'OD','PB' ,'RJ','SK','TN','TS','TR','UA','UK','UP','WB','AN','CH','DN','DD','DL' ,'LD','PY']
opp = []
#sym = ['{', ']', '[', '}']
#print(lis) # prints list of cropped num plate imgs
for p in range(len(lis)):
#print(lis[p])
pl1 = self.remove_un(lis[p])
#print(pl1)
pattern_1 = r.compile(r'\w\w\d\d\w\w\d\d\d\d')
pattern_2 = r.compile(r'\w\w\d\d\w\d\d\d\d')
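            # Indian plate formats: two state letters, two district digits,
            # one or two series letters, then four digits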
global a
if r.search(pattern_1, pl1) or r.search(pattern_2, pl1):
a = 'pattern matched!!'
else:
a = 'pattern not matched!!'
global a1
a1 = pl1
#print(a1)
            for st in sta:  # 'st' avoids clobbering the outer loop variable 'p'
                if st == pl1[:2].upper():
                    #print('correct state')
                    opp.append(pl1.upper())
#print(opp) # prints a list of cleaned number plates
try:
opp.remove(a1)
        except ValueError:
            pass  # a1 was not in opp; nothing to remove
#print(set(opp))
return set(opp)
def disp(self, im):
oc = self.ocr(im)
#plt.imshow(im) # shows cropped num plate imgs
plt.show()
plt.close()
return oc
def run_easy_ocr(self, output, im, show_plates=False):
bboxes = self.get_bboxes_from(output)
#print(bboxes)
for bbox in bboxes:
crop_im = self.crop(bbox, in_img=im)
# display cropped image
ocr_op = self.disp(crop_im)
chk.append(ocr_op)
nump = self.get_num_plate(chk,show_plates = show_plates)
#print(nump) # prints list of num plates
return nump
| 26.01676
| 205
| 0.428817
| 613
| 4,657
| 3.176183
| 0.265905
| 0.016436
| 0.012327
| 0.012327
| 0.29019
| 0.273241
| 0.25886
| 0.25886
| 0.25886
| 0.25886
| 0
| 0.015504
| 0.390595
| 4,657
| 178
| 206
| 26.162921
| 0.670543
| 0.116384
| 0
| 0.041667
| 0
| 0
| 0.066263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097222
| false
| 0
| 0.055556
| 0
| 0.263889
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d00eac7a88a79181fbec1ff905386e4e480a89db
| 3,632
|
py
|
Python
|
client/nodes/detector_docker/sign_filter_node.py
|
CanboYe/BusEdge
|
2e53e1d1d82559fc3e9f0029b2f0faf4e356b210
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2
|
2021-08-17T14:14:28.000Z
|
2022-02-02T02:09:33.000Z
|
client/nodes/detector_docker/sign_filter_node.py
|
cmusatyalab/gabriel-BusEdge
|
528a6ee337882c6e709375ecd7ec7e201083c825
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
client/nodes/detector_docker/sign_filter_node.py
|
cmusatyalab/gabriel-BusEdge
|
528a6ee337882c6e709375ecd7ec7e201083c825
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2021-09-01T16:18:29.000Z
|
2021-09-01T16:18:29.000Z
|
# SPDX-FileCopyrightText: 2021 Carnegie Mellon University
#
# SPDX-License-Identifier: Apache-2.0
import logging
import cv2
from busedge_protocol import busedge_pb2
from gabriel_protocol import gabriel_pb2
from sign_filter import SignFilter
import argparse
import multiprocessing
import time

import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import CompressedImage, Image, NavSatFix
from std_msgs.msg import UInt8MultiArray

logger = logging.getLogger(__name__)
DEFAULT_SOURCE_NAME = "sign_filter3"
CUR_GPS = NavSatFix()
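# most recent GPS fix; updated by gps_callback and stamped onto the engine
# fields of every outgoing frame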
def run_node(source_name):
cam_id = source_name[-1]
camera_name = "camera" + cam_id
rospy.init_node(camera_name + "_sign_filter_node")
rospy.loginfo("Initialized node sign_filter for " + camera_name)
model_dir = "./model/ssd_mobilenet_v1_mtsd_hunter/saved_model"
model = SignFilter(model_dir)
pub = rospy.Publisher(source_name, UInt8MultiArray, queue_size=1)
image_sub = rospy.Subscriber(
camera_name + "/image_raw/compressed",
CompressedImage,
img_callback,
callback_args=(model, camera_name, pub),
queue_size=1,
buff_size=2 ** 24,
)
gps_sub = rospy.Subscriber("/fix", NavSatFix, gps_callback, queue_size=1)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
def img_callback(image, args):
global CUR_GPS
model = args[0]
camera_name = args[1]
pub = args[2]
camera_id = int(camera_name[-1])
bridge = CvBridge()
frame = bridge.compressed_imgmsg_to_cv2(
image, desired_encoding="passthrough"
) # BGR images
frame = frame[:, :, ::-1] # BGR to RGB
frame_copy = frame.copy()
# FILTER
# send_flag = model.send(frame_copy, show_flag = True)
min_score_thresh = 0.75
output_dict = model.detect(frame_copy, min_score_thresh)
send_flag = output_dict["num_detections"] > 0
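    # publish only frames in which at least one sign was detected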
    if send_flag:
_, jpeg_frame = cv2.imencode(".jpg", frame)
input_frame = gabriel_pb2.InputFrame()
input_frame.payload_type = gabriel_pb2.PayloadType.IMAGE
        input_frame.payloads.append(jpeg_frame.tobytes())  # tostring() is a deprecated numpy alias of tobytes()
engine_fields = busedge_pb2.EngineFields()
engine_fields.gps_data.latitude = CUR_GPS.latitude
engine_fields.gps_data.longitude = CUR_GPS.longitude
engine_fields.gps_data.altitude = CUR_GPS.altitude
secs = image.header.stamp.secs
nsecs = image.header.stamp.nsecs
time_stamps = "_{:0>10d}_{:0>9d}".format(secs, nsecs)
image_filename = camera_name + time_stamps + ".jpg"
engine_fields.image_filename = image_filename
input_frame.extras.Pack(engine_fields)
serialized_message = input_frame.SerializeToString()
rospy.loginfo(
"Sent image msg with size {:.2f} KB".format(len(serialized_message) / 1024)
)
pub_data = UInt8MultiArray()
pub_data.data = serialized_message
pub.publish(pub_data)
time.sleep(0.1)
def gps_callback(data):
global CUR_GPS
if data.status.status == -1:
rospy.logdebug("Sign filter node cannot get valid GPS data")
else:
CUR_GPS = data
if __name__ == "__main__":
# run_node('camera3')
parser = argparse.ArgumentParser()
parser.add_argument(
"-n",
"--source-name",
nargs="+",
default=[DEFAULT_SOURCE_NAME],
help="Set source name for this pipeline",
)
args = parser.parse_args()
for source in args.source_name:
multiprocessing.Process(target=run_node, args=(source,)).start()
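# Example (hypothetical source names): `python sign_filter_node.py -n sign_filter3 sign_filter4`
# starts one detector process per camera-numbered source.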
| 28.155039
| 87
| 0.681718
| 460
| 3,632
| 5.104348
| 0.382609
| 0.034072
| 0.012777
| 0.024276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017302
| 0.220264
| 3,632
| 128
| 88
| 28.375
| 0.811794
| 0.071586
| 0
| 0.043956
| 0
| 0
| 0.096371
| 0.020524
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032967
| false
| 0.021978
| 0.131868
| 0
| 0.164835
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d01001102fad7912a59abc8be03d31f0294830cb
| 3,095
|
py
|
Python
|
collector/cli.py
|
mvinii94/aws-lambda-log-collector
|
682850f282b70aa18663699c7e5e32bc4f6a8be1
|
[
"MIT"
] | 4
|
2019-11-13T12:49:31.000Z
|
2020-11-19T06:59:45.000Z
|
collector/cli.py
|
mvinii94/aws-lambda-log-collector
|
682850f282b70aa18663699c7e5e32bc4f6a8be1
|
[
"MIT"
] | null | null | null |
collector/cli.py
|
mvinii94/aws-lambda-log-collector
|
682850f282b70aa18663699c7e5e32bc4f6a8be1
|
[
"MIT"
] | null | null | null |
import click
from pathlib import Path
# Local imports
from .__init__ import *
from .utils import parse_time, create_dir, write_file, get_profiles, compress, INVALID_PROFILE, INVALID_DATES
from .lambda_log_collector import LambdaLogCollector
@click.command()
@click.version_option()
@click.option("--function-name", "-f", type=str, help="i.e. HelloWorld", required=True)
@click.option("--profile", "-p", type=str, help="AWS profile name (i.e. dev)", required=True)
@click.option("--region", "-r", type=str, help="AWS region (i.e. eu-west-1)", required=True)
@click.option("--output", "-o", type=click.Path(dir_okay=True, resolve_path=True), help="i.e. /tmp/", required=True)
@click.option("--start-time", "-s", type=str, help="2019-10-30T12:00:00", required=True)
@click.option("--end-time", "-e", type=str, help="2019-10-31T12:00:00", required=True)
@click.option("--pattern", type=str, help="ERROR", required=True)
@click.option("--log-level", type=click.Choice(['INFO', 'ERROR', 'DEBUG']), help='logging level', default='INFO')
def cli(function_name, profile, region, output, start_time, end_time, pattern, log_level):
define_log_level(log_level)
# get start and end time in epoch
epoch_start_time = parse_time(start_time)
epoch_end_time = parse_time(end_time)
if epoch_start_time > epoch_end_time:
raise Exception(INVALID_DATES)
available_profiles = get_profiles()
if profile not in available_profiles:
raise Exception(INVALID_PROFILE % available_profiles)
# initiate LambdaLogCollector class
lambda_log_collector = LambdaLogCollector(region, profile, function_name, epoch_start_time, epoch_end_time, pattern)
# get lambda function configuration
lambda_configuration = lambda_log_collector.get_function_configuration()
if lambda_configuration is not False:
# find CloudWatch Logs between start_time and end_time
streams = lambda_log_collector.find_log_streams()
# collect logs from filtered log streams
logs = lambda_log_collector.collect_logs()
        # replace ':' with '_' in the timestamps (Windows does not allow ':' in filenames)
start_time = start_time.replace(":", "_")
end_time = end_time.replace(":", "_")
# create output dir
output_path = Path(output)
new_dir_name = function_name + "-" + start_time + "-" + end_time
new_dir = Path(output_path / new_dir_name)
create_dir(new_dir)
# write lambda config file
lambda_fn_config_file = function_name + "-config.json"
write_file(new_dir, lambda_fn_config_file, lambda_configuration)
# write streams file
if streams is not False:
lambda_fn_streams_file = function_name + "-streams-" + start_time + "-" + end_time + ".json"
write_file(new_dir, lambda_fn_streams_file, streams)
# write logs file
if logs is not False:
lambda_fn_logs_file = function_name + "-logs-" + start_time + "-" + end_time + ".json"
write_file(new_dir, lambda_fn_logs_file, logs)
compress(new_dir, new_dir_name)
| 41.824324
| 120
| 0.695315
| 416
| 3,095
| 4.894231
| 0.245192
| 0.044695
| 0.058448
| 0.079077
| 0.152259
| 0.107564
| 0.055501
| 0.04224
| 0.04224
| 0.04224
| 0
| 0.011462
| 0.182553
| 3,095
| 73
| 121
| 42.39726
| 0.793281
| 0.108562
| 0
| 0
| 0
| 0
| 0.106298
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.113636
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d01640b2fef264dfd84ea3721e0ecaa46ce8a2a7
| 2,072
|
py
|
Python
|
common/data_helper.py
|
ThisIsSoSteve/Project-Tensorflow-Cars
|
6cdfedceffa56ac0885ce2253dae4549859b2dbf
|
[
"MIT"
] | 1
|
2017-05-11T06:01:46.000Z
|
2017-05-11T06:01:46.000Z
|
common/data_helper.py
|
ThisIsSoSteve/Project-Tensorflow-Cars
|
6cdfedceffa56ac0885ce2253dae4549859b2dbf
|
[
"MIT"
] | 2
|
2017-05-11T10:03:16.000Z
|
2017-06-21T18:25:00.000Z
|
common/data_helper.py
|
ThisIsSoSteve/Project-Tensorflow-Cars
|
6cdfedceffa56ac0885ce2253dae4549859b2dbf
|
[
"MIT"
] | null | null | null |
import glob
import pickle
from shutil import copy
from tqdm import tqdm
class DataHelper:
"""
helpers to transform and move data around add more as needed.
"""
def copy_specific_training_data_to_new_folder(self, source_folder_path, destination_folder_path,
track_name, track_variation):
"""
Copies filtered raw data from source_folder_path to destination_folder_path.
Keyword arguments:
        source_folder_path -- where the data will be read from
destination_folder_path -- where the filtered data will be saved
track_name -- filter by track name
track_variation -- filter by track variation (e.g short)
"""
listing = glob.glob(source_folder_path + '/*.png')
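        # captures are stored as paired files: <stem>-image.png and <stem>-data.pkl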
for filename in tqdm(listing):
filename = filename.replace('\\', '/')
filename = filename.replace('-image.png', '')
with open(filename + '-data.pkl', 'rb') as data:
project_cars_state = pickle.load(data)
#controller_state = pickle.load(data)
            # keep only records for the requested track and variation
            # (the fields come back as bytes; decode instead of stripping
            # characters -- replace("b", "") would also delete every letter
            # 'b' inside a track name)
            current_track = project_cars_state.mTrackLocation
            if isinstance(current_track, bytes):
                current_track = current_track.decode()
            current_track_variation = project_cars_state.mTrackVariation
            if isinstance(current_track_variation, bytes):
                current_track_variation = current_track_variation.decode()
            # skip unless BOTH match (the original 'and' only skipped when
            # both mismatched, letting wrong variations through)
            if current_track != track_name or current_track_variation != track_variation:
                continue
copy(filename + '-data.pkl', destination_folder_path)
copy(filename + '-image.png', destination_folder_path)
#copy_specific_training_data_to_new_folder('F:/Project_Cars_Data/Raw',
#'F:/Project_Cars_Data/Watkins Glen International - Short Circuit',
# 'Watkins Glen International', 'Short Circuit')
# b'Watkins Glen International'
# b'Short Circuit'
# b'Watkins Glen International'
# b'Grand Prix'
| 35.118644
| 100
| 0.639479
| 241
| 2,072
| 5.278008
| 0.360996
| 0.070755
| 0.082547
| 0.037736
| 0.16195
| 0.11478
| 0.11478
| 0
| 0
| 0
| 0
| 0
| 0.265444
| 2,072
| 59
| 101
| 35.118644
| 0.835742
| 0.393822
| 0
| 0
| 0
| 0
| 0.045416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d017493522e0d4e934860f36259d7cd6e8ff4de0
| 1,009
|
py
|
Python
|
swifitool/faults/flp.py
|
chenoya/swifi-tool
|
9386fab56e12d83cbe14024b5d5edac0fd1e3baf
|
[
"MIT"
] | null | null | null |
swifitool/faults/flp.py
|
chenoya/swifi-tool
|
9386fab56e12d83cbe14024b5d5edac0fd1e3baf
|
[
"MIT"
] | null | null | null |
swifitool/faults/flp.py
|
chenoya/swifi-tool
|
9386fab56e12d83cbe14024b5d5edac0fd1e3baf
|
[
"MIT"
] | null | null | null |
from faults.faultmodel import FaultModel
from utils import *
class FLP(FaultModel):
name = 'FLP'
docs = ' FLP addr significance \t flip one specific bit'
nb_args = 2
def __init__(self, config, args):
super().__init__(config, args)
self.addr = parse_addr(args[0])
check_or_fail(len(self.addr) == 1, "FLP does not support address range")
try:
self.significance = int(args[1], 0)
check_or_fail(0 <= self.significance < 8,
"Significance must be between 0 and 7 : " + str(self.significance))
except ValueError:
check_or_fail(False, "Wrong significance format : " + args[1])
def edited_file_locations(self):
return [self.addr[0] * 8 + self.significance]
def apply(self, opened_file):
opened_file.seek(self.addr[0])
prev_value = ord(opened_file.read(1))
prev_value ^= (1 << self.significance)
set_bytes(opened_file, self.addr[0], prev_value)
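# Example: per the docs string above, `FLP addr significance` (e.g. address
# 0x10, significance 3) flips bit 3 of the byte at offset 0x10.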
| 34.793103
| 93
| 0.617443
| 132
| 1,009
| 4.522727
| 0.477273
| 0.067002
| 0.055276
| 0.040201
| 0.060302
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.270565
| 1,009
| 28
| 94
| 36.035714
| 0.789402
| 0
| 0
| 0
| 0
| 0
| 0.152626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.086957
| 0.043478
| 0.434783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d01a949b661519f2a818675ee51e8c4ae04571b0
| 3,120
|
py
|
Python
|
MLGame/games/snake/ml/rule.py
|
Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING
|
f4a58d0d9f5832a77a4a86352e084065dc7bae50
|
[
"MIT"
] | null | null | null |
MLGame/games/snake/ml/rule.py
|
Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING
|
f4a58d0d9f5832a77a4a86352e084065dc7bae50
|
[
"MIT"
] | null | null | null |
MLGame/games/snake/ml/rule.py
|
Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING
|
f4a58d0d9f5832a77a4a86352e084065dc7bae50
|
[
"MIT"
] | null | null | null |
"""
The template of the script for playing the game in the ml mode
"""
class MLPlay:
def __init__(self):
"""
Constructor
"""
        self.direction = 0  # up/down/left/right: 1, 2, 3, 4
self.current_x = 0
self.current_y = 0
self.last_x = 0
self.last_y = 0
self.x_dir = 0
self.y_dir = 0
#pass
def update(self, scene_info):
"""
Generate the command according to the received scene information
"""
if scene_info["status"] == "GAME_OVER":
snake_body = scene_info["snake_body"]
print(len(snake_body))
return "RESET"
snake_head = scene_info["snake_head"]
food = scene_info["food"]
snake_body = scene_info["snake_body"]
if scene_info["frame"] == 0:
            self.direction = 0  # up/down/left/right: 1, 2, 3, 4
self.current_x = snake_head[0]
self.current_y = snake_head[1]
self.last_x = snake_head[0]
self.last_y = snake_head[1]
self.x_dir = 0
self.y_dir = 0
else:
self.current_x = snake_head[0]
self.current_y = snake_head[1]
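            # infer the current heading from the head's displacement since the last frame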
self.x_dir = self.current_x - self.last_x
self.y_dir = self.current_y - self.last_y
if self.x_dir > 0 and self.y_dir == 0:#right
self.direction = 4
if self.x_dir < 0 and self.y_dir == 0:#left
self.direction = 3
if self.x_dir == 0 and self.y_dir > 0:#down
self.direction = 2
if self.x_dir == 0 and self.y_dir < 0:#up
self.direction = 1
self.last_x = snake_head[0]
self.last_y = snake_head[1]
        # repeatedly sweep over the entire field
if (self.current_x != 0 and self.current_x != 10 and self.current_x != 290):
if(((self.current_y / 10) % 2) == 0):
return "RIGHT"
elif(((self.current_y / 10) % 2) == 1):
return "LEFT"
elif (self.current_x == 0):
if (self.current_y == 0):
return "RIGHT"
else:
return "UP"
elif (self.current_x == 10):
if(self.current_y == 290):
return "LEFT"
elif((self.current_y / 10) % 2 == 0):
return "RIGHT"
elif((self.current_y / 10) % 2 == 1):
return "DOWN"
elif (self.current_x == 290):
if((self.current_y / 10) % 2 == 0):
return "DOWN"
elif((self.current_y / 10) % 2 == 1):
return "LEFT"
if snake_head[0] > food[0]:
return "LEFT"
elif snake_head[0] < food[0]:
return "RIGHT"
elif snake_head[1] > food[1]:
return "UP"
elif snake_head[1] < food[1]:
return "DOWN"
def reset(self):
"""
Reset the status if needed
"""
pass
| 32.5
| 88
| 0.458013
| 387
| 3,120
| 3.498708
| 0.157623
| 0.17873
| 0.106352
| 0.039882
| 0.546529
| 0.519202
| 0.445347
| 0.405465
| 0.378877
| 0.353767
| 0
| 0.049042
| 0.43141
| 3,120
| 95
| 89
| 32.842105
| 0.714205
| 0.071154
| 0
| 0.458333
| 0
| 0
| 0.03946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0.013889
| 0
| 0
| 0.25
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d01ad5a73de06c489b92a116216a85d95752401d
| 856
|
py
|
Python
|
CodingInterviews/python/37_get_num_of_k_2.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
CodingInterviews/python/37_get_num_of_k_2.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
CodingInterviews/python/37_get_num_of_k_2.py
|
YorkFish/git_study
|
6e023244daaa22e12b24e632e76a13e5066f2947
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding:utf-8
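# Count occurrences of k in a sorted list: lower-bound searches for k - 0.5 and
# k + 0.5 bracket the run of k's, so their difference is the count.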
class Solution:
def GetNumberOfK(self, data, k):
if data == [] or k > data[-1]:
return 0
def binSearch(data, num):
left = 0
right = len(data) - 1
while left < right:
mid = left + (right - left) // 2
if data[mid] < num:
left = mid + 1
else:
right = mid
return left
if data[-1] == k:
stop = len(data)
else:
stop = binSearch(data, k + 0.5)
return stop - binSearch(data, k - 0.5)
if __name__ == "__main__":
# data = [1, 3, 3, 3, 3, 4, 5]
# k = 2
# k = 3
# k = 4
# k = 6
data = [1, 2, 3, 3, 3, 3]
k = 3
s = Solution()
ans = s.GetNumberOfK(data, k)
print(ans)
| 20.878049
| 48
| 0.408879
| 110
| 856
| 3.109091
| 0.345455
| 0.035088
| 0.035088
| 0.105263
| 0.116959
| 0.116959
| 0
| 0
| 0
| 0
| 0
| 0.066955
| 0.459112
| 856
| 40
| 49
| 21.4
| 0.671706
| 0.101636
| 0
| 0.08
| 0
| 0
| 0.010499
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0
| 0
| 0.24
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d01bbe0df932770a9de781be883abde7e781fb15
| 23,356
|
py
|
Python
|
PerceptualLoss.py
|
kirill-pinigin/DeepImageDenoiser
|
9a228c821bd3960688a4ed35f47f4767d226b57c
|
[
"Apache-2.0"
] | null | null | null |
PerceptualLoss.py
|
kirill-pinigin/DeepImageDenoiser
|
9a228c821bd3960688a4ed35f47f4767d226b57c
|
[
"Apache-2.0"
] | null | null | null |
PerceptualLoss.py
|
kirill-pinigin/DeepImageDenoiser
|
9a228c821bd3960688a4ed35f47f4767d226b57c
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torchvision import models
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from DeepImageDenoiser import LR_THRESHOLD, DIMENSION, LEARNING_RATE
from NeuralModels import SpectralNorm
ITERATION_LIMIT = int(1e6)
SQUEEZENET_CONFIG = {'dnn' : models.squeezenet1_1(pretrained=True).features, 'features' : [2, 5, 8, 13]}
VGG_16_CONFIG = {'dnn' : models.vgg16(pretrained=True).features, 'features' : [4, 9, 16, 23]}
VGG_16_BN_CONFIG = {'dnn' : models.vgg16_bn(pretrained=True).features, 'features' : [6, 13, 23, 33] }
VGG_19_CONFIG = {'dnn' : models.vgg19(pretrained=True).features, 'features' : [ 4, 9, 18, 36] }
VGG_19_BN_CONFIG = {'dnn': models.vgg19_bn(pretrained=True).features, 'features' : [6, 13, 23, 52]}
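# each config pairs a pretrained backbone with the layer indices after which
# intermediate activations are tapped for perceptual comparison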
class BasicFeatureExtractor(nn.Module):
def __init__(self, vgg_config , feature_limit = 9):
super(BasicFeatureExtractor, self).__init__()
if DIMENSION == 3:
self.mean = Parameter(torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1))
self.std = Parameter(torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1))
elif DIMENSION == 1:
self.mean = Parameter(torch.tensor([0.449]).view(-1, 1, 1))
self.std = Parameter(torch.tensor([0.226]).view(-1, 1, 1))
else:
self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))
vgg_pretrained = vgg_config['dnn']
conv = BasicFeatureExtractor.configure_input(DIMENSION, vgg_pretrained)
self.slice1 = nn.Sequential(conv)
for x in range(1, feature_limit):
self.slice1.add_module(str(x), vgg_pretrained[x])
@staticmethod
def configure_input(dimension, vgg):
conv = nn.Conv2d(dimension, 64, kernel_size=3, padding=1)
if dimension == 1 or dimension == 3:
weight = torch.FloatTensor(64, DIMENSION, 3, 3)
parameters = list(vgg.parameters())
for i in range(64):
if DIMENSION == 1:
weight[i, :, :, :] = parameters[0].data[i].mean(0)
else:
weight[i, :, :, :] = parameters[0].data[i]
conv.weight.data.copy_(weight)
conv.bias.data.copy_(parameters[1].data)
return conv
def forward(self, x):
if DIMENSION == 1 or DIMENSION == 3:
        if DIMENSION == 1 or DIMENSION == 3:
            # Tensor.to() is not in-place; use its return value, otherwise the
            # statistics never move and normalization fails for CUDA inputs
            x = (x - self.mean.to(x.device)) / self.std.to(x.device)
return self.slice1(x)
class BasicMultiFeatureExtractor(BasicFeatureExtractor):
def __init__(self, vgg_config , requires_grad):
super(BasicMultiFeatureExtractor, self).__init__(vgg_config, vgg_config['features'][0])
vgg_pretrained = vgg_config['dnn']
self.slice2 = torch.nn.Sequential()
for x in range(vgg_config['features'][0], vgg_config['features'][1]):
self.slice2.add_module(str(x), vgg_pretrained[x])
self.slice3 = torch.nn.Sequential()
for x in range(vgg_config['features'][1], vgg_config['features'][2]):
self.slice3.add_module(str(x), vgg_pretrained[x])
self.slice4 = torch.nn.Sequential()
for x in range(vgg_config['features'][2], vgg_config['features'][3]):
self.slice4.add_module(str(x), vgg_pretrained[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
h_relu1 = super(BasicMultiFeatureExtractor, self).forward(x)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
return h_relu1, h_relu2, h_relu3, h_relu4
class FastNeuralStyleExtractor(BasicMultiFeatureExtractor):
def __init__(self, requires_grad=False , bn = True):
features = VGG_16_BN_CONFIG if bn else VGG_16_CONFIG
super(FastNeuralStyleExtractor, self).__init__(features, requires_grad)
class FastNeuralStylePerceptualLoss(nn.Module):
def __init__(self, weight:float = 1e-3):
super(FastNeuralStylePerceptualLoss, self).__init__()
self.factors = [1e0 , 1e-1, 1e-2 , 1e-3]
self.weight = weight
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.cudas = list(range(torch.cuda.device_count()))
self.features = FastNeuralStyleExtractor()
self.features.eval()
self.features.to(self.device)
self.criterion = nn.MSELoss()
def compute_gram_matrix(self, x):
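        # Gram matrix of the flattened feature maps, normalized by their size;
        # the standard style-loss statistic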
b, ch, h, w = x.size()
f = x.view(b, ch, w * h)
f_T = f.transpose(1, 2)
G = f.bmm(f_T) / (h * w * ch)
return G
def forward(self, actual, desire):
actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
closs = 0.0
for i in range(len(actuals)):
closs += self.factors[i] * self.criterion(actuals[i], desires[i])
        sloss = 0.0
        if self.weight != 0:
            # accumulate the Gram-matrix style term at every feature level;
            # the original computed this product but never added it to sloss
            for i in range(len(actuals)):
                sloss += self.weight * self.criterion(self.compute_gram_matrix(actuals[i]),
                                                      self.compute_gram_matrix(desires[i]))
self.loss = closs + sloss
return self.loss
    def backward(self, retain_variables=True):
        # retain_variables was renamed retain_graph in PyTorch 0.4
        return self.loss.backward(retain_graph=retain_variables)
class FluentExtractor(BasicMultiFeatureExtractor):
def __init__(self):
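        # skips the VGG-based initializers and goes straight to
        # nn.Module.__init__, since this extractor defines its own slices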
super(BasicFeatureExtractor, self).__init__()
self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))
self.slice1 = torch.nn.Sequential(
nn.Conv2d(in_channels=DIMENSION, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice2 = torch.nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice3 = torch.nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice4 = torch.nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
)
class AdaptivePerceptualLoss(nn.Module):
def __init__(self):
super(AdaptivePerceptualLoss, self).__init__()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.cudas = list(range(torch.cuda.device_count()))
self.features = FluentExtractor()
self.factors = [1e0, 1e-1, 1e-2, 1e-3]
self.predictor = nn.Sequential()
self.predictor.add_module('conv_9', nn.Conv2d(in_channels=512, out_channels=8, kernel_size=3, stride=2, padding=1, bias=False))
self.predictor.add_module('lrelu_9', nn.LeakyReLU(0.2))
self.predictor.add_module('fc', nn.Conv2d(8, 1, 1, 1, 0, bias=False))
self.predictor.add_module('sigmoid', nn.Sigmoid())
self.features.to(self.device)
self.predictor.to(self.device)
self.optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)
self.ContentCriterion = nn.L1Loss()
self.AdversarialCriterion = nn.BCELoss()
self.loss = None
self.counter = int(0)
self.best_loss = float(100500)
self.current_loss = float(0)
self.relu = nn.ReLU()
self.margin = 1.0
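        # hinge margin applied to the perceptual distance when fitting the
        # discriminator (see fit() below)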
def evaluate(self, actual, desire):
actual_features = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
desire_features = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
ploss = 0.0
for i in range(len(desire_features)):
ploss += self.factors[i]*self.ContentCriterion(actual_features[i], desire_features[i])
return actual_features, desire_features, ploss
def meta_optimize(self, lossD, length):
self.current_loss += float(lossD.item()) / length
if self.counter > ITERATION_LIMIT:
self.current_loss = self.current_loss / float(ITERATION_LIMIT)
if self.current_loss < self.best_loss:
self.best_loss = self.current_loss
print('! best_loss !', self.best_loss)
else:
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
if lr >= LR_THRESHOLD:
param_group['lr'] = lr * 0.2
print('! Decrease LearningRate in Perceptual !', lr)
self.counter = int(0)
self.current_loss = float(0)
self.counter += int(1)
def pretrain(self, dataloaders, num_epochs=20):
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
for phase in ['train', 'val']:
if phase == 'train':
self.features.train(True)
self.predictor.train(True)
else:
self.features.train(False)
self.predictor.train(False)
running_loss = 0.0
running_corrects = 0
for data in dataloaders[phase]:
inputs, targets = data
targets = targets.float()
inputs = Variable(inputs.to(self.device))
targets = Variable(targets.to(self.device))
self.optimizer.zero_grad()
features = torch.nn.parallel.data_parallel(module=self.features, inputs=inputs, device_ids=self.cudas)
outputs = torch.nn.parallel.data_parallel(module=self.predictor, inputs=features[-1].detach(), device_ids=self.cudas).view(-1)
loss = self.AdversarialCriterion(outputs, targets)
if phase == 'train':
loss.backward()
self.optimizer.step()
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(torch.round(outputs.data) == targets.data)
self.meta_optimize(loss, float(targets.size(0)))
epoch_loss = float(running_loss) / float(len(dataloaders[phase].dataset))
epoch_acc = float(running_corrects) / float(len(dataloaders[phase].dataset))
print(' epoch_acc ', epoch_acc, ' epoch_loss ', epoch_loss)
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
print('curent best_acc ', best_acc)
self.optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)
def fit(self, actual, desire):
self.features.train()
self.predictor.train()
self.optimizer.zero_grad()
actual_features, desire_features, ploss = self.evaluate(actual, desire)
fake = torch.nn.parallel.data_parallel(module=self.predictor, inputs=actual_features[-1].detach(),device_ids=self.cudas).view(-1)
zeros = Variable(torch.zeros(fake.shape).to(self.device))
real = torch.nn.parallel.data_parallel(module=self.predictor, inputs=desire_features[-1].detach(), device_ids=self.cudas).view(-1)
ones = Variable(torch.ones(real.shape).to(self.device))
lossDreal = self.AdversarialCriterion(real, ones)
lossDfake = self.AdversarialCriterion(fake, zeros)
lossD = lossDreal + lossDfake + self.relu(self.margin - ploss).mean()
lossD.backward(retain_graph=True)
self.optimizer.step()
self.meta_optimize(lossD, float(actual.size(0)))
def forward(self, actual, desire):
self.predictor.eval()
self.features.eval()
actual_features, _, ploss = self.evaluate(actual, desire)
rest = self.predictor(actual_features[-1]).view(-1)
ones = Variable(torch.ones(rest.shape).to(self.device))
aloss = self.AdversarialCriterion(rest, ones)
self.loss = ploss + aloss + self.ContentCriterion(actual, desire)
self.fit(actual, desire)
return self.loss
    def backward(self, retain_variables=True):
        return self.loss.backward(retain_graph=retain_variables)
class MobileExtractor(BasicMultiFeatureExtractor):
def __init__(self, requires_grad=False, bn = True):
features = VGG_19_BN_CONFIG if bn else VGG_19_CONFIG
super(MobileExtractor, self).__init__(features, requires_grad)
class MobilePerceptualLoss(nn.Module):
def __init__(self):
super(MobilePerceptualLoss, self).__init__()
self.factors = [1e0, 1e-1, 1e-2, 1e-3]
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.cudas = list(range(torch.cuda.device_count()))
self.features = MobileExtractor()
self.features.eval()
self.features.to(self.device)
self.criterion = nn.MSELoss()
def forward(self, actual, desire):
actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
loss = 0.0
for i in range(len(actuals)):
loss += self.factors[i]*self.criterion(actuals[i], desires[i])
self.loss = loss
return self.loss
    def backward(self, retain_variables=True):
        return self.loss.backward(retain_graph=retain_variables)
class SimpleExtractor(BasicFeatureExtractor):
def __init__(self, feat=1, bn = True):
features_list = VGG_19_BN_CONFIG['features'] if bn else VGG_19_CONFIG['features']
features_limit = features_list[1]
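        # NOTE: the backbone passed below is VGG_19_CONFIG regardless of `bn`;
        # only the tap index changes, which may be unintentional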
super(SimpleExtractor, self).__init__(VGG_19_CONFIG, features_limit)
class SimplePerceptualLoss(nn.Module):
def __init__(self, feat : int = 2):
super(SimplePerceptualLoss, self).__init__()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.cudas = list(range(torch.cuda.device_count()))
self.features = SimpleExtractor(feat)
self.features.eval()
self.features.to(self.device)
self.criterion = nn.MSELoss()
def forward(self, actual, desire):
actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
loss = self.criterion(actuals, desires)
self.loss = loss
return self.loss
    def backward(self, retain_variables=True):
        return self.loss.backward(retain_graph=retain_variables)
class SqueezeExtractor(BasicMultiFeatureExtractor):
def __init__(self, requires_grad=False):
super(SqueezeExtractor, self).__init__(SQUEEZENET_CONFIG, requires_grad)
class SqueezeAdaptivePerceptualLoss(AdaptivePerceptualLoss):
def __init__(self):
super(SqueezeAdaptivePerceptualLoss, self).__init__()
self.features = SqueezeExtractor(requires_grad=True)
self.features.to(self.device)
self.predictor.to(self.device)
class SpectralFluentExtractor(BasicMultiFeatureExtractor):
def __init__(self):
super(BasicFeatureExtractor, self).__init__()
self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))
self.slice1 = torch.nn.Sequential(
nn.Conv2d(in_channels=DIMENSION, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice2 = torch.nn.Sequential(
SpectralNorm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice3 = torch.nn.Sequential(
SpectralNorm(nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
)
self.slice4 = torch.nn.Sequential(
SpectralNorm(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
SpectralNorm(nn.Conv2d(in_channels=512, out_channels=8, kernel_size=3, stride=2, padding=1, bias=False)),
nn.LeakyReLU(0.2, inplace=True),
)
class SpectralAdaptivePerceptualLoss(AdaptivePerceptualLoss):
def __init__(self):
super(SpectralAdaptivePerceptualLoss, self).__init__()
self.features = SpectralFluentExtractor()
self.predictor = nn.Sequential()
self.predictor.add_module('fc', SpectralNorm(nn.Conv2d(8, 1, 1, 1, 0, bias=False)))
self.features.to(self.device)
self.predictor.to(self.device)
def fit(self, actual, desire):
self.features.train()
self.predictor.train()
self.optimizer.zero_grad()
actual_features, desire_features, ploss = self.evaluate(actual, desire)
fake = torch.nn.parallel.data_parallel(module=self.predictor, inputs=actual_features[-1].detach(),
device_ids=self.cudas).view(-1)
real = torch.nn.parallel.data_parallel(module=self.predictor, inputs=desire_features[-1].detach(),
device_ids=self.cudas).view(-1)
lossDreal = self.relu(1.0 - real).mean()
lossDfake = self.relu(1.0 + fake).mean()
lossD = lossDreal + lossDfake + self.relu(self.margin - ploss).mean()
lossD.backward(retain_graph=True)
self.optimizer.step()
self.meta_optimize(lossD, float(actual.size(0)))
def forward(self, actual, desire):
self.predictor.eval()
self.features.eval()
actual_features, _, ploss = self.evaluate(actual, desire)
self.loss = ploss - self.predictor(actual_features[-1]).view(-1).mean() + self.ContentCriterion(actual, desire)
self.fit(actual, desire)
return self.loss
class WassersteinAdaptivePerceptualLoss(SpectralAdaptivePerceptualLoss):
def __init__(self):
super(WassersteinAdaptivePerceptualLoss, self).__init__()
self.predictor.add_module('sigmoid', nn.Sigmoid())
self.predictor.to(self.device)
def forward(self, actual, desire):
self.predictor.eval()
self.features.eval()
actual_features, _, ploss = self.evaluate(actual, desire)
result = self.predictor(actual_features[-1]).view(-1)
self.loss = ploss - result.view(-1).mean() + torch.nn.functional.binary_cross_entropy(result, torch.ones_like(result))
self.fit(actual, desire)
return self.loss
def fit(self, actual, desire):
self.features.train()
self.predictor.train()
self.optimizer.zero_grad()
actual_features, desire_features, ploss = self.evaluate(actual, desire)
fake = torch.nn.parallel.data_parallel(module=self.predictor, inputs=actual_features[-1].detach(),
device_ids=self.cudas).view(-1)
real = torch.nn.parallel.data_parallel(module=self.predictor, inputs=desire_features[-1].detach(),
device_ids=self.cudas).view(-1)
real_loss = torch.nn.functional.binary_cross_entropy(real, Variable(torch.ones_like(real)).to(self.device))
fake_loss = torch.nn.functional.binary_cross_entropy(fake, Variable(torch.zeros_like(fake)).to(self.device))
wgan_loss = fake.mean() - real.mean()
interpolates = 0.5 * desire + (1 - 0.5) * actual
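        # NOTE: canonical WGAN-GP samples the mixing coefficient uniformly at
        # random per example; this implementation fixes it at 0.5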
interpolates = Variable(interpolates.clone(), requires_grad=True).to(self.device)
interpolatesl_features = torch.nn.parallel.data_parallel(module=self.features, inputs=interpolates, device_ids=self.cudas)
interpolates_discriminator_out = torch.nn.parallel.data_parallel(module=self.predictor, inputs=interpolatesl_features[-1], device_ids=self.cudas).view(-1)
buffer = Variable(torch.ones_like(interpolates_discriminator_out), requires_grad=True).to(self.device)
gradients = torch.autograd.grad(outputs=interpolates_discriminator_out, inputs=interpolates,
grad_outputs=buffer,
retain_graph=True,
create_graph=True)[0]
gradient_penalty = ((gradients.view(gradients.size(0), -1).norm(2, dim=1) - 1) ** 2).mean()
lossD = (real_loss + fake_loss) / 2.0 + wgan_loss + 1e-2*gradient_penalty
lossD.backward()
self.optimizer.step()
self.current_loss += float(lossD.item()) / float(actual.size(0))
if self.counter > ITERATION_LIMIT:
self.current_loss = self.current_loss / float(ITERATION_LIMIT)
if self.current_loss < self.best_loss:
self.best_loss = self.current_loss
print('! best_loss !', self.best_loss)
else:
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
if lr >= LR_THRESHOLD:
param_group['lr'] = lr * 0.2
print('! Decrease LearningRate in Perceptual !', lr)
self.counter = int(0)
self.current_loss = float(0)
self.counter += int(1)
| 45.088803
| 163
| 0.630074
| 2,855
| 23,356
| 4.992644
| 0.091769
| 0.016697
| 0.015996
| 0.023993
| 0.6622
| 0.632665
| 0.585169
| 0.563351
| 0.534096
| 0.518381
| 0
| 0.030851
| 0.245033
| 23,356
| 517
| 164
| 45.176015
| 0.777519
| 0
| 0
| 0.472684
| 0
| 0
| 0.016441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083135
| false
| 0
| 0.016627
| 0.009501
| 0.171021
| 0.019002
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d0205b5caed2d6f638ffecd766f2e084e27abd9b
| 11,517
|
py
|
Python
|
Python/Unittest/Fixtures/tests.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 115
|
2015-03-23T13:34:42.000Z
|
2022-03-21T00:27:21.000Z
|
Python/Unittest/Fixtures/tests.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 56
|
2015-02-25T15:04:26.000Z
|
2022-01-03T07:42:48.000Z
|
Python/Unittest/Fixtures/tests.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 59
|
2015-11-26T11:44:51.000Z
|
2022-03-21T00:27:22.000Z
|
#!/usr/bin/env python
import os
import shutil
import sqlite3
import unittest
import init_db
'''name of database to use as master'''
master_name = 'projects.db'
def setUpModule():
'''create and fill the database'''
conn = sqlite3.connect(master_name)
init_db.execute_file(conn, 'create_db.sql')
init_db.execute_file(conn, 'fill_db.sql')
def tearDownModule():
'''remove database file once testing is done'''
os.remove(master_name)
class ContentsTest(unittest.TestCase):
test_name = 'test.db'
@classmethod
def setUpClass(cls):
'''copy original database'''
shutil.copyfile(master_name, cls.test_name)
def setUp(self):
'''open connection, create cursor'''
self._conn = sqlite3.connect(self.__class__.test_name)
self._conn.row_factory = sqlite3.Row
self._cursor = self._conn.cursor()
def tearDown(self):
'''close database connection'''
self._conn.close()
@classmethod
def tearDownClass(cls):
'''remove test database'''
os.remove(cls.test_name)
def test_num_projects(self):
'''test whether the projects table has the expected number of
rows'''
expected = 3
self._cursor.execute(''' SELECT COUNT(*) FROM projects;''')
nr_rows = self._cursor.fetchone()[0]
self.assertEqual(expected, nr_rows)
def test_num_researchers(self):
'''test whether the researchers table has the expected number of
rows'''
expected = 3
self._cursor.execute(''' SELECT COUNT(*) FROM researchers;''')
nr_rows = self._cursor.fetchone()[0]
self.assertEqual(expected, nr_rows)
def test_num_assignments(self):
'''test whether the number of staff assignments is the expected
number'''
expected = 2
self._cursor.execute(''' SELECT COUNT(*) FROM staff_assignments;''')
nr_rows = self._cursor.fetchone()[0]
self.assertEqual(expected, nr_rows)
def test_view_consistency(self):
'''test whether the staff assignments table has the same number
of rows as the project staffing view'''
self._cursor.execute(''' SELECT COUNT(*) FROM staff_assignments;''')
nr_table_rows = self._cursor.fetchone()[0]
self._cursor.execute(''' SELECT COUNT(*) FROM project_staffing;''')
nr_view_rows = self._cursor.fetchone()[0]
self.assertEqual(nr_table_rows, nr_view_rows)
def test_researcher_bob(self):
'''test whether there is a researcher named Bob, and whether
his last name is Dreary'''
expected_last_name = 'Dreary'
expected_nr_rows = 1
self._cursor.execute(
'''SELECT last_name FROM researchers
WHERE first_name = ?;''',
('Bob', )
)
nr_rows = 0
last_name = None
for row in self._cursor:
nr_rows += 1
last_name = row['last_name']
self.assertEqual(expected_nr_rows, nr_rows)
self.assertEqual(expected_last_name, last_name)
def test_projects_start_date(self):
'''test whether the projects table has the correct number of
projects starting before November 1, 2014, and check those are
the expected projects'''
expected_projects = ['project 1', 'project 2']
self._cursor.execute(
'''SELECT project_name FROM projects
WHERE start_date < ?
ORDER BY project_name ASC;''',
('2014-11-01', )
)
projects = []
for row in self._cursor:
projects.append(row['project_name'])
self.assertListEqual(expected_projects, projects)
def test_unassigned_researchers(self):
'''test whether the number of unassigned researchers is the
expected one, and that Carol is idle'''
expected_researchers = ['Carol']
self._cursor.execute(
'''SELECT first_name FROM researchers
WHERE researcher_id IN (
SELECT researcher_id
FROM researchers
EXCEPT SELECT researcher_id
FROM staff_assignments);'''
)
researchers = []
for row in self._cursor:
researchers.append(row['first_name'])
self.assertListEqual(expected_researchers, researchers)
def test_assigned_projects(self):
'''test whether the expected number of projects has been
assigned'''
expected_nr_assignments = 2
self._cursor.execute(
'''SELECT COUNT(DISTINCT project_id) FROM staff_assignments;'''
)
nr_assignments = self._cursor.fetchone()[0]
self.assertEqual(expected_nr_assignments, nr_assignments)
def test_samples_per_project(self):
        '''test whether each project has the correct number of samples
associated with it'''
expected_samples = {
'project 1': {'homo sapiens', 'felis catus'},
'project 2': {'felis catus'},
'project 3': set(),
}
self._cursor.execute(
'''SELECT p.project_name AS 'project_name',
COUNT(s.sample_id) AS 'nr_samples'
FROM projects AS p, samples AS s
WHERE s.project_id = p.project_id
GROUP BY p.project_id;'''
)
for row in self._cursor:
self.assertEqual(len(expected_samples[row['project_name']]),
row['nr_samples'])
for project_name in expected_samples:
self._cursor.execute(
'''SELECT s.organism AS organism
FROM projects AS p, samples AS s
WHERE p.project_name = ? AND
p.project_id = s.project_id;''',
(project_name, )
)
samples = set()
for row in self._cursor:
samples.add(row['organism'])
self.assertSetEqual(expected_samples[project_name], samples)
class ConstraintsTest(unittest.TestCase):
'''tests the table constraints and triggers defined on the schema'''
test_name = 'test.db'
def setUp(self):
'''copy original database, and open connection, create cursor'''
shutil.copyfile(master_name, self.__class__.test_name)
self._conn = sqlite3.connect(self.__class__.test_name)
self._conn.row_factory = sqlite3.Row
self._cursor = self._conn.cursor()
def tearDown(self):
'''close database connection and remove test database'''
self._conn.close()
os.remove(self.__class__.test_name)
def test_project_end_date(self):
        '''inserting a project with an invalid end date should fail'''
project_name = 'project 4'
start_date = '2015-01-05'
end_date = '2014-12-15'
with self.assertRaises(sqlite3.IntegrityError):
self._cursor.execute(
'''INSERT INTO projects (project_name, start_date, end_date)
VALUES (?, ?, ?);''',
(project_name, start_date, end_date)
)
def test_project_name_uniqueness(self):
'''inserting a project with a name that is already in the table
should fail'''
project_name = 'project 2'
start_date = '2015-01-05'
end_date = '2015-12-15'
with self.assertRaises(sqlite3.IntegrityError):
self._cursor.execute(
'''INSERT INTO projects (project_name, start_date, end_date)
VALUES (?, ?, ?);''',
(project_name, start_date, end_date)
)
def test_double_assignment(self):
'''assigning a researcher to a project twice should fail'''
project_name = 'project 1'
first_name = 'Bob'
with self.assertRaises(sqlite3.IntegrityError):
self._cursor.execute(
'''INSERT INTO staff_assignments
(project_id, researcher_id)
SELECT p.project_id AS 'project_id',
r.researcher_id AS 'researcher_id'
FROM projects AS p, researchers AS r
WHERE p.project_name = ? AND
r.first_name = ?;''',
(project_name, first_name)
)
def test_researcher_delete_trigger(self):
'''when a researcher is deleted, the staff assignments for this
person should be deleted as well'''
expected_nr_rows = 1
project_name = 'project 2'
first_name = 'Bob'
# first, assign Bob to project 2 as well
self._cursor.execute(
'''INSERT INTO staff_assignments
(project_id, researcher_id)
SELECT p.project_id AS 'project_id',
r.researcher_id AS 'researcher_id'
FROM projects AS p, researchers AS r
WHERE p.project_name = ? AND
r.first_name = ?;''',
(project_name, first_name)
)
self._cursor.execute(
'''DELETE FROM researchers WHERE first_name = ?;''',
(first_name, )
)
self._cursor.execute(
'''SELECT COUNT(*) FROM staff_assignments;'''
)
nr_rows = 0
for row in self._cursor:
nr_rows += 1
self.assertEqual(expected_nr_rows, nr_rows)
self._cursor.execute(
'''SELECT COUNT(*) FROM project_staffing;'''
)
nr_rows = 0
for row in self._cursor:
nr_rows += 1
self.assertEqual(expected_nr_rows, nr_rows)
def test_project_delete_trigger(self):
'''when a project is deleted, the staff assignments for this
project should be deleted as well'''
project_name = 'project 1'
expected_staffed_projects = {'project 2'}
self._cursor.execute(
'''DELETE FROM projects
WHERE project_name = ?;''',
(project_name, )
)
self._cursor.execute(
'''SELECT p.project_name AS 'project_name'
FROM projects AS p, staff_assignments AS s
WHERE p.project_id = s.project_id;'''
)
staffed_projects = set()
for row in self._cursor:
staffed_projects.add(row['project_name'])
self.assertSetEqual(expected_staffed_projects, staffed_projects)
def test_sample_update_trigger(self):
'''when a project is deleted, samples for that project should
refer to NULL'''
project_name = 'project 1'
expected_nr_samples = 3
expected_nr_null_ref_samples = 2
self._cursor.execute(
'''DELETE FROM projects
WHERE project_name = ?;''',
(project_name, )
)
self._cursor.execute(
'''SELECT COUNT(*) FROM samples
WHERE project_id IS NULL;'''
)
nr_null_ref_samples = self._cursor.fetchone()[0]
self.assertEqual(expected_nr_null_ref_samples, nr_null_ref_samples)
self._cursor.execute(
'''SELECT COUNT(*) FROM samples;'''
)
nr_samples = self._cursor.fetchone()[0]
self.assertEqual(expected_nr_samples, nr_samples)
if __name__ == '__main__':
unittest.main()
| 36.33123
| 76
| 0.581488
| 1,284
| 11,517
| 4.976636
| 0.145639
| 0.064163
| 0.061189
| 0.05759
| 0.540219
| 0.438028
| 0.398122
| 0.355869
| 0.319718
| 0.280908
| 0
| 0.011418
| 0.323174
| 11,517
| 316
| 77
| 36.446203
| 0.808339
| 0.139272
| 0
| 0.418367
| 0
| 0
| 0.073825
| 0
| 0
| 0
| 0
| 0
| 0.096939
| 1
| 0.117347
| false
| 0
| 0.02551
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d021199fc85a8a81bc13417b44056945e03b66e3
| 3,668
|
py
|
Python
|
backend/ids/views/ids.py
|
block-id/wallet
|
b5479df7df0e5b5733f0ae262ffc17f9b923347d
|
[
"Apache-2.0"
] | null | null | null |
backend/ids/views/ids.py
|
block-id/wallet
|
b5479df7df0e5b5733f0ae262ffc17f9b923347d
|
[
"Apache-2.0"
] | null | null | null |
backend/ids/views/ids.py
|
block-id/wallet
|
b5479df7df0e5b5733f0ae262ffc17f9b923347d
|
[
"Apache-2.0"
] | 1
|
2021-12-31T17:27:44.000Z
|
2021-12-31T17:27:44.000Z
|
import json
from django.http.response import JsonResponse
from django.db.models import Q
from django.contrib.auth import authenticate
from rest_framework import viewsets, mixins
from rest_framework.permissions import IsAuthenticated
from rest_framework.exceptions import ValidationError, AuthenticationFailed
from rest_framework.decorators import action
from jsonschema.exceptions import ValidationError as JsonValidationError
from ids.models import Id
from ids.serializers.id.create import IdCreateSerializer
from ids.serializers.id.list import IdListSerializer
from ids.actions import create_verifiable_presentation
from ids.utils import verify_json_id
from lib.json_ids.validate import validate_json_id
from lib.drf.pagination import DefaultPageNumberPagination
class IdViewset(
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet,
):
permission_classes = [IsAuthenticated]
pagination_class = DefaultPageNumberPagination
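    # querysets are always scoped to the requesting user; list additionally
    # supports free-text (?query=) and type (?type=) filtering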
def get_queryset(self):
qs = Id.objects.filter(owner=self.request.user)
if self.action == "list":
query = self.request.GET.get("query")
if query:
qs = qs.filter(
Q(id_name__icontains=query) | Q(issuer_name__icontains=query)
)
            id_type = self.request.GET.get("type")  # avoid shadowing the builtin
            if id_type:
                qs = qs.filter(Q(type=id_type))
qs = qs.order_by("-id")
return qs
def get_serializer_class(self):
if self.action == "list":
return IdListSerializer
else:
return IdCreateSerializer
def create(self, request, *args, **kwargs):
if request.content_type != "application/json":
json_data = request.data.get("json")
json_id = json.loads(json_data)
else:
json_id = request.data.get("json")
# Validation
try:
validate_json_id(json_id)
except JsonValidationError as e:
error_path = "json." + ".".join(map(str, e.path))
raise ValidationError(f"{error_path}: {e.message}")
except (AssertionError, ValueError) as e:
raise ValidationError(str(e))
# ID signature verification
try:
verify_json_id(json_id)
except AssertionError as e:
raise ValidationError(str(e))
# Create ID
serializer = self.get_serializer(
data={
"owner": request.user.id,
"type": json_id["data"]["idType"],
"issuer_name": json_id["data"]["issuer"]["name"],
"id_name": json_id["data"]["idName"],
"verifiable_id": json_id,
}
)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
return JsonResponse(serializer.data)
@action(
methods=["post"],
detail=True,
url_path="create-vp",
permission_classes=[IsAuthenticated],
)
def create_vp(self, request, pk):
id = self.get_object()
attribute_groups = set(request.data.get("attribute_groups", []))
password = request.data.get("password")
entropy = request.data.get("entropy", "")
if not authenticate(request, username=request.user.username, password=password):
raise AuthenticationFailed("Invalid password")
presentation = create_verifiable_presentation(
id,
attribute_groups,
password,
entropy,
)
return JsonResponse(presentation)
| 33.045045
| 88
| 0.634133
| 386
| 3,668
| 5.880829
| 0.305699
| 0.031718
| 0.030837
| 0.017621
| 0.039648
| 0.023789
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272628
| 3,668
| 110
| 89
| 33.345455
| 0.850825
| 0.013631
| 0
| 0.086957
| 0
| 0
| 0.057831
| 0
| 0.01087
| 0
| 0
| 0
| 0.021739
| 1
| 0.043478
| false
| 0.043478
| 0.173913
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d0229c062e76ef7372542bd68ae2fdd99d5d9b15
| 1,257
|
py
|
Python
|
pattern.py
|
surajwate/textpattern
|
79869f932717bec47fc4a0e3e968c5a8321d8038
|
[
"MIT"
] | null | null | null |
pattern.py
|
surajwate/textpattern
|
79869f932717bec47fc4a0e3e968c5a8321d8038
|
[
"MIT"
] | null | null | null |
pattern.py
|
surajwate/textpattern
|
79869f932717bec47fc4a0e3e968c5a8321d8038
|
[
"MIT"
] | null | null | null |
def plusdash(plus, dash):
for i in range((plus-1)*dash + plus):
if i%(dash+1)==0:
print('+', end='')
else:
print('-', end='')
print('')
def pipe(pipe, space):
for i in range((pipe-1)*space + pipe):
if i % (space+1) == 0:
print('|', end='')
else:
print(' ', end='')
print('')
def wordinbar(word, space):
    # draws '|' at each cell boundary and a letter at the midpoint of each cell;
    # the midpoint arithmetic lines up cleanly when `space` is odd
    pipe = len(word) + 1
j = 0
for i in range((pipe-1)*space + pipe):
if i % (space+1) == 0:
print('|', end='')
elif i % (space//2 + 1) == 0:
print(word[j], end='')
j += 1
else:
print(' ', end='')
print('')
def wordinbox(word, space):
plusdash(len(word)+1, space)
for i in range(1):
wordinbar(word, space)
plusdash(len(word)+1, space)
def wordinsquare(word, height):
if height % 2 == 0:
space = height + 1
else:
space = height
space = space * 3
plusdash(len(word)+1, space)
for i in range(space//6):
pipe(len(word)+1, space)
for i in range(1):
wordinbar(word, space)
for i in range(space//6):
pipe(len(word)+1, space)
plusdash(len(word)+1, space)
wordinsquare('SURAJ W', 3)
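A quick usage sketch for wordinbox; an odd space is assumed, since the letter positions in wordinbar only align with the cell boundaries when 2*(space//2 + 1) == space + 1:

# wordinbox('HI', 5) prints:
#   +-----+-----+
#   |  H  |  I  |
#   +-----+-----+
wordinbox('HI', 5)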
| 22.854545
| 42
| 0.478123
| 170
| 1,257
| 3.535294
| 0.170588
| 0.079867
| 0.069884
| 0.12812
| 0.635607
| 0.59401
| 0.559068
| 0.509151
| 0.509151
| 0.386023
| 0
| 0.036145
| 0.339698
| 1,257
| 54
| 43
| 23.277778
| 0.687952
| 0
| 0
| 0.586957
| 0
| 0
| 0.010342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0
| 0
| 0.108696
| 0.217391
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d028aa49515cb0d7956170029a3d7c9b7460dad7
| 2,624
|
py
|
Python
|
src/apps/analysis/gen/edgeWeightBipartiteGaphGenerator.py
|
JacobFV/mln-analysis
|
f78a6531e5126f29e6895e9b8e4b4600110b3858
|
[
"MIT"
] | null | null | null |
src/apps/analysis/gen/edgeWeightBipartiteGaphGenerator.py
|
JacobFV/mln-analysis
|
f78a6531e5126f29e6895e9b8e4b4600110b3858
|
[
"MIT"
] | null | null | null |
src/apps/analysis/gen/edgeWeightBipartiteGaphGenerator.py
|
JacobFV/mln-analysis
|
f78a6531e5126f29e6895e9b8e4b4600110b3858
|
[
"MIT"
] | null | null | null |
import os
def get_comm_no(community_id, community_dict):
community_id = str(community_id)
if community_id in community_dict:
return community_dict[community_id]
else:
return 0
def edgeWeightBipartiteGraphGenerator(
layer1,
layer2,
layer1CommunityFile,
layer2CommunityFile,
layer12InterEdgeFile,
resultFile
):
# it looks like this was written for the IMDb dataset in particular.
# shouldn't it be more general?
director_community_info = []
director_community_dict = {}
movie_community_info = []
movie_community_dict = {}
no_of_vertices_in_movie_communities = {}
no_of_vertices_in_director_communities = {}
with open(layer1CommunityFile) as f:
for line in f:
director_community_info.append(line.split(' '))
for i in director_community_info:
if len(i) == 3:
director_community_dict[i[0]] = i[1]
if i[1] not in no_of_vertices_in_director_communities:
no_of_vertices_in_director_communities[i[1]] = 1
else:
no_of_vertices_in_director_communities[i[1]] += 1
with open(layer2CommunityFile) as f:
for line in f:
movie_community_info.append(line.split(' '))
for i in movie_community_info:
        if len(i) == 3:
movie_community_dict[i[0]] = i[1]
if i[1] not in no_of_vertices_in_movie_communities:
no_of_vertices_in_movie_communities[i[1]] = 1
else:
no_of_vertices_in_movie_communities[i[1]] += 1
edges_between_director_movie_communities = {}
with open(layer12InterEdgeFile) as b:
for line in b:
item = line.strip().split(',')
comm1 = get_comm_no(item[0], director_community_dict)
comm2 = get_comm_no(item[1], movie_community_dict)
if comm1 in no_of_vertices_in_director_communities:
ver1 = no_of_vertices_in_director_communities[comm1]
else:
ver1 = 0
if comm2 in no_of_vertices_in_movie_communities:
ver2 = no_of_vertices_in_movie_communities[comm2]
else:
ver2 = 0
if ver1 > 0 and ver2 > 0:
if (comm1, comm2) not in edges_between_director_movie_communities:
edges_between_director_movie_communities[(comm1, comm2)] = 1
else:
edges_between_director_movie_communities[(comm1, comm2)] += 1
    if not os.path.exists(os.path.dirname(resultFile)):
        os.makedirs(os.path.dirname(resultFile))
    # one "layer1,community1,layer2,community2,weight" row per community pair;
    # the layer ids are hard-coded: "1" for the director layer, "0" for the movie layer
    with open(resultFile, "w") as fs:
        for k, v in edges_between_director_movie_communities.items():
            fs.write("{0},{1},{2},{3},{4}\n".format("1", k[0], "0", k[1], v))
| 35.459459
| 76
| 0.671113
| 358
| 2,624
| 4.589385
| 0.223464
| 0.029215
| 0.087645
| 0.102252
| 0.483262
| 0.461351
| 0.336579
| 0.272672
| 0.174072
| 0.093731
| 0
| 0.0308
| 0.232851
| 2,624
| 73
| 77
| 35.945205
| 0.785395
| 0.036585
| 0
| 0.123077
| 0
| 0
| 0.011827
| 0.008564
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030769
| false
| 0
| 0.015385
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d029c8b65c82f8223b70d8ea031a22a8434f3b04
| 5,171
|
py
|
Python
|
pubs/utils.py
|
WIPACrepo/publication-web-db
|
f5d77f43c89377449f4fbe952f6b1dcfc458c91a
|
[
"MIT"
] | null | null | null |
pubs/utils.py
|
WIPACrepo/publication-web-db
|
f5d77f43c89377449f4fbe952f6b1dcfc458c91a
|
[
"MIT"
] | 16
|
2020-09-26T00:49:56.000Z
|
2021-09-09T19:03:42.000Z
|
pubs/utils.py
|
WIPACrepo/publication-web-db
|
f5d77f43c89377449f4fbe952f6b1dcfc458c91a
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import logging
import json
import csv
from io import StringIO
import pymongo
from bson.objectid import ObjectId
from . import PUBLICATION_TYPES, PROJECTS, SITES
def nowstr():
return datetime.utcnow().isoformat()
def date_format(datestring):
if 'T' in datestring:
if '.' in datestring:
date = datetime.strptime(datestring, "%Y-%m-%dT%H:%M:%S.%f")
else:
date = datetime.strptime(datestring, "%Y-%m-%dT%H:%M:%S")
else:
date = datetime.strptime(datestring, "%Y-%m-%d")
return date.strftime("%d %B %Y")
def create_indexes(db_url, db_name, background=True):
db = pymongo.MongoClient(db_url)[db_name]
indexes = db.publications.index_information()
if 'projects_index' not in indexes:
logging.info('creating projects_index')
db.publications.create_index('projects', name='projects_index', background=background)
if 'date_index' not in indexes:
logging.info('creating date_index')
db.publications.create_index('date', name='date_index', background=background)
if 'text_index' not in indexes:
logging.info('creating text_index')
db.publications.create_index([('title', pymongo.TEXT), ('authors', pymongo.TEXT), ('citation', pymongo.TEXT)],
weights={'title': 10, 'authors': 5, 'citation': 1},
name='text_index', background=background)
def validate(title, authors, pub_type, citation, date, downloads, projects, sites):
assert isinstance(title, str)
assert isinstance(authors, list)
for a in authors:
assert isinstance(a, str)
assert pub_type in PUBLICATION_TYPES
assert isinstance(citation, str)
assert isinstance(date, str)
date_format(date)
assert isinstance(downloads, list)
for d in downloads:
assert isinstance(d, str)
assert projects
assert isinstance(projects, list)
for p in projects:
assert p in PROJECTS
for s in sites:
assert s in SITES
async def add_pub(db, title, authors, pub_type, citation, date, downloads, projects, sites=None):
if not sites:
sites = []
validate(title, authors, pub_type, citation, date, downloads, projects, sites)
data = {
"title": title,
"authors": authors,
"type": pub_type,
"citation": citation,
"date": date,
"downloads": downloads,
"projects": projects,
"sites": sites,
}
await db.publications.insert_one(data)
async def edit_pub(db, mongo_id, title=None, authors=None, pub_type=None, citation=None, date=None, downloads=None, projects=None, sites=None):
match = {'_id': ObjectId(mongo_id)}
update = {}
if title:
assert isinstance(title, str)
update['title'] = title
if authors:
assert isinstance(authors, list)
for a in authors:
assert isinstance(a, str)
update['authors'] = authors
if pub_type:
assert pub_type in PUBLICATION_TYPES
update['type'] = pub_type
if citation:
assert isinstance(citation, str)
update['citation'] = citation
if date:
assert isinstance(date, str)
date_format(date)
update['date'] = date
if downloads:
assert isinstance(downloads, list)
for d in downloads:
assert isinstance(d, str)
update['downloads'] = downloads
if projects:
assert isinstance(projects, list)
for p in projects:
assert p in PROJECTS
update['projects'] = projects
if sites:
assert isinstance(sites, list)
for s in sites:
assert s in SITES
update['sites'] = sites
await db.publications.update_one(match, {'$set': update})
async def try_import_file(db, data):
"""
Try importing authors from file data (csv or json).
"""
# parse the data
try:
pubs = json.loads(data)
if 'publications' in pubs:
pubs = pubs['publications']
except json.JSONDecodeError:
try:
def parse_csv(row):
for k in row:
val = row[k]
if k in ('downloads', 'projects', 'sites'):
row[k] = val.split(',') if val else []
return row
with StringIO(data) as f:
reader = csv.DictReader(f)
pubs = [parse_csv(row) for row in reader]
except csv.Error:
raise Exception('File is not in a recognizable format. Only json or csv are valid.')
# now validate
for p in pubs:
if isinstance(p['authors'], str):
p['authors'] = [p['authors']]
try:
validate(p['title'], p['authors'], p['type'], p['citation'], p['date'], p['downloads'], p['projects'], p['sites'])
except AssertionError:
raise Exception(f'Error validating pub with title {p["title"][:100]}')
    # now add to db
for p in pubs:
await db.publications.replace_one({'title': p['title'], 'authors': p['authors'], 'date': p['date']}, p, upsert=True)
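A minimal sketch exercising the pure date helper above (the timestamps are made up); validate() and add_pub() additionally require values drawn from the package's PUBLICATION_TYPES / PROJECTS / SITES constants, so they are omitted here:

print(date_format("2021-09-09"))           # 09 September 2021
print(date_format("2021-09-09T19:03:42"))  # 09 September 2021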
| 34.704698
| 143
| 0.602591
| 626
| 5,171
| 4.905751
| 0.185304
| 0.088571
| 0.019538
| 0.029306
| 0.362423
| 0.31423
| 0.294041
| 0.221752
| 0.205471
| 0.188212
| 0
| 0.001878
| 0.27925
| 5,171
| 148
| 144
| 34.939189
| 0.822109
| 0.007929
| 0
| 0.3
| 0
| 0
| 0.120182
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 1
| 0.038462
| false
| 0
| 0.069231
| 0.007692
| 0.130769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d030a03f345b6f7f695002177f49aa4bf23d3d3c
| 2,471
|
py
|
Python
|
src/ColorfulData_Package/ColorfulData.py
|
Alex8695/Colored
|
f72a5f5da041b73a8771c1b0f6ef80d5e0e83e7b
|
[
"MIT"
] | null | null | null |
src/ColorfulData_Package/ColorfulData.py
|
Alex8695/Colored
|
f72a5f5da041b73a8771c1b0f6ef80d5e0e83e7b
|
[
"MIT"
] | null | null | null |
src/ColorfulData_Package/ColorfulData.py
|
Alex8695/Colored
|
f72a5f5da041b73a8771c1b0f6ef80d5e0e83e7b
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import ceil,floor
class ColorfulData:
"""
    Create a custom, evenly distributed color palette
    \n`Get_Colors_Matched`: key/value pairs evenly distributed across the given unique values
    \n`Get_Colors`: evenly distributed colors for a given length
"""
@staticmethod
def Get_Colors_Matched(items:list([any]),colorPalette:dict[int,any])->np.array:
"""
Returns 2d ndarray of unique `items` for given `colorPalette`
        \nIf `colorPalette` is larger than unique `items`: \n\treturned values are equally spaced from start to end of `colorPalette`
        \n\nIf `colorPalette` is smaller than unique `items`: \n\t`colorPalette` is expanded by repeating colors, in the given order, then equally spaced from start to end
"""
_items = np.unique(np.array(items))
_itemcount = len(_items)
_ret = ColorfulData.Get_Colors(_itemcount,colorPalette=colorPalette)
_ret = np.column_stack(
[np.array(_items),
_ret])
return _ret
@staticmethod
def Get_Colors(count:int,colorPalette:dict[int,any])->np.array:
"""
Returns ndarray of given `count` for given `colorPalette`
        \nIf `colorPalette` is larger than `count`: \n\treturned values are equally spaced from start to end of `colorPalette`
        \n\nIf `colorPalette` is smaller than `count`: \n\t`colorPalette` is expanded by repeating colors, in the given order, then equally spaced from start to end
"""
_paletteCount = len(colorPalette)
_colorsCount = count
_repeat = ceil(_colorsCount/_paletteCount)
_colorsIn = np.repeat(np.array(colorPalette),_repeat)
_remainder = len(_colorsIn)-_colorsCount
_colorIndex = _colorsIn
_skip = floor(_remainder/_colorsCount)
_index = np.arange(start=0,stop=_paletteCount,step= _skip if _skip>1 else 1)
if _skip > 0:
_colorIndex = \
[_colorsIn[x] for x in (_index)][:_colorsCount]
else:
_colorIndex = \
_colorsIn[:_colorsCount]
#print(f'{str(_colorsCount).rjust(5)}:'
#+f' x{_repeat}'
#+f' new palette: {str(len(_colorsIn)).rjust(5)}'
#+f' remainder: {str(_remainder).rjust(5)}'
#+f' skip:{str(_skip).rjust(3)}'
#+f' color index:{str(len(_colorIndex)).rjust(5)}')
return _colorIndex
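A small usage sketch with made-up hex-string palette values; note that despite the dict[int, any] annotations, the implementation passes the palette straight to np.array, so a plain list works:

palette = ['#ff0000', '#00ff00', '#0000ff']
print(ColorfulData.Get_Colors(5, palette))                             # 5 colors cycled from the palette
print(ColorfulData.Get_Colors_Matched(['a', 'b', 'a', 'c'], palette))  # unique keys paired with colors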
| 39.222222
| 167
| 0.628086
| 290
| 2,471
| 5.175862
| 0.310345
| 0.055963
| 0.045303
| 0.055963
| 0.338441
| 0.338441
| 0.338441
| 0.290473
| 0.227848
| 0.227848
| 0
| 0.005495
| 0.263456
| 2,471
| 62
| 168
| 39.854839
| 0.819231
| 0.477135
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.233333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d031d9ffaf0e038bf3ce7cef8d63034738a6cd8f
| 6,453
|
py
|
Python
|
Algorithms/SPO2CART.py
|
rtm2130/SPOTree
|
0b92946a2d14202a1ca251201ddbb07892951e78
|
[
"MIT"
] | 15
|
2020-03-06T23:07:09.000Z
|
2022-03-30T09:46:30.000Z
|
Algorithms/SPO2CART.py
|
Tobias272727/SPOTree
|
88e2e8423cb133f6c521bae5b8c7a0acba01ccab
|
[
"MIT"
] | 1
|
2020-09-14T14:32:03.000Z
|
2020-10-16T02:39:24.000Z
|
Algorithms/SPO2CART.py
|
Tobias272727/SPOTree
|
88e2e8423cb133f6c521bae5b8c7a0acba01ccab
|
[
"MIT"
] | 13
|
2020-04-04T16:43:56.000Z
|
2022-03-27T05:28:19.000Z
|
"""
Encodes SPOT MILP as the structure of a CART tree in order to apply CART's pruning method
Also supports traverse() which traverses the tree
"""
import numpy as np
from mtp_SPO2CART import MTP_SPO2CART
from decision_problem_solver import *
from scipy.spatial import distance
def truncate_train_x(train_x, train_x_precision):
return(np.around(train_x, decimals=train_x_precision))
class SPO2CART(object):
'''
This function initializes the SPO tree
Parameters:
max_depth: the maximum depth of the pre-pruned tree (default = Inf: no depth limit)
min_weight_per_node: the minimum number of observations (with respect to cumulative weight) per node
min_depth: the minimum depth of the pre-pruned tree (default: set equal to max_depth)
min_diff: if depth > min_depth, stop splitting if improvement in fit does not exceed min_diff
binary_splits: if True, use binary splits when building the tree, else consider multiway splits
(i.e., when splitting on a variable, split on all unique vals)
debias_splits/frac_debias_set/min_debias_set_size: Additional params when binary_splits = True. If debias_splits = True, then in each node,
hold out frac_debias_set of the training set (w.r.t. case weights) to evaluate the error of the best splitting point for each feature.
Stop bias-correcting when we have insufficient data; i.e. the total weight in the debias set < min_debias_set_size.
Note: after finding best split point, we then refit the model on all training data and recalculate the training error
quant_discret: continuous variable split points are chosen from quantiles of the variable corresponding to quant_discret,2*quant_discret,3*quant_discret, etc..
run_in_parallel: if set to True, enables parallel computing among num_workers threads. If num_workers is not
specified, uses the number of cpu cores available.
'''
def __init__(self, a,b,**kwargs):
kwargs["SPO_weight_param"] = 1.0
if "SPO_full_error" not in kwargs:
kwargs["SPO_full_error"] = True
self.SPO_weight_param = kwargs["SPO_weight_param"]
self.SPO_full_error = kwargs["SPO_full_error"]
self.tree = MTP_SPO2CART(a,b,**kwargs)
'''
This function fits the tree on data (X,C,weights).
X: The feature data used in tree splits. Can either be a pandas data frame or numpy array, with:
(a) rows of X = observations
(b) columns of X = features
C: the cost vectors used in the leaf node models. Must be a numpy array, with:
(a) rows of C = observations
(b) columns of C = cost vector components
weights: a numpy array of case weights. Is 1-dimensional, with weights[i] yielding weight of observation i
feats_continuous: If False, all feature are treated as categorical. If True, all feature are treated as continuous.
feats_continuous can also be a boolean vector of dimension = num_features specifying how to treat each feature
verbose: if verbose=True, prints out progress in tree fitting procedure
'''
def fit(self, X, C, train_x_precision,
weights=None, feats_continuous=True, verbose=False, refit_leaves=False,
**kwargs):
self.decision_kwargs = kwargs
X = truncate_train_x(X, train_x_precision)
num_obs = C.shape[0]
A = np.array(range(num_obs))
if self.SPO_full_error == True and self.SPO_weight_param != 0.0:
for i in range(num_obs):
A[i] = find_opt_decision(C[i,:].reshape(1,-1),**kwargs)['objective'][0]
if self.SPO_weight_param != 0.0 and self.SPO_weight_param != 1.0:
if self.SPO_full_error == True:
SPO_loss_bound = -float("inf")
for i in range(num_obs):
SPO_loss = -find_opt_decision(-C[i,:].reshape(1,-1),**kwargs)['objective'][0] - A[i]
if SPO_loss >= SPO_loss_bound:
SPO_loss_bound = SPO_loss
else:
c_max = np.max(C,axis=0)
SPO_loss_bound = -find_opt_decision(-c_max.reshape(1,-1),**kwargs)['objective'][0]
#Upper bound for MSE loss: maximum pairwise difference between any two elements
dists = distance.cdist(C, C, 'sqeuclidean')
MSE_loss_bound = np.max(dists)
else:
SPO_loss_bound = 1.0
MSE_loss_bound = 1.0
#kwargs["SPO_loss_bound"] = SPO_loss_bound
#kwargs["MSE_loss_bound"] = MSE_loss_bound
self.tree.fit(X,A,C,
weights=weights, feats_continuous=feats_continuous, verbose=verbose, refit_leaves=refit_leaves,
SPO_loss_bound = SPO_loss_bound, MSE_loss_bound = MSE_loss_bound,
**kwargs)
'''
Prints out the tree.
Required: call tree fit() method first
Prints pruned tree if prune() method has been called, else prints unpruned tree
verbose=True prints additional statistics within each leaf
'''
def traverse(self, verbose=False):
self.tree.traverse(verbose=verbose)
'''
Prunes the tree. Set verbose=True to track progress
'''
def prune(self, Xval, Cval,
weights_val=None, one_SE_rule=True,verbose=False,approx_pruning=False):
num_obs = Cval.shape[0]
Aval = np.array(range(num_obs))
if self.SPO_full_error == True and self.SPO_weight_param != 0.0:
for i in range(num_obs):
Aval[i] = find_opt_decision(Cval[i,:].reshape(1,-1),**self.decision_kwargs)['objective'][0]
self.tree.prune(Xval,Aval,Cval,
weights_val=weights_val,one_SE_rule=one_SE_rule,verbose=verbose,approx_pruning=approx_pruning)
'''
Produces decision given data Xnew
Required: call tree fit() method first
Uses pruned tree if pruning method has been called, else uses unpruned tree
Argument alpha controls level of pruning. If not specified, uses alpha trained from the prune() method
As a step in finding the estimated decisions for data (Xnew), this function first finds
the leaf node locations corresponding to each row of Xnew. It does so by a top-down search
starting at the root node 0.
If return_loc=True, est_decision will also return the leaf node locations for the data, in addition to the decision.
'''
def est_decision(self, Xnew, alpha=None, return_loc=False):
return self.tree.predict(Xnew, np.array(range(0,Xnew.shape[0])), alpha=alpha, return_loc=return_loc)
def est_cost(self, Xnew, alpha=None, return_loc=False):
return self.tree.predict(Xnew, np.array(range(0,Xnew.shape[0])), alpha=alpha, return_loc=return_loc, get_cost=True)
| 44.8125
| 162
| 0.712692
| 1,010
| 6,453
| 4.389109
| 0.272277
| 0.030453
| 0.024363
| 0.020302
| 0.228288
| 0.204827
| 0.116851
| 0.103316
| 0.103316
| 0.103316
| 0
| 0.007947
| 0.200527
| 6,453
| 144
| 163
| 44.8125
| 0.851328
| 0.266233
| 0
| 0.12069
| 0
| 0
| 0.041555
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12069
| false
| 0
| 0.068966
| 0.051724
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d037b0f6bf8c9bdca8f41dcdf3788289e4161b30
| 2,954
|
py
|
Python
|
lib/m96_visualization.py
|
jaenrig-ifx/MID
|
a7284f50105575ed6675daeb8a70e144784a0550
|
[
"MIT"
] | 2
|
2020-12-13T11:52:32.000Z
|
2022-01-06T20:41:24.000Z
|
lib/m96_visualization.py
|
jaenrig-ifx/MID
|
a7284f50105575ed6675daeb8a70e144784a0550
|
[
"MIT"
] | null | null | null |
lib/m96_visualization.py
|
jaenrig-ifx/MID
|
a7284f50105575ed6675daeb8a70e144784a0550
|
[
"MIT"
] | null | null | null |
# This package uses tk to create a simple graphical
# output representing the iDrive state
import tkinter as tk
import numpy as np
# note: numpy/scipy offer rotation helpers, but a hand-rolled 2D rotation matrix is enough here
def rotate_2D(vector, angle):
r = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
return r.dot(vector)
# this class initializes the canvas and all geometrical
# objects drawn onto it. The method setState simply
# adjusts the color of the respective objects
class IDriveVisualizer:
def __init__(self,root):
self.root = root
self.state = [0,0]
cnvs_height = 400
cnvs_width = 400
rect_cn = (400,400)
rect_c = (80,80)
BG = '#F5F5DC'
self.cnvs = tk.Canvas(self.root, bg=BG, height=cnvs_height, width=cnvs_width)
self.cnvs.pack()
delta = 20.
x1a,y1a,x2a,y2a,x3a,y3a = 150.0+delta, 150.0, 200.0, 100.0+delta, 250.0-delta, 150.0
x1b,y1b,x2b,y2b,x3b,y3b = 150.0+delta, 250.0, 250.0-delta, 250.0, 200.0, 300.0-delta
x1c,y1c,x2c,y2c,x3c,y3c = 250.0, 150.0+delta, 300.0-delta, 200.0, 250.0, 250.0-delta
x1d,y1d,x2d,y2d,x3d,y3d = 100.0+delta, 200.0, 150.0, 150.0+delta, 150.0, 250.0-delta
SB1 = '#CCCCCC'
C0 = '#8B8878'
self.h_arrow = [
self.cnvs.create_oval(0,0,0,0,fill='blue'),
self.cnvs.create_polygon(x1a,y1a,x2a,y2a,x3a,y3a,fill=SB1,outline=C0,width=1),
self.cnvs.create_polygon(x1b,y1b,x2b,y2b,x3b,y3b,fill=SB1,outline=C0,width=1),
self.cnvs.create_polygon(x1c,y1c,x2c,y2c,x3c,y3c,fill=SB1,outline=C0,width=1),
self.cnvs.create_polygon(x1d,y1d,x2d,y2d,x3d,y3d,fill=SB1,outline=C0,width=1),
self.cnvs.create_oval(200-rect_c[1]/2.,200-rect_c[0]/2.,200+rect_c[1]/2.,200+rect_c[0]/2.,fill=SB1,outline=C0,width=1)]
r = 120
d = 16
self.h_circle = []
for i in range(0,72):
x1 = rect_cn[1]/2. + rotate_2D([0,-r], i*2*np.pi/72)[0] - d/2.
y1 = rect_cn[0]/2. + rotate_2D([0,-r], i*2*np.pi/72)[1] - d/2.
x2 = rect_cn[1]/2. + rotate_2D([0,-r], i*2*np.pi/72)[0] + d/2.
y2 = rect_cn[0]/2. + rotate_2D([0,-r], i*2*np.pi/72)[1] + d/2.
self.h_circle.append(self.cnvs.create_oval(x1, y1, x2, y2, fill=SB1,outline=C0,width=1))
def setState(self, state):
SB1 = '#CCCCCC'
RED = '#FF3030'
GREEN = '#9acd32'
if self.state[0] != state[0]:
self.cnvs.itemconfig(self.h_arrow[self.state[0]],fill = SB1)
self.cnvs.itemconfig(self.h_arrow[state[0]],fill = GREEN)
self.state[0] = state[0]
if self.state[1] != state[1]:
self.cnvs.itemconfig(self.h_circle[self.state[1]],fill = SB1)
self.cnvs.itemconfig(self.h_circle[state[1]],fill = RED)
self.state[1] = state[1]
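A minimal driver sketch (requires a display; the state indices are made up -- the arrow elements are indexed 0-5 and the ring segments 0-71):

if __name__ == '__main__':
    root = tk.Tk()
    viz = IDriveVisualizer(root)
    viz.setState([2, 10])  # arrow element 2 turns green, ring segment 10 turns red
    root.mainloop()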
| 38.868421
| 132
| 0.574475
| 493
| 2,954
| 3.36714
| 0.273834
| 0.062651
| 0.059036
| 0.057831
| 0.46988
| 0.383735
| 0.229518
| 0.193373
| 0.193373
| 0.171687
| 0
| 0.132541
| 0.259309
| 2,954
| 75
| 133
| 39.386667
| 0.626143
| 0.094787
| 0
| 0.039216
| 0
| 0
| 0.017747
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.039216
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d039ac9e3ce8ea272819341ba9dcf26eae196cff
| 2,054
|
py
|
Python
|
popoff/atom_types.py
|
pzarabadip/PopOff
|
4a9db1ff264ab96196014388721a832aea0f7325
|
[
"MIT"
] | 4
|
2021-06-18T12:22:50.000Z
|
2021-12-27T16:00:31.000Z
|
popoff/atom_types.py
|
pzarabadip/PopOff
|
4a9db1ff264ab96196014388721a832aea0f7325
|
[
"MIT"
] | 1
|
2021-06-27T23:02:23.000Z
|
2021-08-02T10:07:46.000Z
|
popoff/atom_types.py
|
pzarabadip/PopOff
|
4a9db1ff264ab96196014388721a832aea0f7325
|
[
"MIT"
] | 2
|
2021-06-22T10:39:06.000Z
|
2021-12-27T17:52:16.000Z
|
class AtomType():
"""
Class for each atom type.
"""
def __init__( self, atom_type_index, label, element_type, mass, charge, core_shell=None ):
"""
Initialise an instance for each atom type in the structure.
Args:
atom_type_index (int): Integer index for this atom type.
label (str): Label used to identify this atom type.
element_type (str): Elemental symbol for atom type.
mass (float): Mass of the atom type.
charge (float): Charge of the atom type.
core_shell (optional:str): 'core' or 'shell'. Default is None.
Returns:
None
"""
if not isinstance(atom_type_index, int) or isinstance(atom_type_index, bool):
raise TypeError('The atom type index must be an integer.')
if not isinstance(label, str):
raise TypeError('The label must be of type string.')
if not isinstance(element_type, str):
raise TypeError('The element type must be of type string.')
if not isinstance(mass, float):
raise TypeError('The mass must be a float.')
if not isinstance(charge, float):
raise TypeError('The charge must be a float.')
if core_shell not in ['core', 'shell', None]:
raise ValueError('core_shell argument should be "core" or "shell"')
self.atom_type_index = atom_type_index
self.label = label
self.element_type = element_type
self.mass = mass
self.charge = charge
self.formal_charge = charge
self.core_shell = core_shell
@property
def core_shell_string(self):
"""
Defines a string for a comment in a lammps input file format labelling cores/shells.
Args:
None
Returns:
str: Either 'core', 'shell', or '' if core_shell is None.
"""
if self.core_shell is None:
return ''
return self.core_shell
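A quick construction sketch with illustrative values:

li_core = AtomType(1, 'Li core', 'Li', 6.941, 1.0, core_shell='core')
print(li_core.core_shell_string)        # core
o_atom = AtomType(2, 'O', 'O', 15.999, -2.0)
print(repr(o_atom.core_shell_string))   # '' when no core/shell label is set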
| 36.678571
| 94
| 0.581792
| 257
| 2,054
| 4.521401
| 0.256809
| 0.096386
| 0.078313
| 0.025818
| 0.080895
| 0.056799
| 0.056799
| 0.056799
| 0
| 0
| 0
| 0
| 0.336904
| 2,054
| 55
| 95
| 37.345455
| 0.853157
| 0.304284
| 0
| 0
| 0
| 0
| 0.176565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d03a3dde95a4d151a055d00333559975c2f67791
| 2,116
|
py
|
Python
|
fastreg/ols.py
|
ajferraro/fastreg
|
32cdb15908480bd8d5a084126968c78b17010189
|
[
"MIT"
] | null | null | null |
fastreg/ols.py
|
ajferraro/fastreg
|
32cdb15908480bd8d5a084126968c78b17010189
|
[
"MIT"
] | 1
|
2017-11-28T16:21:09.000Z
|
2017-11-28T17:19:04.000Z
|
fastreg/ols.py
|
ajferraro/fastreg
|
32cdb15908480bd8d5a084126968c78b17010189
|
[
"MIT"
] | 3
|
2017-11-28T16:56:25.000Z
|
2021-02-18T18:18:46.000Z
|
import numpy as np
from scipy import stats
import utils
def fit(xdata, ydata):
"""Calculate 2D regression.
Args:
xdata (numpy.ndarray): 1D array of independent data [ntim],
where ntim is the number of time points (or other independent
points).
ydata (numpy.ndarray): 2D array of dependent data [ntim, nspat],
where nspat is the number of spatial points (or other dependent
points).
Returns:
numpy.ndarray of dimension [5, nspat]. The 5 outputs are: slope,
intercept, Pearson's correlation coefficient, two-sided p-value for
a hypothesis test with null hypothesis that the slope is zero,
standard error for the slope estimate.
"""
# Small number to prevent divide-by-zero errors
TINY = 1.0e-20
# Dimensions
ntim = xdata.shape[0]
nspat = ydata.shape[1]
# Add a constant (1) to the xdata to allow for intercept calculation
xdata_plus_const = utils.add_constant(xdata)
# Calculate parameters of the regression by solving the OLS problem
# in its matrix form
mat1 = np.swapaxes(np.dot(xdata_plus_const.T,
(xdata_plus_const[np.newaxis, :, :])), 0, 1)
mat2 = np.dot(xdata_plus_const.T, ydata)
beta = np.linalg.solve(mat1, mat2.T)
output = beta.T
# Pearson correlation coefficient
xm, ym = xdata-xdata.mean(0), ydata-ydata.mean(0)
r_num = np.dot(xm, ym)
    # sums of squares along axis 0 (scipy.stats.ss was removed in newer SciPy releases)
    r_den = np.sqrt(np.sum(xm ** 2, axis=0) * np.sum(ym ** 2, axis=0))
pearson_r = r_num / r_den
# Two-sided p-value for a hypothesis test whose null hypothesis is that
# the slope is zero.
df = ntim - 2
tval = pearson_r * np.sqrt(df / ((1.0 - pearson_r + TINY) *
(1.0 + pearson_r + TINY)))
pval = stats.distributions.t.sf(np.abs(tval), df)*2
# Standard error of the slope estimate
sst = np.sum(ym ** 2, 0)
ssr = (output[0, :] ** 2) * np.sum(xm ** 2)
se = np.sqrt((1. / df) * (sst - ssr))
stderr = se / np.sqrt(np.sum(xm ** 2))
return np.vstack([output, pearson_r, pval, stderr])
| 33.587302
| 75
| 0.614367
| 312
| 2,116
| 4.108974
| 0.384615
| 0.031201
| 0.043682
| 0.020281
| 0.131045
| 0.081123
| 0.049922
| 0.049922
| 0
| 0
| 0
| 0.021611
| 0.278355
| 2,116
| 62
| 76
| 34.129032
| 0.817944
| 0.45983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d03b6aeb253fdd06dec81e7fe877f6830639e18f
| 796
|
py
|
Python
|
event/timeout.py
|
dannl/hunter-sim-classic
|
e32cccc8431cc3e78b08067dd58e10fec52aac6a
|
[
"MIT"
] | null | null | null |
event/timeout.py
|
dannl/hunter-sim-classic
|
e32cccc8431cc3e78b08067dd58e10fec52aac6a
|
[
"MIT"
] | null | null | null |
event/timeout.py
|
dannl/hunter-sim-classic
|
e32cccc8431cc3e78b08067dd58e10fec52aac6a
|
[
"MIT"
] | null | null | null |
from event import Event
class BuffTimeOut(Event):
def __init__(self, buff, rotation, engine, char_state, priority):
super().__init__('buff_time_out', priority)
self.buff = buff
self.rotation = rotation
self.engine = engine
self.char_state = char_state
def act(self):
if not self.engine.has_future_timeout(self.buff.name):
# self.buff.timeout(self.engine, self.char_state)
# self.buff.on_going = False
# self.rotation.statistics.add_end(self.buff.name, self.priority)
self.act_force()
def act_force(self):
self.buff.timeout(self.rotation, self.engine, self.char_state)
self.buff.on_going = False
self.rotation.statistics.add_end(self.buff.name, self.priority)
| 33.166667
| 77
| 0.653266
| 104
| 796
| 4.778846
| 0.298077
| 0.144869
| 0.084507
| 0.114688
| 0.382294
| 0.382294
| 0.382294
| 0.382294
| 0.382294
| 0.382294
| 0
| 0
| 0.238693
| 796
| 23
| 78
| 34.608696
| 0.820132
| 0.173367
| 0
| 0
| 0
| 0
| 0.019878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d03c4e907665dac0cd64374cfeb54bcf34b259de
| 2,017
|
py
|
Python
|
server.py
|
shawkyelshazly1/Chat-App
|
7cb27e9ad0e014409407bc7f2053caf406236797
|
[
"MIT"
] | null | null | null |
server.py
|
shawkyelshazly1/Chat-App
|
7cb27e9ad0e014409407bc7f2053caf406236797
|
[
"MIT"
] | null | null | null |
server.py
|
shawkyelshazly1/Chat-App
|
7cb27e9ad0e014409407bc7f2053caf406236797
|
[
"MIT"
] | null | null | null |
import socket
import threading
import json
PORT = 5000
SERVER = socket.gethostbyname(socket.gethostname())
ADDRESS = ('', PORT)
FORMAT = 'utf-8'
clients, names = [], []
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDRESS)
def StartChat():
print(f'server is working on: {SERVER}')
server.listen()
while True:
try:
conn, addr = server.accept()
try:
data = conn.recv(1024)
message_data = json.loads(data.decode(FORMAT))
if len(clients) >= 1:
join_message = {
'welcome_message': f'{message_data["username"]} Joined the chat'}
join_send_obj = json.dumps(join_message).encode(FORMAT)
broadcastMessage(join_send_obj)
            except Exception:
                # malformed join payload; skip the join broadcast
                continue
message = {'welcome_message': 'Connected Successfully!'}
send_obj = json.dumps(message).encode(FORMAT)
conn.send(send_obj)
thread = threading.Thread(target=handle, args=(conn, addr))
thread.daemon = True
thread.start()
clients.append(conn)
print(f'active connections: {threading.active_count()-1}')
        except Exception:
            continue
def handle(conn, addr):
print(f'new connection {addr}')
connected = True
while connected:
message = conn.recv(1024)
if message:
broadcastMessage(message)
else:
remove_connection(conn)
connected = False
print(f'active connections: {threading.active_count()-1}')
def remove_connection(conn):
    # drop just this connection from the active list
    if conn in clients:
        clients.remove(conn)
def broadcastMessage(message):
    for client in list(clients):  # iterate over a copy; remove_connection may mutate the list
try:
print(message)
client.send(message)
        except Exception:
            print('oops')
            remove_connection(client)
StartChat()
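A companion client sketch, assuming the server above is running locally; the username payload mirrors the shape StartChat() expects on connect:

import json
import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 5000))
client.send(json.dumps({'username': 'alice'}).encode('utf-8'))
print(client.recv(1024).decode('utf-8'))  # server's welcome message
client.close()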
| 23.729412
| 89
| 0.562717
| 204
| 2,017
| 5.470588
| 0.387255
| 0.021505
| 0.021505
| 0.028674
| 0.078853
| 0.078853
| 0.078853
| 0.078853
| 0
| 0
| 0
| 0.011905
| 0.333664
| 2,017
| 84
| 90
| 24.011905
| 0.818452
| 0
| 0
| 0.209677
| 0
| 0
| 0.124442
| 0.040654
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.048387
| 0
| 0.112903
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d041f4ae9fd51d426b42247db152f3d516a92484
| 561
|
py
|
Python
|
slam_recognition/filters/rgby.py
|
SimLeek/pySILEnT
|
feec2d1fb654d7c8dc25f610916f4e9b202a1092
|
[
"Apache-2.0",
"MIT"
] | 5
|
2018-11-18T17:35:59.000Z
|
2019-02-13T20:25:58.000Z
|
slam_recognition/filters/rgby.py
|
SimLeek/slam_recognition
|
feec2d1fb654d7c8dc25f610916f4e9b202a1092
|
[
"Apache-2.0",
"MIT"
] | 12
|
2018-10-31T01:57:55.000Z
|
2019-02-07T05:49:36.000Z
|
slam_recognition/filters/rgby.py
|
SimLeek/pySILEnT
|
feec2d1fb654d7c8dc25f610916f4e9b202a1092
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from slam_recognition.constant_convolutions.center_surround import rgby_3
from slam_recognition.util.get_dimensions import get_dimensions
import tensorflow as tf
def rgby_filter(tensor # type: tf.Tensor
):
n_dimensions = get_dimensions(tensor)
rgby = rgby_3(n_dimensions)
conv_rgby = tf.constant(rgby, dtype=tf.float32, shape=(3, 3, 3, 3))
compiled_rgby = tf.maximum(tf.nn.conv2d(input=tensor, filter=conv_rgby, strides=[1, 1, 1, 1],
padding='SAME'), [0])
return compiled_rgby
| 37.4
| 97
| 0.673797
| 76
| 561
| 4.763158
| 0.473684
| 0.107735
| 0.104972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032333
| 0.228164
| 561
| 14
| 98
| 40.071429
| 0.803695
| 0.026738
| 0
| 0
| 0
| 0
| 0.007353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d042d23ac886c0996046b66ccaa7d239f4bcb644
| 6,293
|
py
|
Python
|
source/preprocessing/lm_text_generator.py
|
lzzhaha/self_talk
|
238e5583c0f6ca0ed8a4a035b74f366d376bcd6d
|
[
"Apache-2.0"
] | 63
|
2020-04-14T03:40:12.000Z
|
2022-03-30T07:10:20.000Z
|
source/preprocessing/lm_text_generator.py
|
lzzhaha/self_talk
|
238e5583c0f6ca0ed8a4a035b74f366d376bcd6d
|
[
"Apache-2.0"
] | 2
|
2021-07-10T04:10:18.000Z
|
2022-03-22T20:33:18.000Z
|
source/preprocessing/lm_text_generator.py
|
lzzhaha/self_talk
|
238e5583c0f6ca0ed8a4a035b74f366d376bcd6d
|
[
"Apache-2.0"
] | 7
|
2020-12-06T03:22:17.000Z
|
2022-03-25T09:27:19.000Z
|
"""
Adapted from https://github.com/huggingface/transformers/blob/master/examples/run_generation.py
"""
import re
import torch
import logging
from typing import List
from collections import defaultdict
from transformers import GPT2Tokenizer, XLNetTokenizer, TransfoXLTokenizer, OpenAIGPTTokenizer
from transformers import GPT2LMHeadModel, XLNetLMHeadModel, TransfoXLLMHeadModel, OpenAIGPTLMHeadModel
logging.basicConfig(
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO)
logger = logging.getLogger(__name__)
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
MODEL_CLASSES = {
'distilgpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'gpt2-medium': (GPT2LMHeadModel, GPT2Tokenizer),
'gpt2-large': (GPT2LMHeadModel, GPT2Tokenizer),
'gpt2-xl': (GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'xlnet-base-cased': (XLNetLMHeadModel, XLNetTokenizer),
'xlnet-large-cased': (XLNetLMHeadModel, XLNetTokenizer),
'transfo-xl-wt103': (TransfoXLLMHeadModel, TransfoXLTokenizer)
}
class LMTextGenerator:
"""
Generating text with a language model using the HuggingFace implementation.
"""
def __init__(self,
model_name: str,
device: torch.device = torch.device("cpu")) -> None:
logger.info("Loading the language model")
self.model_name = model_name
self.lm_head, self.tokenizer = init_model(model_name, device)
self.device = device
def generate(self,
prefixes: List[str],
p: float = 0.0,
k: float = 0.0,
temperature: float = 1.0,
length: int = 25,
num_samples: int = 1,
stop_token=None):
"""
Generate an ending for the beginning of the text
:param prefixes: text on which the generation is conditioned
:param p: p for nucleus sampling
:param k: k for top k sampling
:param temperature: default = 1
:param length: the maximum length to sample
:param num_samples: how many texts to generate at once
:param stop_token: if this token was generated, it's the end of the generated text.
:return: the text
"""
if "transfo-xl" in self.model_name or "xlnet" in self.model_name:
prefixes = [PADDING_TEXT + prefix for prefix in prefixes]
generated_strings = defaultdict(list)
reduce_spaces = lambda s: ' '.join(s.split())
for index, prefix in enumerate(prefixes):
out = self.generate_texts(
prompt_text=prefix, length=length, temperature=temperature,
k=k, p=p, num_samples=num_samples, stop_token=stop_token)
generated_strings[index] = [reduce_spaces(t) for t in out]
return generated_strings
def generate_texts(self,
length: int,
prompt_text: str,
num_samples: int = 1,
temperature: float = 1.0,
p: float = 0.0,
k: float = 0.0,
stop_token='?'):
"""
Generate an ending for the beginning of the text
:param prompt_text: text on which the generation is conditioned
:param p: p for nucleus sampling
:param temperature: default = 1
:param length: the maximum length to sample
:return: the text
"""
eos_token_ids = self.tokenizer.encode(f"{stop_token} <eop> <eod>", add_special_tokens=False)
if "xlnet" in self.model_name and len(eos_token_ids) > 1:
eos_token_ids = eos_token_ids[1:]
k = k if k > 0 else None
p = p if p > 0 else None
context_tokens = self.tokenizer.encode(prompt_text)
max_length = length + len(context_tokens)
input_ids = torch.tensor(context_tokens, device=self.device).unsqueeze(0)
outputs = self.lm_head.generate(
input_ids=input_ids, max_length=max_length, do_sample=True, temperature=temperature,
num_return_sequences=num_samples, top_p=p, top_k=k, eos_token_ids=eos_token_ids, repetition_penalty=2.0)
if len(outputs.shape) == 3:
outputs = outputs[0]
outputs = outputs[:, len(context_tokens):]
outputs = [self.tokenizer.decode(text, clean_up_tokenization_spaces=True) for text in outputs]
if stop_token is not None:
outputs = [text[:text.find(stop_token)+1] for text in outputs if stop_token in text]
outputs = [re.sub(" +", " ", text).strip() for text in outputs]
outputs = set([text for text in outputs if len(text) > 0])
return outputs
def init_model(model_name: str,
device: str):
"""
Initialize a pre-trained LM
:param model_name: from MODEL_CLASSES
:param device: CUDA / CPU device
:return: the model and tokenizer
"""
logger.info(f'Initializing {model_name}')
model_class, tokenizer_class = MODEL_CLASSES[model_name]
tokenizer = tokenizer_class.from_pretrained(model_name)
model = model_class.from_pretrained(model_name)
model.to(device)
model.eval()
return model, tokenizer
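A minimal usage sketch, assuming a compatible (older) transformers version is installed -- generate_texts() passes eos_token_ids, an argument from an earlier generate() API -- and that the gpt2 weights can be downloaded; the prompt text is made up:

if __name__ == "__main__":
    generator = LMTextGenerator("gpt2")
    results = generator.generate(["The quick brown fox"], p=0.9,
                                 length=20, num_samples=2, stop_token=".")
    for index, texts in results.items():  # keys are prefix indices
        print(index, texts)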
| 39.33125
| 116
| 0.656444
| 801
| 6,293
| 5.037453
| 0.330836
| 0.028996
| 0.016357
| 0.015861
| 0.148451
| 0.134077
| 0.106815
| 0.093432
| 0.085502
| 0.085502
| 0
| 0.013407
| 0.253297
| 6,293
| 159
| 117
| 39.578616
| 0.845286
| 0.186715
| 0
| 0.084211
| 0
| 0.010526
| 0.202282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042105
| false
| 0
| 0.073684
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d04578120df1707824a754d31bbc073113fe0980
| 440
|
py
|
Python
|
Python_ABC/2-7dictionary/countLetter.py
|
Chandler-Song/Python_Awesome
|
a44b8b79de7b429a00ac5798e7ecdc26c79a09ed
|
[
"MIT"
] | null | null | null |
Python_ABC/2-7dictionary/countLetter.py
|
Chandler-Song/Python_Awesome
|
a44b8b79de7b429a00ac5798e7ecdc26c79a09ed
|
[
"MIT"
] | null | null | null |
Python_ABC/2-7dictionary/countLetter.py
|
Chandler-Song/Python_Awesome
|
a44b8b79de7b429a00ac5798e7ecdc26c79a09ed
|
[
"MIT"
] | null | null | null |
import pprint
# message
message = '''
Books and doors are the same thing books.
You open them, and you go through into another world.
'''
# split message to words into a list
words = message.split()
# define dictionary counter
count = {}
# traverse every word and accumulate
for word in words:
if not word[-1].isalpha():
word = word[:-1]
word = word.lower()
count.setdefault(word, 0)
    count[word] += 1
# print
pprint.pprint(count)
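The same tally can be written more compactly with collections.Counter -- a sketch mirroring the strip-one-trailing-punctuation behavior above:

from collections import Counter
count2 = Counter(
    (w[:-1] if not w[-1].isalpha() else w).lower() for w in message.split()
)
pprint.pprint(dict(count2))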
| 18.333333
| 53
| 0.702273
| 67
| 440
| 4.61194
| 0.61194
| 0.048544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011142
| 0.184091
| 440
| 24
| 54
| 18.333333
| 0.849582
| 0.247727
| 0
| 0
| 0
| 0
| 0.300614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d04a7bba3d57ad48f159bb585e370285252259ef
| 3,113
|
py
|
Python
|
src/peachyprintertools.py
|
PeachyPrinter/tkpeachyprinter
|
d88dcb4891d19c4b81a7f4f072e120d05c02124c
|
[
"Apache-2.0"
] | 1
|
2017-03-08T02:48:19.000Z
|
2017-03-08T02:48:19.000Z
|
src/peachyprintertools.py
|
PeachyPrinter/tkpeachyprinter
|
d88dcb4891d19c4b81a7f4f072e120d05c02124c
|
[
"Apache-2.0"
] | null | null | null |
src/peachyprintertools.py
|
PeachyPrinter/tkpeachyprinter
|
d88dcb4891d19c4b81a7f4f072e120d05c02124c
|
[
"Apache-2.0"
] | 6
|
2016-05-12T04:10:18.000Z
|
2020-02-15T09:55:00.000Z
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import logging
from peachyprinter import config, PrinterAPI
import argparse
import os
import sys
import time
from Tkinter import *
from ui.main_ui import MainUI
class PeachyPrinterTools(Tk):
def __init__(self, parent, path):
Tk.__init__(self, parent)
self.path = path
self.geometry("800x700")
self.title('Peachy Printer Tools')
if sys.platform != 'darwin':
self.setup_icon()
self.parent = parent
self._api = PrinterAPI()
self.start_main_window()
self.protocol("WM_DELETE_WINDOW", self.close)
def start_main_window(self):
MainUI(self, self._api)
def setup_icon(self):
img_file = os.path.join(self.path, 'resources', 'peachy.gif')
img = PhotoImage(file=img_file)
self.tk.call('wm', 'iconphoto', self._w, img)
def close(self):
self.destroy()
sys.exit(0)
def setup_logging(args):
if args.devmode:
timestr = time.strftime("%Y-%m-%d-%H%M%S")
logfile = os.path.join(config.PEACHY_PATH, 'peachyprinter-%s.log' % timestr)
else:
logfile = os.path.join(config.PEACHY_PATH, 'peachyprinter.log')
if os.path.isfile(logfile):
os.remove(logfile)
logging_format = '%(levelname)s: %(asctime)s %(module)s - %(message)s'
logging_level = getattr(logging, args.loglevel.upper(), "WARNING")
if not isinstance(logging_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
if args.console:
rootLogger = logging.getLogger()
logFormatter = logging.Formatter(logging_format)
fileHandler = logging.FileHandler(logfile)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging_level)
else:
logging.basicConfig(filename=logfile, format=logging_format, level=logging_level)
if __name__ == "__main__":
if not os.path.exists(config.PEACHY_PATH):
os.makedirs(config.PEACHY_PATH)
parser = argparse.ArgumentParser("Configure and print with Peachy Printer")
parser.add_argument('-l', '--log', dest='loglevel', action='store', required=False, default="WARNING", help="Enter the loglevel [DEBUG|INFO|WARNING|ERROR] default: WARNING")
parser.add_argument('-c', '--console', dest='console', action='store_true', required=False, help="Logs to console not file")
parser.add_argument('-d', '--development', dest='devmode', action='store_true', required=False, help="Enable Developer Testing Mode")
args, unknown = parser.parse_known_args()
setup_logging(args)
if args.devmode:
config.devmode = True
if getattr(sys, 'frozen', False):
path = os.path.dirname(sys.executable)
else:
path = os.path.dirname(os.path.realpath(__file__))
app = PeachyPrinterTools(None, path)
app.title('Peachy Printer Tools')
app.mainloop()
| 34.208791
| 186
| 0.666881
| 373
| 3,113
| 5.420912
| 0.38874
| 0.023739
| 0.031652
| 0.02275
| 0.105836
| 0.105836
| 0.0455
| 0.0455
| 0
| 0
| 0
| 0.004845
| 0.204305
| 3,113
| 90
| 187
| 34.588889
| 0.811465
| 0.013813
| 0
| 0.070423
| 0
| 0
| 0.158083
| 0.008475
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070423
| false
| 0
| 0.112676
| 0
| 0.197183
| 0.056338
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d04d2d19a25223c8c1cc1c6c129d213851622ac0
| 813
|
py
|
Python
|
db/db_create.py
|
dafarz/base-service
|
95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7
|
[
"MIT"
] | null | null | null |
db/db_create.py
|
dafarz/base-service
|
95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7
|
[
"MIT"
] | null | null | null |
db/db_create.py
|
dafarz/base-service
|
95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from env_variables import SQL_ALCHEMY_URL
_db_url_without_db = '/'.join(SQL_ALCHEMY_URL.split('/')[:-1])
engine = create_engine(f'{_db_url_without_db}', isolation_level='AUTOCOMMIT', echo=True)
Session = sessionmaker(engine)
def create_database():
db_name = SQL_ALCHEMY_URL.split('/')[-1]
create_database_statement = f"""SELECT 'CREATE DATABASE {db_name}'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '{db_name}')"""
with Session() as con:
        result = con.execute(create_database_statement).fetchone()
        if result:
            # the SELECT yields the CREATE DATABASE statement text as a one-column row
            con.execute(result[0])
else:
print(f'{db_name} already exists')
if __name__ == '__main__':
create_database()
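For a hypothetical database name of "appdb", the guarded statement above renders as shown below; the SELECT returns one row (the CREATE DATABASE text) only when the database is missing, and that text is then executed on the AUTOCOMMIT engine:

# SELECT 'CREATE DATABASE appdb'
# WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'appdb')
db_name = "appdb"  # hypothetical
print(f"""SELECT 'CREATE DATABASE {db_name}'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '{db_name}')""")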
| 32.52
| 109
| 0.676507
| 101
| 813
| 5.079208
| 0.445545
| 0.136452
| 0.076023
| 0.054581
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00311
| 0.209102
| 813
| 24
| 110
| 33.875
| 0.794712
| 0
| 0
| 0
| 0
| 0
| 0.254613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.222222
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d04e92b69338a9a744afe83b7964f2f2ce880ffe
| 2,382
|
py
|
Python
|
util/data.py
|
arturb90/nl2pl
|
2cd37bdd7c6f9f99349f1235001a1755ba169f4a
|
[
"MIT"
] | null | null | null |
util/data.py
|
arturb90/nl2pl
|
2cd37bdd7c6f9f99349f1235001a1755ba169f4a
|
[
"MIT"
] | null | null | null |
util/data.py
|
arturb90/nl2pl
|
2cd37bdd7c6f9f99349f1235001a1755ba169f4a
|
[
"MIT"
] | 1
|
2021-07-16T09:21:15.000Z
|
2021-07-16T09:21:15.000Z
|
import torch
from random import random
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
def collate_fn(batch):
'''
Batch-wise preprocessing and padding.
:param batch: the current batch.
:returns: padded sources, targets, alignments
stacks and corresponding real lengths.
'''
sources, targets, alignments, \
stacks, stack_lens = zip(*batch)
src_lens = [len(src) for src in sources]
tgt_lens = [len(tgt) for tgt in targets]
source_pad = pad_sequence(sources, padding_value=0)
target_pad = pad_sequence(targets, padding_value=0)
align_pad = pad_sequence(alignments, padding_value=0)
max_stack_len = max(s.size(1) for s in stacks)
max_target_len = target_pad.size(0)
    # Must be sent to the device.
stack_pad = torch.zeros(
[len(batch),
max_target_len,
max_stack_len]
).long()
for i in range(len(batch)):
stack = stacks[i]
stack_pad[i, :stack.size(0), :stack.size(1)] = stack
# Padding value is 1, for stacks that only contain
# start-of-sequence token. Ignored during forward pass
# since it corresponds to decoder padding targets.
stack_lens = pad_sequence(stack_lens, padding_value=1)
stack_lens = stack_lens.tolist()
return (source_pad,
target_pad,
src_lens,
tgt_lens,
align_pad,
stack_pad,
stack_lens)
class Dataset(Dataset):
'''
Pytorch dataset object.
'''
def __init__(self, dataset, device, mask_ratio=0):
self.data = dataset
self.device = device
self.mask_ratio = mask_ratio
def __len__(self):
return len(self.data)
def __getitem__(self, i):
sample = self.data[i]
x = torch.LongTensor(sample['src_i']).to(self.device)
y = torch.LongTensor(sample['tgt_i']).to(self.device)
align = torch.LongTensor(sample['alignment']).to(self.device)
stacks = torch.LongTensor(sample['value_stacks']).to(self.device)
stack_lens = torch.LongTensor(sample['stack_lens'])
if self.mask_ratio:
for i in range(1, len(x)-1):
if random() <= self.mask_ratio:
# 3 is <UNK> token.
x[i] = 3
return x, y, align, stacks, stack_lens
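A self-contained wiring sketch for Dataset plus collate_fn; the two samples are invented but follow the key and shape conventions __getitem__ expects (value_stacks is tgt_len x stack_len):

from torch.utils.data import DataLoader

samples = [
    {'src_i': [5, 6, 7], 'tgt_i': [1, 8, 2], 'alignment': [0, 1, 2],
     'value_stacks': [[1, 0], [3, 4], [1, 0]], 'stack_lens': [1, 2, 1]},
    {'src_i': [5, 9], 'tgt_i': [1, 2], 'alignment': [0, 1],
     'value_stacks': [[1], [1]], 'stack_lens': [1, 1]},
]
loader = DataLoader(Dataset(samples, device='cpu'), batch_size=2,
                    collate_fn=collate_fn)
src, tgt, src_lens, tgt_lens, align, stacks, stack_lens = next(iter(loader))
print(src.shape, tgt.shape, stacks.shape)  # (3, 2), (3, 2), (2, 3, 2)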
| 27.697674
| 73
| 0.615869
| 313
| 2,382
| 4.492013
| 0.28754
| 0.05761
| 0.07468
| 0.042674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008163
| 0.280017
| 2,382
| 85
| 74
| 28.023529
| 0.811662
| 0.166667
| 0
| 0
| 0
| 0
| 0.021156
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.08
| 0.02
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d050c2f9fe46941d4dbe952021eec4b5d9528020
| 6,548
|
py
|
Python
|
mth5/io/lemi424.py
|
kujaku11/mth5
|
b7681335871f3cd1b652276fd93c08554c7538ff
|
[
"MIT"
] | 5
|
2021-01-08T23:38:47.000Z
|
2022-03-31T14:13:47.000Z
|
mth5/io/lemi424.py
|
kujaku11/mth5
|
b7681335871f3cd1b652276fd93c08554c7538ff
|
[
"MIT"
] | 76
|
2020-09-04T02:35:19.000Z
|
2022-03-31T22:18:09.000Z
|
mth5/io/lemi424.py
|
kujaku11/mth5
|
b7681335871f3cd1b652276fd93c08554c7538ff
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue May 11 15:31:31 2021
:copyright:
Jared Peacock (jpeacock@usgs.gov)
:license: MIT
"""
from pathlib import Path
import pandas as pd
import numpy as np
import logging
from mth5.timeseries import ChannelTS, RunTS
from mt_metadata.timeseries import Station, Run
class LEMI424:
"""
Read in a LEMI424 file, this is a place holder until IRIS finalizes
their reader.
"""
def __init__(self, fn=None):
self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
self.fn = fn
self._has_data = False
self.sample_rate = 1.0
self.chunk_size = 10000
self.column_names = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"bx",
"by",
"bz",
"temperature_e",
"temperature_h",
"e1",
"e2",
"e3",
"e4",
"battery",
"elevation",
"latitude",
"lat_hemisphere",
"longitude",
"lon_hemisphere",
"n_satellites",
"gps_fix",
"tdiff",
]
if self.fn:
self.read()
@property
def fn(self):
return self._fn
@fn.setter
def fn(self, value):
if value is not None:
value = Path(value)
if not value.exists():
raise IOError(f"Could not find {value}")
self._fn = value
@property
def start(self):
if self._has_data:
return "T".join(
[
"-".join(
[
f"{self._df.year.min()}",
f"{self._df.month.min():02d}",
f"{self._df.day.min():02d}",
]
),
":".join(
[
f"{self._df.hour.min():02d}",
f"{self._df.minute.min():02d}",
f"{self._df.second.min():02d}",
]
),
]
)
@property
def end(self):
if self._has_data:
return "T".join(
[
"-".join(
[
f"{self._df.year.max()}",
f"{self._df.month.max():02d}",
f"{self._df.day.max():02d}",
]
),
":".join(
[
f"{self._df.hour.max():02d}",
f"{self._df.minute.max():02d}",
f"{self._df.second.max():02d}",
]
),
]
)
@property
def latitude(self):
if self._has_data:
return np.rad2deg(self._df.latitude.median() / 3600)
@property
def longitude(self):
if self._has_data:
return np.rad2deg(self._df.longitude.median() / 3600)
@property
def elevation(self):
if self._has_data:
return self._df.elevation.median()
@property
def gps_lock(self):
if self._has_data:
return self._df.gps_fix.values
@property
def station_metadata(self):
s = Station()
if self._has_data:
s.location.latitude = self.latitude
s.location.longitude = self.longitude
s.location.elevation = self.elevation
s.time_period.start = self.start
s.time_period.end = self.end
return s
@property
def run_metadata(self):
r = Run()
r.sample_rate = self.sample_rate
r.data_logger.model = "LEMI424"
r.data_logger.manufacturer = "LEMI"
if self._has_data:
r.data_logger.power_source.voltage.start = self._df.battery.max()
r.data_logger.power_source.voltage.end = self._df.battery.min()
            r.time_period.start = self.start
            r.time_period.end = self.end
        return r
def read(self, fn=None):
"""
Read a LEMI424 file using pandas
        :param fn: path to the LEMI424 file to read, defaults to None
        :type fn: str or :class:`pathlib.Path`, optional
        :return: None; the parsed data is stored on the object
        :rtype: None
"""
if fn is not None:
self.fn = fn
if not self.fn.exists():
msg = "Could not find file %s"
self.logger.error(msg, self.fn)
raise IOError(msg % self.fn)
        self._df = pd.read_csv(self.fn, delimiter=r"\s+", names=self.column_names)
self._has_data = True
def to_run_ts(self, fn=None, e_channels=["e1", "e2"]):
"""
Return a RunTS object from the data
        :param fn: path to the LEMI424 file to read, defaults to None
        :type fn: str or :class:`pathlib.Path`, optional
        :return: all channels packaged with station and run metadata
        :rtype: :class:`mth5.timeseries.RunTS`
"""
ch_list = []
for comp in (
["bx", "by", "bz"] + e_channels + ["temperature_e", "temperature_h"]
):
if comp[0] in ["h", "b"]:
ch = ChannelTS("magnetic")
elif comp[0] in ["e"]:
ch = ChannelTS("electric")
else:
ch = ChannelTS("auxiliary")
ch.sample_rate = self.sample_rate
ch.start = self.start
ch.ts = self._df[comp].values
ch.component = comp
ch_list.append(ch)
return RunTS(
array_list=ch_list,
station_metadata=self.station_metadata,
run_metadata=self.run_metadata,
)
# =============================================================================
# define the reader
# =============================================================================
def read_lemi424(fn, e_channels=["e1", "e2"], logger_file_handler=None):
"""
Read a LEMI 424 TXT file.
:param fn: input file name
:type fn: string or Path
:param e_channels: A list of electric channels to read,
defaults to ["e1", "e2"]
:type e_channels: list of strings, optional
:return: A RunTS object with appropriate metadata
:rtype: :class:`mth5.timeseries.RunTS`
"""
txt_obj = LEMI424()
if logger_file_handler:
txt_obj.logger.addHandler(logger_file_handler)
txt_obj.read(fn)
return txt_obj.to_run_ts(e_channels=e_channels)
| 27.170124
| 81
| 0.464875
| 694
| 6,548
| 4.213256
| 0.257925
| 0.04104
| 0.028728
| 0.035568
| 0.25855
| 0.159371
| 0.127223
| 0.127223
| 0.107387
| 0.107387
| 0
| 0.021607
| 0.399206
| 6,548
| 240
| 82
| 27.283333
| 0.721657
| 0.144319
| 0
| 0.174699
| 0
| 0
| 0.114681
| 0.06205
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084337
| false
| 0
| 0.036145
| 0.006024
| 0.186747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d050d5f902907c952287689dc0a4c79b3535eea2
| 4,895
|
py
|
Python
|
preprocessing/encoder.py
|
mjlaali/housing-model
|
8f0286a4b1909b7e0218d9a8f1340b95d5b9463d
|
[
"Apache-2.0"
] | null | null | null |
preprocessing/encoder.py
|
mjlaali/housing-model
|
8f0286a4b1909b7e0218d9a8f1340b95d5b9463d
|
[
"Apache-2.0"
] | 3
|
2020-11-13T18:43:28.000Z
|
2022-02-10T01:18:05.000Z
|
preprocessing/encoder.py
|
mjlaali/housing_model
|
8f0286a4b1909b7e0218d9a8f1340b95d5b9463d
|
[
"Apache-2.0"
] | null | null | null |
import abc
import logging
import os
import pickle
from collections import Counter
from datetime import datetime
from typing import List, Union
import numpy as np
_logger = logging.getLogger(__name__)
class Transformation(abc.ABC):
@abc.abstractmethod
def analyze(self, raw: object) -> object:
pass
@abc.abstractmethod
def process(self, raw: object) -> object:
pass
def save(self):
pass
class StatelessTransformation(Transformation, abc.ABC):
def analyze(self, raw: object) -> object:
return self.process(raw)
def save(self):
pass
class WhitespaceTokenizer(StatelessTransformation):
def process(self, raw: str) -> list:
return raw.split(" ")
class CategoricalFeature(Transformation):
UNK = "unk"
def __init__(self, vocab_file, num_values):
self._vocab_file = vocab_file
self._token_id = {self.UNK: 0}
self._num_values = num_values
self._if_analyze = not os.path.exists(vocab_file)
if self._if_analyze:
self._vocabs = Counter()
else:
with open(self._vocab_file, "rb") as fin:
self._vocabs = pickle.load(fin)
_logger.warning(
f"The vocab file {vocab_file} already exist, hence, vocabs will not be computed again"
)
def analyze(self, raw: list) -> list:
res = []
for a_token in raw:
if self._if_analyze:
self._vocabs[a_token] += 1
res.append(0)
return res
def process(self, raw: list) -> list:
res = []
for a_token in raw:
token_id = self._token_id.get(a_token)
if token_id is None:
token_id = self._token_id[self.UNK]
res.append(token_id)
return res
def save(self):
if self._if_analyze:
with open(self._vocab_file, "wb") as fout:
pickle.dump(self._vocabs, fout)
total_tokens = sum(self._vocabs.values())
considered_token = 0
for token, freq in self._vocabs.most_common(self._num_values):
considered_token += freq
self._token_id[token] = len(self._token_id)
_logger.info(
f"{self._vocab_file} covers {considered_token/total_tokens:.2f} of tokens."
)
class ToList(StatelessTransformation):
def process(self, raw: object) -> list:
return [raw]
class Lowercase(StatelessTransformation):
def process(self, raw: str) -> str:
return raw.lower()
class DateTransformer(StatelessTransformation):
def __init__(self, template_format: str, base: str):
self._template_format = template_format
self._base = datetime.strptime(base, template_format).date()
def process(self, raw: str) -> int:
delta = datetime.strptime(raw, self._template_format).date() - self._base
return delta.days
class Scale(StatelessTransformation):
def __init__(self, scale: Union[float, int]):
self._scale = scale
def process(self, raw: float) -> float:
return self._scale * raw
class PositionEncoder(StatelessTransformation):
def __init__(self, dim, scale):
# pair dimensions as 0, 0, 2, 2, ... so each sin/cos pair shares an angle rate
i = 2 * (np.arange(dim) // 2)
self._angle_rates = 1 / np.power(scale, i / np.float32(dim))
def process(self, raw: int) -> list:
angle_rads = raw * self._angle_rates
# apply sin to even indices in the array; 2i
angle_rads[0::2] = np.sin(angle_rads[0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[1::2] = np.cos(angle_rads[1::2])
return angle_rads
class Encoder(object):
def __init__(self, transformations: List[Transformation], dtype: str, dim: int):
self._transformations = transformations
self._mode = "analyze"
self._dtype = dtype
self._dim = dim
def __call__(self, raw_input):
assert raw_input is not None
in_val = raw_input
out_val = None
for a_transformation in self._transformations:
op = getattr(a_transformation, self._mode)
out_val = op(in_val)
if out_val is None:
raise ValueError(
f"{type(a_transformation)} generates None value for {in_val}"
)
in_val = out_val
try:
feature_value = np.asarray(out_val, dtype=self._dtype)
except TypeError as e:
raise ValueError(f"{out_val} is not compatible with {self._dtype}") from e
assert len(feature_value.shape) == self._dim
return feature_value
def save(self):
for a_transformation in self._transformations:
a_transformation.save()
self._mode = "process"
@property
def dtype(self):
return self._dtype
@property
def dim(self):
return self._dim
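# Hedged usage sketch (added for illustration; not part of the original file).
# Shows the two-phase analyze/process flow: the encoder stays in "analyze"
# mode until save() is called, then switches to "process". Assumes the
# hypothetical vocab file "vocab.pkl" does not exist yet.
encoder = Encoder(
    transformations=[
        Lowercase(),
        WhitespaceTokenizer(),
        CategoricalFeature("vocab.pkl", num_values=100),
    ],
    dtype="int32",
    dim=1,
)
encoder("Hello World")         # analyze pass: counts vocab, returns zeros
encoder.save()                 # persists the vocab, switches mode to "process"
print(encoder("Hello World"))  # process pass: returns the learned token ids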
| 28.459302
| 102
| 0.614913
| 605
| 4,895
| 4.740496
| 0.242975
| 0.029289
| 0.039052
| 0.04742
| 0.216179
| 0.117155
| 0.022315
| 0.022315
| 0.022315
| 0.022315
| 0
| 0.006036
| 0.289275
| 4,895
| 171
| 103
| 28.625731
| 0.818339
| 0.017569
| 0
| 0.193798
| 0
| 0
| 0.058469
| 0.012276
| 0
| 0
| 0
| 0
| 0.015504
| 1
| 0.178295
| false
| 0.031008
| 0.062016
| 0.054264
| 0.418605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d0526bab2f2fcce625c5809ae54737f104402629
| 2,402
|
py
|
Python
|
tests/test_anglicize.py
|
hugovk/python-anglicize
|
1284ec72026f78d56ff5e995328547565ddb4f0b
|
[
"BSD-2-Clause"
] | 1
|
2020-03-08T09:33:14.000Z
|
2020-03-08T09:33:14.000Z
|
tests/test_anglicize.py
|
hugovk/python-anglicize
|
1284ec72026f78d56ff5e995328547565ddb4f0b
|
[
"BSD-2-Clause"
] | 2
|
2020-03-08T16:45:08.000Z
|
2020-03-08T20:34:04.000Z
|
tests/test_anglicize.py
|
hugovk/python-anglicize
|
1284ec72026f78d56ff5e995328547565ddb4f0b
|
[
"BSD-2-Clause"
] | 1
|
2020-03-08T16:33:22.000Z
|
2020-03-08T16:33:22.000Z
|
import pytest
from pytest import param as p
from anglicize import anglicize, build_mapping
@pytest.mark.parametrize(
"text, expected",
[
p("Abc 123", "Abc 123", id="noop"),
p("ĂaÂâÎîȘșȚț", "AaAaIiSsTt", id="romanian"),
p("ĄąĆćĘꣳŃńŹźŻż", "AaCcEeLlNnZzZz", id="polish"),
p("ÁáÉéÍíÓóÖöŐőÚúÜüŰű", "AaEeIiOoOoOoUuUuUu", id="hungarian"),
p("ÀàÆæÇçÊêËëÈèÉéÏïÔôŒœÙùÛûŸÿ", "AaAaCcEeEeEeEeIiOoOoUuUuYy", id="french"),
p("ÁáÉéÍíÓóÑñÚúÝý", "AaEeIiOoNnUuYy", id="spanish"),
p("ÁáÂâÃãÀàÇçÉéÊêÍíÓóÔôÕõÚú", "AaAaAaAaCcEeEeIiOoOoOoUu", id="portuguese"),
# Don't be fooled by the similarities, these four swear they speak different languages:
p("ĆćČčĐ𩹮ž", "CcCcDdSsZz", id="bosnian-croatian-montenegrin-serbian"),
p("ÇçËë", "CcEe", id="albanian"),
p("ßÄäÖöÜü", "sAaOoUu", id="german"),
p("IJij", "Ii", id="dutch"),
p("Ëë", "Ee", id="luxembourgish"),
p("ÐðÉéÓóÚúÝýÞþÆæÖö", "DdEeOoUuYyPpAaOo", id="icelandic"),
p("ÆæÅ娸ÉéÈèÊêÓóÒòÂâÔô", "AaAaOoEeEeEeOoOoAaOo", id="norwegian"),
p("ÅåÄäÖö", "AaAaOo", id="swedish"),
p("ÅåÄäÖöŠšŽž", "AaAaOoSsZz", id="finnish"),
p("ŠšŽžÄäÖöÜü", "SsZzAaOoUu", id="estonian"),
p("ĀāČčĒēĢģĪīĶķĻļŅņŠšŪūŽž", "AaCcEeGgIiKkLlNnSsUuZz", id="latvian"),
p("ĄąČčĖėĘęĮįŠšŲųŪū", "AaCcEeEeIiSsUuUu", id="lithuanian"),
p("Ç窺ĞğIıİiÖöÜü", "CcSsGgIiIiOoUu", id="turkish"),
p("ÄäƏəÇçĞğIıİiKkÖöŞşÜü", "AaAaCcGgIiIiKkOoSsUu", id="azerbaijani"),
p("ÄäÇçĞğIıIiÍíÑñÖöŞşÜü", "AaCcGgIiIiIiNnOoSsUu", id="tatar"),
p("ÇçÄäŇňÖöŞşÜüÝýŽž", "CcAaNnOoSsUuYyZz", id="turkmen"),
p("ÄäÇçÊêIıİiÖöŞşŢţÜü", "AaCcEeIiIiOoSsTtUu", id="gagauz"),
p("ǍǎČčŠšŽž", "AaCcSsZz", id="bulgarian-transliteration"),
p("ᵻᶧ", "Ii", id="misc"),
],
)
def test_anglicize(text, expected):
assert anglicize(text) == expected
@pytest.mark.parametrize(
"mapping, expected",
[
(
{"A": "ĂÂ", "T": "Ț", "S": "Șß"},
{
"ă": "a",
"â": "a",
"ș": "s",
"ț": "t",
"Ă": "A",
"Â": "A",
"Ș": "S",
"Ț": "T",
"ß": "s",
},
),
],
)
def test_build_mapping(mapping, expected):
assert build_mapping(mapping) == expected
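# Hedged illustration (added; not part of the original test file).
# build_mapping expands {ASCII: "accented variants"} into a per-character
# translation table covering both cases, as the second test above expects.
assert build_mapping({"A": "ĂÂ"}) == {"ă": "a", "â": "a", "Ă": "A", "Â": "A"}
assert anglicize("Ă â") == "A a"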
| 37.53125
| 94
| 0.562448
| 224
| 2,402
| 6.03125
| 0.558036
| 0.026647
| 0.031088
| 0.005922
| 0.011843
| 0.011843
| 0.011843
| 0.011843
| 0
| 0
| 0
| 0.003348
| 0.253955
| 2,402
| 63
| 95
| 38.126984
| 0.747768
| 0.034971
| 0
| 0.070175
| 0
| 0
| 0.418394
| 0.088515
| 0
| 0
| 0
| 0
| 0.035088
| 1
| 0.035088
| false
| 0
| 0.052632
| 0
| 0.087719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d0543092d21f71915cd4c279a74f105e00c18015
| 7,035
|
py
|
Python
|
cogs/Reminders.py
|
noahkw/botw-bot
|
8d8c9515a177c52270093fb64abf34d111535d16
|
[
"MIT"
] | 1
|
2020-11-29T23:00:27.000Z
|
2020-11-29T23:00:27.000Z
|
cogs/Reminders.py
|
noahkw/botw-bot
|
8d8c9515a177c52270093fb64abf34d111535d16
|
[
"MIT"
] | 18
|
2020-08-05T11:59:31.000Z
|
2022-03-15T03:48:40.000Z
|
cogs/Reminders.py
|
noahkw/botw-bot
|
8d8c9515a177c52270093fb64abf34d111535d16
|
[
"MIT"
] | null | null | null |
import logging
import re
from datetime import timezone
import pendulum
from aioscheduler import TimedScheduler
from dateparser import parse
from discord.ext import commands
from discord.ext.menus import MenuPages
import db
from cogs import CustomCog, AinitMixin
from cogs.Logging import log_usage
from const import UNICODE_EMOJI
from menu import ReminderListSource, SimpleConfirm
from models import Reminder
from util import has_passed, auto_help, safe_send
logger = logging.getLogger(__name__)
def setup(bot):
bot.add_cog(Reminders(bot))
class ReminderConverter(commands.Converter):
async def convert(self, ctx, argument):
# match strings like 'in 1 hour to do the laundry'
r_to = re.search(r"(.*?) to (.*)", argument, re.DOTALL)
if r_to:
return r_to.group(1), r_to.group(2)
# match strings like '"28-05-20 at 18:00 KST" "Red Velvet comeback"'
# may be improved to also parse the forms '"longer string" singleword'
# and 'singleword "longer string"'
r_quotes = re.search(r'"(.*)" *"(.*)"', argument, re.DOTALL)
if r_quotes:
return r_quotes.group(1), r_quotes.group(2)
# match strings like 'tomorrow football'
tokens = argument.split()
return tokens[0], tokens[1]
def parse_date(date):
parsed_date = parse(date)
if parsed_date is None:
raise commands.BadArgument("Couldn't parse the date.")
if parsed_date.tzinfo is None:
parsed_date = parsed_date.astimezone(timezone.utc)
parsed_date = pendulum.parse(str(parsed_date))
# add 1s to account for processing time, results in nicer diffs
parsed_date = parsed_date.add(seconds=1)
if has_passed(parsed_date):
raise commands.BadArgument(
f"`{parsed_date.to_cookie_string()}` is in the past."
)
return parsed_date
class Reminders(CustomCog, AinitMixin):
def __init__(self, bot):
super().__init__(bot)
self.scheduler = TimedScheduler(prefer_utc=True)
self.scheduler.start()
Reminder.inject_bot(bot)
super(AinitMixin, self).__init__()
async def _ainit(self):
await self.bot.wait_until_ready()
async with self.bot.Session() as session:
reminders = await db.get_reminders(session)
for reminder in reminders:
if reminder.is_due():
await self.remind_user(reminder.reminder_id, late=True)
else:
self.scheduler.schedule(
self.remind_user(reminder.reminder_id), reminder.due
)
logger.info(f"# Initial reminders from db: {len(reminders)}")
def cog_unload(self):
self.scheduler._task.cancel()
@auto_help
@commands.group(
name="reminders",
aliases=["remindme", "remind"],
invoke_without_command=True,
brief="Set reminders in the future",
)
async def reminders_(self, ctx, *, args: ReminderConverter = None):
if args:
await ctx.invoke(self.add, args=args)
else:
await ctx.send_help(self.reminders_)
@reminders_.command(brief="Adds a new reminder")
@log_usage(command_name="remind")
async def add(self, ctx, *, args: ReminderConverter):
"""
Adds a new reminder.
Example usage:
`{prefix}remind in 3 hours to do the laundry`
`{prefix}remind 15-06-20 at 6pm KST to Irene & Seulgi debut`
`{prefix}remind in 6 minutes 30 seconds to eggs`
"""
when, what = args
parsed_date = parse_date(when)
now = pendulum.now("UTC")
diff = parsed_date.diff_for_humans(now, True)
async with self.bot.Session() as session:
reminder = Reminder(_user=ctx.author.id, due=parsed_date, content=what)
session.add(reminder)
await session.flush()
self.scheduler.schedule(self.remind_user(reminder.reminder_id), parsed_date)
await session.commit()
await ctx.send(
f"I'll remind you on `{parsed_date.to_cookie_string()}` (in {diff}): `{what}`."
)
@reminders_.command()
async def list(self, ctx):
"""
Lists your reminders
"""
async with self.bot.Session() as session:
reminders = await db.get_reminders(session, user_id=ctx.author.id)
if len(reminders) > 0:
pages = MenuPages(
source=ReminderListSource(reminders), clear_reactions_after=True
)
await pages.start(ctx)
else:
await ctx.send("You have 0 pending reminders!")
async def remind_user(self, reminder_id, late=False):
async with self.bot.Session() as session:
reminder = await db.get_reminder(session, reminder_id)
diff = reminder.created.diff_for_humans(reminder.due, True)
assert not reminder.done
user = reminder.user
if user and late:
await safe_send(
user,
f"{self.bot.custom_emoji['SHOUT']} You told me to remind you some time ago. "
f"Sorry for being late:\n{reminder.content}",
)
elif user:
message = await safe_send(
user,
f"{self.bot.custom_emoji['SHOUT']} You told me to remind you {diff} ago:\n{reminder.content}",
)
if message:
ctx = await self.bot.get_context(message)
ctx.author = user
confirm = await SimpleConfirm(
message, timeout=120.0, emoji=UNICODE_EMOJI["SNOOZE"]
).prompt(ctx)
if confirm:
try:
new_due = await self.prompt_snooze_time(reminder)
reminder.due = new_due
self.scheduler.schedule(
self.remind_user(reminder.reminder_id), new_due
)
await session.commit()
return
except commands.BadArgument as ba:
await ctx.send(ba)
reminder.done = True
await session.commit()
async def prompt_snooze_time(self, reminder):
user = reminder.user
message = await user.send(
"When do you want me to remind you again? (e.g.: `in 30 minutes`)"
)
channel = message.channel
answer = await self.bot.wait_for(
"message", check=lambda msg: msg.channel == channel and msg.author == user
)
parsed_date = parse_date(answer.content)
now = pendulum.now("UTC")
diff = parsed_date.diff_for_humans(now, True)
await channel.send(f"Reminding you again in {diff}.")
return parsed_date
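# Hedged illustration (added; not part of the original cog).
# ReminderConverter above tries three patterns in order; this standalone
# snippet reproduces the first one (lazy match up to the first " to ").
import re

argument = "in 1 hour to do the laundry"
match = re.search(r"(.*?) to (.*)", argument, re.DOTALL)
assert match.group(1) == "in 1 hour"
assert match.group(2) == "do the laundry"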
| 33.341232
| 114
| 0.5828
| 825
| 7,035
| 4.831515
| 0.282424
| 0.050176
| 0.013046
| 0.016056
| 0.190416
| 0.157802
| 0.149774
| 0.149774
| 0.129704
| 0.089814
| 0
| 0.007995
| 0.324378
| 7,035
| 210
| 115
| 33.5
| 0.830633
| 0.045203
| 0
| 0.145695
| 0
| 0.006623
| 0.100343
| 0.028202
| 0
| 0
| 0
| 0
| 0.006623
| 1
| 0.02649
| false
| 0.013245
| 0.099338
| 0
| 0.178808
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d05487672c8369c2d9e228e3c2e3d6e6a8514f49
| 4,598
|
py
|
Python
|
lambda/code/lambda_function.py
|
acloudfan/Amazon-Aurora-DAS-Setup
|
9c5ca4ac3705e78e877fc51b9ba927a7d367d029
|
[
"MIT-0"
] | null | null | null |
lambda/code/lambda_function.py
|
acloudfan/Amazon-Aurora-DAS-Setup
|
9c5ca4ac3705e78e877fc51b9ba927a7d367d029
|
[
"MIT-0"
] | null | null | null |
lambda/code/lambda_function.py
|
acloudfan/Amazon-Aurora-DAS-Setup
|
9c5ca4ac3705e78e877fc51b9ba927a7d367d029
|
[
"MIT-0"
] | 2
|
2021-05-25T16:14:13.000Z
|
2022-01-14T14:04:49.000Z
|
import json
import base64
import os
import boto3
import zlib
# Used for decryption of the received payload
import aws_encryption_sdk
from aws_encryption_sdk import CommitmentPolicy
from aws_encryption_sdk.internal.crypto import WrappingKey
from aws_encryption_sdk.key_providers.raw import RawMasterKeyProvider
from aws_encryption_sdk.identifiers import WrappingAlgorithm, EncryptionKeyType
from processor import heartbeat_processor
from processor import sqlevents_processor
# Controls the filtering of heartbeat events
FILTER_HEARTBEAT_EVENTS = os.getenv('FILTER_HEARTBEAT_EVENTS', "false").lower() == "true"
# Set up the session and clients
REGION_NAME= os.environ['AWS_REGION']
session = boto3.session.Session()
kms = session.client('kms', region_name=REGION_NAME)
# Create the encryption client
enc_client = aws_encryption_sdk.EncryptionSDKClient(commitment_policy=CommitmentPolicy.REQUIRE_ENCRYPT_ALLOW_DECRYPT)
# Represents the Master Key Provider
class MyRawMasterKeyProvider(RawMasterKeyProvider):
provider_id = "BC"
def __new__(cls, *args, **kwargs):
obj = super(RawMasterKeyProvider, cls).__new__(cls)
return obj
def __init__(self, plain_key):
RawMasterKeyProvider.__init__(self)
self.wrapping_key = WrappingKey(wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING,
wrapping_key=plain_key, wrapping_key_type=EncryptionKeyType.SYMMETRIC)
def _get_raw_key(self, key_id):
return self.wrapping_key
# Decrypt the payload using the key and then decompress (zip to plaintext)
def decrypt_decompress(payload, key):
my_key_provider = MyRawMasterKeyProvider(key)
my_key_provider.add_master_key("DataKey")
decrypted_plaintext, header = enc_client.decrypt(
source=payload,
materials_manager=aws_encryption_sdk.materials_managers.default.DefaultCryptoMaterialsManager(master_key_provider=my_key_provider))
# print(decrypted)
return zlib.decompress(decrypted_plaintext, zlib.MAX_WBITS + 16)
# Lambda Handler function
def lambda_handler(event, context):
# Output is an array of transformed records
output = []
heartBeatEventRecords = heartbeat_processor.HeartBeatEventRecords()
sQLEventRecords = sqlevents_processor.SQLEventRecords()
for record in event['records']:
# Get the data from record - it is in base64 format
data = record['data']
payload_overall = base64.b64decode(data)
payload_overall = payload_overall.decode('utf-8')
# Parse the json payload
payload_overall_json=json.loads(payload_overall)
# Get the base64 decoded databaseActivityEvents array from the record
payload_decoded = base64.b64decode(payload_overall_json['databaseActivityEvents'])
# Decrypt the key
# RESOURCE_ID = Cluster ID of the RDS instance
RESOURCE_ID = os.environ['RESOURCE_ID']
# Decrypt
data_key_decoded = base64.b64decode(payload_overall_json['key'])
data_key_decrypt_result = kms.decrypt(CiphertextBlob=data_key_decoded, EncryptionContext={'aws:rds:dbc-id': RESOURCE_ID})
# Decrypt the data
# print(data_key_decrypt_result['Plaintext'])
data_decrypted_decompressed = decrypt_decompress(payload_decoded, data_key_decrypt_result['Plaintext'])
# Parse the JSON
data_decrypted_decompressed_json =json.loads(data_decrypted_decompressed)
if data_decrypted_decompressed_json['databaseActivityEventList'][0]['type'] == "heartbeat" :
# print(data_decrypted_decompressed_json)
heartBeatEventRecords.add(record['recordId'], data_decrypted_decompressed_json,record['approximateArrivalTimestamp'])
else:
sQLEventRecords.add(record['recordId'], data_decrypted_decompressed_json, record['approximateArrivalTimestamp'])
# output.append(heartBeatEventRecords.process(FILTER_HEARTBEAT_EVENTS))
# output.append(sQLEventRecords.process())
# output_hb = heartBeatEventRecords.process(FILTER_HEARTBEAT_EVENTS)
output_hb = heartBeatEventRecords.process(FILTER_HEARTBEAT_EVENTS)
output_sql = sQLEventRecords.process()
print('Total records processed {} records.'.format(len(output_hb)+len(output_sql)))
return {'records': output_hb + output_sql }
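# Hedged illustration (added; not part of the original Lambda).
# decrypt_decompress() above inflates with zlib.MAX_WBITS + 16, which tells
# zlib to expect a gzip wrapper; this round-trip shows the same flag.
import gzip
import zlib

gz = gzip.compress(b"activity events")
assert zlib.decompress(gz, zlib.MAX_WBITS + 16) == b"activity events"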
| 38.316667
| 139
| 0.738582
| 502
| 4,598
| 6.464143
| 0.316733
| 0.028043
| 0.034515
| 0.044684
| 0.145146
| 0.127273
| 0.08567
| 0.08567
| 0.04869
| 0
| 0
| 0.008302
| 0.187908
| 4,598
| 119
| 140
| 38.638655
| 0.860739
| 0.182253
| 0
| 0
| 0
| 0
| 0.074659
| 0.033182
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.233333
| 0.016667
| 0.416667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d05d10f97cc5c0bdb332b3fd013760d9dc94d719
| 9,449
|
py
|
Python
|
Code/Maskrcnn-keras/Experiments2/our_preprocessing.py
|
SZamboni/NightPedestrianDetection
|
fc492e0bd3f6f99070975d08a229cc6ef969f9e8
|
[
"MIT"
] | 3
|
2020-04-03T06:25:23.000Z
|
2021-04-06T07:30:56.000Z
|
Code/Maskrcnn-keras/Experiments2/our_preprocessing.py
|
SZamboni/NightPedestrianDetection
|
fc492e0bd3f6f99070975d08a229cc6ef969f9e8
|
[
"MIT"
] | null | null | null |
Code/Maskrcnn-keras/Experiments2/our_preprocessing.py
|
SZamboni/NightPedestrianDetection
|
fc492e0bd3f6f99070975d08a229cc6ef969f9e8
|
[
"MIT"
] | 1
|
2021-04-06T07:40:26.000Z
|
2021-04-06T07:40:26.000Z
|
import cv2
import numpy as np
from skimage import exposure as ex
from skimage import data
from PIL import Image
import skfuzzy as fuzz
import math
import timeit
import time
'''
Histogram equalization with colour YCR_CB and histogram equalization only on Y
@img: the image to modify
@return: the image with the histogram equalized
'''
def hisEqulColor_YCRCB(img):
ycrcb=cv2.cvtColor(img,cv2.COLOR_RGB2YCR_CB)
channels=cv2.split(ycrcb)
cv2.equalizeHist(channels[0],channels[0])
cv2.merge(channels,ycrcb)
result = cv2.cvtColor(ycrcb,cv2.COLOR_YCR_CB2RGB)
return result
'''
Histogram equalization with colour HSV, equalizing only the first channel
@img: the image to modify
@return: the image with the histogram equalized
'''
def hisEqulColor_HSV(img):
ycrcb=cv2.cvtColor(img,cv2.COLOR_RGB2HSV)
channels=cv2.split(ycrcb)
cv2.equalizeHist(channels[0],channels[0])
cv2.merge(channels,ycrcb)
result = cv2.cvtColor(ycrcb,cv2.COLOR_HSV2RGB)
return result
# Histogram equalization from
# https://github.com/AndyHuang1995/Image-Contrast-Enhancement/blob/master/he.py
'''
Histogram equalization equalizing every colour
@img: the image to modify
@return: the image with the histogram equalized
'''
def hisEqulColor_RGB(img):
outImg = np.zeros((img.shape[0],img.shape[1],3))
for channel in range(img.shape[2]):
outImg[:, :, channel] = ex.equalize_hist(img[:, :, channel])*255
outImg[outImg>255] = 255
outImg[outImg<0] = 0
return outImg.astype(np.uint8)
'''
Gamma correction
@image: the image to modify
@gamma: the gamma value, 1.0 does nothing
@return: the image with the gamma corrected
'''
def adjust_gamma(image, gamma=1.0):
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(image, table)
# Adaptive gamma correction based on the reference.
# Reference:
# S. Huang, F. Cheng and Y. Chiu, "Efficient Contrast Enhancement Using Adaptive Gamma Correction With
# Weighting Distribution," in IEEE Transactions on Image Processing, vol. 22, no. 3, pp. 1032-1041,
# March 2013. doi: 10.1109/TIP.2012.2226047
# Revised from https://github.com/mss3331/AGCWD/blob/master/AGCWD.m
#from https://github.com/qyou/AGCWD/blob/master/agcwd.py
import numpy as np
import cv2
'''
Adaptive gamma correction with Weighting Distribution
@image: the image to modify
@w: the weight distribution
@return: the image with the gamma corrected
'''
def agcwd(image, w=0.5):
is_colorful = len(image.shape) >= 3
img = extract_value_channel(image) if is_colorful else image
img_pdf = get_pdf(img)
max_intensity = np.max(img_pdf)
min_intensity = np.min(img_pdf)
w_img_pdf = max_intensity * (((img_pdf - min_intensity) / (max_intensity - min_intensity)) ** w)
w_img_cdf = np.cumsum(w_img_pdf) / np.sum(w_img_pdf)
l_intensity = np.arange(0, 256)
l_intensity = np.array([255 * (e / 255) ** (1 - w_img_cdf[e]) for e in l_intensity], dtype=np.uint8)
enhanced_image = np.copy(img)
height, width = img.shape
for i in range(0, height):
for j in range(0, width):
intensity = enhanced_image[i, j]
enhanced_image[i, j] = l_intensity[intensity]
enhanced_image = set_value_channel(image, enhanced_image) if is_colorful else enhanced_image
return enhanced_image
def extract_value_channel(color_image):
color_image = color_image.astype(np.float32) / 255.
hsv = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
v = hsv[:, :, 2]
return np.uint8(v * 255)
def get_pdf(gray_image):
height, width = gray_image.shape
pixel_count = height * width
hist = cv2.calcHist([gray_image], [0], None, [256], [0, 256])
return hist / pixel_count
def set_value_channel(color_image, value_channel):
value_channel = value_channel.astype(np.float32) / 255
color_image = color_image.astype(np.float32) / 255.
color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
color_image[:, :, 2] = value_channel
color_image = np.array(cv2.cvtColor(color_image, cv2.COLOR_HSV2BGR) * 255, dtype=np.uint8)
return color_image
# Then we sould have from https://github.com/AndyHuang1995/Image-Contrast-Enhancement/blob/master/ying.py
# from https://www.programcreek.com/python/example/89353/cv2.createCLAHE,
# CLAHE (Contrast-limited adaptive histogram equalization)
'''
Function that applies CLAHE (Contrast-limited adaptive histogram equalization)
to the lightness channel of the image in LAB space
@image: the image to modify
@return: the image with the histogram corrected
'''
def applyCLAHE(image, clip_limit=3):
# convert image to LAB color model
image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
# split the image into L, A, and B channels
l_channel, a_channel, b_channel = cv2.split(image_lab)
# apply CLAHE to lightness channel
clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(8, 8))
cl = clahe.apply(l_channel)
# merge the CLAHE enhanced L channel with the original A and B channel
merged_channels = cv2.merge((cl, a_channel, b_channel))
# convert image from LAB color model back to BGR color model
final_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2BGR)
return final_image
# RETINEX from https://github.com/dongb5/Retinex/blob/master/
def singleScaleRetinex(img, sigma):
retinex = np.log10(img) - np.log10(cv2.GaussianBlur(img, (0, 0), sigma))
return retinex
def multiScaleRetinex(img, sigma_list):
retinex = np.zeros_like(img)
for sigma in sigma_list:
retinex += singleScaleRetinex(img, sigma)
retinex = retinex / len(sigma_list)
return retinex
def colorRestoration(img, alpha, beta):
img_sum = np.sum(img, axis=2, keepdims=True)
color_restoration = beta * (np.log10(alpha * img) - np.log10(img_sum))
return color_restoration
def simplestColorBalance(img, low_clip, high_clip):
total = img.shape[0] * img.shape[1]
for i in range(img.shape[2]):
unique, counts = np.unique(img[:, :, i], return_counts=True)
current = 0
for u, c in zip(unique, counts):
if float(current) / total < low_clip:
low_val = u
if float(current) / total < high_clip:
high_val = u
current += c
img[:, :, i] = np.maximum(np.minimum(img[:, :, i], high_val), low_val)
return img
def MSRCR(img, sigma_list, G, b, alpha, beta, low_clip, high_clip):
img = np.float64(img) + 1.0
img_retinex = multiScaleRetinex(img, sigma_list)
img_color = colorRestoration(img, alpha, beta)
img_msrcr = G * (img_retinex * img_color + b)
for i in range(img_msrcr.shape[2]):
img_msrcr[:, :, i] = (img_msrcr[:, :, i] - np.min(img_msrcr[:, :, i])) / \
(np.max(img_msrcr[:, :, i]) - np.min(img_msrcr[:, :, i])) * \
255
img_msrcr = np.uint8(np.minimum(np.maximum(img_msrcr, 0), 255))
img_msrcr = simplestColorBalance(img_msrcr, low_clip, high_clip)
return img_msrcr
def automatedMSRCR(img, sigma_list):
img = np.float64(img) + 1.0
img_retinex = multiScaleRetinex(img, sigma_list)
for i in range(img_retinex.shape[2]):
unique, count = np.unique(np.int32(img_retinex[:, :, i] * 100), return_counts=True)
for u, c in zip(unique, count):
if u == 0:
zero_count = c
break
low_val = unique[0] / 100.0
high_val = unique[-1] / 100.0
for u, c in zip(unique, count):
if u < 0 and c < zero_count * 0.1:
low_val = u / 100.0
if u > 0 and c < zero_count * 0.1:
high_val = u / 100.0
break
img_retinex[:, :, i] = np.maximum(np.minimum(img_retinex[:, :, i], high_val), low_val)
img_retinex[:, :, i] = (img_retinex[:, :, i] - np.min(img_retinex[:, :, i])) / \
(np.max(img_retinex[:, :, i]) - np.min(img_retinex[:, :, i])) \
* 255
img_retinex = np.uint8(img_retinex)
return img_retinex
'''
Function that applies MSRCP (Multi-Scale Retinex with Chromaticity Preservation)
@img: the image to modify
@sigma_list: the list of sigmas, by default [15, 80, 250]
@low_clip, @high_clip: percentile bounds for the simplest color balance
@return: the image with the histogram corrected
'''
def MSRCP(img, sigma_list = [15,80,250], low_clip = 0.01, high_clip = 0.99):
img = np.float64(img) + 1.0
intensity = np.sum(img, axis=2) / img.shape[2]
retinex = multiScaleRetinex(intensity, sigma_list)
intensity = np.expand_dims(intensity, 2)
retinex = np.expand_dims(retinex, 2)
intensity1 = simplestColorBalance(retinex, low_clip, high_clip)
intensity1 = (intensity1 - np.min(intensity1)) / \
(np.max(intensity1) - np.min(intensity1)) * \
255.0 + 1.0
img_msrcp = np.zeros_like(img)
for y in range(img_msrcp.shape[0]):
for x in range(img_msrcp.shape[1]):
B = np.max(img[y, x])
A = np.minimum(256.0 / B, intensity1[y, x, 0] / intensity[y, x, 0])
img_msrcp[y, x, 0] = A * img[y, x, 0]
img_msrcp[y, x, 1] = A * img[y, x, 1]
img_msrcp[y, x, 2] = A * img[y, x, 2]
img_msrcp = np.uint8(img_msrcp - 1.0)
return img_msrcp
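# Hedged illustration (added; not part of the original module).
# adjust_gamma() above precomputes a 256-entry lookup table; the same table
# can be inspected with numpy alone, without cv2.
import numpy as np

gamma = 2.0
inv_gamma = 1.0 / gamma
table = np.array(
    [((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]
).astype("uint8")
assert table[0] == 0 and table[255] == 255
assert table[64] > 64  # gamma > 1 brightens the midtones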
| 31.288079
| 105
| 0.650122
| 1,365
| 9,449
| 4.367766
| 0.184615
| 0.021469
| 0.01476
| 0.018786
| 0.353908
| 0.296377
| 0.238007
| 0.208487
| 0.155317
| 0.143408
| 0
| 0.042325
| 0.229866
| 9,449
| 301
| 106
| 31.392027
| 0.776969
| 0.112816
| 0
| 0.15528
| 0
| 0
| 0.000693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.099379
| false
| 0
| 0.068323
| 0
| 0.267081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d05de342ea54b26f257e91dab0c259cdcde355f4
| 1,812
|
py
|
Python
|
bin/make_known_good_cice_masks.py
|
PRIMAVERA-H2020/pre-proc
|
0c47636cbe32a13a9544f3e5ce9f4c778dc55078
|
[
"BSD-3-Clause"
] | null | null | null |
bin/make_known_good_cice_masks.py
|
PRIMAVERA-H2020/pre-proc
|
0c47636cbe32a13a9544f3e5ce9f4c778dc55078
|
[
"BSD-3-Clause"
] | null | null | null |
bin/make_known_good_cice_masks.py
|
PRIMAVERA-H2020/pre-proc
|
0c47636cbe32a13a9544f3e5ce9f4c778dc55078
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
make_known_good_cice_masks.py
Copy known good CICE masks for use in fixing the HadGEM CICE masks.
"""
import os
import numpy as np
from netCDF4 import Dataset
OUTPUT_DIR = "/gws/nopw/j04/primavera1/masks/HadGEM3Ocean_fixes/cice_masks"
def main():
"""main entry"""
rootgrp = Dataset(os.path.join(OUTPUT_DIR, "primavera_cice_orca1_uv.nc"),
"w", format="NETCDF3_CLASSIC")
print(os.path.join(OUTPUT_DIR, "primavera_cice_orca1_uv.nc"))
mask = np.zeros((330, 360))
mask[-1, 180:] += 1
_i = rootgrp.createDimension('i', 360)
_j = rootgrp.createDimension('j', 330)
mask_variable = rootgrp.createVariable('mask', 'i4', ('j', 'i'))
mask_variable.units = '1'
mask_variable[:] = mask
rootgrp.close()
print(os.path.join(OUTPUT_DIR, "primavera_cice_orca025_t.nc"))
rootgrp = Dataset(os.path.join(OUTPUT_DIR, "primavera_cice_orca025_t.nc"),
"w", format="NETCDF3_CLASSIC")
mask = np.zeros((1205, 1440))
mask[-1, 720:] += 1
_i = rootgrp.createDimension('i', 1440)
_j = rootgrp.createDimension('j', 1205)
mask_variable = rootgrp.createVariable('mask', 'i4', ('j', 'i'))
mask_variable.units = '1'
mask_variable[:] = mask
rootgrp.close()
print(os.path.join(OUTPUT_DIR, "primavera_cice_orca12_t.nc"))
rootgrp = Dataset(os.path.join(OUTPUT_DIR, "primavera_cice_orca12_t.nc"),
"w", format="NETCDF3_CLASSIC")
mask = np.zeros((3604, 4320))
mask[-1, 2160:] += 1
_i = rootgrp.createDimension('i', 4320)
_j = rootgrp.createDimension('j', 3604)
mask_variable = rootgrp.createVariable('mask', 'i4', ('j', 'i'))
mask_variable.units = '1'
mask_variable[:] = mask
rootgrp.close()
if __name__ == "__main__":
main()
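# Hedged illustration (added; not part of the original script).
# Each mask above sets only the right half of the last row to 1; a toy
# grid shows the same pattern at small scale.
import numpy as np

mask = np.zeros((4, 6))
mask[-1, 3:] += 1
assert mask.sum() == 3 and mask[-1, 3] == 1 and mask[0, 0] == 0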
| 31.241379
| 78
| 0.642936
| 241
| 1,812
| 4.59751
| 0.302905
| 0.097473
| 0.054152
| 0.086643
| 0.655235
| 0.568592
| 0.568592
| 0.568592
| 0.564079
| 0.493682
| 0
| 0.059066
| 0.196468
| 1,812
| 57
| 79
| 31.789474
| 0.701923
| 0.071744
| 0
| 0.375
| 0
| 0
| 0.183942
| 0.130617
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.075
| 0
| 0.1
| 0.075
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d05e5954805301cc10d8ab2d703ec21b5e037de7
| 756
|
py
|
Python
|
config.py
|
raspberry9/tinypost
|
6e4b4bf764e93f6d344fbdb9369f326f08146d00
|
[
"MIT"
] | null | null | null |
config.py
|
raspberry9/tinypost
|
6e4b4bf764e93f6d344fbdb9369f326f08146d00
|
[
"MIT"
] | null | null | null |
config.py
|
raspberry9/tinypost
|
6e4b4bf764e93f6d344fbdb9369f326f08146d00
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import logging.config
import configparser
class Config(object):
def __init__(self, filename):
logging.config.fileConfig(filename)
config = configparser.RawConfigParser()
config.read(filename)
for option, value in config.items(self.name):
# interpret numbers, booleans, etc.; fall back to the raw string
try:
_val = eval(value)
except Exception:
_val = value
setattr(self, option, _val)
session_opts = {}
for option, value in config.items("session"):
key = "session." + option
session_opts[key] = value
self.session_opts = session_opts
host, port = self.bind.split(":")
self.host = host
self.port = int(port)
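# Hedged usage sketch (added for illustration; not part of the original file).
# Config reads the ini section named after the subclass's ``name`` attribute,
# so a concrete subclass is expected. "app.ini" is a hypothetical file that
# must also contain the loggers/handlers/formatters sections fileConfig needs,
# a [web] section with a "bind = host:port" option, and a [session] section.
class WebConfig(Config):
    name = "web"

# cfg = WebConfig("app.ini")
# print(cfg.host, cfg.port)  # parsed from the "bind" option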
| 26.068966
| 53
| 0.539683
| 76
| 756
| 5.223684
| 0.460526
| 0.110831
| 0.070529
| 0.080605
| 0.13602
| 0.13602
| 0
| 0
| 0
| 0
| 0
| 0.002058
| 0.357143
| 756
| 28
| 54
| 27
| 0.814815
| 0.027778
| 0
| 0
| 0
| 0
| 0.021858
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d05e5b044a9120637eea4c01afc5076feed78586
| 2,817
|
py
|
Python
|
database/database.py
|
Valzavator/YouTubeTrendingVideosAnalysis
|
4baca01a351a20bec04331936cd9f6eafaea815d
|
[
"MIT"
] | 2
|
2019-06-11T03:26:50.000Z
|
2020-04-13T01:28:23.000Z
|
database/database.py
|
Valzavator/YouTubeTrendingVideosAnalysis
|
4baca01a351a20bec04331936cd9f6eafaea815d
|
[
"MIT"
] | 2
|
2020-01-08T13:11:49.000Z
|
2020-01-08T13:11:54.000Z
|
database/database.py
|
Valzavator/YouTubeTrendingVideosAnalysis
|
4baca01a351a20bec04331936cd9f6eafaea815d
|
[
"MIT"
] | 1
|
2019-06-11T03:26:54.000Z
|
2019-06-11T03:26:54.000Z
|
import os
import subprocess
from dotenv import load_dotenv
import pymongo
from pymongo import MongoClient
from pymongo.cursor import Cursor
from pymongo.errors import DuplicateKeyError, BulkWriteError
from util.args import Args
load_dotenv()
class Database:
def __init__(self, uri=Args.db_host()):
self.__client = MongoClient(uri)
self.__db = self.__client["videos_analysis"]
self.__videos_coll = self.__db["videos"]
self.__videos_coll.create_index([("country_code", pymongo.DESCENDING)])
self.__mongodump_path = os.getenv('MONGODUMP_PATH')
self.__mongorestore_path = os.getenv('MONGORESTORE_PATH')
def __del__(self):
self.close()
def save_one_video(self, video: dict) -> bool:
if video is None:
return False
try:
self.__videos_coll.insert_one(video)
return True
except DuplicateKeyError as e:
print(e)
return False
def save_many_videos(self, videos: list, ordered=False) -> int:
if videos is None or len(videos) == 0:
return 0
is_repeat = False
quantity_before = self.__videos_coll.count()
try:
self.__videos_coll.insert_many(videos, ordered=ordered)
except BulkWriteError as e:
if ordered:
raise e
is_repeat = True
if is_repeat:
return self.__videos_coll.count() - quantity_before
else:
return len(videos)
def get_all_videos(self) -> Cursor:
return self.__videos_coll.find()
def get_videos_by_country_code(self, country_code: str) -> Cursor:
return self.__videos_coll.find({'country_code': country_code})
def get_videos_by_country_codes(self, country_codes: list) -> Cursor:
return self.__videos_coll.find({'country_code': {'$in': country_codes}})
def remove_all_documents(self):
self.__videos_coll.remove()
def count(self):
return self.__videos_coll.count()
def get_all_country_codes(self) -> list:
return list(self.__videos_coll.distinct('country_code'))
def backup_database(self):
if not os.path.exists(Args.backup_db_dir()):
os.makedirs(Args.backup_db_dir())
cns_command = f'"{self.__mongodump_path}" --collection videos --db videos_analysis' \
f' --out "{os.path.abspath(Args.backup_db_dir())}"'
subprocess.check_output(cns_command)
def restore_database(self):
if not os.path.exists(Args.backup_db_dir()):
os.makedirs(Args.backup_db_dir())
cns_command = f'"{self.__mongorestore_path}" "{os.path.abspath(Args.backup_db_dir())}"'
subprocess.check_output(cns_command)
def close(self):
self.__client.close()
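# Hedged usage sketch (added for illustration; not part of the original module).
# Requires a reachable MongoDB instance; the URI and documents are hypothetical.
#
# db = Database(uri="mongodb://localhost:27017")
# db.save_many_videos([
#     {"video_id": "a1", "country_code": "US"},
#     {"video_id": "b2", "country_code": "DE"},
# ])
# for video in db.get_videos_by_country_codes(["US", "DE"]):
#     print(video["video_id"], video["country_code"])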
| 28.17
| 95
| 0.649627
| 351
| 2,817
| 4.849003
| 0.262108
| 0.076381
| 0.098707
| 0.052879
| 0.318449
| 0.237368
| 0.219741
| 0.219741
| 0.171563
| 0.171563
| 0
| 0.000944
| 0.248136
| 2,817
| 99
| 96
| 28.454545
| 0.802644
| 0
| 0
| 0.147059
| 0
| 0
| 0.101526
| 0.047923
| 0
| 0
| 0
| 0
| 0
| 1
| 0.191176
| false
| 0
| 0.117647
| 0.073529
| 0.485294
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d0629421490c20c90017965031c7298c1372c640
| 4,066
|
py
|
Python
|
messaging_components/services/service_docker.py
|
fgiorgetti/qpid-dispatch-tests
|
164c609d28db87692eed53d5361aa1ee5c97375c
|
[
"Apache-2.0"
] | null | null | null |
messaging_components/services/service_docker.py
|
fgiorgetti/qpid-dispatch-tests
|
164c609d28db87692eed53d5361aa1ee5c97375c
|
[
"Apache-2.0"
] | null | null | null |
messaging_components/services/service_docker.py
|
fgiorgetti/qpid-dispatch-tests
|
164c609d28db87692eed53d5361aa1ee5c97375c
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
from typing import Union
from iqa_common.executor import Command, Execution, ExecutorAnsible, CommandAnsible, ExecutorContainer, \
CommandContainer, Executor
from iqa_common.utils.docker_util import DockerUtil
from messaging_abstract.component import Service, ServiceStatus
import logging
class ServiceDocker(Service):
"""
Implementation of a service represented by a docker container.
Startup and shutdown are done by managing the state of the docker
container with the matching name.
"""
_logger = logging.getLogger(__name__)
def __init__(self, name: str, executor: Union[ExecutorAnsible, ExecutorContainer]):
super().__init__(name, executor)
self.docker_host = executor.docker_host
self.docker_util = DockerUtil(docker_host=executor.docker_host)
class ServiceDockerState(Enum):
STARTED = ('start', 'started')
STOPPED = ('stop', 'stopped')
RESTARTED = ('restart', 'started')
def __init__(self, system_state, ansible_state):
self.system_state = system_state
self.ansible_state = ansible_state
def status(self) -> ServiceStatus:
"""
Returns the status based on status of container name.
:return: The status of this specific service
:rtype: ServiceStatus
"""
try:
container = self.docker_util.get_container(self.name)
if not container:
ServiceDocker._logger.debug("Service: %s - Status: UNKNOWN" % self.name)
return ServiceStatus.UNKNOWN
if container.status == 'running':
ServiceDocker._logger.debug("Service: %s - Status: RUNNING" % self.name)
return ServiceStatus.RUNNING
elif container.status == 'exited':
ServiceDocker._logger.debug("Service: %s - Status: STOPPED" % self.name)
return ServiceStatus.STOPPED
except Exception:
ServiceDocker._logger.exception('Error retrieving status of docker container')
return ServiceStatus.FAILED
return ServiceStatus.UNKNOWN
def start(self) -> Execution:
return self.executor.execute(self._create_command(self.ServiceDockerState.STARTED))
def stop(self) -> Execution:
return self.executor.execute(self._create_command(self.ServiceDockerState.STOPPED))
def restart(self) -> Execution:
return self.executor.execute(self._create_command(self.ServiceDockerState.RESTARTED))
def enable(self) -> Execution:
"""
Simply ignore it (not applicable to containers)
:return:
"""
return None
def disable(self) -> Execution:
"""
Simply ignore it (not applicable to containers)
:return:
"""
return None
def _create_command(self, service_state: ServiceDockerState):
"""
Creates a Command instance based on executor type and state
that is specific to each type of command.
:param service_state:
:return:
"""
if isinstance(self.executor, ExecutorAnsible):
state = service_state.ansible_state
restart = 'no'
if service_state == self.ServiceDockerState.RESTARTED:
restart = 'yes'
print('name=%s state=%s restart=%s docker_host=%s'
% (self.name, state, restart, self.docker_host))
docker_host_opt = 'docker_host=%s' % self.docker_host if self.docker_host else ''
return CommandAnsible('name=%s state=%s restart=%s %s'
% (self.name, state, restart, docker_host_opt),
ansible_module='docker_container',
stdout=True,
timeout=self.TIMEOUT)
elif isinstance(self.executor, ExecutorContainer):
state = service_state.system_state
return CommandContainer([], docker_command=state, stdout=True, timeout=self.TIMEOUT)
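# Hedged illustration (added; not part of the original module).
# ServiceDockerState above uses the Enum-with-tuple pattern: each member's
# tuple is unpacked into __init__, giving two named views of one state.
from enum import Enum

class State(Enum):
    STARTED = ("start", "started")
    STOPPED = ("stop", "stopped")

    def __init__(self, system_state, ansible_state):
        self.system_state = system_state
        self.ansible_state = ansible_state

assert State.STARTED.system_state == "start"
assert State.STARTED.ansible_state == "started"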
| 38.72381
| 105
| 0.632809
| 415
| 4,066
| 6.043373
| 0.26988
| 0.04386
| 0.022329
| 0.037081
| 0.270734
| 0.20933
| 0.148724
| 0.148724
| 0.148724
| 0.148724
| 0
| 0
| 0.280374
| 4,066
| 104
| 106
| 39.096154
| 0.857143
| 0.128628
| 0
| 0.063492
| 0
| 0
| 0.085138
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.095238
| 0.047619
| 0.47619
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d063b8972e4afe0fab8307dbfa94ac49321f94ea
| 4,836
|
py
|
Python
|
seatsvotes/bootstrap/abstracts.py
|
ljwolf/seatsvotes
|
6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b
|
[
"MIT"
] | null | null | null |
seatsvotes/bootstrap/abstracts.py
|
ljwolf/seatsvotes
|
6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b
|
[
"MIT"
] | null | null | null |
seatsvotes/bootstrap/abstracts.py
|
ljwolf/seatsvotes
|
6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..mixins import Preprocessor, AlwaysPredictPlotter, AdvantageEstimator
from warnings import warn
class Bootstrap(Preprocessor, AlwaysPredictPlotter, AdvantageEstimator):
def __init__(self, elex_frame, covariate_columns=None,
weight_column=None,
share_column='vote_share',
year_column='year',
redistrict_column=None, district_id='district_id',
missing='ignore', uncontested='ignore'):
super().__init__(elex_frame,
covariates=covariate_columns,
weight_column=weight_column,
share_column=share_column,
year_column=year_column,
redistrict_column=redistrict_column,
district_id=district_id,
missing=missing,
uncontested=uncontested,
)
self._years = np.sort(self.long.year.unique())
@property
def years(self):
return self._years
def simulate_elections(self, n_sims=1000, predict=True,
t=-1, year=None, swing=0, target_v=None, fix=False, replace=True):
"""
Simulate elections according to a bootstrap technique.
Arguments
---------
n_sims : int
number of simulations to conduct
swing : float
arbitrary shift in vote means, will be added to the
empirical distribution of $\delta_{t}$.
target_v: float
target mean vote share to peg the simulations to.
Will ensure that the average of all
simulations shift towards this value, but no guarantees
about simulation expectation
can be made due to the structure of the bootstrap.
t : int
the target time offset to use for the counterfactual
simulations. Overridden by year.
year : int
the target year to use for the counterfactual simulations
predict : bool
flag denoting whether to use the predictive distribution
(i.e. add bootstrapped swings to
the voteshare in the previous year) or the counterfactual
distribution (i.e. add bootstrapped
swings to the voteshare in the current year).
fix : bool
flag denoting whether to force the average district vote to be
target_v exactly. If True, all elections will have exactly target_v
mean district vote share. If False, all elections will have approximately
target_v mean district vote share, with the grand mean vote share being target_v
replace : bool
flag denoting whether to resample swings with replacement or without replacement.
If the sampling occurs without replacement, then each swing is used exactly one time in a simulation.
If the sampling occurs with replacement, then each swing can be used more than one
time in a simulation, and some swings may not be used in a simulation.
Returns
---------
an (n_sims, n_districts) matrix of simulated vote shares.
"""
if fix:
raise Exception("Bootstrapped elections cannot be fixed in "
"mean to the target value.")
t = list(self.years).index(year) if year is not None else t
this_year = self.wide[t]
party_voteshares = np.average(this_year.vote_share,
weights=this_year.weight)
if predict is False:
self._GIGO("Prediction must be true if using bootstrap")
target_h = this_year.vote_share.values.flatten()
else:
target_h = this_year.vote_share__prev.values.flatten()
if swing is not None and target_v is not None:
raise ValueError("either swing or target_v, not both.")
elif target_v is not None:
swing = (target_v - party_voteshares)
obs_swings = (this_year.vote_share - this_year.vote_share__prev)
obs_swings = obs_swings.fillna(obs_swings.mean())
n_dists = len(target_h)
pweights = (this_year.weight / this_year.weight.sum()).values.flatten()
pweights /= pweights.sum()
sim_swings = np.random.choice(obs_swings + swing, (n_sims, n_dists),
replace=replace, p=pweights)
sim_h = target_h[None, :] + sim_swings
return np.clip(sim_h, 0, 1)
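# Hedged illustration (added; not part of the original module).
# The core of simulate_elections() is a weighted resample of observed
# swings; this numpy-only sketch mirrors that step on toy data.
import numpy as np

obs_swings = np.array([-0.02, 0.01, 0.03, -0.01])  # observed district swings
pweights = np.array([0.4, 0.3, 0.2, 0.1])          # district weights, sum to 1
sims = np.random.choice(obs_swings + 0.005, size=(1000, 4), replace=True, p=pweights)
target_h = np.array([0.48, 0.51, 0.55, 0.43])      # previous vote shares
sim_h = np.clip(target_h[None, :] + sims, 0, 1)
print(sim_h.mean(axis=0))  # per-district simulated vote shares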
| 49.85567
| 121
| 0.575889
| 548
| 4,836
| 4.928832
| 0.332117
| 0.033321
| 0.022214
| 0.03147
| 0.188819
| 0.105146
| 0.039985
| 0.039985
| 0.039985
| 0.039985
| 0
| 0.002608
| 0.365798
| 4,836
| 96
| 122
| 50.375
| 0.878057
| 0.399297
| 0
| 0
| 0
| 0
| 0.070046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0.019608
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d064bc4db90fca2bed0f8cf38219eca21ad15605
| 1,657
|
py
|
Python
|
lessons/cse-numpy/drums/drums-5.py
|
uiuc-cse/2014-01-30-cse
|
de30ff0afdbb2030c3a844b9cd138177f38d3b76
|
[
"CC-BY-3.0"
] | 1
|
2021-04-21T23:05:51.000Z
|
2021-04-21T23:05:51.000Z
|
lessons/cse-numpy/drums/drums-5.py
|
gitter-badger/2014-01-30-cse
|
de30ff0afdbb2030c3a844b9cd138177f38d3b76
|
[
"CC-BY-3.0"
] | null | null | null |
lessons/cse-numpy/drums/drums-5.py
|
gitter-badger/2014-01-30-cse
|
de30ff0afdbb2030c3a844b9cd138177f38d3b76
|
[
"CC-BY-3.0"
] | 2
|
2016-03-12T02:28:13.000Z
|
2017-05-01T20:43:22.000Z
|
from __future__ import division
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.special import jn, jn_zeros
import subprocess
def drumhead_height(n, k, distance, angle, t):
nth_zero = jn_zeros(n, k)
return np.cos(t)*np.cos(n*angle)*jn(n, distance*nth_zero[-1])
# Define polar and cartesian coordinates for the drum.
theta = np.r_[0:2*np.pi:50j]
radius = np.r_[0:1:50j]
x = np.array([r*np.cos(theta) for r in radius])
y = np.array([r*np.sin(theta) for r in radius])
radial_nodes = 2
zeros = 2
# Define the base plot.
fig = plt.figure(num=None,figsize=(16,16),dpi=120,facecolor='w',edgecolor='k')
ax = list()
# Loop over the desired angular nodes.
cnt = 0
pixcnt = 0
plt.ion()
for t in np.r_[0:2*np.pi:40j]:
cnt = 0
pixcnt += 1
for i in np.r_[0:radial_nodes+1:1]:
for j in np.r_[1:zeros+1:1]:
cnt += 1
ax.append(fig.add_subplot(radial_nodes+1,zeros,cnt,projection='3d'))
z = np.array([drumhead_height(i, j, r, theta, t) for r in radius])
ax[-1].set_xlabel('R@%d,A@%d' % (i,j))
ax[-1].plot_surface(x,y,z,rstride=1,cstride=1,cmap=mpl.cm.Accent,linewidth=0,vmin=-1,vmax=1)
ax[-1].set_zlim(-1,1)
plt.savefig('./drum-modes-%d.png' % pixcnt, format='png')
# Collate pictures to an animated GIF.
import os
cwd = os.getcwd()
cmd = 'cd %s; ls drum-modes*.png | sort -k1.12n' % cwd
png_files = os.popen(cmd)
# string.join() is Python 2 only; ' '.join() is the Python 3 equivalent
png_files_list = ' '.join(png_files.readlines()).replace('\n', ' ')
os.popen('convert -delay 10 -loop 1 %s ./drum-animate.gif' % png_files_list)
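# Hedged illustration (added; not part of the original script): at the
# membrane edge (distance=1) the mode shape vanishes, because jn_zeros
# pins a Bessel zero exactly there.
assert abs(drumhead_height(2, 2, 1.0, 0.0, 0.0)) < 1e-9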
| 31.865385
| 104
| 0.660229
| 304
| 1,657
| 3.503289
| 0.427632
| 0.014085
| 0.015023
| 0.033803
| 0.048826
| 0.016901
| 0
| 0
| 0
| 0
| 0
| 0.038856
| 0.176826
| 1,657
| 51
| 105
| 32.490196
| 0.741935
| 0.089318
| 0
| 0.05
| 0
| 0
| 0.083167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.225
| 0
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d065e2da402db36ecb6c887992ef35dec831f741
| 704
|
py
|
Python
|
QB5/spiders/qb5.py
|
smithgoo/Scrapy_books
|
b556714510473f324a2952b739d79c0c78f47398
|
[
"MIT"
] | null | null | null |
QB5/spiders/qb5.py
|
smithgoo/Scrapy_books
|
b556714510473f324a2952b739d79c0c78f47398
|
[
"MIT"
] | null | null | null |
QB5/spiders/qb5.py
|
smithgoo/Scrapy_books
|
b556714510473f324a2952b739d79c0c78f47398
|
[
"MIT"
] | null | null | null |
import scrapy
from bs4 import BeautifulSoup
import requests
from QB5.pipelines import dbHandle
from QB5.items import Qb5Item
class Qb5Spider(scrapy.Spider):
name = 'qb5'
allowed_domains = ['qb5.tw']
start_urls = ['https://qb5.tw']
def parse(self, response):
soup = BeautifulSoup(response.text, 'html.parser')
###### fetch the recently updated entries
tlists = soup.find_all('div', attrs={'class': 'txt'})
# print(tlist)
item = Qb5Item()
for tlist in tlists:
xx = tlist.find_all('a')[0]
print(xx['href'])
item['url'] = xx['href']
item['name'] = xx.text
print('********************************')
yield item
| 28.16
| 61
| 0.536932
| 78
| 704
| 4.794872
| 0.576923
| 0.037433
| 0.053476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019881
| 0.285511
| 704
| 24
| 62
| 29.333333
| 0.723658
| 0.03125
| 0
| 0
| 0
| 0
| 0.121481
| 0.047407
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.25
| 0
| 0.5
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d06a5181661f5f73feeb7820ddebac2f55560f7e
| 3,491
|
py
|
Python
|
src/models/markov_chain.py
|
dballesteros7/master-thesis-2015
|
8c0bf9a6eef172fc8167a30780ae0666f8ea2d88
|
[
"MIT"
] | null | null | null |
src/models/markov_chain.py
|
dballesteros7/master-thesis-2015
|
8c0bf9a6eef172fc8167a30780ae0666f8ea2d88
|
[
"MIT"
] | null | null | null |
src/models/markov_chain.py
|
dballesteros7/master-thesis-2015
|
8c0bf9a6eef172fc8167a30780ae0666f8ea2d88
|
[
"MIT"
] | null | null | null |
import itertools
import numpy as np
import constants
from utils import file
class MarkovChain:
def __init__(self, n_items: int, pseudo_count: int = 1,
use_rejection: bool = True):
self.n_items = n_items
self.counts = np.empty(n_items)
self.first_order_counts = np.empty((n_items, n_items))
self.counts.fill((n_items - 1) * pseudo_count)
self.first_order_counts.fill(pseudo_count)
self.use_rejection = use_rejection
np.fill_diagonal(self.first_order_counts, 0) # No self loops.
def train(self, ordered_sets: np.ndarray):
for ordered_set in ordered_sets:
for item, next_item in itertools.zip_longest(
ordered_set, ordered_set[1:]):
if next_item is not None:
self.counts[item] += 1
self.first_order_counts[item][next_item] += 1
def propose_set_item(self, to_complete):
missing_pos = to_complete.index('?')
probs = np.zeros_like(self.first_order_counts)
for idx, row in enumerate(self.first_order_counts):
probs[idx, :] = self.first_order_counts[idx, :] / self.counts[idx]
if missing_pos == 0:
column = probs[:, int(to_complete[missing_pos + 1])]
row = np.ones_like(column)
elif missing_pos == len(to_complete) - 1:
row = probs[:, int(to_complete[missing_pos - 1])]
column = np.ones_like(row)
else:
column = probs[:, int(to_complete[missing_pos + 1])]
row = probs[:, int(to_complete[missing_pos - 1])]
likelihood = column*row
to_complete = [int(x) for x in to_complete if x != '?']
if self.use_rejection:
likelihood[to_complete] = 0.0
sorted_indexes = np.argsort(likelihood)
return sorted_indexes[::-1]
def train_and_evaluate(dataset_name: str, n_items: int):
for fold in range(1, constants.N_FOLDS + 1):
for use_rejection in (False, True):
model = MarkovChain(n_items, use_rejection=use_rejection)
loaded_data = file.load_set_data(
constants.TRAIN_DATA_PATH_TPL.format(
fold=fold, dataset=dataset_name))
model.train(loaded_data)
loaded_test_data = file.load_csv_test_data(
constants.PARTIAL_DATA_PATH_TPL.format(
fold=fold, dataset=dataset_name))
model_name = 'pseudo_markov' if use_rejection else 'markov'
target_path = constants.RANKING_MODEL_PATH_TPL.format(
dataset=dataset_name, fold=fold, model=model_name)
with open(target_path, 'w') as output_file:
for subset in loaded_test_data:
model.propose_set_item(subset)
result = model.propose_set_item(subset)
# if subset.index('?') > 0:
# short_subset = subset[:subset.index('?')]
# short_subset = [int(item) for item in short_subset]
#
output_file.write(','.join(str(item) for item in result))
output_file.write('\n')
# else:
# output_file.write('-\n')
if __name__ == '__main__':
train_and_evaluate(constants.DATASET_NAME_TPL.format('100_no_singles'), 100)
#train_and_evaluate(constants.DATASET_NAME_TPL.format('50_no_singles'), 50)
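# Hedged usage sketch (added for illustration; not part of the original module).
# Trains the chain above on toy ordered sets, then ranks candidates for the
# middle slot of [0, ?, 2]; item 1 should come out on top.
chain = MarkovChain(n_items=3)
chain.train([[0, 1, 2], [0, 1, 2], [0, 2, 1]])
ranked = chain.propose_set_item(['0', '?', '2'])
assert ranked[0] == 1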
| 42.573171
| 80
| 0.593813
| 436
| 3,491
| 4.444954
| 0.243119
| 0.0516
| 0.050568
| 0.072239
| 0.229102
| 0.189886
| 0.167183
| 0.167183
| 0.120743
| 0.049536
| 0
| 0.011556
| 0.30593
| 3,491
| 81
| 81
| 43.098765
| 0.788279
| 0.071899
| 0
| 0.095238
| 0
| 0
| 0.014547
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063492
| false
| 0
| 0.063492
| 0
| 0.15873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d06c950496205dbbc1ed9eef4c8c7e1dcbe953e8
| 1,668
|
py
|
Python
|
tests/pipeline/nodes/dabble/test_check_large_groups.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | 1
|
2021-12-02T05:15:58.000Z
|
2021-12-02T05:15:58.000Z
|
tests/pipeline/nodes/dabble/test_check_large_groups.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | null | null | null |
tests/pipeline/nodes/dabble/test_check_large_groups.py
|
ericleehy/PeekingDuck
|
8cf1be842235fa60bac13bc466cac09747a780ea
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from peekingduck.pipeline.nodes.dabble.check_large_groups import Node
@pytest.fixture
def check_large_groups():
node = Node(
{"input": ["obj_attrs"], "output": ["large_groups"], "group_size_threshold": 3}
)
return node
class TestCheckLargeGroups:
def test_no_obj_groups(self, check_large_groups):
array1 = []
input1 = {"obj_attrs": {"groups": array1}}
assert check_large_groups.run(input1)["large_groups"] == []
assert input1["obj_attrs"]["groups"] == array1
def test_no_large_groups(self, check_large_groups):
array1 = [0, 1, 2, 3, 4, 5]
input1 = {"obj_attrs": {"groups": array1}}
assert check_large_groups.run(input1)["large_groups"] == []
assert input1["obj_attrs"]["groups"] == array1
def test_multi_large_groups(self, check_large_groups):
array1 = [0, 1, 0, 3, 1, 0, 1, 2, 1, 0]
input1 = {"obj_attrs": {"groups": array1}}
assert check_large_groups.run(input1)["large_groups"] == [0, 1]
assert input1["obj_attrs"]["groups"] == array1
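# Hedged illustration (added; not part of the original tests).
# The node is expected to report group ids whose member count exceeds the
# threshold; plain collections.Counter reproduces that rule.
from collections import Counter

groups = [0, 1, 0, 3, 1, 0, 1, 2, 1, 0]
threshold = 3
large = [g for g, n in Counter(groups).items() if n > threshold]
assert sorted(large) == [0, 1]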
| 34.040816
| 87
| 0.678657
| 229
| 1,668
| 4.777293
| 0.427948
| 0.140768
| 0.117002
| 0.109689
| 0.387569
| 0.387569
| 0.329068
| 0.329068
| 0.329068
| 0.25777
| 0
| 0.033708
| 0.19964
| 1,668
| 48
| 88
| 34.75
| 0.785768
| 0.329736
| 0
| 0.333333
| 0
| 0
| 0.161232
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d06e09e4639214f16deaafbd6112fa849f57cd73
| 2,684
|
py
|
Python
|
src/seisspark/seisspark_context.py
|
kdeyev/SeisSpark
|
528d22143acb72e78ed310091db07eb5d731ca09
|
[
"ECL-2.0",
"Apache-2.0"
] | 11
|
2017-08-16T02:32:37.000Z
|
2020-12-25T07:18:57.000Z
|
src/seisspark/seisspark_context.py
|
kdeyev/SeisSpark
|
528d22143acb72e78ed310091db07eb5d731ca09
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-10-15T14:44:17.000Z
|
2018-10-15T14:44:17.000Z
|
src/seisspark/seisspark_context.py
|
kdeyev/SeisSpark
|
528d22143acb72e78ed310091db07eb5d731ca09
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2018-05-16T02:36:38.000Z
|
2020-06-15T07:46:50.000Z
|
# =============================================================================
# Copyright (c) 2021 SeisSpark (https://github.com/kdeyev/SeisSpark).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
from zipfile import ZipFile

import pyspark
from pyspark.sql import SparkSession


def zipdir(path: str, ziph: ZipFile) -> None:
    # ziph is a zipfile handle; archive every file under `path`,
    # keeping paths relative to the parent of `path`.
    for root, _dirs, files in os.walk(path):
        for file in files:
            ziph.write(
                os.path.join(root, file),
                os.path.relpath(os.path.join(root, file), os.path.join(path, "..")),
            )


class SeisSparkContext:
    def __init__(self) -> None:
        seisspark_home = os.environ["SEISSPARK_HOME"]
        seisspark_zip_file = "seisspark.zip"
        if os.path.exists(seisspark_zip_file):
            os.remove(seisspark_zip_file)
        with ZipFile(seisspark_zip_file, mode="a") as myzipfile:
            zipdir(f"{seisspark_home}/src/su_data", myzipfile)
            zipdir(f"{seisspark_home}/src/su_rdd", myzipfile)
            zipdir(f"{seisspark_home}/src/seisspark", myzipfile)
            zipdir(f"{seisspark_home}/src/seisspark_modules", myzipfile)

        spark_conf = pyspark.SparkConf()
        if "SPARK_MASTER_URL" in os.environ:
            spark_conf.setMaster(os.environ["SPARK_MASTER_URL"])
        # spark_conf.setAll([
        #     ('spark.master', ),
        #     ('spark.app.name', 'myApp'),
        #     ('spark.submit.deployMode', 'client'),
        #     ('spark.ui.showConsoleProgress', 'true'),
        #     ('spark.eventLog.enabled', 'false'),
        #     ('spark.logConf', 'false'),
        #     ('spark.driver.bindAddress', 'vps00'),
        #     ('spark.driver.host', 'vps00'),
        # ])

        spark_sess = SparkSession.builder.config(conf=spark_conf).getOrCreate()
        spark_ctxt = spark_sess.sparkContext
        # no-op property accesses (each returns a new reader and discards it)
        spark_sess.read
        spark_sess.readStream
        spark_ctxt.setLogLevel("WARN")
        # Ship the freshly built archive to the executors so the seisspark
        # packages are importable on every worker.
        spark_ctxt.addPyFile(seisspark_zip_file)
        self._spark_ctxt = spark_ctxt

    @property
    def context(self) -> pyspark.SparkContext:
        return self._spark_ctxt
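# Hypothetical usage: build the context and run a trivial job through it.
# Assumes SEISSPARK_HOME points at a SeisSpark checkout and that a local
# or SPARK_MASTER_URL-addressed master is reachable:
ctx = SeisSparkContext()
squares = ctx.context.parallelize(range(4)).map(lambda x: x * x).collect()
print(squares)  # [0, 1, 4, 9]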
| 38.342857
| 117
| 0.616617
| 315
| 2,684
| 5.11746
| 0.457143
| 0.037221
| 0.049628
| 0.062035
| 0.122829
| 0.122829
| 0.122829
| 0
| 0
| 0
| 0
| 0.00569
| 0.214232
| 2,684
| 69
| 118
| 38.898551
| 0.758653
| 0.40611
| 0
| 0
| 0
| 0
| 0.120613
| 0.078494
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.125
| 0.03125
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| d06f1cb2d99e6c91380d0f70f6e5f7c771735207
| 1,116
| py
| Python
| tests/parsers/notifications/test_Notification.py
| Tberdy/python-amazon-mws-tools
| 2925118ce113851a2d8db98ad7f99163154f4151
| ["Unlicense"] | 9
| 2017-03-28T12:58:36.000Z
| 2020-03-02T14:42:32.000Z
| tests/parsers/notifications/test_Notification.py
| Tberdy/python-amazon-mws-tools
| 2925118ce113851a2d8db98ad7f99163154f4151
| ["Unlicense"] | 5
| 2017-01-05T19:36:18.000Z
| 2021-12-13T19:43:42.000Z
| tests/parsers/notifications/test_Notification.py
| Tberdy/python-amazon-mws-tools
| 2925118ce113851a2d8db98ad7f99163154f4151
| ["Unlicense"] | 5
| 2017-02-15T17:29:02.000Z
| 2019-03-06T07:30:55.000Z
|
from unittest import TestCase
from unittest import TestSuite
from unittest import main
from unittest import makeSuite

from mwstools.parsers.notifications import Notification


class Dummy(object):
    """
    Only used for test_notification_payload since there is not actually a payload to test.
    """

    def __init__(self, *args, **kwargs):
        pass


class TestNotification(TestCase):

    body = """
    <Notification>
        <NotificationMetaData>
            <Empty />
        </NotificationMetaData>
        <NotificationPayload>
            <Empty />
        </NotificationPayload>
    </Notification>
    """

    def setUp(self):
        self.parser = Notification.load(self.body)

    def test_notification_metadata(self):
        self.assertIsNotNone(self.parser.notification_metadata)

    def test_notification_payload(self):
        self.assertIsNotNone(self.parser.notification_payload(Dummy))


__all__ = [
    TestNotification
]


def suite():
    s = TestSuite()
    for a in __all__:
        s.addTest(makeSuite(a))
    return s


if __name__ == '__main__':
    main(defaultTest='suite')
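# Note: unittest.makeSuite is deprecated in newer Python versions
# (removed in 3.13). A sketch of an equivalent suite() built on
# TestLoader, assuming no custom test ordering is needed:
from unittest import TestLoader

def modern_suite():
    loader = TestLoader()
    s = TestSuite()
    for a in __all__:
        s.addTest(loader.loadTestsFromTestCase(a))
    return s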
| 20.666667
| 90
| 0.669355
| 112
| 1,116
| 6.419643
| 0.464286
| 0.066759
| 0.100139
| 0.075104
| 0.125174
| 0.125174
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238351
| 1,116
| 53
| 91
| 21.056604
| 0.845882
| 0.077061
| 0
| 0
| 0
| 0
| 0.221893
| 0.086785
| 0
| 0
| 0
| 0
| 0.057143
| 1
| 0.142857
| false
| 0.028571
| 0.142857
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| d06f2e4133f899f7d55993a62f6fac399373c048
| 1,025
| py
| Python
| sec_certs/config/configuration.py
| J08nY/sec-certs
| d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
| ["MIT"] | 2
| 2021-03-24T11:56:15.000Z
| 2021-04-12T12:22:16.000Z
| sec_certs/config/configuration.py
| J08nY/sec-certs
| d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
| ["MIT"] | 73
| 2021-04-12T14:04:04.000Z
| 2022-03-31T15:40:26.000Z
| sec_certs/config/configuration.py
| J08nY/sec-certs
| d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
| ["MIT"] | 3
| 2021-03-26T16:15:49.000Z
| 2021-05-10T07:26:23.000Z
|
import json
from pathlib import Path
from typing import Union

import jsonschema
import yaml


class Configuration(object):
    def load(self, filepath: Union[str, Path]):
        with Path(filepath).open("r") as file:
            state = yaml.load(file, Loader=yaml.FullLoader)

        script_dir = Path(__file__).parent
        with (Path(script_dir) / "settings-schema.json").open("r") as file:
            schema = json.loads(file.read())

        try:
            jsonschema.validate(state, schema)
        except jsonschema.exceptions.ValidationError as e:
            print(f"{e}\n\nIn file {filepath}")

        for k, v in state.items():
            setattr(self, k, v)

    def __getattribute__(self, key):
        # Settings are stored as {"value": ..., ...} dicts; transparently
        # unwrap them so config.key returns the value itself.
        res = object.__getattribute__(self, key)
        if isinstance(res, dict) and "value" in res:
            return res["value"]
        return object.__getattribute__(self, key)


DEFAULT_CONFIG_PATH = Path(__file__).parent / "settings.yaml"

config = Configuration()
config.load(DEFAULT_CONFIG_PATH)
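# Hypothetical settings.yaml entry and how it surfaces on the config
# object (the key name below is invented for illustration):
#
#   n_threads:
#     value: 4
#     description: worker pool size
#
# __getattribute__ unwraps the {"value": ...} wrapper, so:
print(config.n_threads)  # -> 4, not the whole dict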
| 27.702703
| 75
| 0.643902
| 127
| 1,025
| 4.992126
| 0.464567
| 0.07571
| 0.089905
| 0.0347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242927
| 1,025
| 36
| 76
| 28.472222
| 0.81701
| 0
| 0
| 0
| 0
| 0
| 0.068293
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.192308
| 0
| 0.384615
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| d0707615a365376fb262ae4ab58d6c156cbaf97a
| 4,415
| py
| Python
| parlai/scripts/split_phrases.py
| shigailowa/ParlAI
| 5bb359cdacb8f2b92ba482273cdff20f0d147a72
| ["MIT"] | null | null | null
| parlai/scripts/split_phrases.py
| shigailowa/ParlAI
| 5bb359cdacb8f2b92ba482273cdff20f0d147a72
| ["MIT"] | null | null | null
| parlai/scripts/split_phrases.py
| shigailowa/ParlAI
| 5bb359cdacb8f2b92ba482273cdff20f0d147a72
| ["MIT"] | null | null | null
|
import nltk
from nltk.chunk.regexp import ChunkString, ChunkRule, ChinkRule
from nltk.tree import Tree
from nltk.chunk import RegexpParser
from nltk.corpus import conll2000
from nltk.tag import NgramTagger


# class for Unigram Chunking
class UnigramChunker(nltk.ChunkParserI):
    def __init__(self, train_sents):
        train_data = [[(t, c) for w, t, c in nltk.chunk.tree2conlltags(sent)]
                      for sent in train_sents]
        self.tagger = nltk.UnigramTagger(train_data)

    def parse(self, sentence):
        pos_tags = [pos for (word, pos) in sentence]
        tagged_pos_tags = self.tagger.tag(pos_tags)
        chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
        conlltags = [(word, pos, chunktag) for ((word, pos), chunktag)
                     in zip(sentence, chunktags)]
        return nltk.chunk.conlltags2tree(conlltags)


# class for Bigram Chunking
class BigramChunker(nltk.ChunkParserI):
    def __init__(self, train_sents):
        train_data = [[(t, c) for w, t, c in nltk.chunk.tree2conlltags(sent)]
                      for sent in train_sents]
        self.tagger = nltk.BigramTagger(train_data)

    def parse(self, sentence):
        pos_tags = [pos for (word, pos) in sentence]
        tagged_pos_tags = self.tagger.tag(pos_tags)
        chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
        conlltags = [(word, pos, chunktag) for ((word, pos), chunktag)
                     in zip(sentence, chunktags)]
        return nltk.chunk.conlltags2tree(conlltags)


# class for Ngram Chunking
class NgramChunker(nltk.ChunkParserI):
    def __init__(self, n, train_sents):
        train_data = [[(t, c) for w, t, c in nltk.chunk.tree2conlltags(sent)]
                      for sent in train_sents]
        self.tagger = nltk.NgramTagger(n, train_data)

    def parse(self, sentence):
        pos_tags = [pos for (word, pos) in sentence]
        tagged_pos_tags = self.tagger.tag(pos_tags)
        chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
        conlltags = [(word, pos, chunktag) for ((word, pos), chunktag)
                     in zip(sentence, chunktags)]
        return nltk.chunk.conlltags2tree(conlltags)


# Rule-based chunking
def regexp_chunk():
    # define rules here
    grammar = r"""NP: {<DT|PDT|CD|PRP\$>?<JJ>*<N.*>+}
                  VP: {<V.*>+<TO>?<V.*>*}
                  PP: {<IN>+}
                  """
    cp = nltk.RegexpParser(grammar)
    return cp


# train Unigram chunker on conll2000 dataset
def unigram_chunk():
    train_sents = conll2000.chunked_sents('train.txt')
    unigram_chunker = UnigramChunker(train_sents)
    return unigram_chunker


# train Bigram chunker on conll2000 dataset
def bigram_chunk():
    train_sents = conll2000.chunked_sents('train.txt')
    bigram_chunker = BigramChunker(train_sents)
    return bigram_chunker


# train Ngram chunker on conll2000 dataset
def ngram_chunk(n):
    train_sents = conll2000.chunked_sents('train.txt')
    ngram_chunker = NgramChunker(n, train_sents)
    return ngram_chunker


# Call best performing chunker
def split_phrases(tagged_phrase):
    bigram_chunker = bigram_chunk()
    chunks = bigram_chunker.parse(tagged_phrase)
    return chunks


"""
text = nltk.word_tokenize('My yellow dog loves eating breakfast and I like to watch netflix')
tags = nltk.pos_tag(text)
print(unigram_chunker.parse(tags))
"""

if __name__ == '__main__':
    regexp_chunker = regexp_chunk()
    unigram_chunker = ngram_chunk(1)
    bigram_chunker = ngram_chunk(2)
    trigram_chunker = ngram_chunk(3)
    fourgram_chunker = ngram_chunk(4)
    fivegram_chunker = ngram_chunk(5)
    """
    phrase = "My yellow dog has been asking to eat the whole day because of hunger"
    text = nltk.word_tokenize(phrase)
    tags = nltk.pos_tag(text)
    print(regexp_chunker.parse(tags))
    print(unigram_chunker.parse(tags))
    print(bigram_chunker.parse(tags))
    """
    test_sents = conll2000.chunked_sents('test.txt')
    print(regexp_chunker.evaluate(test_sents))
    print(unigram_chunker.evaluate(test_sents))
    print(bigram_chunker.evaluate(test_sents))
    print(trigram_chunker.evaluate(test_sents))
    print(fourgram_chunker.evaluate(test_sents))
    print(fivegram_chunker.evaluate(test_sents))
    """
    phrase = "play football and watch netflix"
    text = nltk.word_tokenize(phrase)
    tags = nltk.pos_tag(text)
    chunks = split_phrases(tags)
    print(chunks)
    """
    """
    for chunk in chunks:
        if type(chunk) is nltk.Tree:
            for word,tag in chunk:
                print(word)
        else:
            print(chunk[0])
    """
| 29.433333
| 94
| 0.69966
| 590
| 4,415
| 5.045763
| 0.211864
| 0.040309
| 0.020155
| 0.048371
| 0.539805
| 0.436681
| 0.427276
| 0.414175
| 0.384615
| 0.384615
| 0
| 0.01228
| 0.188448
| 4,415
| 150
| 95
| 29.433333
| 0.818588
| 0.059343
| 0
| 0.405063
| 0
| 0
| 0.038809
| 0.009046
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139241
| false
| 0
| 0.075949
| 0
| 0.291139
| 0.075949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| d073713261d4accea1d939cebd542842ecae613a
| 1,320
| py
| Python
| app/utils/zones.py
| Xerrors/Meco-Server
| f2111bab7691c0b567d5c3b3f38b83fee152a689
| ["MIT"] | 1
| 2021-07-28T11:24:02.000Z
| 2021-07-28T11:24:02.000Z
| app/utils/zones.py
| Xerrors/Meco-Server
| f2111bab7691c0b567d5c3b3f38b83fee152a689
| ["MIT"] | null | null | null
| app/utils/zones.py
| Xerrors/Meco-Server
| f2111bab7691c0b567d5c3b3f38b83fee152a689
| ["MIT"] | null | null | null
|
import os
import json

from app.config import DATA_PATH

"""
_id: ID
date: date, e.g. "2020-02-06T15:24:59.942Z"
msg: message body, e.g. "this is the content"
status: status, e.g. "😫" (an emoji)
"""


def get_zones():
    with open(os.path.join(DATA_PATH, 'zone.json'), 'r') as f:
        data = json.load(f)
    return data['data']


def save_zone(data):
    with open(os.path.join(DATA_PATH, 'zone.json'), 'w') as f:
        json.dump({'data': data}, f)


def add_zone(msg: dict):
    data = get_zones()
    # check whether any records exist yet
    if len(data) == 0:
        _id = 0
    else:
        _id = data[-1]['id'] + 1
    msg['id'] = _id
    data.append(msg)
    save_zone(data)
    return data


def delete_zone(msg_id):
    data = get_zones()
    for i in range(len(data)):
        if int(data[i]['id']) == int(msg_id):
            del data[i]
            break
    save_zone(data)
    return data


def update_zone(msg: dict):
    data = get_zones()
    for i in range(len(data)):
        if int(data[i]['id']) == int(msg['id']):
            data[i] = msg
            break
    save_zone(data)
    return data


if __name__ == '__main__':
    DATA_PATH = '../../data'
    with open('../../../../Node/data/zoneMsg.json', 'r') as f:
        data = json.load(f)
    data = data['data']
    for i in data[::-1]:
        add_zone(i)
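# Hypothetical round trip against the JSON store; assumes DATA_PATH
# points at a directory that already contains zone.json with {"data": []}:
zones = add_zone({'date': '2020-02-06T15:24:59.942Z', 'msg': 'hello', 'status': '🙂'})
edited = dict(zones[-1], msg='edited')   # update_zone replaces the whole record
update_zone(edited)
delete_zone(edited['id'])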
| 18.082192
| 62
| 0.524242
| 198
| 1,320
| 3.348485
| 0.318182
| 0.048265
| 0.072398
| 0.081448
| 0.485671
| 0.485671
| 0.310709
| 0.310709
| 0.253394
| 0.15083
| 0
| 0.023965
| 0.304545
| 1,320
| 72
| 63
| 18.333333
| 0.697168
| 0.006061
| 0
| 0.348837
| 0
| 0
| 0.079765
| 0.028547
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.069767
| 0
| 0.27907
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| d078c0acdf412550824a96d5fadcbd21aeb88416
| 2,534
| py
| Python
| fungal_automata/utils.py
| ranyishere/fungal_automata_comap2021
| 1ef4f00a3e6f17413a60f6882dbee6f156aadfa0
| ["MIT"] | null | null | null
| fungal_automata/utils.py
| ranyishere/fungal_automata_comap2021
| 1ef4f00a3e6f17413a60f6882dbee6f156aadfa0
| ["MIT"] | null | null | null
| fungal_automata/utils.py
| ranyishere/fungal_automata_comap2021
| 1ef4f00a3e6f17413a60f6882dbee6f156aadfa0
| ["MIT"] | null | null | null
|
import random
import pprint

import matplotlib.pyplot as plt
import numpy as np

from cells import *

pp = pprint.PrettyPrinter(indent=2)

random.seed(5)


def get_image_from_state(cells, time, debug=False):
    """
    Generates an image from the cell states
    """
    # print("time: ", time)
    img = []
    for rix, row in enumerate(cells):
        img_row = []
        for cix, col in enumerate(row):
            img_row.append(col.color)
        img.append(img_row)

    if debug == True:
        plt.imshow(np.array(img), origin='lower')
        plt.show()

    return img


def get_heatmap_of_temp(cells, optimal=31.5, debug=False):
    img = []
    optimal_pts = []
    for rix, row in enumerate(cells):
        img_row = []
        for cix, col in enumerate(row):
            # if rix == 2 and cix == 4:
            #     print("rix: {0} cix: {1}".format(rix, cix))
            #     print(col.color)
            # if col.temperature <= optimal+0.1 and col.temperature >= optimal-0.1:
            #     print("col.temperature: ", col.temperature)
            #     optimal_pts.append([rix,cix])
            img_row.append(col.temperature)
            # img_row.append(col.color[3])
        img.append(img_row)
    # print("img: ", img)

    if debug == True:
        for opt in optimal_pts:
            plt.plot(opt, marker='x')
        heatmap = plt.imshow(np.array(img), origin='lower', cmap='hot')
        plt.colorbar(heatmap)
        plt.show()
        print("showing")

    return img


def get_heatmap_of_food(cells, debug=False):
    img = []
    for rix, row in enumerate(cells):
        img_row = []
        for cix, col in enumerate(row):
            # if rix == 2 and cix == 4:
            #     print("rix: {0} cix: {1}".format(rix, cix))
            #     print(col.color)
            img_row.append(col.color[3])
            # img_row.append(col.color[3])
        img.append(img_row)

    if debug == True:
        plt.imshow(np.array(img), origin='lower')
        plt.show()

    return img


def get_moistmap(cells, debug=False):
    img = []
    for rix, row in enumerate(cells):
        img_row = []
        for cix, col in enumerate(row):
            # if rix == 2 and cix == 4:
            #     print("rix: {0} cix: {1}".format(rix, cix))
            #     print(col.color)
            img_row.append(col.moisture)
            # img_row.append(col.color[3])
        img.append(img_row)

    if debug == True:
        plt.imshow(np.array(img), origin='lower', cmap='Blues')
        plt.show()

    return img
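# Minimal sketch of driving the heatmap helpers with a stand-in cell
# type; the real Cell class comes from the project's `cells` module, and
# its attributes (color, temperature, moisture) are inferred from the
# accesses above:
from dataclasses import dataclass

@dataclass
class StubCell:
    color: tuple        # RGBA; the food level is read from color[3]
    temperature: float
    moisture: float

grid = [[StubCell((0.1, 0.8, 0.1, 0.5), 31.5, 0.3) for _ in range(4)]
        for _ in range(4)]
temps = get_heatmap_of_temp(grid)  # 4x4 nested list of temperatures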
| 23.247706
| 83
| 0.54341
| 337
| 2,534
| 4.002967
| 0.216617
| 0.066716
| 0.062268
| 0.077835
| 0.645663
| 0.596738
| 0.572276
| 0.567828
| 0.539659
| 0.539659
| 0
| 0.014434
| 0.316496
| 2,534
| 108
| 84
| 23.462963
| 0.764434
| 0.226519
| 0
| 0.607143
| 0
| 0
| 0.018672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.089286
| 0
| 0.232143
| 0.053571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| d07c7ec019295c93900e320c5fcec0bc4db8705b
| 415
| py
| Python
| src/server/event_test.py
| cnlohr/bridgesim
| ff33b63db813eedfc8155c9fecda4c8f1c06ab60
| ["MIT"] | 4
| 2015-05-03T07:37:34.000Z
| 2018-05-09T22:27:33.000Z
| src/server/event_test.py
| cnlohr/bridgesim
| ff33b63db813eedfc8155c9fecda4c8f1c06ab60
| ["MIT"] | 1
| 2016-08-07T16:56:38.000Z
| 2016-08-07T16:56:38.000Z
| src/server/event_test.py
| cnlohr/bridgesim
| ff33b63db813eedfc8155c9fecda4c8f1c06ab60
| ["MIT"] | null | null | null
|
#! /usr/bin/python3
import time
from events import *


def test1(foo, *args):
    print("foo: %s otherargs: %s time: %06.3f" % (foo, args, time.time() % 100))


q = QueueExecutor()
q.addEvent(test1, time.time() + 3, 1, 5, "foo", "bar", "baz")
q.addEvent(test1, time.time() + .5, .3, 20, "foo2", "bar")
print("Main thread asleep at %s" % (time.time(),))
time.sleep(6)
print("Main thread awake, terminating...")
q.stop()
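# The `events` module above is bridgesim-internal, so its API is not
# reproduced here. Purely as an illustration of the same fire-at-a-time
# pattern using only the standard library (not bridgesim's API):
import sched
import threading

s = sched.scheduler(time.time, time.sleep)
s.enterabs(time.time() + 1, 0, print, argument=("fired",))  # one-shot callback
threading.Thread(target=s.run, daemon=True).start()
time.sleep(2)  # keep the main thread alive long enough to see it fire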
| 27.666667
| 78
| 0.621687
| 66
| 415
| 3.909091
| 0.545455
| 0.155039
| 0.108527
| 0.139535
| 0.170543
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053521
| 0.144578
| 415
| 15
| 79
| 27.666667
| 0.673239
| 0.043373
| 0
| 0
| 0
| 0
| 0.269521
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.272727
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| d07d20e45fea750c32612fcddef24ffc98a05b67
| 1,845
| py
| Python
| gd/iter_utils.py
| nekitdev/gd.py
| b9d5e29c09f953f54b9b648fb677e987d9a8e103
| ["MIT"] | 58
| 2020-09-30T16:51:22.000Z
| 2022-02-13T17:27:48.000Z
| gd/iter_utils.py
| NeKitDS/gd.py
| b9d5e29c09f953f54b9b648fb677e987d9a8e103
| ["MIT"] | 30
| 2019-07-29T12:03:41.000Z
| 2020-09-15T17:01:37.000Z
| gd/iter_utils.py
| NeKitDS/gd.py
| b9d5e29c09f953f54b9b648fb677e987d9a8e103
| ["MIT"] | 20
| 2019-12-06T03:16:57.000Z
| 2020-09-16T17:45:27.000Z
|
from typing import Any, Callable, Dict, Iterable, Mapping, Tuple, TypeVar, Union, cast, overload

__all__ = ("extract_iterable_from_tuple", "is_iterable", "item_to_tuple", "mapping_merge")

KT = TypeVar("KT")
VT = TypeVar("VT")
T = TypeVar("T")


def mapping_merge(*mappings: Mapping[KT, VT], **arguments: VT) -> Dict[KT, VT]:
    final: Dict[KT, VT] = {}

    for mapping in mappings:
        final.update(mapping)

    final.update(arguments)  # type: ignore

    return final


def is_iterable(maybe_iterable: Union[Iterable[T], T], use_iter: bool = True) -> bool:
    if use_iter:
        try:
            iter(maybe_iterable)  # type: ignore
            return True
        except TypeError:  # "T" object is not iterable
            return False

    return isinstance(maybe_iterable, Iterable)


@overload  # noqa
def item_to_tuple(item: Iterable[T]) -> Tuple[T, ...]:  # noqa
    ...


@overload  # noqa
def item_to_tuple(item: T) -> Tuple[T, ...]:  # noqa
    ...


def item_to_tuple(item: Union[T, Iterable[T]]) -> Tuple[T, ...]:  # noqa
    if is_iterable(item):
        return tuple(cast(Iterable[T], item))

    return (cast(T, item),)


@overload  # noqa
def extract_iterable_from_tuple(  # noqa
    tuple_to_extract: Tuple[Iterable[T]], check: Callable[[Any], bool]
) -> Iterable[T]:
    ...


@overload  # noqa
def extract_iterable_from_tuple(  # noqa
    tuple_to_extract: Tuple[T, ...], check: Callable[[Any], bool]
) -> Iterable[T]:
    ...


def extract_iterable_from_tuple(  # noqa
    tuple_to_extract: Union[Tuple[Iterable[T]], Tuple[T, ...]],
    check: Callable[[Any], bool] = is_iterable,
) -> Iterable[T]:
    if len(tuple_to_extract) == 1:
        maybe_return = tuple_to_extract[0]

        if check(maybe_return):
            return cast(Iterable[T], maybe_return)

    return cast(Iterable[T], tuple_to_extract)
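# These helpers normalize *args-style inputs; a quick demonstration of
# the functions defined above:
assert item_to_tuple(1) == (1,)
assert item_to_tuple([1, 2]) == (1, 2)

# f(1, 2, 3) and f([1, 2, 3]) both resolve to the same iterable:
assert tuple(extract_iterable_from_tuple((1, 2, 3))) == (1, 2, 3)
assert tuple(extract_iterable_from_tuple(([1, 2, 3],))) == (1, 2, 3)

assert mapping_merge({"a": 1}, {"b": 2}, c=3) == {"a": 1, "b": 2, "c": 3}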
| 24.276316
| 96
| 0.635772
| 243
| 1,845
| 4.621399
| 0.205761
| 0.088157
| 0.0748
| 0.085485
| 0.387355
| 0.353517
| 0.257346
| 0.15049
| 0.15049
| 0.110419
| 0
| 0.001382
| 0.215718
| 1,845
| 75
| 97
| 24.6
| 0.774706
| 0.055285
| 0
| 0.291667
| 0
| 0
| 0.039907
| 0.015616
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.020833
| 0
| 0.354167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| d07d7eac9f05f51f4abf2075d7c3883791a41eb9
| 937
| py
| Python
| spacetime/distort_ss.py
| uhrwecker/GRDonuts
| 3087aeb5c169251bdb711b425dcc3040ff962da7
| ["MIT"] | null | null | null
| spacetime/distort_ss.py
| uhrwecker/GRDonuts
| 3087aeb5c169251bdb711b425dcc3040ff962da7
| ["MIT"] | 25
| 2020-03-26T11:16:58.000Z
| 2020-09-10T18:31:52.000Z
| spacetime/distort_ss.py
| uhrwecker/GRDonuts
| 3087aeb5c169251bdb711b425dcc3040ff962da7
| ["MIT"] | null | null | null
|
import numpy as np

from spacetime.potential import Potential


class DistortedSchwarzschild(Potential):
    def __init__(self, theta=np.pi/2, l=3.8, o=1, r_range=(2, 20),
                 num=10000, cont_without_eq=False, verbose=True):
        super().__init__(r_range=r_range, num=num,
                         cont_without_eq=cont_without_eq, verbose=verbose)
        self.theta = theta
        self.l = l
        self.o = o

    def compute_w(self):
        exponent = 0.5 * self.o * (3*self.r**2*np.cos(self.theta)**2 -
                                   6*self.r*np.cos(self.theta)**2 +
                                   2*np.cos(self.theta) - self.r**2 - 2*self.r)
        # oben/unten (German: numerator/denominator) of the potential ratio
        oben = (self.r**2 - self.r*2) * np.sin(self.theta)**2
        unten = np.exp(-2*exponent) * self.r**2 * np.sin(self.theta)**2 - \
            self.l**2 * np.exp(2*exponent) * (1 - 2/self.r)
        w = 0.5 * np.log(oben/unten)
        return w
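# Hypothetical usage; assumes the Potential base class (not shown here)
# builds the radial grid self.r from r_range/num:
ds = DistortedSchwarzschild(theta=np.pi/2, l=3.8, o=1)
w = ds.compute_w()  # effective potential w(r) sampled along the grid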
| 37.48
| 79
| 0.526147
| 143
| 937
| 3.321678
| 0.314685
| 0.084211
| 0.063158
| 0.050526
| 0.185263
| 0.088421
| 0.088421
| 0.088421
| 0
| 0
| 0
| 0.053125
| 0.316969
| 937
| 24
| 80
| 39.041667
| 0.689063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|