Schema (column, dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
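As a minimal sketch of how records with this schema can be consumed (the parquet shard name and the 0.5 threshold are illustrative assumptions, not part of this dump):

```python
# Minimal sketch, assuming the records are stored as a parquet shard with the
# columns listed above. The filename and the filter thresholds are hypothetical.
import pandas as pd

df = pd.read_parquet("train-00000-of-00001.parquet")  # hypothetical shard name

# Keep Python files that parse (cate_ast == 1) and are not dominated by
# duplicated 5-grams, then peek at their contents.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
for content in df.loc[mask, "content"].head(3):
    print(content[:200])
```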
**Record 1: micropy/project/modules/packages.py**

- hexsha `263290a43a7fd76dbddf7ceb014df04f20ba0371`, size 7249, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `MathijsNL/micropy-cli` @ `2dec0ca3045a22f6552dc3813bedaf552d4bad2c`, same path for all three, licenses `["MIT"]`
- max_stars_count, max_issues_count, max_forks_count: null; all event datetimes: null
- content:

```python
# -*- coding: utf-8 -*-

"""Project Packages Module."""

import shutil
from pathlib import Path
from typing import Any, Union

from boltons import fileutils

from micropy import utils
from micropy.packages import (LocalDependencySource, PackageDependencySource,
                              create_dependency_source)
from micropy.project.modules import ProjectModule


class PackagesModule(ProjectModule):
    """Project Module for handling requirements.

    Args:
        path (str): Path to create requirements file at.
        packages (dict, optional): Initial packages to use.
            Defaults to None.

    """
    PRIORITY: int = 8

    def __init__(self, path, packages=None, **kwargs):
        super().__init__(**kwargs)
        self._path = Path(path)
        self._loaded = False
        packages = packages or {}
        self.name = "packages"
        self.packages = {**packages}

    @property
    def path(self):
        """Path to requirements file.

        Returns:
            Path: Path to file

        """
        path = self.parent.path / self._path
        return path

    @property
    def pkg_path(self):
        """Path to package data folder.

        Returns:
            Path: Path to folder.

        """
        return self.parent.data_path / self.parent.name

    @property
    def config(self):
        """Config values specific to component.

        Returns:
            dict: Component config.

        """
        return {
            self.name: self.packages
        }

    @property
    def context(self):
        """Context values specific to component.

        Returns:
            dict: Context values.

        """
        _paths = set(self.parent.context.get('paths', set()))
        _paths.add(self.pkg_path)
        return {
            'paths': _paths
        }

    def install_package(self, source: Union[LocalDependencySource, PackageDependencySource]) -> Any:
        with source as files:
            if isinstance(files, list):
                self.log.debug(f"installing {source} as module(s)")
                # Iterates over flattened list of stubs tuple
                file_paths = [(f, (self.pkg_path / f.name)) for f in list(sum(files, ()))]
                for paths in file_paths:
                    return shutil.move(*paths)  # overwrites if existing
            self.log.debug(f'installing {source} as package')
            pkg_path = self.pkg_path / source.package.name
            return fileutils.copytree(files, pkg_path)

    @ProjectModule.hook(dev=False)
    def add_from_file(self, path=None, dev=False, **kwargs):
        """Loads all requirements from file.

        Args:
            path (str): Path to file. Defaults to self.path.
            dev (bool, optional): If dev requirements should be loaded.
                Defaults to False.

        """
        reqs = utils.iter_requirements(self.path)
        for req in reqs:
            self.add_package(req, fetch=True)
        return reqs

    @ProjectModule.hook()
    def add_package(self, package, dev=False, **kwargs):
        """Add requirement to project.

        Args:
            package (str): package name/spec
            dev (bool, optional): If dev requirements should be loaded.
                Defaults to False.

        Returns:
            dict: Dictionary of packages

        """
        source = create_dependency_source(package)
        pkg = source.package
        self.log.info(f"Adding $[{pkg.name}] to requirements...")
        if self.packages.get(pkg.name, None):
            self.log.error(f"$[{pkg}] is already installed!")
            self.update()
            return None
        self.packages[pkg.name] = pkg.pretty_specs
        try:
            self.load()
        except ValueError:
            self.log.error(f"Failed to find package $[{pkg.name}]!")
            self.log.error("Is it available on PyPi?")
            self.packages.pop(pkg.name)
            self.parent.config.pop(f"{self.name}.{pkg}")
        except Exception as e:
            self.log.error(
                f"An error occured during the installation of $[{pkg.name}]!",
                exception=e)
            self.packages.pop(pkg.name)
            self.parent.config.pop(f"{self.name}.{pkg}")
        else:
            self.parent.config.set(f"{self.name}.{pkg}", pkg.pretty_specs)
            self.log.success("Package installed!")
        finally:
            return self.packages

    def load(self, fetch=True, **kwargs):
        """Retrieves and stubs project requirements."""
        self.pkg_path.mkdir(exist_ok=True)
        if self.path.exists():
            packages = utils.iter_requirements(self.path)
            for p in packages:
                pkg = create_dependency_source(p.line).package
                self.packages.update({pkg.name: pkg.pretty_specs})
                self.parent.config.set(f'{self.name}.{pkg.name}', pkg.pretty_specs)
        pkg_keys = set(self.packages.keys())
        pkg_cache = self.parent._get_cache(self.name)
        new_pkgs = pkg_keys.copy()
        if pkg_cache:
            new_pkgs = new_pkgs - set(pkg_cache)
        new_pkgs = [f"{name}{s if s != '*' else ''}"
                    for name, s in self.packages.items() if name in new_pkgs]
        if fetch:
            if new_pkgs:
                self.log.title("Fetching Requirements")
                for req in new_pkgs:
                    def format_desc(p): return "".join(self.log.iter_formatted(f"$B[{p}]"))
                    source = create_dependency_source(
                        req, format_desc=lambda p: f"{self.log.get_service()} {format_desc(p)}")
                    self.install_package(source)
        self.update()
        self.parent._set_cache(self.name, list(pkg_keys))

    def create(self):
        """Create project files."""
        return self.update()

    def update(self):
        """Dumps packages to file at path."""
        self.parent.config.set(self.name, self.packages)
        ctx_paths = self.parent.context.get('paths')
        ctx_paths.add(self.pkg_path)
        if not self.path.exists():
            self.path.touch()
        pkgs = [(f"{name}{spec}" if spec and spec != "*" else name)
                for name, spec in self.packages.items()]
        with self.path.open('r+') as f:
            content = [c.strip() for c in f.readlines() if c.strip() != '']
            _lines = sorted(set(pkgs) | set(content))
            lines = [l + "\n" for l in _lines]
            f.seek(0)
            f.writelines(lines)


class DevPackagesModule(PackagesModule):
    """Project Module for Dev Packages."""
    PRIORITY: int = 7

    def __init__(self, path, **kwargs):
        super().__init__(path, **kwargs)
        self.packages.update({'micropy-cli': '*'})
        self.name = "dev-packages"

    def load(self, *args, **kwargs):
        """Load component."""
        return super().load(*args, **kwargs, fetch=False)

    @ProjectModule.hook(dev=True)
    def add_package(self, package, **kwargs):
        """Adds package."""
        return super().add_package(package, **kwargs)

    @ProjectModule.hook(dev=True)
    def add_from_file(self, path=None, **kwargs):
        """Adds packages from file."""
        return super().add_from_file(path=path, **kwargs)
```

- avg_line_length 32.95, max_line_length 100, alphanum_fraction 0.572769
- `qsc_code_*_quality_signal`: num_words 850, num_chars 7249, mean_word_length 4.783529, frac_words_unique 0.214118, frac_chars_top_2grams 0.029513, frac_chars_top_3grams 0.013527, frac_chars_top_4grams 0.011805, frac_chars_dupe_5grams 0.201181, frac_chars_dupe_6grams 0.14609, frac_chars_dupe_7grams 0.099361, frac_chars_dupe_8grams 0.071323, frac_chars_dupe_9grams 0.056075, frac_chars_dupe_10grams 0.056075, frac_chars_replacement_symbols 0, frac_chars_digital 0.000795, frac_chars_whitespace 0.305973, size_file_byte 7249, num_lines 219, num_chars_line_max 101, num_chars_line_mean 33.100457, frac_chars_alphabet 0.807394, frac_chars_comments 0.158229, cate_xml_start 0, frac_lines_dupe_lines 0.103704, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.086754, frac_chars_long_word_length 0.007934, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0.118519, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.051852, frac_lines_simplefunc 0.007407, score_lines_no_logic 0.296296, frac_lines_print 0
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
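Several of the signals above are simple functions of `content`. As a rough sketch of how a few of them can be recomputed (the upstream tokenization is an assumption; this mirrors the column names, not a confirmed reference implementation):

```python
# Hypothetical re-derivation of a handful of the qsc_code_* signals from a
# record's `content` string, using naive whitespace tokenization.
from collections import Counter

def quality_signals(content: str) -> dict:
    words = content.split()
    chars = max(len(content), 1)
    lines = content.splitlines()
    bigrams = Counter(zip(words, words[1:]))
    # Fraction of characters covered by the single most frequent word 2-gram.
    if bigrams:
        (w1, w2), n = bigrams.most_common(1)[0]
        top2 = n * (len(w1) + len(w2)) / chars
    else:
        top2 = 0.0
    return {
        "num_words": len(words),
        "num_chars": len(content),
        "mean_word_length": sum(map(len, words)) / max(len(words), 1),
        "frac_words_unique": len(set(words)) / max(len(words), 1),
        "frac_chars_top_2grams": top2,
        "num_lines": len(lines),
        "num_chars_line_max": max((len(l) for l in lines), default=0),
    }
```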
**Record 2: src/covid19/dash_forecast.py**

- hexsha `263cf4e525b99a26c7f1cf3f1ee37f07078de9e0`, size 7168, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `marhoy/covid19` @ `b53f7b812edea46bca6b27ac106d2363ee5d44d5`, same path for all three, licenses `["MIT"]`
- all star/issue/fork counts and event datetimes: null
- content:

```python
"""The dash-tab with forecast data."""
from typing import List
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output
import covid19.forecast
from covid19.data import DAY_ZERO_START
from .dash_app import app
tab_forecast = html.Div(
[
dbc.Row(
[
dbc.Col(
dbc.FormGroup(
[
dbc.Label("Select country"),
dcc.Dropdown(
id="forecast-country-selector",
options=covid19.dash_app.all_countries,
value="Norway",
clearable=False,
),
]
),
md=6,
),
dbc.Col(
dbc.FormGroup(
[
dbc.Label("The day when spreading is under control"),
dcc.Slider(
id="day-of-control",
min=60,
max=180,
step=10,
marks={i: f"{i}" for i in range(60, 181, 10)},
value=120,
),
]
),
md=6,
),
]
),
dbc.Row(
[
dbc.Col(
dbc.FormGroup(
[
dbc.Label("Factor of unrecorded cases"),
dcc.Slider(
id="unrecorded-factor",
min=1,
max=5,
step=1,
marks={i: f"{i}" for i in range(1, 6)},
value=3,
),
]
),
md=6,
),
dbc.Col(
dbc.FormGroup(
[
dbc.Label(
"Number of days it takes to recover from the infection"
),
dcc.Slider(
id="recovery-days",
min=5,
max=25,
step=5,
marks={i: f"{i}" for i in range(5, 26, 5)},
value=15,
),
]
),
md=6,
),
]
),
dbc.Row(
dbc.Col(
dbc.FormGroup(
[
dbc.Label("Select plot scale"),
dbc.RadioItems(
id="forecast-figure-scale",
options=[
{"value": "linear", "label": "Linear"},
{"value": "log", "label": "Logarithmic"},
],
value="linear",
),
]
),
md=6,
)
),
dbc.Row([dbc.Col(dcc.Graph(id="forecast-figure"), md=12)]),
dbc.Row(
[
dbc.Col(
[
html.H3("About the model"),
dcc.Markdown(
"""
This is how the forecast model works:
The measures taken in e.g. China and South Korea have shown that they were
able to drive the growth towards 1.0 in an exponential way.
**NB: The model assumes that the country in question is taking measures
that are as effective as the ones taken in China**
* The current growth rate is estimated by using an exponentially weighted
average of the last 7 days.
* The growth rate is assumed to converge towards 1.0 in an exponential
decay.
The speed of the decay is controlled by the parameter "Day when under
control" below.
* Patients are assumed to be ill from the day they are infected.
* They are assumed to have recovered after the number of days you specify.
"""
),
]
)
]
),
]
)
@app.callback(
Output("forecast-country-selector", "options"),
[Input("interval-component", "n_intervals")],
)
def forecast_country_selector_options(*_) -> List[dict]:
"""Scheduled update of dropdown options.
Returns:
List[dict]: All countries.
"""
return covid19.dash_app.all_countries
@app.callback(
Output("forecast-figure", "figure"),
[
Input("forecast-country-selector", "value"),
Input("day-of-control", "value"),
Input("unrecorded-factor", "value"),
Input("recovery-days", "value"),
Input("forecast-figure-scale", "value"),
],
)
def forecast_figure_figure(
country: str,
day_of_control: int,
unrecorded_factor: float,
recovery_days: int,
y_axis_type: str,
) -> go.Figure:
"""Create figure with the forecasts."""
infected = covid19.dash_app.infected
observed_data, forecast, being_ill = covid19.forecast.create_forecast(
infected[country],
day_of_control=day_of_control,
days_to_recover=recovery_days,
forecast_start=-1,
ratio_avg_days=4,
)
observed_data *= unrecorded_factor
forecast *= unrecorded_factor
being_ill *= unrecorded_factor
fig = go.Figure(
layout={
"title": "Forecast: Number of infected and ill people over time",
"xaxis": {
"title": f"Days since more that {DAY_ZERO_START}"
f" people confirmed infected"
},
"yaxis": {"type": y_axis_type},
"margin": {"l": 0, "r": 0},
}
)
fig.add_trace(
go.Scatter(
x=observed_data.index,
y=observed_data.values,
name="Currently infected",
line=dict(color="green", width=8),
mode="lines",
)
)
fig.add_trace(
go.Scatter(
x=forecast.index,
y=forecast.values,
name="Forecast infected",
line=dict(color="green", width=3, dash="dash"),
mode="lines",
)
)
fig.add_trace(
go.Scatter(
x=being_ill.index,
y=being_ill.values,
name="People being ill",
line=dict(color="orange", width=3, dash="dash"),
mode="lines",
)
)
return fig
```

- avg_line_length 32, max_line_length 87, alphanum_fraction 0.410435
- `qsc_code_*_quality_signal`: num_words 636, num_chars 7168, mean_word_length 4.534591, frac_words_unique 0.327044, frac_chars_top_2grams 0.014563, frac_chars_top_3grams 0.015603, frac_chars_top_4grams 0.020804, frac_chars_dupe_5grams 0.179958, frac_chars_dupe_6grams 0.161928, frac_chars_dupe_7grams 0.100208, frac_chars_dupe_8grams 0.100208, frac_chars_dupe_9grams 0.039528, frac_chars_dupe_10grams 0.024272, frac_chars_replacement_symbols 0, frac_chars_digital 0.018066, frac_chars_whitespace 0.498047, size_file_byte 7168, num_lines 223, num_chars_line_max 88, num_chars_line_mean 32.143498, frac_chars_alphabet 0.783491, frac_chars_comments 0.020229, cate_xml_start 0, frac_lines_dupe_lines 0.297872, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.126414, frac_chars_long_word_length 0.019183, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0.010638, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.047872, frac_lines_simplefunc 0, score_lines_no_logic 0.069149, frac_lines_print 0
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 3: 1132.py**

- hexsha `263d7e9a9f0a66a08ad20dffde0a31855dd85fee`, size 145, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `barroslipe/urionlinejudge` @ `a20d8199d9a92b30ea394a6c949967d2fc51aa34`, same path for all three, licenses `["MIT"]`
- all star/issue/fork counts and event datetimes: null
- content:

```python
x = int(input())
y = int(input())

if x > y:
    x, y = y, x

soma = 0
for i in range(x, y + 1):
    if i%13 != 0:
        soma += i

print(soma)
```

- avg_line_length 12.083333, max_line_length 25, alphanum_fraction 0.448276
- `qsc_code_*_quality_signal`: num_words 30, num_chars 145, mean_word_length 2.166667, frac_words_unique 0.466667, frac_chars_top_2grams 0.092308, frac_chars_top_3grams 0, frac_chars_top_4grams 0, frac_chars_dupe_5grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.053191, frac_chars_whitespace 0.351724, size_file_byte 145, num_lines 12, num_chars_line_max 26, num_chars_line_mean 12.083333, frac_chars_alphabet 0.638298, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0, frac_lines_simplefunc 0, score_lines_no_logic 0, frac_lines_print 0.111111
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 4: CodeForces/579A. Raising Bacteria/Raising Bacteria.py**

- hexsha `263e3167efd7fced0be59c8dcec4e3dcbfdbafc7`, size 193, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `tameemalaa/Solved-Problems` @ `9e8bc96eb60f200787f2682e974ec6509a7c1734`, same path for all three, licenses `["MIT"]`
- all star/issue/fork counts and event datetimes: null
- content:

```python
# Solution by : Tameem Alaa El-Deen Sayed
n = int(input())
c= 0
while n >= 1 :
    if n % 2 == 0 :
        n = n /2
    else:
        n = n -1
        n = n / 2
        c = c + 1
print (c)
```

- avg_line_length 16.083333, max_line_length 41, alphanum_fraction 0.393782
- `qsc_code_*_quality_signal`: num_words 34, num_chars 193, mean_word_length 2.235294, frac_words_unique 0.529412, frac_chars_top_2grams 0.078947, frac_chars_top_3grams 0.078947, frac_chars_top_4grams 0, frac_chars_dupe_5grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.07767, frac_chars_whitespace 0.466321, size_file_byte 193, num_lines 12, num_chars_line_max 42, num_chars_line_mean 16.083333, frac_chars_alphabet 0.660194, frac_chars_comments 0.202073, cate_xml_start 0, frac_lines_dupe_lines 0.2, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0, frac_lines_simplefunc 0, score_lines_no_logic 0, frac_lines_print 0.1
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 5: regression/other/pybindgen/classes/gen.py**

- hexsha `263ffe0fb0dfe081b932ef96364418b62b9cad91`, size 1520, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `ExternalRepositories/shroud` @ `86c39d2324d947d28055f9024f52cc493eb0c813`, same path for all three, licenses `["BSD-3-Clause"]`
- max_stars_count 73 (events 2017-10-11T17:01:50.000Z to 2022-01-01T21:42:12.000Z); max_issues_count 29 (2018-03-21T19:34:29.000Z to 2022-02-04T18:13:14.000Z); max_forks_count 8 (2017-11-22T14:27:01.000Z to 2022-03-30T08:49:03.000Z)
- content:

```python
# Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
# other Shroud Project Developers.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
#
########################################################################
"""
Generate a module for classes using PyBindGen
"""
import pybindgen
from pybindgen import (param, retval)


def generate(fp):
    mod = pybindgen.Module('classes')
    mod.add_include('"classes.hpp"')
    namespace = mod.add_cpp_namespace('classes')

    class1 = namespace.add_class('Class1')
    class1.add_enum('DIRECTION', ['UP', 'DOWN', 'LEFT', 'RIGHT'])
    # class1.add_function('AcceptEnum', None, [param('MyEnum_e', 'value')])
    class1.add_instance_attribute('m_flag', 'int')
    class1.add_constructor([param('int', 'flag')])
    class1.add_constructor([])
    class1.add_method('Method1', None, [])

    sclass = namespace.add_class("Singleton", is_singleton=True)
    sclass.add_method("getReference", retval("classes::Singleton&", caller_owns_return=True), [],
                      is_static=True)

    # mod.add_class('Class1',
    #               memory_policy=cppclass.ReferenceCountingMethodsPolicy(
    #                   incref_method='Ref',
    #                   decref_method='Unref',
    #                   peekref_method='PeekRef')
    #               )

    # mod.add_function('DoSomething', retval('Class1 *', caller_owns_return=False), [])

    mod.generate(fp)


if __name__ == '__main__':
    import sys
    generate(sys.stdout)
```

- avg_line_length 33.043478, max_line_length 97, alphanum_fraction 0.623026
- `qsc_code_*_quality_signal`: num_words 163, num_chars 1520, mean_word_length 5.595092, frac_words_unique 0.564417, frac_chars_top_2grams 0.059211, frac_chars_top_3grams 0.037281, frac_chars_top_4grams 0, frac_chars_dupe_5grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.016247, frac_chars_whitespace 0.190132, size_file_byte 1520, num_lines 45, num_chars_line_max 98, num_chars_line_mean 33.777778, frac_chars_alphabet 0.724614, frac_chars_comments 0.415789, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.161006, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0.052632, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.157895, frac_lines_simplefunc 0, score_lines_no_logic 0.210526, frac_lines_print 0
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 6: aliens4friends/models/tinfoilhat.py**

- hexsha `2641f16660f596ae7a11f3f894108dc509f1b570`, size 12597, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `noi-techpark/solda-aliens4friends` @ `65f65f4e6775405e3098b2bac3f5903ff1c56795`, same path for all three, licenses `["Apache-2.0"]`
- all star/issue/fork counts and event datetimes: null
- content:

```python
# SPDX-FileCopyrightText: NOI Techpark <info@noi.bz.it>
#
# SPDX-License-Identifier: Apache-2.0

import logging
from typing import List, Dict, TypeVar, Optional
from copy import deepcopy
from deepdiff import DeepDiff

from .base import BaseModel, DictModel, ModelError
from aliens4friends.commons.utils import sha1sum_str

logger = logging.getLogger(__name__)


class SourceFile(BaseModel):

    def __init__(
        self,
        rootpath: Optional[str] = None,
        relpath: Optional[str] = None,
        src_uri: Optional[str] = None,
        sha1_cksum: Optional[str] = None,
        git_sha1: Optional[str] = None,
        tags: Optional[List[str]] = None
    ) -> None:
        self.rootpath = rootpath
        self.relpath = relpath
        self.src_uri = src_uri
        self.sha1_cksum = sha1_cksum
        self.git_sha1 = git_sha1
        self.tags = tags
        # TODO: a specific class for tags should be added,
        # like in tinfoilhat


class FileWithSize(BaseModel):

    def __init__(
        self,
        path: Optional[str] = None,
        sha256: Optional[str] = None,
        size: int = 0
    ) -> None:
        self.path = path
        self.sha256 = sha256
        self.size = size


class FileContainer(BaseModel):

    def __init__(
        self,
        file_dir: Optional[str] = None,
        files: Optional[List[FileWithSize]] = None
    ) -> None:
        self.file_dir = file_dir
        self.files = FileWithSize.drilldown(files)


class DependsProvides(BaseModel):

    def __init__(
        self,
        depends: List[str],
        provides: List[str]
    ):
        self.depends = depends
        self.provides = provides


_TDependsProvidesContainer = TypeVar('_TDependsProvidesContainer', bound='DependsProvidesContainer')

class DependsProvidesContainer(DictModel):
    """DictModel for 'depends' and 'provides' of bitbake recipes; the key is the
    machine name, since the same recipe, built for different machines, may have
    different build dependencies
    """
    subclass = DependsProvides

    @staticmethod
    def merge(
        old: Dict[str, _TDependsProvidesContainer],
        new: Dict[str, _TDependsProvidesContainer]
    ) -> Dict[str, _TDependsProvidesContainer]:
        res = {}
        ids = set(list(old) + list(new))
        for id in ids:
            if id in new and id in old:
                logger.debug(f"{id} found in new and old, checking consistency")
                diff = DeepDiff(old[id].depends, new[id].depends, ignore_order=True)
                if diff:
                    logger.warning(
                        "depends mismatch for machine"
                        f" '{id}', diff is: {diff}"
                    )
                new[id].provides = list(set(old[id].provides + new[id].provides))
                res[id] = new[id]
            elif id in new:
                logger.debug(f"depends_provides for machine '{id}' found in new")
                res[id] = new[id]
            elif id in old:
                logger.debug(f"depends_provides for machine '{id}' found in old")
                res[id] = old[id]
        return res


class PackageMetaData(BaseModel):

    def __init__(
        self,
        name: Optional[str] = None,
        base_name: Optional[str] = None,
        version: Optional[str] = None,
        revision: Optional[str] = None,
        package_arch: Optional[str] = None,
        recipe_name: Optional[str] = None,
        recipe_version: Optional[str] = None,
        recipe_revision: Optional[str] = None,
        license: Optional[str] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        depends: Optional[List[str]] = None,
        provides: Optional[List[str]] = None
    ) -> None:
        self.name = name
        self.base_name = base_name
        self.version = version
        self.revision = revision
        self.package_arch = package_arch
        self.recipe_name = recipe_name
        self.recipe_version = recipe_version
        self.recipe_revision = recipe_revision
        self.license = license
        self.summary = summary
        self.description = description
        self.depends = depends
        self.provides = provides


_TPackage = TypeVar('_TPackage', bound='Package')

class Package(BaseModel):

    def __init__(
        self,
        metadata: Optional[PackageMetaData] = None,
        files: Optional[FileContainer] = None,
        chk_sum: Optional[str] = None
    ) -> None:
        self.metadata = PackageMetaData.decode(metadata)
        self.files = FileContainer.decode(files)
        self.chk_sum = chk_sum


class PackageWithTags(BaseModel):

    def __init__(
        self,
        package: Optional[Package] = None,
        tags: Optional[List[str]] = None
    ) -> None:
        self.package = Package.decode(package)
        self.tags = tags


_TPackageContainer = TypeVar('_TPackageContainer', bound='PackageContainer')

class PackageContainer(DictModel):
    subclass = PackageWithTags

    @staticmethod
    def merge(
        old: Dict[str, _TPackageContainer],
        new: Dict[str, _TPackageContainer]
    ) -> Dict[str, _TPackageContainer]:
        res = {}
        ids = set(list(old) + list(new))
        for id in ids:
            if id in new and id in old:
                logger.debug(f"{id} found in new and old, merging")
                diff = DeepDiff(
                    old[id],
                    new[id],
                    ignore_order=True,
                    exclude_paths=[
                        'root.tags',  # here we expect differences that we want
                                      # to merge
                        'root.package.files.file_dir',  # specific to
                            # local build, needed just for aliensrc
                            # package creation in a previous stage;
                            # we expect it may be different if
                            # tinfoilhat files to merge have been
                            # generated in different local builds, but
                            # it doesn't matter here
                    ]
                )
                if diff:
                    raise ModelError(
                        f"can't merge {id}, because some package fields"
                        f" mismatch, diff is: {diff}"
                    )
                res[id] = deepcopy(new[id])
                res[id].tags = list(set(old[id].tags + new[id].tags))
            elif id in new and id not in old:
                logger.debug(f"{id} found in new")
                res[id] = new[id]
            elif id not in new and id in old:
                logger.debug(f"{id} found in old")
                res[id] = old[id]
        return res


class RecipeMetaData(BaseModel):

    def __init__(
        self,
        name: Optional[str] = None,
        base_name: Optional[str] = None,
        version: Optional[str] = None,
        revision: Optional[str] = None,
        variant: Optional[str] = None,
        author: Optional[str] = None,
        homepage: Optional[str] = None,
        summary: Optional[str] = None,
        description: Optional[str] = None,
        license: Optional[str] = None,
        depends_provides: Optional[Dict[str, DependsProvides]] = None
    ) -> None:
        self.name = name
        self.base_name = base_name
        self.version = version
        self.revision = revision
        self.variant = variant
        self.author = author
        self.homepage = homepage
        self.summary = summary
        self.description = description
        self.license = license
        self.depends_provides = DependsProvidesContainer.decode(depends_provides)

    @staticmethod
    def merge(old: 'RecipeMetaData', new: 'RecipeMetaData') -> 'RecipeMetaData':
        updatable = [ "homepage", "summary", "description" ]
        res = RecipeMetaData()
        for attr_name in res.encode():
            if attr_name in updatable:
                setattr(res, attr_name, getattr(new, attr_name))
            else:
                setattr(res, attr_name, getattr(old, attr_name))
        res.depends_provides = DependsProvidesContainer.merge(
            old.depends_provides,
            new.depends_provides
        )
        return res


class CveProduct(BaseModel):

    def __init__(
        self,
        vendor: Optional[str] = None,
        product: Optional[str] = None
    ):
        self.vendor = vendor
        self.product = product


class RecipeCveMetaData(BaseModel):

    def __init__(
        self,
        cve_version: Optional[str] = None,
        cve_version_suffix: Optional[str] = None,
        cve_check_whitelist: Optional[List[str]] = None,
        cve_product: Optional[List[CveProduct]] = None
    ):
        self.cve_version = cve_version
        self.cve_version_suffix = cve_version_suffix
        self.cve_check_whitelist = cve_check_whitelist
        self.cve_product = CveProduct.drilldown(cve_product)

    @staticmethod
    def merge(old: 'RecipeCveMetaData', new: 'RecipeCveMetaData') -> 'RecipeCveMetaData':
        res = RecipeCveMetaData()
        must_be_equal = [ 'cve_version', 'cve_version_suffix' ]
        for attr_name in old.encode():
            old_attr = getattr(old, attr_name)
            new_attr = getattr(new, attr_name)
            if old_attr == new_attr:
                setattr(res, attr_name, old_attr)
            elif attr_name in must_be_equal:
                raise ModelError(
                    f"can't merge cve metadata for {old.cve_product[0].product}"
                    f": '{attr_name}' mismatch"
                )
            else:
                setattr(res, attr_name, new_attr)
        return res


class Recipe(BaseModel):

    def __init__(
        self,
        metadata: Optional[RecipeMetaData] = None,
        cve_metadata: Optional[RecipeCveMetaData] = None,
        source_files: Optional[List[SourceFile]] = None,
        chk_sum: Optional[str] = None
    ) -> None:
        self.metadata = RecipeMetaData.decode(metadata)
        self.cve_metadata = RecipeCveMetaData.decode(cve_metadata)
        self.source_files = SourceFile.drilldown(source_files)
        self.chk_sum = chk_sum


_TContainer = TypeVar('_TContainer', bound='Container')

class Container(BaseModel):

    def __init__(
        self,
        recipe: Optional[Recipe] = None,
        tags: Optional[List[str]] = None,
        packages: Optional[Dict[str, PackageWithTags]] = None
    ) -> None:
        self.recipe = Recipe.decode(recipe)
        self.tags = tags
        self.packages = PackageContainer.decode(packages)

    @staticmethod
    def merge(
        old: Dict[str, _TContainer],
        new: Dict[str, _TContainer],
    ) -> Dict[str, _TContainer]:
        """merge tags, packages and depends_provides of two tinfoilhat dicts in
        a new tinfoilhat dict; all other attributes of the two tinfoilhat dict -
        except for bitbake-specific paths - must be the same, otherwise a
        ModelError exception is raised
        """
        res = {}
        ids = set(list(old) + list(new))
        for id in ids:
            if id in new and id in old:
                logger.debug(f"{id} found in new and old, merging")
                diff = DeepDiff(
                    old[id],
                    new[id],
                    ignore_order=True,
                    # legend for paths excluded from diff:
                    # (M): expected diffs that we want to merge
                    # (I): expected diffs that we can safely ignore (we keep the newer value)
                    # (U): undesirable diffs that however "happen", even if the recipe version and revision stay the same;
                    #      we ignore them to avoid complications (we keep the newer value)
                    exclude_paths=[
                        "root.tags",  # (M)
                        "root.packages",  # (M)
                        "root.recipe.metadata.description",  # (U)
                        "root.recipe.metadata.homepage",  # (U)
                        "root.recipe.metadata.summary",  # (U)
                        "root.recipe.metadata.depends_provides",  # (I)
                        "root.recipe.cve_metadata",  # (M)
                    ],
                    exclude_regex_paths=[
                        r"root.recipe.source_files\[\d+\].tags",  # (M)
                        r"root.recipe.source_files\[\d+\].src_uri",  # (U)
                        r"root.recipe.source_files\[\d+\].rootpath",  # (I)
                        r"root.recipe.source_files\[\d+\].relpath",  # (U) # FIXME workaround, handlye filename changes instead
                    ]
                )
                if diff:
                    raise ModelError(
                        f"can't merge tags and packages for recipe {id}, "
                        f"because some fields mismatch, diff is: {diff}"
                    )
                res[id] = deepcopy(new[id])
                res[id].tags = list(set(old[id].tags + new[id].tags))
                res[id].packages = PackageContainer.merge(
                    old[id].packages,
                    new[id].packages
                )
                res[id].recipe.metadata = RecipeMetaData.merge(
                    old[id].recipe.metadata,
                    new[id].recipe.metadata
                )
                res[id].recipe.cve_metadata = RecipeCveMetaData.merge(
                    old[id].recipe.cve_metadata,
                    new[id].recipe.cve_metadata
                )
                old_files = {
                    f'{s.relpath}-{s.git_sha1 or s.sha1_cksum}': s
                    for s in old[id].recipe.source_files
                }
                new_files = {
                    f'{s.relpath}-{s.git_sha1 or s.sha1_cksum}': s
                    for s in res[id].recipe.source_files
                    # res[id] here is on purpose, we need to modify
                    # its contents by reference;
                    # it has been deepcopied from new[id]
                }
                for file_id in new_files:
                    if old_files.get(file_id):  # FIMXE workaround (see above)
                        new_files[file_id].tags = list(set(
                            old_files[file_id].tags + new_files[file_id].tags
                        ))
            elif id in new:
                logger.debug(f"{id} found in new")
                res[id] = new[id]
            elif id in old:
                logger.debug(f"{id} found in old")
                res[id] = old[id]
        return res


_TTinfoilHatModel = TypeVar('_TTinfoilHatModel', bound='TinfoilHatModel')

class TinfoilHatModel(DictModel):
    subclass = Container

    @staticmethod
    def merge(
        old: _TTinfoilHatModel,
        new: _TTinfoilHatModel
    ) -> _TTinfoilHatModel:
        """merge tags, packages and depends_provides of two tinfoilhat objects
        in a new tinfoilhat object; all other attributes of the two tinfoilhat
        objs - except for bitbake-specific paths - must be the same, otherwise a
        ModelError exception is raised
        """
        res = TinfoilHatModel({})
        res._container = Container.merge(old._container, new._container)
        return res

# FIXME: All merge methods here are based on one assumption:
# that the "new" tinfoilhat file is really newer than the "old"
# one, and that it containes more updated info than the "old" one.
# We should add some field ('project manifest commit date'?)
# tinfoilhat.json in order to check this
```

- avg_line_length 30.28125, max_line_length 108, alphanum_fraction 0.688656
- `qsc_code_*_quality_signal`: num_words 1666, num_chars 12597, mean_word_length 5.080432, frac_words_unique 0.157263, frac_chars_top_2grams 0.033908, frac_chars_top_3grams 0.062027, frac_chars_top_4grams 0.028355, frac_chars_dupe_5grams 0.326441, frac_chars_dupe_6grams 0.303639, frac_chars_dupe_7grams 0.250236, frac_chars_dupe_8grams 0.221408, frac_chars_dupe_9grams 0.205813, frac_chars_dupe_10grams 0.183719, frac_chars_replacement_symbols 0, frac_chars_digital 0.002467, frac_chars_whitespace 0.195523, size_file_byte 12597, num_lines 415, num_chars_line_max 109, num_chars_line_mean 30.354217, frac_chars_alphabet 0.832741, frac_chars_comments 0.153132, cate_xml_start 0, frac_lines_dupe_lines 0.426901, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.124351, frac_chars_long_word_length 0.042961, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0.007229, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0.052632, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.017544, frac_lines_simplefunc 0, score_lines_no_logic 0.140351, frac_lines_print 0
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 7: 01b_wheater_json.py**

- hexsha `2642eef0ddc241add30d3eac4bb4b4cb887bc80f`, size 276, ext `py`, lang Python
- max_stars: `programujemy-python/programuj-w-zespole-test` @ `865f96e5be6ab4e3a7f15b9e446a1c0cbae06472`, path `teachers_sample_codes/spotkanie_3/01b_wheater_json.py`, licenses `["MIT"]`, count 2 (2022-01-31T20:21:18.000Z to 2022-02-22T10:54:41.000Z)
- max_issues: `abixadamj/Popojutrze-Progr-mujemy` @ `d6f5a4de799a486024f799c4c392fdc1419654b8`, path `teachers_materials/spotkanie_3/01b_wheater_json.py`, licenses `["MIT"]`, count null, event datetimes null
- max_forks: same repo, head, and path as max_issues, licenses `["MIT"]`, count 1 (2022-03-07T11:23:58.000Z to 2022-03-07T11:23:58.000Z)
- content:

```python
# przykład wykorzystania biblioteki requests (example of using the requests library)
import requests

params = {
    'format': 'j1',
}

api_result = requests.get('https://wttr.in/Varsavia', params)
api_response = api_result.json()

for elem in api_response:
    print(f"Klucz: {elem} ma wartość {api_response[elem]}")
```

- avg_line_length 25.090909, max_line_length 61, alphanum_fraction 0.724638
- `qsc_code_*_quality_signal`: num_words 37, num_chars 276, mean_word_length 5.27027, frac_words_unique 0.648649, frac_chars_top_2grams 0.169231, frac_chars_top_3grams 0, frac_chars_top_4grams 0, frac_chars_dupe_5grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.004202, frac_chars_whitespace 0.137681, size_file_byte 276, num_lines 10, num_chars_line_max 62, num_chars_line_mean 27.6, frac_chars_alphabet 0.815126, frac_chars_comments 0.152174, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.331897, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.125, frac_lines_simplefunc 0, score_lines_no_logic 0.125, frac_lines_print 0.125
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 8: dataPrepScripts/CreateTensor.py**

- hexsha `26445917b8f6b4bc76b5e11b529d19936fabc446`, size 12938, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `strixy16/Clairvoyante` @ `2bf60f9fc54d51518730d94cb05ffdf3a51f0176`, same path for all three, licenses `["BSD-3-Clause"]`
- max_stars_count 171 (events 2017-07-24T00:35:48.000Z to 2022-03-24T08:28:59.000Z); max_issues_count 45 (2018-10-30T07:37:42.000Z to 2021-12-30T07:53:24.000Z); max_forks_count 27 (2017-07-23T21:43:50.000Z to 2021-02-27T01:07:29.000Z)
- content (Python 2 source; `print >>` and `xrange` are kept as-is):

```python
import os
import sys
import argparse
import os
import re
import shlex
import subprocess
import signal
import gc
import param

is_pypy = '__pypy__' in sys.builtin_module_names

def PypyGCCollect(signum, frame):
    gc.collect()
    signal.alarm(60)

cigarRe = r"(\d+)([MIDNSHP=X])"
base2num = dict(zip("ACGT", (0,1,2,3)))
stripe2 = 4 * param.matrixNum
stripe1 = param.matrixNum

def GenerateTensor(args, ctgName, alns, center, refSeq):
    alnCode = [0] * ( (2*param.flankingBaseNum+1) * 4 * param.matrixNum )
    depth = [0] * ((2 * param.flankingBaseNum + 1))
    for aln in alns:
        for refPos, queryAdv, refBase, queryBase in aln:
            if str(refBase) not in "ACGT-":
                continue
            if str(queryBase) not in "ACGT-":
                continue
            if refPos - center >= -(param.flankingBaseNum+1) and refPos - center < param.flankingBaseNum:
                offset = refPos - center + (param.flankingBaseNum+1)
                if queryBase != "-":
                    if refBase != "-":
                        depth[offset] = depth[offset] + 1
                        alnCode[stripe2*offset + stripe1*base2num[refBase] + 0] += 1.0
                        alnCode[stripe2*offset + stripe1*base2num[queryBase] + 1] += 1.0
                        alnCode[stripe2*offset + stripe1*base2num[refBase] + 2] += 1.0
                        alnCode[stripe2*offset + stripe1*base2num[queryBase] + 3] += 1.0
                    elif refBase == "-":
                        idx = min(offset+queryAdv, 2*param.flankingBaseNum+1-1)
                        alnCode[stripe2*idx + stripe1*base2num[queryBase] + 1] += 1.0
                    else:
                        print >> sys.stderr, "Should not reach here: %s, %s" % (refBase, queryBase)
                elif queryBase == "-":
                    if refBase != "-":
                        alnCode[stripe2*offset + stripe1*base2num[refBase] + 2] += 1.0
                    else:
                        print >> sys.stderr, "Should not reach here: %s, %s" % (refBase, queryBase)
                else:
                    print >> sys.stderr, "Should not reach here: %s, %s" % (refBase, queryBase)

    newRefPos = center - (0 if args.refStart == None else (args.refStart - 1))
    if (newRefPos - (param.flankingBaseNum+1) >= 0) and depth[param.flankingBaseNum] >= args.minCoverage:
        outputLine = "%s %d %s %s" % (ctgName, center, refSeq[newRefPos-(param.flankingBaseNum+1):newRefPos+param.flankingBaseNum], " ".join("%0.1f" % x for x in alnCode))
        return outputLine
    else:
        return None

def GetCandidate(args, beginToEnd):
    if args.can_fn != "PIPE":
        f = subprocess.Popen(shlex.split("gzip -fdc %s" % (args.can_fn) ), stdout=subprocess.PIPE, bufsize=8388608)
        fo = f.stdout
    else:
        fo = sys.stdin
    for row in fo:
        row = row.split()
        if args.ctgName != row[0]: continue
        pos = int(row[1])
        if args.ctgStart != None and pos < args.ctgStart: continue
        if args.ctgEnd != None and pos > args.ctgEnd: continue
        if args.considerleftedge == False:
            beginToEnd[ pos - (param.flankingBaseNum+1) ] = [(pos + (param.flankingBaseNum+1), pos)]
        elif args.considerleftedge == True:
            for i in range(pos - (param.flankingBaseNum+1), pos + (param.flankingBaseNum+1)):
                if i not in beginToEnd:
                    beginToEnd[ i ] = [(pos + (param.flankingBaseNum+1), pos)]
                else:
                    beginToEnd[ i ].append((pos + (param.flankingBaseNum+1), pos))
        yield pos
    if args.can_fn != "PIPE":
        fo.close()
        f.wait()
    yield -1

class TensorStdout(object):
    def __init__(self, handle):
        self.stdin = handle

    def __del__(self):
        self.stdin.close()

def OutputAlnTensor(args):
    availableSlots = 10000000
    dcov = args.dcov
    args.refStart = None; args.refEnd = None; refSeq = []; refName = None; rowCount = 0
    if args.ctgStart != None and args.ctgEnd != None:
        args.ctgStart += 1  # Change 0-based (BED) to 1-based (VCF and samtools faidx)
        args.refStart = args.ctgStart; args.refEnd = args.ctgEnd
        args.refStart -= param.expandReferenceRegion
        args.refStart = 1 if args.refStart < 1 else args.refStart
        args.refEnd += param.expandReferenceRegion
        p1 = subprocess.Popen(shlex.split("%s faidx %s %s:%d-%d" % (args.samtools, args.ref_fn, args.ctgName, args.refStart, args.refEnd) ), stdout=subprocess.PIPE, bufsize=8388608)
    else:
        args.ctgStart = args.ctgEnd = None
        p1 = subprocess.Popen(shlex.split("%s faidx %s %s" % (args.samtools, args.ref_fn, args.ctgName) ), stdout=subprocess.PIPE, bufsize=8388608)
    for row in p1.stdout:
        if rowCount == 0:
            refName = row.rstrip().lstrip(">")
        else:
            refSeq.append(row.rstrip())
        rowCount += 1
    refSeq = "".join(refSeq)
    p1.stdout.close()
    p1.wait()

    if p1.returncode != 0 or len(refSeq) == 0:
        print >> sys.stderr, "Failed to load reference seqeunce. Please check if the provided reference fasta %s and the ctgName %s are correct." % (args.ref_fn, args.ctgName)
        sys.exit(1)

    beginToEnd = {}
    canPos = 0
    canGen = GetCandidate(args, beginToEnd)

    p2 = subprocess.Popen(shlex.split("%s view -F 2308 %s %s:%d-%d" % (args.samtools, args.bam_fn, args.ctgName, args.ctgStart, args.ctgEnd) ), stdout=subprocess.PIPE, bufsize=8388608)\
        if args.ctgStart != None and args.ctgEnd != None\
        else subprocess.Popen(shlex.split("%s view -F 2308 %s %s" % (args.samtools, args.bam_fn, args.ctgName) ), stdout=subprocess.PIPE, bufsize=8388608)

    centerToAln = {}

    if args.tensor_fn != "PIPE":
        tensor_fpo = open(args.tensor_fn, "wb")
        tensor_fp = subprocess.Popen(shlex.split("gzip -c"), stdin=subprocess.PIPE, stdout=tensor_fpo, stderr=sys.stderr, bufsize=8388608)
    else:
        tensor_fp = TensorStdout(sys.stdout)

    #if is_pypy:
    #    signal.signal(signal.SIGALRM, PypyGCCollect)
    #    signal.alarm(60)

    previousPos = 0; depthCap = 0
    for l in p2.stdout:
        l = l.split()
        if l[0][0] == "@":
            continue

        QNAME = l[0]
        FLAG = int(l[1])
        RNAME = l[2]
        POS = int(l[3]) - 1  # switch from 1-base to 0-base to match sequence index
        MQ = int(l[4])
        CIGAR = l[5]
        SEQ = l[9]
        refPos = POS
        queryPos = 0

        if MQ < args.minMQ:
            continue

        endToCenter = {}
        activeSet = set()

        while canPos != -1 and canPos < (POS + len(SEQ) + 100000):
            canPos = next(canGen)

        if previousPos != POS:
            previousPos = POS
            depthCap = 0
        else:
            depthCap += 1
            if depthCap >= dcov:
                #print >> sys.stderr, "Bypassing POS %d at depth %d\n" % (POS, depthCap)
                continue

        for m in re.finditer(cigarRe, CIGAR):
            if availableSlots == 0:
                break
            advance = int(m.group(1))
            if m.group(2) == "S":
                queryPos += advance
            if m.group(2) in ("M", "=", "X"):
                for i in xrange(advance):
                    if refPos in beginToEnd:
                        for rEnd, rCenter in beginToEnd[refPos]:
                            if rCenter in activeSet:
                                continue
                            endToCenter[rEnd] = rCenter
                            activeSet.add(rCenter)
                            centerToAln.setdefault(rCenter, [])
                            centerToAln[rCenter].append([])
                    for center in list(activeSet):
                        if availableSlots != 0:
                            availableSlots -= 1
                            centerToAln[center][-1].append( (refPos, 0, refSeq[refPos - (0 if args.refStart == None else (args.refStart - 1))], SEQ[queryPos] ) )
                    if refPos in endToCenter:
                        center = endToCenter[refPos]
                        activeSet.remove(center)
                    refPos += 1
                    queryPos += 1
            elif m.group(2) == "I":
                queryAdv = 0
                for i in range(advance):
                    for center in list(activeSet):
                        if availableSlots != 0:
                            availableSlots -= 1
                            centerToAln[center][-1].append( (refPos, queryAdv, "-", SEQ[queryPos] ))
                    queryPos += 1
                    queryAdv += 1
            elif m.group(2) == "D":
                for i in xrange(advance):
                    for center in list(activeSet):
                        if availableSlots != 0:
                            availableSlots -= 1
                            centerToAln[center][-1].append( (refPos, 0, refSeq[refPos - (0 if args.refStart == None else (args.refStart - 1))], "-" ))
                    if refPos in beginToEnd:
                        for rEnd, rCenter in beginToEnd[refPos]:
                            if rCenter in activeSet:
                                continue
                            endToCenter[rEnd] = rCenter
                            activeSet.add(rCenter)
                            centerToAln.setdefault(rCenter, [])
                            centerToAln[rCenter].append([])
                    if refPos in endToCenter:
                        center = endToCenter[refPos]
                        activeSet.remove(center)
                    refPos += 1

        if depthCap == 0:
            for center in centerToAln.keys():
                if center + (param.flankingBaseNum+1) < POS:
                    l = GenerateTensor(args, args.ctgName, centerToAln[center], center, refSeq)
                    if l != None:
                        tensor_fp.stdin.write(l)
                        tensor_fp.stdin.write("\n")
                    availableSlots += sum(len(i) for i in centerToAln[center])
                    #print >> sys.stderr, "POS %d: remaining slots %d" % (center, availableSlots)
                    del centerToAln[center]

    for center in centerToAln.keys():
        l = GenerateTensor(args, args.ctgName, centerToAln[center], center, refSeq)
        if l != None:
            tensor_fp.stdin.write(l)
            tensor_fp.stdin.write("\n")

    p2.stdout.close()
    p2.wait()

    if args.tensor_fn != "PIPE":
        tensor_fp.stdin.close()
        tensor_fp.wait()
        tensor_fpo.close()

def main():
    parser = argparse.ArgumentParser(
        description="Generate tensors summarizing local alignments from a BAM file and a list of candidate locations" )

    parser.add_argument('--bam_fn', type=str, default="input.bam",
                        help="Sorted bam file input, default: %(default)s")
    parser.add_argument('--ref_fn', type=str, default="ref.fa",
                        help="Reference fasta file input, default: %(default)s")
    parser.add_argument('--can_fn', type=str, default="PIPE",
                        help="Variant candidate list generated by ExtractVariantCandidates.py or true variant list generated by GetTruth.py, use PIPE for standard input, default: %(default)s")
    parser.add_argument('--tensor_fn', type=str, default="PIPE",
                        help="Tensor output, use PIPE for standard output, default: %(default)s")
    parser.add_argument('--minMQ', type=int, default=0,
                        help="Minimum Mapping Quality. Mapping quality lower than the setting will be filtered, default: %(default)d")
    parser.add_argument('--ctgName', type=str, default="chr17",
                        help="The name of sequence to be processed, default: %(default)s")
    parser.add_argument('--ctgStart', type=int, default=None,
                        help="The 1-bsae starting position of the sequence to be processed")
    parser.add_argument('--ctgEnd', type=int, default=None,
                        help="The inclusive ending position of the sequence to be processed")
    parser.add_argument('--samtools', type=str, default="samtools",
                        help="Path to the 'samtools', default: %(default)s")
    parser.add_argument('--considerleftedge', type=param.str2bool, nargs='?', const=True, default=True,
                        help="Count the left-most base-pairs of a read for coverage even if the starting position of a read is after the starting position of a tensor, default: %(default)s")
    parser.add_argument('--dcov', type=int, default=250,
                        help="Cap depth per position at %(default)d")
    parser.add_argument('--minCoverage', type=int, default=0,
                        help="Minimum coverage required to generate a tensor, default: %(default)d")

    args = parser.parse_args()

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        sys.exit(1)

    OutputAlnTensor(args)

if __name__ == "__main__":
    main()
```

- avg_line_length 41.335463, max_line_length 185, alphanum_fraction 0.552713
- `qsc_code_*_quality_signal`: num_words 1466, num_chars 12938, mean_word_length 4.834243, frac_words_unique 0.186221, frac_chars_top_2grams 0.047975, frac_chars_top_3grams 0.041484, frac_chars_top_4grams 0.020742, frac_chars_dupe_5grams 0.453789, frac_chars_dupe_6grams 0.363624, frac_chars_dupe_7grams 0.308029, frac_chars_dupe_8grams 0.287569, frac_chars_dupe_9grams 0.226612, frac_chars_dupe_10grams 0.204318, frac_chars_replacement_symbols 0, frac_chars_digital 0.025523, frac_chars_whitespace 0.327717, size_file_byte 12938, num_lines 312, num_chars_line_max 186, num_chars_line_mean 41.467949, frac_chars_alphabet 0.789262, frac_chars_comments 0.025893, cate_xml_start 0, frac_lines_dupe_lines 0.301961, cate_autogen 0, frac_lines_long_string 0.011765, frac_chars_string_length 0.123541, frac_chars_long_word_length 0.002144, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0.027451, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.039216, frac_lines_simplefunc 0, score_lines_no_logic 0.078431, frac_lines_print 0.019608
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 9: setup.py**

- hexsha `26461c895574afc6d2e5c0139208bf8be78b66bf`, size 2405, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `ssjunnebo/MultiQC_NGI` @ `1ca18747256324f1ddcb9ecd68159b2114718e71`, same path for all three, licenses `["MIT"]`
- max_stars_count 3 (events 2017-02-03T14:18:30.000Z to 2019-10-24T14:57:57.000Z); max_issues_count 27 (2015-10-16T16:20:10.000Z to 2017-07-03T14:28:40.000Z); max_forks_count 8 (2016-04-20T10:33:29.000Z to 2021-03-25T09:01:58.000Z)
- content:

```python
#!/usr/bin/env python
"""
MultiQC_NGI is a plugin for MultiQC, providing additional tools which are
specific to the National Genomics Infrastructure at the Science for Life
Laboratory in Stockholm, Sweden.

For more information about NGI, see http://www.scilifelab.se/platforms/ngi/
For more information about MultiQC, see http://multiqc.info
"""

from setuptools import setup, find_packages

version = '0.6.3'

setup(
    name = 'multiqc_ngi',
    version = version,
    author = 'Phil Ewels',
    author_email = 'phil.ewels@scilifelab.se',
    description = "MultiQC plugin for the National Genomics Infrastructure @ SciLifeLab Sweden",
    long_description = __doc__,
    keywords = 'bioinformatics',
    url = 'https://github.com/ewels/MultiQC_NGI',
    download_url = 'https://github.com/ewels/MultiQC_NGI/releases',
    license = 'MIT',
    packages = find_packages(),
    include_package_data = True,
    install_requires = [
        'couchdb',
        'simplejson',
        'pyyaml',
        'requests',
        'multiqc'
    ],
    entry_points = {
        'multiqc.templates.v1': [
            'ngi = multiqc_ngi.templates.ngi',
            'genstat = multiqc_ngi.templates.genstat',
        ],
        'multiqc.cli_options.v1': [
            'disable = multiqc_ngi.cli:disable_ngi',
            'project = multiqc_ngi.cli:pid_option',
            'push_statusdb = multiqc_ngi.cli:push_flag',
            'test_db = multiqc_ngi.cli:test_db'
        ],
        'multiqc.hooks.v1': [
            'before_config = multiqc_ngi.multiqc_ngi:multiqc_ngi_config',
            'before_report_generation = multiqc_ngi.multiqc_ngi:ngi_metadata',
            'execution_finish = multiqc_ngi.multiqc_ngi:ngi_after_execution_finish'
        ]
    },
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: JavaScript',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Visualization',
    ],
)
```

- avg_line_length 34.855072, max_line_length 96, alphanum_fraction 0.63368
- `qsc_code_*_quality_signal`: num_words 251, num_chars 2405, mean_word_length 5.888446, frac_words_unique 0.521912, frac_chars_top_2grams 0.11502, frac_chars_top_3grams 0.043978, frac_chars_top_4grams 0.054127, frac_chars_dupe_5grams 0.074425, frac_chars_dupe_6grams 0.043302, frac_chars_dupe_7grams 0.043302, frac_chars_dupe_8grams 0, frac_chars_dupe_9grams 0, frac_chars_dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.00388, frac_chars_whitespace 0.249896, size_file_byte 2405, num_lines 68, num_chars_line_max 97, num_chars_line_mean 35.367647, frac_chars_alphabet 0.81541, frac_chars_comments 0.140125, cate_xml_start 0, frac_lines_dupe_lines 0.071429, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.576699, frac_chars_long_word_length 0.203398, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.017857, frac_lines_simplefunc 0, score_lines_no_logic 0.017857, frac_lines_print 0
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 10: newsplease/pipeline/extractor/extractors/beautifulsoup_extractor.py**

- hexsha `264fec7de161d7ec6768ac23aa7065cdd2a16bae`, size 1781, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `JamilHossain/news-please` @ `6c7fb001a24f0db80dd4f2cd7f3957a7fe284dcf`, same path for all three, licenses `["Apache-2.0"]`
- all star/issue/fork counts and event datetimes: null
- content:

```python
from copy import deepcopy

from bs4 import BeautifulSoup

from .abstract_extractor import AbstractExtractor
from ..article_candidate import ArticleCandidate


class ReadabilityExtractor(AbstractExtractor):
    """This class implements Readability as an article extractor. Readability is
    a subclass of Extractors and newspaper.Article.
    """

    def __init__(self):
        self.name = "beautifulsoup"

    def extract(self, item):
        """Creates an readability document and returns an ArticleCandidate containing article title and text.

        :param item: A NewscrawlerItem to parse.
        :return: ArticleCandidate containing the recovered article data.
        """
        description = None
        doc = BeautifulSoup(item['spider_response'].body,'html.parser')
        article = doc.find_all('article')
        if article:
            description = article[0].get_text()
        f = open("log.log","a")
        f.write("BeautifulSoup: \r\n")
        if description is not None:
            f.write(description)
        f.write("\r\n")
        if self._text(item) is not None:
            f.write("TEXT: " + self._text(item))
        f.close()
        text = self._text(item)
        if text is None:
            text = description

        article_candidate = ArticleCandidate()
        article_candidate.extractor = self._name
        #article_candidate.title = doc.short_title()
        article_candidate.description = description
        article_candidate.text = text
        article_candidate.topimage = self._topimage(item)
        article_candidate.author = self._author(item)
        article_candidate.publish_date = self._publish_date(item)
        article_candidate.language = self._language(item)

        return article_candidate
```

- avg_line_length 32.381818, max_line_length 109, alphanum_fraction 0.658057
- `qsc_code_*_quality_signal`: num_words 194, num_chars 1781, mean_word_length 5.886598, frac_words_unique 0.376289, frac_chars_top_2grams 0.154116, frac_chars_top_3grams 0.031524, frac_chars_top_4grams 0.017513, frac_chars_dupe_5grams 0.02627, frac_chars_dupe_6grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.001511, frac_chars_whitespace 0.256597, size_file_byte 1781, num_lines 54, num_chars_line_max 110, num_chars_line_mean 32.981481, frac_chars_alphabet 0.861027, frac_chars_comments 0.20831, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.06085, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0.060606, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.121212, frac_lines_simplefunc 0, score_lines_no_logic 0.242424, frac_lines_print 0
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 11: duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/12_features/numtrees_20/rule_16.py**

- hexsha `2650e879784fe541700fd39b00cc82a607be51e1`, size 2546, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `apcarrik/kaggle` @ `6e2d4db58017323e7ba5510bcc2598e01a4ee7bf`, same path for all three, licenses `["MIT"]`
- all star/issue/fork counts and event datetimes: null
- content (an auto-generated decision-tree rule; nesting restored from the `"depth"` markers in the comments):

```python
def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Gender, obj[4]: Age, obj[5]: Education, obj[6]: Occupation, obj[7]: Bar, obj[8]: Coffeehouse, obj[9]: Restaurant20to50, obj[10]: Direction_same, obj[11]: Distance
    # {"feature": "Restaurant20to50", "instances": 51, "metric_value": 0.9526, "depth": 1}
    if obj[9]<=1.0:
        # {"feature": "Coupon", "instances": 32, "metric_value": 0.7579, "depth": 2}
        if obj[2]<=3:
            # {"feature": "Bar", "instances": 25, "metric_value": 0.5294, "depth": 3}
            if obj[7]<=0.0:
                # {"feature": "Occupation", "instances": 13, "metric_value": 0.7793, "depth": 4}
                if obj[6]>5:
                    # {"feature": "Coffeehouse", "instances": 7, "metric_value": 0.9852, "depth": 5}
                    if obj[8]>0.0:
                        # {"feature": "Time", "instances": 5, "metric_value": 0.7219, "depth": 6}
                        if obj[1]>0:
                            return 'True'
                        elif obj[1]<=0:
                            # {"feature": "Gender", "instances": 2, "metric_value": 1.0, "depth": 7}
                            if obj[3]<=0:
                                return 'True'
                            elif obj[3]>0:
                                return 'False'
                            else: return 'False'
                        else: return 'True'
                    elif obj[8]<=0.0:
                        return 'False'
                    else: return 'False'
                elif obj[6]<=5:
                    return 'True'
                else: return 'True'
            elif obj[7]>0.0:
                return 'True'
            else: return 'True'
        elif obj[2]>3:
            # {"feature": "Time", "instances": 7, "metric_value": 0.9852, "depth": 3}
            if obj[1]>0:
                # {"feature": "Occupation", "instances": 4, "metric_value": 0.8113, "depth": 4}
                if obj[6]<=20:
                    return 'True'
                elif obj[6]>20:
                    return 'False'
                else: return 'False'
            elif obj[1]<=0:
                return 'False'
            else: return 'False'
        else: return 'False'
    elif obj[9]>1.0:
        # {"feature": "Time", "instances": 19, "metric_value": 0.9495, "depth": 2}
        if obj[1]<=3:
            # {"feature": "Coupon", "instances": 16, "metric_value": 0.8113, "depth": 3}
            if obj[2]<=2:
                # {"feature": "Occupation", "instances": 10, "metric_value": 0.469, "depth": 4}
                if obj[6]>4:
                    return 'False'
                elif obj[6]<=4:
                    # {"feature": "Age", "instances": 3, "metric_value": 0.9183, "depth": 5}
                    if obj[4]>2:
                        return 'False'
                    elif obj[4]<=2:
                        return 'True'
                    else: return 'True'
                else: return 'False'
            elif obj[2]>2:
                # {"feature": "Distance", "instances": 6, "metric_value": 1.0, "depth": 4}
                if obj[11]<=2:
                    return 'False'
                elif obj[11]>2:
                    return 'True'
                else: return 'True'
            else: return 'False'
        elif obj[1]>3:
            return 'True'
        else: return 'True'
    else: return 'False'
```

- avg_line_length 34.876712, max_line_length 243, alphanum_fraction 0.558916
- `qsc_code_*_quality_signal`: num_words 370, num_chars 2546, mean_word_length 3.805405, frac_words_unique 0.151351, frac_chars_top_2grams 0.117188, frac_chars_top_3grams 0.102273, frac_chars_top_4grams 0.102273, frac_chars_dupe_5grams 0.460938, frac_chars_dupe_6grams 0.292614, frac_chars_dupe_7grams 0.290483, frac_chars_dupe_8grams 0.147017, frac_chars_dupe_9grams 0.066761, frac_chars_dupe_10grams 0.066761, frac_chars_replacement_symbols 0, frac_chars_digital 0.09432, frac_chars_whitespace 0.225452, size_file_byte 2546, num_lines 72, num_chars_line_max 244, num_chars_line_mean 35.361111, frac_chars_alphabet 0.619675, frac_chars_comments 0.499607, cate_xml_start 0, frac_lines_dupe_lines 0.568966, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.104051, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- `qsc_codepython_*_quality_signal`: cate_ast 1, frac_lines_func_ratio 0.017241, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0, frac_lines_simplefunc 0, score_lines_no_logic 0.275862, frac_lines_print 0
- raw `qsc_*` columns: all 0 (`qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null); effective 1, hits 0
**Record 12: src/main.py**

- hexsha `2653fcf693b549d95fda5d96dd6ca0e935afb6e0`, size 1414, ext `py`, lang Python
- max_stars/max_issues/max_forks repo: `fortytw0/vizwiz` @ `36563806d9bf13c8924577141b02bd2552aa48d6`, same path for all three, licenses `["MIT"]`
- all star/issue/fork counts and event datetimes: null
- content:

```python
import os
import time
from src.models.model1 import CBD
from src.utils.train_utils import TrainGenerator
from tensorflow.keras import losses, optimizers, callbacks
train_data = TrainGenerator('train')
val_data = TrainGenerator('val')
epochs = 10
model_dir = 'models/'
log_dir = 'logs/'
cbd = CBD('models/', 'logs/')
cbd.model.summary()
print('Compiling model : ')
cbd.model.compile(loss=losses.BinaryCrossentropy(), optimizer=optimizers.Adam())
print('Succesfully compiled model')
model_ckpt = callbacks.ModelCheckpoint(os.path.join(model_dir, '{epoch:02d}-{val_loss:.2f}.hdf5'))
csv_logging = callbacks.CSVLogger(os.path.join(log_dir, 'train_{}.log'.format(time.time())))
[print(i.shape, i.dtype) for i in cbd.model.inputs]
[print(o.shape, o.dtype) for o in cbd.model.outputs]
generator = train_data.generator()
X, Y = next(generator)
print(X[0].shape)
print(X[1].shape)
print(Y.shape)
cbd.model.predict(X, batch_size=32)
# cbd.model.fit(x=train_data.generator())
# history = cbd.model.fit(x=train_data.generator(),
# batch_size=train_data.batch_size,
# steps_per_epoch=train_data.steps_per_epoch,
# callbacks= [model_ckpt, csv_logging],
# epochs=epochs,
# validation_data=val_data.generator(),
# validation_batch_size=val_data.batch_size,
# validation_steps=val_data.steps_per_epoch)
# print(type(history))
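# Note (an observation, not from the source): tf.keras rejects an explicit
# batch_size/validation_batch_size when x is a generator, so those two keyword
# arguments would need to be removed before un-commenting the fit() call above.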
| 26.185185
| 98
| 0.710042
| 198
| 1,414
| 4.893939
| 0.353535
| 0.057792
| 0.055728
| 0.024768
| 0.06192
| 0.06192
| 0.06192
| 0
| 0
| 0
| 0
| 0.009121
| 0.1471
| 1,414
| 54
| 99
| 26.185185
| 0.794362
| 0.319661
| 0
| 0
| 0
| 0
| 0.125
| 0.032563
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.28
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
265421a849a89a636ad43bddadaa5357b6a066c0
| 1,142
|
py
|
Python
|
models/PSim_net.py
|
PoChunChen1012/synthesizing_human_like_sketches
|
ec2ba76cda3f658c21b5484bd478e0d4cee52fc6
|
[
"MIT"
] | 46
|
2020-03-13T14:30:35.000Z
|
2021-12-19T11:55:31.000Z
|
models/PSim_net.py
|
PoChunChen1012/synthesizing_human_like_sketches
|
ec2ba76cda3f658c21b5484bd478e0d4cee52fc6
|
[
"MIT"
] | 2
|
2020-07-17T07:48:35.000Z
|
2020-10-16T15:35:30.000Z
|
models/PSim_net.py
|
PoChunChen1012/synthesizing_human_like_sketches
|
ec2ba76cda3f658c21b5484bd478e0d4cee52fc6
|
[
"MIT"
] | 2
|
2020-03-20T18:50:52.000Z
|
2021-12-06T04:03:01.000Z
|
import torch.nn as nn
from models.PSim_alexnet import PSim_Alexnet
import torch
from utils import utils
class PSimNet(nn.Module):
"""Pre-trained network with all channels equally weighted by default (cosine similarity)"""
def __init__(self, device=torch.device("cuda:0")):
super(PSimNet, self).__init__()
checkpoint_path = 'pretrained_models/PSim_alexnet.pt'
self.net = PSim_Alexnet(train=False)
checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
self.net.load_weights(checkpoint['state_dict'])
self.net.to(device)
# freeze network
for param in self.net.parameters():
param.requires_grad = False
self.net.eval()
def forward(self, generated, target):
outs0 = self.net.forward(generated)
outs1 = self.net.forward(target)
for kk, out0 in enumerate(outs0):
cur_score = torch.mean(1. - utils.cos_sim(out0, outs1[kk])) # mean is over batch
if kk == 0:
val = 1. * cur_score
else:
val = val + cur_score
return val
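# A minimal usage sketch (illustrative; input shapes are assumptions and the
# checkpoint under pretrained_models/ must exist on disk):
# device = torch.device("cpu")
# net = PSimNet(device=device)
# generated = torch.rand(4, 3, 224, 224, device=device)
# target = torch.rand(4, 3, 224, 224, device=device)
# score = net(generated, target)  # scalar: per-layer mean (1 - cosine similarity), summed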
| 33.588235
| 100
| 0.627846
| 146
| 1,142
| 4.753425
| 0.513699
| 0.070605
| 0.048991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011891
| 0.263573
| 1,142
| 33
| 101
| 34.606061
| 0.813317
| 0.105079
| 0
| 0
| 0
| 0
| 0.051232
| 0.032512
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2658babc747f1ce1026574efd7275014f53e2fd0
| 2,219
|
py
|
Python
|
sustainableCityManagement/main_project/Bus_API/process_bus_delays.py
|
Josh-repository/Dashboard-CityManager-
|
6287881be9fb2c6274a755ce5d75ad355346468a
|
[
"RSA-MD"
] | null | null | null |
sustainableCityManagement/main_project/Bus_API/process_bus_delays.py
|
Josh-repository/Dashboard-CityManager-
|
6287881be9fb2c6274a755ce5d75ad355346468a
|
[
"RSA-MD"
] | null | null | null |
sustainableCityManagement/main_project/Bus_API/process_bus_delays.py
|
Josh-repository/Dashboard-CityManager-
|
6287881be9fb2c6274a755ce5d75ad355346468a
|
[
"RSA-MD"
] | 1
|
2021-05-13T16:33:18.000Z
|
2021-05-13T16:33:18.000Z
|
import requests
import json
from ..Config.config_handler import read_config
class ProcessBusDelays:
def __init__(self):
self.config_vals = read_config("Bus_API")
# Get the live data of Buses(Arrival Time, Departure Time, Delay) from API and returns.
def get_data_from_bus_api(self):
url = self.config_vals["api_url"]
headers = {self.config_vals["api_key_name"]:self.config_vals["api_key_value"]}
response = requests.get(url, headers=headers)
bus_data = json.loads(response.text)
bus_trip_delays = bus_data["entity"]
return bus_trip_delays
# Structure the live data (Delays, Arrival Time, Departure Time) in required format to send the recent stop details to frontend.
def get_delay_for_trip_live(self):
bus_trip_delays=self.get_data_from_bus_api()
result_response={}
for trip in bus_trip_delays:
temp = trip["trip_update"]
if temp["trip"]["schedule_relationship"]!="CANCELED":
delay_details = temp["stop_time_update"][-1]
if "departure" not in delay_details:
temp_delay = delay_details["arrival"]
if "delay" not in temp_delay:
delay = "Not Available"
else:
delay = temp_delay["delay"]
result_response[trip["id"]] = {
"STOP_ID": delay_details["stop_id"],
"STOP_SEQUENCE": delay_details["stop_sequence"],
"DELAY": delay
}
else:
temp_delay = delay_details["departure"]
if "delay" not in temp_delay:
delay = "Not Available"
else:
delay = temp_delay["delay"]
result_response[trip["id"]] = {
"STOP_ID": delay_details["stop_id"],
"STOP_SEQUENCE": delay_details["stop_sequence"],
"DELAY": delay
}
else:
result_response[trip["id"]] = {"STATUS":"CANCELED"}
return result_response
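# A minimal usage sketch (assumes a valid "Bus_API" block in the config file):
# delays = ProcessBusDelays().get_delay_for_trip_live()
# for trip_id, info in delays.items():
#     print(trip_id, info)  # {'STOP_ID': ..., 'STOP_SEQUENCE': ..., 'DELAY': ...} or {'STATUS': 'CANCELED'}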
| 42.673077
| 133
| 0.54484
| 238
| 2,219
| 4.785714
| 0.268908
| 0.084284
| 0.073749
| 0.044776
| 0.337138
| 0.272169
| 0.272169
| 0.272169
| 0.272169
| 0.272169
| 0
| 0.000705
| 0.360523
| 2,219
| 51
| 134
| 43.509804
| 0.801973
| 0.095539
| 0
| 0.4
| 0
| 0
| 0.142786
| 0.010484
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
265aad51cd825c5cd3fa7bde6bb29b6e88376717
| 648
|
py
|
Python
|
op_interface/xgemm.py
|
LukasSlouka/TF_XNN
|
152698a5da5ed6fff9ec4337e8dca4a1a396b458
|
[
"MIT"
] | 3
|
2018-05-19T19:41:28.000Z
|
2019-03-04T12:40:32.000Z
|
op_interface/xgemm.py
|
LukasSlouka/TF_XNN
|
152698a5da5ed6fff9ec4337e8dca4a1a396b458
|
[
"MIT"
] | null | null | null |
op_interface/xgemm.py
|
LukasSlouka/TF_XNN
|
152698a5da5ed6fff9ec4337e8dca4a1a396b458
|
[
"MIT"
] | null | null | null |
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from .utils import get_xmodule
xmodule = get_xmodule()
xgemm = xmodule.xgemm
@ops.RegisterGradient("XGEMM")
def _xgemm_grad(op, grad):
"""
Gradient computation for the XGEMM
:param op: XGEMM operation that is differentiated
:param grad: gradient with respect to the output of XGEMM
:return: gradients with respect to the input matrices of the XGEMM
"""
a = op.inputs[0]
b = op.inputs[1]
grad_a = math_ops.matmul(grad, b, transpose_b=True)
grad_b = math_ops.matmul(a, grad, transpose_a=True)
return grad_a, grad_b
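# Worked check of the rule implemented above: for C = A @ B with upstream
# gradient G = dL/dC, where A is (m, k), B is (k, n) and G is (m, n),
# dL/dA = G @ B^T has shape (m, k) and dL/dB = A^T @ G has shape (k, n),
# which is exactly what the transpose_b/transpose_a matmul calls compute.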
| 28.173913
| 70
| 0.723765
| 98
| 648
| 4.653061
| 0.418367
| 0.046053
| 0.087719
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003817
| 0.191358
| 648
| 22
| 71
| 29.454545
| 0.866412
| 0.322531
| 0
| 0
| 0
| 0
| 0.012165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
265ab7b03cf9ea1a66d9ea39dcb79842ad35aa0c
| 1,004
|
py
|
Python
|
Chapter05/nlp40.py
|
gushwell/PythonNLP100
|
c67148232fc942b1f8a72e69a2a5e7a3b76e99bd
|
[
"MIT"
] | 2
|
2020-01-09T14:48:41.000Z
|
2021-11-20T20:33:46.000Z
|
Chapter05/nlp40.py
|
CLRafaelR/PythonNLP100
|
c67148232fc942b1f8a72e69a2a5e7a3b76e99bd
|
[
"MIT"
] | null | null | null |
Chapter05/nlp40.py
|
CLRafaelR/PythonNLP100
|
c67148232fc942b1f8a72e69a2a5e7a3b76e99bd
|
[
"MIT"
] | 2
|
2020-01-09T14:48:40.000Z
|
2021-11-20T20:33:59.000Z
|
# Chapter 5: Dependency parsing
import re
class Morph:
def __init__(self, surface, base, pos, pos1):
self.surface = surface
self.base = base
self.pos = pos
self.pos1 = pos1
def print(self):
print([self.surface, self.base, self.pos, self.pos1])
def analyze():
article = []
sentence = []
with open('neko.txt.cabocha', 'r', encoding='utf8') as fin:
for line in fin:
words = re.split(r'\t|,|\n| ', line)
if words[0] == '*':
continue
elif words[0] == 'EOS':
if sentence:
article.append(sentence)
sentence = []
else:
sentence.append(Morph(
words[0],
words[7],
words[1],
words[2],
))
return article
def main():
article = analyze()
for morph in article[3]:
morph.print()
if __name__ == '__main__':
main()
| 23.904762
| 63
| 0.456175
| 105
| 1,004
| 4.247619
| 0.457143
| 0.073991
| 0.067265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022184
| 0.416335
| 1,004
| 41
| 64
| 24.487805
| 0.738908
| 0.010956
| 0
| 0.057143
| 0
| 0
| 0.042381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.028571
| 0
| 0.2
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
265bf6359ab14ac666621994354747be0e20755e
| 1,096
|
py
|
Python
|
test/TestUtils.py
|
priscillaboyd/SPaT_Prediction
|
4309819e1f8d8e49f2e7fc132750102322e1504a
|
[
"Apache-2.0"
] | 7
|
2017-07-10T09:18:19.000Z
|
2022-03-22T02:47:12.000Z
|
test/TestUtils.py
|
priscillaboyd/SPaT_Prediction
|
4309819e1f8d8e49f2e7fc132750102322e1504a
|
[
"Apache-2.0"
] | 36
|
2017-06-27T15:04:27.000Z
|
2017-10-21T12:39:12.000Z
|
test/TestUtils.py
|
priscillaboyd/SPaT_Prediction
|
4309819e1f8d8e49f2e7fc132750102322e1504a
|
[
"Apache-2.0"
] | 2
|
2017-11-01T03:26:55.000Z
|
2019-06-01T20:20:31.000Z
|
import os
import shutil
import unittest
from tools.Utils import root_path, output_fields, create_folder_if_not_exists, results_folder
class TestUtils(unittest.TestCase):
def test_output_fields(self):
output_fields_needed = ['Date', 'Time', 'Result', 'Phase']
self.assertEqual(output_fields_needed, output_fields)
def test_folder_is_created_if_not_exists(self):
folder = root_path + "/temp/"
# folder does not exist
self.assertEqual(os.path.exists(folder), False)
# folder created
create_folder_if_not_exists(folder)
self.assertEqual(os.path.exists(folder), True)
# remove after test
os.rmdir(folder)
self.assertEqual(os.path.exists(folder), False)
def test_results_folder_exists(self):
create_folder_if_not_exists(results_folder)
self.assertEqual(os.path.exists(results_folder), True)
# remove folder after test
shutil.rmtree(results_folder)
self.assertEqual(os.path.exists(results_folder), False)
if __name__ == "__main__":
unittest.main()
| 28.842105
| 93
| 0.70073
| 138
| 1,096
| 5.246377
| 0.304348
| 0.107735
| 0.117403
| 0.145028
| 0.426796
| 0.395028
| 0.395028
| 0.146409
| 0.146409
| 0
| 0
| 0
| 0.205292
| 1,096
| 37
| 94
| 29.621622
| 0.831228
| 0.07208
| 0
| 0.090909
| 0
| 0
| 0.032609
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.136364
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2661e35125a9440b7263e4c2e760872c0ae79dad
| 1,713
|
py
|
Python
|
app/pages.py
|
mvasilkov/terrible-mistake
|
4f40a9719786ad3df0aea521dfeda234e3329714
|
[
"MIT"
] | null | null | null |
app/pages.py
|
mvasilkov/terrible-mistake
|
4f40a9719786ad3df0aea521dfeda234e3329714
|
[
"MIT"
] | null | null | null |
app/pages.py
|
mvasilkov/terrible-mistake
|
4f40a9719786ad3df0aea521dfeda234e3329714
|
[
"MIT"
] | null | null | null |
import html
from .models import Post, Session
TEMPLATE_BASE = '''<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>noname</title>
<link rel="stylesheet" href="/static/app.css">
</head>
<body>
%s
</body>
</html>
'''
TEMPLATE_FORM = '''
<form action="/publish" method="post" enctype="multipart/form-data">
<label for="title">Title</label>
<input type="text" name="title" id="title" placeholder=""><br>
<label for="picture">Picture</label>
<input type="file" name="picture" id="picture"><br>
<button type="submit">Publish</button>
</form>
'''
TEMPLATE_POST = '''
<div class="post">
<img src="/static/uploads/%s" title="%s"><br>
<span class="title">%s</span>
</div>
'''
TEMPLATE_POST_SUPERUSER = '''
<div class="post">
<img src="/static/uploads/%s" title="%s"><br>
<span class="title">%s</span><br>
<a href="/delete/%d" class="delete">Delete</a>
</div>
'''
def render_start_page(is_superuser: bool):
session = Session()
posts = session.query(Post).order_by(Post.id.desc()).all()
if is_superuser:
rendered_posts = ''.join(
TEMPLATE_POST_SUPERUSER
% (
post.picture,
html.escape(post.title),  # escape the title="" attribute as well
html.escape(post.title),
post.id,
)
for post in posts
)
else:
rendered_posts = ''.join(
TEMPLATE_POST
% (
post.picture,
html.escape(post.title),  # escape the title="" attribute as well
html.escape(post.title),
)
for post in posts
)
session.close()
return TEMPLATE_BASE % ''.join([TEMPLATE_FORM, rendered_posts])
| 23.465753
| 68
| 0.54174
| 196
| 1,713
| 4.642857
| 0.392857
| 0.052747
| 0.030769
| 0.032967
| 0.279121
| 0.215385
| 0.215385
| 0.215385
| 0.12967
| 0.12967
| 0
| 0.000816
| 0.28488
| 1,713
| 72
| 69
| 23.791667
| 0.742041
| 0
| 0
| 0.354839
| 0
| 0
| 0.474606
| 0.16404
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.032258
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
266247aa06f4461cb7db5adf2fdddc88aebe5a2f
| 761
|
py
|
Python
|
seqauto/management/commands/reload_illumina_flowcell_qc.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
seqauto/management/commands/reload_illumina_flowcell_qc.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
seqauto/management/commands/reload_illumina_flowcell_qc.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
"""
https://github.com/SACGF/variantgrid/issues/1601
Need to trigger reloads of bad metrics, so we die properly
"""
import logging
from django.core.management.base import BaseCommand
from seqauto.models import IlluminaFlowcellQC
from snpdb.models import DataState
class Command(BaseCommand):
def handle(self, *args, **options):
qs = IlluminaFlowcellQC.objects.exclude(data_state=DataState.ERROR)
qs = qs.filter(mean_cluster_density__isnull=True)
if not qs.exists():
logging.info("No potentially bad IlluminaFlowcellQC records")
for iqc in qs:
logging.info(f"Reloading: {iqc}")
iqc.load_from_file(None)
logging.info(f"{iqc}: {iqc.get_data_state_display()}")
| 24.548387
| 75
| 0.687254
| 95
| 761
| 5.4
| 0.705263
| 0.064327
| 0.046784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006667
| 0.211564
| 761
| 30
| 76
| 25.366667
| 0.848333
| 0.141919
| 0
| 0
| 0
| 0
| 0.154088
| 0.04717
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2669475d57fe48eb8f470f059b2de2b3e28b5b3e
| 2,864
|
py
|
Python
|
GameManager.py
|
redxdev/Matching
|
6d65933a64bf0f22a18a27c675cb8e95f4161e08
|
[
"MIT"
] | 1
|
2016-05-06T10:23:24.000Z
|
2016-05-06T10:23:24.000Z
|
GameManager.py
|
redxdev/Matching
|
6d65933a64bf0f22a18a27c675cb8e95f4161e08
|
[
"MIT"
] | null | null | null |
GameManager.py
|
redxdev/Matching
|
6d65933a64bf0f22a18a27c675cb8e95f4161e08
|
[
"MIT"
] | null | null | null |
from WordList import WordList, WordCard
import pygame
class GameManager:
def __init__(self):
self.wordList = WordList()
self.cards = []
self.badCards = (None, None)
self.goodCards = (None, None)
self.timer = 0
def startGame(self, pairCount):
self.cards = self.wordList.getRandomCards(pairCount)
def generateCardPosition(self, screenSize, i):
cardW, cardH = screenSize
cardW /= 9
cardH /= 5
return ((i % 4) * (cardW * 2) + cardW, int(i / 4) * (cardH * 2) + cardH, cardW, cardH)
def draw(self, screen):
if self.timer <= 0:
bad1, bad2 = self.badCards
if bad1 is not None and bad2 is not None:
bad1.selected = False
bad2.selected = False
self.badCards = (None, None)
good1, good2 = self.goodCards
if good1 is not None and good2 is not None:
good1.selected = False
good2.selected = False
good1.active = False
good2.active = False
self.goodCards = (None, None)
self.checkForGameEnd()
if self.timer > 0:
self.timer -= 0.08
for i in range(0, len(self.cards)):
card = self.cards[i]
if card.active:
screenW, screenH = screen.get_size()
x, y, cardW, cardH = self.generateCardPosition((screenW, screenH), i)
card.draw(screen, (x, y), (cardW, cardH), screenW / 64)
def onClick(self, screen, x, y):
bad1, bad2 = self.badCards
if bad1 is not None and bad2 is not None:
return
good1, good2 = self.goodCards
if good1 is not None and good2 is not None:
return
found = None
for i in range(0, len(self.cards)):
card = self.cards[i]
cx, cy, cw, ch = self.generateCardPosition(screen.get_size(), i)
if x >= cx and x <= cx + cw and y >= cy and y <= cy + ch:
found = card
break
if found is not None:
self.select(found)
def select(self, card):
if card.selected:
card.selected = False
return
other = None
for c in self.cards:
if c.selected:
other = c
break
if other is None:
card.selected = True
return
if other.matches(card):
card.selected = True
self.goodCards = (card, other)
self.timer = 3.0
else:
card.selected = True
self.badCards = (card, other)
self.timer = 3.0
def checkForGameEnd(self):
for c in self.cards:
if c.active:
return
self.startGame(4)
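# A minimal driver sketch (illustrative; window size and event wiring are
# assumptions, and WordList must be able to supply matchable cards):
# import pygame
# pygame.init()
# screen = pygame.display.set_mode((800, 600))
# gm = GameManager()
# gm.startGame(4)
# running = True
# while running:
#     for event in pygame.event.get():
#         if event.type == pygame.QUIT:
#             running = False
#         elif event.type == pygame.MOUSEBUTTONDOWN:
#             gm.onClick(screen, *event.pos)
#     screen.fill((0, 0, 0))
#     gm.draw(screen)
#     pygame.display.flip()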
| 28.929293
| 94
| 0.50419
| 332
| 2,864
| 4.331325
| 0.213855
| 0.031293
| 0.056328
| 0.03338
| 0.289291
| 0.25452
| 0.226704
| 0.201669
| 0.201669
| 0.201669
| 0
| 0.025761
| 0.403631
| 2,864
| 99
| 95
| 28.929293
| 0.816159
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0875
| false
| 0
| 0.025
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
266b073ce320af6c6412a8f34133f369b56ae914
| 1,687
|
py
|
Python
|
src/main.py
|
ekim1919/TDAGo
|
014db546dae3dedb4f7206288333756fc358ed8a
|
[
"MIT"
] | null | null | null |
src/main.py
|
ekim1919/TDAGo
|
014db546dae3dedb4f7206288333756fc358ed8a
|
[
"MIT"
] | null | null | null |
src/main.py
|
ekim1919/TDAGo
|
014db546dae3dedb4f7206288333756fc358ed8a
|
[
"MIT"
] | null | null | null |
from plot import *
from experiments import *
import warnings
warnings.filterwarnings("ignore") #Ignore warnings for now
import sys
import os
import argparse
def main():
parser = argparse.ArgumentParser(description='Analysis of Go Games')
parser.add_argument('dir',nargs='*')
parser.add_argument('--conn',dest="conn",action='store_true')
parser.add_argument('--avg',dest="avg",action='store_true')
parser.add_argument('--score',dest="score",action='store_true')
parser.add_argument('--anim',dest="anim",action='store_true')
args = parser.parse_args()
if args.conn:
run_conn_routine(args.dir[0])
if args.avg:
predict_avg_experi(args.dir[0],args.dir[1])
if args.score:
test_score_routine(args.dir[0])
if args.anim:
test_anim_routine(args.dir[0])
#test_save_routine(str(argv[0]))
if __name__ == '__main__':
main()
#Test routines
#Animation routines
#Persistence Diagrams?\
#Go analysis features.
#Ideas
#How to interpret H_1 points on DGMS? For example, if a point has a earlier,later birthtime vs earlier,later deathtime? How do we interpret this as properties of possible enclosed territory.
#We can now start to add points to the white/black board to model obstructions to building territory. A good idea would be to find ways to create "meaningful" boards for analysis of specific advantage properties.
#Research more about Go fighting strategies and early,late game caveats
#Create a modular framework such that you have TDA-DATA -> plot modules -> customizable plot figure -> analysis interface
#Create a caching scheme to cache all sequential computations and diagrams made. See cache-tools
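# A minimal sketch of the caching idea above (an illustration, not the project's
# implementation): memoize repeatable computations keyed on their arguments.
# from functools import lru_cache
# @lru_cache(maxsize=None)
# def cached_diagram(board_key):  # board_key: hashable board encoding (hypothetical)
#     return compute_persistence_diagram(board_key)  # hypothetical expensive helper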
| 35.145833
| 212
| 0.740368
| 247
| 1,687
| 4.939271
| 0.518219
| 0.036885
| 0.069672
| 0.051639
| 0.113115
| 0.113115
| 0
| 0
| 0
| 0
| 0
| 0.004926
| 0.157676
| 1,687
| 47
| 213
| 35.893617
| 0.853624
| 0.484884
| 0
| 0
| 0
| 0
| 0.13769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.24
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
266c5f9566178c353cbde59b14658db79e486f2e
| 236
|
py
|
Python
|
script/pipeline/setup/setup.py
|
cpuabuse/py-deployment-automation
|
aea0c48ac4c5a81f2e027c984ab65f911ad29d0d
|
[
"0BSD"
] | 1
|
2020-02-23T22:35:28.000Z
|
2020-02-23T22:35:28.000Z
|
script/pipeline/setup/setup.py
|
cpuabuse/py-deployment-automation
|
aea0c48ac4c5a81f2e027c984ab65f911ad29d0d
|
[
"0BSD"
] | null | null | null |
script/pipeline/setup/setup.py
|
cpuabuse/py-deployment-automation
|
aea0c48ac4c5a81f2e027c984ab65f911ad29d0d
|
[
"0BSD"
] | null | null | null |
"""
A file for setup.
"""
# Metadata
__author__ = "cpuabuse.com"
__copyright__ = "cpuabuse.com 2019"
__license__ = "ISC"
__version__ = "0.0.1"
__email__ = "cpuabuse@gmail.com"
__status__ = "Development"
# Minimum python version is 3.6
| 18.153846
| 35
| 0.711864
| 30
| 236
| 4.8
| 0.8
| 0.152778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044554
| 0.144068
| 236
| 13
| 36
| 18.153846
| 0.668317
| 0.241525
| 0
| 0
| 0
| 0
| 0.385965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
266efdf5f618ad871cc4108d4a51b575ba968601
| 6,392
|
py
|
Python
|
Kinkajou/python/admin/opencode.py
|
app858216291-github/Kinkajou-shop
|
ee1e841e26407b1dcbd14601e5fe34b6422eba29
|
[
"MIT"
] | null | null | null |
Kinkajou/python/admin/opencode.py
|
app858216291-github/Kinkajou-shop
|
ee1e841e26407b1dcbd14601e5fe34b6422eba29
|
[
"MIT"
] | null | null | null |
Kinkajou/python/admin/opencode.py
|
app858216291-github/Kinkajou-shop
|
ee1e841e26407b1dcbd14601e5fe34b6422eba29
|
[
"MIT"
] | null | null | null |
from admin.upload import FileUploadField, ImageUploadField
from flask_babelex import Babel
from flask_admin._compat import urljoin
from flask import redirect
from flask_admin._compat import quote
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin import Admin, BaseView, expose
from flask_admin.babel import gettext, lazy_gettext
from flask import flash, redirect, abort, request, send_file
from flask_admin import form, helpers
import os.path as op
import flask_login as login
from wtforms.widgets import html_params
from common import aliyun, tools
from setting import Aliyun
allowed_file = lambda filename: '.' in filename and filename.rsplit('.', 1)[1] in set(
['png', 'jpg', 'jpeg', 'gif', 'bmp'])
def uploadFile(f):
if f and allowed_file(f.filename):
filename = aliyun.upload(f, 'product')
r = tools.shopUtil.docManger(f, "https://"+Aliyun.bucketName+".oss-cn-beijing.aliyuncs.com/product/"+filename, "https://"+Aliyun.bucketName+".oss-cn-beijing.aliyuncs.com/product/")
return r
else:
return "filename is null"
class MXFileAdmin(FileAdmin):
def is_accessible(self):
return login.current_user.is_authenticated and login.current_user.username == 'admin'
def _save_form_files(self, directory, path, form):
# note: FileAdmin._save_form_files is overridden entirely; the base implementation is not invoked
filename = self._separator.join([directory, form.upload.data.filename])
if self.storage.path_exists(filename):
secure_name = self._separator.join([path, form.upload.data.filename])
raise Exception(gettext('File "%(name)s" already exists.',
name=secure_name))
else:
self.save_file(filename, form.upload.data)
self.on_file_upload(directory, path, filename)
@expose('/download/<path:path>')
def download(self, path=None):
"""
Download view method.
:param path:
File path.
"""
if not self.can_download:
abort(404)
base_path, directory, path = self._normalize_path(path)
# backward compatibility with base_url
base_url = self.get_base_url()
if base_url:
base_url = urljoin(self.get_url('.index_view'), base_url)
path=path.replace('\\', '/')
print("------1------")
print(base_url)
print(path)
return redirect(urljoin(quote(base_url), quote(path)))
directory=directory.replace('\\', '/')
print("-------2-----")
print(directory)
return self.storage.send_file(directory)
@expose('/rename/', methods=('GET', 'POST'))
def rename(self):
"""
Rename view method
"""
form = self.name_form()
path = form.path.data
if path:
base_path, full_path, path = self._normalize_path(path)
return_url = self._get_dir_url('.index_view', op.dirname(path))
else:
return redirect(self.get_url('.index_view'))
if not self.can_rename:
flash(gettext('Renaming is disabled.'), 'error')
return redirect(return_url)
if not self.is_accessible_path(path):
flash(gettext('Permission denied.'), 'error')
return redirect(self._get_dir_url('.index_view'))
if not self.storage.path_exists(full_path):
flash(gettext('Path does not exist.'), 'error')
return redirect(return_url)
if self.validate_form(form):
try:
dir_base = op.dirname(full_path)
filename = form.name.data
# print(fi)
self.storage.rename_path(full_path, self._separator.join([dir_base, filename]))
self.on_rename(full_path, dir_base, filename)
flash(gettext('Successfully renamed "%(src)s" to "%(dst)s"',
src=op.basename(path),
dst=filename), 'success')
except Exception as ex:
flash(gettext('Failed to rename: %(error)s', error=ex), 'error')
return redirect(return_url)
else:
helpers.flash_errors(form, message='Failed to rename: %(error)s')
if self.rename_modal and request.args.get('modal'):
template = self.rename_modal_template
else:
template = self.rename_template
return self.render(template, form=form, path=op.dirname(path),
name=op.basename(path), dir_url=return_url,
header_text=gettext('Rename %(name)s',
name=op.basename(path)))
from flask_admin.helpers import get_url
from flask_admin._compat import string_types, urljoin
class MxImageUploadField(ImageUploadField):
def _save_file(self, data, filename):
path = self._get_path(filename)
data.seek(0)
filename=uploadFile(data)
return filename
# def __call__(self, field, **kwargs):
# kwargs.setdefault('id', field.id)
# kwargs.setdefault('name', field.name)
#
# args = {
# 'text': html_params(type='hidden',
# value=field.data,
# name=field.name),
# 'file': html_params(type='file',
# **kwargs),
# 'marker': '_%s-delete' % field.name
# }
#
# if field.data and isinstance(field.data, string_types):
# url = self.get_url(field)
# args['image'] = html_params(src=url)
#
# template = self.data_template
# else:
# template = self.empty_template
# print(template % args)
# return Markup(template % args)
def get_url(self, field):
if field.thumbnail_size:
filename = field.thumbnail_fn(field.data)
else:
filename = field.data
if field.url_relative_path:
filename = urljoin(field.url_relative_path, filename)
return get_url(field.endpoint, filename=filename)
| 37.380117
| 184
| 0.571339
| 706
| 6,392
| 5.007082
| 0.247875
| 0.028006
| 0.031683
| 0.016973
| 0.145403
| 0.066761
| 0.028854
| 0.028854
| 0.028854
| 0
| 0
| 0.001832
| 0.316959
| 6,392
| 170
| 185
| 37.6
| 0.807833
| 0.125
| 0
| 0.096491
| 0
| 0
| 0.090584
| 0.017854
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061404
| false
| 0
| 0.149123
| 0
| 0.359649
| 0.04386
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
267010ecd5efb0c3498de085c2712903abc79773
| 4,137
|
py
|
Python
|
liminal/runners/airflow/operators/kubernetes_pod_operator_with_input_output.py
|
aviemzur/incubator-liminal
|
88174a6fe519f9a6052f6e5d366a37a88a915ee4
|
[
"Apache-2.0"
] | 1
|
2021-03-24T08:23:03.000Z
|
2021-03-24T08:23:03.000Z
|
liminal/runners/airflow/operators/kubernetes_pod_operator_with_input_output.py
|
liorsav/incubator-liminal
|
88174a6fe519f9a6052f6e5d366a37a88a915ee4
|
[
"Apache-2.0"
] | null | null | null |
liminal/runners/airflow/operators/kubernetes_pod_operator_with_input_output.py
|
liorsav/incubator-liminal
|
88174a6fe519f9a6052f6e5d366a37a88a915ee4
|
[
"Apache-2.0"
] | null | null | null |
import json
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
def _split_list(seq, num):
k, m = divmod(len(seq), num)
return list(
(seq[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(num))
)
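# Worked check of the divmod-based split above:
# _split_list(list(range(10)), 3) -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
# i.e. the first (len(seq) % num) chunks each receive one extra element.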
_IS_SPLIT_KEY = 'is_split'
class PrepareInputOperator(KubernetesPodOperator):
def __init__(self,
input_type=None,
input_path=None,
split_input=False,
executors=1,
*args,
**kwargs):
namespace = kwargs.pop('namespace')
image = kwargs.pop('image')
name = kwargs.pop('name')
super().__init__(
namespace=namespace,
image=image,
name=name,
*args,
**kwargs)
self.input_type = input_type
self.input_path = input_path
self.executors = executors
self.split_input = split_input
def execute(self, context):
input_dict = {}
self.log.info(f'config type: {self.input_type}')
ti = context['task_instance']
if self.input_type:
if self.input_type == 'file':
input_dict = {} # future feature: return config from file
elif self.input_type == 'sql':
input_dict = {} # future feature: return from sql config
elif self.input_type == 'task':
self.log.info(self.input_path)
input_dict = ti.xcom_pull(task_ids=self.input_path)
elif self.input_type == 'static':
input_dict = json.loads(self.input_path)
else:
raise ValueError(f'Unknown config type: {self.input_type}')
run_id = context['dag_run'].run_id
print(f'run_id = {run_id}')
if input_dict:
self.log.info(f'Generated input: {input_dict}')
if self.split_input:
input_splits = _split_list(input_dict, self.executors)
numbered_splits = list(
zip(range(len(input_splits)), input_splits)
)
self.log.info(numbered_splits)
ti.xcom_push(key=_IS_SPLIT_KEY, value=True)
return input_splits
else:
return input_dict
else:
return {}
def run_pod(self, context):
return super().execute(context)
class KubernetesPodOperatorWithInputAndOutput(KubernetesPodOperator):
"""
TODO: pydoc
"""
_LIMINAL_INPUT_ENV_VAR = 'LIMINAL_INPUT'
def __init__(self,
task_split,
input_task_id=None,
*args,
**kwargs):
namespace = kwargs.pop('namespace')
image = kwargs.pop('image')
name = kwargs.pop('name')
super().__init__(
namespace=namespace,
image=image,
name=name,
*args,
**kwargs)
self.input_task_id = input_task_id
self.task_split = task_split
def execute(self, context):
task_input = {}
if self.input_task_id:
ti = context['task_instance']
self.log.info(f'Fetching input for task {self.task_split}.')
task_input = ti.xcom_pull(task_ids=self.input_task_id)
is_split = ti.xcom_pull(task_ids=self.input_task_id, key=_IS_SPLIT_KEY)
self.log.info(f'is_split = {is_split}')
if is_split:
self.log.info(f'Fetching split {self.task_split} of input.')
task_input = task_input[self.task_split]
if task_input:
self.log.info(f'task input = {task_input}')
self.env_vars.update({self._LIMINAL_INPUT_ENV_VAR: json.dumps(task_input)})
else:
self.env_vars.update({self._LIMINAL_INPUT_ENV_VAR: '{}'})
self.log.info(f'Empty input for task {self.task_split}.')
run_id = context['dag_run'].run_id
print(f'run_id = {run_id}')
self.env_vars.update({'run_id': run_id})
return super().execute(context)
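# A minimal wiring sketch (illustrative; the DAG name and operator arguments are
# assumptions, not from the source):
# from airflow import DAG
# with DAG('liminal_example', schedule_interval=None) as dag:
#     prep = PrepareInputOperator(task_id='prep', namespace='default',
#                                 image='my-image', name='prep',
#                                 input_type='static', input_path='[1, 2, 3]',
#                                 split_input=True, executors=2)
#     run0 = KubernetesPodOperatorWithInputAndOutput(task_id='run0', namespace='default',
#                                                    image='my-image', name='run0',
#                                                    task_split=0, input_task_id='prep')
#     prep >> run0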
| 28.93007
| 87
| 0.555958
| 483
| 4,137
| 4.482402
| 0.192547
| 0.07067
| 0.054042
| 0.038799
| 0.366744
| 0.282679
| 0.240185
| 0.228176
| 0.228176
| 0.162587
| 0
| 0.001095
| 0.337684
| 4,137
| 142
| 88
| 29.133803
| 0.789051
| 0.021997
| 0
| 0.401961
| 0
| 0
| 0.104741
| 0
| 0
| 0
| 0
| 0.007042
| 0
| 1
| 0.058824
| false
| 0
| 0.019608
| 0.009804
| 0.166667
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2671a284c0ed4b2cd6f0faa0d1f0db0edd38447c
| 27,696
|
py
|
Python
|
reV/handlers/collection.py
|
pjstanle/reV
|
c22c620749747022a65d2a98a99beef804849ee6
|
[
"BSD-3-Clause"
] | 37
|
2020-03-04T05:24:23.000Z
|
2022-02-24T14:39:49.000Z
|
reV/handlers/collection.py
|
pjstanle/reV
|
c22c620749747022a65d2a98a99beef804849ee6
|
[
"BSD-3-Clause"
] | 174
|
2020-03-03T18:18:53.000Z
|
2022-03-08T22:00:40.000Z
|
reV/handlers/collection.py
|
pjstanle/reV
|
c22c620749747022a65d2a98a99beef804849ee6
|
[
"BSD-3-Clause"
] | 16
|
2020-08-10T13:43:36.000Z
|
2021-11-19T22:43:36.000Z
|
# -*- coding: utf-8 -*-
"""
Base class to handle collection of profiles and means across multiple .h5 files
"""
import logging
import numpy as np
import os
import sys
import psutil
import pandas as pd
import time
import shutil
from warnings import warn
from reV.handlers.outputs import Outputs
from reV.utilities.exceptions import (CollectionRuntimeError,
CollectionValueError,
CollectionWarning)
from reV.utilities import log_versions
from rex.utilities.loggers import log_mem
logger = logging.getLogger(__name__)
class DatasetCollector:
"""
Class to collect single datasets from several source files into a final
output file.
"""
def __init__(self, h5_file, source_files, gids, dset_in, dset_out=None,
mem_util_lim=0.7):
"""
Parameters
----------
h5_file : str
Path to h5_file into which dataset is to be collected
source_files : list
List of source filepaths.
gids : list
list of gids to be collected
dset_in : str
Dataset to collect
dset_out : str
Dataset into which collected data is to be written
mem_util_lim : float
Memory utilization limit (fractional). This sets how many sites
will be collected at a time.
"""
self._h5_file = h5_file
self._source_files = source_files
self._gids = gids
self._dset_in = dset_in
if dset_out is None:
dset_out = dset_in
self._dset_out = dset_out
tot_mem = psutil.virtual_memory().total
self._mem_avail = mem_util_lim * tot_mem
self._attrs, self._axis, self._site_mem_req = self._pre_collect()
logger.debug('Available memory for collection is {} bytes'
.format(self._mem_avail))
logger.debug('Site memory requirement is: {} bytes'
.format(self._site_mem_req))
@staticmethod
def parse_meta(h5_file):
"""
Extract and convert meta data from a rec.array to pandas.DataFrame
Parameters
----------
h5_file : str
Path to .h5 file from which meta is to be parsed
Returns
-------
meta : pandas.DataFrame
Portion of meta data corresponding to sites in h5_file
"""
with Outputs(h5_file, mode='r') as f:
meta = f.meta
return meta
@staticmethod
def _get_site_mem_req(shape, dtype, n=100):
"""Get the memory requirement to collect one site from a dataset of
shape and dtype
Parameters
----------
shape : tuple
Shape of dataset to be collected (n_time, n_sites)
dtype : np.dtype
Numpy dtype of dataset (disk dtype)
n : int
Number of sites to prototype the memory req with.
Returns
-------
site_mem : float
Memory requirement in bytes for one site from a dataset with
shape and dtype.
"""
m = 1
if len(shape) > 1:
m = shape[0]
site_mem = sys.getsizeof(np.ones((m, n), dtype=dtype)) / n
return site_mem
def _pre_collect(self):
"""Run a pre-collection check and get relevant dset attrs.
Returns
-------
attrs : dict
Dictionary of dataset attributes for the dataset being collected.
axis : int
Axis size (1 is 1D array, 2 is 2D array)
site_mem_req : float
Memory requirement in bytes to collect a single site from one
source file.
"""
with Outputs(self._source_files[0], mode='r') as f:
shape, dtype, chunks = f.get_dset_properties(self._dset_in)
attrs = f.get_attrs(self._dset_in)
axis = len(f[self._dset_in].shape)
with Outputs(self._h5_file, mode='a') as f:
if axis == 1:
dset_shape = (len(f),)
elif axis == 2:
if 'time_index' in f.datasets:
dset_shape = f.shape
else:
m = ("'time_index' must be combined "
"before profiles can be "
"combined.")
logger.error(m)
raise CollectionRuntimeError(m)
else:
m = ('Cannot collect dset "{}" with '
'axis {}'.format(self._dset_in, axis))
logger.error(m)
raise CollectionRuntimeError(m)
if self._dset_out not in f.datasets:
f._create_dset(self._dset_out, dset_shape, dtype,
chunks=chunks, attrs=attrs)
site_mem_req = self._get_site_mem_req(shape, dtype)
return attrs, axis, site_mem_req
@staticmethod
def _get_gid_slice(gids_out, source_gids, fn_source):
"""Find the site slice that the chunked set of source gids belongs to.
Parameters
----------
gids_out : list
List of resource GIDS in the final output meta data f_out
source_gids : list
List of resource GIDS in one chunk of source data.
fn_source : str
Source filename for warning printout.
Returns
-------
site_slice : slice | np.ndarray
Slice in the final output file to write data to from source gids.
If gids in destination file are non-sequential, a boolean array of
indexes is returned and a warning is printed.
"""
locs = np.where(np.isin(gids_out, source_gids))[0]
if not any(locs):
e = ('DatasetCollector could not locate source gids in '
'output gids. \n\t Source gids: {} \n\t Output gids: {}'
.format(source_gids, gids_out))
logger.error(e)
raise CollectionRuntimeError(e)
sequential_locs = np.arange(locs.min(), locs.max() + 1)
if not len(locs) == len(sequential_locs):
w = ('GID indices for source file "{}" are not '
'sequential in destination file!'.format(fn_source))
logger.warning(w)
warn(w, CollectionWarning)
site_slice = np.isin(gids_out, source_gids)
else:
site_slice = slice(locs.min(), locs.max() + 1)
return site_slice
def _get_source_gid_chunks(self, f_source):
"""Split the gids from the f_source into chunks based on memory req.
Parameters
----------
f_source : reV.handlers.outputs.Output
Source file handler
Returns
-------
all_source_gids : list
List of all source gids to be collected
source_gid_chunks : list
List of source gid chunks to collect.
"""
all_source_gids = f_source.get_meta_arr('gid')
mem_req = (len(all_source_gids) * self._site_mem_req)
if mem_req > self._mem_avail:
n = 2
while True:
source_gid_chunks = np.array_split(all_source_gids, n)
new_mem_req = (len(source_gid_chunks[0]) * self._site_mem_req)
if new_mem_req > self._mem_avail:
n += 1
else:
logger.debug('Collecting dataset "{}" in {} chunks with '
'an estimated {} bytes in each chunk '
'(mem avail limit is {} bytes).'
.format(self._dset_in, n, new_mem_req,
self._mem_avail))
break
else:
source_gid_chunks = [all_source_gids]
return all_source_gids, source_gid_chunks
def _collect_chunk(self, all_source_gids, source_gids, f_out,
f_source, fp_source):
"""Collect one set of source gids from f_source to f_out.
Parameters
----------
all_source_gids : list
List of all source gids to be collected
source_gids : np.ndarray | list
Source gids to be collected
f_out : reV.handlers.outputs.Output
Output file handler
f_source : reV.handlers.outputs.Output
Source file handler
fp_source : str
Source filepath
"""
out_slice = self._get_gid_slice(self._gids, source_gids,
os.path.basename(fp_source))
source_i0 = np.where(all_source_gids == np.min(source_gids))[0][0]
source_i1 = np.where(all_source_gids == np.max(source_gids))[0][0]
source_slice = slice(source_i0, source_i1 + 1)
source_indexer = np.isin(source_gids, self._gids)
logger.debug('\t- Running low mem collection of "{}" for '
'output site {} from source site {} and file : {}'
.format(self._dset_in, out_slice, source_slice,
os.path.basename(fp_source)))
try:
if self._axis == 1:
data = f_source[self._dset_in, source_slice]
if not all(source_indexer):
data = data[source_indexer]
f_out[self._dset_out, out_slice] = data
elif self._axis == 2:
data = f_source[self._dset_in, :, source_slice]
if not all(source_indexer):
data = data[:, source_indexer]
f_out[self._dset_out, :, out_slice] = data
except Exception as e:
logger.exception('Failed to collect source file {}. '
'Raised the following exception:\n{}'
.format(os.path.basename(fp_source), e))
raise e
def _collect(self):
"""Simple & robust serial collection optimized for low memory usage."""
with Outputs(self._h5_file, mode='a') as f_out:
for fp in self._source_files:
with Outputs(fp, mode='r') as f_source:
x = self._get_source_gid_chunks(f_source)
all_source_gids, source_gid_chunks = x
for source_gids in source_gid_chunks:
self._collect_chunk(all_source_gids, source_gids,
f_out, f_source, fp)
log_mem(logger, log_level='DEBUG')
@classmethod
def collect_dset(cls, h5_file, source_files, gids, dset_in, dset_out=None,
mem_util_lim=0.7):
"""Collect a single dataset from a list of source files into a final
output file.
Parameters
----------
h5_file : str
Path to h5_file into which dataset is to be collected
source_files : list
List of source filepaths.
gids : list
list of gids to be collected
dset_in : str
Dataset to collect
dset_out : str
Dataset into which collected data is to be written
mem_util_lim : float
Memory utilization limit (fractional). This sets how many sites
will be collected at a time.
"""
dc = cls(h5_file, source_files, gids, dset_in, dset_out=dset_out,
mem_util_lim=mem_util_lim)
dc._collect()
class Collector:
"""
Class to handle the collection and combination of .h5 files
"""
def __init__(self, h5_file, h5_dir, project_points, file_prefix=None,
clobber=False):
"""
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
project_points : str | slice | list | pandas.DataFrame | None
Project points that correspond to the full collection of points
contained in the .h5 files to be collected. None if points list is
to be ignored (collect all data in h5_files)
file_prefix : str
.h5 file prefix, if None collect all files in h5_dir
clobber : bool
Flag to purge .h5 file if it already exists
"""
log_versions(logger)
if clobber:
if os.path.isfile(h5_file):
warn('{} already exists and is being replaced'.format(h5_file),
CollectionWarning)
os.remove(h5_file)
self._h5_out = h5_file
ignore = os.path.basename(self._h5_out)
self._h5_files = self.find_h5_files(h5_dir, file_prefix=file_prefix,
ignore=ignore)
if project_points is not None:
self._gids = self.parse_project_points(project_points)
else:
self._gids = self.parse_gids_from_files(self._h5_files)
self.combine_meta()
@staticmethod
def find_h5_files(h5_dir, file_prefix=None, ignore=None):
"""
Search h5_dir for .h5 file, return sorted
If file_prefix is not None, only return .h5 files with given prefix
Parameters
----------
h5_dir : str
Root directory to search
file_prefix : str
Prefix for .h5 file in h5_dir, if None return all .h5 files
ignore : str | list | NoneType
File name(s) to ignore.
"""
if not isinstance(ignore, list):
ignore = [ignore]
h5_files = []
logger.debug('Looking for source files in {}'.format(h5_dir))
for file in os.listdir(h5_dir):
if file.endswith('.h5'):
if file_prefix is not None:
if file.startswith(file_prefix) and file not in ignore:
logger.debug('\t- Found source file to collect: {}'
.format(file))
h5_files.append(os.path.join(h5_dir, file))
elif file not in ignore:
logger.debug('\t- Found source file to collect: {}'
.format(file))
h5_files.append(os.path.join(h5_dir, file))
h5_files = sorted(h5_files)
logger.debug('Final list of {} source files: {}'
.format(len(h5_files), h5_files))
return h5_files
@staticmethod
def parse_project_points(project_points):
"""
Extract resource gids from project points
Parameters
----------
project_points : str | slice | list | pandas.DataFrame
Reference to resource points that were processed and need
collecting
Returns
-------
gids : list
List of resource gids that are to be collected
"""
if isinstance(project_points, str):
gids = pd.read_csv(project_points)['gid'].values
elif isinstance(project_points, pd.DataFrame):
gids = project_points['gid'].values
elif isinstance(project_points, list):
gids = project_points
elif isinstance(project_points, slice):
s = project_points.start
if s is None:
s = 0
e = project_points.stop
if e is None:
m = "slice must be bounded!"
logger.error(m)
raise CollectionValueError(m)
step = project_points.step
if step is None:
step = 1
gids = list(range(s, e, step))
else:
m = 'Cannot parse project_points'
logger.error(m)
raise CollectionValueError(m)
gids = sorted([int(g) for g in gids])
return gids
@staticmethod
def parse_gids_from_files(h5_files):
"""
Extract a sorted gid list from a list of h5_files.
Parameters
----------
h5_files : list
List of h5 files to be collected.
Returns
-------
gids : list
List of sorted resource gids to be collected.
"""
meta = [DatasetCollector.parse_meta(file) for file in h5_files]
meta = pd.concat(meta, axis=0)
gids = list(set(meta['gid'].values.tolist()))
gids = sorted([int(g) for g in gids])
return gids
def get_dset_shape(self, dset_name):
"""
Extract the dataset shape from the first file in the collection list.
Parameters
----------
dset_name : str
Dataset to be collected whose shape is in question.
Returns
-------
shape : tuple
Dataset shape tuple.
"""
with Outputs(self.h5_files[0], mode='r') as f:
shape = f.shapes[dset_name]
return shape
@property
def h5_files(self):
"""
List of .h5 files to be combined
Returns
-------
list
"""
return self._h5_files
@property
def gids(self):
"""
List of gids corresponding to all sites to be combined
Returns
-------
list
"""
return self._gids
def combine_time_index(self):
"""
Extract time_index, None if not present in .h5 files
"""
with Outputs(self.h5_files[0], mode='r') as f:
if 'time_index' in f.datasets:
time_index = f.time_index
attrs = f.get_attrs('time_index')
else:
time_index = None
warn("'time_index' was not processed as it is not "
"present in .h5 files to be combined.",
CollectionWarning)
if time_index is not None:
with Outputs(self._h5_out, mode='a') as f:
f._set_time_index('time_index', time_index, attrs=attrs)
def _check_meta(self, meta):
"""
Check combined meta against self._gids to make sure all sites
are present in self._h5_files
Parameters
----------
meta : pandas.DataFrame
DataFrame of combined meta from all files in self._h5_files
Returns
-------
meta : pandas.DataFrame
DataFrame of combined meta with duplicate gids dropped (a warning is
raised when duplicates are found) and rows sorted by gid.
"""
meta_gids = meta['gid'].values
gids = np.array(self.gids)
missing = gids[~np.isin(gids, meta_gids)]
if any(missing):
# TODO: Write missing gids to disk to allow for automated re-run
m = "gids: {} are missing".format(missing)
logger.error(m)
raise CollectionRuntimeError(m)
if len(set(meta_gids)) != len(meta):
m = ('Meta of length {} has {} unique gids! '
'There are duplicate gids in the source file list: {}'
.format(len(meta), len(set(meta_gids)), self.h5_files))
logger.warning(m)
warn(m, CollectionWarning)
meta = meta.drop_duplicates(subset='gid', keep='last')
meta = meta.sort_values('gid')
meta = meta.reset_index(drop=True)
return meta
def _purge_chunks(self):
"""Remove the chunked files (after collection). Will not delete files
if any datasets were not collected."""
with Outputs(self._h5_out, mode='r') as out:
dsets_collected = out.datasets
with Outputs(self.h5_files[0], mode='r') as out:
dsets_source = out.datasets
missing = [d for d in dsets_source if d not in dsets_collected]
if any(missing):
w = ('Not purging chunked output files. These dsets '
'have not been collected: {}'.format(missing))
warn(w, CollectionWarning)
logger.warning(w)
else:
for fpath in self.h5_files:
os.remove(fpath)
def _move_chunks(self, sub_dir):
"""Move the chunked files to a sub dir (after collection).
Parameters
----------
sub_dir : str | None
Sub directory name to move chunks to. None to not move files.
"""
if sub_dir is not None:
for fpath in self.h5_files:
base_dir, fn = os.path.split(fpath)
new_dir = os.path.join(base_dir, sub_dir)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
new_fpath = os.path.join(new_dir, fn)
shutil.move(fpath, new_fpath)
def combine_meta(self):
"""
Load and combine meta data from .h5
"""
with Outputs(self._h5_out, mode='a') as f:
if 'meta' in f.datasets:
self._check_meta(f.meta)
else:
with Outputs(self.h5_files[0], mode='r') as f_in:
global_attrs = f_in.get_attrs()
meta_attrs = f_in.get_attrs('meta')
for key, value in global_attrs.items():
f._h5.attrs[key] = value
meta = [DatasetCollector.parse_meta(file)
for file in self.h5_files]
meta = pd.concat(meta, axis=0)
meta = self._check_meta(meta)
logger.info('Writing meta data with shape {}'
.format(meta.shape))
f._set_meta('meta', meta, attrs=meta_attrs)
@classmethod
def collect(cls, h5_file, h5_dir, project_points, dset_name, dset_out=None,
file_prefix=None, mem_util_lim=0.7):
"""
Collect dataset from h5_dir to h5_file
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
project_points : str | slice | list | pandas.DataFrame | None
Project points that correspond to the full collection of points
contained in the .h5 files to be collected. None if points list is
to be ignored (collect all data in h5_files)
dset_name : str
Dataset to be collected. If source shape is 2D, time index will be
collected.
dset_out : str
Dataset to collect means into
file_prefix : str
.h5 file prefix, if None collect all files on h5_dir
mem_util_lim : float
Memory utilization limit (fractional). This sets how many sites
will be collected at a time.
"""
if file_prefix is None:
h5_files = "*.h5"
else:
h5_files = "{}*.h5".format(file_prefix)
logger.info('Collecting dataset "{}" from {} files in {} to {}'
.format(dset_name, h5_files, h5_dir, h5_file))
ts = time.time()
clt = cls(h5_file, h5_dir, project_points, file_prefix=file_prefix,
clobber=True)
logger.debug("\t- 'meta' collected")
dset_shape = clt.get_dset_shape(dset_name)
if len(dset_shape) > 1:
clt.combine_time_index()
logger.debug("\t- 'time_index' collected")
DatasetCollector.collect_dset(clt._h5_out, clt.h5_files, clt.gids,
dset_name, dset_out=dset_out,
mem_util_lim=mem_util_lim)
logger.debug("\t- Collection of '{}' complete".format(dset_name))
tt = (time.time() - ts) / 60
logger.info('Collection complete')
logger.debug('\t- Collection took {:.4f} minutes'
.format(tt))
@classmethod
def add_dataset(cls, h5_file, h5_dir, dset_name, dset_out=None,
file_prefix=None, mem_util_lim=0.7):
"""
Collect and add dataset to h5_file from h5_dir
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
dset_name : str
Dataset to be collected. If source shape is 2D, time index will be
collected.
dset_out : str
Dataset to collect means into
file_prefix : str
.h5 file prefix, if None collect all files on h5_dir
mem_util_lim : float
Memory utilization limit (fractional). This sets how many sites
will be collected at a time.
"""
if file_prefix is None:
h5_files = "*.h5"
else:
h5_files = "{}*.h5".format(file_prefix)
logger.info('Collecting "{}" from {} files in {} and adding to {}'
.format(dset_name, h5_files, h5_dir, h5_file))
ts = time.time()
with Outputs(h5_file, mode='r') as f:
points = f.meta
clt = cls(h5_file, h5_dir, points, file_prefix=file_prefix)
dset_shape = clt.get_dset_shape(dset_name)
if len(dset_shape) > 1:
clt.combine_time_index()
logger.debug("\t- 'time_index' collected")
DatasetCollector.collect_dset(clt._h5_out, clt.h5_files, clt.gids,
dset_name, dset_out=dset_out,
mem_util_lim=mem_util_lim)
logger.debug("\t- Collection of '{}' complete".format(dset_name))
tt = (time.time() - ts) / 60
logger.info('{} collected'.format(dset_name))
logger.debug('\t- Collection took {:.4f} minutes'
.format(tt))
@classmethod
def purge_chunks(cls, h5_file, h5_dir, project_points, file_prefix=None):
"""
Purge (remove) chunked files from h5_dir (after collection).
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
project_points : str | slice | list | pandas.DataFrame
Project points that correspond to the full collection of points
contained in the .h5 files to be collected
file_prefix : str
.h5 file prefix, if None collect all files on h5_dir
"""
clt = cls(h5_file, h5_dir, project_points, file_prefix=file_prefix)
clt._purge_chunks()
logger.info('Purged chunk files from {}'.format(h5_dir))
@classmethod
def move_chunks(cls, h5_file, h5_dir, project_points, file_prefix=None,
sub_dir='chunk_files'):
"""
Move chunked files from h5_dir (after collection) to subdir.
Parameters
----------
h5_file : str
Path to .h5 file into which data will be collected
h5_dir : str
Root directory containing .h5 files to combine
project_points : str | slice | list | pandas.DataFrame
Project points that correspond to the full collection of points
contained in the .h5 files to be collected
file_prefix : str
.h5 file prefix, if None collect all files on h5_dir
sub_dir : str | None
Sub directory name to move chunks to. None to not move files.
"""
clt = cls(h5_file, h5_dir, project_points, file_prefix=file_prefix)
clt._move_chunks(sub_dir)
logger.info('Moved chunk files from {} to sub_dir: {}'
.format(h5_dir, sub_dir))
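# A minimal usage sketch (paths and dataset name are illustrative assumptions):
# Collector.collect('collected.h5', './chunks', 'project_points.csv', 'cf_mean',
#                   file_prefix='gen_2012')
# Collector.move_chunks('collected.h5', './chunks', 'project_points.csv',
#                       file_prefix='gen_2012', sub_dir='chunk_files')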
| 35.326531
| 79
| 0.55239
| 3,405
| 27,696
| 4.306314
| 0.105433
| 0.026734
| 0.015959
| 0.010434
| 0.488031
| 0.447521
| 0.400668
| 0.369229
| 0.347473
| 0.328514
| 0
| 0.012237
| 0.362688
| 27,696
| 783
| 80
| 35.371648
| 0.818481
| 0.298563
| 0
| 0.293801
| 0
| 0
| 0.09744
| 0
| 0
| 0
| 0
| 0.001277
| 0
| 1
| 0.067385
| false
| 0
| 0.03504
| 0
| 0.140162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
267391fe6f529c4f578f96fdbf6f647ec6e040d3
| 964
|
py
|
Python
|
utility/templatetags/to_price.py
|
hosseinmoghimi/waiter
|
9f5f332b6f252a29aa14f67655b423fd9c40fba3
|
[
"MIT"
] | 1
|
2021-12-02T11:16:53.000Z
|
2021-12-02T11:16:53.000Z
|
utility/templatetags/to_price.py
|
hosseinmoghimi/waiter
|
9f5f332b6f252a29aa14f67655b423fd9c40fba3
|
[
"MIT"
] | null | null | null |
utility/templatetags/to_price.py
|
hosseinmoghimi/waiter
|
9f5f332b6f252a29aa14f67655b423fd9c40fba3
|
[
"MIT"
] | null | null | null |
from core.errors import LEO_ERRORS
from django import template
register = template.Library()
from utility.currency import to_price as to_price_origin
from utility.num import to_horuf as to_horuf_num,to_tartib as to_tartib_
@register.filter
def to_price(value):
return to_price_origin(value=value)
@register.filter
def to_horuf(value):
return to_horuf_num(value)
@register.filter
def to_tartib(value):
return to_tartib_(value)
@register.filter
def to_price_pure(value):
"""converts int to string"""
try:
sign=''
if value<0:
value=0-value
sign='- '
a=separate(value)
return sign+a
except Exception:
# return LEO_ERRORS.error_to_price_template_tag
return ""
def separate(price):
try:
price=int(price)
except Exception:
return None
if price<1000:
return str(price)
else:
return separate(price // 1000) + ',' + str(price)[-3:]
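# Worked example of the recursive thousands separator above:
# separate(1234567) -> separate(1234) + ',' + '567'
#                   -> (separate(1) + ',' + '234') + ',' + '567'
#                   -> '1,234,567'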
| 18.538462
| 72
| 0.65249
| 131
| 964
| 4.603053
| 0.305344
| 0.069652
| 0.112769
| 0.126036
| 0.167496
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01532
| 0.255187
| 964
| 51
| 73
| 18.901961
| 0.824513
| 0.071577
| 0
| 0.235294
| 0
| 0
| 0.003375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.117647
| 0.088235
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2674b3c10e1e9d8ebf4b7b0491fb0687920f7025
| 3,119
|
py
|
Python
|
Python/maximal-rectangle.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2022-01-30T06:55:28.000Z
|
2022-01-30T06:55:28.000Z
|
Python/maximal-rectangle.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | null | null | null |
Python/maximal-rectangle.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2021-12-31T03:56:39.000Z
|
2021-12-31T03:56:39.000Z
|
# Time: O(m*n)
# Space: O(n)
# 85
# Given a 2D binary matrix filled with 0's and 1's,
# find the largest rectangle containing all ones and return its area.
# Ascending stack solution.
class Solution(object):
def maximalRectangle(self, matrix): # USE THIS
"""
:type matrix: List[List[str]]
:rtype: int
"""
def largestRectangleArea(heights):
heights.append(0) # pitfall: must append a sentinel past the end of the array, e.g. for [2,4,5]
stk, ans = [-1], 0
for i, h in enumerate(heights):
while len(stk) > 1 and h <= heights[stk[-1]]: # right boundary found; pop equal heights too, keeping only the last one
last = stk.pop()
width = i - 1 - stk[-1]
ans = max(ans, heights[last] * width)
stk.append(i)
return ans
if not matrix:
return 0
result = 0
heights = [0] * len(matrix[0])
for i in range(len(matrix)):
for j in range(len(matrix[0])):
heights[j] = heights[j] + 1 if matrix[i][j] == '1' else 0
result = max(result, largestRectangleArea(heights))
return result
# DP solution.
class Solution2(object):
# Time: O(m*n^2) Space: O(n)
def maximalRectangle(self, A): # DONT USE: time complexity
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not A: return 0
m, n, ans = len(A), len(A[0]), 0
dp = [(0,0)] * (n+1) # number of consecutive 1s on left and top direction
for i in range(1, m+1):
for j in range(1, n+1):
if A[i-1][j-1] == '1':
dp[j] = (1+dp[j-1][0], 1+dp[j][1])
minHght = float('inf')
for k in range(dp[j][0]):
minHght = min(minHght, dp[j-k][1])
ans = max(ans, (k+1)*minHght)
else:
dp[j] = (0, 0) # need to reset because we reuse the storage
return ans
# Time: O(n^2) Space: O(n)
def maximalRectangle2(self, matrix): # hard to understand: 3 dp array L, H, R
if not matrix: return 0
result = 0
m, n = len(matrix), len(matrix[0])
L, H, R = [0] * n, [0] * n, [0] * n
for i in range(m):
left = 0
for j in range(n):
if matrix[i][j] == '1':
L[j] = max(L[j], left)
H[j] += 1
else:
L[j] = 0
H[j] = 0
R[j] = n
left = j + 1
right = n
for j in reversed(range(n)):
if matrix[i][j] == '1':
R[j] = min(R[j], right)
result = max(result, H[j] * (R[j] - L[j]))
else:
right = j
return result
if __name__ == "__main__":
matrix = ["01101",
"11010",
"01110",
"11110",
"11111",
"00000"]
print(Solution2().maximalRectangle(matrix)) # 9
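The stack solution collapses the 2-D matrix into a running histogram of column heights, one row at a time, and solves "largest rectangle in a histogram" per row. A minimal sketch of that reduction on the sample matrix above (assuming the classes in this file are importable):

# Row-by-row histogram heights for the sample matrix:
#   row 0 -> [0, 1, 1, 0, 1]
#   row 1 -> [1, 2, 0, 1, 0]   (a '0' resets its column height)
print(Solution().maximalRectangle(matrix))  # 9, matching Solution2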
[quality signals for Python/maximal-rectangle.py: avg_line_length 31.505051 | max_line_length 91 | alphanum_fraction 0.430907; remaining per-column signal values omitted]

2676fe4e4181d8ea15429d8939404231084cca25 | 8,869 bytes | py | Python | makechart.py | preeve9534/signalk-sensor-log @ 7f6afd188b1ed95dad0b4d798f66d145a1f10978 | [ "Apache-2.0" ] | stars/issues/forks: null
#!/usr/bin/python
# NOTE: this script targets Python 2 (SocketServer module, list-returning map,
# builtin reduce).
from SocketServer import TCPServer, StreamRequestHandler
import socket
from subprocess import call
import datetime
import json
import re
import sys
import os

CONF = {}
RRDTOOL = '/usr/bin/rrdtool'
PERIODS = []
CHART_BACKGROUNDCOLOR = '#000000'
CHART_CANVASCOLOR = '#000000'
CHART_DIRECTORY = '/tmp/'
CHART_FONT = 'LEGEND:8:Courier New'
CHART_FONTCOLOR = '#804040'
CHART_IMAGETYPE = 'SVG'
DISPLAYGROUP_LIST = []
RRDDATABASE_DATABASES = []
RRDDATABASE_DIRECTORY = '/tmp'
SENSOR_LIST = []


def init(config):
    global CONF, PERIODS, CHART_BACKGROUNDCOLOR, CHART_CANVASCOLOR, \
        CHART_DIRECTORY, CHART_FONTCOLOR, DISPLAYGROUP_LIST, \
        RRDDATABASE_DATABASES, RRDDATABASE_DIRECTORY, SENSOR_LIST
    with open(config) as data_file:
        CONF = json.load(data_file)["configuration"]
    PERIODS = CONF['rrddatabase']['periods']
    CHART_BACKGROUNDCOLOR = CONF["chart"]["backgroundcolor"]
    CHART_CANVASCOLOR = CONF["chart"]["canvascolor"]
    CHART_DIRECTORY = CONF['chart']['directory']
    CHART_FONTCOLOR = CONF["chart"]["fontcolor"]
    DISPLAYGROUP_LIST = CONF['displaygroups']
    SENSOR_LIST = CONF['paths']
    RRDDATABASE_DATABASES = CONF['rrddatabase']['databases']
    RRDDATABASE_DIRECTORY = CONF['rrddatabase']['directory']
    return True


def makeGraph(group, chart, directory):
    command = ""
    if group in map(lambda x: x['id'], DISPLAYGROUP_LIST):
        displayGroup = reduce(lambda a, v: (v if (v['id'] == group) else a), DISPLAYGROUP_LIST, None)
        if (chart in map(lambda s: s['name'], PERIODS)):
            dsIds = map(lambda datasource: datasource['datasource'][datasource['datasource'].find(':') + 1:], displayGroup['datasources'])
            dsDatabases = map(lambda datasource: datasource['datasource'][0: datasource['datasource'].find(':')], displayGroup['datasources'])
            dsColors = map(lambda datasource: datasource['color'], displayGroup['datasources'])
            dsNames = map(lambda datasource: datasource['displayname'], displayGroup['datasources'])
            dsLineTypes = map(lambda datasource: 'AREA' if ('area' in datasource['options']) else 'LINE', displayGroup['datasources'])
            dsStack = map(lambda datasource: ('stack' in datasource['options']), displayGroup['datasources'])
            command = RRDTOOL
            command += " graph '" + directory + "/" + group + "." + chart + "." + CHART_IMAGETYPE.lower() + "'"
            command += " -T 80"
            command += " --imgformat " + CHART_IMAGETYPE
            command += " --font '" + CHART_FONT + "'"
            command += " --title '" + displayGroup["title"] + "'"
            command += " --vertical-label '" + displayGroup["ylabel"] + "'"
            command += " --watermark 'Generated on " + datetime.datetime.now().replace(microsecond=0).isoformat(' ') + "'"
            command += " --start '" + reduce(lambda a, v: (v['tag'] if (v['name'] == chart) else a), PERIODS, "end-1h") + "'"
            command += (" --lower-limit=" + displayGroup["ymin"]) if (displayGroup["ymin"] != "") else ""
            command += (" --upper-limit=" + displayGroup["ymax"]) if (displayGroup["ymax"] != "") else ""
            command += " --slope-mode"
            command += " --rigid"
            command += " --color CANVAS" + CHART_CANVASCOLOR
            command += " --color BACK" + CHART_BACKGROUNDCOLOR
            command += " --color FONT" + CHART_FONTCOLOR
            command += " --full-size-mode"
            command += " --width=800"
            command += " --height=300"
            for index, dsid in enumerate(dsIds):
                command += " DEF:" + dsid + "=" + RRDDATABASE_DIRECTORY + "/" + dsDatabases[index] + ":" + dsid + ":" + reduce(lambda a, v: (v['consolidate'] if (v['name'] == chart) else a), PERIODS, "AVERAGE")
                command += (" VDEF:" + dsid + "min=" + dsid + ",MINIMUM") if ("min" in displayGroup["options"]) else ""
                command += (" VDEF:" + dsid + "max=" + dsid + ",MAXIMUM") if ("max" in displayGroup["options"]) else ""
                command += (" VDEF:" + dsid + "avg=" + dsid + ",AVERAGE") if ("avg" in displayGroup["options"]) else ""
                command += (" VDEF:" + dsid + "lst=" + dsid + ",LAST") if ("lst" in displayGroup["options"]) else ""
                command += (" CDEF:" + dsid + "eeg=" + dsid + "," + str(index * 1.1) + ",+") if ("eeg" in displayGroup["options"]) else ""
                # //command += " CDEF:" + dsname + "filled=" + dsname + ",UN," + dsname + "avg," + dsname + ",IF";
                # command += " CDEF:" + dsid + "filled=" + dsid + ",UN,PREV," + dsid + ",IF"
                # command += " CDEF:" + dsid + "fixed=" + dsid + "filled," + str(reduce(lambda a, v: (v['seconds'] if (v['name'] == chart) else a), PERIODS, "1")) + ",/"
                # command += " VDEF:" + dsid + "total=" + dsid + "fixed,TOTAL"
            comments = reduce(lambda a, v: (a | (v in displayGroup["options"])), ["min", "max", "avg", "lst"], False)
            command += (" COMMENT:'" + "Data source".ljust(23) + "'") if (comments) else ""
            command += (" COMMENT:'" + "Min ".rjust(10) + "'") if ("min" in displayGroup["options"]) else ""
            command += (" COMMENT:'" + "Max ".rjust(10) + "'") if ("max" in displayGroup["options"]) else ""
            command += (" COMMENT:'" + "Average ".rjust(10) + "'") if ("avg" in displayGroup["options"]) else ""
            command += (" COMMENT:'" + "Last ".rjust(10) + "'") if ("lst" in displayGroup["options"]) else ""
            command += (" COMMENT:'\\n'") if (comments) else ""
            # command += " COMMENT:'" + "Data stream".ljust(19) + "Min ".rjust(13) + "Max ".rjust(14) + "Average ".rjust(14) + "Derived".rjust(13) + "\\n'";
            for i, dsid in enumerate(dsIds):
                plot = (dsid + "eeg") if ("eeg" in displayGroup["options"]) else dsid
                command += " " + dsLineTypes[i] + ":" + plot + dsColors[i] + ":'" + dsNames[i].ljust(19) + "'" + (":STACK" if (dsStack[i]) else "")
                command += (" GPRINT:" + dsid + "min:'%10.2lf'") if ("min" in displayGroup["options"]) else ""
                command += (" GPRINT:" + dsid + "max:'%10.2lf'") if ("max" in displayGroup["options"]) else ""
                command += (" GPRINT:" + dsid + "avg:'%10.2lf'") if ("avg" in displayGroup["options"]) else ""
                command += (" GPRINT:" + dsid + "lst:'%10.2lf'") if ("lst" in displayGroup["options"]) else ""
                # command += " GPRINT:" + dsid + "total:'%10.2lf\\n'"
                command += (" COMMENT:'\\n'") if (comments) else ""
            call(command, shell=True)
    return command


def dropPrivileges(user, group):
    import pwd, grp
    # Get the uid/gid from the name
    runningUid = pwd.getpwnam(user).pw_uid
    runningGid = grp.getgrnam(group).gr_gid
    # Remove group privileges
    os.setgroups([])
    # Try setting the new uid/gid
    os.setgid(runningGid)
    os.setuid(runningUid)
    # Reset logging
    # self.resetLogging()


class Handler(StreamRequestHandler):
    def handle(self):
        line = self.rfile.readline()
        while (line):
            # self.wfile.write(line)
            line = line.decode('ascii').strip()
            if (line == "quit"):
                break
            parts = re.split(r'\s+', line)
            if (len(parts) == 2):
                makeGraph(parts[0], parts[1], CHART_DIRECTORY)
            line = self.rfile.readline()


class Server(TCPServer):
    # The constant would be better initialized by a systemd module
    SYSTEMD_FIRST_SOCKET_FD = 3

    def __init__(self, server_address, handler_cls):
        # Invoke base but omit bind/listen steps (performed by systemd activation!)
        TCPServer.__init__(self, server_address, handler_cls, bind_and_activate=False)
        # Override socket
        self.socket = socket.fromfd(self.SYSTEMD_FIRST_SOCKET_FD, self.address_family, self.socket_type)


if __name__ == '__main__':
    DAEMONISE = False
    CONFIG = "/root/.signalk/plugin-config-data/sensor-log.json"
    USER = None
    GROUP = None
    args = sys.argv[1:]
    if (len(args) > 0) and (args[0] == "-"):
        DAEMONISE = True
        args = args[1:]
    if (len(args) > 1) and (args[0] == "-c"):
        CONFIG = args[1]
        args = args[2:]
    if (len(args) > 1) and (args[0] == "-U"):
        USER = args[1]
        args = args[2:]
    if (len(args) > 1) and (args[0] == "-G"):
        GROUP = args[1]
        args = args[2:]
    if (init(CONFIG)):
        if (DAEMONISE):
            if ((USER is not None) and (GROUP is not None)):
                dropPrivileges(USER, GROUP)
            server = Server(('127.0.0.1', 9999), Handler)
            server.serve_forever()
        else:
            if (len(args) > 1):
                print(makeGraph(args[0], args[1], "."))
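The handler reads newline-terminated `<group> <period>` requests and a literal `quit` to end the session. A hedged sketch of a client, assuming the daemon is listening on 127.0.0.1:9999 as configured above; the group and period names are made up:

# Hypothetical client for the chart daemon above (Python 3 is fine here).
import socket

s = socket.create_connection(('127.0.0.1', 9999))
s.sendall(b'power hour\n')   # 'power'/'hour' stand in for a real displaygroup id and period name
s.sendall(b'quit\n')
s.close()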
[quality signals for makechart.py: avg_line_length 49.272222 | max_line_length 209 | alphanum_fraction 0.560379; remaining per-column signal values omitted]

267701db0df3dc5669a6ef8609e548969a09888e | 410 bytes | py | Python | way/python/exercises/various/turtle_draws/turtle_spiral_name.py | only-romano/junkyard @ b60a25b2643f429cdafee438d20f9966178d6f36 | [ "MIT" ] | stars/issues/forks: null
# A colored spiral drawn from the user's name
import turtle

t = turtle.Pen()
turtle.bgcolor("black")
colors = ["red", "yellow", "blue", "green"]

# GUI text input (prompts translated from Russian)
name = turtle.textinput("Enter your name", "What is your name?")

for x in range(100):
    t.pencolor(colors[x % 4])
    t.penup()
    t.forward(x * 4)
    t.pendown()
    t.write(name, font=("Arial", int((x + 4) / 4), "bold"))
    t.left(92)
[quality signals for turtle_spiral_name.py: avg_line_length 22.777778 | max_line_length 61 | alphanum_fraction 0.592683; remaining per-column signal values omitted]

26796efa4885d9b90f7bb3e4e595ebd4603db189 | 1,537 bytes | py | Python | config/base_config.py | xuyouze/DropNet @ edbaeb72075b819b96e1ca66e966999a40d3645e | [ "Apache-2.0" ] | stars: 1 (2021-06-28T06:27:06.000Z)
# coding:utf-8
# @Time      : 2019/5/15
# @Author    : xuyouze
# @File Name : base_config.py
import importlib
import os
import sys

import torch
import logging
import logging.config  # dictConfig lives in the logging.config submodule

from .dataset_config import build_dataset_config
from .logger_config import config

__all__ = ["BaseConfig"]


class BaseConfig(object):
    def __init__(self):
        # model component parameters
        self.checkpoints_dir = "ckp"

        # dataset name [celebA | lfwa | duke | market]
        self.dataset_name = "celebA"
        # self.dataset_name = "lfwa"
        # self.dataset_name = "duke"
        # self.dataset_name = "market"

        # model name [common]
        self.model_name = "common"
        # network name [resnet]
        self.network_name = "resnet"

        # loss name [focal | ghm-c | drop | bce]
        # self.loss_name = "drop"
        # self.loss_name = "focal"
        # self.loss_name = "ghmc"
        self.loss_name = "bce"

        # network initialization type [normal]
        self.init_type = "normal"
        self.init_gain = 0.2  # scaling factor for normal

        # global saving and loading parameters
        self.batch_size = 100
        self.num_threads = 4
        self.last_epoch = "last"
        self.load_iter = 0
        self.isTrain = None

        # dataset parameters
        self.dataset_config = build_dataset_config(self.dataset_name)
        self.balance_attr_pos_prop = torch.FloatTensor([0.5] * self.dataset_config.attribute_num)

        # logging config
        logging.config.dictConfig(config)
        self.logger = logging.getLogger("TrainLogger")
        self.test_logger = logging.getLogger("TestLogger")

        if not os.path.exists(self.checkpoints_dir):
            os.makedirs(self.checkpoints_dir)
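A minimal usage sketch, assuming the package's `dataset_config` and `logger_config` modules resolve; the subclass name is hypothetical:

# Hypothetical: override a single field by subclassing.
class TrainConfig(BaseConfig):
    def __init__(self):
        super(TrainConfig, self).__init__()
        self.isTrain = True

cfg = TrainConfig()
cfg.logger.info("batch size = %d", cfg.batch_size)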
[quality signals for config/base_config.py: avg_line_length 23.646154 | max_line_length 91 | alphanum_fraction 0.709824; remaining per-column signal values omitted]

267b0451a5289dfdcefad895acd9541e3d77721e | 814 bytes | py | Python | test/test_utils.py | fact-project/ratescan @ 69a2eb8b2c66024f10e59d6dbf15c84c9b12ede4 | [ "MIT" ] | stars/issues/forks: null
from fact.io import read_data


def test_sumupCountsOfRun():
    from ratescan.utils import sumupCountsOfRun

    df = read_data("test/test.hdf5", key="ratescan")
    df_summed = sumupCountsOfRun(df)

    assert df_summed.run_id.unique() == 182
    assert len(df_summed.ratescan_trigger_thresholds) == 1000


def test_compileRatescanForRun():
    from ratescan.utils import compileRatescanForRun

    df = read_data("test/test.hdf5", key="ratescan")
    df = compileRatescanForRun(df, ontime=160)

    assert df[df["ratescan_trigger_counts"] == 75840]["ratescan_trigger_rate"].unique() == 474.0

#
# def test_joinOnTimesFromRunDB():
#     from ratescan.utils import joinOnTimesFromRunDB
#
#     df = read_data("test/test.hdf5", key="ratescan")
#
#     df_res = joinOnTimesFromRunDB(df)
[quality signals for test/test_utils.py: avg_line_length 29.071429 | max_line_length 96 | alphanum_fraction 0.701474; remaining per-column signal values omitted]

267b7ae665db4a183786b0a16f0d7887f1bbb20e | 4,080 bytes | py | Python | rbac/cli/cli_test_auth.py | shawnmckinney/py-fortress @ ead12bf9b7e37e923c42ccdadd8fd3c5adf027cf | [ "Apache-2.0" ] | stars: 16 (2018-03-19T02:19:01.000Z .. 2021-12-30T15:24:40.000Z) | issues: 1 (2021-12-18T16:46:04.000Z) | forks: 2 (2018-03-14T21:48:43.000Z .. 2018-03-19T03:25:40.000Z)
'''
@copyright: 2022 - Symas Corporation
'''
import sys
import pickle
import argparse

from rbac.util import global_ids
from rbac.model import Perm, User
from rbac import access
from rbac.util import RbacError
from ..cli.utils import print_user, print_entity
from rbac.cli.utils import (
    load_entity, add_args, ADD, DELETE, AUTH, CHCK, ROLES, PERMS, SHOW, DROP
)

OUT_SESS_FILE = "sess.pickle"


def process(args):
    sess = None
    result = False
    user = load_entity(User(), args)
    perm = load_entity(Perm(), args)
    print(args.operation)
    try:
        if args.operation == AUTH:
            sess = access.create_session(user, False)
            result = True
        elif args.operation == CHCK:
            sess = un_pickle()
            result = access.check_access(sess, perm)
        elif args.operation == ROLES:
            sess = un_pickle()
            roles = access.session_roles(sess)
            for idx, role in enumerate(roles):
                print_entity(role, role.name + ':' + str(idx))
            result = True
        elif args.operation == PERMS:
            sess = un_pickle()
            perms = access.session_perms(sess)
            for idx, perm in enumerate(perms):
                print_entity(perm, perm.obj_name + '.' + perm.op_name + ':' + str(idx))
            result = True
        elif args.operation == SHOW:
            sess = un_pickle()
            print_entity(sess, 'session')
            print_user(sess.user, 'user')
            result = True
        elif args.operation == ADD:
            sess = un_pickle()
            if not args.role:
                print("error --role required for this op")
                return False
            print('role=' + args.role)
            access.add_active_role(sess, args.role)
            result = True
        elif args.operation == DROP:
            sess = un_pickle()
            if not args.role:
                print("error --role required for this op")
                return False
            print('role=' + args.role)
            access.drop_active_role(sess, args.role)
            result = True
        else:
            print('process failed, invalid operation=' + args.operation)
        if result:
            print('success')
        else:
            print('failed')
        pickle_it(sess)
    except RbacError as e:
        if e.id == global_ids.ACTV_FAILED_DAY:
            print('failed day of week, id=' + str(e.id) + ', msg=' + e.msg)
        elif e.id == global_ids.ACTV_FAILED_DATE:
            print('failed for date, id=' + str(e.id) + ', msg=' + e.msg)
        elif e.id == global_ids.ACTV_FAILED_TIME:
            print('failed for time of day, id=' + str(e.id) + ', msg=' + e.msg)
        elif e.id == global_ids.ACTV_FAILED_TIMEOUT:
            print('failed inactivity timeout, id=' + str(e.id) + ', msg=' + e.msg)
        elif e.id == global_ids.ACTV_FAILED_LOCK:
            print('failed locked date')
        else:
            print('RbacError id=' + str(e.id) + ', ' + e.msg)


def pickle_it(sess):
    if sess is not None:
        with open(OUT_SESS_FILE, "wb") as pickling_on:
            pickle.dump(sess, pickling_on)


def un_pickle():
    with open(OUT_SESS_FILE, "rb") as pickle_off:
        sess = pickle.load(pickle_off)
    return sess


def main(argv=None):
    '''Command line options.'''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = 'Process py-fortress access commands.'
    parser = argparse.ArgumentParser(description=program_name)
    parser.add_argument('operation', metavar='operand',
                        choices=[AUTH, CHCK, ROLES, PERMS, ADD, DELETE, SHOW, DROP],
                        help='operation name')
    parser.add_argument('-r', '--role', dest='role', help='role name')
    add_args(parser, User())
    add_args(parser, Perm())
    args = parser.parse_args()
    process(args)


if __name__ == "__main__":
    sys.exit(main())
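A hedged sketch of driving the CLI. Only the `operation` operand and `--role` are confirmed by the source; any user or permission flags come from `add_args()` and are assumptions, and since `main()` extends `sys.argv`, each call belongs in its own process:

# Hypothetical invocations (one per process); the role name is made up.
#   python -m rbac.cli.cli_test_auth auth                     -> creates a session, pickled to sess.pickle
#   python -m rbac.cli.cli_test_auth roles                    -> lists active roles from the pickled session
#   python -m rbac.cli.cli_test_auth add --role bank_teller   -> activates a role in the pickled session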
[quality signals for rbac/cli/cli_test_auth.py: avg_line_length 34.576271 | max_line_length 132 | alphanum_fraction 0.554412; remaining per-column signal values omitted]

267b9ff6b529eb0367e6acbbd247f37b5d0c7a4d | 1,678 bytes | py | Python | httprider/presenters/utility_functions_presenter.py | iSWORD/http-rider @ 5d9e5cc8c5166ab58f81d30d21b3ce2497bf09b9 | [ "MIT" ] | stars: 27 (2019-12-20T00:10:28.000Z .. 2022-03-09T18:04:23.000Z) | issues: 6 (2019-10-13T08:50:21.000Z .. 2020-06-05T12:23:08.000Z) | forks: 7 (2019-08-10T01:38:31.000Z .. 2021-08-23T05:28:46.000Z)
from httprider.core.generators import utility_func_map


class UtilityFunctionsPresenter:
    def __init__(self, view, parent):
        self.view = view
        self.parent = parent

        # update list of functions
        for f in utility_func_map.keys():
            self.view.function_selector.addItem(f)

        # Event handlers to refresh generated values
        self.view.function_selector.currentIndexChanged[str].connect(
            self.transform_selected_text
        )
        self.view.btn_copy_transformed.clicked.connect(self.on_copy_clipboard)

    def init(self):
        whole_text = self.parent.text()
        selected_text = self.parent.selected_text
        self.view.lbl_selected_text.setText(
            selected_text or whole_text or "Select some text"
        )
        self.transform_selected_text()

    def apply_transformation(self, selected_text, func_name):
        try:
            return utility_func_map.get(func_name)(selected_text)
        except Exception as e:
            return "Error: {}".format(e)

    def on_copy_clipboard(self):
        self.view.txt_transformed_text.selectAll()
        self.view.txt_transformed_text.copy()

    def transform_selected_text(self):
        selected_text = self.view.lbl_selected_text.text()
        func_name = self.view.function_selector.currentText()
        self.view.txt_transformed_text.setPlainText(
            self.apply_transformation(selected_text, func_name)
        )

    def get_function(self):
        selected_text = self.view.lbl_selected_text.text()
        func_name = self.view.function_selector.currentText()
        return f'${{utils("{func_name}", "{selected_text}")}}'
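`get_function` renders the current selection as a template call. A small hedged illustration; the function name here is hypothetical, standing in for whatever keys `utility_func_map` actually holds:

# Hypothetical: with func_name = 'base64_encode' and selected_text = 'hello',
# get_function() returns the literal template string
#   ${utils("base64_encode", "hello")}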
[quality signals for utility_functions_presenter.py: avg_line_length 34.958333 | max_line_length 78 | alphanum_fraction 0.6764; remaining per-column signal values omitted]

267d4a279fad22068d75718ec410431f6a3cbe63 | 12,745 bytes | py | Python | ensembling_sgd.py | suswei/RLCT @ e9e04ca5e64250dfbb94134ec5283286dcdc4358 | [ "MIT" ] | stars/issues/forks: null
import argparse
import os

import numpy as np
from numpy.linalg import inv

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset
import torch.optim as optim
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.uniform import Uniform
from torch.distributions.normal import Normal

from matplotlib import pyplot as plt

# TODO: currently only supports realizable reduced rank regression, need to add realizable tanh


def main():
    # Training settings
    parser = argparse.ArgumentParser(description='RLCT Variational Inference')
    parser.add_argument('--n', type=int, default=500)
    parser.add_argument('--batchsize', type=int, default=10)
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--H', type=int, default=5)
    parser.add_argument('--dataset', type=str, choices=['tanh', 'rr'])
    parser.add_argument('--prior-std', type=float, default=1.0)
    parser.add_argument('--y-std', type=float, default=1.0)
    parser.add_argument('--betasbegin', type=float, default=1.0,
                        help='where beta range should begin')
    parser.add_argument('--betasend', type=float, default=5.0,
                        help='where beta range should end')
    parser.add_argument('--numbetas', type=int, default=10,
                        help='how many betas should be swept between betasbegin and betasend')
    parser.add_argument('--R', type=int, default=5)
    parser.add_argument('--MC', type=int, default=1)
    parser.add_argument('--taskid', type=int, default=1)
    args = parser.parse_args()

    # %%
    if args.dataset == 'rr':
        args.output_dim = 6
        args.input_dim = 6
        args.H = 6
        args.H0 = 3
        # args.a_params = torch.transpose(
        #     torch.cat((torch.eye(args.H), torch.ones([args.H, args.input_dim - args.H], dtype=torch.float32)), 1), 0,
        #     1)  # input_dim * H
        # args.b_params = torch.eye(args.output_dim)
        a = Normal(0.0, 1.0)
        args.a_params = 0.2 * a.sample((args.H0, args.input_dim))
        b = Normal(0.0, 1.0)
        args.b_params = 0.2 * b.sample((args.output_dim, args.H0))
        # the input_dim = output_dim + 3, output_dim = H (the number of hidden units)
        m = MultivariateNormal(torch.zeros(args.input_dim), torch.eye(args.input_dim))
        X = 3.0 * m.sample(torch.Size([2 * args.n]))
        mean = torch.matmul(torch.matmul(X, args.b_params), args.a_params)
        y_rv = MultivariateNormal(torch.zeros(args.output_dim), torch.eye(args.output_dim))
        y = mean + args.y_std * y_rv.sample(torch.Size([2 * args.n]))

        # Split sizes: n train, n/2 validation, n/2 test out of the 2n samples
        train_size = args.n
        valid_size = int(args.n * 0.5)
        test_size = 2 * args.n - train_size - valid_size
        dataset_train, dataset_valid, dataset_test = torch.utils.data.random_split(
            TensorDataset(X, y), [train_size, valid_size, test_size])
        train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batchsize, shuffle=True)
        valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=args.batchsize, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batchsize, shuffle=True)
        args.loss_criterion = nn.MSELoss(reduction='sum')
        # rank r = H for the 'reducedrank_synthetic' dataset
        args.trueRLCT = (args.output_dim * args.H - args.H ** 2 + args.input_dim * args.H) / 2
    elif args.dataset == 'tanh':
        # generate features X from unif(-1,1)
        m = Uniform(torch.tensor([-1.0]), torch.tensor([1.0]))
        X = m.sample(torch.Size([2 * args.n]))
        # generate target from N(0,1) i.e. tanh network with zero layers
        # w = {(a_m,b_m)}_{m=1}^p, p(y|x,w) = N(f(x,w),1) where f(x,w) = \sum_{m=1}^p a_m tanh(b_m x)
        y_rv = Normal(0.0, args.y_std)  # torch.distributions.normal.Normal(loc, scale), scale = standard deviation
        y = y_rv.sample(torch.Size([2 * args.n, 1]))

        # Split sizes: n train, n/2 validation, n/2 test out of the 2n samples
        train_size = args.n
        valid_size = int(args.n * 0.5)
        test_size = 2 * args.n - train_size - valid_size
        dataset_train, dataset_valid, dataset_test = torch.utils.data.random_split(
            TensorDataset(X, y), [train_size, valid_size, test_size])
        train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batchsize, shuffle=True)
        valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=args.batchsize, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batchsize, shuffle=True)
        args.input_dim = X.shape[1]
        args.output_dim = y.shape[1]
        args.loss_criterion = nn.MSELoss(reduction='sum')
        max_integer = int(np.sqrt(args.H))
        args.trueRLCT = (args.H + max_integer * max_integer + max_integer) / (4 * max_integer + 2)

    # %%
    # define network
    class reducedrank(nn.Module):
        def __init__(self, input_dim, output_dim, H):
            super(reducedrank, self).__init__()
            self.fc1 = nn.Linear(input_dim, H, bias=False)
            self.fc2 = nn.Linear(H, output_dim, bias=False)

        def forward(self, x):
            x = self.fc1(x)
            x = self.fc2(x)
            return x

    class tanh(nn.Module):
        def __init__(self, input_dim, output_dim, H):
            super(tanh, self).__init__()
            self.fc1 = nn.Linear(input_dim, H, bias=False)
            self.fc2 = nn.Linear(H, output_dim, bias=False)

        def forward(self, x):
            x = torch.tanh(self.fc1(x))
            x = self.fc2(x)
            return x

    args.w_dim = (args.input_dim + args.output_dim) * args.H

    # TODO: is the log n scale really necessary?
    # get B inverse temperatures
    # args.betas = 1 / np.linspace(np.log(args.n) / args.betasbegin, np.log(args.n) / args.betasend, args.numbetas)
    args.betas = 1 / np.linspace(1 / args.betasbegin, 1 / args.betasend, args.numbetas)
    # args.betas = np.linspace(args.betasbegin, args.betasend, args.numbetas)/np.log(args.n)
    # args.recip = np.linspace(0.1, args.numbetas, args.numbetas)  # 1/beta
    # args.betas = 1/args.recip
    # args.betas = np.linspace(args.betasbegin, args.betasend, args.numbetas)

    # TODO: set automatically?
    # args.prior_std = np.sqrt(args.w_dim * (args.y_std ** 2) * np.log(args.n) / (args.betasbegin * args.n))
    # args.prior_std = np.sqrt(args.w_dim / (args.betasbegin * args.n))
    # args.prior_std = 10.0
    # print('prior std auto set to {}'.format(args.prior_std))

    # %%
    # define loss function that is specific to anchor point and inverse temperature beta
    def custom_loss(model, target, output, beta):
        # TODO: what's the justification for using anchors?
        # returns ||y-\hat y||^2_2 + \sigma_eps^2/beta*\sigma_{prior}^2 ||theta-\hat theta||^2_2
        # anchor_dist = Normal(0.0, args.prior_std)
        wd = torch.tensor(0.)
        for p in model.parameters():
            # anchor = anchor_dist.sample(p.shape)
            # wd += ((p - anchor) ** 2).sum()
            wd += (p ** 2).sum()
        # wd_factor = torch.tensor(((args.y_std/args.prior_std)**2))
        # print('model fit portion {}'.format(beta * args.loss_criterion(target, output) / (args.batchsize)))
        # print('weight decay portion {}'.format(wd / ((args.prior_std ** 2) * args.n)))
        # return beta * args.loss_criterion(target, output) / (2 * args.batchsize) + wd / (
        #     2 * (args.prior_std ** 2) * args.n)
        return beta * args.loss_criterion(target, output) / ((args.y_std ** 2) * args.batchsize) \
            + wd / ((args.prior_std ** 2) * args.n)
        # return args.loss_criterion(target, output) / ((args.y_std ** 2) * args.batchsize) + wd / ((args.prior_std ** 2) * args.n)

    # %%
    # train ensemble
    def train(beta):
        # return ensemble-average of nL_n(w) = -\sum_{i=1}^n \log p(y_i|x_i,w) = \sum_i (y_i-f(x_i,w))^2 / 2\sigma_eps^2
        # wd_factor = ((args.y_std/args.prior_std)**2)/beta
        if args.dataset == 'rr':
            model = reducedrank(args.input_dim, args.output_dim, args.H)
        elif args.dataset == 'tanh':
            model = tanh(args.input_dim, args.output_dim, args.H)
        # optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=wd_factor)
        # TODO: how to scale lr automatically so it doesn't explode, does it include beta or not?
        # lr = 0.01*args.batchsize / (beta * args.n)
        lr = args.batchsize / args.n
        optimizer = optim.SGD(model.parameters(), lr=lr)
        wholex = train_loader.dataset[:][0]
        wholey = train_loader.dataset[:][1]
        for epoch in range(1, args.epochs + 1):
            model.train()
            for batch_idx, (data, target) in enumerate(train_loader):
                output = model(data)
                loss = custom_loss(model, target, output, beta)
                # loss = args.loss_criterion(target, output)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            if epoch % 100 == 0:
                model.eval()
                with torch.no_grad():
                    output = model(wholex)
                    eval_loss = custom_loss(model, wholey, output, beta)
                    # eval_loss = args.loss_criterion(wholey, output)
                    print('Epoch {}: total loss on training {}, negloglik {}'.format(
                        epoch, eval_loss,
                        args.loss_criterion(wholey, output).detach() / (2 * (args.y_std ** 2))))
        final_output = model(wholex)
        return ((wholey - final_output) ** 2).sum() / (2 * (args.y_std ** 2))

    nll = np.empty((args.numbetas, args.R))
    for beta_index in range(0, args.numbetas):
        beta = args.betas[beta_index]
        for r in range(0, args.R):
            print('Training {}/{} ensemble at {}/{} inverse temp, getting colder (negloglik smaller)'
                  .format(r + 1, args.R, beta_index + 1, args.numbetas))
            nll[beta_index, r] = train(beta)
        if beta_index > 0:
            design_x = np.vstack((np.ones(beta_index + 1), 1 / args.betas[0:beta_index + 1])).T
            design_y = np.mean(nll[0:beta_index + 1, :], 1)
            design_y = design_y[:, np.newaxis]
            fit = inv(design_x.T.dot(design_x)).dot(design_x.T).dot(design_y)
            print('true RLCT {}, current RLCT estimate {}'.format(args.trueRLCT, fit[1][0]))
        plt.hist(nll[beta_index, :])
        # bug fix: `beta` is already the scalar args.betas[beta_index]
        plt.title('nLn(w) at inverse temp {}'.format(beta))
        plt.show()

    # %%
    # average nll array over r
    # ols_model = OLS(np.mean(nll, 1), add_constant(1 / args.betas)).fit()
    # ols_intercept_estimate = ols_model.params[0]
    # RLCT_estimate = ols_model.params[1]
    design_x = np.vstack((np.ones(args.numbetas), 1 / args.betas)).T
    design_y = np.mean(nll, 1)
    design_y = design_y[:, np.newaxis]
    fit = inv(design_x.T.dot(design_x)).dot(design_x.T).dot(design_y)
    ols_intercept_estimate = fit[0][0]
    RLCT_estimate = fit[1][0]
    print('RLCT estimate: {}'.format(RLCT_estimate))
    print('true RLCT: {}'.format(args.trueRLCT))

    # robust ls fit
    # regr = ElasticNet(random_state=0, fit_intercept=True, alpha=0.5)
    # regr.fit((1 / args.betas).reshape(args.numbetas, 1), np.mean(nll, 1))
    # robust_intercept_estimate = regr.intercept_
    # # slope_estimate = min(regr.coef_[0], args.w_dim/2)
    # robust_slope_estimate = regr.coef_[0]

    path = './taskid{}'.format(args.taskid)
    if not os.path.exists(path):
        os.makedirs(path)
    args_dict = vars(args)
    print(args_dict)
    torch.save(args_dict, '{}/mc{}_config.pt'.format(path, args.MC))

    plt.scatter(1 / args.betas, np.mean(nll, 1), label='nll beta')
    # plt.plot(1 / args.betas, robust_intercept_estimate + robust_slope_estimate * 1 / args.betas, 'g-',
    #          label='robust ols')
    plt.plot(1 / args.betas, ols_intercept_estimate + RLCT_estimate * 1 / args.betas, 'b-', label='ols')
    plt.title("d_on_2 = {}, true lambda = {:.1f} "
              "\n hat lambda ols = {:.1f}"
              .format(args.w_dim / 2, args.trueRLCT, RLCT_estimate), fontsize=8)
    plt.xlabel("1/beta", fontsize=8)
    plt.ylabel("ensemble estimate of E^beta_w [nL_n(w)]", fontsize=8)
    plt.legend()  # moved before savefig so the legend appears in the saved PNG
    plt.savefig('{}/mc{}.png'.format(path, args.MC))
    plt.show()


if __name__ == "__main__":
    main()
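The estimator rests on the asymptotic relation E^beta_w[nL_n(w)] ~ c + lambda/beta, so lambda (the RLCT) is the slope of the ensemble average regressed against 1/beta. A self-contained numpy sketch of the same normal-equation fit, on synthetic data with a known slope:

# Hedged sketch: recover a known slope (1.5) using the inv(X^T X) X^T y fit from the script above.
import numpy as np
from numpy.linalg import inv

rng = np.random.default_rng(0)
recip_betas = np.linspace(1.0, 5.0, 10)                  # the 1/beta sweep
y = 3.0 + 1.5 * recip_betas + 0.05 * rng.standard_normal(10)
X = np.vstack((np.ones_like(recip_betas), recip_betas)).T
fit = inv(X.T @ X) @ (X.T @ y)
print(fit)   # approximately [3.0, 1.5]: intercept and RLCT-like slope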
[quality signals for ensembling_sgd.py: avg_line_length 42.342193 | max_line_length 175 | alphanum_fraction 0.612475; remaining per-column signal values omitted]

268c51ed50d4a8d0b92613024c9ad4e9c61f0c83 | 371 bytes | py | Python | Statistics/PopulationMean.py | cadibemma/Statistical-Calculator @ 4135487577af9e17b51317e72d7b07c09390f3f6 | [ "MIT" ] | stars: 1 (2020-06-27T22:14:11.000Z) | issues: 28 (2020-06-28T15:03:56.000Z .. 2020-07-07T16:29:27.000Z) | forks: 1 (2020-06-27T14:33:20.000Z)
# from Calculator.Addition import addition
from Calculator.Division import division


def populationmean(num):
    try:
        num_values = len(num)
        total = sum(num)
        return division(total, num_values)
    except ZeroDivisionError:
        print("Error: Enter values greater than 0")
    except ValueError:
        print("Error: insert correct data type")
[quality signals for Statistics/PopulationMean.py: avg_line_length 28.538462 | max_line_length 51 | alphanum_fraction 0.684636; remaining per-column signal values omitted]

26931376c81cc95ed098daf30d28fcc4518c0ee9 | 1,842 bytes | py | Python | bot/NFQ.py | cyber-meow/Robotic_state_repr_learning @ d74fe372bea0b1cf42107450a8c3344a99279e91 | [ "MIT" ] | stars/issues/forks: null
import numpy as np

from sklearn.neural_network import MLPRegressor
from sklearn.exceptions import NotFittedError

from inter.interfaces import QLearning
from utility import set_all_args


class NFQ(QLearning):

    gamma = 0.9
    beta = 0.8

    def __init__(self, **kwargs):
        self.mlp = MLPRegressor(
            hidden_layer_sizes=(5, 5), activation='logistic', batch_size=400)
        set_all_args(self, kwargs)

    def fit(self, data, max_iter=300, intra_step=50):
        """
        data is the triple (ss, as, rs)
        """
        for _ in range(max_iter):
            inputs, targets = self.compute_inputs_targets(data)
            for _ in range(intra_step):
                self.mlp.partial_fit(inputs, targets)

    def compute_inputs_targets(self, data):
        inputs, targets = [], []
        for i in range(len(data[0]) - 1):
            s, a, r = list(data[0][i]), data[1][i], data[2][i]
            s_next = list(data[0][i+1])
            inputs.append(s + [self.actions.index(a)])
            to_prs = [s_next + [act] for act in range(len(self.actions))]
            try:
                q_values = self.mlp.predict(to_prs)
                targets.append(r + self.gamma * np.max(q_values))
            except NotFittedError:
                targets.append(r)
        return np.array(inputs), np.array(targets)

    def score(self, data):
        inputs, targets = self.compute_inputs_targets(data)
        return self.mlp.score(inputs, targets)

    def decision(self, state):
        state = list(state)
        to_prs = [state + [act] for act in range(len(self.actions))]
        q_values = self.mlp.predict(to_prs)
        ps = np.exp(self.beta * q_values)
        a_num = np.random.choice(len(self.actions), p=ps/np.sum(ps))
        return self.actions[a_num]
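`decision` samples from a Boltzmann (softmax) policy, p(a) proportional to exp(beta * Q(s, a)). A self-contained sketch of just that sampling step:

# Hedged sketch of the softmax action selection used above.
import numpy as np

q_values = np.array([0.1, 0.5, 0.2])   # hypothetical Q(s, a) for three actions
beta = 0.8
ps = np.exp(beta * q_values)
ps /= ps.sum()
action = np.random.choice(len(q_values), p=ps)   # higher Q -> higher probability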
[quality signals for bot/NFQ.py: avg_line_length 29.238095 | max_line_length 76 | alphanum_fraction 0.58089; remaining per-column signal values omitted]

13fe23236e035adcc7cad3112d9cc94bfc4481fa | 66,843 bytes | py | Python | TransitionListener/transitionFinder.py | tasicarl/TransitionListerner_public @ b231467e731f51521a85dd962cc08da07eca8226 | [ "MIT" ] | forks: 1 (2021-11-04T08:12:10.000Z)
"""
The transitionFinder module is used to calculate finite temperature
cosmological phase transitions: it contains functions to find the phase
structure as a function of temperature, and functions to find the transition
(bubble nucleation) temperature for each phase.
In contrast, :mod:`.pathDefomration` is useful for finding the tunneling
solution for a fixed potential or a potential at a fixed temperature.
The most directly used functions in this module will likely be
:func:`traceMultiMin` for finding the phase structure, and
:func:`findAllTransitions` and :func:`findCriticalTemperatures` for calculating
properties of the phase transitions.
"""
from collections import namedtuple
import numpy as np
from scipy import linalg, interpolate, optimize
from scipy.optimize import curve_fit
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
from . import pathDeformation
from . import tunneling1D
import sys
_traceMinimum_rval = namedtuple("traceMinimum_rval", "X T dXdT overX overT")
def traceMinimum(f, d2f_dxdt, d2f_dx2, x0, t0, tstop, dtstart, deltaX_target,
dtabsMax=20.0, dtfracMax=.25, dtmin=1e-3,
deltaX_tol=1.2, minratio=1e-2, verbose = False):
"""
Trace the minimum `xmin(t)` of the function `f(x,t)`, starting at `x0, t0`.
Parameters
----------
f : callable
The scalar function `f(x,t)` which needs to be minimized. The input will
be of the same type as `(x0,t0)`.
d2f_dxdt, d2f_dx2 : callable
Functions which return returns derivatives of `f(x)`. `d2f_dxdt` should
return the derivative of the gradient of `f(x)` with respect to `t`, and
`d2f_dx2` should return the Hessian matrix of `f(x)` evaluated at `t`.
Both should take as inputs `(x,t)`.
x0 : array_like
The initial starting point. Must be an array even if the potential is
one-dimensional (in which case the array should have length 1).
t0 : float
The initial starting parameter `t`.
tstop : float
Stop the trace when `t` reaches `tstop`.
dtstart : float
Initial stepsize.
deltaX_target : float
The target error in x at each step. Determines the
stepsize in t by extrapolation from last error.
dtabsMax : float, optional
dtfracMax : float, optional
The largest stepsize in t will be the LARGEST of
``abs(dtstart)*dtabsMax`` and ``t*dtfracMax``.
dtmin : float, optional
The smallest stepsize we'll allow before assuming the transition ends,
relative to `dtstart`
deltaX_tol : float, optional
``deltaX_tol*deltaX_target`` gives the maximum error in x
before we want to shrink the stepsize and recalculate the minimum.
minratio : float, optional
The smallest ratio between smallest and largest eigenvalues in the
Hessian matrix before treating the smallest eigenvalue as zero (and
thus signaling a saddle point and the end of the minimum).
Returns
-------
X, T, dXdT : array_like
Arrays of the minimum at different values of t, and
its derivative with respect to t.
overX : array_like
The point beyond which the phase seems to disappear.
overT : float
The t-value beyond which the phase seems to disappear.
Notes
-----
In prior versions, `d2f_dx2` was optional and called `d2f`, while `d2f_dxdt`
was calculated from an optional parameter `df` using finite differences. If
Neither of these were supplied, they would be calculated directly from
`f(x,t)` using finite differences. This lead to a messier calling signature,
since additional parameters were needed to find the finite differences. By
instead requiring that the derivatives be supplied, the task of creating the
derivative functions can be delegated to more general purpose routines
(see e.g. :class:`helper_functions.gradientFunction` and
:class:`helper_functions.hessianFunction`).
Also new in this version, `dtmin` and `dtabsMax` are now relative to
`dtstart`. The idea here is that there should be some required parameter
that sets the scale, and then optional parameters can set the tolerances
relative to this scale. `deltaX_target` is now not optional for the same
reasoning.
"""
if verbose:
print("traceMinimum t0 = %0.6g" % t0)
Ndim = len(x0)
M0 = d2f_dx2(x0,t0)
minratio *= min(abs(linalg.eigvalsh(M0)))/max(abs(linalg.eigvalsh(M0)))
def dxmindt(x,t):
M = d2f_dx2(x,t)
if abs(linalg.det(M)) < (1e-3*np.max(abs(M)))**Ndim:
# Assume matrix is singular
return None, False
b = -d2f_dxdt(x,t)
eigs = linalg.eigvalsh(M)
try:
dxdt = linalg.solve(M,b, overwrite_a=False, overwrite_b=False)
# dxdt = linalg.solve(M,b, overwrite_a=True, overwrite_b=True)
isneg = ((eigs <= 0).any() or min(eigs)/max(eigs) < minratio)
except:
dxdt = None
isneg = False
return dxdt, isneg
xeps = deltaX_target * 1e-2
def fmin(x,t):
return optimize.fmin(f, x, args=(t,), xtol=xeps, ftol=np.inf,
disp=False)
deltaX_tol = deltaX_tol * deltaX_target
tscale = abs(dtstart)
dtabsMax = dtabsMax * tscale
dtmin = dtmin * tscale
x,t,dt,xerr = x0,t0,dtstart,0.0
dxdt, negeig = dxmindt(x,t)
X,T,dXdT = [x],[t],[dxdt]
overX = overT = None
while dxdt is not None:
if verbose:
sys.stdout.write('.')
sys.stdout.flush()
# Get the values at the next step
tnext = t+dt
xnext = fmin(x+dxdt*dt, tnext)
dxdt_next, negeig = dxmindt(xnext,tnext)
if dxdt_next is None or negeig == True:
# We got stuck on a saddle, so there must be a phase transition
# there.
dt *= .5
overX, overT = xnext, tnext
else:
# The step might still be too big if it's outside of our error
# tolerance.
xerr = max(np.sum((x+dxdt*dt - xnext)**2),
np.sum((xnext-dxdt_next*dt - x)**2))**.5
if xerr < deltaX_tol: # Normal step, error is small
T.append(tnext)
X.append(xnext)
dXdT.append(dxdt_next)
if overT is None:
# change the stepsize only if the last step wasn't
# troublesome
dt *= deltaX_target/(xerr+1e-100)
x,t,dxdt = xnext, tnext, dxdt_next
overX = overT = None
else:
# Either stepsize was too big, or we hit a transition.
# Just cut the step in half.
dt *= .5
overX, overT = xnext, tnext
# Now do some checks on dt.
if abs(dt) < abs(dtmin):
# Found a transition! Or at least a point where the step is really
# small.
break
if dt > 0 and t >= tstop or dt < 0 and t <= tstop:
# Reached tstop, but we want to make sure we stop right at tstop.
dt = tstop-t
x = fmin(x+dxdt*dt, tstop)
dxdt,negeig = dxmindt(x,tstop)
t = tstop
X[-1], T[-1], dXdT[-1] = x,t,dxdt
break
dtmax = max(t*dtfracMax, dtabsMax)
if abs(dt) > dtmax:
dt = np.sign(dt)*dtmax
if overT is None:
overX, overT = X[-1], T[-1]
if verbose:
sys.stdout.write('\n')
sys.stdout.flush()
X = np.array(X)
T = np.array(T)
dXdT = np.array(dXdT)
return _traceMinimum_rval(X, T, dXdT, overX, overT)
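# --- Hedged usage sketch (not part of the original module) -------------------
# traceMinimum follows a minimum of f(x,t) as t varies. A tiny check with a
# quadratic whose minimum moves linearly, xmin(t) = (t, -t):
#
#     def f(x, t):
#         return (x[0] - t)**2 + (x[1] + t)**2
#     def d2f_dxdt(x, t):
#         return np.array([-2.0, 2.0])   # d/dt of grad f
#     def d2f_dx2(x, t):
#         return 2.0 * np.eye(2)         # Hessian of f
#
#     res = traceMinimum(f, d2f_dxdt, d2f_dx2, x0=np.array([0., 0.]), t0=0.0,
#                        tstop=1.0, dtstart=1e-3, deltaX_target=1e-3)
#     # res.X[-1] should be close to (1, -1).
# ------------------------------------------------------------------------------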
class Phase(object):
    """
    Describes a temperature-dependent minimum, plus second-order transitions
    to and from that minimum.

    Attributes
    ----------
    key : hashable
        A unique identifier for the phase (usually an int).
    X, T, dXdT : array_like
        The minima and its derivative at different temperatures.
    tck : tuple
        Spline knots and coefficients, used in `interpolate.splev`.
    low_trans : set
        Phases (identified by keys) which are joined by a second-order
        transition to this phase.
    high_trans : set
        Phases (identified by keys) which are joined by a second-order
        transition to this phase.
    """
    def __init__(self, key, X, T, dXdT):
        self.key = key
        # We shouldn't ever really need to sort the array, but there must be
        # some bug in the above code that makes it so that occasionally the last
        # step goes backwards. This should fix that.
        i = np.argsort(T)
        T, X, dXdT = T[i], X[i], dXdT[i]
        self.X = X
        self.T = T
        self.dXdT = dXdT
        # Make the spline:
        k = 3 if len(T) > 3 else 1
        tck, u = interpolate.splprep(X.T, u=T, s=0, k=k)
        self.tck = tck
        # Make default connections
        self.low_trans = set()
        self.high_trans = set()

    def valAt(self, T, deriv=0):
        """
        Find the minimum at the value `T` using a spline.

        Parameters
        ----------
        T : float or array_like
        deriv : int
            If deriv > 0, instead return the derivative of the minimum with
            respect to `T`. Can return up to the third derivative for cubic
            splines (when ``len(X) > 3``) or first derivative for linear
            splines.
        """
        T = np.asanyarray(T).T
        # bug fix: pass `deriv` through, as documented above
        y = interpolate.splev(T, self.tck, der=deriv)
        return np.asanyarray(y).T

    def addLinkFrom(self, other_phase):
        """
        Add a link from `other_phase` to this phase, checking to see if there
        is a second-order transition.
        """
        if np.min(self.T) >= np.max(other_phase.T):
            self.low_trans.add(other_phase.key)
            other_phase.high_trans.add(self.key)
        if np.max(self.T) <= np.min(other_phase.T):
            self.high_trans.add(other_phase.key)
            other_phase.low_trans.add(self.key)

    def __repr__(self):
        popts = np.get_printoptions()
        np.set_printoptions(formatter={'float': lambda x: "%0.4g" % x})
        if len(self.X) > 1:
            Xstr = "[%s, ..., %s]" % (self.X[0], self.X[-1])
        else:
            Xstr = "[%s]" % self.X[0]
        if len(self.T) > 1:
            Tstr = "[%0.4g, ..., %0.4g]" % (self.T[0], self.T[-1])
        else:
            Tstr = "[%0.4g]" % self.T[0]
        if len(self.dXdT) > 1:
            dXdTstr = "[%s, ..., %s]" % (self.dXdT[0], self.dXdT[-1])
        else:
            dXdTstr = "[%s]" % self.dXdT[0]
        s = "Phase(key=%s, X=%s, T=%s, dXdT=%s)" % (
            self.key, Xstr, Tstr, dXdTstr)
        np.set_printoptions(**popts)
        return s


def traceMultiMin(f, d2f_dxdt, d2f_dx2,
                  points, tLow, tHigh, deltaX_target,
                  dtstart=1e-6, tjump=1e-5, forbidCrit=None, verbose=False,
                  single_trace_args={}, local_min_args={}):
    """
    Trace multiple minima `xmin(t)` of the function `f(x,t)`.

    This function will trace the minima starting from the initial `(x,t)` values
    given in `points`. When a phase disappears, the function will search for
    new nearby minima, and trace them as well. In this way, if each minimum
    corresponds to a different phase, this function can find the (possibly)
    complete phase structure of the potential.

    Parameters
    ----------
    f : callable
        The scalar function `f(x,t)` which needs to be minimized. The input will
        be of the same type as each entry in the `points` parameter.
    d2f_dxdt, d2f_dx2 : callable
        Functions which return derivatives of `f(x)`. `d2f_dxdt` should
        return the derivative of the gradient of `f(x)` with respect to `t`, and
        `d2f_dx2` should return the Hessian matrix of `f(x)` evaluated at `t`.
        Both should take as inputs `(x,t)`.
    points : list
        A list of points [(x1,t1), (x2,t2),...] that we want to trace, where
        `x1`, `x2`, etc. are each a one-dimensional array.
    tLow, tHigh : float
        Lowest and highest temperatures between which to trace.
    deltaX_target : float
        Passed to :func:`traceMinimum` and used to set the tolerance in
        minimization.
    dtstart : float, optional
        The starting stepsize, relative to ``tHigh-tLow``.
    tjump : float, optional
        The jump in `t` from the end of one phase to the initial tracing point
        in another. If this is too large, intermediate phases may be skipped.
        Relative to ``tHigh-tLow``.
    forbidCrit : callable or None, optional
        A function that determines whether or not to forbid a phase with a given
        starting point. Should take a point `x` as input, and return True (if
        the phase should be discarded) or False (if the phase should be kept).
    single_trace_args : dict, optional
        Arguments to pass to :func:`traceMinimum`.
    local_min_args : dict, optional
        Arguments to pass to :func:`findApproxLocalMin`.

    Returns
    -------
    phases : dict
        A dictionary of :class:`Phase` instances. The keys in the dictionary
        are integers corresponding to the order in which the phases were
        constructed.
    """
    # dtstart = 1e-6
    # tjump = 1e-6
    # We want the minimization here to be very accurate so that we don't get
    # stuck on a saddle or something. This isn't much of a bottleneck.
    xeps = deltaX_target * 1e-2

    def fmin(x, t):
        return optimize.fmin(f, x + xeps, args=(t,), xtol=xeps*1e-3,
                             ftol=np.inf, disp=False)

    dtstart = dtstart * (tHigh - tLow)
    tjump = tjump * (tHigh - tLow)
    phases = {}
    nextPoint = []
    for p in points:
        x, t = p
        nextPoint.append([t, dtstart, fmin(x, t), None])
    while len(nextPoint) != 0:
        t1, dt1, x1, linkedFrom = nextPoint.pop()
        x1 = fmin(x1, t1)  # make sure we start as accurately as possible.
        # Check to see if this point is outside the bounds
        if t1 < tLow or (t1 == tLow and dt1 < 0):
            continue
        if t1 > tHigh or (t1 == tHigh and dt1 > 0):
            continue
        if forbidCrit is not None and forbidCrit(x1) == True:
            continue
        # Check to see if it's redundant with another phase
        for i in list(phases.keys()):
            phase = phases[i]
            if (t1 < min(phase.T[0], phase.T[-1]) or
                    t1 > max(phase.T[0], phase.T[-1])):
                continue
            x = fmin(phase.valAt(t1), t1)
            if np.sum((x - x1)**2)**.5 < 2*deltaX_target:
                # The point is already covered.
                # Skip this phase and change the linkage.
                if linkedFrom != i and linkedFrom is not None:
                    phase.addLinkFrom(phases[linkedFrom])
                break
        else:
            # The point is not already covered. Trace the phase.
            if verbose:
                print("Tracing phase starting at x =", x1, "; t =", t1)
            phase_key = len(phases)
            oldNumPoints = len(nextPoint)
            if (t1 > tLow):
                if verbose:
                    print("Tracing minimum down")
                down_trace = traceMinimum(f, d2f_dxdt, d2f_dx2, x1,
                                          t1, tLow, -dt1, deltaX_target,
                                          verbose=verbose, **single_trace_args)
                X_down, T_down, dXdT_down, nX, nT = down_trace
                t2, dt2 = nT - tjump, .1*tjump
                x2 = fmin(nX, t2)
                nextPoint.append([t2, dt2, x2, phase_key])
                if np.sum((X_down[-1] - x2)**2) > deltaX_target**2:
                    for point in findApproxLocalMin(f, X_down[-1], x2, (t2,)):
                        nextPoint.append([t2, dt2, fmin(point, t2), phase_key])
                X_down = X_down[::-1]
                T_down = T_down[::-1]
                dXdT_down = dXdT_down[::-1]
            if (t1 < tHigh):
                if verbose:
                    print("Tracing minimum up")
                up_trace = traceMinimum(f, d2f_dxdt, d2f_dx2, x1,
                                        t1, tHigh, +dt1, deltaX_target,
                                        verbose=verbose, **single_trace_args)
                X_up, T_up, dXdT_up, nX, nT = up_trace
                t2, dt2 = nT + tjump, .1*tjump
                x2 = fmin(nX, t2)
                nextPoint.append([t2, dt2, x2, phase_key])
                if np.sum((X_up[-1] - x2)**2) > deltaX_target**2:
                    for point in findApproxLocalMin(f, X_up[-1], x2, (t2,)):
                        nextPoint.append([t2, dt2, fmin(point, t2), phase_key])
            # Then join the two together
            if (t1 <= tLow):
                X, T, dXdT = X_up, T_up, dXdT_up
            elif (t1 >= tHigh):
                X, T, dXdT = X_down, T_down, dXdT_down
            else:
                X = np.append(X_down, X_up[1:], 0)
                T = np.append(T_down, T_up[1:], 0)
                dXdT = np.append(dXdT_down, dXdT_up[1:], 0)
            if forbidCrit is not None and (forbidCrit(X[0]) or
                                           forbidCrit(X[-1])):
                # The phase is forbidden.
                # Don't add it, and make it a dead-end.
                nextPoint = nextPoint[:oldNumPoints]
            elif len(X) > 1:
                newphase = Phase(phase_key, X, T, dXdT)
                if linkedFrom is not None:
                    newphase.addLinkFrom(phases[linkedFrom])
                phases[phase_key] = newphase
            else:
                # The phase is just a single point.
                # Don't add it, and make it a dead-end.
                nextPoint = nextPoint[:oldNumPoints]
    if verbose:
        print(phases)
    return phases


def findApproxLocalMin(f, x1, x2, args=(), n=100, edge=.05):
    """
    Find minima on a straight line between two points.

    When jumping between phases, we want to make sure that we
    don't jump over an intermediate phase. This function does a rough
    calculation to find any such intermediate phases.

    Parameters
    ----------
    f : callable
        The function `f(x)` to minimize.
    x1, x2 : array_like
        The points between which to find minima.
    args : tuple, optional
        Extra arguments to pass to `f`.
    n : int, optional
        Number of points to test for local minima.
    edge : float, optional
        Don't test for minima directly next to the input points. If ``edge==0``,
        the minima potentially go all the way to the input points. If ``edge==0.5``,
        the range of tested minima shrinks to a single point at the center of
        the two points.

    Returns
    -------
    list
        A list of approximate minima, with each minimum having the same shape
        as `x1` and `x2`.
    """
    x1, x2 = np.array(x1), np.array(x2)
    dx = np.sum((x1 - x2)**2)**.5
    # if dx < mindeltax:
    #     return np.array([]).reshape(0, len(x1))
    x = x1 + (x2 - x1)*np.linspace(edge, 1 - edge, n).reshape(n, 1)
    y = f(x, *args)
    i = (y[2:] > y[1:-1]) & (y[:-2] > y[1:-1])
    return x[1:-1][i]
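# --- Hedged usage sketch (not part of the original module) -------------------
# findApproxLocalMin samples f along the segment between x1 and x2 and keeps
# interior samples that beat both neighbors. For a 1-D double-well slice:
#
#     f = lambda x, *a: (np.sum(x**2, axis=-1) - 1.0)**2   # minima near |x| = 1
#     mins = findApproxLocalMin(f, np.array([-2.0]), np.array([2.0]))
#     # expect two rows, approximately [-1.] and [1.]
# ------------------------------------------------------------------------------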
def _removeRedundantPhase(phases, removed_phase, redundant_with_phase):
for key in removed_phase.low_trans:
if key != redundant_with_phase.key:
p = phases[key]
p.high_trans.discard(removed_phase.key)
redundant_with_phase.addLinkFrom(p)
for key in removed_phase.high_trans:
if key != redundant_with_phase.key:
p = phases[key]
p.low_trans.discard(removed_phase.key)
redundant_with_phase.addLinkFrom(p)
del phases[removed_phase.key]
def removeRedundantPhases(f, phases, xeps=1e-5, diftol=1e-2, verbose = False):
"""
Remove redundant phases from a dictionary output by :func:`traceMultiMin`.
Although :func:`traceMultiMin` attempts to only trace each phase once, there
are still instances where a single phase gets traced twice. If a phase is
included twice, the routines for finding transition regions and tunneling
get very confused. This attempts to avoid that problem.
Parameters
----------
f : callable
The function `f(x,t)` which was passed to :func:`traceMultiMin`.
phases : dict
The output of :func:`traceMultiMin`.
xeps : float, optional
Error tolerance in minimization.
diftol : float, optional
Maximum separation between two phases before they are considered to be
coincident.
Returns
-------
None
Notes
-----
If two phases are merged to get rid of redundancy, the resulting phase has
a key that is a string combination of the two prior keys.
.. todo:: Make sure to test removeRedundantPhases().
.. todo::
Possibly add extra logic to account for phases which coinincide
at one end but not the other.
Warning
-------
This hasn't been thoroughly tested yet.
"""
# I want to make the logic extremely simple at the cost of checking the
# same thing multiple times.
# There's just no way this function is going to be the bottle neck.
def fmin(x,t):
return np.array(optimize.fmin(f, x, args=(t,),
xtol=xeps, ftol=np.inf, disp=False))
has_redundant_phase = True
while has_redundant_phase:
has_redundant_phase = False
for i in list(phases.keys()):
for j in list(phases.keys()):
if i == j:
continue
phase1, phase2 = phases[i], phases[j]
tmax = min(phase1.T[-1], phase2.T[-1])
tmin = max(phase1.T[0], phase2.T[0])
if tmin > tmax: # no overlap in the phases
continue
if tmax == phase1.T[-1]:
x1 = phase1.X[-1]
else:
x1 = fmin(phase1.valAt(tmax), tmax)
if tmax == phase2.T[-1]:
x2 = phase2.X[-1]
else:
x2 = fmin(phase2.valAt(tmax), tmax)
dif = np.sum((x1-x2)**2)**.5
same_at_tmax = (dif < diftol)
if tmin == phase1.T[0]:
x1 = phase1.X[0]
else:
x1 = fmin(phase1.valAt(tmin), tmin)
if tmin == phase2.T[0]:
x2 = phase2.X[0]
else:
x2 = fmin(phase2.valAt(tmin), tmin)
dif = np.sum((x1-x2)**2)**.5
same_at_tmin = (dif < diftol)
if same_at_tmin and same_at_tmax:
# Phases are redundant
has_redundant_phase = True
p_low = phase1 if phase1.T[0] < phase2.T[0] else phase2
p_high = phase1 if phase1.T[-1] > phase2.T[-1] else phase2
if p_low is p_high:
p_reject = phase1 if p_low is phase2 else phase2
_removeRedundantPhase(phases, p_reject, p_low)
else:
i = p_low.T <= tmax
T_low = p_low.T[i]
X_low = p_low.X[i]
dXdT_low = p_low.dXdT[i]
i = p_high.T > tmax
T_high = p_high.T[i]
X_high = p_high.X[i]
dXdT_high = p_high.dXdT[i]
T = np.append(T_low, T_high, axis=0)
X = np.append(X_low, X_high, axis=0)
dXdT = np.append(dXdT_low, dXdT_high, axis=0)
newkey = str(p_low.key) + "_" + str(p_high.key)
newphase = Phase(newkey, X, T, dXdT)
phases[newkey] = newphase
_removeRedundantPhase(phases, p_low, newphase)
_removeRedundantPhase(phases, p_high, newphase)
break
elif same_at_tmin or same_at_tmax:
                    if verbose:
                        print("ERROR: Two phases coincide at one end but not "
                              "the other; the function needed to combine "
                              "them is not implemented.")
                    # raise NotImplementedError(
                    #     "Two phases have been found to coincide at one end "
                    #     "but not the other. Ideally, this function would "
                    #     "find where the two diverge, make a cut, and join them "
                    #     "such that there are no more phase redundancies.\n"
                    #     "Instead, just raise an exception."
                    # )
if has_redundant_phase:
break
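# Hedged sketch (illustrative, not part of the original module): the
# coincidence test used above, on a toy double-well potential. Two seeds that
# roll into the same minimum end up within `diftol` of each other. Assumes
# np and scipy's optimize are imported earlier in this module.
def _demo_coincidence_test(diftol=1e-2):
    def f(x, t):
        return (x[0]**2 - (1.0 - t))**2  # wells at +/- sqrt(1-t)
    t = 0.5
    x1 = optimize.fmin(f, [0.9], args=(t,), disp=False)
    x2 = optimize.fmin(f, [0.6], args=(t,), disp=False)
    dif = np.sum((x1 - x2)**2)**.5
    return dif < diftol  # True: both seeds find the same well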
def getStartPhase(phases, V=None):
"""
Find the key for the high-T phase.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
V : callable
The potential V(x,T). Only necessary if there are
multiple phases with the same Tmax.
"""
startPhases = []
startPhase = None
Tmax = None
assert len(phases) > 0
for i in list(phases.keys()):
if phases[i].T[-1] == Tmax:
# add this to the startPhases list.
startPhases.append(i)
elif Tmax is None or phases[i].T[-1] > Tmax:
startPhases = [i]
Tmax = phases[i].T[-1]
if len(startPhases) == 1 or V is None:
startPhase = startPhases[0]
else:
# more than one phase have the same maximum temperature
# Pick the stable one at high temp.
Vmin = None
for i in startPhases:
V_ = V(phases[i].X[-1], phases[i].T[-1])
if Vmin is None or V_ < Vmin:
Vmin = V_
startPhase = i
assert startPhase in phases
return startPhase
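# Minimal sketch (hypothetical stand-in objects, not the module's Phase
# class): getStartPhase only reads .T (and .X when V is given), so
# lightweight records are enough to illustrate it.
def _demo_getStartPhase():
    from types import SimpleNamespace
    phases = {
        'a': SimpleNamespace(T=np.array([0., 100.]), X=np.array([[0.], [0.]])),
        'b': SimpleNamespace(T=np.array([0., 80.]), X=np.array([[1.], [1.]])),
    }
    return getStartPhase(phases)  # -> 'a' (it extends to the highest T)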
def _tunnelFromPhaseAtT(T, phases, start_phase, V, dV,
phitol, overlapAngle, nuclCriterion,
fullTunneling_params, verbose, outdict):
"""
Find the lowest action tunneling solution.
Return ``nuclCriterion(S,T)``, and store a dictionary describing the
transition in outdict for key `T`.
"""
    try:
        T = T[0]  # needed when the function is run from optimize.fmin
    except (TypeError, IndexError):
        pass
if T in outdict:
return nuclCriterion(outdict[T]['action'], T)
def fmin(x):
return optimize.fmin(V, x, args=(T,),
xtol=phitol, ftol=np.inf, disp=False)
# Loop through all the phases, adding acceptable minima
x0 = fmin(start_phase.valAt(T))
V0 = V(x0, T)
tunnel_list = []
for key in phases.keys():
if key == start_phase.key:
continue
p = phases[key]
if (p.T[0] > T or p.T[-1] < T):
continue
x1 = fmin(p.valAt(T))
V1 = V(x1, T)
if V1 >= V0:
continue
tdict = dict(low_vev=x1, high_vev=x0, Tnuc=T,
low_phase=key, high_phase=start_phase.key)
tunnel_list.append(tdict)
# Check for overlap
if overlapAngle > 0:
excluded = []
cos_overlap = np.cos(overlapAngle * np.pi/180)
for i in range(1, len(tunnel_list)):
for j in range(i):
xi = tunnel_list[i]['low_vev']
xj = tunnel_list[j]['low_vev']
xi2 = np.sum((xi-x0)**2)
xj2 = np.sum((xj-x0)**2)
dotij = np.sum((xj-x0)*(xi-x0))
if dotij >= np.sqrt(xi2*xj2) * cos_overlap:
excluded.append(i if xi2 > xj2 else j)
for i in sorted(excluded)[::-1]:
del tunnel_list[i]
# Get rid of the T parameter for V and dV
def V_(x,T=T,V=V): return V(x,T)
def dV_(x,T=T,dV=dV): return dV(x,T)
# For each item in tunnel_list, try tunneling
lowest_action = np.inf
lowest_tdict = dict(action=np.inf)
for tdict in tunnel_list:
x1 = tdict['low_vev']
        try:
            if verbose:
                print("Tunneling from phase %s to phase %s at T=%0.4g"
                      % (tdict['high_phase'], tdict['low_phase'], T))
                print("high_vev =", tdict['high_vev'])
                print("low_vev =", tdict['low_vev'])
tobj = pathDeformation.fullTunneling(
[x1,x0], V_, dV_, callback_data=T,
**fullTunneling_params)
tdict['instanton'] = tobj
tdict['action'] = tobj.action
tdict['trantype'] = 1
except tunneling1D.PotentialError as err:
if err.args[1] == "no barrier":
tdict['trantype'] = 0
tdict['action'] = 0.0
elif err.args[1] == "stable, not metastable":
tdict['trantype'] = 0
tdict['action'] = np.inf
else:
print("Unexpected error message.")
raise
if tdict['action'] <= lowest_action:
lowest_action = tdict['action']
lowest_tdict = tdict
outdict[T] = lowest_tdict
return nuclCriterion(lowest_action, T)
def _tunnelFromPhaseAtTGW(T, phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl):
"""
Extends _tunnelFromPhaseAtT by a feature defining the sign of the action in
case that it is calculated as np.inf. This is useful since otherways in some
cases the used root finder brentq won't work: A different sign for f(a) and
f(b) is required, where a and b are the interval's boundaries. Additionally,
all potentially interesting points S(T) for the calculation of the derivative
of the action are passed over using SEucl and TEucl. Potentially interesting
points are defined as those, at which the nucleation criterion is fulfilled
up to np.abs(nuclCriterion(lowest_action, T)) < 1.
Attributes:
-----------
SEucl : list, float
Calculated actions while executing the program. Used to calculated the
derivative of the action in tunnelFromPhaseGW.
TEucl : list, float
The temperatures at which the interesting actions SEucl have been
calculated.
"""
sign = 1
if T <= start_phase.T[0]:
# Set the sign to -1 for temperatures below Tmin
# where Tmin is the lowest t of the start_phase
sign = -1
    try:
        T = T[0]  # needed when the function is run from optimize.fmin
    except (TypeError, IndexError):
        pass
if T in outdict:
return nuclCriterion(outdict[T]['action'], T)
def fmin(x):
return optimize.fmin(V, x, args=(T,), xtol=phitol, ftol=np.inf, disp=False)
# Loop through all the phases, adding acceptable minima
x0 = fmin(start_phase.valAt(T))
V0 = V(x0, T)
tunnel_list = []
for key in list(phases.keys()):
if key == start_phase.key:
continue
p = phases[key]
if (p.T[0] > T or p.T[-1] < T):
continue
x1 = fmin(p.valAt(T))
V1 = V(x1, T)
if V1 >= V0:
#if verbose:
# print(T, x1, x0, V1, V0)
continue
tdict = dict(low_vev=x1, high_vev=x0, Tnuc=T,
low_phase=key, high_phase=start_phase.key)
tunnel_list.append(tdict)
# Check for overlap
if overlapAngle > 0:
excluded = []
cos_overlap = np.cos(overlapAngle * np.pi/180)
for i in range(1, len(tunnel_list)):
for j in range(i):
xi = tunnel_list[i]['low_vev']
xj = tunnel_list[j]['low_vev']
xi2 = np.sum((xi-x0)**2)
xj2 = np.sum((xj-x0)**2)
dotij = np.sum((xj-x0)*(xi-x0))
if dotij >= np.sqrt(xi2*xj2) * cos_overlap:
excluded.append(i if xi2 > xj2 else j)
for i in sorted(excluded)[::-1]:
del tunnel_list[i]
# Get rid of the T parameter for V and dV
def V_(x,T=T,V=V): return V(x,T)
def dV_(x,T=T,dV=dV): return dV(x,T)
#print(T)
#lin = np.linspace(0, 200, 1000)
#V3 = np.zeros(lin.shape)
#for i in range(1000):
# V3[i] = V_([lin[i]])
#plt.plot(lin, V3)
#plt.show()
#print(tunnel_list)
# For each item in tunnel_list, try tunneling
# Set here the sign to get -np.inf if necessary.
lowest_action = sign*np.inf
lowest_tdict = dict(action=sign*np.inf)
for tdict in tunnel_list:
x1 = tdict['low_vev']
try:
if verbose:
print("Tunneling from phase %s to phase %s at T=%0.8g" % (tdict['high_phase'], tdict['low_phase'], T))
print("high_vev =", tdict['high_vev'])
print("low_vev =", tdict['low_vev'])
tobj = pathDeformation.fullTunneling(
[x1,x0], V_, dV_, callback_data=T,
**fullTunneling_params)
tdict['instanton'] = tobj
tdict['action'] = tobj.action
tdict['trantype'] = 1
except tunneling1D.PotentialError as err:
if err.args[1] == "no barrier":
if verbose:
print("No barrier!")
tdict['trantype'] = 0
tdict['action'] = 0.0
elif err.args[1] == "stable, not metastable":
if verbose:
print("Stable, not metastable!")
tdict['trantype'] = 0
tdict['action'] = np.inf
else:
if verbose:
print("Unexpected error message.")
raise
if tdict['action'] <= lowest_action:
lowest_action = tdict['action']
lowest_tdict = tdict
outdict[T] = lowest_tdict
    if verbose:
        # Print the currently calculated Euclidean action, the temperature,
        # their ratio, and the nucleation criterion at this point
        print("S = ", lowest_action, ", T = ", T, ", S/T = ", lowest_action/(T + 1e-100), ", S/T - crit = ", nuclCriterion(lowest_action, T), "\n")
    # Fill SEucl and TEucl to pass them back to tunnelFromPhaseGW
    if np.abs(nuclCriterion(lowest_action, T)) < 1:
        # To plot the nucleation criterion over the full range (see
        # plot_nucl_crit in tunnelFromPhaseGW), use this condition instead:
        # if np.abs(lowest_action/(T + 1e-100)) != np.inf:
        SEucl.append(lowest_action/(T + 1e-100))
        TEucl.append(T + 1e-100)
return nuclCriterion(lowest_action, T)
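# Sketch of why the sign flip above matters (illustrative only): brentq needs
# f(a) and f(b) to have different signs. If the action is effectively
# infinite at both ends of the bracket, S/T - 140 never changes sign;
# assigning a negative sign below the phase's Tmin restores the sign change.
# Large finite numbers stand in for +/- infinity here.
def _demo_sign_flip():
    def crit(T, Tmin=10.0):
        S_over_T = 1e8 if T > Tmin else -1e8
        return S_over_T - 140.0
    return optimize.brentq(crit, 5.0, 50.0)  # converges to ~Tmin = 10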
def _potentialDiffForPhase(T, start_phase, other_phases, V):
"""
    Return the minimum of V(other) - V(start_phase) over the other phases.
    The value is positive/negative when `start_phase` is stable/unstable.
"""
V0 = V(start_phase.valAt(T),T)
delta_V = np.inf
for phase in other_phases:
V1 = V(phase.valAt(T),T)
if V1-V0 < delta_V:
delta_V = V1-V0
return delta_V
def _maxTCritForPhase(phases, start_phase, V, Ttol):
"""
Find the maximum temperature at which `start_phase` is degenerate with one
of the other phases.
"""
other_phases = []
for phase in list(phases.values()):
if phase.key != start_phase.key:
other_phases.append(phase)
if len(other_phases) == 0:
# No other phases, just return the lowest temperature
return start_phase.T[0]
Tmin = min([phase.T[0] for phase in other_phases])
Tmax = max([phase.T[-1] for phase in other_phases])
Tmin = max(Tmin, start_phase.T[0])
Tmax = min(Tmax, start_phase.T[-1])
DV_Tmin = _potentialDiffForPhase(Tmin, start_phase, other_phases, V)
DV_Tmax = _potentialDiffForPhase(Tmax, start_phase, other_phases, V)
if DV_Tmin >= 0: return Tmin # stable at Tmin
if DV_Tmax <= 0: return Tmax # unstable at Tmax
return optimize.brentq(
_potentialDiffForPhase, Tmin, Tmax,
args=(start_phase, other_phases, V),
xtol=Ttol, maxiter=200, disp=False)
def tunnelFromPhase(phases, start_phase, V, dV, Tmax,
Ttol=1e-3, maxiter=100, phitol=1e-8, overlapAngle=45.0,
nuclCriterion=lambda S,T: S/(T+1e-100) - 140.0,
verbose=True,
fullTunneling_params={}):
"""
    Find the instanton and nucleation temperature for tunneling from
    `start_phase`.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
start_phase : Phase object
The metastable phase from which tunneling occurs.
V, dV : callable
The potential V(x,T) and its gradient.
Tmax : float
The highest temperature at which to try tunneling.
Ttol : float, optional
Tolerance for finding the nucleation temperature.
maxiter : int, optional
Maximum number of times to try tunneling.
phitol : float, optional
Tolerance for finding the minima.
overlapAngle : float, optional
If two phases are in the same direction, only try tunneling to the
closer one. Set to zero to always try tunneling to all available phases.
nuclCriterion : callable
Function of the action *S* and temperature *T*. Should return 0 for the
correct nucleation rate, > 0 for a low rate and < 0 for a high rate.
Defaults to ``S/T - 140``.
verbose : bool
If true, print a message before each attempted tunneling.
fullTunneling_params : dict
Parameters to pass to :func:`pathDeformation.fullTunneling`.
Returns
-------
dict or None
A description of the tunneling solution at the nucleation temperature,
or None if there is no found solution. Has the following keys:
- *Tnuc* : the nucleation temperature
- *low_vev, high_vev* : vevs for the low-T phase (the phase that the
instanton tunnels to) and high-T phase (the phase that the instanton
tunnels from).
- *low_phase, high_phase* : identifier keys for the low-T and high-T
phases.
- *action* : The Euclidean action of the instanton.
- *instanton* : Output from :func:`pathDeformation.fullTunneling`, or
None for a second-order transition.
- *trantype* : 1 or 2 for first or second-order transitions.
"""
outdict = {} # keys are T values
args = (phases, start_phase, V, dV,
phitol, overlapAngle, nuclCriterion,
fullTunneling_params, verbose, outdict)
Tmin = start_phase.T[0]
T_highest_other = Tmin
for phase in phases.values():
T_highest_other = max(T_highest_other, phase.T[-1])
Tmax = min(Tmax, T_highest_other)
assert Tmax >= Tmin
try:
Tnuc = optimize.brentq(_tunnelFromPhaseAtT, Tmin, Tmax, args=args,
xtol=Ttol, maxiter=maxiter, disp=False)
except ValueError as err:
if err.args[0] != "f(a) and f(b) must have different signs":
raise
if nuclCriterion(outdict[Tmax]['action'], Tmax) > 0:
if nuclCriterion(outdict[Tmin]['action'], Tmax) < 0:
                # tunneling *may* be possible. Find the minimum.
                # It's important to make an appropriate initial guess;
                # otherwise the minimization routine may get stuck in a
                # region where the action is infinite. Modify Tmax.
Tmax = _maxTCritForPhase(phases, start_phase, V, Ttol)
def abort_fmin(T, outdict=outdict, nc=nuclCriterion):
T = T[0] # T is an array of size 1
if nc(outdict[T]['action'], T) <= 0:
raise StopIteration(T)
try:
Tmin = optimize.fmin(_tunnelFromPhaseAtT, 0.5*(Tmin+Tmax),
args=args, xtol=Ttol*10, ftol=1.0,
maxiter=maxiter, disp=0,
callback=abort_fmin)[0]
except StopIteration as err:
Tmin = err.args[0]
if nuclCriterion(outdict[Tmin]['action'], Tmin) > 0:
# no tunneling possible
return None
Tnuc = optimize.brentq(
_tunnelFromPhaseAtT, Tmin, Tmax,
args=args, xtol=Ttol, maxiter=maxiter, disp=False)
else:
# no tunneling possible
return None
else:
# tunneling happens right away at Tmax
Tnuc = Tmax
rdict = outdict[Tnuc]
return rdict if rdict['trantype'] > 0 else None
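# Assumed usage example (mirrors the docstring above): supplying a custom
# nucleation criterion. The default corresponds to S/T = 140; a model with a
# different target value, say S/T = 100, could pass:
#
#     tunnelFromPhase(phases, start_phase, V, dV, Tmax,
#                     nuclCriterion=lambda S, T: S/(T + 1e-100) - 100.0)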
def tunnelFromPhaseGW(phases, start_phase, V, dV, Tmax, Ttol = 1e-3, maxiter = 100, phitol = 1e-8, overlapAngle = 45.0,
nuclCriterion = lambda S, T: S/(T+1e-100) - 140, verbose = False, fullTunneling_params={}):
"""
    Extends the function tunnelFromPhase by the calculation of the GW
    parameter beta/H.
    Find the instanton and nucleation temperature for tunneling from
    `start_phase`. Compute the derivative of the instanton action at
    the nucleation temperature to obtain the GW spectrum parameter
    beta/H. To do that, use _tunnelFromPhaseAtTGW instead of
    _tunnelFromPhaseAtT, which passes back sample points of the action
    S(T) as a function of temperature; the necessary derivative is then
    obtained by fitting a linear function to those points. To avoid
    numerical errors, an upper and a lower fit of the data (T > Tnuc,
    T < Tnuc) are calculated and compared. If an error occurs during
    this procedure, return a dictionary with a code explaining the
    reason and position of the error.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
start_phase : Phase object
The metastable phase from which tunneling occurs.
V, dV : callable
The potential V(x,T) and its gradient.
    Tmax : float
        The highest temperature at which to try tunneling. In
        findAllTransitionsGW this is the critical temperature found by
        findCriticalTemperatures.
Ttol : float, optional
Tolerance for finding the nucleation temperature.
maxiter : int, optional
Maximum number of times to try tunneling.
phitol : float, optional
Tolerance for finding the minima.
overlapAngle : float, optional
If two phases are in the same direction, only try tunneling to the
closer one. Set to zero to always try tunneling to all available phases.
nuclCriterion : callable
Function of the action *S* and temperature *T*. Should return 0 for the
correct nucleation rate, > 0 for a low rate and < 0 for a high rate.
Defaults to ``S/T - 140``.
verbose : bool
If true, print a message before each attempted tunneling.
fullTunneling_params : dict
Parameters to pass to :func:`pathDeformation.fullTunneling`.
Returns
-------
dict or None
A description of the tunneling solution at the nucleation temperature,
or None if there is no found solution. Has the following keys:
- *Tnuc* : the nucleation temperature
- *betaH*: the GW spectrum parameter beta/H
- *low_vev, high_vev* : vevs for the low-T phase (the phase that the
instanton tunnels to) and high-T phase (the phase that the instanton
tunnels from).
- *low_phase, high_phase* : identifier keys for the low-T and high-T
phases.
- *action* : The Euclidean action of the instanton.
- *instanton* : Output from :func:`pathDeformation.fullTunneling`, or
None for a second-order transition.
- *trantype* : 1 or 2 for first or second-order transitions.
"""
    rel_slope_error = 0.01  # maximum tolerated relative error of the fitted slope
    N_first_try = 50  # sample points generated above and below Tnuc for the first fit
    N_second_try = 100  # sample points generated if the first fit is too uncertain
    N_cutted_fit = 15  # minimum points per side required for the split fit
Tmin = start_phase.T[0]
if Tmin >= Tmax:
if verbose:
print("Found that Tmin is bigger than Tmax: Tunneling cannot occurr.")
errordict = dict(error = 10)
return errordict
SEucl = []
TEucl = []
outdict = {} # keys are T values
args = (phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl)
try:
# Calculate the nucleation temperature by finding the root of the nucleation criterion
if verbose:
print("\n# Brentq at Tmin = ", Tmin, " Tmax = ", Tmax)
Tnuc = optimize.brentq(_tunnelFromPhaseAtTGW, Tmin, Tmax, args=args, xtol=Ttol, maxiter=maxiter, disp=False)
if verbose:
print("# Brentq done, found Tnuc = ", Tnuc, "\n")
except:
if verbose:
print("No temperature at which the nucleation criterion can be fulfilled was found.")
errordict = dict(error = 7)
return errordict
def plot_nucl_crit(Tmin = Tmin, Tmax = Tmax, N = 100):
        # Plot the value of the nucleation criterion with N sample points
        # between Tmin and Tmax
Tminplot = Tmin
Tmaxplot = Tmax
T_list = np.linspace(Tminplot, Tmaxplot, N)
for T_extra in T_list:
try:
_tunnelFromPhaseAtTGW(T_extra, phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl)
except:
if verbose:
print("Had a problem adding the temperature ", T_extra, " to the outdict.")
STEucl = list(zip(TEucl,SEucl))
STEucl.sort()
SEucl_srt = [x for T, x in STEucl]
TEucl_srt = [y for y, S in STEucl]
nucl_list = np.zeros(len(SEucl_srt))
nucl2_list = np.zeros(len(SEucl_srt))
plt.plot(TEucl_srt, SEucl_srt)
plt.title("S/T vs. temperature")
#plt.plot(TEucl_srt, SEucl_srt, "x")
plt.xlim(Tminplot, Tmaxplot)
plt.yscale("log")
plt.grid()
#plt.savefig("nuclcrit_T_g_is_085_l_is_001")
plt.show()
for i in range(len(SEucl_srt)):
nucl_list[i] = nuclCriterion(SEucl_srt[i]*TEucl_srt[i], TEucl_srt[i])
plt.plot(TEucl_srt, np.abs(nucl_list))
plt.title("|Nucleation criterion| vs. temperature")
#plt.plot(TEucl_srt, np.abs(nucl_list), "x")
plt.xlim(Tminplot, Tmaxplot)
plt.yscale("log")
plt.grid()
#plt.savefig("nuclcrit_T_g_is_085_l_is_001")
plt.show()
for i in range(len(SEucl_srt)):
nucl2_list[i] = -nuclCriterion(SEucl_srt[i]*TEucl_srt[i], TEucl_srt[i]) + SEucl_srt[i]
plt.plot(TEucl_srt, nucl2_list)
plt.plot(TEucl_srt, SEucl_srt)
plt.title("action and criterion vs. temperature")
#plt.plot(TEucl_srt, np.abs(nucl_list), "x")
plt.xlim(Tminplot, Tmaxplot)
plt.yscale("log")
plt.grid()
#plt.savefig("nuclcrit_T_g_is_085_l_is_001")
plt.show()
#np.savetxt("T_plot.txt", TEucl_srt)
#np.savetxt("S_plot.txt", SEucl_srt)
#np.savetxt("N_plot.txt", nucl_list)
#plot_nucl_crit(Tmin = 46, Tmax = 51.3, N = 100)
    # Check whether there are enough sample points to calculate the necessary
    # derivative. If not, calculate more.
if len(TEucl) == 0:
if verbose:
print("Found a temperature which fulfills the nucleation criterion sufficiently well in brentq, but not good enough (|_tunnelFromPhaseAtT| < 1) to go on from here. I.e. most probably: a too-supercooled phase.")
errordict = dict(error = 0)
return errordict
if len(TEucl) == 1:
if verbose:
print("Found only one point that fulfills the nucleation criterion sufficiently well (|_tunnelFromPhaseAtT| < 1). This isn't suffient for a proper calculation of the beta parameter.")
errordict = dict(error = 1)
return errordict
    if len(TEucl) <= N_first_try:  # need more points for a good regression
        N_to_add = N_first_try - len(TEucl)
        if verbose:
            print("Found only", len(TEucl), "points. Add", N_to_add, "more to calculate the derivative of the Euclidean action properly.")
        TEucl_min = min(TEucl)
        TEucl_max = max(TEucl)
        TEucl_between = np.linspace(TEucl_min, TEucl_max, N_to_add)
        # Add more points so that there are N_first_try sample points in total
for T_extra in TEucl_between:
try:
_tunnelFromPhaseAtTGW(T_extra, phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl)
except:
if verbose:
print("Had a problem adding the temperature ", T_extra, " to the outdict.")
# Calculation of beta / H
def lin_fit(x, a, b):
# General linear function
return a*x + b
    def make_fit():
        # Make a linear fit using TEucl and SEucl.
        # Return the slope, its error, and whether the covariance matrix
        # has an infinitely large element.
STEucl = list(zip(TEucl,SEucl))
STEucl.sort()
SEucl_srt = [x for T, x in STEucl]
TEucl_srt = [y for y, S in STEucl]
if verbose:
print("\nFit through ", len(TEucl_srt), " points.")
np.seterr(over = 'ignore')
try:
params, covariance_matrix = curve_fit(lin_fit, TEucl_srt, SEucl_srt)
except:
if verbose:
print("Fit was not successfull.")
return -1, 1, True
errors = np.sqrt(np.diag(np.abs(covariance_matrix)))
np.seterr(over = 'warn')
def plot_fit():
plt.plot(TEucl_srt, SEucl_srt, "X")
a = np.zeros(len(TEucl_srt))
for i in range(len(TEucl_srt)):
a[i] = params[0]*TEucl_srt[i] + params[1]
plt.plot(TEucl_srt, a)
plt.axvline(x=Tnuc)
plt.show()
#plot_fit()
        covisinf = np.any(np.isinf(covariance_matrix))
        if verbose:
            print("Fit was successful.")
return params[0], errors[0], covisinf
def make_cutted_fit():
        # Make two linear fits above and below Tnuc using TEucl and SEucl.
        # If not enough points are available on either side, fall back to
        # the single linear regression make_fit. If the two slopes coincide
        # within the relative tolerance rel_slope_error, return a weighted
        # mean of them (weights: number of sample points of the respective
        # fit). If only one of the fits is trustworthy, return its values
        # instead.
STEucl = list(zip(TEucl,SEucl))
STEucl.sort()
SEucl_srt = [x for T, x in STEucl]
TEucl_srt = [y for y, S in STEucl]
SEucl_srt_lo = []
TEucl_srt_lo = []
SEucl_srt_hi = []
TEucl_srt_hi = []
for i in range(len(TEucl_srt)):
if TEucl_srt[i] < Tnuc:
TEucl_srt_lo.append(TEucl_srt[i])
SEucl_srt_lo.append(SEucl_srt[i])
else:
TEucl_srt_hi.append(TEucl_srt[i])
SEucl_srt_hi.append(SEucl_srt[i])
if verbose:
print("\nFit through ", len(TEucl_srt_lo), " points below and ", len(TEucl_srt_hi), " points above Tnuc.")
if len(TEucl_srt_lo) < N_cutted_fit or len(TEucl_srt_hi) < N_cutted_fit:
if verbose:
print("Not enough points for a fit below or above. Try fitting all points in one line.")
return make_fit()
np.seterr(over = 'ignore')
try:
params_hi, covariance_matrix_hi = curve_fit(lin_fit, TEucl_srt_hi, SEucl_srt_hi)
errors_hi = np.sqrt(np.diag(np.abs(covariance_matrix_hi)))
except:
if verbose:
print("Upper fit was not successfull.")
return -1, 1, True
try:
params_lo, covariance_matrix_lo = curve_fit(lin_fit, TEucl_srt_lo, SEucl_srt_lo)
errors_lo = np.sqrt(np.diag(np.abs(covariance_matrix_lo)))
except:
if verbose:
print("Lower fit was not successfull.")
return -1, 1, True
np.seterr(over = 'warn')
def plot_fit():
plt.plot(TEucl_srt_lo, SEucl_srt_lo, "X")
plt.plot(TEucl_srt_hi, SEucl_srt_hi, "X")
a = np.zeros(len(TEucl_srt))
b = np.zeros(len(TEucl_srt))
for i in range(len(TEucl_srt)):
a[i] = params_hi[0]*TEucl_srt[i] + params_hi[1]
b[i] = params_lo[0]*TEucl_srt[i] + params_lo[1]
plt.plot(TEucl_srt, a)
plt.plot(TEucl_srt, b)
plt.axvline(x=Tnuc)
plt.show()
#plot_fit()
        if np.abs((params_lo[0] - params_hi[0]) / params_hi[0]) < rel_slope_error:
            # upper and lower slopes are compatible
            covisinf = np.any(np.isinf(covariance_matrix_hi))
            covisinf = covisinf or np.any(np.isinf(covariance_matrix_lo))
            slope_mean = (params_lo[0] * len(TEucl_srt_lo) + params_hi[0] * len(TEucl_srt_hi)) / len(TEucl_srt)
            errors = max(errors_hi[0], errors_lo[0])
            return slope_mean, errors, covisinf
        elif np.abs(errors_hi[0]/params_hi[0]) < rel_slope_error:
            covisinf = np.any(np.isinf(covariance_matrix_hi))
            return params_hi[0], errors_hi[0], covisinf
        elif np.abs(errors_lo[0]/params_lo[0]) < rel_slope_error:
            covisinf = np.any(np.isinf(covariance_matrix_lo))
            return params_lo[0], errors_lo[0], covisinf
else:
return make_fit()
slope, slope_error, covisinf = make_cutted_fit()
    # Test whether the calculated derivative is certain enough;
    # otherwise, calculate more sample points and retry.
    if np.abs(slope_error/slope) > rel_slope_error:
        if verbose:
            print("Found a regression with a too high uncertainty. Calculating more sample points.")
TEucl_min = min(TEucl)
TEucl_max = max(TEucl)
TEucl_between = np.linspace(TEucl_min, TEucl_max, N_second_try)
for T_extra in TEucl_between:
try:
_tunnelFromPhaseAtTGW(T_extra, phases, start_phase, V, dV, phitol, overlapAngle, nuclCriterion, fullTunneling_params, verbose, outdict, SEucl, TEucl)
except:
if verbose:
print("Had a problem adding the temperature ", T_extra, " to the outdict.")
slope, slope_error, covisinf = make_cutted_fit()
    if np.abs(slope_error/slope) > rel_slope_error:  # check if still uncertain
        if covisinf:
            if verbose:
                print("At least two identical sample points entered the linear regression for beta, yielding an infinite covariance matrix.")
                print("Slope:", slope, "Slope error:", slope_error, "Relative slope error:", slope_error/slope, "Covariance matrix is infinite:", covisinf)
            errordict = dict(error = 8)
            return errordict
        if verbose:
            print("Adding more sample points for the derivative of the Euclidean action didn't yield a result with a relative error below rel_slope_error.")
            print("Slope:", slope, "Slope error:", slope_error, "Relative slope error:", slope_error/slope, "Covariance matrix is infinite:", covisinf)
        errordict = dict(error = 3)
        return errordict
    # Calculate beta/H as given by
    #     beta/H = Tnuc * d(S/T)/dT  evaluated at T = Tnuc
    # (SEucl stores S/T, so the fitted slope is exactly d(S/T)/dT)
    # and pass it to the dictionary.
rdict = outdict[Tnuc]
rdict['betaH'] = Tnuc * slope
if rdict['betaH'] < 0:
if verbose:
print("Due to some strange reason a negative beta was calculated. This shouldn't happen at all.")
errordict = dict(error = 2)
return errordict
if not rdict['trantype'] > 0:
if verbose:
print("Due to some strange reason a neither first, nor second-order transition has been documented. This shouldn't happen at all.")
errordict = dict(error = 6)
return errordict
return rdict
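# Illustrative sketch of the beta/H extraction performed above (assumes np
# and curve_fit are imported earlier in this module). Since SEucl stores S/T,
# beta/H = Tnuc * d(S/T)/dT at T = Tnuc, and a linear fit of the sampled
# (T, S/T) pairs yields that slope directly.
def _demo_betaH():
    Tnuc = 50.0
    T = np.linspace(45.0, 55.0, 50)
    S_over_T = 140.0 + 12.0*(T - Tnuc)  # toy samples with slope 12
    params, _ = curve_fit(lambda t, a, b: a*t + b, T, S_over_T)
    return Tnuc * params[0]  # -> ~600 = beta/H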
def secondOrderTrans(high_phase, low_phase, Tstr='Tnuc'):
"""
Assemble a dictionary describing a second-order phase transition.
"""
rdict = {}
rdict[Tstr] = 0.5*(high_phase.T[0] + low_phase.T[-1])
rdict['low_vev'] = rdict['high_vev'] = high_phase.X[0]
rdict['low_phase'] = low_phase.key
rdict['high_phase'] = high_phase.key
rdict['action'] = 0.0
rdict['instanton'] = None
rdict['trantype'] = 2
# MANUALLY ADDED GW PARAMETERS
rdict['alpha'] = -2.
rdict['beta'] = -2.
return rdict
def findAllTransitions(phases, V, dV, tunnelFromPhase_args={}):
"""
Find the complete phase transition history for the potential `V`.
This functions uses :func:`tunnelFromPhase` to find the transition
temperature and instanton for each phase, starting at the highest phase
in the potential. Note that if there are multiple transitions that could
occur at the same minimum (if, for example, there is a Z2 symmetry or
a second-order transition breaks in multiple directions), only one of the
transitions will be used.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
V, dV : callable
The potential function and its gradient, each a function of field
value (which should be an array, not a scalar) and a temperature.
tunnelFromPhase_args : dict
Parameters to pass to :func:`tunnelFromPhase`.
Returns
-------
list of transitions
Each item is a dictionary describing the transition (see
:func:`tunnelFromPhase` for keys). The first transition is the one at
the highest temperature.
"""
phases = phases.copy()
start_phase = phases[getStartPhase(phases, V)]
Tmax = start_phase.T[-1]
transitions = []
while start_phase is not None:
del phases[start_phase.key]
trans = tunnelFromPhase(phases, start_phase, V, dV, Tmax,
**tunnelFromPhase_args)
if trans is None and not start_phase.low_trans:
start_phase = None
elif trans is None:
low_key = None
for key in start_phase.low_trans:
if key in phases:
low_key = key
break
if low_key is not None:
low_phase = phases[low_key]
transitions.append(secondOrderTrans(start_phase, low_phase))
start_phase = low_phase
Tmax = low_phase.T[-1]
else:
start_phase = None
else:
transitions.append(trans)
start_phase = phases[trans['low_phase']]
Tmax = trans['Tnuc']
return transitions
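# Hedged usage sketch (hypothetical names): the typical call sequence, with
# `model.Vtot` and `model.gradV` standing in for a user-supplied potential
# and its gradient.
#
#     phases = traceMultiMin(model.Vtot, ...)
#     removeRedundantPhases(model.Vtot, phases)
#     transitions = findAllTransitions(phases, model.Vtot, model.gradV)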
def findAllTransitionsGW(phases, V, dV, transC, verbose = False, tunnelFromPhase_args={}):
"""
    Works just like findAllTransitions, but calls tunnelFromPhaseGW instead
    of tunnelFromPhase, so beta/H is calculated as well and added to the
    returned dictionaries. The additional input transC supplies the critical
    temperature from findCriticalTemperatures, which bounds the tunneling
    search from above.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
V, dV : callable
The potential function and its gradient, each a function of field
value (which should be an array, not a scalar) and a temperature.
    transC : dict
        Found transition (including a critical temperature) from
        findCriticalTemperatures; its 'Tcrit' entry is used as the upper
        bound of the tunneling search.
    verbose : bool
        If True, print progress information during the calculation.
tunnelFromPhase_args : dict
Parameters to pass to :func:`tunnelFromPhase`.
Returns
-------
list of transitions
Each item is a dictionary describing the transition (see
:func:`tunnelFromPhase` for keys). The first transition is the one at
the highest temperature. In the case of an error, the dictionary will
have the key 'error' in which an explanatory errorkey is saved.
"""
phases = phases.copy()
start_phase = phases[getStartPhase(phases, V)]
#start_phase = phases[1]
#Tmax = start_phase.T[-1]
Tmax = transC['Tcrit']
transitions = []
while start_phase is not None:
del phases[start_phase.key]
trans = tunnelFromPhaseGW(phases, start_phase, V, dV, Tmax, verbose = verbose, **tunnelFromPhase_args)
if 'error' in trans:
return trans
if trans is None and not start_phase.low_trans:
start_phase = None
elif trans is None:
low_key = None
for key in start_phase.low_trans:
if key in phases:
low_key = key
break
if low_key is not None:
low_phase = phases[low_key]
transitions.append(secondOrderTrans(start_phase, low_phase))
start_phase = low_phase
Tmax = low_phase.T[-1]
else:
start_phase = None
else:
if "low_phase" not in trans:
trans["error"] = 8
transitions.append(trans)
return transitions
transitions.append(trans)
start_phase = phases[trans['low_phase']]
Tmax = trans['Tnuc']
        start_phase = None  # stop after one transition; avoid rerunning on the T interval below the previous Tmin
return transitions
def findCriticalTemperatures(phases, V, start_high=False):
"""
Find all temperatures `Tcrit` such that there is degeneracy between any
two phases.
Parameters
----------
phases : dict
Output from :func:`traceMultiMin`.
V : callable
The potential function `V(x,T)`, where `x` is the field value (which
should be an array, not a scalar) and `T` is the temperature.
start_high : bool, optional
If True, only include those transitions which could be reached starting
from the high-T phase. NOT IMPLEMENTED YET.
Returns
-------
list of transitions
        Transitions are sorted by their high-T phase key in decreasing order
        (originally by decreasing critical temperature; see the note at the
        sort below). Each transition is a
        dictionary with the following keys:
- *Tcrit* : the critical temperature
- *low_vev, high_vev* : vevs for the low-T phase (the phase that the
model transitions to) and high-T phase (the phase that the model
transitions from).
- *low_phase, high_phase* : identifier keys for the low-T and high-T
phases.
- *trantype* : 1 or 2 for first or second-order transitions.
"""
transitions = []
for i in list(phases.keys()):
for j in list(phases.keys()):
if i == j:
continue
# Try going from i to j (phase1 -> phase2)
phase1, phase2 = phases[i], phases[j]
#print(phase1, "\n", phase2)
tmax = min(phase1.T[-1], phase2.T[-1])
tmin = max(phase1.T[0], phase2.T[0])
#print(tmin, tmax)
if tmin >= tmax:
# No overlap. Try for second-order.
if phase2.key in phase1.low_trans:
transitions.append(
secondOrderTrans(phase1, phase2, 'Tcrit'))
continue
def DV(T):
return V(phase1.valAt(T), T) - V(phase2.valAt(T), T)
if DV(tmin) < 0:
# phase1 is lower at tmin, no tunneling
continue
if DV(tmax) > 0:
#phase1 is higher even at tmax, no critical temperature
continue
Tcrit = optimize.brentq(DV, tmin, tmax, disp=False)
tdict = {}
tdict['Tcrit'] = Tcrit
tdict['high_vev'] = phase1.valAt(Tcrit)
tdict['high_phase'] = phase1.key
tdict['low_vev'] = phase2.valAt(Tcrit)
tdict['low_phase'] = phase2.key
tdict['trantype'] = 1
transitions.append(tdict)
if not start_high:
return sorted(transitions, key=lambda x: x['high_phase'])[::-1] # changed, before key=lambda x: x['Tcrit']
start_phase = getStartPhase(phases, V)
raise NotImplementedError("start_high=True not yet supported")
def addCritTempsForFullTransitions(phases, crit_trans, full_trans):
"""
For each transition dictionary in `full_trans`, find the corresponding
transition in `crit_trans` and add it to the dictionary for the key
`crit_trans`, or add None if no corresponding transition is found.
Notes
-----
The phases in the supercooled transitions might not be exactly
the same as the phases in the critical temperature transitions. This would
be the case, for example, if in `full_trans` the phase transitions go like
1 -> 2 -> 3, but in `crit_trans` they go like 1 -> (2 or 3).
Parameters
----------
phases : dict
crit_trans : list
full_trans : list
"""
parents_dict = {}
for i in list(phases.keys()):
parents = [i]
for tcdict in crit_trans[::-1]:
j = tcdict['high_phase']
if tcdict['low_phase'] in parents and j not in parents:
parents.append(j)
parents_dict[i] = parents
for tdict in full_trans:
low_parents = parents_dict[tdict['low_phase']]
high_parents = parents_dict[tdict['high_phase']]
common_parents = set.intersection(
set(low_parents), set(high_parents))
for p in common_parents:
# exclude the common parents
try:
k = low_parents.index(p)
low_parents = low_parents[:k]
            except ValueError: pass
try:
k = high_parents.index(p)
high_parents = high_parents[:k+1]
            except ValueError: pass
for tcdict in crit_trans[::-1]: # start at low-T
if tcdict['Tcrit'] < tdict['Tnuc']:
continue
if (tcdict['low_phase'] in low_parents
and tcdict['high_phase'] in high_parents):
tdict['crit_trans'] = tcdict
break
else:
tdict['crit_trans'] = None
# ==== red-scare/instance-generators/make-words.py (repo: Sebastian-ba/DoDoBing / ITU-2019/DoDoBing, license: MIT) ====
import sys
import random
import networkx as nx
from write_nx_graph import write_graph
uncommons = set() # Everything except the 3300 common words in SGB
f = open('data/words.txt', 'r')
words = []
for line in f:
if len(line)>0 and line[0] == '*': continue
word = line.strip()[:5]
words.append(word)
if not (len(line.strip())>5 and line[5] == '*'): uncommons.add(word)
f.close()
def starredwords(word, numstars):
# given 'AWORD' returns ['*WORD', 'A*ORD',...,'AWOR*']
if numstars == 1: return [word[:i] + '*' +word[i+1:] for i in range(5)]
else: return [word[:i] + '*' + word[i+1:j] + '*' + word[j+1:] for j in range (1,5) for i in range(j)]
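# Quick check (illustrative):
#   starredwords('aword', 1) -> ['*word', 'a*ord', 'aw*rd', 'awo*d', 'awor*']
#   starredwords('aword', 2) yields all ten two-star variants.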
def _numvowels(word):
# returns the number of vowels in word
counter = 0
for c in word: counter += (c in 'aeiou')
return counter
def sorted_letters(word):
    # Return the word's letters in alphabetical order; renamed to avoid
    # shadowing the built-in sorted().
    return "".join(sorted(word))
class Words(nx.Graph):
def __init__(self,L, numstars):
nx.Graph.__init__(self)
self.add_nodes_from(L)
N = dict()
for word in L:
for starred in starredwords(word,numstars):
if not starred in N:
N[starred] = set([word])
else: N[starred].add(word)
            s = sorted_letters(word)
if not s in N:
N[s] = set([word])
else:
N[s].add(word)
S = set(L)
for word in self.nodes():
for starred in starredwords(word,numstars):
for neighbour in N[starred]:
if word != neighbour:
self.add_edge(word, neighbour)
            for neighbour in N[sorted_letters(word)]:
if word != neighbour:
self.add_edge(word, neighbour)
def wordgraph(n, numstars, musthaves):
L = []
for word in words:
if word not in musthaves: L.append(word)
random.seed(0)
random.shuffle(L)
L = L[:n-len(musthaves)]
L.extend(musthaves)
return Words(L, numstars)
def write_rusties():
for n in [2000, 2500, 3000, 3500, 4000, 4500, 5000, 10000]:
for numstars in [1,2]:
G = wordgraph(n, numstars, ['begin','ender','rusty'])
name = "rusty-{0}-{1}".format(numstars, len(G))
write_graph(G, name, 'begin', 'ender', ['rusty'])
# write a small graph as well:
G = Words(words, 1)
V = set()
P = nx.all_shortest_paths(G, 'begin', 'rusty')
for L in P:
for w in L: V.add(w)
P = nx.all_shortest_paths(G, 'ender', 'rusty')
for L in P:
for w in L: V.add(w)
L = list(V)
for v in L:
V.add(v)
G = Words(V, 1)
name = "rusty-1-{0}".format(len(G))
write_graph(G, name, 'begin', 'ender', ['rusty'])
def write_commons():
for n in [20, 50, 100, 250, 500,1000,1500,2000, 2500, 3000, 3500, 4000, 4500, 5000, 10000]:
for numstars in [1,2]:
G = wordgraph(n, numstars, ['start', 'ender'])
name = "common-{0}-{1}".format(numstars,len(G))
R = [word for word in G.nodes() if word in uncommons]
write_graph(G, name, 'start', 'ender', R)
write_rusties()
write_commons()
# ==== scripts/strelka-2.9.2.centos6_x86_64/share/scoringModelTraining/somatic/bin/vcf_to_feature_csv.py (repo: dongxuemin666/RNA-combine, license: Apache-2.0) ====
#!/usr/bin/env python2
#
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
Convert a Strelka somatic VCF to CSV format, annotate TP and FP given a
truth VCF and FP / ambiguous region bed files.
"""
__author__ = "Peter Krusche <pkrusche@illumina.com>"
import os
import sys
import pandas
scriptDir = os.path.abspath(os.path.dirname(__file__))
scriptName = os.path.basename(__file__)
workflowDir = os.path.abspath(os.path.join(scriptDir, "../lib"))
sys.path.append(workflowDir)
import evs
import evs.features
from evs.tools.bedintervaltree import BedIntervalTree
def parseArgs():
import argparse
parser = argparse.ArgumentParser(description="Converts somatic VCF to annotated CSV",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("input", help="Strelka VCF file", nargs=1)
parser.add_argument("-o", "--output", required=True,
help="Output CSV filename for training data")
parser.add_argument("--testSet", action='append', help="Chromosome (e.g. chr20) to hold out as test data (may be specified more than once; if omitted, all data will be used for training)")
parser.add_argument("--testOutput", help="Output CSV filename for test data")
parser.add_argument("--truth", help="Truth VCF file")
parser.add_argument("--fp-regions", dest="fpRegionsFile",
help="Bed file indicating regions where variants that are not true can be labeled as false positives. Outside of these regions variants will be labeled as unknown.")
parser.add_argument("--ambiguous", dest="ambiguousRegionsFiles", action='append',
help="Bed file conforming to the curium ambiguous region file format"
" (may be specified more than once)")
parser.add_argument("--features", required=True,
choices=evs.features.FeatureSet.sets.keys(),
help="Select a feature table to output.")
args = parser.parse_args()
def checkFile(filename, label) :
if not os.path.isfile(filename) :
raise Exception("Can't find input %s file: '%s'" % (label,filename))
def checkOptionalFile(filename, label) :
if filename is None : return
checkFile(filename, label)
checkOptionalFile(args.truth,"truth")
checkOptionalFile(args.fpRegionsFile,"false positive regions")
if args.ambiguousRegionsFiles is not None :
for ambiguousRegionsFile in args.ambiguousRegionsFiles :
checkFile(ambiguousRegionsFile,"ambiguous regions")
return args
def main():
args = parseArgs()
fset = evs.features.FeatureSet.make(args.features)
featuretable = fset.collect(args.input[0])
featuretable["tag"] = "FP" # If no truth set is specified, label all variants as FP. Useful for normal-normal.
if args.truth:
fset2 = evs.features.FeatureSet.make("posandalleles")
truth_alleles = fset2.collect(args.truth)
truth_alleles["tag"] = "TP"
featuretable = pandas.merge(featuretable, truth_alleles, how="outer", on=["CHROM", "POS", "REF", "ALT"],
suffixes=(".query", ".truth"))
featuretable["tag.truth"].fillna("", inplace=True)
featuretable["tag.query"].fillna("", inplace=True)
featuretable.loc[(featuretable["tag.query"] == "FP") & (featuretable["tag.truth"] == "TP"), "tag"] = "TP"
featuretable.loc[(featuretable["tag.query"] == "") & (featuretable["tag.truth"] == "TP"), "tag"] = "FN"
featuretable.loc[(featuretable["tag.query"] == "FP") & (featuretable["tag.truth"] == ""), "tag"] = "FP"
to_keep = [x for x in list(featuretable) if not x.endswith(".query") and not x.endswith(".truth")]
featuretable = featuretable[to_keep]
if args.ambiguousRegionsFiles or args.fpRegionsFile:
#
# 1. Load all false positive and ambiguous region information into labeledIntervals
#
labeledIntervals = BedIntervalTree()
if args.fpRegionsFile:
labeledIntervals.addFromBed(args.fpRegionsFile, "FP")
if args.ambiguousRegionsFiles:
# can have multiple ambiguous BED files
for ambiguousRegionsFile in args.ambiguousRegionsFiles:
labeledIntervals.addFromBed(ambiguousRegionsFile, lambda xe: xe[4])
#
        # 2. Resolve all interaction rules between truth sets, fp and ambiguous regions to produce a final labeling
#
areFPRegionsProvided = (labeledIntervals.count("FP") > 0) or (labeledIntervals.count("fp") > 0 and args.ambiguousRegionsFiles)
def relabeller(xx):
"""
Resolve various rules regarding how variants should interact with the fp and ambiguous regions they
intersect.
Rules:
- All TP and FN calls are untouched -- even if they fall in a false positive or ambiguous region
- Otherwise...
- Any call intersecting an FP region is labeled as "FP", regardless of ambiguous region input
- Any call intersecting an ambiguous region gets a comma separated list of all ambiguous region labels
- Any call falling outside of an ambiguous or fp region will be labeled as:
            - FP if no fp regions are given and the ambiguous region file contains no false positive regions
- UNK otherwise.
"""
if xx["tag"] == "TP" or xx["tag"] == "FN":
return xx
chrom = xx["CHROM"]
start = xx["POS"]
stop = xx["POS"] + len(xx["REF"])
overlap = labeledIntervals.intersect(chrom, start, stop)
is_fp = False
is_ambi = False
classes_this_pos = set()
for o in overlap:
reason = o.value[0].upper()
classes_this_pos.add(reason)
if reason == "FP":
is_fp = True
else:
is_ambi = True
if is_fp:
xx["tag"] = "FP"
elif is_ambi:
xx["tag"] = ",".join(list(classes_this_pos))
elif not areFPRegionsProvided:
# when we don't have FP regions, unk stuff becomes FP
xx["tag"] = "FP"
else:
xx["tag"] = "UNK"
return xx
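        # Worked example of the rules above (illustrative): a call tagged ""
        # that overlaps an FP interval becomes "FP"; one overlapping only
        # regions labeled "ambi1" and "ambi2" becomes "ambi1,ambi2" (set
        # order may vary); one overlapping nothing becomes "UNK" when FP
        # regions were provided, and "FP" otherwise.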
featuretable = featuretable.apply(relabeller, axis=1)
if args.testSet is not None:
if args.testOutput is not None:
featuretable[featuretable["CHROM"].isin(args.testSet)].to_csv(args.testOutput)
featuretable = featuretable[~featuretable["CHROM"].isin(args.testSet)]
featuretable.to_csv(args.output)
if __name__ == '__main__':
main()
# ==== qiskit_neko/backend_plugin.py (repo: garrison/qiskit-neko, license: Apache-2.0) ====
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Backend plugin interface."""
import abc
import logging
import stevedore
LOG = logging.getLogger(__name__)
class BackendPlugin(abc.ABC):
"""Abstract class for providing :class:`~qiskit.providers.Backend` objects to tests
This class is designed to be implemented by qiskit providers packages or any
other user that needs to provide custom backend objects to the test suite.
    In general, the authentication and initialization needed to work with
    backends from different vendors or simulators are all unique.
    qiskit-neko provides the backend plugin interface to offer a standard
    way to return backend objects while leaving the specifics of
    authentication and provider initialization to each plugin.
"""
@abc.abstractmethod
def get_backend(self, backend_selection=None):
"""Return the Backend object to run tests on.
:param str backend_selection: An optional user supplied value to select
a specific backend. The exact behavior of this option is up to
each individual plugin and should be clearly documented in the
plugin how this is used if at all. If the plugin doesn't support
a selection string a string should still be accepted and a warning
just logged. If a string is provided (and they're accepted) but
the string is invalid raising an exception is expected.
"""
pass
class BackendPluginManager:
"""Class to manage installed backend plugins"""
def __init__(self):
self.ext_plugins = stevedore.ExtensionManager(
"qiskit_neko.backend_plugins",
invoke_on_load=True,
propagate_map_exceptions=True,
on_load_failure_callback=self.failure_hook,
)
@staticmethod
def failure_hook(_, ep, err):
"""Hook method to execute on import failure."""
LOG.error("Could not load %r: %s", ep.name, err)
raise err
def get_plugin_backends(self, backend_selection=None):
"""Return a dictionary of plugin names to backend objects."""
return {
plug.name: plug.obj.get_backend(backend_selection=backend_selection)
for plug in self.ext_plugins
}
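# Hedged sketch (hypothetical plugin, not shipped with qiskit-neko): a
# minimal BackendPlugin implementation backed by qiskit's BasicAer simulator.
# The class name and backend choice are assumptions for illustration only.
class BasicAerPlugin(BackendPlugin):
    """Toy plugin returning a local BasicAer simulator backend."""

    def get_backend(self, backend_selection=None):
        from qiskit import BasicAer  # assumes qiskit is installed
        return BasicAer.get_backend(backend_selection or "qasm_simulator")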
# ==== utils/utils.py (repo: ZhenqiSong/OCR_Pytorch, license: MIT) ====
# -*- coding: utf-8 -*-
# __author__:Song Zhenqi
# 2021-01-20
import os
import sys
import yaml
import logging
import functools
logger_initialized = set()
def get_img_list(img_file):
img_lists = []
if img_file is None or not os.path.exists(img_file):
raise FileNotFoundError("file path: {} is not exist".format(img_file))
if os.path.isfile(img_file):
img_lists.append(img_file)
elif os.path.isdir(img_file):
for file_name in os.listdir(img_file):
file_path = os.path.join(img_file, file_name)
if os.path.isfile(file_path):
img_lists.append(file_name)
if len(img_lists) == 0:
raise Exception('not find any img file in {}'.format(img_file))
return img_lists
def get_config(file):
"""
    Read a YAML configuration file and return the network configuration.
    :param file: configuration file; only yaml/yml formats are supported
    :return: configuration dict
"""
_, ext = os.path.splitext(file)
    assert ext in ['.yaml', '.yml'], "only yaml/yml files are supported"
config = yaml.load(open(file, 'rb'), Loader=yaml.Loader)
return config
@functools.lru_cache()
def get_logger(name: str = 'root', file: str = None, level=logging.INFO) -> logging.Logger:
"""
    Initialize and configure a logger.
    :param name: logger name
    :param file: optional path of a local log file
    :param level: logging level to display
    :return: the configured Logger object
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
for logger_name in logger_initialized:
if name.startswith(logger_name):
return logger
    # Configure the log message format
formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s: %(message)s',
datefmt="%Y/%m/%d %H:%M:%S")
    # Attach a stream handler writing to stdout
stream_handler = logging.StreamHandler(stream=sys.stdout)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
    # Optionally attach a file handler
if file is not None:
log_file_folder = os.path.split(file)[0]
os.makedirs(log_file_folder, exist_ok=True)
file_handle = logging.FileHandler(file, 'a')
file_handle.setFormatter(formatter)
logger.addHandler(file_handle)
logger.setLevel(level)
return logger
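# Usage sketch (illustrative): create a console + file logger once; repeated
# calls with the same name return the cached instance via functools.lru_cache.
#
#     logger = get_logger('ocr', file='logs/train.log', level=logging.DEBUG)
#     logger.info('model loaded')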
# ==== monitoring/uss_qualifier/webapp/tasks.py (repo: interuss/InterUSS-Platform, license: Apache-2.0) ====
from monitoring.uss_qualifier.test_data import test_report
from monitoring.uss_qualifier.utils import USSQualifierTestConfiguration
from monitoring.uss_qualifier.main import uss_test_executor
from monitoring.uss_qualifier.rid.simulator import flight_state_from_kml
from monitoring.uss_qualifier.rid.utils import FullFlightRecord
import json
from typing import List
import redis
import rq
import uuid
from . import resources
from monitoring.monitorlib.typing import ImplicitDict
def get_rq_job(job_id):
try:
rq_job = resources.qualifier_queue.fetch_job(job_id)
except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError):
return None
return rq_job
def remove_rq_job(job_id):
"""Removes a job from the queue."""
try:
rq_job = resources.qualifier_queue.remove(job_id)
except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError):
return None
return rq_job
def call_test_executor(
user_config_json: str,
auth_spec: str,
flight_record_jsons: List[str],
testruns_id,
debug=False,
scd_test_definitions_path=None,
):
config_json = json.loads(user_config_json)
config: USSQualifierTestConfiguration = ImplicitDict.parse(
config_json, USSQualifierTestConfiguration
)
flight_records: List[FullFlightRecord] = [
ImplicitDict.parse(json.loads(j), FullFlightRecord) for j in flight_record_jsons
]
if debug:
report = json.dumps(test_report.test_data)
else:
report = json.dumps(
uss_test_executor(
config, auth_spec, flight_records, scd_test_definitions_path
)
)
resources.redis_conn.hset(resources.REDIS_KEY_TEST_RUNS, testruns_id, report)
return report
def call_kml_processor(kml_content, output_path):
flight_states = flight_state_from_kml.main(
kml_content, output_path, from_string=True
)
resources.redis_conn.hset(
resources.REDIS_KEY_UPLOADED_KMLS, str(uuid.uuid4()), json.dumps(flight_states)
)
return flight_states
# ==== grenades_services/modules/basket.py (repo: Parveen3300/Reans, license: Apache-2.0) ====
"""
BasketManagementRelated modules
"""
# import basket models
from basket.models import Basket
from basket.models import BasketProductLine
# import configuration models
from grenades_services.all_configuration_data import get_currency_instance
from grenades_services.all_configuration_data import get_customer_instance_from_request_user
from grenades_services.all_configuration_data import product_price_calculator
# import home modules
from grenades_services.modules.home import Home
# import serializers modules
from grenades_services.separate_serializers.basket_serializers import \
BasketProductSerializer
class UpdateProductsBasket:
"""
UpdateProductsBasket
"""
def __init__(self, **kwargs):
self.basket_data = kwargs
self._request = kwargs.get('request')
self.basket_id = None
self.customer_instance = None
self.filter_query_data = kwargs.get(
'filter_query_data', {'status': 'Open'})
@staticmethod
def _use_common_module(filter_input_data):
"""
        Return a Home instance built from the given filter data. The Home
        class implements the shared filtering logic used by the product
        basket modules.
"""
return Home(**filter_input_data)
@staticmethod
def calculate_offer_value(product_offer_instance, product_price):
"""
in this calculate_offer_value method we have calculate the product pricing according to the offer
product_offer_instance: for get the offer related key fields
manage two types of offer price
RUPPPEES & PERCENTAGE
"""
if product_offer_instance.offer_price_type == 'RUPPPEES':
if product_price > product_offer_instance.value:
return product_price - product_offer_instance.value
return product_price
if product_offer_instance.offer_price_type == 'PERCENTAGE':
if product_price > product_offer_instance.value:
return (product_offer_instance.value * product_price) / 100
return product_price
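    # Worked example of the two offer types above (a sketch; DummyOffer is a
    # hypothetical stand-in for the real offer model):
    #     >>> class DummyOffer:
    #     ...     offer_price_type = 'RUPPPEES'
    #     ...     value = 100
    #     >>> UpdateProductsBasket.calculate_offer_value(DummyOffer, 500)
    #     400
    #     >>> DummyOffer.offer_price_type = 'PERCENTAGE'
    #     >>> DummyOffer.value = 10
    #     >>> UpdateProductsBasket.calculate_offer_value(DummyOffer, 500)   # returns the computed percentage amount
    #     50.0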
def get_basket_instance(self, _filter_query_data=None):
"""
This 'get_basket_instance' method used to get the basket instance according
to auth user and session basket id or with inherit
"""
        try:
            return Basket.objects.get(**_filter_query_data) \
                if _filter_query_data else Basket.objects.get(**self.filter_query_data)
        except Basket.DoesNotExist as e:
            print('Basket.DoesNotExist.Error', e)
            return None
def collect_basket_product_values(self):
"""collect_basket_product_values
This 'collect_basket_product_values' method used to collect the all basket related value data
to entered in basket table with customer and session maintain instance
"""
home_instance = self._use_common_module(dict(
product_get_data={
'product_alias_name': self.basket_data['product_alias_name']
}
)
)
product_instance = home_instance.get_product_instance()
if product_instance:
home_instance = self._use_common_module(
dict(filter_input_data={'mapped_products__id__in': [product_instance.id]}))
category_product_mapping_instance = \
home_instance.category_product_mapping_instance()
home_instance = self._use_common_module(
dict(filter_input_data={
'included_products__id__in': [product_instance.id],
'offer_type': 'offer'
})
)
product_offer_instance = home_instance.offer_products()
payable_amount = self.calculate_offer_value(
product_offer_instance,
product_instance.price) if product_offer_instance else product_instance.price
return (product_instance,
category_product_mapping_instance,
payable_amount)
@staticmethod
def create_basket_product_line(basket_create_data):
"""
This 'create_basket_product_line' method used to create the basket
"""
create_basket_line = BasketProductLine.objects.create(
**basket_create_data)
return True if create_basket_line else False
def collect_basket_details(self, basket_instance):
"""
This 'collect_basket_details' method collect the basket common code details
"""
product_instance, category_product_mapping_instance, payable_amount = \
self.collect_basket_product_values()
return {
'basket': basket_instance,
'line_reference': str(product_instance.id),
'product': product_instance,
'category': category_product_mapping_instance.last(
).category if category_product_mapping_instance else None,
'quantity': self.basket_data.get('quantity', 1),
'price_currency': get_currency_instance(),
'price_excl_tax': None,
'price_incl_tax': None,
'payable_amount': payable_amount
}
def add_new_basket(self):
"""
This 'add_new_basket' method used to create a fresh basket for a customer or user
"""
if self.customer_instance:
self.filter_query_data['owner'] = self.customer_instance
        create_basket = Basket.objects.create(**self.filter_query_data)
if create_basket:
if self.create_basket_product_line(self.collect_basket_details(create_basket)):
self._request.session['basket_id'] = create_basket.id
return True
return False
def update_product_basket(self):
"""
This 'update_product_basket' method used to update the product in the basket
"""
if self.basket_id:
self.filter_query_data['id'] = self.basket_id
if self.customer_instance:
self.filter_query_data['owner'] = self.customer_instance
        basket_instance = self.get_basket_instance()
        if basket_instance:
            return self.create_basket_product_line(
                self.collect_basket_details(basket_instance))
        return False
def add_to_basket(self):
"""
This 'add_to_basket' method used to add the product in the basket
"""
self.customer_instance = get_customer_instance_from_request_user(
self._request.user)
if 'basket_id' in self._request.session.keys():
self.basket_id = self._request.session['basket_id']
return self.update_product_basket()
else:
return self.add_new_basket()
class DisplayProductsBasket(UpdateProductsBasket):
"""
DisplayProductsBasket
return: {
'products_description': {
'id': 14,
'products_list': [],
'line_reference': '2',
'quantity': 1,
'price_currency': 'INR',
'price_excl_tax': None,
'price_incl_tax': None,
'payable_amount': '1000.00',
'date_created': '2021-11-01T10:29:50.091484Z',
'date_updated': '2021-11-01T10:29:50.091502Z',
'basket': 5,
'product': 2,
'category': 5,
'collection': None
},
'product_price_details': {'total_item': 0},
'random_products_list': <QuerySet [<Product: Instruments>]>
}
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._request = kwargs.get('request')
self.customer_instance = None
self.basket_id = None
self.products_description_data = []
self.product_price_details = {}
self.filter_data = {'status': 'Open'}
        self.estimate_tax = 0
        self.offer_name = '-'
        self.coupon_name = '-'
        # Defaults consumed by order_product_price_details; replaced when a
        # coupon or offer applies.
        self.coupon_details = None
        self.offer_details = None
@staticmethod
def get_basket_product_lines(filter_query_data=None):
"""get_basket_product_lines
This 'get_basket_product_lines' method is used to get the all instance of
products of basket
"""
_product_line_instance = BasketProductLine.objects.filter(
**filter_query_data)
if _product_line_instance:
return _product_line_instance
def basket_product_description(self):
"""basket_product_description
This 'basket_product_description' method used to get the all product description with
all products details from baskets
"""
if self.basket_id:
self.filter_data['id'] = self.basket_id
if self.customer_instance:
self.filter_data['owner'] = self.customer_instance
basket_instance = self.get_basket_instance(self.filter_data)
if basket_instance:
product_line_last_obj = self.get_basket_product_lines(
{'basket': basket_instance}).last()
self.products_description_data = BasketProductSerializer(
product_line_last_obj).data
def create_product_order_summary_dict(self, order_summary_dict):
"""
This 'create_product_order_summary_dict' method used to create dict for product order summary
total_price, coupon_price, offer_price
"""
self.product_price_details['total'] = order_summary_dict['total_price']
self.product_price_details['sub_total'] = order_summary_dict['total_price']
self.product_price_details['estimate_tax'] = self.estimate_tax
self.product_price_details['coupon_name'] = self.coupon_name
self.product_price_details['coupon_price'] = order_summary_dict['coupon_price']
self.product_price_details['offer_name'] = self.offer_name
self.product_price_details['offer_price'] = order_summary_dict['offer_price']
def order_product_price_details(self):
"""order_product_price_details
This 'order_product_price_details' method used to get the all product order summary with price calculation
and manage the all coupon and offers
"""
self.product_price_details['total_item'] = len(
self.products_description_data['products_list'])
for _products_details in self.products_description_data['products_list']:
order_summary_dict = product_price_calculator(_products_details,
self.coupon_details,
self.offer_details)
# create product order summary
# return total_price, coupon_price, offer_price
self.create_product_order_summary_dict(order_summary_dict)
def display_products(self):
"""
This 'display_products' method used to get the all session and customer related
basket products for help on display
"""
if 'basket_id' in self._request.session.keys():
self.basket_id = self._request.session.get('basket_id')
else:
self.basket_id = None
self.customer_instance = get_customer_instance_from_request_user(
self._request.user)
self.basket_product_description()
self.order_product_price_details()
home_instance = Home()
random_products_list = home_instance.random_products_list()
return {
'products_description': self.products_description_data,
'product_price_details': self.product_price_details,
'random_products_list': random_products_list if random_products_list else []
}
| 40.731959
| 114
| 0.651312
| 1,318
| 11,853
| 5.487102
| 0.127466
| 0.041482
| 0.042035
| 0.031803
| 0.390072
| 0.297566
| 0.256084
| 0.192754
| 0.157218
| 0.1362
| 0
| 0.009172
| 0.273349
| 11,853
| 290
| 115
| 40.872414
| 0.830489
| 0.2218
| 0
| 0.245714
| 0
| 0
| 0.069529
| 0.010821
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091429
| false
| 0
| 0.04
| 0
| 0.245714
| 0.022857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd0b619e6db23ae007998ba9f088e9c319778c9d
| 517
|
py
|
Python
|
230.py
|
BYOUINZAKA/LeetCodeNotes
|
48e1b4522c1f769eeec4944cfbd57abf1281d09a
|
[
"MIT"
] | null | null | null |
230.py
|
BYOUINZAKA/LeetCodeNotes
|
48e1b4522c1f769eeec4944cfbd57abf1281d09a
|
[
"MIT"
] | null | null | null |
230.py
|
BYOUINZAKA/LeetCodeNotes
|
48e1b4522c1f769eeec4944cfbd57abf1281d09a
|
[
"MIT"
] | null | null | null |
'''
@Author: Hata
@Date: 2020-05-24 15:30:19
@LastEditors: Hata
@LastEditTime: 2020-05-24 15:32:04
@FilePath: \LeetCode\230.py
@Description: https://leetcode-cn.com/problems/kth-smallest-element-in-a-bst/
'''
class Solution:
def kthSmallest(self, root, k):
def gen(r):
if r is not None:
yield from gen(r.left)
yield r.val
yield from gen(r.right)
it = gen(root)
for _ in range(k):
ans = next(it)
return ans
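# A minimal usage sketch (TreeNode is a hypothetical stand-in for the node
# type LeetCode supplies; only .val/.left/.right are assumed):
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# BST:   3            in-order traversal: 1, 2, 3, 4
#       / \
#      1   4
#       \
#        2
root = TreeNode(3, TreeNode(1, right=TreeNode(2)), TreeNode(4))
assert Solution().kthSmallest(root, 1) == 1
assert Solution().kthSmallest(root, 3) == 3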
| 22.478261
| 77
| 0.558994
| 74
| 517
| 3.891892
| 0.702703
| 0.041667
| 0.055556
| 0.069444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086835
| 0.309478
| 517
| 22
| 78
| 23.5
| 0.719888
| 0.381044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd0c0c186a507173da38fb9c91812fd94be9043a
| 3,430
|
py
|
Python
|
Scripts/TestParsers/PyUnittestTestParser.py
|
davidbrownell/v3-Common_Environment
|
8f42f256e573cbd83cbf9813db9958025ddf12f2
|
[
"BSL-1.0"
] | null | null | null |
Scripts/TestParsers/PyUnittestTestParser.py
|
davidbrownell/v3-Common_Environment
|
8f42f256e573cbd83cbf9813db9958025ddf12f2
|
[
"BSL-1.0"
] | 1
|
2018-06-08T06:45:16.000Z
|
2018-06-08T06:45:16.000Z
|
Scripts/TestParsers/PyUnittestTestParser.py
|
davidbrownell/v3-Common_Environment
|
8f42f256e573cbd83cbf9813db9958025ddf12f2
|
[
"BSL-1.0"
] | 1
|
2018-06-08T04:15:17.000Z
|
2018-06-08T04:15:17.000Z
|
# ----------------------------------------------------------------------
# |
# | PythonUnittestTestParser.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-05-22 07:59:46
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""Contains the TestParser object"""
import os
import re
import CommonEnvironment
from CommonEnvironment.Interface import staticderived, override, DerivedProperty
from CommonEnvironment.TestParserImpl import TestParserImpl
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@staticderived
class TestParser(TestParserImpl):
"""Parses content produced by Python's unittest library"""
# ----------------------------------------------------------------------
# | Public Properties
Name = DerivedProperty("PyUnittest")
Description = DerivedProperty("Parses Python unittest output.")
# ----------------------------------------------------------------------
# | Public Methods
@staticmethod
@override
def IsSupportedCompiler(compiler):
# Supports any compiler that supports python; use this file as a test subject
return compiler.IsSupported(_script_fullpath if os.path.splitext(_script_name)[1] == ".py" else "{}.py".format(os.path.splitext(_script_fullpath)[0]))
# ----------------------------------------------------------------------
_IsSupportedTestItem_imports = [
re.compile("^\s*import unittest"),
re.compile("^\s*from unittest import"),
]
@classmethod
@override
def IsSupportedTestItem(cls, item):
# Use this parser for any python file that imports 'unittest'
assert os.path.isfile(item), item
with open(item) as f:
for line in f.readlines():
for regex in cls._IsSupportedTestItem_imports:
if regex.search(line):
return True
return
# ----------------------------------------------------------------------
_Parse_failed = re.compile(r"^FAILED", re.DOTALL | re.MULTILINE)
_Parse_ok = re.compile(r"^OK\s*", re.DOTALL | re.MULTILINE)
@classmethod
@override
def Parse(cls, test_data):
if cls._Parse_failed.search(test_data):
return -1
if cls._Parse_ok.search(test_data):
return 0
return 1
# ----------------------------------------------------------------------
@classmethod
@override
def CreateInvokeCommandLine(cls, context, debug_on_error):
command_line = super(TestParser, cls).CreateInvokeCommandLine(context, debug_on_error)
return 'python "{}"'.format(command_line)
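# A small sketch of the Parse contract (the sample strings are assumptions
# modeled on typical unittest output, not taken from a real run):
#     TestParser.Parse("....\nOK\n")                  -> 0   (success)
#     TestParser.Parse("...\nFAILED (failures=1)\n")  -> -1  (failure)
#     TestParser.Parse("still running")               -> 1   (no verdict yet)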
| 38.539326
| 159
| 0.473178
| 275
| 3,430
| 5.76
| 0.436364
| 0.035354
| 0.041667
| 0.015152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011725
| 0.229155
| 3,430
| 88
| 160
| 38.977273
| 0.587368
| 0.390379
| 0
| 0.159091
| 0
| 0
| 0.058465
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 1
| 0.090909
| false
| 0
| 0.204545
| 0.022727
| 0.590909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd0c8d9af792a61f23cb21cb4b226023ec5c2f1f
| 7,116
|
py
|
Python
|
fairseq/models/transformer_xlm_iwslt_decoder.py
|
jm-glowienke/fairseq
|
ca45353322f92776e34a7308bf3fab75af9c1d50
|
[
"MIT"
] | null | null | null |
fairseq/models/transformer_xlm_iwslt_decoder.py
|
jm-glowienke/fairseq
|
ca45353322f92776e34a7308bf3fab75af9c1d50
|
[
"MIT"
] | null | null | null |
fairseq/models/transformer_xlm_iwslt_decoder.py
|
jm-glowienke/fairseq
|
ca45353322f92776e34a7308bf3fab75af9c1d50
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Dict
from fairseq import checkpoint_utils
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture as transformer_base_architecture,
)
@register_model("transformer_xlm_iwslt_decoder")
class TransformerFromPretrainedXLMModel(TransformerModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--pretrained-xlm-checkpoint",
type=str,
metavar="STR",
help="XLM model to use for initializing transformer encoder "
"and/or decoder",
)
parser.add_argument(
"--init-encoder-only",
action="store_true",
help="if set, don't load the XLM weights and embeddings into "
"decoder",
)
parser.add_argument(
"--init-decoder-only",
action="store_true",
help="if set, don't load the XLM weights and embeddings into "
"encoder",
)
@classmethod
    def build_model(cls, args, task, cls_dictionary=MaskedLMDictionary):
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"You must specify a path for --pretrained-xlm-checkpoint to use "
"--arch transformer_from_pretrained_xlm"
)
assert isinstance(task.source_dictionary,
cls_dictionary) and isinstance(
task.target_dictionary, cls_dictionary
), (
"You should use a MaskedLMDictionary when using --arch "
"transformer_from_pretrained_xlm because the pretrained XLM model "
"was trained using data binarized with MaskedLMDictionary. "
"For translation, you may want to use --task "
"translation_from_pretrained_xlm"
)
assert not (
getattr(args, "init_encoder_only", False)
and getattr(args, "init_decoder_only", False)
), "Only one of --init-encoder-only and --init-decoder-only can be set."
return super().build_model(args, task)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
def upgrade_state_dict_with_xlm_weights(
state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str
) -> Dict[str, Any]:
"""
Load XLM weights into a Transformer encoder or decoder model.
Args:
state_dict: state dict for either TransformerEncoder or
TransformerDecoder
pretrained_xlm_checkpoint: checkpoint to load XLM weights from
Raises:
AssertionError: If architecture (num layers, attention heads, etc.)
does not match between the current Transformer encoder or
decoder and the pretrained_xlm_checkpoint
"""
if not os.path.exists(pretrained_xlm_checkpoint):
raise IOError(
"Model file not found: {}".format(pretrained_xlm_checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
xlm_state_dict = state["model"]
for key in xlm_state_dict.keys():
for search_key in ["embed_tokens", "embed_positions", "layers"]:
if search_key in key:
subkey = key[key.find(search_key):]
if "in_proj_weight" in subkey or \
"in_proj_bias" in subkey:
continue
else:
assert subkey in state_dict, (
"{} \nTransformer encoder / decoder "
"state_dict does not contain {}. \nCannot "
"load {} from pretrained XLM checkpoint "
"{} into Transformer.".format(
str(state_dict.keys()), subkey, key,
pretrained_xlm_checkpoint
)
)
state_dict[subkey] = xlm_state_dict[key]
return state_dict
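# Sketch of the key-matching rule above (keys are illustrative):
#     an XLM key "encoder.layers.0.fc1.weight" yields the subkey
#     "layers.0.fc1.weight", which must already exist in the target
#     state_dict, otherwise an AssertionError is raised;
#     "in_proj_weight" / "in_proj_bias" entries are skipped, not copied.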
class TransformerEncoderFromPretrainedXLM(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if getattr(args, "init_decoder_only", False):
# Don't load XLM weights for encoder if --init-decoder-only
return
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"--pretrained-xlm-checkpoint must be specified to load Transformer "
"encoder from pretrained XLM"
)
if args.pretrained_xlm_checkpoint != 'interactive':
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
state_dict=self.state_dict(),
pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
# class TransformerDecoderFromPretrainedXLM(TransformerDecoder):
# def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
# super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
# if getattr(args, "init_encoder_only", False):
# # Don't load XLM weights for decoder if --init-encoder-only
# return
# assert hasattr(args, "pretrained_xlm_checkpoint"), (
# "--pretrained-xlm-checkpoint must be specified to load Transformer "
# "decoder from pretrained XLM"
# )
#
# xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
# state_dict=self.state_dict(),
# pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
# )
# self.load_state_dict(xlm_loaded_state_dict, strict=True)
@register_model_architecture(
"transformer_xlm_iwslt_decoder", "transformer_xlm_iwslt_decoder")
def transformer_xlm_iwslt_decoder(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
transformer_base_architecture(args)
| 40.662857
| 82
| 0.647274
| 786
| 7,116
| 5.587786
| 0.221374
| 0.076958
| 0.104736
| 0.036885
| 0.331967
| 0.246585
| 0.203552
| 0.175546
| 0.153005
| 0.153005
| 0
| 0.003477
| 0.272485
| 7,116
| 174
| 83
| 40.896552
| 0.844891
| 0.210933
| 0
| 0.1
| 0
| 0
| 0.245443
| 0.067858
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.058333
| false
| 0
| 0.05
| 0.016667
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd0ff0154f3a2ed2059c34dae1964cf271d9a2e1
| 3,674
|
py
|
Python
|
analysis/sharpness.py
|
sanketvmehta/lifelong-learning-pretraining-and-sam
|
2fee18a4b13c918f6005f88c19089b86f4a8aae2
|
[
"Apache-2.0"
] | null | null | null |
analysis/sharpness.py
|
sanketvmehta/lifelong-learning-pretraining-and-sam
|
2fee18a4b13c918f6005f88c19089b86f4a8aae2
|
[
"Apache-2.0"
] | null | null | null |
analysis/sharpness.py
|
sanketvmehta/lifelong-learning-pretraining-and-sam
|
2fee18a4b13c918f6005f88c19089b86f4a8aae2
|
[
"Apache-2.0"
] | null | null | null |
import copy
import numpy as np
import torch
from scipy import optimize
import logging
def sharpness(model, criterion_fn, A, epsilon=1e-3, p=0, bounds=None):
"""Computes sharpness metric according to https://arxiv.org/abs/1609.04836.
Args:
model: Model on which to compute sharpness
criterion_fn: Function that takes in a model and returns the loss
value and gradients on the appropriate data that will be used in
the loss maximization done in the sharpness calculation.
A: Projection matrix that defines the subspace in which the loss
maximization will be done. If A=1, no projection will be done.
epsilon: Defines the size of the neighborhood that will be used in the
loss maximization.
p: The dimension of the random projection subspace in which maximization
will be done. If 0, assumed to be the full parameter space.
"""
run_fn = create_run_model(model, A, criterion_fn)
if bounds is None:
bounds = compute_bounds(model, A, epsilon)
dim = flatten_parameters(model).shape[0] if p == 0 else p
# Find the maximum loss in the neighborhood of the minima
y = optimize.minimize(
lambda x: run_fn(x),
np.zeros(dim),
method="L-BFGS-B",
bounds=bounds,
jac=True,
options={"maxiter": 10},
).x.astype(np.float32)
model_copy = copy.deepcopy(model)
    if isinstance(A, int) and A == 1:  # 'A is 1' relied on small-int caching
        flat_diffs = y
    else:
        flat_diffs = A @ y
apply_diffs(model_copy, flat_diffs)
maximum = criterion_fn(model_copy)["loss"]
loss_value = criterion_fn(model)["loss"]
sharpness = 100 * (maximum - loss_value) / (1 + loss_value)
return sharpness
def flatten_parameters(model):
"""Returns a flattened numpy array with the parameters of the model."""
return np.concatenate(
[
param.detach().cpu().numpy().flatten()
for param in model.parameters()
if param.requires_grad
]
)
def compute_bounds(model, A, epsilon):
"""Computes the bounds in which to search for the maximum loss."""
x = flatten_parameters(model)
    if isinstance(A, int) and A == 1:
        bounds = epsilon * (np.abs(x) + 1)
    else:
        b, _, _, _ = np.linalg.lstsq(A, x, rcond=None)
        bounds = epsilon * (np.abs(b) + 1)
return optimize.Bounds(-bounds, bounds)
def create_run_model(model, A, criterion_fn):
"""Creates a run function that takes in parameters in the subspace that loss
maximization takes place in, and computes the loss and gradients
corresponding to those parameters.
"""
def run(y):
y = y.astype(np.float32)
model_copy = copy.deepcopy(model)
model_copy.zero_grad()
        if isinstance(A, int) and A == 1:
            flat_diffs = y
        else:
            flat_diffs = A @ y
apply_diffs(model_copy, flat_diffs)
metrics = criterion_fn(model_copy)
objective = -metrics["loss"]
gradient = -metrics["gradients"]
logging.info("Loss: %f", objective)
        if not (isinstance(A, int) and A == 1):
            gradient = gradient @ A
return objective, gradient.astype(np.float64)
return run
def apply_diffs(model, diffs):
"""Adds deltas to the parameters in the model corresponding to diffs."""
parameters = model.parameters()
idx = 0
for parameter in parameters:
if parameter.requires_grad:
n_elements = parameter.nelement()
cur_diff = diffs[idx : idx + n_elements]
parameter.data = parameter.data + torch.tensor(
cur_diff.reshape(parameter.shape)
).to(device=parameter.device)
idx += n_elements
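# A minimal usage sketch (assumes torch and scipy are installed; the tiny
# model, data and criterion below are illustrative, not from the original setup):
def _sharpness_example():
    model = torch.nn.Linear(2, 1)
    x = torch.randn(8, 2)
    y = torch.randn(8, 1)

    def criterion_fn(m):
        m.zero_grad()
        loss = torch.nn.functional.mse_loss(m(x), y)
        loss.backward()
        grads = np.concatenate(
            [p.grad.detach().cpu().numpy().flatten() for p in m.parameters()]
        )
        return {"loss": loss.item(), "gradients": grads}

    # A=1 searches the full parameter space (no random projection)
    return sharpness(model, criterion_fn, A=1, epsilon=1e-3)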
| 32.803571
| 80
| 0.631464
| 491
| 3,674
| 4.627291
| 0.285132
| 0.033891
| 0.008803
| 0.007923
| 0.194982
| 0.146127
| 0.146127
| 0.118838
| 0.051937
| 0.051937
| 0
| 0.013253
| 0.281165
| 3,674
| 111
| 81
| 33.099099
| 0.847028
| 0.311922
| 0
| 0.191781
| 0
| 0
| 0.018033
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.068493
| 0
| 0.219178
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd13a01142ccf63d717a89caf8e588ed9c337f8d
| 850
|
py
|
Python
|
D_QuickS.py
|
rut999/Algo
|
9180f66452597a758a31073cb2b8fa4a3e6a93fe
|
[
"MIT"
] | null | null | null |
D_QuickS.py
|
rut999/Algo
|
9180f66452597a758a31073cb2b8fa4a3e6a93fe
|
[
"MIT"
] | null | null | null |
D_QuickS.py
|
rut999/Algo
|
9180f66452597a758a31073cb2b8fa4a3e6a93fe
|
[
"MIT"
] | null | null | null |
import time
from random import randint
def random_int(x):
value = []
for i in range(x):
value.append(randint(0, x))
return value
def Quick_sort(list1):
N = len(list1)
    if N <= 1:
return list1
pivot = list1.pop()
# mid = len(list1)//2
Left_H = []
Right_H = []
    for i in range(len(list1)):
        if list1[i] > pivot:
            Right_H.append(list1[i])
        else:
            Left_H.append(list1[i])
return (Quick_sort(Left_H)+[pivot]+Quick_sort(Right_H))
random_list = random_int(100000)
#list2 = [0,0,99,34,56,54,-1,-1,32,2.5,-1.1,1000,1000,-2,30,21,24,15,10,6]
t1 = time.time()
Quick_sort(random_list)
t2 = time.time()
print(t2-t1)
# def Quick_Sort(list1):
# if (list1[0]<list1[-1]):
# partition_index =partition(list1)
# quicksort(list1,)
# quicksort()
| 22.368421
| 74
| 0.583529
| 134
| 850
| 3.58209
| 0.395522
| 0.09375
| 0.025
| 0.045833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106583
| 0.249412
| 850
| 37
| 75
| 22.972973
| 0.645768
| 0.272941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.08
| 0
| 0.28
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd18f82e759c1f805c2c156a96b2d6d4fe352c3d
| 780
|
py
|
Python
|
api/service/cidades_atendimento_service.py
|
FinotelliCarlos/ewipesimple-adminweb-python
|
3bf779250efeb9f85b4283ffbf210bf227aa8e8c
|
[
"MIT"
] | 1
|
2021-06-17T06:13:33.000Z
|
2021-06-17T06:13:33.000Z
|
api/service/cidades_atendimento_service.py
|
FinotelliCarlos/ewipesimple-adminweb-python
|
3bf779250efeb9f85b4283ffbf210bf227aa8e8c
|
[
"MIT"
] | null | null | null |
api/service/cidades_atendimento_service.py
|
FinotelliCarlos/ewipesimple-adminweb-python
|
3bf779250efeb9f85b4283ffbf210bf227aa8e8c
|
[
"MIT"
] | null | null | null |
from adminweb.services import cep_service
from adminweb.models import Profissional
from rest_framework import serializers
import json
def listar_profissionais_cidade(cep):
codigo_ibge = buscar_cidade_cep(cep)['ibge']
try:
profissionais = Profissional.objects.filter(codigo_ibge=codigo_ibge).order_by('id')
return profissionais
except Profissional.DoesNotExist:
return []
def buscar_cidade_cep(cep):
response = cep_service.buscar_cidade_cep(cep)
if response.status_code == 400:
raise serializers.ValidationError('O CEP informado está incorreto!')
cidade_api = json.loads(response.content)
if 'erro' in cidade_api:
raise serializers.ValidationError('O CEP informado não foi encontrado!')
return cidade_api
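# Usage sketch (the CEP value below is illustrative):
#     profissionais = listar_profissionais_cidade('01001000')
#     cidade = buscar_cidade_cep('01001000')  # raises ValidationError for bad/unknown CEPs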
| 32.5
| 91
| 0.75
| 96
| 780
| 5.895833
| 0.489583
| 0.063604
| 0.079505
| 0.095406
| 0.155477
| 0.155477
| 0
| 0
| 0
| 0
| 0
| 0.004658
| 0.174359
| 780
| 23
| 92
| 33.913043
| 0.874224
| 0
| 0
| 0
| 0
| 0
| 0.097436
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.210526
| 0
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd1a66acf2cfd6c3c481c4c94e53d436215cbbe7
| 9,414
|
py
|
Python
|
omicron/core/numpy_extensions.py
|
evimacs/omicron
|
abe77fd25a93cf3d0d17661ae957373474724535
|
[
"MIT"
] | 4
|
2020-11-09T02:23:51.000Z
|
2021-01-24T00:45:21.000Z
|
omicron/core/numpy_extensions.py
|
evimacs/omicron
|
abe77fd25a93cf3d0d17661ae957373474724535
|
[
"MIT"
] | 14
|
2020-11-09T02:31:34.000Z
|
2021-12-22T10:15:47.000Z
|
omicron/core/numpy_extensions.py
|
evimacs/omicron
|
abe77fd25a93cf3d0d17661ae957373474724535
|
[
"MIT"
] | 2
|
2021-01-24T00:45:25.000Z
|
2021-12-24T06:18:37.000Z
|
"""Extension function related to numpy
"""
from __future__ import annotations
from typing import List, Tuple
import numpy as np
import pandas
from numpy.typing import ArrayLike
def dict_to_numpy_array(d: dict, dtype: List[Tuple]) -> np.array:
"""convert dictionary to numpy array
Examples:
>>> d = {"aaron": 5, "jack": 6}
>>> dtype = [("name", "S8"), ("score", "<i4")]
>>> dict_to_numpy_array(d, dtype)
array([(b'aaron', 5), (b'jack', 6)],
dtype=[('name', 'S8'), ('score', '<i4')])
Args:
d (dict): [description]
dtype (List[Tuple]): [description]
Returns:
np.array: [description]
"""
return np.fromiter(d.items(), dtype=dtype, count=len(d))
def dataframe_to_structured_array(
df: pandas.DataFrame, dtypes: List[Tuple] = None
) -> ArrayLike:
"""convert dataframe (with all columns, and index possibly) to numpy structured arrays
`len(dtypes)` should be either equal to `len(df.columns)` or `len(df.columns) + 1`. In the later case, it implies to include `df.index` into converted array.
Args:
df: the one needs to be converted
dtypes: Defaults to None. If it's `None`, then dtypes of `df` is used, in such case, the `index` of `df` will not be converted.
Returns:
ArrayLike: [description]
"""
v = df
if dtypes is not None:
dtypes_in_dict = {key: value for key, value in dtypes}
col_len = len(df.columns)
if len(dtypes) == col_len + 1:
v = df.reset_index()
rename_index_to = set(dtypes_in_dict.keys()).difference(set(df.columns))
v.rename(columns={"index": list(rename_index_to)[0]}, inplace=True)
elif col_len != len(dtypes):
raise ValueError(
f"length of dtypes should be either {col_len} or {col_len + 1}, is {len(dtypes)}"
)
# re-arrange order of dtypes, in order to align with df.columns
dtypes = []
for name in v.columns:
dtypes.append((name, dtypes_in_dict[name]))
else:
dtypes = df.dtypes
return np.array(np.rec.fromrecords(v.values), dtype=dtypes)
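# Example (illustrative; len(dtypes) == len(df.columns) + 1, so the index is
# included as an extra 'idx' field):
#     >>> df = pandas.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})
#     >>> dtypes = [("idx", "<i8"), ("a", "<i8"), ("b", "<f8")]
#     >>> dataframe_to_structured_array(df, dtypes)  # structured array with fields idx, a, b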
def numpy_array_to_dict(arr: np.array, key: str, value: str) -> dict:
return {item[key]: item[value] for item in arr}
def find_runs(x):
"""Find runs of consecutive items in an array."""
# ensure array
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError("only 1D array supported")
n = x.shape[0]
# handle empty array
if n == 0:
return np.array([]), np.array([]), np.array([])
else:
# find run starts
loc_run_start = np.empty(n, dtype=bool)
loc_run_start[0] = True
np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
run_starts = np.nonzero(loc_run_start)[0]
# find run values
run_values = x[loc_run_start]
# find run lengths
run_lengths = np.diff(np.append(run_starts, n))
return run_values, run_starts, run_lengths
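# Example (illustrative):
#     >>> find_runs([1, 1, 2, 2, 2, 3])
#     (array([1, 2, 3]), array([0, 2, 5]), array([2, 3, 1]))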
def count_between(arr, start, end):
"""计算数组中,`start`元素与`end`元素之间共有多少个元素
要求arr必须是已排序。计算结果会包含区间边界点。
Examples:
>>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
>>> count_between(arr, 20050104, 20050111)
6
>>> count_between(arr, 20050104, 20050109)
4
"""
pos_start = np.searchsorted(arr, start, side="right")
pos_end = np.searchsorted(arr, end, side="right")
counter = pos_end - pos_start + 1
if start < arr[0]:
counter -= 1
if end > arr[-1]:
counter -= 1
return counter
def shift(arr, start, offset):
"""在numpy数组arr中,找到start(或者最接近的一个),取offset对应的元素。
要求`arr`已排序。`offset`为正,表明向后移位;`offset`为负,表明向前移位
Examples:
>>> arr = [20050104, 20050105, 20050106, 20050107, 20050110, 20050111]
>>> shift(arr, 20050104, 1)
20050105
>>> shift(arr, 20050105, -1)
20050104
    >>> # start is already beyond the right edge and we shift right: return start
>>> shift(arr, 20050120, 1)
20050120
Args:
        arr : a sorted array
        start : any data type acceptable to numpy
        offset (int): [description]
    Returns:
        the element value obtained after shifting
"""
pos = np.searchsorted(arr, start, side="right")
if pos + offset - 1 >= len(arr):
return start
else:
return arr[pos + offset - 1]
def floor(arr, item):
"""
在数据arr中,找到小于等于item的那一个值。如果item小于所有arr元素的值,返回arr[0];如果item
大于所有arr元素的值,返回arr[-1]
与`minute_frames_floor`不同的是,本函数不做回绕与进位.
Examples:
>>> a = [3, 6, 9]
>>> floor(a, -1)
3
>>> floor(a, 9)
9
>>> floor(a, 10)
9
>>> floor(a, 4)
3
>>> floor(a,10)
9
Args:
arr:
item:
Returns:
"""
if item < arr[0]:
return arr[0]
index = np.searchsorted(arr, item, side="right")
return arr[index - 1]
def join_by_left(key, r1, r2, mask=True):
"""左连接 `r1`, `r2` by `key`
如果`r1`中存在`r2`中没有的行,则该行对应的`r2`中的那些字段的取值将使用`fill`来填充。如果
same as numpy.lib.recfunctions.join_by(key, r1, r2, jointype='leftouter'), but allows
r1 have duplicat keys
[Reference: stackoverflow](https://stackoverflow.com/a/53261882/13395693)
Examples:
>>> # to join the following
>>> # [[ 1, 2],
>>> # [ 1, 3], x [[1, 5],
>>> # [ 2, 3]] [4, 7]]
>>> # only first two rows in left will be joined
>>> r1 = np.array([(1, 2), (1,3), (2,3)], dtype=[('seq', 'i4'), ('score', 'i4')])
>>> r2 = np.array([(1, 5), (4,7)], dtype=[('seq', 'i4'), ('age', 'i4')])
>>> joined = join_by_left('seq', r1, r2)
>>> print(joined)
[(1, 2, 5) (1, 3, 5) (2, 3, --)]
>>> print(joined.dtype)
(numpy.record, [('seq', '<i4'), ('score', '<i4'), ('age', '<i4')])
>>> joined[2][2]
masked
>>> joined.tolist()[2][2] == None
True
Args:
        key : the join key
        r1 : dataset 1
        r2 : dataset 2
        fill : value used to fill cells that could not be matched
Returns:
a numpy array
"""
# figure out the dtype of the result array
descr1 = r1.dtype.descr
descr2 = [d for d in r2.dtype.descr if d[0] not in r1.dtype.names]
descrm = descr1 + descr2
# figure out the fields we'll need from each array
f1 = [d[0] for d in descr1]
f2 = [d[0] for d in descr2]
# cache the number of columns in f1
ncol1 = len(f1)
# get a dict of the rows of r2 grouped by key
rows2 = {}
for row2 in r2:
rows2.setdefault(row2[key], []).append(row2)
# figure out how many rows will be in the result
nrowm = 0
for k1 in r1[key]:
if k1 in rows2:
nrowm += len(rows2[k1])
else:
nrowm += 1
# allocate the return array
# ret = np.full((nrowm, ), fill, dtype=descrm)
_ret = np.recarray(nrowm, dtype=descrm)
if mask:
ret = np.ma.array(_ret, mask=True)
else:
ret = _ret
# merge the data into the return array
i = 0
for row1 in r1:
if row1[key] in rows2:
for row2 in rows2[row1[key]]:
ret[i] = tuple(row1[f1]) + tuple(row2[f2])
i += 1
else:
for j in range(ncol1):
ret[i][j] = row1[j]
i += 1
return ret
def numpy_append_fields(base, names, data, dtypes):
"""给现有的数组`base`增加新的字段
实现了`numpy.lib.recfunctions.rec_append_fields`的功能。因为`rec_append_fields`不能处
理`data`元素的类型为Object的情况
Example:
    >>> # append a single field
>>> import numpy
>>> old = np.array([i for i in range(3)], dtype=[('col1', '<f4')])
>>> new_list = [2 * i for i in range(3)]
>>> res = numpy_append_fields(old, 'new_col', new_list, [('new_col', '<f4')])
>>> print(res)
... # doctest: +NORMALIZE_WHITESPACE
[(0., 0.) (1., 2.) (2., 4.)]
    >>> # append multiple fields
>>> data = [res['col1'].tolist(), res['new_col'].tolist()]
>>> print(numpy_append_fields(old, ('col3', 'col4'), data, [('col3', '<f4'), ('col4', '<f4')]))
... # doctest: +NORMALIZE_WHITESPACE
[(0., 0., 0.) (1., 1., 2.) (2., 2., 4.)]
Args:
        base ([numpy.array]): the base array
        name ([type]): name(s) of the new field(s); a string for a single field, or a list of strings
        data (list): data of the new field(s), as a list
        dtypes ([type]): dtype(s) of the new field(s)
"""
if isinstance(names, str):
names = [
names,
]
data = [
data,
]
result = np.empty(base.shape, dtype=base.dtype.descr + dtypes)
for col in base.dtype.names:
result[col] = base[col]
for i in range(len(names)):
result[names[i]] = data[i]
return result
def ffill_na(s: np.array) -> np.array:
"""前向替换一维数组中的np.NaN
如果s以np.NaN起头,则起头处的np.NaN将无法被替换。
Examples:
>>> arr = np.arange(6, dtype=np.float32)
>>> arr[3:5] = np.NaN
>>> ffill_na(arr)
... # doctest: +NORMALIZE_WHITESPACE
array([0., 1., 2., 2., 2., 5.], dtype=float32)
>>> arr[0:2] = np.nan
>>> ffill_na(arr)
... # doctest: +NORMALIZE_WHITESPACE
array([nan, nan, 2., 2., 2., 5.], dtype=float32)
Args:
s (np.array): [description]
Returns:
np.array: [description]
"""
mask = np.isnan(s)
idx = np.where(~mask, np.arange(len(mask)), 0)
np.maximum.accumulate(idx, out=idx)
return s[idx]
| 26.222841
| 161
| 0.546633
| 1,247
| 9,414
| 4.054531
| 0.256616
| 0.019383
| 0.010878
| 0.008307
| 0.109573
| 0.074367
| 0.050633
| 0.041535
| 0.018196
| 0
| 0
| 0.060505
| 0.297748
| 9,414
| 358
| 162
| 26.296089
| 0.704281
| 0.504143
| 0
| 0.086207
| 0
| 0.008621
| 0.030966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086207
| false
| 0
| 0.043103
| 0.008621
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd1c390db89d68211aa13e58ba3a2a89676c5247
| 3,039
|
py
|
Python
|
finetuning/pretrain_scripts/create_sentiment_mask.py
|
tatsu-lab/mlm_inductive_bias
|
2d99e2477293036949ba356c88513729244dc1f9
|
[
"MIT"
] | 10
|
2021-04-14T22:06:19.000Z
|
2022-01-12T19:41:12.000Z
|
finetuning/pretrain_scripts/create_sentiment_mask.py
|
tatsu-lab/mlm_inductive_bias
|
2d99e2477293036949ba356c88513729244dc1f9
|
[
"MIT"
] | null | null | null |
finetuning/pretrain_scripts/create_sentiment_mask.py
|
tatsu-lab/mlm_inductive_bias
|
2d99e2477293036949ba356c88513729244dc1f9
|
[
"MIT"
] | 3
|
2021-06-06T09:43:14.000Z
|
2022-02-20T00:40:42.000Z
|
"""
This script computes word masks based on sentiment lexicons
"""
import os
import torch
import argparse
from tqdm import tqdm
from transformers import AutoTokenizer
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import GlueDataset as Dataset
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=str, default="./data/SST-2", help="path to the dir containing lm data.")
parser.add_argument("--lexicon-dir", type=str, default="./data/sentiment_lexicon", help="path to the dir containing sentiment lexicon.")
parser.add_argument("--tokenizer-name", type=str, default="bert-base-uncased", help="name of the tokenizer to use.")
parser.add_argument("--block_size", type=int, default=72, help="maximum length of the mask")
args = parser.parse_args()
positive_words = set()
with open(os.path.join(args.lexicon_dir, "positive-words.txt"), "r", encoding="ISO-8859-1") as f:
for line in f:
line = line.strip()
# skip the initial comments with ; and empty lines
if not line.startswith(";") and len(line) > 0:
positive_words.add(line.lower())
negative_words = set()
with open(os.path.join(args.lexicon_dir, "negative-words.txt"), "r", encoding="ISO-8859-1") as f:
for line in f:
line = line.strip()
# skip the initial comments with ; and empty lines
if not line.startswith(";") and len(line) > 0:
negative_words.add(line.lower())
salient_words = positive_words | negative_words
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
splits = ["train", "dev", "test"]
for split in splits:
with open(os.path.join(args.data_dir, f"{split}.lm"), "r") as f:
all_sens = [s.strip() for s in f.readlines()]
salient_word_masks = torch.zeros(len(all_sens), args.block_size, dtype=torch.bool)
total_word_count = 0
salient_word_count = 0
# Main loop that handles subword tokenization
for i, sen in tqdm(enumerate(all_sens), total=len(all_sens)):
words = sen.split()
curr_idx = 1 # skip the [CLS] token
total_word_count += len(words)
for word in words:
tokens = tokenizer.tokenize(word)
            # Examples longer than the block size cannot be masked
if curr_idx + len(tokens) > args.block_size:
raise ValueError("Encountered examples longer than block size.")
if word in salient_words:
salient_word_count += 1
for j in range(len(tokens)):
salient_word_masks[i, curr_idx + j] = 1
curr_idx += len(tokens)
print(f"{(salient_word_count/total_word_count):.2%} salient words")
salient_pct = salient_word_masks.any(dim=1).sum().float() / len(all_sens)
print(f"{split} {salient_pct:.2%} documents have salient words")
torch.save(
salient_word_masks,
os.path.join(
args.data_dir,
f"cached_{split}_{args.tokenizer_name.replace('-', '_')}_{args.block_size}.sentiment_mask",
),
)
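# Sketch of the masking logic above (hypothetical sentence and wordpieces):
# for "a delightful film", if "delightful" is in the lexicon and tokenizes to
# ["delight", "##ful"], mask positions 2-3 of that row are set (position 0 is
# reserved for [CLS], so word indexing starts at 1).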
| 37.518519
| 136
| 0.66535
| 422
| 3,039
| 4.646919
| 0.329384
| 0.039266
| 0.034676
| 0.028557
| 0.237634
| 0.216216
| 0.18562
| 0.163182
| 0.163182
| 0.163182
| 0
| 0.009595
| 0.211254
| 3,039
| 80
| 137
| 37.9875
| 0.808511
| 0.080948
| 0
| 0.105263
| 0
| 0.017544
| 0.202805
| 0.055016
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.122807
| 0
| 0.122807
| 0.035088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd1f80834765c75ab8a5bfc49335f1d5e1f2a008
| 456
|
py
|
Python
|
Leetcode/443. String Compression/solution2.py
|
asanoviskhak/Outtalent
|
c500e8ad498f76d57eb87a9776a04af7bdda913d
|
[
"MIT"
] | 51
|
2020-07-12T21:27:47.000Z
|
2022-02-11T19:25:36.000Z
|
Leetcode/443. String Compression/solution2.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | null | null | null |
Leetcode/443. String Compression/solution2.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | 32
|
2020-07-27T13:54:24.000Z
|
2021-12-25T18:12:50.000Z
|
from typing import List


class Solution:
def compress(self, chars: List[str]) -> int:
l = 0
while l < len(chars):
r = l + 1
while r < len(chars) and chars[l] == chars[r]: r += 1
num = r - l
for k in range(r - l, 1, -1): chars.pop(l)
if num > 1:
for i, v in enumerate(str(num)): chars.insert(l + i + 1, v)
l += len(str(num))
l += 1
return len(chars)
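# Usage sketch (illustrative):
#     >>> chars = ["a", "a", "b", "b", "c", "c", "c"]
#     >>> Solution().compress(chars)
#     6
#     >>> chars
#     ['a', '2', 'b', '2', 'c', '3']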
| 32.571429
| 75
| 0.41886
| 68
| 456
| 2.808824
| 0.411765
| 0.125654
| 0.031414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.438596
| 456
| 13
| 76
| 35.076923
| 0.714844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd2798a9ad4d90fcc9bb40c5df39c9d1117edd80
| 5,946
|
py
|
Python
|
fetch.py
|
kirillvarn/grocerycomparator-stat
|
861f90a2d5b4c2b52d89b6cdb574b722eae2327d
|
[
"MIT"
] | null | null | null |
fetch.py
|
kirillvarn/grocerycomparator-stat
|
861f90a2d5b4c2b52d89b6cdb574b722eae2327d
|
[
"MIT"
] | null | null | null |
fetch.py
|
kirillvarn/grocerycomparator-stat
|
861f90a2d5b4c2b52d89b6cdb574b722eae2327d
|
[
"MIT"
] | null | null | null |
import repo
import export.csv as csv
# CONSTANTS
milk_q = "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%1l%%' OR name ILIKE '%%1 l%%') AND (name ILIKE '%%piim %%' OR name ILIKE '%%piim,%%') AND name NOT ILIKE '%%juust%%' AND name NOT ILIKE '%%kohupiim%%' AND name NOT ILIKE '%%laktoos%%' AND name NOT ILIKE '%%täis%%' AND name NOT ILIKE '%%kookos%%' AND name NOT ILIKE '%%latte%%'"
wheat_kilos = 1
query_to_parse: dict = {
"milk": milk_q,
"cookies": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%küpsised %%' OR name ILIKE '%%küpsis %%') AND name NOT ILIKE '%%koer%%';",
"sugar": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%suhkur%%'",
#"rimi milk": f"{milk_q} AND shop ILIKE '%%rimi%%'",
#"other shop milk": f"{milk_q} AND shop NOT ILIKE '%%rimi%%'",
#"eggs": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%munad %%' OR name ILIKE '%%munad, %%' OR name ILIKE '%%muna,%%') AND name NOT ilike '%%salvrät%%' AND name NOT ILIKE '%%Šokolaad%%' AND name NOT ILIKE '%%Martsipani%%' AND name NOT ILIKE '%%SELVERI KÖÖK%%' AND name NOT ILIKE '%%kitkat%%'" ,
"wheat": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%{wheat_kilos}kg%%' OR name ILIKE '%%{wheat_kilos} kg%%') AND (name ILIKE '%%nisujahu %%' OR name ILIKE '%%nisujahu,%%')",
"beef": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%veise %%' OR name ILIKE '%%veisepraad%%' OR name ILIKE '%%lihaveise%%') AND name NOT ILIKE '%%koera%%' AND name NOT ILIKE '%%pelmeen%%' AND name NOT ILIKE '%%põltsama%%' AND name NOT ILIKE '%%sink%%'",
"tomatoes": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%tomat %%' OR name ILIKE '%%tomat, %%') AND name NOT ILIKE '%%pasta%%' AND name NOT ILIKE '%%0g%%' AND name NOT ILIKE '%%0 g%%' AND name NOT ILIKE '%%harilik%%' AND name NOT ILIKE '%%krõpsud%%' AND name NOT ILIKE '%%marinaad%%' AND name NOT ILIKE '%%eine%%'",
#"cucumber": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%kurk %%' OR name ILIKE '%%kurk,%%')",
#"banana": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%kg%%' OR name ILIKE '%%chiq%%') AND (name ILIKE '%%banaan %%' OR name ILIKE '%%banaan,%%')",
"apple": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%õun %%' OR name ILIKE '%%õun,%%')",
"pear": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND name ILIKE '%%kg%%' AND (name ILIKE '%%pirn %%' OR name ILIKE '%%pirn,%%')",
"pizza": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%pizza%%' OR name ILIKE '%%pitsa%%' AND name NOT ILIKE '%%pitsamaitseline%%')",
"pig meat": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%sea kaela%%' OR name ILIKE '%%sea välisfilee%%' OR name ILIKE '%%sea sisefilee%%')",
"cake": f"SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%kook,%%' OR name ILIKE '%%kook%%') AND name NOT ILIKE '%%van kook%%' AND name NOT ILIKE '%%selveri köök%%' AND name NOT ILIKE '%%kookos%%' AND name NOT LIKE '%%smuuti%%' AND name NOT ILIKE '%%pannkook%%'",
"chicken": "SELECT * FROM \"%s\" WHERE price != 0 AND discount = false AND (name ILIKE '%%broileri rinnafilee%%' OR name ILIKE '%%pooltiivad%%' OR name ILIKE '%%poolkoivad%%' OR name ILIKE '%%kanafilee%%' OR name ILIKE '%%broilerifilee%%') AND name NOT ILIKE '%%HAU-HAU%%'"
}
def get_products():
return repo.get_prices(repo.connect(db="naive_products"))[1]
def get_products_by_name(name: str = "", query: str = ""):
if len(name) != 0:
return repo.get_prices(repo.connect(db="naive_products"), search_string=name)[1]
else:
return repo.get_prices(repo.connect(db="naive_products"), query=query)[1]
def get_normalized_price(data: list) -> list:
new_data = list()
for index, item in enumerate(data):
        if index == 0 and item is None:
            new_data.append(next(item for item in data if item is not None))
        elif index != 0 and data[index] is None:
new_data.append(new_data[index - 1])
else:
new_data.append(item)
return new_data
def get_trend(data: list) -> list:
new_data = list()
for index, item in enumerate(data):
if index != 0:
trend = "still"
            if data[index - 1] is not None:
if item > data[index - 1]:
trend = "up"
elif item < data[index - 1]:
trend = "down"
new_data.append({"value": item, "trend": trend})
return new_data
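# Illustrative behaviour of the two helpers above:
#     >>> get_normalized_price([None, 1.0, None, 1.2])
#     [1.0, 1.0, 1.0, 1.2]
#     >>> [d["trend"] for d in get_trend([1.0, 1.0, 1.2, 1.1])]
#     ['still', 'up', 'down']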
# def save_to_excel(dataset, sheet_name: str = "Sheet") -> None:
# tables = [i[0] for i in main.get_tables(main.connect(db="naive_products"))]
# # tables.remove("initial_products")
# header = ["Product name", "Shop name"] + tables
# data = []
# for item in dataset:
# prices = get_normalized_price(
# [dataset[item]["prices"][value]
# for value in dataset[item]["prices"]]
# )
# prices = get_trend(prices)
# value = [item, dataset[item]["shop"]] + prices
# data.append(value)
# table.append_header(header, sheet_name)
# table.put_data(data, sheet_name)
def save_to_csv(filename, dataset) -> None:
data = []
for item in dataset:
prices = get_normalized_price(
[dataset[item]["prices"][value]
for value in dataset[item]["prices"]]
)
value = [item] + prices
data.append(value)
csv.write_to_csv(f"datasets/{filename}.csv", zip(*data))
for i in query_to_parse:
products = get_products_by_name(query=query_to_parse[i])
save_to_csv(i, products)
| 58.871287
| 365
| 0.601245
| 835
| 5,946
| 4.209581
| 0.189222
| 0.101565
| 0.085349
| 0.123755
| 0.433855
| 0.407112
| 0.388336
| 0.388336
| 0.374964
| 0.336558
| 0
| 0.006888
| 0.218634
| 5,946
| 100
| 366
| 59.46
| 0.749677
| 0.235452
| 0
| 0.133333
| 0
| 0.183333
| 0.581251
| 0.014592
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.033333
| 0.016667
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd27a3a7d166518d8d7678101792de0e23b578ef
| 1,755
|
py
|
Python
|
code1.py
|
roshangol/executed-path-visualize
|
1759c12b0048fe117205990b151d2f5f57ad9616
|
[
"MIT"
] | null | null | null |
code1.py
|
roshangol/executed-path-visualize
|
1759c12b0048fe117205990b151d2f5f57ad9616
|
[
"MIT"
] | null | null | null |
code1.py
|
roshangol/executed-path-visualize
|
1759c12b0048fe117205990b151d2f5f57ad9616
|
[
"MIT"
] | null | null | null |
# EX1
# if x < y:
# y = 0
# x = x + 1
# else:
# x = y
def max(a, b, c):
if a > b and a > c:
        print(a, ' is maximum among all')
elif b > a and b > c:
print(b, ' is maximum among all')
else:
print(c, ' is maximum among all')
max(30, 28, 18)
# def triangleType(a, b, c):
# isATriangle = False
# if (a < b + c) and\
# (b < a + c) and\
# (c < a + b):
# isATriangle = True
# if isATriangle:
# if (a == b) and (b == c):
# print("the triangle was a EQUILATERAL")
# elif (a != b) and \
# (a != c) and \
# (b != c):
# print("the triangle was a SCALENE")
# else:
# print("invalid")
#
# triangleType(3, 5, 8)
# def testfunc(x, y):
# if x >= 0 and y >= 0:
# if y*y >= x*10 and y <= math.sin(math.radians(x*30))*25:
# if y >= math.cos(math.radians(x*40))*15:
# print('oooookk')
# testfunc(2, 3)
# EX2
# if (x < y):
# y = 0
# x = x + 1
# EX3
# if x < y:
# return
# print(x)
# return
# EX4
# x = 0
# while (x < y):
# y = f(x,y)
# x = x + 1
# EX5
# for x in range(10):
# y = f(x,y)
# a = [2 * x for x in y if x > 0 for y in z if y[0] < 3]
#
# digits = [0, 1, 5]
# a = 0
#
# for i in digits:
# a += i
# if i == 5:
# print("5 in list")
# break
# else:
# print("out of the loop")
# try:
# b = b + 5
# except KeyError:
# a += 1
# except ZeroDivisionError:
# a += 2
# else:
# a += 3
# finally:
# b += 1
# a = a - b
#
# x = 0
# while(x < y):
# y = f(x, y)
# if(y == 0):
# break
# elif(y < 0):
# y = y * 2
# continue
# x = x + 1
| 16.25
| 66
| 0.4
| 275
| 1,755
| 2.552727
| 0.254545
| 0.02849
| 0.017094
| 0.07265
| 0.153846
| 0.133903
| 0.133903
| 0.133903
| 0.037037
| 0
| 0
| 0.054795
| 0.417664
| 1,755
| 107
| 67
| 16.401869
| 0.632094
| 0.773219
| 0
| 0
| 0
| 0
| 0.198113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.125
| 0.375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd28f531641b97aa10ded06e3c6b7fdb2de0d2e7
| 1,193
|
py
|
Python
|
GameProject/dice.py
|
CreativeUsernameThatWontInsultAnyone/GameProject
|
998274e4587d93ff0564af174f4fc1e3a3e60174
|
[
"CC0-1.0"
] | 1
|
2021-11-13T17:14:03.000Z
|
2021-11-13T17:14:03.000Z
|
GameProject/dice.py
|
CreativeUsernameThatWontInsultAnyone/GameProject
|
998274e4587d93ff0564af174f4fc1e3a3e60174
|
[
"CC0-1.0"
] | null | null | null |
GameProject/dice.py
|
CreativeUsernameThatWontInsultAnyone/GameProject
|
998274e4587d93ff0564af174f4fc1e3a3e60174
|
[
"CC0-1.0"
] | null | null | null |
import random
import time
while (1):
def clear(): ##Placeholder code
time.sleep(1)
clearConsole = lambda: print('\n' * 150) ##
clearConsole()
wmsg = "Good morning!"
events = {
1 : "calm",
2 : "calm",
3 : "rainy",
4 : "rainy",
5 : "rainy",
6 : "thunder",
}
array = [1,2,3,4,5,6] ## Array used to get events or smth
output = random.choice(array)
defevent = events[output]
if defevent == "calm":
print(wmsg ,"It's a sunny day outside.")
clear()
elif defevent == "rainy":
print(wmsg, "You can hear the droplets falling onto your tent.")
clear()
else:
print(wmsg,"You hear thunder rumbling outside")
clear()
del array[output - 1]
if len(array) == 0: ##Array reset
        array.extend([1, 2, 3, 4, 5, 6])  # append() takes one item; extend refills the list
    ## Actually, we could drop these specifics and reuse this skeleton as our
    ## primary dice; a def could pull variables from other files and juggle them as needed.
break
| 28.404762
| 105
| 0.506287
| 144
| 1,193
| 4.194444
| 0.583333
| 0.044702
| 0.009934
| 0.013245
| 0.019868
| 0.019868
| 0
| 0
| 0
| 0
| 0
| 0.033467
| 0.373847
| 1,193
| 41
| 106
| 29.097561
| 0.7751
| 0.198659
| 0
| 0.09375
| 0
| 0
| 0.184939
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.0625
| 0
| 0.09375
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd2c1598eaae27b2b8504f6e96bc81711b260dde
| 774
|
py
|
Python
|
multivision/oa_image_io.py
|
olaals/tpktools
|
50416ca554809e3d2f364b25531c78cf4751311c
|
[
"MIT"
] | null | null | null |
multivision/oa_image_io.py
|
olaals/tpktools
|
50416ca554809e3d2f364b25531c78cf4751311c
|
[
"MIT"
] | null | null | null |
multivision/oa_image_io.py
|
olaals/tpktools
|
50416ca554809e3d2f364b25531c78cf4751311c
|
[
"MIT"
] | null | null | null |
import numpy as np
import OpenEXR as exr
import cv2
import Imath
import matplotlib.pyplot as plt
def readEXR(filename):
exrfile = exr.InputFile(filename)
header = exrfile.header()
dw = header['dataWindow']
isize = (dw.max.y - dw.min.y + 1, dw.max.x - dw.min.x + 1)
channelData = dict()
# convert all channels in the image to numpy arrays
for c in header['channels']:
C = exrfile.channel(c, Imath.PixelType(Imath.PixelType.FLOAT))
C = np.frombuffer(C, dtype=np.float32)
C = np.reshape(C, isize)
channelData[c] = C
colorChannels = ['R', 'G', 'B', 'A'] if 'A' in header['channels'] else ['R', 'G', 'B']
img = np.concatenate([channelData[c][...,np.newaxis] for c in colorChannels], axis=2)
return img
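# Usage sketch ("render.exr" is a hypothetical path; requires the OpenEXR
# bindings imported above):
#     img = readEXR("render.exr")   # float32 array, shape (H, W, 3) or (H, W, 4)
#     plt.imshow(np.clip(img[..., :3], 0.0, 1.0)); plt.show()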
| 29.769231
| 90
| 0.630491
| 114
| 774
| 4.280702
| 0.5
| 0.018443
| 0.02459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009967
| 0.222222
| 774
| 25
| 91
| 30.96
| 0.800664
| 0.063307
| 0
| 0
| 0
| 0
| 0.047157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.263158
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd318b68f4231a08be74b1a2c64d0b4969b29c51
| 2,422
|
py
|
Python
|
NNet/utils/readNNet.py
|
noyahoch/Marabou
|
03eb551498287e5372d462e3c2ad4fcc3210a5fa
|
[
"BSD-3-Clause"
] | 7
|
2020-01-27T21:25:49.000Z
|
2022-01-07T04:37:37.000Z
|
NNet/utils/readNNet.py
|
noyahoch/Marabou
|
03eb551498287e5372d462e3c2ad4fcc3210a5fa
|
[
"BSD-3-Clause"
] | 1
|
2022-01-25T17:41:54.000Z
|
2022-01-26T02:27:51.000Z
|
NNet/utils/readNNet.py
|
noyahoch/Marabou
|
03eb551498287e5372d462e3c2ad4fcc3210a5fa
|
[
"BSD-3-Clause"
] | 3
|
2020-03-14T17:12:17.000Z
|
2022-03-16T09:50:46.000Z
|
import numpy as np


def readNNet(nnetFile, withNorm=False):
    '''
    Read a .nnet file and return list of weight matrices and bias vectors
    Inputs:
        nnetFile: (string) .nnet file to read
        withNorm: (bool) If True, also return normalization parameters
    Returns:
        weights: List of weight matrices for fully connected network
        biases: List of bias vectors for fully connected network
        (only if withNorm) inputMins, inputMaxes, means, ranges: normalization parameters
    '''
    # Open NNet file
    f = open(nnetFile, 'r')

    # Skip header lines
    line = f.readline()
    while line[:2] == "//":
        line = f.readline()

    # Extract information about network architecture
    record = line.split(',')
    numLayers = int(record[0])
    inputSize = int(record[1])

    line = f.readline()
    record = line.split(',')
    layerSizes = np.zeros(numLayers + 1, 'int')
    for i in range(numLayers + 1):
        layerSizes[i] = int(record[i])

    # Skip extra obsolete parameter line
    f.readline()

    # Read the normalization information
    line = f.readline()
    inputMins = [float(x) for x in line.strip().split(",")[:-1]]
    line = f.readline()
    inputMaxes = [float(x) for x in line.strip().split(",")[:-1]]
    line = f.readline()
    means = [float(x) for x in line.strip().split(",")[:-1]]
    line = f.readline()
    ranges = [float(x) for x in line.strip().split(",")[:-1]]

    # Initialize list of weights and biases
    weights = [np.zeros((layerSizes[i], layerSizes[i + 1])) for i in range(numLayers)]
    biases = [np.zeros(layerSizes[i + 1]) for i in range(numLayers)]

    # Read remainder of file and place each value in the correct spot in a weight matrix or bias vector
    layer = 0
    i = 0
    j = 0
    line = f.readline()
    record = line.split(',')
    while layer + 1 < len(layerSizes):
        while i < layerSizes[layer + 1]:
            while record[j] != "\n":
                weights[layer][j, i] = float(record[j])
                j += 1
            j = 0
            i += 1
            line = f.readline()
            record = line.split(',')
        i = 0
        while i < layerSizes[layer + 1]:
            biases[layer][i] = float(record[0])
            i += 1
            line = f.readline()
            record = line.split(',')
        layer += 1
        i = 0
        j = 0
    f.close()
    if withNorm:
        return weights, biases, inputMins, inputMaxes, means, ranges
    return weights, biases
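# Illustrative usage sketch (not part of the original file): 'network.nnet' is a
# hypothetical path, and the forward pass assumes the .nnet convention of ReLU
# activations on every layer except the linear output layer.
weights, biases = readNNet('network.nnet')
x = np.zeros(weights[0].shape[0])   # weights[layer] is stored as (inputs, outputs)
for W, b in zip(weights[:-1], biases[:-1]):
    x = np.maximum(0, W.T @ x + b)  # hidden layers: ReLU
y = weights[-1].T @ x + biases[-1]  # output layer: linear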
| 27.83908
| 103
| 0.562758
| 311
| 2,422
| 4.382637
| 0.282958
| 0.040352
| 0.104916
| 0.061629
| 0.289068
| 0.242113
| 0.22157
| 0.200293
| 0.153338
| 0.088041
| 0
| 0.015449
| 0.30512
| 2,422
| 87
| 104
| 27.83908
| 0.794415
| 0.251858
| 0
| 0.480769
| 0
| 0
| 0.009665
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.019231
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd336f08882633e139c7b8cf8e6bbf9503123d24
| 13,668
|
py
|
Python
|
models/model.py
|
hearai/hearai
|
2f2bc2923fa2bb170d9ed895c3f638e99811442f
|
[
"MIT"
] | 16
|
2021-12-16T20:19:31.000Z
|
2022-03-19T15:59:23.000Z
|
models/model.py
|
hearai/hearai
|
2f2bc2923fa2bb170d9ed895c3f638e99811442f
|
[
"MIT"
] | 34
|
2021-12-21T19:33:31.000Z
|
2022-03-31T19:04:39.000Z
|
models/model.py
|
hearai/hearai
|
2f2bc2923fa2bb170d9ed895c3f638e99811442f
|
[
"MIT"
] | 5
|
2021-12-18T22:35:20.000Z
|
2022-02-20T12:26:39.000Z
|
from typing import Dict

import neptune.new as neptune
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from config import NEPTUNE_API_TOKEN, NEPTUNE_PROJECT_NAME
from sklearn.metrics import classification_report, f1_score
from utils.summary_loss import SummaryLoss
from math import ceil

from models.feature_extractors.multi_frame_feature_extractor import (
    MultiFrameFeatureExtractor,
)
from models.model_loader import ModelLoader
from models.common.simple_sequential_model import SimpleSequentialModel
from models.landmarks_models.lanmdarks_sequential_model import LandmarksSequentialModel
from models.head_models.head_sequential_model import HeadClassificationSequentialModel


# initialize neptune logging
def initialize_neptun(tags):
    return neptune.init(
        api_token=NEPTUNE_API_TOKEN,
        project=NEPTUNE_PROJECT_NAME,
        tags=tags,
        capture_stdout=False,
        capture_stderr=False,
    )


class GlossTranslationModel(pl.LightningModule):
    """Awesome model for Gloss Translation"""

    def __init__(
        self,
        general_parameters: Dict = None,
        train_parameters: Dict = None,
        feature_extractor_parameters: Dict = None,
        transformer_parameters: Dict = None,
        heads: Dict = None,
        freeze_scheduler: Dict = None,
        loss_function=nn.BCEWithLogitsLoss,
        steps_per_epoch: int = 1000
    ):
        """
        Args:
            general_parameters (Dict): Dict containing general parameters not parameterizing the training process.
                [Warning] Must contain fields:
                - path_to_save (str)
                - neptune (bool)
            feature_extractor_parameters (Dict): Dict containing parameters regarding the currently used feature extractor.
                [Warning] Must contain fields:
                - "name" (str)
                - "model_path" (str)
                - "representation_size" (int)
            transformer_parameters (Dict): Dict containing parameters regarding the currently used transformer.
                [Warning] Must contain fields:
                - "name" (str)
                - "output_size" (int)
                - "feedforward_size" (int)
                - "num_encoder_layers" (int)
                - "num_attention_heads" (int)
                - "dropout_rate" (float)
            train_parameters (Dict): Dict containing parameters parameterizing the training process.
                [Warning] Must contain fields:
                - "num_segments" (int)
                - "lr" (float)
                - "multiply_lr_step" (float)
                - "warmup_steps" (float)
                - "classification_mode" (str)
            heads (Dict): Dict containing information describing the structure of output heads for specific tasks (gloss/hamnosys).
            freeze_scheduler (Dict): Dict containing information describing the feature_extractor & transformer freezing/unfreezing process.
            loss_function (torch.nn.Module): Loss function.
        """
        super().__init__()
        if general_parameters["neptune"]:
            tags = [train_parameters["classification_mode"], feature_extractor_parameters["name"], transformer_parameters["name"]]
            self.run = initialize_neptun(tags)
            self.run["parameters"] = {
                "general_parameters": general_parameters,
                "train_parameters": train_parameters,
                "feature_extractor_parameters": feature_extractor_parameters,
                "transformer_parameters": transformer_parameters,
                "heads": heads,
                "freeze_scheduler": freeze_scheduler,
                "loss_function": loss_function
            }
        else:
            self.run = None

        # parameters
        self.lr = train_parameters["lr"]
        self.model_save_dir = general_parameters["path_to_save"]
        self.warmup_steps = train_parameters["warmup_steps"]
        self.multiply_lr_step = train_parameters["multiply_lr_step"]
        self.use_frames = train_parameters["use_frames"]
        self.use_landmarks = train_parameters["use_landmarks"]
        self.classification_heads = heads[train_parameters['classification_mode']]
        self.cls_head = nn.ModuleList()
        self.loss_weights = []
        for value in self.classification_heads.values():
            self.cls_head.append(
                HeadClassificationSequentialModel(
                    classes_number=value["num_class"],
                    representation_size=3 * value["num_class"],
                    additional_layers=1,
                    dropout_rate=heads["model"]["dropout_rate"]
                )
            )
            self.loss_weights.append(value["loss_weight"])

        # losses
        self.summary_loss = SummaryLoss(loss_function, self.loss_weights)

        # models-parts
        self.model_loader = ModelLoader()
        representation_size = feature_extractor_parameters["representation_size"]
        self.adjustment_to_representatios_size = nn.LazyLinear(out_features=representation_size)
        if self.use_frames:
            self.multi_frame_feature_extractor = MultiFrameFeatureExtractor(
                self.model_loader.load_feature_extractor(
                    feature_extractor_name=feature_extractor_parameters["name"],
                    representation_size=representation_size,
                    model_path=feature_extractor_parameters["model_path"],
                )
            )
        else:
            self.multi_frame_feature_extractor = None
        self.transformer = self.model_loader.load_transformer(
            transformer_name=transformer_parameters["name"],
            feature_extractor_parameters=feature_extractor_parameters,
            transformer_parameters=transformer_parameters,
            train_parameters=train_parameters
        )
        self.steps_per_epoch = steps_per_epoch
        if freeze_scheduler is not None:
            self.freeze_scheduler = freeze_scheduler
            self.configure_freeze_scheduler()

    def forward(self, input, **kwargs):
        predictions = []
        frames, landmarks = input
        if self.use_frames:
            x = self.multi_frame_feature_extractor(frames.to(self.device))
        if self.use_landmarks:
            x_landmarks = self._prepare_landmarks_tensor(landmarks)
            if self.use_frames:
                x = torch.concat([x, x_landmarks], dim=-1)
            else:
                x = x_landmarks
        x = self.adjustment_to_representatios_size(x)
        x = self.transformer(x)
        for head in self.cls_head:
            predictions.append(head(x))
        return predictions

    def _prepare_landmarks_tensor(self, landmarks):
        concatenated_landmarks = np.concatenate(
            [landmarks[landmarks_name] for landmarks_name in landmarks.keys()],
            axis=-1
        )
        return torch.as_tensor(concatenated_landmarks, dtype=torch.float32, device=self.device)

    def training_step(self, batch, batch_idx):
        targets, predictions, losses = self._process_batch(batch)
        self.scheduler.step()
        if self.global_step < 2:
            for name, child in self.named_children():
                for param in child.parameters():
                    param.requires_grad = True
        if self.freeze_scheduler["freeze_mode"] == "step":
            self.freeze_step()
        if self.run:
            self.run["metrics/batch/training_loss"].log(losses)
        return {"loss": losses}

    def validation_step(self, batch, batch_idx):
        targets, predictions, losses = self._process_batch(batch)
        if self.run:
            self.run["metrics/batch/validation_loss"].log(losses)
        return {"val_loss": losses, "targets": targets, "predictions": predictions}

    def _process_batch(self, batch):
        frames, landmarks, targets = batch
        predictions = self((frames, landmarks))
        losses = self.summary_loss(predictions, targets)
        return targets, predictions, losses

    def validation_epoch_end(self, out):
        head_names = list(self.classification_heads.keys())
        # initialize empty list with list per head
        all_targets = [[] for name in head_names]
        all_predictions = [[] for name in head_names]
        for single_batch in out:
            targets, predictions = single_batch["targets"], single_batch["predictions"]
            # append predictions and targets for every head
            for nr_head, head_targets in enumerate(targets):
                all_targets[nr_head] += list(torch.argmax(targets[nr_head], dim=1).cpu().detach().numpy())
                all_predictions[nr_head] += list(torch.argmax(predictions[nr_head], dim=1).cpu().detach().numpy())
        for nr_head, targets_for_head in enumerate(all_targets):
            head_name = head_names[nr_head]
            predictions_for_head = all_predictions[nr_head]
            head_report = "\n".join(
                [
                    head_name,
                    classification_report(
                        targets_for_head, predictions_for_head, zero_division=0
                    ),
                ]
            )
            print(head_report)
            f1 = f1_score(targets_for_head, predictions_for_head,
                          average='macro', zero_division=0)
            if self.run:
                log_path = "/".join(["metrics/epoch/", head_name])
                self.run[log_path].log(head_report)
                self.run[f'/metrics/epoch/f1/{head_name}'].log(f1)
        if self.trainer.global_step > 0:
            print("Saving model...")
            torch.save(self.state_dict(), self.model_save_dir)
        self.scheduler.step()
        if (self.freeze_scheduler is not None) and self.freeze_scheduler["freeze_mode"] == "epoch":
            self.freeze_step()

    def configure_optimizers(self):
        optimizer = torch.optim.RAdam(self.parameters(), lr=self.lr)
        self.scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,
            max_lr=self.lr,
            div_factor=100,
            final_div_factor=10,
            pct_start=0.2,
            total_steps=self.trainer.max_epochs * self.steps_per_epoch + 2)
        return [optimizer], [self.scheduler]

    def optimizer_step(
        self,
        epoch,
        batch_idx,
        optimizer,
        optimizer_idx,
        optimizer_closure,
        on_tpu=False,
        using_native_amp=False,
        using_lbfgs=False,
    ):
        optimizer.step(closure=optimizer_closure)
        if self.run:
            self.run["params/lr"].log(optimizer.param_groups[0]["lr"])

    def configure_freeze_scheduler(self):
        ### TO-DO check if all params are correctly set
        # e.g. check if all lists are the same length
        # check if values are bools
        self.freeze_scheduler["current_pattern"] = 0
        self.freeze_scheduler["current_counter"] = 0
        self.freeze_step()

    def freeze_step(self):
        ### TO-DO
        # If `freeze_pattern_repeats` is set as an integer instead of a list,
        # e.g. `freeze_pattern_repeats = 3`, it is equal to a pattern
        # `feature_extractor = [True, False] * freeze_pattern_repeats`,
        # hence it is exactly the same as:
        # ```
        # "model_params": {
        #     "feature_extractor": [True, False, True, False, True, False],
        #     "transformer": [False, True, False, True, False, True],
        # }
        # ```
        if self.freeze_scheduler is not None:
            self.freeze_update()
            for params_to_freeze in list(self.freeze_scheduler["model_params"].keys()):
                if self.freeze_scheduler["current_pattern"] >= len(
                    self.freeze_scheduler["model_params"][params_to_freeze]
                ):
                    current_pattern = True
                else:
                    current_pattern = self.freeze_scheduler["model_params"][
                        params_to_freeze
                    ][self.freeze_scheduler["current_pattern"]]
                for name, child in self.named_children():
                    if params_to_freeze in name:
                        for param in child.parameters():
                            param.requires_grad = not current_pattern
                if self.freeze_scheduler["verbose"]:
                    print(
                        "Freeze status:",
                        params_to_freeze,
                        "set to",
                        str(current_pattern),
                    )

    def freeze_update(self):
        if self.freeze_scheduler["current_pattern"] >= len(
            self.freeze_scheduler["model_params"][
                list(self.freeze_scheduler["model_params"].keys())[0]
            ]
        ):
            return
        if (
            self.freeze_scheduler["current_counter"]
            >= self.freeze_scheduler["freeze_pattern_repeats"][
                self.freeze_scheduler["current_pattern"]
            ]
        ):
            self.freeze_scheduler["current_pattern"] += 1
            self.freeze_scheduler["current_counter"] = 0
        self.freeze_scheduler["current_counter"] += 1
| 41.92638
| 136
| 0.592479
| 1,382
| 13,668
| 5.591896
| 0.194645
| 0.05823
| 0.054089
| 0.033644
| 0.273163
| 0.178571
| 0.149068
| 0.104296
| 0.059006
| 0.059006
| 0
| 0.003993
| 0.321993
| 13,668
| 325
| 137
| 42.055385
| 0.829934
| 0.156277
| 0
| 0.138655
| 0
| 0
| 0.071023
| 0.013938
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054622
| false
| 0
| 0.063025
| 0.004202
| 0.155462
| 0.012605
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd33abe036b992ac7ac194a0541c5439617437c4
| 2,305
|
py
|
Python
|
solutions/day09/solution.py
|
dbjohnson/advent-of-code-2021
|
2ed1d30362afa0a73c890730cea46de3291be21f
|
[
"MIT"
] | null | null | null |
solutions/day09/solution.py
|
dbjohnson/advent-of-code-2021
|
2ed1d30362afa0a73c890730cea46de3291be21f
|
[
"MIT"
] | null | null | null |
solutions/day09/solution.py
|
dbjohnson/advent-of-code-2021
|
2ed1d30362afa0a73c890730cea46de3291be21f
|
[
"MIT"
] | null | null | null |
from functools import lru_cache
from collections import defaultdict

import pandas as pd
import numpy as np


with open('input.txt') as fh:
    depthmap = pd.DataFrame([{
        'row': row,
        'col': col,
        'height': int(d)
    }
        for row, line in enumerate(fh)
        for col, d in enumerate(line.strip())
    ]).pivot_table(
        index='row',
        columns='col',
        values='height'
    ).values


idx = (
    # right neighbor
    np.pad(
        depthmap[:, :-1] < depthmap[:, 1:],
        ((0, 0), (0, 1)),
        'constant',
        constant_values=1
    ) &
    # left neighbor
    np.pad(
        depthmap[:, 1:] < depthmap[:, :-1],
        ((0, 0), (1, 0)),
        'constant',
        constant_values=1
    ) &
    # lower neighbor
    np.pad(
        depthmap[:-1, :] < depthmap[1:, :],
        ((0, 1), (0, 0)),
        'constant',
        constant_values=1
    ) &
    # upper neighbor
    np.pad(
        depthmap[1:, :] < depthmap[:-1, :],
        ((1, 0), (0, 0)),
        'constant',
        constant_values=1
    )
)

print('part 1', (depthmap[np.where(idx)] + 1).sum())


# lru_cache here is essentially cheap DP - once we've calculated
# the basin for any point A, we know the basin for any point B that
# flows through point A
@lru_cache(maxsize=None)
def lowpoint(row, col):
    if depthmap[row, col] == 9:
        return None
    drains = {(row, col)}
    for r, c in (
        (row - 1, col),
        (row + 1, col),
        (row, col - 1),
        (row, col + 1)
    ):
        if (
            0 <= r < depthmap.shape[0]
            and 0 <= c < depthmap.shape[1]
            and depthmap[r, c] < depthmap[row, col]
        ):
            drains.add(lowpoint(r, c))
    return min(
        drains,
        key=lambda rowcol: depthmap[rowcol]
    )


lowpoint_to_basin = defaultdict(list)
for r in range(depthmap.shape[0]):
    for c in range(depthmap.shape[1]):
        lowpoint_to_basin[lowpoint(r, c)].append((r, c))

print(
    'part 2',
    np.prod(sorted([
        len(points)
        for basin, points in lowpoint_to_basin.items()
        if basin
    ])[-3:])
)

# part 1 now that we solved part 2...
print(
    'part 1 redux',
    sum([
        depthmap[lowpoint] + 1
        for lowpoint in lowpoint_to_basin
        if lowpoint
    ])
)
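# Illustrative note (not part of the original solution): because lowpoint() is
# wrapped in lru_cache, each cell's low point is resolved at most once during
# the sweep above; cache_info() makes the memoization visible.
print('lowpoint cache:', lowpoint.cache_info())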
| 21.342593
| 67
| 0.516269
| 296
| 2,305
| 3.966216
| 0.331081
| 0.061329
| 0.044293
| 0.07155
| 0.205281
| 0.15247
| 0.109881
| 0.083475
| 0.056218
| 0
| 0
| 0.030619
| 0.334056
| 2,305
| 108
| 68
| 21.342593
| 0.734202
| 0.106291
| 0
| 0.223529
| 0
| 0
| 0.043372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011765
| false
| 0
| 0.047059
| 0
| 0.082353
| 0.035294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd3470135bfe7a2b8866c6a268c9e629dad7a8b7
| 3,467
|
py
|
Python
|
docs/conf.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 52
|
2021-04-15T23:24:12.000Z
|
2022-03-09T23:02:27.000Z
|
docs/conf.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 119
|
2021-04-13T11:42:01.000Z
|
2022-02-24T10:02:35.000Z
|
docs/conf.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 14
|
2021-04-13T19:00:19.000Z
|
2022-02-23T09:17:30.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import re
import subprocess
import sys
from pathlib import Path

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, str(Path(__file__).parent.parent.parent.resolve()))

from pystac_client import __version__  # noqa: E402

git_branch = (
    subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
    .decode("utf-8")
    .strip()
)

# -- Project information -----------------------------------------------------

project = 'pystac-client'
copyright = '2021, Jon Duckworth'
author = 'Matthew Hanson, Jon Duckworth'
github_user = 'stac-utils'
github_repo = 'pystac-client'
package_description = 'A Python client for the STAC and STAC-API specs'

# The full version, including alpha/beta/rc tags
version = re.fullmatch(r'^(\d+\.\d+\.\d).*$', __version__).group(1)
release = __version__

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon',
    'sphinx.ext.extlinks', 'sphinxcontrib.fulltoc', 'nbsphinx', 'myst_parser'
]

extlinks = {
    "tutorial": (
        "https://github.com/stac-utils/pystac-client/"
        "tree/{}/docs/tutorials/%s".format(git_branch),
        "tutorial",
    )
}

nbsphinx_allow_errors = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
source_suffix = [".rst", "*.md", "*.ipynb"]
exclude_patterns = ['build/*']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme_options = {
    # 'sidebar_collapse': False,
    'fixed_sidebar': True,
    'github_button': True,
    'github_user': github_user,
    'github_repo': github_repo,
    'description': package_description
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# -- Options for intersphinx extension ---------------------------------------

intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'requests': ('https://requests.readthedocs.io/en/master', None),
    'pystac': ('https://pystac.readthedocs.io/en/latest', None),
    'dateutil': ('https://dateutil.readthedocs.io/en/stable/', None),
}

# -- Options for autodoc extension -------------------------------------------

autodoc_typehints = "none"
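# Illustrative note (not part of the original conf.py): the version regex above
# keeps only the major.minor.patch prefix of __version__, dropping pre-release
# suffixes, e.g. re.fullmatch(r'^(\d+\.\d+\.\d).*$', '0.3.0b1').group(1) == '0.3.0'.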
| 33.660194
| 97
| 0.654168
| 425
| 3,467
| 5.221176
| 0.48
| 0.024335
| 0.020279
| 0.01352
| 0.044164
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003709
| 0.144505
| 3,467
| 102
| 98
| 33.990196
| 0.744437
| 0.496971
| 0
| 0
| 0
| 0
| 0.398949
| 0.03972
| 0
| 0
| 0
| 0.009804
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd36eb6513428b0c0f981f91eaea0aa21154992a
| 689
|
py
|
Python
|
cb_scripts/nums_square_cube.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | 1
|
2022-02-05T06:39:05.000Z
|
2022-02-05T06:39:05.000Z
|
cb_scripts/nums_square_cube.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | null | null | null |
cb_scripts/nums_square_cube.py
|
christopher-burke/python-scripts
|
bdbea2456130e0958b6a6ab8d138f4f19b39b934
|
[
"MIT"
] | 1
|
2021-06-10T22:04:35.000Z
|
2021-06-10T22:04:35.000Z
|
#!/usr/bin/env python3
"""Squares and Cubes for a range of numbers.

Given a start and end, calculate the Square x**2 and
the Cube x**3 for all numbers.

Example of generator and functools.partial.
"""
from functools import partial


def power(base, exponent):
    """Raise a base to the exponent."""
    return base ** exponent


square = partial(power, exponent=2)
cube = partial(power, exponent=3)


def main(start, end):
    """Square and cube all numbers in range of start to end."""
    for i in range(start, end + 1):
        yield i, square(i), cube(i)


if __name__ == "__main__":
    print("number\tsquare\tcube")
    for x in main(1, 10):
        print("{}\t{}\t{}".format(*x))
| 20.264706
| 63
| 0.651669
| 109
| 689
| 4.045872
| 0.458716
| 0.031746
| 0.090703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016514
| 0.208999
| 689
| 33
| 64
| 20.878788
| 0.792661
| 0.400581
| 0
| 0
| 0
| 0
| 0.09596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd36fd075f7cd95707b64e346e7a7db96e365eac
| 1,748
|
py
|
Python
|
mozdns/txt/tests.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 22
|
2015-01-16T01:36:32.000Z
|
2020-06-08T00:46:18.000Z
|
mozdns/txt/tests.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 8
|
2015-12-28T18:56:19.000Z
|
2019-04-01T17:33:48.000Z
|
mozdns/txt/tests.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 13
|
2015-01-13T20:56:22.000Z
|
2022-02-23T06:01:17.000Z
|
from django.test import TestCase
from django.core.exceptions import ValidationError

from mozdns.txt.models import TXT
from mozdns.domain.models import Domain


class TXTTests(TestCase):
    def setUp(self):
        self.o = Domain(name="org")
        self.o.save()
        self.o_e = Domain(name="oregonstate.org")
        self.o_e.save()

    def do_generic_add(self, data):
        txt = TXT(**data)
        txt.__repr__()
        txt.save()
        self.assertTrue(txt.details())
        self.assertTrue(txt.get_absolute_url())
        self.assertTrue(txt.get_edit_url())
        self.assertTrue(txt.get_delete_url())
        rtxt = TXT.objects.filter(**data)
        self.assertTrue(len(rtxt) == 1)
        return txt

    def do_remove(self, data):
        txt = self.do_generic_add(data)
        txt.delete()
        rmx = TXT.objects.filter(**data)
        self.assertTrue(len(rmx) == 0)

    def test_add_remove_txt(self):
        label = "asdf"
        data = "asdf"
        data = {'label': label, 'txt_data': data, 'domain': self.o_e}
        self.do_generic_add(data)

        label = "asdf"
        data = "asdfasfd"
        data = {'label': label, 'txt_data': data, 'domain': self.o_e}
        self.do_generic_add(data)

        label = "df"
        data = "aasdf"
        data = {'label': label, 'txt_data': data, 'domain': self.o_e}
        self.do_generic_add(data)

        label = "12314"
        data = "dd"
        data = {'label': label, 'txt_data': data, 'domain': self.o}
        self.do_generic_add(data)

    def test_bad_data(self):
        label = "asdf"
        data = '"dfa f'
        data = {'label': label, 'txt_data': data, 'domain': self.o_e}
        self.assertRaises(ValidationError, self.do_generic_add, data)
| 29.627119
| 69
| 0.587529
| 227
| 1,748
| 4.343612
| 0.242291
| 0.045639
| 0.085193
| 0.097363
| 0.44929
| 0.341785
| 0.341785
| 0.266734
| 0.266734
| 0.230223
| 0
| 0.005507
| 0.272883
| 1,748
| 58
| 70
| 30.137931
| 0.77026
| 0
| 0
| 0.229167
| 0
| 0
| 0.089817
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 1
| 0.104167
| false
| 0
| 0.083333
| 0
| 0.229167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd39f1397ad328542fed8bb62d6c47dc4c191597
| 6,698
|
py
|
Python
|
xtesting/tests/unit/core/test_behaveframework.py
|
collivier/functest-xtesting
|
17739d718901a10f7ec0aaf9a6d53141294a347d
|
[
"Apache-2.0"
] | 1
|
2020-05-15T12:58:58.000Z
|
2020-05-15T12:58:58.000Z
|
xtesting/tests/unit/core/test_behaveframework.py
|
collivier/functest-xtesting
|
17739d718901a10f7ec0aaf9a6d53141294a347d
|
[
"Apache-2.0"
] | null | null | null |
xtesting/tests/unit/core/test_behaveframework.py
|
collivier/functest-xtesting
|
17739d718901a10f7ec0aaf9a6d53141294a347d
|
[
"Apache-2.0"
] | 3
|
2018-02-28T15:55:14.000Z
|
2022-02-24T15:46:12.000Z
|
#!/usr/bin/env python

# Copyright (c) 2019 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

"""Define the classes required to fully cover behave."""

import logging
import os
import unittest

import mock

from xtesting.core import behaveframework

__author__ = "Deepak Chandella <deepak.chandella@orange.com>"


class ParseResultTesting(unittest.TestCase):
    """The class testing BehaveFramework.parse_results()."""
    # pylint: disable=missing-docstring

    _response = [{'status': 'passed'}]

    def setUp(self):
        self.test = behaveframework.BehaveFramework(
            case_name='behave', project_name='xtesting')

    @mock.patch('builtins.open', side_effect=OSError)
    def test_raises_exc_open(self, *args):  # pylint: disable=unused-argument
        with self.assertRaises(OSError):
            self.test.parse_results()

    @mock.patch('json.load', return_value=[{'foo': 'bar'}])
    @mock.patch('builtins.open', mock.mock_open())
    def test_raises_exc_key(self, *args):  # pylint: disable=unused-argument
        with self.assertRaises(KeyError):
            self.test.parse_results()

    @mock.patch('json.load', return_value=[])
    @mock.patch('builtins.open', mock.mock_open())
    def test_raises_exe_zerodivision(self, *args):
        # pylint: disable=unused-argument
        with self.assertRaises(ZeroDivisionError):
            self.test.parse_results()

    def _test_result(self, response, result):
        with mock.patch('builtins.open', mock.mock_open()), \
                mock.patch('json.load', return_value=response):
            self.test.parse_results()
        self.assertEqual(self.test.result, result)

    def test_null_passed(self):
        data = [{'status': 'dummy'}]
        self._test_result(data, 0)

    def test_half_success(self):
        data = [{'status': 'passed'}, {'status': 'failed'}]
        self._test_result(data, 50)

    def test_success(self):
        data = [{'status': 'passed'}, {'status': 'passed'}]
        self._test_result(data, 100)

    @mock.patch('builtins.open', mock.mock_open())
    def test_count(self, *args):  # pylint: disable=unused-argument
        self._response.extend([{'status': 'failed'}, {'status': 'skipped'}])
        with mock.patch('json.load', mock.Mock(return_value=self._response)):
            self.test.parse_results()
            self.assertEqual(self.test.details['pass_tests'], 1)
            self.assertEqual(self.test.details['fail_tests'], 1)
            self.assertEqual(self.test.details['skip_tests'], 1)
            self.assertEqual(self.test.details['total_tests'], 3)


class RunTesting(unittest.TestCase):
    """The class testing BehaveFramework.run()."""
    # pylint: disable=missing-docstring

    suites = ["foo"]
    tags = ["bar"]

    def setUp(self):
        self.test = behaveframework.BehaveFramework(
            case_name='behave', project_name='xtesting')

    def test_exc_key_error(self):
        self.assertEqual(self.test.run(), self.test.EX_RUN_ERROR)

    @mock.patch('xtesting.core.behaveframework.behave_main')
    def _test_makedirs_exc(self, *args):
        with mock.patch.object(self.test, 'parse_results') as mock_method:
            self.assertEqual(
                self.test.run(
                    suites=self.suites, tags=self.tags),
                self.test.EX_RUN_ERROR)
            args[0].assert_not_called()
            mock_method.assert_not_called()

    @mock.patch('os.makedirs', side_effect=Exception)
    @mock.patch('os.path.exists', return_value=False)
    def test_makedirs_exc(self, *args):
        self._test_makedirs_exc()
        args[0].assert_called_once_with(self.test.res_dir)
        args[1].assert_called_once_with(self.test.res_dir)

    @mock.patch('xtesting.core.behaveframework.behave_main')
    def _test_makedirs(self, *args):
        with mock.patch.object(self.test, 'parse_results') as mock_method:
            self.assertEqual(
                self.test.run(suites=self.suites, tags=self.tags),
                self.test.EX_OK)
            html_file = os.path.join(self.test.res_dir, 'output.html')
            args_list = [
                '--junit',
                f'--junit-directory={self.test.res_dir}',
                '--format=json', f'--outfile={self.test.json_file}',
                '--format=behave_html_formatter:HTMLFormatter',
                f'--outfile={html_file}',
                '--tags=' + ','.join(self.tags)]
            args_list.append('foo')
            args[0].assert_called_once_with(args_list)
            mock_method.assert_called_once_with()

    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists', return_value=False)
    def test_makedirs(self, *args):
        self._test_makedirs()
        args[0].assert_called_once_with(self.test.res_dir)
        args[1].assert_called_once_with(self.test.res_dir)

    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists', return_value=True)
    def test_makedirs_oserror17(self, *args):
        self._test_makedirs()
        args[0].assert_called_once_with(self.test.res_dir)
        args[1].assert_not_called()

    @mock.patch('os.makedirs')
    @mock.patch('xtesting.core.behaveframework.behave_main')
    def _test_parse_results(self, status, console, *args):
        self.assertEqual(
            self.test.run(
                suites=self.suites, tags=self.tags, console=console),
            status)
        html_file = os.path.join(self.test.res_dir, 'output.html')
        args_list = [
            '--junit',
            f'--junit-directory={self.test.res_dir}',
            '--format=json', f'--outfile={self.test.json_file}',
            '--format=behave_html_formatter:HTMLFormatter',
            f'--outfile={html_file}',
            '--tags=' + ','.join(self.tags)]
        if console:
            args_list += ['--format=pretty', '--outfile=-']
        args_list.append('foo')
        args[0].assert_called_once_with(args_list)
        args[1].assert_called_once_with(self.test.res_dir)

    def test_parse_results_exc(self, console=False):
        with mock.patch.object(self.test, 'parse_results',
                               side_effect=Exception) as mock_method:
            self._test_parse_results(self.test.EX_RUN_ERROR, console)
            mock_method.assert_called_once_with()

    def test_parse_results_exc_console(self):
        self.test_parse_results_exc(console=True)


if __name__ == "__main__":
    logging.disable(logging.CRITICAL)
    unittest.main(verbosity=2)
| 37.418994
| 77
| 0.640042
| 823
| 6,698
| 4.985419
| 0.200486
| 0.083841
| 0.050695
| 0.048745
| 0.68389
| 0.637582
| 0.565684
| 0.516208
| 0.506702
| 0.418962
| 0
| 0.005939
| 0.220663
| 6,698
| 178
| 78
| 37.629213
| 0.780077
| 0.094357
| 0
| 0.453846
| 0
| 0
| 0.158331
| 0.069228
| 0
| 0
| 0
| 0
| 0.192308
| 1
| 0.146154
| false
| 0.038462
| 0.038462
| 0
| 0.223077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803c
| 963
|
py
|
Python
|
data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/test/python/cd3a28ba018f4c08dd5b0ec2fb2ba69c859e803cdjango.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
# coding=utf-8
from fabric.api import env, run

COMMAND_COLLECTSTATIC = 'collectstatic'
COMMAND_SYNCDB = 'syncdb'
COMMAND_MIGRATE = 'migrate'

_default_command = '{python} {manage} {command}'
_commands_list = {
    COMMAND_COLLECTSTATIC: 'yes yes | {python} {manage} {command}',
    COMMAND_MIGRATE: '{python} {manage} {command} --noinput',
}


def django_commands(os_environment=None):
    for command in env.django_commands:
        _django_command(command, os_environment)


def _django_command(command, os_environment):
    command_to_run = _commands_list.get(command, _default_command)
    command_to_run = command_to_run.format(
        python=env.server_python,
        manage=env.server_manage,
        command=command
    )
    if os_environment is None:
        run(command_to_run)
        return
    prefix = ' '.join([
        '{}={}'.format(k, v)
        for k, v in os_environment.items()
    ])
    run('{} {}'.format(prefix, command_to_run))
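# Illustrative usage sketch (not part of the original file): the env values and
# the settings module are hypothetical; in practice they are configured in a
# fabfile and the task is invoked with the `fab` CLI against a remote host.
env.server_python = '/srv/app/venv/bin/python'
env.server_manage = '/srv/app/manage.py'
env.django_commands = [COMMAND_MIGRATE, COMMAND_COLLECTSTATIC]
django_commands(os_environment={'DJANGO_SETTINGS_MODULE': 'project.settings'})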
| 24.075
| 67
| 0.677051
| 115
| 963
| 5.356522
| 0.33913
| 0.113636
| 0.097403
| 0.071429
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001302
| 0.202492
| 963
| 39
| 68
| 24.692308
| 0.800781
| 0.012461
| 0
| 0
| 0
| 0
| 0.14557
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd3da08c421072d75aa5562437930fcd09889489
| 8,820
|
py
|
Python
|
commercialoperator/components/bookings/utils.py
|
wilsonc86/ledger
|
a60a681e547f37e4ac81cb93dffaf90aea8c8151
|
[
"Apache-2.0"
] | null | null | null |
commercialoperator/components/bookings/utils.py
|
wilsonc86/ledger
|
a60a681e547f37e4ac81cb93dffaf90aea8c8151
|
[
"Apache-2.0"
] | null | null | null |
commercialoperator/components/bookings/utils.py
|
wilsonc86/ledger
|
a60a681e547f37e4ac81cb93dffaf90aea8c8151
|
[
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core.exceptions import ValidationError
from datetime import datetime, timedelta

from commercialoperator.components.main.models import Park
from commercialoperator.components.proposals.models import Proposal
from ledger.checkout.utils import create_basket_session, create_checkout_session, calculate_excl_gst
from ledger.payments.models import Invoice
from ledger.payments.utils import oracle_parser
import json
from decimal import Decimal

from commercialoperator.components.bookings.models import Booking, ParkBooking, ApplicationFee

import logging
logger = logging.getLogger('payment_checkout')


def create_booking(request, proposal_id):
    """ Create the ledger lines - line items for invoice sent to payment system """
    #import ipdb; ipdb.set_trace()
    booking = Booking.objects.create(proposal_id=proposal_id)
    tbody = json.loads(request.POST['payment'])['tbody']
    for row in tbody:
        park_id = row[0]['value']
        arrival = row[1]
        no_adults = int(row[2]) if row[2] else 0
        no_children = int(row[3]) if row[3] else 0
        no_free_of_charge = int(row[4]) if row[4] else 0
        park = Park.objects.get(id=park_id)
        if any([no_adults, no_children, no_free_of_charge]):
            park_booking = ParkBooking.objects.create(
                booking = booking,
                park_id = park_id,
                arrival = datetime.strptime(arrival, '%Y-%m-%d').date(),
                no_adults = no_adults,
                no_children = no_children,
                no_free_of_charge = no_free_of_charge,
                cost = no_adults*park.adult_price + no_children*park.child_price
            )
    if not park_booking:
        raise ValidationError('Must have at least one person visiting the park')
    return booking


def get_session_booking(session):
    if 'cols_booking' in session:
        booking_id = session['cols_booking']
    else:
        raise Exception('Booking not in Session')
    try:
        return Booking.objects.get(id=booking_id)
    except Booking.DoesNotExist:
        raise Exception('Booking not found for booking_id {}'.format(booking_id))


def set_session_booking(session, booking):
    session['cols_booking'] = booking.id
    session.modified = True


def delete_session_booking(session):
    if 'cols_booking' in session:
        del session['cols_booking']
        session.modified = True


def get_session_application_invoice(session):
    """ Application Fee session ID """
    if 'cols_app_invoice' in session:
        application_fee_id = session['cols_app_invoice']
    else:
        raise Exception('Application not in Session')
    try:
        #return Invoice.objects.get(id=application_invoice_id)
        #return Proposal.objects.get(id=proposal_id)
        return ApplicationFee.objects.get(id=application_fee_id)
    except Invoice.DoesNotExist:
        raise Exception('Application not found for application {}'.format(application_fee_id))


def set_session_application_invoice(session, application_fee):
    """ Application Fee session ID """
    session['cols_app_invoice'] = application_fee.id
    session.modified = True


def delete_session_application_invoice(session):
    """ Application Fee session ID """
    if 'cols_app_invoice' in session:
        del session['cols_app_invoice']
        session.modified = True


def create_fee_lines(proposal, invoice_text=None, vouchers=[], internal=False):
    """ Create the ledger lines - line item for application fee sent to payment system """
    #import ipdb; ipdb.set_trace()
    now = datetime.now().strftime('%Y-%m-%d %H:%M')
    price = proposal.application_type.application_fee
    line_items = [{
        'ledger_description': 'Application Fee - {} - {}'.format(now, proposal.lodgement_number),
        'oracle_code': proposal.application_type.oracle_code,
        'price_incl_tax': price,
        'price_excl_tax': price if proposal.application_type.is_gst_exempt else calculate_excl_gst(price),
        'quantity': 1,
    }]
    logger.info('{}'.format(line_items))
    return line_items


def create_lines(request, invoice_text=None, vouchers=[], internal=False):
    """ Create the ledger lines - line items for invoice sent to payment system """
    #import ipdb; ipdb.set_trace()
    def add_line_item(park, arrival, age_group, price, no_persons):
        price = Decimal(price)
        if no_persons > 0:
            return {
                'ledger_description': '{} - {} - {}'.format(park.name, arrival, age_group),
                'oracle_code': park.oracle_code,
                'price_incl_tax': price,
                'price_excl_tax': price if park.is_gst_exempt else calculate_excl_gst(price),
                'quantity': no_persons,
            }
        return None

    lines = []
    tbody = json.loads(request.POST['payment'])['tbody']
    for row in tbody:
        park_id = row[0]['value']
        arrival = row[1]
        no_adults = int(row[2]) if row[2] else 0
        no_children = int(row[3]) if row[3] else 0
        no_free_of_charge = int(row[4]) if row[4] else 0
        park = Park.objects.get(id=park_id)
        if no_adults > 0:
            lines.append(add_line_item(park, arrival, 'Adult', price=park.adult_price, no_persons=no_adults))
        if no_children > 0:
            lines.append(add_line_item(park, arrival, 'Child', price=park.child_price, no_persons=no_children))
        if no_free_of_charge > 0:
            lines.append(add_line_item(park, arrival, 'Free', price=0.0, no_persons=no_free_of_charge))
    return lines


def checkout(request, proposal, lines, return_url_ns='public_booking_success', return_preload_url_ns='public_booking_success', invoice_text=None, vouchers=[], internal=False):
    #import ipdb; ipdb.set_trace()
    basket_params = {
        'products': lines,
        'vouchers': vouchers,
        'system': settings.PS_PAYMENT_SYSTEM_ID,
        'custom_basket': True,
    }
    basket, basket_hash = create_basket_session(request, basket_params)
    #fallback_url = request.build_absolute_uri('/')
    checkout_params = {
        'system': settings.PS_PAYMENT_SYSTEM_ID,
        'fallback_url': request.build_absolute_uri('/'),  # 'http://mooring-ria-jm.dbca.wa.gov.au/'
        'return_url': request.build_absolute_uri(reverse(return_url_ns)),  # 'http://mooring-ria-jm.dbca.wa.gov.au/success/'
        'return_preload_url': request.build_absolute_uri(reverse(return_url_ns)),  # 'http://mooring-ria-jm.dbca.wa.gov.au/success/'
        #'fallback_url': fallback_url,
        #'return_url': fallback_url,
        #'return_preload_url': fallback_url,
        'force_redirect': True,
        'proxy': True if internal else False,
        'invoice_text': invoice_text,  # 'Reservation for Jawaid Mushtaq from 2019-05-17 to 2019-05-19 at RIA 005'
    }
    # if not internal:
    #     checkout_params['check_url'] = request.build_absolute_uri('/api/booking/{}/booking_checkout_status.json'.format(booking.id))
    if internal or request.user.is_anonymous():
        #checkout_params['basket_owner'] = booking.customer.id
        checkout_params['basket_owner'] = proposal.submitter_id
    create_checkout_session(request, checkout_params)

    # if internal:
    #     response = place_order_submission(request)
    # else:
    response = HttpResponseRedirect(reverse('checkout:index'))
    # inject the current basket into the redirect response cookies
    # or else, anonymous users will be directionless
    response.set_cookie(
        settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
        max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
        secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
    )

    # if booking.cost_total < 0:
    #     response = HttpResponseRedirect('/refund-payment')
    #     response.set_cookie(
    #         settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
    #         max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
    #         secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
    #     )
    #
    # # Zero booking costs
    # if booking.cost_total < 1 and booking.cost_total > -1:
    #     response = HttpResponseRedirect('/no-payment')
    #     response.set_cookie(
    #         settings.OSCAR_BASKET_COOKIE_OPEN, basket_hash,
    #         max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
    #         secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True
    #     )
    return response


def oracle_integration(date, override):
    system = '0557'
    oracle_codes = oracle_parser(date, system, 'Commercial Operator Licensing', override=override)
| 40.090909
| 175
| 0.675057
| 1,098
| 8,820
| 5.176685
| 0.1949
| 0.027094
| 0.030084
| 0.039585
| 0.427692
| 0.378958
| 0.333216
| 0.32266
| 0.284835
| 0.262139
| 0
| 0.008164
| 0.222336
| 8,820
| 219
| 176
| 40.273973
| 0.820528
| 0.218367
| 0
| 0.228571
| 0
| 0
| 0.114567
| 0.006454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.1
| 0
| 0.242857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd3ebb35376a9ad6bb35907b043a70f74ff3d06d
| 2,488
|
py
|
Python
|
driver.py
|
Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation
|
23ab060316c5978724b3f109d851ea33206d0e10
|
[
"MIT"
] | 6
|
2020-05-01T23:33:13.000Z
|
2021-12-18T08:13:50.000Z
|
driver.py
|
Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation--Python
|
23ab060316c5978724b3f109d851ea33206d0e10
|
[
"MIT"
] | null | null | null |
driver.py
|
Nobregaigor/Robot-path-tracking-and-obstacle-avoidance-simulation--Python
|
23ab060316c5978724b3f109d851ea33206d0e10
|
[
"MIT"
] | 2
|
2020-05-06T11:54:10.000Z
|
2020-07-30T01:58:06.000Z
|
import pygame
import math
import path_planning as pp


class Driver():
    def __init__(self, vehicle, path, settings):
        """ Driver """
        #_______main objects references_______
        #reference to driver vehicle object:
        self.vehicle = vehicle
        #creating a plan object:
        self.plan = pp.path_plan(path)

        #___________Settings_________
        #initial velocity
        self.velocity = settings._velocity_
        #allowed error range for angle
        self.angle_allowed_error = math.radians(settings._angle_allowed_error_)

        #_______Class variables_______
        #amount of degrees that it needs to turn to match desired path
        self.angle_to_turn = None
        #direction that it needs to turn the wheel
        self.direction_to_turn = None
        #Boolean to indicate when to stop
        self.safe_to_drive = False
        self.settings = settings

    #######################################################
    def update_settings(self):
        self.angle_allowed_error = math.radians(self.settings._angle_allowed_error_)
        self.velocity = self.settings._velocity_

    def update_driving_condition(self, conditions):
        self.safe_to_drive = conditions['safe_to_drive']
        self.angle_to_turn = conditions['angle_to_turn']
        self.direction_to_turn = conditions['direction_to_turn']

    def turn_wheel(self):
        if self.angle_to_turn > self.angle_allowed_error:
            if self.direction_to_turn == 'CCW':  #opposite to match screens coordinates
                self.vehicle.turn_left(self.velocity)
                return "Turning left"
            elif self.direction_to_turn == 'CW':  #opposite to match screens coordinates
                self.vehicle.turn_right(self.velocity)
                return "Turning Right"
            else:
                print("I am confused!")
                return "I am confused!"
        else:
            self.vehicle.move_forward(self.velocity)
            return "Moving forward"

    def drive(self, win, draw_plan=False, draw_sensor=False, debug=False):
        self.update_settings()
        conditions = self.plan.update_plan(win, self.vehicle.position, self.vehicle.direction, draw_plan, draw_sensor, debug)
        self.update_driving_condition(conditions)
        # print(conditions)
        if self.safe_to_drive == True:
            response = self.turn_wheel()
        else:
            response = "Not safe to drive"
        return response
| 35.542857
| 123
| 0.63545
| 286
| 2,488
| 5.129371
| 0.293706
| 0.04499
| 0.057941
| 0.051806
| 0.132243
| 0.109066
| 0.06544
| 0.06544
| 0
| 0
| 0
| 0
| 0.268489
| 2,488
| 69
| 124
| 36.057971
| 0.806044
| 0.17283
| 0
| 0.069767
| 0
| 0
| 0.066532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.069767
| 0
| 0.325581
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd423af6c5271daa0eac7f6a8ca5e2cf87ffc2fe
| 2,752
|
py
|
Python
|
test/test_api_v1_module.py
|
feizhihui/deepnlp
|
cc6647d65ec39aadd35e4a4748da92df5b79bd48
|
[
"MIT"
] | null | null | null |
test/test_api_v1_module.py
|
feizhihui/deepnlp
|
cc6647d65ec39aadd35e4a4748da92df5b79bd48
|
[
"MIT"
] | null | null | null |
test/test_api_v1_module.py
|
feizhihui/deepnlp
|
cc6647d65ec39aadd35e4a4748da92df5b79bd48
|
[
"MIT"
] | 1
|
2019-05-13T14:24:15.000Z
|
2019-05-13T14:24:15.000Z
|
#coding:utf-8
'''
Demo for calling the API of the deepnlp.org web service.
Anonymous users of this package have limited access (100 API calls per day).
Please register and log in to your account at deepnlp.org to get unlimited access to the
fully supported api_service API module, which now supports both Windows and Linux platforms.
'''
from __future__ import unicode_literals

import json, requests, sys, os
if (sys.version_info > (3, 0)):
    from urllib.parse import quote
else:
    from urllib import quote

from deepnlp import api_service
login = api_service.init()  # registration, if failed, load default empty login {} with limited access
login = {}  # use your personal login {'username': 'your_user_name' , 'password': 'your_password'}
conn = api_service.connect(login)  # save the connection with login cookies

# API Setting
text = ("我爱吃北京烤鸭").encode('utf-8')  # convert text from unicode to utf-8 bytes, quote() function

# Segmentation
url_segment = "http://www.deepnlp.org/api/v1.0/segment/?" + "lang=zh" + "&text=" + quote(text)
web = requests.get(url_segment, cookies=conn)
tuples = json.loads(web.text)
wordsList = tuples['words']  # segmentation json {'words', [w1, w2,...]} return list
print ("Segmentation API:")
print (" ".join(wordsList).encode("utf-8"))

# POS tagging
url_pos = "http://www.deepnlp.org/api/v1.0/pos/?" + "lang=zh" + "&text=" + quote(text)
web = requests.get(url_pos, cookies=conn)
tuples = json.loads(web.text)
pos_str = tuples['pos_str']  # POS json {'pos_str', 'w1/t1 w2/t2'} return string
print ("POS API:")
print (pos_str.encode("utf-8"))

# NER tagging
url_ner = "http://www.deepnlp.org/api/v1.0/ner/?" + "lang=zh" + "&text=" + quote(text)
web = requests.get(url_ner, cookies=conn)
tuples = json.loads(web.text)
ner_str = tuples['ner_str']  # NER json {'ner_str', 'w1/t1 w2/t2'} return list
print ("NER API:")
print (ner_str.encode("utf-8"))

# Pipeline
annotators = "segment,pos,ner"
url_pipeline = "http://www.deepnlp.org/api/v1.0/pipeline/?" + "lang=zh" + "&text=" + quote(text) + "&annotators=" + quote(annotators)
web = requests.get(url_pipeline, cookies=conn)
tuples = json.loads(web.text)
segment_str = tuples['segment_str']  # segment module
pos_str = tuples['pos_str']  # pos module
ner_str = tuples['ner_str']  # ner module
ner_json = tuples['ner_json']  # ner result in json

# output
def json_to_str(json_dict):
    json_str = ""
    for k, v in json_dict.items():
        json_str += ("'" + k + "'" + ":" + "'" + v + "'" + ",")
    json_str = "{" + json_str + "}"
    return json_str

print ("Pipeline API:")
print (segment_str.encode("utf-8"))
print (pos_str.encode("utf-8"))
print (ner_str.encode("utf-8"))
print ("NER JSON:")
print (json_to_str(ner_json).encode("utf-8"))
| 38.222222
| 133
| 0.682776
| 418
| 2,752
| 4.370813
| 0.284689
| 0.021894
| 0.043788
| 0.035577
| 0.314176
| 0.296661
| 0.181719
| 0.059113
| 0.059113
| 0
| 0
| 0.014169
| 0.153706
| 2,752
| 71
| 134
| 38.760563
| 0.770288
| 0.306323
| 0
| 0.255319
| 0
| 0
| 0.211896
| 0
| 0.085106
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.106383
| 0
| 0.148936
| 0.255319
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd46541bba89d45678808a7b911ed3c9f61dd510
| 4,245
|
py
|
Python
|
utils/dataset_utils.py
|
dpaiton/DeepSparseCoding
|
5ea01fa8770794df5e13743aa3f2d85297c27eb1
|
[
"MIT"
] | 12
|
2017-04-27T17:19:31.000Z
|
2021-11-07T03:37:59.000Z
|
utils/dataset_utils.py
|
dpaiton/DeepSparseCoding
|
5ea01fa8770794df5e13743aa3f2d85297c27eb1
|
[
"MIT"
] | 12
|
2018-03-21T01:16:25.000Z
|
2022-02-10T00:21:58.000Z
|
utils/dataset_utils.py
|
dpaiton/DeepSparseCoding
|
5ea01fa8770794df5e13743aa3f2d85297c27eb1
|
[
"MIT"
] | 12
|
2017-02-01T19:49:57.000Z
|
2021-12-08T03:16:58.000Z
|
import os
import sys

import numpy as np
import torch
from torchvision import datasets, transforms

ROOT_DIR = os.path.dirname(os.getcwd())
if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR)

import DeepSparseCoding.utils.data_processing as dp
import DeepSparseCoding.datasets.synthetic as synthetic


class CustomTensorDataset(torch.utils.data.Dataset):
    def __init__(self, data_tensor):
        self.data_tensor = data_tensor

    def __getitem__(self, index):
        return self.data_tensor[index], self.data_tensor[index]

    def __len__(self):
        return self.data_tensor.size(0)


def load_dataset(params):
    new_params = {}
    if(params.dataset.lower() == 'mnist'):
        preprocessing_pipeline = [
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x.permute(1, 2, 0))  # channels last
        ]
        if params.standardize_data:
            preprocessing_pipeline.append(
                transforms.Lambda(lambda x: dp.standardize(x, eps=params.eps)[0]))
        if params.rescale_data_to_one:
            preprocessing_pipeline.append(
                transforms.Lambda(lambda x: dp.rescale_data_to_one(x, eps=params.eps, samplewise=True)[0]))
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(root=params.data_dir, train=True, download=True,
                transform=transforms.Compose(preprocessing_pipeline)),
            batch_size=params.batch_size, shuffle=params.shuffle_data,
            num_workers=0, pin_memory=False)
        val_loader = None
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(root=params.data_dir, train=False, download=True,
                transform=transforms.Compose(preprocessing_pipeline)),
            batch_size=params.batch_size, shuffle=params.shuffle_data,
            num_workers=0, pin_memory=False)

    elif(params.dataset.lower() == 'dsprites'):
        root = os.path.join(*[params.data_dir])
        dsprites_file = os.path.join(*[root, 'dsprites/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'])
        if not os.path.exists(dsprites_file):
            import subprocess
            print(f'Now downloading the dsprites-dataset to {root}/dsprites')
            subprocess.call(['./scripts/download_dsprites.sh', f'{root}'])
            print('Finished')
        data = np.load(dsprites_file, encoding='bytes')
        data = torch.from_numpy(data['imgs']).unsqueeze(1).float()
        train_kwargs = {'data_tensor': data}
        dset = CustomTensorDataset
        train_data = dset(**train_kwargs)
        train_loader = torch.utils.data.DataLoader(train_data,
            batch_size=params.batch_size,
            shuffle=params.shuffle_data,
            num_workers=0,
            pin_memory=False)
        val_loader = None
        test_loader = None

    elif(params.dataset.lower() == 'synthetic'):
        preprocessing_pipeline = [transforms.ToTensor(),
            transforms.Lambda(lambda x: x.permute(1, 2, 0))  # channels last
        ]
        train_loader = torch.utils.data.DataLoader(
            synthetic.SyntheticImages(params.epoch_size, params.data_edge_size, params.dist_type,
                params.rand_state, params.num_classes,
                transform=transforms.Compose(preprocessing_pipeline)),
            batch_size=params.batch_size, shuffle=params.shuffle_data,
            num_workers=0, pin_memory=False)
        val_loader = None
        test_loader = None
        new_params["num_pixels"] = params.data_edge_size**2

    else:
        assert False, (f'Supported datasets are ["mnist", "dsprites", "synthetic"], not {params.dataset}')

    new_params['epoch_size'] = len(train_loader.dataset)
    if(not hasattr(params, 'num_val_images')):
        if val_loader is None:
            new_params['num_val_images'] = 0
        else:
            new_params['num_val_images'] = len(val_loader.dataset)
    if(not hasattr(params, 'num_test_images')):
        if test_loader is None:
            new_params['num_test_images'] = 0
        else:
            new_params['num_test_images'] = len(test_loader.dataset)
    new_params['data_shape'] = list(next(iter(train_loader))[0].shape)[1:]
    return (train_loader, val_loader, test_loader, new_params)
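# Illustrative usage sketch (not part of the original file): the params values
# are hypothetical; any object exposing these attributes works.
from types import SimpleNamespace
mnist_params = SimpleNamespace(
    dataset='mnist', data_dir='./data', batch_size=64, shuffle_data=True,
    standardize_data=False, rescale_data_to_one=False, eps=1e-8)
train_loader, val_loader, test_loader, extra = load_dataset(mnist_params)
print(extra['epoch_size'], extra['data_shape'])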
| 42.878788
| 107
| 0.660306
| 518
| 4,245
| 5.169884
| 0.239382
| 0.033607
| 0.026139
| 0.034354
| 0.446229
| 0.43652
| 0.373413
| 0.348021
| 0.309186
| 0.309186
| 0
| 0.009786
| 0.229682
| 4,245
| 98
| 108
| 43.316327
| 0.809174
| 0.00636
| 0
| 0.280899
| 0
| 0
| 0.092527
| 0.020166
| 0
| 0
| 0
| 0
| 0.011236
| 1
| 0.044944
| false
| 0
| 0.089888
| 0.022472
| 0.179775
| 0.022472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd485ea8847607e1b8262b17b33a7d95c7b05c48
| 2,327
|
py
|
Python
|
src/empirical_study.py
|
arshajithwolverine/Recommentation-System_KGNN-LS
|
82ad10633a56794bbc38dc7e6c40a3636c7d570a
|
[
"MIT"
] | 133
|
2019-06-20T08:38:04.000Z
|
2022-03-30T07:57:14.000Z
|
src/empirical_study.py
|
piaofu110/KGNN-LS
|
3afd76361b623e9e38b822861c79bcd61dae41aa
|
[
"MIT"
] | 10
|
2019-07-06T12:53:01.000Z
|
2021-11-10T12:58:50.000Z
|
src/empirical_study.py
|
piaofu110/KGNN-LS
|
3afd76361b623e9e38b822861c79bcd61dae41aa
|
[
"MIT"
] | 40
|
2019-08-07T06:02:31.000Z
|
2022-01-05T15:19:29.000Z
|
import networkx as nx
import numpy as np
import argparse


if __name__ == '__main__':
    np.random.seed(555)
    NUM = 10000

    parser = argparse.ArgumentParser()
    parser.add_argument('-d', type=str, default='music')
    args = parser.parse_args()
    DATASET = args.d

    kg_np = np.load('../data/' + DATASET + '/kg_final.npy')
    kg = nx.Graph()
    kg.add_edges_from([(triple[0], triple[2]) for triple in kg_np])  # construct knowledge graph

    rating_np = np.load('../data/' + DATASET + '/ratings_final.npy')
    item_history = dict()
    item_set = set()
    for record in rating_np:
        user = record[0]
        item = record[1]
        rating = record[2]
        if rating == 1:
            if item not in item_history:
                item_history[item] = set()
            item_history[item].add(user)
            item_set.add(item)

    item_pair_num_no_common_rater = 0
    item_pair_num_with_common_rater = 0
    sp_no_common_rater = dict()
    sp_with_common_rater = dict()
    while True:
        item1, item2 = np.random.choice(list(item_set), size=2, replace=False)
        if item_pair_num_no_common_rater == NUM and item_pair_num_with_common_rater == NUM:
            break
        if item_pair_num_no_common_rater < NUM and len(item_history[item1] & item_history[item2]) == 0:
            item_pair_num_no_common_rater += 1
            if not nx.has_path(kg, item1, item2):
                sp = 'infinity'
            else:
                sp = nx.shortest_path_length(kg, item1, item2)
            if sp not in sp_no_common_rater:
                sp_no_common_rater[sp] = 0
            sp_no_common_rater[sp] += 1
            print(item_pair_num_no_common_rater, item_pair_num_with_common_rater)
        if item_pair_num_with_common_rater < NUM and len(item_history[item1] & item_history[item2]) > 0:
            item_pair_num_with_common_rater += 1
            if not nx.has_path(kg, item1, item2):
                sp = 'infinity'
            else:
                sp = nx.shortest_path_length(kg, item1, item2)
            if sp not in sp_with_common_rater:
                sp_with_common_rater[sp] = 0
            sp_with_common_rater[sp] += 1
            print(item_pair_num_no_common_rater, item_pair_num_with_common_rater)

    print(sp_no_common_rater)
    print(sp_with_common_rater)
| 36.359375
| 104
| 0.621401
| 333
| 2,327
| 3.966967
| 0.231231
| 0.183195
| 0.099924
| 0.059046
| 0.558668
| 0.460257
| 0.440575
| 0.3838
| 0.3838
| 0.348221
| 0
| 0.023367
| 0.282768
| 2,327
| 63
| 105
| 36.936508
| 0.768125
| 0.010743
| 0
| 0.181818
| 0
| 0
| 0.033913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054545
| 0
| 0.054545
| 0.072727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd4d5dd7883050a254679a4b1f93de18a8465561
| 1,179
|
py
|
Python
|
datacamp-master/22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py
|
vitthal10/datacamp
|
522d2b192656f7f6563bf6fc33471b048f1cf029
|
[
"MIT"
] | 1
|
2020-06-11T01:32:36.000Z
|
2020-06-11T01:32:36.000Z
|
22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py
|
AndreasFerox/DataCamp
|
41525d7252f574111f4929158da1498ee1e73a84
|
[
"MIT"
] | null | null | null |
22-introduction-to-time-series-analysis-in-python/04-moving-average-ma-and-arma-models/08-equivalance-of-ar(1)-and-ma(infinity).py
|
AndreasFerox/DataCamp
|
41525d7252f574111f4929158da1498ee1e73a84
|
[
"MIT"
] | 1
|
2021-08-08T05:09:52.000Z
|
2021-08-08T05:09:52.000Z
|
'''
Equivalence of AR(1) and MA(infinity)
To better understand the relationship between MA models and AR models, you will demonstrate that an AR(1) model is equivalent to an MA(∞
∞
) model with the appropriate parameters.
You will simulate an MA model with parameters 0.8,0.82,0.83,…
0.8
,
0.8
2
,
0.8
3
,
…
for a large number (30) lags and show that it has the same Autocorrelation Function as an AR(1) model with ϕ=0.8
ϕ
=
0.8
.
INSTRUCTIONS
100XP
Import the modules for simulating data and plotting the ACF from statsmodels
Use a list comprehension to build a list with exponentially decaying MA parameters: 1,0.8,0.82,0.83,…
1
,
0.8
,
0.8
2
,
0.8
3
,
…
Simulate 5000 observations of the MA(30) model
Plot the ACF of the simulated series
'''
# Import the modules for simulating data and plotting the ACF
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.graphics.tsaplots import plot_acf
# Build a list of MA parameters
ma = [0.8**i for i in range(30)]
# Simulate the MA(30) model
ar = np.array([1])
AR_object = ArmaProcess(ar, ma)
simulated_data = AR_object.generate_sample(nsample=5000)
# Plot the ACF
plot_acf(simulated_data, lags=30)
plt.show()
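The exercise claims this MA(30) series has the same ACF as an AR(1) with ϕ = 0.8; a companion sketch (not part of the DataCamp solution) that simulates the AR(1) side for comparison:
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.graphics.tsaplots import plot_acf

# AR(1) with phi = 0.8; statsmodels takes the lag polynomial, so phi enters negated
ar = np.array([1, -0.8])
ma = np.array([1])
AR1_object = ArmaProcess(ar, ma)
simulated_ar = AR1_object.generate_sample(nsample=5000)
plot_acf(simulated_ar, lags=30)
plt.show()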
| 19.983051
| 136
| 0.74894
| 220
| 1,179
| 4.040909
| 0.386364
| 0.024747
| 0.013498
| 0.022497
| 0.195726
| 0.195726
| 0.195726
| 0.170979
| 0.170979
| 0.146232
| 0
| 0.066124
| 0.166243
| 1,179
| 59
| 137
| 19.983051
| 0.824008
| 0.735369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd4e4c3a86cc4a31b024c46ddddde1fa3e66e93b
| 3,752
|
py
|
Python
|
imutils.py
|
shimoda-uec/ssdd
|
564c3e08fae7a158516cdbd9f3599a74dc748aff
|
[
"MIT"
] | 33
|
2019-11-05T07:15:36.000Z
|
2021-04-27T06:33:47.000Z
|
imutils.py
|
shimoda-uec/ssdd
|
564c3e08fae7a158516cdbd9f3599a74dc748aff
|
[
"MIT"
] | 1
|
2019-11-18T13:02:40.000Z
|
2019-11-18T13:02:54.000Z
|
imutils.py
|
shimoda-uec/ssdd
|
564c3e08fae7a158516cdbd9f3599a74dc748aff
|
[
"MIT"
] | 3
|
2019-11-25T11:00:39.000Z
|
2021-03-27T06:53:21.000Z
|
import PIL.Image
import random
import numpy as np
import cv2
class RandomHorizontalFlip():
def __init__(self):
return
def __call__(self, inputs):
if bool(random.getrandbits(1)):
outputs=[]
for inp in inputs:
out = np.fliplr(inp).copy()
outputs.append(out)
return outputs
else:
return inputs
class RandomResizeLong():
def __init__(self, min_long, max_long):
self.min_long = min_long
self.max_long = max_long
def __call__(self, inputs):
img=inputs[0]
target_long = random.randint(self.min_long, self.max_long)
#w, h = img.size
h, w, c = img.shape
target_shape = (target_long, target_long)
"""
if w > h:
target_shape = (int(round(w * target_long / h)), target_long)
else:
target_shape = (target_long, int(round(h * target_long / w)))
"""
outputs=[]
for inp in inputs:
out = cv2.resize(inp, target_shape)
if len(out.shape)==2:
out=np.expand_dims(out,2)
outputs.append(out)
return outputs
class RandomCrop():
def __init__(self, cropsize):
self.cropsize = cropsize
def __call__(self, inputs):
imgarr = np.concatenate(inputs, axis=-1)
h, w, c = imgarr.shape
ch = min(self.cropsize, h)
cw = min(self.cropsize, w)
w_space = w - self.cropsize
h_space = h - self.cropsize
if w_space > 0:
cont_left = 0
img_left = random.randrange(w_space+1)
else:
cont_left = random.randrange(-w_space+1)
img_left = 0
if h_space > 0:
cont_top = 0
img_top = random.randrange(h_space+1)
else:
cont_top = random.randrange(-h_space+1)
img_top = 0
outputs=[]
for inp in inputs:
container = np.zeros((self.cropsize, self.cropsize, inp.shape[-1]), np.float32)
container[cont_top:cont_top+ch, cont_left:cont_left+cw] = \
inp[img_top:img_top+ch, img_left:img_left+cw]
outputs.append(container)
return outputs
class Normalize():
def __init__(self, mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225)):
self.mean = mean
self.std = std
def __call__(self, img):
imgarr = np.asarray(img)
proc_img = np.empty_like(imgarr, np.float32)
proc_img[..., 0] = (imgarr[..., 0] / 255. - self.mean[0]) / self.std[0]
proc_img[..., 1] = (imgarr[..., 1] / 255. - self.mean[1]) / self.std[1]
proc_img[..., 2] = (imgarr[..., 2] / 255. - self.mean[2]) / self.std[2]
return proc_img
def HWC_to_CHW(img):
return np.transpose(img, (2, 0, 1))
class Rescale():
def __init__(self, scale):
self.scale=scale
def __call__(self, inputs):
outputs=[]
for inp in inputs:
out = cv2.resize(inp, self.scale)
if len(out.shape)==2:
out=np.expand_dims(out,2)
outputs.append(out)
return outputs
def crf_inference(img, probs, t=3, scale_factor=1, labels=21):
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
h, w = img.shape[:2]
n_labels = labels
d = dcrf.DenseCRF2D(w, h, n_labels)
unary = unary_from_softmax(probs)
unary = np.ascontiguousarray(unary)
d.setUnaryEnergy(unary)
d.addPairwiseGaussian(sxy=3/scale_factor, compat=3)
d.addPairwiseBilateral(sxy=80/scale_factor, srgb=13, rgbim=np.copy(img), compat=10)
Q = d.inference(t)
return np.array(Q).reshape((n_labels, h, w))
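A minimal usage sketch (not from the original file) chaining these transforms on a dummy image/mask pair, the way a dataset pipeline might:
import numpy as np

# Dummy HWC inputs; the mask keeps a trailing channel axis as the transforms expect
img = np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8)
mask = np.random.randint(0, 2, (240, 320, 1), dtype=np.uint8)
flip = RandomHorizontalFlip()
resize = RandomResizeLong(256, 512)
crop = RandomCrop(224)
norm = Normalize()

img, mask = crop(resize(flip([img, mask])))
img = HWC_to_CHW(norm(img))  # (3, 224, 224) float32, ready for a CHW tensor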
| 32.068376
| 91
| 0.567697
| 502
| 3,752
| 4.035857
| 0.231076
| 0.047384
| 0.027147
| 0.033564
| 0.201382
| 0.158934
| 0.096742
| 0.096742
| 0.096742
| 0.061204
| 0
| 0.033487
| 0.307569
| 3,752
| 117
| 92
| 32.068376
| 0.746343
| 0.003998
| 0
| 0.265306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0
| 0.061224
| 0.020408
| 0.326531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd503144da89b34c7f7e0c6f7d30f63249106454
| 398
|
py
|
Python
|
dfmt/svg/run.py
|
wangrl2016/coding
|
fd6cd342cade42379c4a0447d83e17c6596fd3a3
|
[
"MIT"
] | 4
|
2021-02-20T03:47:48.000Z
|
2021-11-09T17:25:43.000Z
|
dfmt/svg/run.py
|
wangrl2016/coding
|
fd6cd342cade42379c4a0447d83e17c6596fd3a3
|
[
"MIT"
] | null | null | null |
dfmt/svg/run.py
|
wangrl2016/coding
|
fd6cd342cade42379c4a0447d83e17c6596fd3a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import subprocess
if __name__ == '__main__':
out_dir = 'out'
if not os.path.exists(out_dir):
os.mkdir(out_dir)
subprocess.run(['cargo', 'build', '--release'])
exe = 'target/release/svg'
subprocess.run([exe, '-i', 'test/simple-text.svg', '-o', 'out/simple-text.png', '--perf',
'--dump-svg', 'out/simple-text.svg'])
| 26.533333
| 93
| 0.585427
| 54
| 398
| 4.111111
| 0.574074
| 0.081081
| 0.117117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003165
| 0.20603
| 398
| 14
| 94
| 28.428571
| 0.699367
| 0.052764
| 0
| 0
| 0
| 0
| 0.335106
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd5534b9b393b4ca6ad72c44a3438fcc6e74b3d0
| 2,501
|
py
|
Python
|
socketshark/utils.py
|
Play2Live/socketshark
|
9b1e40654bf629c593079fb44c548911d4c864af
|
[
"MIT"
] | null | null | null |
socketshark/utils.py
|
Play2Live/socketshark
|
9b1e40654bf629c593079fb44c548911d4c864af
|
[
"MIT"
] | null | null | null |
socketshark/utils.py
|
Play2Live/socketshark
|
9b1e40654bf629c593079fb44c548911d4c864af
|
[
"MIT"
] | null | null | null |
import asyncio
import ssl
import aiohttp
from . import constants as c
def _get_rate_limit_wait(log, resp, opts):
"""
Returns the number of seconds we should wait given a 429 HTTP response and
HTTP options.
"""
max_wait = 3600
wait = opts['wait']
header_name = opts['rate_limit_reset_header_name']
if header_name and header_name in resp.headers:
header_value = resp.headers[header_name]
try:
new_wait = float(header_value)
# Make sure we have a valid value (not negative, NaN, or Inf)
if 0 <= new_wait <= max_wait:
wait = new_wait
elif new_wait > max_wait:
log.warn('rate reset value too high',
name=header_name, value=header_value)
wait = max_wait
else:
log.warn('invalid rate reset value',
name=header_name, value=header_value)
except ValueError:
log.warn('invalid rate reset value',
name=header_name, value=header_value)
return wait
async def http_post(shark, url, data):
log = shark.log.bind(url=url)
opts = shark.config['HTTP']
if opts.get('ssl_cafile'):
ssl_context = ssl.create_default_context(cafile=opts['ssl_cafile'])
else:
ssl_context = None
conn = aiohttp.TCPConnector(ssl_context=ssl_context)
async with aiohttp.ClientSession(connector=conn) as session:
wait = opts['wait']
for n in range(opts['tries']):
if n > 0:
await asyncio.sleep(wait)
try:
log.debug('http request', data=data)
async with session.post(url, json=data,
timeout=opts['timeout']) as resp:
if resp.status == 429: # Too many requests.
wait = _get_rate_limit_wait(log, resp, opts)
continue
else:
wait = opts['wait']
resp.raise_for_status()
data = await resp.json()
log.debug('http response', data=data)
return data
except aiohttp.ClientError:
log.exception('unhandled exception in http_post')
except asyncio.TimeoutError:
log.exception('timeout in http_post')
return {'status': 'error', 'error': c.ERR_SERVICE_UNAVAILABLE}
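`_get_rate_limit_wait` can be exercised without a live server by stubbing the response and logger; a minimal sketch under that assumption:
# Stub objects (assumptions; not part of socketshark itself)
class _StubLog:
    def warn(self, *args, **kwargs):
        print('warn:', args, kwargs)

class _StubResp:
    def __init__(self, headers):
        self.headers = headers

opts = {'wait': 1.0, 'rate_limit_reset_header_name': 'Retry-After'}
log = _StubLog()
print(_get_rate_limit_wait(log, _StubResp({'Retry-After': '30'}), opts))   # 30.0
print(_get_rate_limit_wait(log, _StubResp({'Retry-After': 'nan'}), opts))  # invalid value, falls back to 1.0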
| 35.225352
| 78
| 0.551779
| 290
| 2,501
| 4.596552
| 0.351724
| 0.060015
| 0.027007
| 0.042761
| 0.150038
| 0.150038
| 0.127532
| 0.087022
| 0.087022
| 0.087022
| 0
| 0.007524
| 0.362255
| 2,501
| 70
| 79
| 35.728571
| 0.828213
| 0.067173
| 0
| 0.232143
| 0
| 0
| 0.104671
| 0.012111
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd597b04327e251c7079f983fdc1e98e38cf4a8a
| 4,324
|
py
|
Python
|
cogs/member_.py
|
himo1101/NFlegel
|
7621f5d71b41b71faaf44d142f3b903b0471873a
|
[
"MIT"
] | null | null | null |
cogs/member_.py
|
himo1101/NFlegel
|
7621f5d71b41b71faaf44d142f3b903b0471873a
|
[
"MIT"
] | null | null | null |
cogs/member_.py
|
himo1101/NFlegel
|
7621f5d71b41b71faaf44d142f3b903b0471873a
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
from flegelapi.pg import default, server
from distutils.util import strtobool
import discord
member_table= """ member_(
id serial PRIMARY KEY,
server_id integer NOT NULL,
role_id integer,
channel_id integer,
custom_mes character varying DEFAULT 'が入出しました。',
on_off boolean DEFAULT False)"""
class Member_(commands.Cog):
def __init__(self,bot):
self.bot=bot
self.pool=bot.pool
self.fmt = '{:%Y-%m-%d %H:%M:%S}'  # assumed timestamp format; on_member_join calls self.fmt.format(...)
@commands.command()
async def enable(self, ctx, enable:str='on'):
try:
result_enable=strtobool(enable)
except ValueError:
return await ctx.send(f'{enable}は正常な値ではありません')
before_content=await server.fetch(self.pool, 'member_', ctx.guild)
await default.update(self.pool, 'member_', 'on_off', result_enable, 'server_id', ctx.guild.id)
await embed.default(ctx, 'enable change', f'{before_content} -> {"有効" if result_enable else "無効"}化')
@commands.command()
async def add_mrole(self, ctx, role: discord.Role=None):
if role is None:
return await ctx.send('役職が指定されていません')
before_content= await server.fetch(self.pool, 'member_', ctx.guild)
await default.update(self.pool, 'member_', 'role_id', role.id, 'server_id', ctx.guild.id)
await embed.default(ctx, 'role change', f'{before_content} -> {role.name}')
@commands.command()
async def add_channel(self, ctx, channel:discord.TextChannel=None):
if channel is None:
return await ctx.send('チャンネルが指定されていません')
before_content=await server.fetch(self.pool, 'member_', ctx.guild)
await default.update(self.pool, 'member_', 'channel_id', channel.id, 'server_id', ctx.guild.id)
await embed.default(ctx, 'channel change', f'{before_content} -> {channel.mention}')
@commands.command()
async def add_mes(self, ctx, mes:str=None):
if mes is None:
return await ctx.send('メッセージが指定されていません')
before_content=await server.fetch(self.pool, 'member_', ctx.guild)
await default.update(self.pool, 'member_', 'custom_mes', mes, 'server_id', ctx.guild.id)
await embed.default(ctx, 'custom message change', f'{before_content} -> {mes}')
@commands.Cog.listener()
async def on_member_join(self, member):
server_date=await server.fetch(self.pool, 'member_', member.guild)
if server_date['on_off']== False:
return
role= member.guild.get_role(int(server_date['role_id']))
await member.add_roles(role)
status = str(member.status)
if status == 'online':
status = 'オンライン'
elif status == 'offline':
status = 'オフライン'
elif status == 'idle':
status = '退席中'
elif status == 'dnd':
status = '起こさないで'
roles = [role.name for role in member.roles if role.name != '@everyone']
roles = ', '.join(roles) if roles != [] else 'なし'
e = discord.Embed(
title = '新しい人が来ました。',
description=f'ユーザー情報: {member.display_name}',
colour=discord.Colour.purple()
)
e.set_author(name=member.name, icon_url=member.avatar_url)
e.set_thumbnail(url=member.avatar_url)
e.add_field(
name='ステータス',
value=status
)
e.add_field(
name='サーバー参加日時',
value=self.fmt.format(member.joined_at)
)
e.add_field(
name='アカウント作成日時',
value=self.fmt.format(member.created_at)
)
e.add_field(
name='役職',
value=roles
)
if server_date['custom_mes'] is not None:
e.set_footer(
text= server_date['custom_mes']
)
else:
e.set_footer(
text=f'ID: {member.id} '
)
channel= self.bot.get_channel(int(server_date['channel_id']))
await channel.send(embed=e)
def setup(bot):
bot.add_cog(Member_(bot))
bot.add_table(member_table)
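The cog calls `embed.default(...)` without importing or defining `embed`; a minimal hypothetical helper with the same call shape (an assumption, not the project's actual API) could be:
import discord

class embed:
    """Hypothetical stand-in for the missing 'embed' helper module."""
    @staticmethod
    async def default(ctx, title, description):
        e = discord.Embed(title=title, description=description,
                          colour=discord.Colour.blue())
        await ctx.send(embed=e)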
| 32.757576
| 102
| 0.56568
| 503
| 4,324
| 4.713718
| 0.250497
| 0.033741
| 0.053142
| 0.042176
| 0.332349
| 0.250527
| 0.22016
| 0.22016
| 0.204133
| 0.140025
| 0
| 0
| 0.315911
| 4,324
| 132
| 103
| 32.757576
| 0.801555
| 0
| 0
| 0.131313
| 0
| 0
| 0.178955
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020202
| false
| 0
| 0.040404
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd5a19f0cbafdf639c273ea9eebb620d7cbc509e
| 7,720
|
py
|
Python
|
client.py
|
andreidorin13/cs544-messaging-protocol
|
40d26cb20234a4ad58095150795946aceaf9e4d4
|
[
"MIT"
] | null | null | null |
client.py
|
andreidorin13/cs544-messaging-protocol
|
40d26cb20234a4ad58095150795946aceaf9e4d4
|
[
"MIT"
] | null | null | null |
client.py
|
andreidorin13/cs544-messaging-protocol
|
40d26cb20234a4ad58095150795946aceaf9e4d4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
Andrei Dorin
06/10/2018
User interface for WISP chat implementation
'''
import argparse
import logging
import signal
import sys
import time
import queue
import select
import getpass
from wisp_client import WispClient
from wisp_common import State, WispRequest, WispResponse, WispMessage, WISP_DEFAULT_PORT
class Client():
'''
Chat client class
Handles drawing menu and buttons
Uses inner WispClient for networking
'''
def __init__(self):
self._logger = logging.getLogger()
self._wclient = WispClient()
self._friend_list = None
# ---- Public Functions ----
def connect(self, host, port):
'''Connect to chat server'''
if not self._wclient.connect(host, port):
self._logger.error('Client exiting')
sys.exit(1)
def discover(self):
'''Attempt to discover IP of chat server on network'''
if not self._wclient.discover(WISP_DEFAULT_PORT):
self._logger.error('Client exiting')
sys.exit(1)
def start(self):
'''
Start WispClient event loop
Begin authentication procedure
'''
self._wclient.start()
response = self._auth()
while response.code != WispResponse.OK:
print(f'\033[31m{response.data}\033[39m')
response = self._auth()
self._wclient.state = State.Command
self._draw_main_menu()
def _draw_main_menu(self):
'''
UI
Main application menu
Delegates to sub-menus
'''
user_cmd = None
cmd_list = [
self._search,
self._delete,
self._talk,
self._quit
]
while user_cmd != len(cmd_list):
self._friends()
print(f'Commands:\n1. Search for Friends\n2. Delete Friend\n3. Talk to Friend\n4. Exit')
user_cmd = self._get_user_input(1, len(cmd_list))
cmd_list[user_cmd-1]()
# ---- Client Commands + Menus ----
def _auth(self):
'''Gather username and password, attempt blocking authentication call'''
username = input('Username: ')
while len(username) > 16:
username = input('Username too long, try again: ')
password = getpass.getpass()
while len(password) > 16:
password = input('Password too long, try again: ')
return self._blocking_request(WispRequest.AUTH, username, password)
def _search(self):
'''
Query server for users containing search phrase
Offer option of adding them as friends
'''
phrase = input('Search phrase: ')
while len(phrase) > 16:
phrase = input('Phrase too long, try again: ')
# Display search results
results = self._blocking_request(WispRequest.SEARCH, phrase).data
index = self._draw_menu('Results', results)
if index == -1:
return
# If here then make friend request
response = self._blocking_request(WispRequest.ADD, results[index])
if response.code == WispResponse.OK:
print(f'Friend added successfully!')
else:
print(f'\033[31m{response.data}\033[39m')
def _friends(self):
'''Retrieve and draw friend list'''
self._friend_list = self._blocking_request(WispRequest.LIST).data
print('Friends:')
for i, friend in enumerate(self._friend_list):
print(f'{i+1}. {friend}')
def _delete(self):
'''Delete a friend'''
index = self._draw_menu('Deleting Friend:', self._friend_list)
if index == -1:
return
self._blocking_request(WispRequest.DEL, self._friend_list[index])
def _talk(self):
'''Start a conversation with a friend'''
index = self._draw_menu('Select Friend to talk to: ', self._friend_list)
if index == -1:
return
response = self._blocking_request(WispRequest.CONV, self._friend_list[index])
if response.code == WispResponse.OK:
self._wclient.state = State.Conversation
self._async_conv()
self._wclient.state = State.Command
else:
print(f'\033[31m{response.data}\033[39m')
def _quit(self):
'''Nicely close connection to server'''
print('Sending goodbye message')
self._wclient.reqq.put(WispRequest(WispRequest.QUIT))
time.sleep(.250) # make sure the request gets processed before exiting
sys.exit(0)
# ----- Helper Functions -----
def _blocking_request(self, cmd, arg1=None, arg2=None):
'''Sends command to server and awaits response'''
res = None
self._wclient.reqq.put(WispRequest(cmd, arg1, arg2))
while res is None:
try:
res = self._wclient.resq.get(block=False)
except queue.Empty:
pass
time.sleep(0.01)
return res
def _async_conv(self):
print('New conversation! Empty line to return to menu')
line = None
while line != '':
read, _, _ = select.select([sys.stdin], [], [], 0)
if read:
line = sys.stdin.readline().rstrip()
if len(line) > 127:
for batch in [line[i:i+127] for i in range(0, len(line), 127)]:
self._wclient.reqq.put(WispMessage(batch))
else:
self._wclient.reqq.put(WispMessage(line))
try:
res = self._wclient.resq.get(block=False)
print(f'\033[92m{res}\033[39m')
except queue.Empty:
pass
time.sleep(0.01)
print('Returning to menu!')
@classmethod
def _draw_menu(cls, header, options):
'''Draws menu based on list of options'''
upper = len(options)+1
print(header)
for i, opt in enumerate(options):
print(f'{i+1}. {opt}')
print(f'Press {upper} to go back')
index = cls._get_user_input(1, upper)
return -1 if index == upper else index-1
@classmethod
def _get_user_input(cls, lower, upper):
'''Gets user input as int within lower/upper bounds'''
user_cmd = -1
while not lower <= user_cmd <= upper:
try:
user_cmd = int(input('Choose Number: '))
except (ValueError, EOFError):
continue
return user_cmd
def signal_sigint(_, __):
'''
Signal handler for KeyboardInterrupt or SIGINT
'''
print('SIGINT Received, shutting down')
sys.exit(0)
def main():
'''
Main entry point of client
Argument parsing and initializing client
'''
parser = argparse.ArgumentParser(description='WISP protocol chat client')
parser.add_argument('-H', '--host', type=str,
help='IP of server, if none is specified, service discovery will be attempted')
parser.add_argument('-p', '--port', type=int, default=32500,
help='Port of server to connect, if none is specified, protocol default 32500 will be used')
parser.add_argument('-v', '--verbosity', type=int, default=4, choices=[4, 3, 2, 1],
help='Verbosity of logger, 4: Error, 3: Warning, 2: Info, 1: Debug')
args = parser.parse_args()
logging.basicConfig()
logging.getLogger().setLevel(args.verbosity * 10)
signal.signal(signal.SIGINT, signal_sigint)
# CLIENT
client = Client()
if args.host:
client.connect(args.host, args.port)
else:
client.discover()
client.start()
if __name__ == '__main__':
main()
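The 127-byte batching inside `_async_conv` can be checked in isolation; a small standalone sketch:
# Standalone check of the 127-byte chunking used in _async_conv
line = 'x' * 300
batches = [line[i:i+127] for i in range(0, len(line), 127)]
assert [len(b) for b in batches] == [127, 127, 46]
print(len(batches), 'batches')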
| 32.166667
| 116
| 0.584197
| 904
| 7,720
| 4.850664
| 0.280973
| 0.032611
| 0.022349
| 0.041049
| 0.191106
| 0.119498
| 0.086203
| 0.073432
| 0.036488
| 0.036488
| 0
| 0.022127
| 0.303368
| 7,720
| 239
| 117
| 32.301255
| 0.793232
| 0.137953
| 0
| 0.236025
| 0
| 0.006211
| 0.135728
| 0.017724
| 0
| 0
| 0
| 0
| 0
| 1
| 0.10559
| false
| 0.043478
| 0.062112
| 0
| 0.217391
| 0.093168
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd5e1e26e39c56d3ae62b8fd2032ab324293acc8
| 526
|
py
|
Python
|
lib/redis_set_get.py
|
InformaticsResearchCenter/ITeung
|
2e3f76294c3affca07934293cdeb46d6d618180a
|
[
"MIT"
] | null | null | null |
lib/redis_set_get.py
|
InformaticsResearchCenter/ITeung
|
2e3f76294c3affca07934293cdeb46d6d618180a
|
[
"MIT"
] | 37
|
2020-03-22T23:21:14.000Z
|
2020-09-16T15:07:06.000Z
|
lib/redis_set_get.py
|
InformaticsResearchCenter/ITeung
|
2e3f76294c3affca07934293cdeb46d6d618180a
|
[
"MIT"
] | 1
|
2020-09-08T11:31:30.000Z
|
2020-09-08T11:31:30.000Z
|
import redis
def set(key, value, expired):
# use None if you don't want an expiration time
try:
r = redis.Redis()
r.set(name=key, value=value, ex=expired)
return True, None
except Exception as e:
return False, f'{e}'
def get(key):
#key to get value
r = redis.Redis()
result=r.get(key)
if result:
return result.decode('utf-8')
else:
return None
def cekRedisToken(key):
if get(key) is not None:
return True
else:
return False
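A minimal usage sketch (assumes a Redis server reachable on localhost:6379, the library default):
ok, err = set('session:42', 'token-abc', expired=60)  # key expires in 60 seconds
if ok:
    print(get('session:42'))            # 'token-abc'
    print(cekRedisToken('session:42'))  # True
else:
    print('redis error:', err)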
| 21.04
| 48
| 0.579848
| 78
| 526
| 3.910256
| 0.461538
| 0.059016
| 0.072131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002786
| 0.31749
| 526
| 25
| 49
| 21.04
| 0.846797
| 0.110266
| 0
| 0.2
| 0
| 0
| 0.017131
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.05
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd5e82adedde50cba3e364b3ccb25d0a6e80401a
| 18,185
|
py
|
Python
|
FTDISPI.py
|
g-i-wilson/spi-tools
|
1c961a97572a366235f9f3b0517d8201fa8be371
|
[
"MIT"
] | 1
|
2022-03-22T20:44:01.000Z
|
2022-03-22T20:44:01.000Z
|
FTDISPI.py
|
g-i-wilson/spi-tools
|
1c961a97572a366235f9f3b0517d8201fa8be371
|
[
"MIT"
] | null | null | null |
FTDISPI.py
|
g-i-wilson/spi-tools
|
1c961a97572a366235f9f3b0517d8201fa8be371
|
[
"MIT"
] | null | null | null |
from pyftdi.spi import SpiController
from pyftdi.gpio import GpioSyncController
import serial
import time
import sys
import JSONFile
dbg = False
def createByteList(addrList, dataList):
newBytes = []
for byte in addrList:
newBytes.append(byte)
for byte in dataList:
newBytes.append(byte)
return newBytes
def printByteList(byteList):
str = ""
for byte in byteList:
str += hex(byte)+" "
return str
def readModifyWrite(old=[], mask=[], new=[]):
for i in range(len(old)):
new[i] = (old[i] & ~mask[i]) | new[i]
class bcolors:
WHITE = '\033[37m'
BLUE = '\033[94m'
GREEN = '\033[92m'
RED = '\033[31m'
RESET = '\033[0m'
def printByte(a, b):
for bit in range(8):
a_bit = (a >> (7-bit)) & 0x01
b_bit = (b >> (7-bit)) & 0x01
if a_bit and b_bit:
print("1", end='')
elif (not a_bit) and b_bit:
print(bcolors.GREEN, end='')
print("1", end='')
elif a_bit and (not b_bit):
print(bcolors.RED, end='')
print("0", end='')
else:
print("0", end='')
print(bcolors.RESET, end='')
print(end=' ')
def printReg(addrName, addr=[], data=[], old_data=None, note="", nameColWidth=18):
print( addrName+(" "*(nameColWidth-len(addrName))), end=' ')
for a in addr:
if old_data:
print(bcolors.GREEN, end='')
print("0x{:02x}".format(a), end=' ')
print(bcolors.RESET, end='')
else:
print("0x{:02x}".format(a), end=' ')
print(' | ', end='')
for i in range(len(data)):
print("0x{:02x}".format(data[i]), end=' ')
if old_data:
printByte(old_data[i], data[i])
else:
printByte(data[i], data[i])
print(' | '+note)
def printStruct(struct):
for name in struct:
if 'old_data' in struct[name]:
printReg(name, addr=struct[name]['addr_w'], data=struct[name]['data'], old_data=struct[name]['old_data'], note=struct[name]['info'])
else:
printReg(name, addr=struct[name]['addr_w'], data=struct[name]['data'], note=struct[name]['info'])
def bitMaskToBytes(bitStrArray):
data = []
mask = []
for bitStr in bitStrArray:
bit = 0x80
bitMask = 0x00
bitData = 0x00
for aChar in bitStr:
if aChar == '1' or aChar == '0':
bitMask += bit
if aChar == '1':
bitData += bit
bit = bit >> 1
data.append(bitData)
mask.append(bitMask)
return {"data":data, "mask":mask}
class GPIO:
def __init__(self, gpio, SCLK=0x10, MOSI=0x20, MISO=0x40, CS=0x80):
self.gpio = gpio
self.SCLK = SCLK
self.MOSI = MOSI
self.MISO = MISO
self.CS = CS
self.txList = []
self.readFlag = []
self.rxList = []
def transaction(self, byteList, readSize=0): # readSize 0 is simply a write
self.txList = [self.CS] # CS high, others low
self.readFlag = []
self.insertDelay(4)
self.clockLow()
self.csLow()
for aByte in byteList:
self.writeByte(aByte)
self.readFlag.append(False)
for i in range(readSize):
self.writeByte(0x00)
self.readFlag.append(True)
self.csHigh()
self.clockLow()
self.insertDelay(4)
self.transmit()
def insertDelay(self, d):
for i in range(d):
self.txList.append(self.txList[-1])
def clockLow(self):
self.txList.append( self.txList[-1] & ~self.SCLK )
def clockLowdataHigh(self):
self.txList.append( (self.txList[-1] & ~self.SCLK) | self.MOSI )
def clockLowdataLow(self):
self.txList.append( (self.txList[-1] & ~self.SCLK) & ~self.MOSI )
def clockHigh(self):
self.txList.append( self.txList[-1] | self.SCLK )
def csLow(self):
self.txList.append( self.txList[-1] & ~self.CS )
def csHigh(self):
self.txList.append( self.txList[-1] | self.CS )
def writeByte(self, aByte):
for i in range(8):
shiftPlaces = 7-i # MSB first "big endian"
# clock falling edge and data transition
if ((aByte >> shiftPlaces) & 0x01):
self.clockLowdataHigh()
else:
self.clockLowdataLow()
# clock rising edge
self.clockHigh()
def readByte(self):
self.writeByte(0x00)  # writeByte takes no 'read' flag; MISO is sampled later in readBitBang
def getTxList(self):
return self.txList
def transmit(self):
rxBytes = self.gpio.exchange( self.txList );
self.rxList = []
for byte in rxBytes:
self.rxList.append(byte)
def getRxList(self):
return self.rxList
def getReadFlag(self):
return self.readFlag
def read(self, byteList, readSize):
self.transaction(byteList, readSize)
rxByteList = self.readBitBang()
rxByteListOut = []
for i in range(len(self.readFlag)):
if self.readFlag[i]:
rxByteListOut.append(rxByteList[1][i])
return rxByteListOut
def write(self, byteList):
self.transaction(byteList)
def readBitBang(self):
mosiArray = []
misoArray = []
bitPlace = 7
mosiByte = 0x00
misoByte = 0x00
for a in range(len(self.rxList)):
if (not (self.rxList[a-1] & self.SCLK) and (self.rxList[a] & self.SCLK)): # rising edge
if (self.rxList[a] & self.MOSI):
mosiByte += (1 << bitPlace)
if (self.rxList[a] & self.MISO): # data=1
misoByte += (1 << bitPlace)
bitPlace -= 1
if bitPlace < 0:
mosiArray.append(mosiByte)
misoArray.append(misoByte)
mosiByte = 0x00
misoByte = 0x00
bitPlace = 7
if dbg:
print("MOSI: ")
print(mosiArray)
print("MISO: ")
print(misoArray)
return [mosiArray, misoArray]
class MPSSE:
def __init__(self, slave):
self.slave = slave
def write(self, byteList):
self.slave.exchange( \
out=byteList, \
readlen=0, \
start=True, \
stop=True, \
duplex=False, \
droptail=0 \
)
def read(self, byteList, readSize):
byteArray = self.slave.exchange( \
out=byteList, \
readlen=readSize, \
start=True, \
stop=True, \
duplex=False, \
droptail=0 \
)
byteList = []
for byte in byteArray:
byteList.append(byte)
return byteList
class UARTSPIBridge:
def __init__(self, port="/dev/ttyUSB0", baudrate="9600"):
self.serial = serial.Serial(port=port,baudrate=baudrate)
if self.serial.is_open:
self.serial.close()
self.serial.open()
def read(self, byteList, readLen):
# send write-length byte, read-length byte, and first data byte
self.serial.write( [ len(byteList) ] )
self.serial.write( [ readLen ] );
#print([ len(byteList) ])
#print([ readLen ])
if len(byteList) > 0:
self.serial.write( [ byteList[0] ] )
#print([ byteList[0] ])
#self.serial.write( [ 0x0A ] ) # newline char
#print([ 0x0A ])
self.serial.flush()
# time.sleep(0.1)
# read acknowledgement bytes
#print("hi0")
lenBytes = []
#print(self.serial.inWaiting())
while(not self.serial.inWaiting()):
pass
lenBytes.append( self.serial.read(1) )
#print("hi1")
#print(lenBytes)
# time.sleep(0.1)
while(not self.serial.inWaiting()):
pass
lenBytes.append( self.serial.read(1) )
#print("hi2")
#print(lenBytes)
if lenBytes[0] != len(byteList).to_bytes(1,'big') or lenBytes[1] != readLen.to_bytes(1,'big'):
print("Error communicating with UARTSPIBridge")
print("W length received: "+str(lenBytes[0])+" != "+str(byteList[0].to_bytes(1,'big')))
print("R length received: "+str(lenBytes[1])+" != "+str(byteList[1].to_bytes(1,'big')))
return []
# time.sleep(0.1)
while(not self.serial.inWaiting()):
pass
#print("hi3")
lastByte = self.serial.read(1) # ack byte from first data-write byte
#print(lastByte)
# write the remainder of byteList
for i in range(1,len(byteList)):
self.serial.write( [ byteList[i] ] )
#self.serial.write( [ 0x0A ] ) # newline char
self.serial.flush()
while(not self.serial.inWaiting()):
pass
lastByte = self.serial.read(1) # ack byte for each data-write byte
#print(lastByte)
inList = []
for i in range(readLen):
while(not self.serial.inWaiting()):
pass
inList.append( self.serial.read(1) )
#print(inList)
return inList
def write(self, byteList):
return self.read(byteList, 0);
class Interface:
def __init__(self, rwObject, defaultMap, currentState, previousState):
self.rwObject = rwObject
# default register map
defaultMapFile = JSONFile.load(defaultMap)
if not defaultMapFile.fileExists():
print("Unable to load "+defaultMap)
exit()
self.defaultMap = defaultMapFile.read()
# states for comparison
self.previousState = JSONFile.load(previousState)
if not self.previousState.fileExists():
print("Unable to load "+currentState)
exit()
self.currentState = JSONFile.load(currentState)
if not self.currentState.fileExists():
print("Unable to load "+currentState)
exit()
def writeRaw(self, byteList):
self.rwObject.write(byteList)
def fillDefaults(self, struct={}):
for name in struct:
if name in self.defaultMap.keys():
for key in self.defaultMap[name].keys():
if not key in struct[name].keys():
struct[name][key] = self.defaultMap[name][key]
def writeStruct(self, struct, display=False):
self.fillDefaults(struct)
for name in struct:
if 'mask' in struct[name]:
old = {name : {}}
self.readStruct(old)
struct[name]['old_data'] = old[name]['data']
readModifyWrite(old=struct[name]['old_data'], mask=struct[name]['mask'], new=struct[name]['data'])
if 'pre_w' in struct[name]:
for step in struct[name]['pre_w']: # ...is a list
for pre_name in step: # step is a dictionary with one key
if dbg:
print("Write: "+pre_name+", "+printByteList( createByteList(self.defaultMap[pre_name]['addr_w'], step[pre_name]) ))
self.rwObject.write( createByteList(self.defaultMap[pre_name]['addr_w'], step[pre_name]) )
if dbg:
print("Write: "+name+", "+printByteList(createByteList(struct[name]['addr_w'], struct[name]['data'])))
self.rwObject.write( createByteList(struct[name]['addr_w'], struct[name]['data']) )
if display:
printStruct(struct)
return struct
def readStruct(self, struct, display=False):
self.fillDefaults(struct)
for name in struct:
if 'pre_r' in struct[name]:
for step in struct[name]['pre_r']: # ...is a list
for pre_name in step: # step is a dictionary with one key
if dbg:
print("Write: "+pre_name+", "+printByteList( createByteList(self.defaultMap[pre_name]['addr_w'], step[pre_name]) ))
self.rwObject.write( createByteList(self.defaultMap[pre_name]['addr_w'], step[pre_name]) )
struct[name]['data'] = self.rwObject.read( struct[name]['addr_r'], len(struct[name]['data']) )
if dbg:
print("Read: "+name+", "+printByteList(createByteList(struct[name]['addr_r'], struct[name]['data'])))
if display:
printStruct(struct)
return struct
def readState(self, display=True):
self.previousState.write(self.currentState.read())
struct = {}
for name in self.defaultMap:
struct[name] = {}
self.currentState.write(self.readStruct(struct, display))
return self.currentState.read()
def compare(self, display=True, pre_display="", post_display=""):
self.readState(display=False)
comparison = {}
if len(self.previousState.read().keys()) == 0:
self.readState(display=False)
for name in self.currentState.read():
# aliases
prevData = self.previousState.read()[name]['data']
currData = self.currentState.read()[name]['data']
same = True
for i in range(len(currData)):
if currData[i] != prevData[i]:
same = False
if not same:
comparison[name] = {}
comparison[name]['data'] = currData
comparison[name]['old_data'] = prevData
self.fillDefaults(comparison)
if display and len(comparison.keys()) > 0:
if pre_display:
print(pre_display)
printStruct(comparison)
if post_display:
print(post_display)
return comparison
def trigger(self, display=True, pre_display="", delay=.25):
while(1):
comp = self.compare(display=False)
if len(comp.keys()) > 0:
if display:
print(pre_display)
printStruct(comp)
return comp
time.sleep(delay)
def writeDefault(self, display=True):
struct = self.writeStruct(self.defaultMap)
return self.readState(display)
def writeBits(self, name, bitStrings=[], display=True, compare=True):
if compare:
self.compare(display=display, pre_display="Changes before write:")
if display:
print("Writing...")
struct = self.writeStruct( { name : bitMaskToBytes(bitStrings) }, display )
if compare:
self.currentState.merge(struct) # also merges everything into struct
self.compare(display=display, pre_display="Changes after write:")
return {name: struct[name]} # return only this name
def writeBitsList(self, bitsList):
for bits in bitsList:
self.writeBits(name=bits[0], bitStrings=bits[1])
def writeCSV(self, csvFilePath):
print("Writing raw bytes from CSV file...")
csvFile = open(csvFilePath, "r")
print("Opened file: "+csvFilePath)
for line in csvFile.readlines():
byteList = []
for aByte in line.rstrip().split(','):
byteList.append( int(aByte,16) )
print(byteList)
self.writeRaw( byteList )
def ui_hex(str):
return int(str,16)
def uiLoopHelp():
print()
print("Command set:")
print()
print("write <REG_NAME> XXXX1010 1XXXXXX0 | Write bits (any char not 0 or 1 is a don't-care)")
print("writeRaw 0xXX 0xXX 0xXX | Write a raw sequence of bytes")
print("read <REG_NAME> | Read register")
print("all | Read all registers")
print("save <fileName> | Save registers to JSON file")
print("load <fileName> | Load and write registers from JSON file")
print("loadCSV <fileName> | Write bytes from CSV file (each line is one transaction)")
print("loadDefault | Load datasheet default JSON configuration")
print("help | Print this command set")
print("exit | Exit the program")
def uiLoop(spiObject, printHelp=True):
if printHelp:
uiLoopHelp()
jsonObject = None
ui = [""]
while (ui[0] != "exit"):
print("\n> ", end='')
ui = sys.stdin.readline().rstrip().split(' ')
if (ui[0] == "read"):
spiObject.readStruct({ ui[1] : {} }, display=True)
if (ui[0] == "write"):
dataRegs = []
for i in range(2,len(ui)):
dataRegs.append( ui[i] )
spiObject.writeBits( ui[1], dataRegs )
if (ui[0] == "all"):
spiObject.readState()
if (ui[0] == "compare"):
spiObject.compare()
if (ui[0] == "trigger"):
while(1):
spiObject.trigger(pre_display=chr(27)+"[2J")
time.sleep(1)
if (ui[0] == "save"):
if jsonObject is None:
if len(ui) > 1:
jsonObject = JSONFile.new(ui[1])
else:
jsonObject = JSONFile.new(input("\nSave as: "))
jsonObject.write( spiObject.readState() )
if (ui[0] == "load"):
if jsonObject is None:
jsonObject = JSONFile.load(ui[1])
spiObject.writeStruct(jsonObject.read())
spiObject.readState()
if (ui[0] == "loadCSV"):
spiObject.writeCSV(ui[1])
print("Comparing changes...")
spiObject.compare()
if (ui[0] == "writeRaw"):
print("Writing raw bytes...")
byteList = []
for i in range(1,len(ui)):
byteList.append( int(ui[i],16) )
print(byteList)
spiObject.writeRaw( byteList )
if (ui[0] == "loadDefault"):
spiObject.writeDefault()
if (ui[0] == "help"):
uiLoopHelp()
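A typical way to wire this up with pyftdi's SpiController; the device URL, CS line, clock rate, and JSON file names below are all assumptions:
from pyftdi.spi import SpiController

ctrl = SpiController()
ctrl.configure('ftdi://ftdi:232h/1')   # hypothetical FTDI device URL
slave = ctrl.get_port(cs=0, freq=1E6, mode=0)
spi = Interface(MPSSE(slave),
                'default_map.json',    # hypothetical register-map / state files
                'current_state.json',
                'previous_state.json')
uiLoop(spi)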
| 36.081349
| 144
| 0.534451
| 1,987
| 18,185
| 4.851535
| 0.151988
| 0.03112
| 0.006846
| 0.012552
| 0.302282
| 0.21888
| 0.178942
| 0.145124
| 0.129979
| 0.122925
| 0
| 0.017353
| 0.334506
| 18,185
| 503
| 145
| 36.153082
| 0.77921
| 0.049711
| 0
| 0.270023
| 0
| 0.002288
| 0.084813
| 0
| 0
| 0
| 0.003713
| 0
| 0
| 1
| 0.105263
| false
| 0.011442
| 0.01373
| 0.011442
| 0.187643
| 0.155606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd63c34fbdfbd183f707a4b54997655b51643809
| 3,417
|
py
|
Python
|
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/gazette/views/groups.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from morepath import redirect
from onegov.core.security import Private
from onegov.gazette import _
from onegov.gazette import GazetteApp
from onegov.gazette.forms import EmptyForm
from onegov.gazette.layout import Layout
from onegov.user import UserGroup
from onegov.user import UserGroupCollection
from onegov.user.forms import UserGroupForm
@GazetteApp.html(
model=UserGroupCollection,
template='groups.pt',
permission=Private
)
def view_groups(self, request):
""" View all the user groups.
This view is only visible by an admin.
"""
layout = Layout(self, request)
return {
'layout': layout,
'groups': self.query().all(),
'title': _('Groups'),
'new_group': request.link(self, name='new-group')
}
@GazetteApp.form(
model=UserGroupCollection,
name='new-group',
template='form.pt',
permission=Private,
form=UserGroupForm
)
def create_group(self, request, form):
""" Create a new user group.
This view is only visible by an admin.
"""
layout = Layout(self, request)
if form.submitted(request):
self.add(name=form.name.data)
request.message(_("Group added."), 'success')
return redirect(layout.manage_groups_link)
return {
'layout': layout,
'form': form,
'title': _("New Group"),
'cancel': layout.manage_groups_link
}
@GazetteApp.form(
model=UserGroup,
name='edit',
template='form.pt',
permission=Private,
form=UserGroupForm
)
def edit_group(self, request, form):
""" Edit a user group.
This view is only visible by an admin.
"""
layout = Layout(self, request)
if form.submitted(request):
form.update_model(self)
request.message(_("Group modified."), 'success')
return redirect(layout.manage_groups_link)
if not form.errors:
form.apply_model(self)
return {
'layout': layout,
'form': form,
'title': self.name,
'subtitle': _("Edit Group"),
'cancel': layout.manage_groups_link
}
@GazetteApp.form(
model=UserGroup,
name='delete',
template='form.pt',
permission=Private,
form=EmptyForm
)
def delete_group(self, request, form):
""" Delete a user group.
This view is only visible by an admin.
"""
layout = Layout(self, request)
if self.official_notices:
request.message(
_("There are official notices linked to this group!"),
'warning'
)
if self.users.count():
request.message(
_('Only groups without users may be deleted.'),
'alert'
)
return {
'layout': layout,
'title': self.name,
'subtitle': _("Delete Group"),
'show_form': False
}
if form.submitted(request):
UserGroupCollection(request.session).delete(self)
request.message(_("Group deleted."), 'success')
return redirect(layout.manage_groups_link)
return {
'message': _(
'Do you really want to delete "${item}"?',
mapping={'item': self.name}
),
'layout': layout,
'form': form,
'title': self.name,
'subtitle': _("Delete Group"),
'button_text': _("Delete Group"),
'button_class': 'alert',
'cancel': layout.manage_groups_link
}
| 23.244898
| 66
| 0.605502
| 375
| 3,417
| 5.421333
| 0.237333
| 0.054107
| 0.053123
| 0.064929
| 0.432366
| 0.418593
| 0.364978
| 0.343827
| 0.205116
| 0.205116
| 0
| 0
| 0.272169
| 3,417
| 146
| 67
| 23.40411
| 0.817451
| 0.073164
| 0
| 0.457143
| 0
| 0
| 0.15893
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038095
| false
| 0
| 0.085714
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd64ffc5e28a3c1d060e7cdf2e73c1f3c1f202dd
| 1,466
|
py
|
Python
|
personal_utilities/fourier_filters.py
|
dbstein/personal_utilities
|
3a4c7d2416b13a87f88fc0e400b299d648e1e541
|
[
"Apache-2.0"
] | null | null | null |
personal_utilities/fourier_filters.py
|
dbstein/personal_utilities
|
3a4c7d2416b13a87f88fc0e400b299d648e1e541
|
[
"Apache-2.0"
] | null | null | null |
personal_utilities/fourier_filters.py
|
dbstein/personal_utilities
|
3a4c7d2416b13a87f88fc0e400b299d648e1e541
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
class SimpleFourierFilter(object):
"""
Class to apply simple Fourier Filtration to a vector
Filter types:
'fraction' (requires kwarg: 'fraction' to be set)
'rule 36' (can set kwarg: 'power' but not necessary)
"""
def __init__(self, modes, filter_type, **kwargs):
self.n = modes.shape[0]
self.modes = modes
self.filter_type = filter_type
self._get_filter(**kwargs)
def __call__(self, fin, input_type='space', output_type='space'):
input_is_real = fin.dtype == float and input_type == 'space'
if input_type=='space':
fin = np.fft.fft(fin)
fout = fin*self.filter
if output_type == 'space':
fout = np.fft.ifft(fout)
if input_is_real:
fout = fout.real
return fout
def _get_filter(self, **kwargs):
if self.filter_type == 'fraction':
max_k = np.abs(self.modes).max()
self.filter = np.ones(self.n, dtype=float)
self.filter[np.abs(self.modes) > max_k*kwargs['fraction']] = 0.0
elif self.filter_type == 'rule 36':
max_k = np.abs(self.modes).max()
if 'power' in kwargs:
power36 = kwargs['power']
else:
power36 = 36
self.filter = np.exp(-power36*(np.abs(self.modes)/max_k)**power36)
else:
raise Exception('Filter type not defined.')
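A minimal usage sketch (not from the original file): low-pass a noisy periodic signal, keeping the lowest 10% of Fourier modes:
import numpy as np

n = 256
x = np.linspace(0, 2*np.pi, n, endpoint=False)
f = np.sin(x) + 0.1*np.random.randn(n)
modes = np.fft.fftfreq(n, d=1.0/n)  # integer wavenumbers for a periodic grid
filt = SimpleFourierFilter(modes, 'fraction', fraction=0.1)
f_smooth = filt(f)  # real in, real out; high modes zeroed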
| 34.093023
| 78
| 0.563438
| 188
| 1,466
| 4.234043
| 0.345745
| 0.08794
| 0.045226
| 0.070352
| 0.09799
| 0.09799
| 0.052764
| 0
| 0
| 0
| 0
| 0.016882
| 0.313097
| 1,466
| 42
| 79
| 34.904762
| 0.773585
| 0.122101
| 0
| 0.129032
| 0
| 0
| 0.065287
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.032258
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd682359aededb5fca5a5b75e857cce2e964a4f3
| 1,385
|
py
|
Python
|
Final/P2Pchat.py
|
cainanBlack/csc321
|
9cebf9c3b61befda932732316b7406f1462c0bee
|
[
"MIT"
] | null | null | null |
Final/P2Pchat.py
|
cainanBlack/csc321
|
9cebf9c3b61befda932732316b7406f1462c0bee
|
[
"MIT"
] | null | null | null |
Final/P2Pchat.py
|
cainanBlack/csc321
|
9cebf9c3b61befda932732316b7406f1462c0bee
|
[
"MIT"
] | null | null | null |
from netifaces import interfaces, ifaddresses, AF_INET
import argparse
import os
import zmq
from threading import Thread
def recieve(message):
ctx = zmq.Context.instance()
reciever = ctx.socket(zmq.SUB)
for last in range(1, 255):
reciever.connect("tcp://{0}.{1}:9000".format(message, last))
reciever.setsockopt(zmq.SUBSCRIBE, b'')
while True:
try:
print(reciever.recv_string())
except (KeyboardInterrupt, zmq.ContextTerminated):
break
def main():
parser = argparse.ArgumentParser()
parser.add_argument("interface", type=str, help="the network interface", choices=interfaces())
parser.add_argument("user", type=str, default=os.environ['USER'], nargs='?', help="Your username")
args = parser.parse_args()
inet = ifaddresses(args.interface)[AF_INET]
addr = inet[0]['addr']
message = addr.rsplit('.', 1)[0]
ctx = zmq.Context.instance()
recieve_thread = Thread(target=recieve, args=(message,))
recieve_thread.start()
serve = ctx.socket(zmq.PUB)
serve.bind("tcp://%s:9000" % args.interface)
print("starting chat on %s:9000 (%s.*)" % (args.interface, message))
while True:
try:
msg = input()
serve.send_string("%s: %s" % (args.user, msg))
except KeyboardInterrupt:
break
serve.close(linger=0)
ctx.term()
if __name__ == '__main__':
main()
| 26.634615
| 102
| 0.626715
| 167
| 1,385
| 5.095808
| 0.502994
| 0.045828
| 0.030552
| 0.049354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020427
| 0.222383
| 1,385
| 51
| 103
| 27.156863
| 0.769731
| 0
| 0
| 0.2
| 0
| 0
| 0.096029
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.125
| 0
| 0.175
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd68c200c93d96ecc3b7ad0ac3280311cd7d42ce
| 1,822
|
py
|
Python
|
src/playerprofile.py
|
MarinVlaic/AIBG
|
cf4960586bdb3c32f8e566c10f9f1e59e6f9ac2d
|
[
"MIT"
] | null | null | null |
src/playerprofile.py
|
MarinVlaic/AIBG
|
cf4960586bdb3c32f8e566c10f9f1e59e6f9ac2d
|
[
"MIT"
] | null | null | null |
src/playerprofile.py
|
MarinVlaic/AIBG
|
cf4960586bdb3c32f8e566c10f9f1e59e6f9ac2d
|
[
"MIT"
] | null | null | null |
class PlayerProfile:
def __init__(self, id):
self.cities = []
self.resources = {
"SHEEP": 0,
"WOOD": 0,
"WHEAT": 0,
"CLAY": 0,
"IRON": 0
}
self.current_builder_intersection_position_id = None
self.id = id
self.owned_roads = set()
def get_score(self) -> int:
return sum(map(lambda x: x.level, self.cities))
def has_enough_resources(self, resource_dict):
for resource in resource_dict:
if resource_dict[resource] > self.resources[resource]:
return False
return True
def add_road(self, destination_intersection_id):
if self.current_builder_intersection_position_id > destination_intersection_id:
self.owned_roads.add((destination_intersection_id, self.current_builder_intersection_position_id))
else:
self.owned_roads.add((self.current_builder_intersection_position_id, destination_intersection_id))
def __eq__(self, other):
if other.id != self.id:
return False
else:
retval = set(self.cities) == set(other.cities)
if not retval:
return False
for resource in self.resources:
retval = retval and self.resources[resource] == other.resources[resource]
return retval
def check_road(self, id1, id2):
if id1 > id2:
return (id2, id1) in self.owned_roads
else:
return (id1, id2) in self.owned_roads
def update_resources(self):
for city in self.cities:
for tile in city.intersection.neighbouring_tiles:
if tile.type not in ("WATER", "DUST"):
self.resources[tile.type] += tile.weight * city.level
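A small sketch exercising the pure bookkeeping methods (the city/tile objects used by `get_score` and `update_resources` are assumed to live elsewhere in the project):
p = PlayerProfile(id=1)
p.resources.update({'WOOD': 3, 'CLAY': 2})
print(p.has_enough_resources({'WOOD': 2, 'CLAY': 2}))  # True
print(p.has_enough_resources({'IRON': 1}))             # False
p.current_builder_intersection_position_id = 7
p.add_road(4)  # stored as the ordered pair (4, 7)
print(p.check_road(4, 7), p.check_road(7, 4))          # True True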
| 35.038462
| 110
| 0.593853
| 210
| 1,822
| 4.928571
| 0.290476
| 0.028986
| 0.067633
| 0.115942
| 0.202899
| 0.202899
| 0.125604
| 0.125604
| 0.125604
| 0
| 0
| 0.01045
| 0.317234
| 1,822
| 51
| 111
| 35.72549
| 0.821543
| 0
| 0
| 0.133333
| 0
| 0
| 0.017014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155556
| false
| 0
| 0
| 0.022222
| 0.355556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd6940cd949b8d012c79a302492e17dd59770ba1
| 2,267
|
py
|
Python
|
source/CTRW.py
|
tangxiangong/ClassTop
|
fdafdafd165672ae464210fb8c66c70256d50956
|
[
"MIT"
] | null | null | null |
source/CTRW.py
|
tangxiangong/ClassTop
|
fdafdafd165672ae464210fb8c66c70256d50956
|
[
"MIT"
] | null | null | null |
source/CTRW.py
|
tangxiangong/ClassTop
|
fdafdafd165672ae464210fb8c66c70256d50956
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Time : 2021/12/1 13:27
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
from trajectory import Trajectory
from rnd import stable_rnd, skewed_stable_rnd
class CTRW(Trajectory):
def __init__(self, t_len, ind_waiting, ind_jump, init_position=0):
super(CTRW, self).__init__()
self._T = t_len
self._alpha = ind_jump
self._beta = ind_waiting
self._x0 = init_position
self._simulate()
def _simulate(self):
if self._beta == 1:
waiting_time = random.exponential
else:
waiting_time = skewed_stable_rnd
self._t = np.zeros(1)
self._x = np.array([self._x0])
total_time = 0
current_position = self._x0
n = 1
while True:
n += 1
tau = waiting_time(self._beta)
if total_time + tau > self._T:
self._t = np.append(self._t, self._T)
self._x = np.append(self._x, current_position)
self._n = n
break
else:
xi = stable_rnd(self._alpha)
total_time += tau
current_position += xi
self._t = np.append(self._t, total_time)
self._x = np.append(self._x, current_position)
def plot(self):
plt.figure()
plt.step(self._t, self._x, where="post")
plt.show()
if __name__ == "__main__":
m1 = CTRW(100, 1, 2)
t1, x1 = m1.get()
fig1 = plt.figure(1)
plt.step(t1, x1, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig1.savefig("../figures/ctrw1.png")
m2 = CTRW(100, 0.7, 2)
t2, x2 = m2.get()
fig2 = plt.figure(2)
plt.step(t2, x2, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig2.savefig("../figures/ctrw2.png")
m3 = CTRW(100, 1, 1.5)
t3, x3 = m3.get()
fig3 = plt.figure(3)
plt.step(t3, x3, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig3.savefig("../figures/ctrw3.png")
m4 = CTRW(100, 0.7, 1.5)
t4, x4 = m4.get()
fig4 = plt.figure(4)
plt.step(t4, x4, where="post")
plt.xlabel("t")
plt.ylabel("x")
fig4.savefig("../figures/ctrw4.png")
| 27.313253
| 70
| 0.549625
| 317
| 2,267
| 3.719243
| 0.309148
| 0.042409
| 0.050891
| 0.061069
| 0.184902
| 0.184902
| 0.154368
| 0.154368
| 0
| 0
| 0
| 0.054106
| 0.307014
| 2,267
| 82
| 71
| 27.646341
| 0.696372
| 0.029996
| 0
| 0.171429
| 0
| 0
| 0.052823
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042857
| false
| 0
| 0.071429
| 0
| 0.128571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd6b1d33d27551aa6e7a920f48a0b7633b6280b3
| 3,931
|
py
|
Python
|
Paris_G_1-2-3_v2.py
|
Gaspe-R/Rendez-vous-prefecture-Paris
|
e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a
|
[
"MIT"
] | null | null | null |
Paris_G_1-2-3_v2.py
|
Gaspe-R/Rendez-vous-prefecture-Paris
|
e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a
|
[
"MIT"
] | null | null | null |
Paris_G_1-2-3_v2.py
|
Gaspe-R/Rendez-vous-prefecture-Paris
|
e24d1bf0ae6ca5860ad858957c5e923c0ac3d85a
|
[
"MIT"
] | null | null | null |
from sqlite3 import Date
from twilio.rest import Client
from datetime import datetime
from playsound import playsound
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import csv
import time
################################ "PREFECTURE DE PARIS" #####################################
######################## "Remise d'un titre de séjour étranger" ###########################
while True:
# New instance for Chrome
browser = webdriver.Chrome(ChromeDriverManager().install())
# Open the webpage
try:
browser.get('https://pprdv.interieur.gouv.fr/booking/create/989')
time.sleep(3)
# Save the window opener (current window, do not mistaken with tab... not the same)
main_window = browser.current_window_handle
# Accept the cookies:
browser.find_element_by_xpath("//a[@onclick='javascript:accepter()']").click()
time.sleep(2)
# Click in checkbox "Veuillez cocher la case pour..." :
browser.find_element_by_xpath("//input[@name='condition']").click()
time.sleep(3)
# Click in the submit button :
browser.find_element_by_xpath("//input[@name='nextButton']").click()
time.sleep(3)
# Click in the radio button "Guichets 1-2 &3" :
browser.find_element_by_xpath("//input[@id='planning990']").click()
time.sleep(3)
# Click in the submit button 1 :
browser.find_element_by_xpath("//input[@type='submit']").click()
time.sleep(4)
##################################################
# Variables :
textNo = "Il n'existe plus de plage horaire libre pour votre demande de rendez-vous"
textOui = "Choix d'une plage horaire"
son = "./alert.wav" # set the path to your audio file for the alert
url = browser.current_url
now = datetime.now()
Heure = now.strftime("%H:%M:%S")
Date = datetime.now().strftime("%d/%m/%Y")
#account Twilio :
account_sid = 'SID' # add your Twilio account SID
auth_token = 'token' # add your Twilio auth token
client = Client(account_sid, auth_token)
#log CSV:
header = ['Date', 'Heure', 'Préfecture', 'Disponibilité']
DataNo = [Date, Heure,'Paris G 1-2 et 3', 'Pas de Rendez-vous']
DataOui = [Date, Heure, 'Paris G 1-2 et 3', 'Rendez-vous Disponible']
##################################################
#Conditions :
if (textOui in browser.page_source):
browser.find_element_by_xpath("//input[@type='submit']").click()
print("")
print("RDV Disponible")
print("")
with open('./log.csv', 'a', newline='') as f: # set the path to your log file
writer = csv.writer(f)
writer.writerow(DataOui)
"""
# Send SMS Alert :
message = client.messages.create(
from_='your Twilio number',
body = 'Rendez-vous prefecture disponible, https://pprdv.interieur.gouv.fr/booking/create/989',
to ='your personal number'
)
print(message.sid)
"""
#alert sound :
playsound(son)
time.sleep(900)
break
elif (textNo in browser.page_source):
playsound(son)
print("")
print("Pas de RDV")
print("")
with open('./log.csv', 'a', newline='') as f: # set the path to your log file
writer = csv.writer(f)
writer.writerow(DataNo)
time.sleep(30)
browser.quit()
except Exception:
browser.quit()
time.sleep(60)
| 36.738318
| 126
| 0.522768
| 419
| 3,931
| 4.832936
| 0.398568
| 0.04
| 0.053333
| 0.059259
| 0.319506
| 0.30716
| 0.278025
| 0.232099
| 0.171852
| 0.090864
| 0
| 0.012231
| 0.313661
| 3,931
| 106
| 127
| 37.084906
| 0.738325
| 0.155177
| 0
| 0.295082
| 0
| 0
| 0.191754
| 0.063011
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.131148
| 0
| 0.131148
| 0.098361
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd6e8efff351684ee42b6f8c78aec9644cacd755
| 8,661
|
py
|
Python
|
acme_tiny.py
|
dennydai/docker-letsencrypt
|
898fa70665d321e527c7fcc463a57a66dbbdab26
|
[
"MIT"
] | 22
|
2015-12-06T06:19:43.000Z
|
2016-03-10T06:44:34.000Z
|
acme_tiny.py
|
dennydai/docker-letsencrypt
|
898fa70665d321e527c7fcc463a57a66dbbdab26
|
[
"MIT"
] | 1
|
2016-09-11T07:38:45.000Z
|
2016-09-11T10:50:26.000Z
|
acme_tiny.py
|
dennydai/docker-letsencrypt
|
898fa70665d321e527c7fcc463a57a66dbbdab26
|
[
"MIT"
] | 4
|
2015-12-22T01:25:16.000Z
|
2016-01-14T13:24:27.000Z
|
#!/usr/bin/env python
import argparse, subprocess, json, os, os.path, urllib2, sys, base64, binascii, time, \
hashlib, re, copy, textwrap
#CA = "https://acme-staging.api.letsencrypt.org"
CA = "https://acme-v01.api.letsencrypt.org"
def get_crt(account_key, csr, acme_dir):
# helper function base64 encode for jose spec
def _b64(b):
return base64.urlsafe_b64encode(b).replace("=", "")
# parse account key to get public key
sys.stderr.write("Parsing account key...")
proc = subprocess.Popen(["openssl", "rsa", "-in", account_key, "-noout", "-text"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
pub_hex, pub_exp = re.search(
r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
out, re.MULTILINE|re.DOTALL).groups()
pub_mod = binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex))
pub_mod64 = _b64(pub_mod)
pub_exp = "{0:x}".format(int(pub_exp))
pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
pub_exp64 = _b64(binascii.unhexlify(pub_exp))
header = {
"alg": "RS256",
"jwk": {
"e": pub_exp64,
"kty": "RSA",
"n": pub_mod64,
},
}
accountkey_json = json.dumps(header['jwk'], sort_keys=True, separators=(',', ':'))
thumbprint = _b64(hashlib.sha256(accountkey_json).digest())
sys.stderr.write("parsed!\n")
# helper function make signed requests
def _send_signed_request(url, payload):
nonce = urllib2.urlopen(CA + "/directory").headers['Replay-Nonce']
payload64 = _b64(json.dumps(payload))
protected = copy.deepcopy(header)
protected.update({"nonce": nonce})
protected64 = _b64(json.dumps(protected))
proc = subprocess.Popen(["openssl", "dgst", "-sha256", "-sign", account_key],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate("{0}.{1}".format(protected64, payload64))
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
data = json.dumps({
"header": header,
"protected": protected64,
"payload": payload64,
"signature": _b64(out),
})
try:
resp = urllib2.urlopen(url, data)
return resp.getcode(), resp.read()
except urllib2.HTTPError as e:
return e.code, e.read()
# find domains
sys.stderr.write("Parsing CSR...")
proc = subprocess.Popen(["openssl", "req", "-in", csr, "-noout", "-text"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("Error loading {0}: {1}".format(csr, err))
domains = set([])
common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out)
if common_name is not None:
domains.add(common_name.group(1))
subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out, re.MULTILINE|re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.startswith("DNS:"):
domains.add(san[4:])
sys.stderr.write("parsed!\n")
# get the certificate domains and expiration
sys.stderr.write("Registering account...")
code, result = _send_signed_request(CA + "/acme/new-reg", {
"resource": "new-reg",
"agreement": "https://letsencrypt.org/documents/LE-SA-v1.0.1-July-27-2015.pdf",
})
if code == 201:
sys.stderr.write("registered!\n")
elif code == 409:
sys.stderr.write("already registered!\n")
else:
raise ValueError("Error registering: {0} {1}".format(code, result))
# verify each domain
for domain in domains:
sys.stderr.write("Verifying {0}...".format(domain))
# get new challenge
code, result = _send_signed_request(CA + "/acme/new-authz", {
"resource": "new-authz",
"identifier": {
"type": "dns",
"value": domain,
},
})
if code != 201:
raise ValueError("Error registering: {0} {1}".format(code, result))
# make the challenge file
challenge = [c for c in json.loads(result)['challenges'] if c['type'] == "http-01"][0]
challenge['token'] = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = "{0}.{1}".format(challenge['token'], thumbprint)
wellknown_path = os.path.join(acme_dir, challenge['token'])
wellknown_file = open(wellknown_path, "w")
wellknown_file.write(keyauthorization)
wellknown_file.close()
# check that the file is in place
wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(
domain, challenge['token'])
try:
resp = urllib2.urlopen(wellknown_url)
assert resp.read().strip() == keyauthorization
except (urllib2.HTTPError, urllib2.URLError, AssertionError):
os.remove(wellknown_path)
raise ValueError("Wrote file to {0}, but couldn't download {1}".format(
wellknown_path, wellknown_url))
# notify the CA that the challenge is met
code, result = _send_signed_request(challenge['uri'], {
"resource": "challenge",
"keyAuthorization": keyauthorization,
})
if code != 202:
raise ValueError("Error triggering challenge: {0} {1}".format(code, result))
# wait for challenge to be verified
while True:
try:
resp = urllib2.urlopen(challenge['uri'])
challenge_status = json.loads(resp.read())
except urllib2.HTTPError as e:
raise ValueError("Error checking challenge: {0} {1}".format(
e.code, json.loads(e.read())))
if challenge_status['status'] == "pending":
time.sleep(2)
elif challenge_status['status'] == "valid":
sys.stderr.write("verified!\n")
os.remove(wellknown_path)
break
else:
raise ValueError("{0} challenge did not pass: {1}".format(
domain, challenge_status))
# get the new certificate
sys.stderr.write("Signing certificate...")
proc = subprocess.Popen(["openssl", "req", "-in", csr, "-outform", "DER"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
csr_der, err = proc.communicate()
code, result = _send_signed_request(CA + "/acme/new-cert", {
"resource": "new-cert",
"csr": _b64(csr_der),
})
if code != 201:
raise ValueError("Error signing certificate: {0} {1}".format(code, result))
# return signed certificate!
sys.stderr.write("signed!\n")
return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
"\n".join(textwrap.wrap(base64.b64encode(result), 64)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
This script automates the process of getting a signed TLS certificate from
Let's Encrypt using the ACME protocol. It will need to be run on your server
and have access to your private account key, so PLEASE READ THROUGH IT! It's
only ~200 lines, so it won't take long.
===Example Usage===
python acme_tiny.py --account-key ./account.key --csr ./domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > signed.crt
===================
===Example Crontab Renewal (once per month)===
0 0 1 * * python /path/to/acme_tiny.py --account-key /path/to/account.key --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > /path/to/signed.crt 2>> /var/log/acme_tiny.log
==============================================
""")
)
parser.add_argument("--account-key", required=True, help="path to your Let's Encrypt account private key")
parser.add_argument("--csr", required=True, help="path to your certificate signing request")
parser.add_argument("--acme-dir", required=True, help="path to the .well-known/acme-challenge/ directory")
args = parser.parse_args()
signed_crt = get_crt(args.account_key, args.csr, args.acme_dir)
sys.stdout.write(signed_crt)
| 43.522613
| 221
| 0.592772
| 1,041
| 8,661
| 4.837656
| 0.300672
| 0.023828
| 0.03058
| 0.020651
| 0.226966
| 0.185068
| 0.159253
| 0.132645
| 0.111199
| 0.091739
| 0
| 0.023455
| 0.246854
| 8,661
| 198
| 222
| 43.742424
| 0.748582
| 0.051149
| 0
| 0.189024
| 0
| 0.02439
| 0.28197
| 0.035841
| 0
| 0
| 0
| 0
| 0.012195
| 1
| 0.018293
| false
| 0.006098
| 0.006098
| 0.006098
| 0.04878
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd6f2ad698e4fc98b32ec95e10035f7d48a91c97
| 3,667
|
py
|
Python
|
esb/Tags.py
|
sgbalogh/esb.py
|
06e8f86b94d5dadc628a0fbd396212649328864d
|
[
"MIT"
] | null | null | null |
esb/Tags.py
|
sgbalogh/esb.py
|
06e8f86b94d5dadc628a0fbd396212649328864d
|
[
"MIT"
] | null | null | null |
esb/Tags.py
|
sgbalogh/esb.py
|
06e8f86b94d5dadc628a0fbd396212649328864d
|
[
"MIT"
] | null | null | null |
class Tags:
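# Namespaces of string constants used as annotation labels: Thematic tags
# mark record-level topics, Token tags label individual tokens.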
class Thematic:
ACC_NOTE = "account:note"
DELIMITER = "delimiter:thematic"
FAM_SIBLINGS = "fam:siblings"
FAM_CHILDREN = "fam:children"
FAM_PARENTS = "fam:parents"
FAM_SPOUSE = "fam:spouse"
META_NO_REMARK = "meta:no-remarks"
META_PARENTHETICAL = "meta:parenthetical"
META_RECORD = "meta:record-reference"
SUBJ_AGE = "subj:age"
SUBJ_BIO = "subj:biographical-note"
SUBJ_EMIGRATION = "subj:emigration-event"
SUBJ_MARTIAL = "subj:marital-status"
SUBJ_NAME = "subj:name"
SUBJ_NATIVEOF = "subj:native-of"
SUBJ_OCCUPATION = "subj:occupation"
SUBJ_RESIDENCE = "subj:residence-info"
class Token:
END = "END"
START = "START"
DELIMITER = "t:DELIMITER"
EMIGRATION_ARRIVED = "t:emigration:ARRIVED"
EMIGRATION_VESSEL = "t:emigration:VESSEL"
EMIGRATION_VESSEL_HAS_ORIGIN = "t:emigration:VESSEL_HAS_ORIGIN"
EMIGRATION_VIA = "t:emigration:VIA"
LOCATION_DISTANCE = "t:location:DISTANCE"
LOCATION_DISTANCE_UNIT = "t:location:DISTANCE_UNIT"
LOCATION_FROM = "t:location:FROM"
LOCATION_NAME = "t:location:NAME"
META_ACCOUNT_CLOSED = "t:meta:ACCOUNT_CLOSED"
META_ACCOUNT_NUMBER = "t:meta:ACCOUNT_NUMBER"
META_IS_SAME_AS = "t:meta:IS_SAME_AS"
META_NO_REMARKS = "t:meta:NO_REMARKS"
META_SEE = "t:meta:SEE"
META_PARENTHETICAL = "meta:parenthetical"
PERSON_AGE = "t:person:AGE"
PERSON_AGE_YEAR = "t:person:AGE_YEAR"
PERSON_BROTHERS = "t:person:BROTHERS"
PERSON_CHILDREN = "t:person:CHILDREN"
PERSON_FATHER = "t:person:FATHER"
PERSON_HAS_NATIONALITY = "t:person:HAS_NATIONALITY"
PERSON_IS_DEAD = "t:person:IS_DEAD"
PERSON_IS_LIVING = "t:person:IS_LIVING"
PERSON_IS_SINGLE = "t:person:IS_SINGLE"
PERSON_IS_WIDOWED = "t:person:IS_WIDOWED"
PERSON_LOCATED_IN = "t:person:LOCATED_IN"
PERSON_MOTHER = "t:person:MOTHER"
PERSON_NAME = "t:person:NAME"
PERSON_NUMBER = "t:person:NUMBER"
PERSON_PARENTS = "t:person:PARENTS"
PERSON_SISTERS = "t:person:SISTERS"
PERSON_SON = "t:person:SON"
PERSON_HUSBAND = "t:person:HUSBAND"
PERSON_WIFE = "t:person:WIFE"
PERSON_STEP_SIBLING = "t:person:STEP_SIBLING"
PERSON_IS_MINOR = "t:person:IS_MINOR"
REL_HAS = "t:rel:HAS"
REL_HAS_HUSBAND = "t:rel:HAS_HUSBAND"
REL_HAS_WIFE = "t:rel:HAS_WIFE"
REL_HAS_SPOUSE = "t:rel:HAS_SPOUSE"
REL_IS_NATIVE_OF = "t:rel:IS_NATIVE_OF"
REL_IS_WIDOW_OF = "t:rel:IS_WIDOW_OF"
REL_IS_DAUGHTER_OF = "t:rel:IS_DAUGHTER_OF"
REL_TO = "t:rel:TO"
RESIDENTIAL_RESIDENCE = "t:residential:RESIDENCE"
RESIDENTIAL_CURRENTLY_LIVING_AT = "t:residential:CURRENTLY_LIVING_AT"
RESIDENTIAL_FORMERLY_LOCATED_AT = "t:residential:FORMERLY_LOCATED_AT"
RESIDENTIAL_LIVED_WITH = "t:residential:LIVED_WITH"
RESIDENTIAL_LIVES_WITH = "t:residential:LIVES_WITH"
SUBJ_IS_MAN = "t:subj:IS_MAN"
SUBJ_IS_WOMAN = "t:subj:IS_WOMAN"
TIME_DATE = "t:time:DATE"
TIME_MONTH = "t:time:MONTH"
TIME_YEAR = "t:time:YEAR"
TIME_DURATION_VALUE = "t:time:DURATION_VALUE"
TIME_DURATION_YEAR = "t:time:DURATION_YEAR"
TIME_ABOUT = "t:time:ABOUT"
UNKNOWN = "t:UNKNOWN"
WORK_EMPLOYER_NAME = "t:work:EMPLOYER_NAME"
WORK_OCCUPATION = "t:work:OCCUPATION"
WORK_WORKS_FOR = "t:work:WORKS_FOR"
BLANK = "t:BLANK"
| 35.95098
| 77
| 0.642214
| 464
| 3,667
| 4.747845
| 0.211207
| 0.066727
| 0.020427
| 0.010894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251432
| 3,667
| 101
| 78
| 36.306931
| 0.80255
| 0
| 0
| 0.02381
| 0
| 0
| 0.360884
| 0.099018
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd7529c73cff8550931b72e595537b4c1b291bee
| 1,940
|
py
|
Python
|
scripts/stats_wrapper.py
|
gpertea/regtools
|
a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c
|
[
"MIT"
] | 70
|
2015-08-05T21:32:51.000Z
|
2021-11-26T13:26:33.000Z
|
scripts/stats_wrapper.py
|
gpertea/regtools
|
a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c
|
[
"MIT"
] | 145
|
2015-08-05T22:27:58.000Z
|
2022-03-14T21:50:17.000Z
|
scripts/stats_wrapper.py
|
gpertea/regtools
|
a59d5dbd3268b0d83412e6fe81cf7e924c7bcb7c
|
[
"MIT"
] | 29
|
2015-08-01T02:19:40.000Z
|
2021-12-16T20:02:40.000Z
|
import glob
import subprocess
import os
import argparse
import shutil
input_parser = argparse.ArgumentParser(
description="Run RegTools stats script",
)
input_parser.add_argument(
'tag',
help="Variant tag parameter used to run RegTools.",
)
args = input_parser.parse_args()
tag = args.tag
cwd = os.getcwd()
lines_per_file = 25000
smallfile = None
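# Split the big BED file into 25,000-line chunks, repeating the header in
# each chunk so the R script can process them independently.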
with open(f'all_splicing_variants_{tag}.bed', 'r') as bigfile:
header = bigfile.readline()
for lineno, line in enumerate(bigfile):
if lineno % lines_per_file == 0:
if smallfile:
smallfile.close()
small_filename = 'small_file_{}.txt'.format(lineno + lines_per_file)
smallfile = open(small_filename, "w")
smallfile.write(header)
smallfile.write(line)
if smallfile:
smallfile.close()
# get chunks
files = glob.glob('small_file_*')
files.sort()
number_of_in_files = len(files)
for file in files:
subprocess.run(f'Rscript --vanilla compare_junctions_hist_v2.R {tag} {file}', shell=True, check=True)
output_files = glob.glob("*_out.tsv")
output_files.sort()  # glob lacks reliable ordering, so impose your own if output order matters
number_of_out_files = len(output_files)
if number_of_in_files == number_of_out_files:
with open(f'compare_junctions/hist/junction_pvalues_{tag}.tsv', 'wb') as outfile:
for i, fname in enumerate(output_files):
with open(fname, 'rb') as infile:
if i != 0:
infile.readline() # Throw away header on all but first file
# Block copy rest of file from input to output without parsing
shutil.copyfileobj(infile, outfile)
print(fname + " has been imported.")
else:
print("Number of output files doesn't match the number of input files that should have been processed")
files = glob.glob('small_file_*')
for file in files:
os.remove(file)
| 33.448276
| 107
| 0.676804
| 266
| 1,940
| 4.766917
| 0.451128
| 0.037855
| 0.028391
| 0.028391
| 0.0347
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005316
| 0.224227
| 1,940
| 57
| 108
| 34.035088
| 0.837209
| 0.094845
| 0
| 0.16
| 0
| 0
| 0.215877
| 0.061108
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd7892510c7f345ccc184879db2d6bb6e417c44a
| 451
|
py
|
Python
|
lib/model/utils/plt_loss.py
|
PhoneSix/Domain-Contrast
|
5c674b581bce9beacf5bc0dd13113f33c4050495
|
[
"MIT"
] | 4
|
2021-07-31T01:04:15.000Z
|
2022-03-09T07:23:10.000Z
|
lib/model/utils/plt_loss.py
|
PhoneSix/Domain-Contrast
|
5c674b581bce9beacf5bc0dd13113f33c4050495
|
[
"MIT"
] | null | null | null |
lib/model/utils/plt_loss.py
|
PhoneSix/Domain-Contrast
|
5c674b581bce9beacf5bc0dd13113f33c4050495
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import os
def plt_loss(epoch, dir_, name, value):
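# Plot `value` (one entry per epoch) against the epoch axis and save the
# figure as <dir_>/<name>.pdf, creating the output directory if needed.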
if not os.path.exists(dir_):
os.makedirs(dir_)
axis = np.linspace(1,epoch,epoch)
label = '{}'.format(name)
fig = plt.figure()
plt.title(label)
plt.plot(axis, value)
# plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.grid(True)
plt.savefig('{}/{}.pdf'.format(dir_, name))
plt.close(fig)
| 25.055556
| 47
| 0.62306
| 66
| 451
| 4.181818
| 0.560606
| 0.050725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002809
| 0.210643
| 451
| 18
| 48
| 25.055556
| 0.772472
| 0.026608
| 0
| 0
| 0
| 0
| 0.048387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd79597c4dc624f2537254fe68c7bb39e5b6003c
| 2,549
|
py
|
Python
|
apps/insar.py
|
giswqs/streamlit-insar
|
e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455
|
[
"MIT"
] | 5
|
2021-12-14T23:28:36.000Z
|
2022-02-27T14:35:29.000Z
|
apps/insar.py
|
giswqs/streamlit-insar
|
e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455
|
[
"MIT"
] | null | null | null |
apps/insar.py
|
giswqs/streamlit-insar
|
e2c0897f01aeff96cd119cce8cf6dd3d8fb0e455
|
[
"MIT"
] | null | null | null |
import folium
import altair as alt
import leafmap.foliumap as leafmap
import pandas as pd
import streamlit as st
def app():
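# Streamlit page: render the InSAR points either as a marker cluster or as
# circle markers with a per-point time-series popup chart.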
st.title("InSAR")
option = st.radio("Choose an option", ("Marker Cluster", "Circle Marker"))
m = leafmap.Map(
center=[29.7029, -95.3335], latlon_control=False, zoom=16, height=600
)
data = "data/insar_data.csv"
if option == "Circle Marker":
df = pd.read_csv(data, skiprows=0).head(100)
df.columns = [col.replace(" ", "_").replace(".", "_") for col in df.columns]
columns = df.columns.values.tolist()
tooltip_cols = [
"ID",
"LAT",
"LON",
"HEIGHT",
"HEIGHT_WRT_DEM",
"SIGMA_HEIGHT",
"COHER",
]
ts_cols = columns[16:82]
ts_df = df[ts_cols]
min_width = 100
max_width = 200
x = ("LON",)
y = ("LAT",)
radius = 5
i = 0
for row in df.itertuples():
html = ""
for p in tooltip_cols:
html = (
html
+ "<b>"
+ p
+ "</b>"
+ ": "
+ str(getattr(row, p))  # getattr replaces the original eval("row." + p)
+ "<br>"
)
i_df = ts_df.iloc[[i]].transpose()
i_df.columns = ["value"]
i_df["date"] = i_df.index
i_df = i_df.reset_index()
graph = (  # the unused "line =" alias was dropped
alt.Chart(i_df)
.mark_line(interpolate="basis")
.encode(
x="date",
y="value",
)
)
popup_html = folium.Popup(html, min_width=min_width, max_width=max_width)
tooltip_str = folium.Tooltip(html)
popup = folium.Popup().add_child(
folium.features.VegaLite(graph, width="50%")
)
folium.CircleMarker(
location=[row.LAT, row.LON],
radius=radius,
popup=popup,
tooltip=tooltip_str,
).add_to(m)
i += 1
elif option == "Marker Cluster":
df = pd.read_csv(data)
columns = [
"ID",
"LAT",
"LON",
"HEIGHT",
"HEIGHT_WRT_DEM",
"SIGMA_HEIGHT",
"COHER",
]
df = df[columns]
m.add_points_from_xy(df, x="LON", y="LAT", radius=5)
m.to_streamlit(height=600)
| 25.747475
| 85
| 0.433503
| 266
| 2,549
| 3.996241
| 0.402256
| 0.019755
| 0.035748
| 0.020696
| 0.135466
| 0.107244
| 0.079022
| 0.079022
| 0.079022
| 0.079022
| 0
| 0.027913
| 0.437819
| 2,549
| 98
| 86
| 26.010204
| 0.713887
| 0
| 0
| 0.168675
| 0
| 0
| 0.095332
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012048
| false
| 0
| 0.060241
| 0
| 0.072289
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd7a330fb695d24e5d3e2270fbbe2e1e0d11d2dc
| 2,105
|
py
|
Python
|
solve_net.py
|
a1exwang/theano-cnn-intro
|
5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62
|
[
"MIT"
] | null | null | null |
solve_net.py
|
a1exwang/theano-cnn-intro
|
5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62
|
[
"MIT"
] | null | null | null |
solve_net.py
|
a1exwang/theano-cnn-intro
|
5f6ecdcb2908afb34a7d94e69b1d1ab13beb3c62
|
[
"MIT"
] | null | null | null |
from utils import LOG_INFO
import numpy as np
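# data_iterator yields (optionally shuffled) mini-batches; solve_net runs the
# training loop, logging mean loss/accuracy every disp_freq iterations and
# evaluating on the test set every test_freq iterations.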
def data_iterator(x, y, batch_size, shuffle=True):
indx = list(range(len(x)))  # list() so np.random.shuffle can permute it in place under Python 3
if shuffle:
np.random.shuffle(indx)
for start_idx in range(0, len(x), batch_size):
end_idx = min(start_idx + batch_size, len(x))
batch = indx[start_idx: end_idx]  # index through the shuffled order; the original sliced x directly and ignored the shuffle
yield x[batch], y[batch]
def solve_net(model, train_x, train_y, test_x, test_y,
batch_size, max_epoch, disp_freq, test_freq):
iter_counter = 0
loss_list = []
accuracy_list = []
test_acc = []
test_loss = []
for k in range(max_epoch):
for x, y in data_iterator(train_x, train_y, batch_size):
iter_counter += 1
loss, accuracy = model.train(x, y)
loss_list.append(loss)
accuracy_list.append(accuracy)
if iter_counter % disp_freq == 0:
msg = 'Training iter %d, mean loss %.5f (batch loss %.5f), mean acc %.5f' % (iter_counter,
np.mean(loss_list),
loss_list[-1],
np.mean(accuracy_list))
LOG_INFO(msg)
loss_list = []
accuracy_list = []
if iter_counter % test_freq == 0:
LOG_INFO(' Testing...')
for tx, ty in data_iterator(test_x, test_y, batch_size, shuffle=False):
t_accuracy, t_loss = model.test(tx, ty)
test_acc.append(t_accuracy)
test_loss.append(t_loss)
msg = ' Testing iter %d, mean loss %.5f, mean acc %.5f' % (iter_counter,
np.mean(test_loss),
np.mean(test_acc))
LOG_INFO(msg)
test_acc = []
test_loss = []
| 39.716981
| 116
| 0.443705
| 231
| 2,105
| 3.774892
| 0.251082
| 0.061927
| 0.045872
| 0.038991
| 0.144495
| 0.116972
| 0.073395
| 0.073395
| 0.073395
| 0
| 0
| 0.009821
| 0.467933
| 2,105
| 52
| 117
| 40.480769
| 0.76875
| 0
| 0
| 0.238095
| 0
| 0
| 0.061283
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd7b0a77a1f93e1e0546019ec5051874f1e448ee
| 1,199
|
py
|
Python
|
playground/test1.py
|
mathee92/unirentalz
|
803c58628ebda002e2c127db11fbaddf181ef394
|
[
"MIT"
] | null | null | null |
playground/test1.py
|
mathee92/unirentalz
|
803c58628ebda002e2c127db11fbaddf181ef394
|
[
"MIT"
] | null | null | null |
playground/test1.py
|
mathee92/unirentalz
|
803c58628ebda002e2c127db11fbaddf181ef394
|
[
"MIT"
] | null | null | null |
# -----------
# User Instructions
#
# Modify the valid_month() function to verify
# whether the data a user enters is a valid
# month. If the passed in parameter 'month'
# is not a valid month, return None.
# If 'month' is a valid month, then return
# the name of the month with the first letter
# capitalized.
#
import string
import cgi
months = ['January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December']
def valid_month(month):
month = month.lower()
month = month.title()
if month in months:
return month
else:
return None
def valid_day(day):
if day and day.isdigit():
day = int(day)
if day > 0 and day <= 31:
return day
def valid_year(year):
if year and year.isdigit():
year = int(year)
if year > 1900 and year < 2020:
return year
def escape_html1(s):
for (i,o) in (("&", "&amp;"),
(">", "&gt;"),
("<", "&lt;"),
('"', "&quot;")):
s = s.replace(i,o)
return s
def escape_html2(s):
return cgi.escape(s, quote=True)  # cgi.escape was removed in Python 3.8; html.escape is the modern replacement
| 18.734375
| 46
| 0.539616
| 155
| 1,199
| 4.135484
| 0.451613
| 0.078003
| 0.051482
| 0.040562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015796
| 0.313595
| 1,199
| 63
| 47
| 19.031746
| 0.763062
| 0.247706
| 0
| 0
| 0
| 0
| 0.110485
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.05
| 0.025
| 0.325
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd7da929a4d4176f292520c09ac6f877772c0b49
| 2,274
|
py
|
Python
|
hookio/logs.py
|
Marak/hook.io-sdk-python
|
722b04eb0832ef712d5dcd491899996088e1aa8b
|
[
"Unlicense"
] | 1
|
2021-06-15T11:52:44.000Z
|
2021-06-15T11:52:44.000Z
|
hookio/logs.py
|
Marak/hook.io-sdk-python
|
722b04eb0832ef712d5dcd491899996088e1aa8b
|
[
"Unlicense"
] | null | null | null |
hookio/logs.py
|
Marak/hook.io-sdk-python
|
722b04eb0832ef712d5dcd491899996088e1aa8b
|
[
"Unlicense"
] | null | null | null |
import sys
import weakref
import json
import logging
from .utils import opt_json, Response2JSONLinesIterator
from six import StringIO
log = logging.getLogger(__name__)
class Logs:
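# Thin wrapper over the hook.io "/logs" endpoints: read() fetches entries,
# stream() follows them as JSON lines, flush() clears them, and write()
# emits a log entry from inside a running hook.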
def __init__(self, client):
self.client = weakref.proxy(client)
def read(self, url, raw=False, raw_data=True, **opts):
r = self.client.request('GET', url + '/logs', {}, **opts)
res = opt_json(r, raw)
if not raw and not raw_data and type(res) == list:
res = [json.loads(line) for line in res]
for row in res:
if 'data' in row:
row['data'] = json.loads(row['data'])
return res
def stream(self, url, raw=True, raw_data=True, streaming=True, **opts):
opts['streaming'] = streaming
if streaming:
opts.setdefault('stream_in', StringIO())
if not raw and callable(streaming):
def wrapper(line):
row = json.loads(line)
if not raw_data and 'data' in row:
row['data'] = json.loads(row['data'])
return streaming(row)
assert self.client.line_streaming, "Inconsistent API call"
opts['streaming'] = wrapper
log.debug("Will stream via wrapper")
r = self.client.request('GET', url + '/logs', {}, **opts)
if not raw and streaming and not callable(streaming):
log.debug("Will return iter_objects generator")
chunk_size = opts.get('chunk_size', self.client.chunk_size)
if raw_data:
func = None
else:
func = data_converted
return Response2JSONLinesIterator(r, converter=func, chunk_size=chunk_size)
return r
def flush(self, url, raw=False, **opts):
r = self.client.request('GET', url + '/logs?flush=true', {}, **opts)
return opt_json(r, raw)
def write(self, msg):
assert hasattr(sys.modules['__main__'], 'Hook'), \
"Writing logs supported only inside hook processing"
msg = {'type': 'log', 'payload': {'entry': msg}}
sys.stderr.write(json.dumps(msg) + '\n')
def data_converted(obj):
if 'data' in obj:
obj['data'] = json.loads(obj['data'])
return obj
| 35.53125
| 87
| 0.575638
| 282
| 2,274
| 4.535461
| 0.294326
| 0.05473
| 0.02502
| 0.04222
| 0.137608
| 0.137608
| 0.137608
| 0.137608
| 0.059421
| 0.059421
| 0
| 0.001254
| 0.298593
| 2,274
| 63
| 88
| 36.095238
| 0.800627
| 0
| 0
| 0.074074
| 0
| 0
| 0.118294
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 1
| 0.12963
| false
| 0
| 0.111111
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd7f21d270d7885499684e88d3eb5ad2fac11de9
| 6,376
|
py
|
Python
|
alberto/annotation/train.py
|
lettomobile/DeepPoseKit
|
a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503
|
[
"Apache-2.0"
] | 1
|
2021-11-01T02:08:00.000Z
|
2021-11-01T02:08:00.000Z
|
alberto/annotation/train.py
|
albertoursino/DeepPoseKit
|
a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503
|
[
"Apache-2.0"
] | null | null | null |
alberto/annotation/train.py
|
albertoursino/DeepPoseKit
|
a922d2d99cd55d0a3909c1f3f8b2bf8c377ff503
|
[
"Apache-2.0"
] | null | null | null |
from alberto.annotation import annotation_set
import numpy as np  # "from pandas import np" was removed in modern pandas; import numpy directly
from deepposekit.io import TrainingGenerator, DataGenerator
from deepposekit.augment import FlipAxis
import imgaug.augmenters as iaa
import imgaug as ia
from deepposekit.models import StackedHourglass
from deepposekit.models import load_model
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from deepposekit.callbacks import Logger, ModelCheckpoint
import time
from os.path import expanduser
HOME = annotation_set.HOME
IMAGE_SIZE = annotation_set.IMAGE_SIZE
TYPE = annotation_set.TYPE
data_generator = DataGenerator(
datapath=HOME + '/deepposekit-data/datasets/{}/annotation_set_{}_{}.h5'.format(TYPE, IMAGE_SIZE[0], IMAGE_SIZE[1]))
image, keypoints = data_generator[0]
plt.figure(figsize=(5, 5))
image = image[0] if image.shape[-1] == 3 else image[0, ..., 0]  # compare ints with ==, not "is"
cmap = None if image.shape[-1] == 3 else 'gray'
plt.imshow(image, cmap=cmap, interpolation='none')
for idx, jdx in enumerate(data_generator.graph):
if jdx > -1:
x1 = keypoints[0, idx, 0]
x2 = keypoints[0, jdx, 0]
if (0 <= x1 <= IMAGE_SIZE[0]) and (0 <= x2 <= IMAGE_SIZE[0]):
plt.plot(
[keypoints[0, idx, 0], keypoints[0, jdx, 0]],
[keypoints[0, idx, 1], keypoints[0, jdx, 1]],
'r-'
)
plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50, cmap=plt.cm.hsv, zorder=3)
plt.show()
# Augmentation
augmenter = []
augmenter.append(FlipAxis(data_generator, axis=0)) # flip image up-down
augmenter.append(FlipAxis(data_generator, axis=1)) # flip image left-right
sometimes = []
sometimes.append(iaa.Affine(scale={"x": (0.95, 1.05), "y": (0.95, 1.05)},
translate_percent={'x': (-0.05, 0.05), 'y': (-0.05, 0.05)},
shear=(-8, 8),
order=ia.ALL,
cval=ia.ALL,
mode=ia.ALL)
)
sometimes.append(iaa.Affine(scale=(0.8, 1.2),
mode=ia.ALL,
order=ia.ALL,
cval=ia.ALL)
)
augmenter.append(iaa.Sometimes(0.75, sometimes))
augmenter.append(iaa.Affine(rotate=(-180, 180),
mode=ia.ALL,
order=ia.ALL,
cval=ia.ALL)
)
augmenter = iaa.Sequential(augmenter)
# image, keypoints = data_generator[0]
# image, keypoints = augmenter(images=image, keypoints=keypoints)
# plt.figure(figsize=(5, 5))
# image = image[0] if image.shape[-1] is 3 else image[0, ..., 0]
# cmap = None if image.shape[-1] is 3 else 'gray'
# plt.imshow(image, cmap=cmap, interpolation='none')
# for idx, jdx in enumerate(data_generator.graph):
# if jdx > -1:
# x1 = keypoints[0, idx, 0]
# x2 = keypoints[0, jdx, 0]
# if (0 <= x1 <= IMAGE_SIZE[0]) and (0 <= x2 <= IMAGE_SIZE[0]):
# plt.plot(
# [keypoints[0, idx, 0], keypoints[0, jdx, 0]],
# [keypoints[0, idx, 1], keypoints[0, jdx, 1]],
# 'r-'
# )
# plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50,
#             cmap=plt.cm.hsv, zorder=3)  # commented out to match the rest of this disabled preview block
# plt.show()
train_generator = TrainingGenerator(generator=data_generator,
downsample_factor=3,
augmenter=augmenter,
sigma=5,
validation_split=0,
use_graph=False,
random_seed=1,
graph_scale=1)
train_generator.get_config()
# n_keypoints = data_generator.keypoints_shape[0]
# batch = train_generator(batch_size=1, validation=False)[0]
# inputs = batch[0]
# outputs = batch[1]
# fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10, 10))
# ax1.set_title('image')
# ax1.imshow(inputs[0, ..., 0], vmin=0, vmax=255)
#
# ax2.set_title('posture graph')
# ax2.imshow(outputs[0, ..., n_keypoints:-1].max(-1))
#
# ax3.set_title('keypoints confidence')
# ax3.imshow(outputs[0, ..., :n_keypoints].max(-1))
#
# ax4.set_title('posture graph and keypoints confidence')
# ax4.imshow(outputs[0, ..., -1], vmin=0)
# plt.show()
train_generator.on_epoch_end()
# Define a model
model = StackedHourglass(train_generator)
model.get_config()
# data_size = (10,) + data_generator.image_shape
# x = np.random.randint(0, 255, data_size, dtype="uint8")
# y = model.predict(x[:100], batch_size=100) # make sure the model is in GPU memory
# t0 = time.time()
# y = model.predict(x, batch_size=100, verbose=1)
# t1 = time.time()
# print(x.shape[0] / (t1 - t0))
# logger = Logger(validation_batch_size=10,
# # filepath saves the logger data to a .h5 file
# filepath=HOME + "/deepposekit-data/datasets/{}/log_densenet.h5".format(TYPE)
# )
# Remember, if you set validation_split=0 for your TrainingGenerator,
# which will just use the training set for model fitting,
# make sure to set monitor="loss" instead of monitor="val_loss".
reduce_lr = ReduceLROnPlateau(monitor="loss", factor=0.2, verbose=1, patience=20)
model_checkpoint = ModelCheckpoint(
HOME + "/deepposekit-data/datasets/{}/model_densenet.h5".format(TYPE),
monitor="loss",
# monitor="loss" # use if validation_split=0
verbose=1,
save_best_only=True,
)
early_stop = EarlyStopping(
monitor="loss",
# monitor="loss" # use if validation_split=0
min_delta=0.001,
patience=100,
verbose=1
)
callbacks = [early_stop, reduce_lr, model_checkpoint]
model.fit(
batch_size=5,
validation_batch_size=10,
callbacks=callbacks,
# epochs=1000, # Increase the number of epochs to train the model longer
epochs=50,
n_workers=8,
steps_per_epoch=None,
)
# model = load_model(
# HOME + "/deepposekit-data/datasets/{}/model_densenet.h5".format(TYPE),
# augmenter=augmenter,
# generator=data_generator,
# )
#
# model.fit(
# batch_size=2,
# validation_batch_size=10,
# callbacks=callbacks,
# epochs=50,
# n_workers=8,
# steps_per_epoch=None,
# )
| 32.697436
| 132
| 0.606336
| 831
| 6,376
| 4.54272
| 0.244284
| 0.042384
| 0.020662
| 0.028609
| 0.396556
| 0.346225
| 0.32
| 0.297748
| 0.297748
| 0.229404
| 0
| 0.047239
| 0.25298
| 6,376
| 194
| 133
| 32.865979
| 0.745329
| 0.387077
| 0
| 0.095745
| 0
| 0
| 0.032872
| 0.026089
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.138298
| 0
| 0.138298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd8237accaa927ddf6513747162736a47cc442f6
| 763
|
py
|
Python
|
northpole/settings/local_staging.py
|
mhotwagner/northpole
|
7d904d919aeb6a36549750ee0700578246896691
|
[
"MIT"
] | null | null | null |
northpole/settings/local_staging.py
|
mhotwagner/northpole
|
7d904d919aeb6a36549750ee0700578246896691
|
[
"MIT"
] | null | null | null |
northpole/settings/local_staging.py
|
mhotwagner/northpole
|
7d904d919aeb6a36549750ee0700578246896691
|
[
"MIT"
] | null | null | null |
from .base import *
from dotenv import load_dotenv
load_dotenv(dotenv_path='northpole/.staging.env', verbose=True)
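# Staging overrides: accept any host, send e-mail to the console, and point
# Postgres at whatever credentials .staging.env provides.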
ALLOWED_HOSTS = ['*']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('POSTGRES_DB', 'northpole-staging'),
'USER': os.getenv('POSTGRES_USER'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD'),
'HOST': os.getenv('POSTGRES_HOST'),
'PORT': os.getenv('POSTGRES_PORT', '5432'),
}
}
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, '..', 'static_source'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')  # the missing comma silently concatenated '..' and 'media' into '..media'
MEDIA_URL = '/media/'
| 25.433333
| 64
| 0.651376
| 91
| 763
| 5.252747
| 0.494505
| 0.083682
| 0.167364
| 0.087866
| 0.148536
| 0.148536
| 0
| 0
| 0
| 0
| 0
| 0.007837
| 0.163827
| 763
| 29
| 65
| 26.310345
| 0.741379
| 0
| 0
| 0
| 0
| 0
| 0.352556
| 0.138925
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.047619
| 0.095238
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd836f4eaf2d0f0894b304e6d9d109cacae91338
| 12,587
|
py
|
Python
|
bc4py/bip32/bip32.py
|
namuyan/bc4py
|
6484d356096261d0d57e9e1f5ffeae1f9a9865f3
|
[
"MIT"
] | 12
|
2018-09-19T14:02:09.000Z
|
2020-01-27T16:20:14.000Z
|
bc4py/bip32/bip32.py
|
kumacoinproject/bc4py
|
6484d356096261d0d57e9e1f5ffeae1f9a9865f3
|
[
"MIT"
] | 1
|
2020-03-19T16:57:30.000Z
|
2020-03-19T16:57:30.000Z
|
bc4py/bip32/bip32.py
|
namuyan/bc4py
|
6484d356096261d0d57e9e1f5ffeae1f9a9865f3
|
[
"MIT"
] | 6
|
2018-11-13T17:20:14.000Z
|
2020-02-15T11:46:52.000Z
|
#!/usr/bin/env python
#
# Copyright 2014 Corgan Labs
# See LICENSE.txt for distribution terms
#
from bc4py.bip32.base58 import check_decode, check_encode
from bc4py_extension import PyAddress
from ecdsa.curves import SECP256k1
from ecdsa.keys import SigningKey, VerifyingKey, square_root_mod_prime as mod_sqrt
from ecdsa.ecdsa import generator_secp256k1, int_to_string
from ecdsa.ellipticcurve import Point, INFINITY
from os import urandom
import hmac
import hashlib
import codecs
import struct
CURVE_GEN = generator_secp256k1 # Point class
CURVE_ORDER = CURVE_GEN.order() # int
FIELD_ORDER = SECP256k1.curve.p() # int
MIN_ENTROPY_LEN = 128 # bits
BIP32_HARDEN = 0x80000000 # choose from hardened set of child keys
EX_MAIN_PRIVATE = [codecs.decode('0488ade4', 'hex')] # Version strings for mainnet extended private keys
EX_MAIN_PUBLIC = [codecs.decode('0488b21e', 'hex'),
codecs.decode('049d7cb2', 'hex')] # Version strings for mainnet extended public keys
EX_TEST_PRIVATE = [codecs.decode('04358394', 'hex')] # Version strings for testnet extended private keys
EX_TEST_PUBLIC = [codecs.decode('043587CF', 'hex')] # Version strings for testnet extended public keys
WALLET_VERSION = b'\x80'
class Bip32(object):
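# A BIP32 hierarchical-deterministic key: an ECDSA keypair (or public half
# only) plus chain code, with child derivation via CKDpriv/CKDpub.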
__slots__ = ("secret", "public", "chain", "depth", "index", "parent_fpr", "path")
def __init__(self, secret, public, chain, depth, index, fpr, path):
self.secret: SigningKey = secret
self.public: VerifyingKey = public
self.chain: bytes = chain
self.depth: int = depth
self.index: int = index
self.parent_fpr: bytes = fpr
self.path: str = path
def __repr__(self):
key_type = "PUB" if self.secret is None else "SEC"
return "<BIP32-{} depth={} path={}>".format(key_type, self.depth, self.path)
@classmethod
def from_entropy(cls, entropy, is_public=False):
"""Create a BIP32Key using supplied entropy >= MIN_ENTROPY_LEN"""
if entropy is None:
entropy = urandom(MIN_ENTROPY_LEN // 8) # Python doesn't have os.random()
if not len(entropy) >= MIN_ENTROPY_LEN // 8:
raise ValueError("Initial entropy %i must be at least %i bits" % (len(entropy), MIN_ENTROPY_LEN))
i64 = hmac.new(b"Bitcoin seed", entropy, hashlib.sha512).digest()
il, ir = i64[:32], i64[32:]
# FIXME test Il for 0 or less than SECP256k1 prime field order
secret = SigningKey.from_string(il, SECP256k1)
public = secret.verifying_key
if is_public:
return cls(secret=None, public=public, chain=ir, depth=0, index=0, fpr=b'\0\0\0\0', path='m')
else:
return cls(secret=secret, public=public, chain=ir, depth=0, index=0, fpr=b'\0\0\0\0', path='m')
@classmethod
def from_extended_key(cls, key, is_public=False):
"""
Create a BIP32Key by importing from extended private or public key string
If public is True, return a public-only key regardless of input type.
"""
# Sanity checks
if isinstance(key, str):
raw = check_decode(key)
else:
raw = b'\x00\x00\x00\x00' + key
if len(raw) != 78:
raise ValueError("extended key format wrong length")
# Verify address version/type
version = raw[:4]
if version == b'\x00\x00\x00\x00':
is_testnet = None
is_pubkey = None
elif version in EX_MAIN_PRIVATE:
is_testnet = False
is_pubkey = False
elif version in EX_TEST_PRIVATE:
is_testnet = True
is_pubkey = False
elif version in EX_MAIN_PUBLIC:
is_testnet = False
is_pubkey = True
elif version in EX_TEST_PUBLIC:
is_testnet = True
is_pubkey = True
else:
raise ValueError("unknown extended key version")
# Extract remaining fields
depth = raw[4]
fpr = raw[5:9]
child = struct.unpack(">L", raw[9:13])[0]
chain = raw[13:45]
data = raw[45:78]
# check prefix of key
is_pubkey = (data[0] == 2 or data[0] == 3)
# Extract private key or public key point
if not is_pubkey:
secret = SigningKey.from_string(data[1:], SECP256k1)
public = secret.verifying_key
else:
# Recover public curve point from compressed key
# Python3 FIX
lsb = data[0] & 1 if type(data[0]) == int else ord(data[0]) & 1
x = int.from_bytes(data[1:], 'big')
ys = (x**3 + 7) % FIELD_ORDER # y^2 = x^3 + 7 mod p
y = mod_sqrt(ys, FIELD_ORDER)
if y & 1 != lsb:
y = FIELD_ORDER - y
secret = None
point = Point(SECP256k1.curve, x, y)
public = VerifyingKey.from_public_point(point, SECP256k1)
if not is_pubkey and is_public:
return cls(secret=None, public=public, chain=chain, depth=depth, index=child, fpr=fpr, path='m')
else:
return cls(secret=secret, public=public, chain=chain, depth=depth, index=child, fpr=fpr, path='m')
# Internal methods not intended to be called externally
def _hmac(self, data):
"""
Calculate the HMAC-SHA512 of input data using the chain code as key.
Returns a tuple of the left and right halves of the HMAC
"""
i64 = hmac.new(self.chain, data, hashlib.sha512).digest()
return i64[:32], i64[32:]
def CKDpriv(self, i):
"""
Create a child key of index 'i'.
If the most significant bit of 'i' is set, then select from the
hardened key set, otherwise, select a regular child key.
Returns a BIP32Key constructed with the child key parameters,
or None if i index would result in an invalid key.
"""
# Index as bytes, BE
i_str = struct.pack(">L", i)
# Data to HMAC
if i & BIP32_HARDEN:
data = b'\0' + self.get_private_key() + i_str
path = self.path + '/' + str(i % BIP32_HARDEN) + '\''
else:
data = self.get_public_key() + i_str
path = self.path + '/' + str(i)
# Get HMAC of data
(Il, Ir) = self._hmac(data)
# Construct new key material from Il and current private key
Il_int = int.from_bytes(Il, 'big')
if Il_int > CURVE_ORDER:
return None
sec_int = int.from_bytes(self.secret.to_string(), 'big')
k_int = (Il_int + sec_int) % CURVE_ORDER
if k_int == 0:
return None
# Construct and return a new BIP32Key
secret = SigningKey.from_string(int_to_string(k_int), SECP256k1)
public = secret.verifying_key
return Bip32(secret=secret, public=public, chain=Ir, depth=self.depth + 1, index=i, fpr=self.fingerprint(), path=path)
def CKDpub(self, i):
"""
Create a publicly derived child key of index 'i'.
If the most significant bit of 'i' is set, this is
an error.
Returns a BIP32Key constructed with the child key parameters,
or None if index would result in invalid key.
"""
if i & BIP32_HARDEN:
raise Exception("Cannot create a hardened child key using public child derivation")
# Data to HMAC. Same as CKDpriv() for public child key.
data = self.get_public_key() + struct.pack(">L", i)
# Get HMAC of data
(Il, Ir) = self._hmac(data)
# Construct curve point Il*G+K
Il_int = int.from_bytes(Il, 'big')
if Il_int >= CURVE_ORDER:
return None
point = Il_int*CURVE_GEN + self.public.pubkey.point
if point == INFINITY:
return None
public = VerifyingKey.from_public_point(point, SECP256k1)
# Construct and return a new BIP32Key
path = self.path + '/' + str(i)
return Bip32(
secret=None, public=public, chain=Ir, depth=self.depth + 1, index=i, fpr=self.fingerprint(), path=path)
def child_key(self, i):
"""
Create and return a child key of this one at index 'i'.
The index 'i' should be summed with BIP32_HARDEN to indicate
to use the private derivation algorithm.
"""
if self.secret is None:
return self.CKDpub(i)
else:
return self.CKDpriv(i)
def get_private_key(self) -> bytes:
if self.secret is None:
raise Exception("Publicly derived deterministic keys have no private half")
else:
return self.secret.to_string()
def get_public_key(self):
point: Point = self.public.pubkey.point
if point.y() & 1:
return b'\3' + int_to_string(point.x())
else:
return b'\2' + int_to_string(point.x())
def get_address(self, hrp, ver) -> PyAddress:
"""Return bech32 compressed address"""
return PyAddress.from_param(hrp, ver, self.identifier())
def identifier(self):
"""Return key identifier as string"""
pk = self.get_public_key()
return hashlib.new('ripemd160', hashlib.sha256(pk).digest()).digest()
def fingerprint(self):
"""Return key fingerprint as string"""
return self.identifier()[:4]
def extended_key(self, is_private=True, encoded=True, is_testnet=False):
"""Return extended private or public key as string, optionally base58 encoded"""
if self.secret is None and is_private is True:
raise Exception("Cannot export an extended private key from a public-only deterministic key")
if is_testnet:
version = EX_TEST_PRIVATE[0] if is_private else EX_TEST_PUBLIC[0]
else:
version = EX_MAIN_PRIVATE[0] if is_private else EX_MAIN_PUBLIC[0]
depth = self.depth.to_bytes(1, 'big')
fpr = self.parent_fpr
child = struct.pack('>L', self.index)
chain = self.chain
if self.secret is None or is_private is False:
# startswith b'\x02' or b'\x03'
data = self.get_public_key()
else:
# startswith b'\x00'
data = b'\x00' + self.get_private_key()
if encoded:
return check_encode(version + depth + fpr + child + chain + data)
else:
return depth + fpr + child + chain + data
def wallet_import_format(self, prefix=WALLET_VERSION):
"""Returns private key encoded for wallet import"""
if self.secret is None:
raise Exception("Publicly derived deterministic keys have no private half")
raw = prefix + self.get_private_key() + b'\x01' # Always compressed
return check_encode(raw)
def dump(self):
"""Dump key fields mimicking the BIP0032 test vector format"""
print(" * Identifier")
print(" * (hex): ", self.identifier().hex())
print(" * (fpr): ", self.fingerprint().hex())
print(" * (main addr):", self.get_address('bc', 0))
print(" * (path): ", self.path)
if self.secret:
print(" * Secret key")
print(" * (hex): ", self.get_private_key().hex())
print(" * (wif): ", self.wallet_import_format())
print(" * Public key")
print(" * (hex): ", self.get_public_key().hex())
print(" * Chain code")
print(" * (hex): ", self.chain.hex())
print(" * Serialized")
print(" * (pub hex): ", self.extended_key(is_private=False, encoded=False).hex())
print(" * (pub b58): ", self.extended_key(is_private=False, encoded=True))
if self.secret:
print(" * (prv hex): ", self.extended_key(is_private=True, encoded=False).hex())
print(" * (prv b58): ", self.extended_key(is_private=True, encoded=True))
def parse_bip32_path(path):
"""parse BIP32 format"""
r = list()
for s in path.split('/'):
if s == 'm':
continue
elif s.endswith("'") or s.endswith('h'):
r.append(int(s[:-1]) + BIP32_HARDEN)
else:
r.append(int(s))
return r
def struct_bip32_path(path):
"""struct BIP32 string path"""
s = 'm'
for p in path:
if p & BIP32_HARDEN:
s += "/{}'".format(p % BIP32_HARDEN)
else:
s += "/{}".format(p)
return s
__all__ = [
"BIP32_HARDEN",
"Bip32",
"parse_bip32_path",
"struct_bip32_path",
]
| 37.573134
| 126
| 0.593072
| 1,658
| 12,587
| 4.378166
| 0.1731
| 0.016531
| 0.013225
| 0.011572
| 0.330762
| 0.237085
| 0.192451
| 0.142306
| 0.136107
| 0.128668
| 0
| 0.03263
| 0.296337
| 12,587
| 334
| 127
| 37.685629
| 0.786948
| 0.186542
| 0
| 0.212389
| 0
| 0.017699
| 0.097225
| 0
| 0
| 0
| 0.001005
| 0.002994
| 0
| 1
| 0.079646
| false
| 0
| 0.057522
| 0
| 0.256637
| 0.088496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd865fa7395cf48130baac47f65fb9a0acdb8fa6
| 1,378
|
py
|
Python
|
etapa 2/gaussJacobi.py
|
jlucartc/MetodosNumericos20182
|
d5610b95945ed6ec9b9bae6cd96672f4d616c1b9
|
[
"MIT"
] | null | null | null |
etapa 2/gaussJacobi.py
|
jlucartc/MetodosNumericos20182
|
d5610b95945ed6ec9b9bae6cd96672f4d616c1b9
|
[
"MIT"
] | null | null | null |
etapa 2/gaussJacobi.py
|
jlucartc/MetodosNumericos20182
|
d5610b95945ed6ec9b9bae6cd96672f4d616c1b9
|
[
"MIT"
] | null | null | null |
import numpy as np
from sympy import *
from math import *
from timeit import default_timer as timer
start = None
end = None
def maxXi(Xn,X):
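# Convergence test: largest relative change between successive iterates,
# normalized by the largest-magnitude entry of the new iterate Xn.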
n = None
d = None
for i in range(Xn.shape[0]):
if(np.copy(Xn[i,0]) != 0):
nk = abs(np.copy(Xn[i,0]) - np.copy(X[i,0]))/abs(np.copy(Xn[i,0]))
dk = abs(np.copy(Xn[i,0]))
if n is None or nk > n:
n = nk
if d is None or dk > d:
d = dk
return n/d
A = np.matrix(eval(input("Enter a matrix: ")))
A = A.astype(float)
X = np.matrix(eval(input("Enter X: ")))
e = float(input("Enter the precision: "))
B = np.copy(A[:,A.shape[1]-1])
A = np.delete(np.copy(A),A.shape[1]-1,1)
C = np.asmatrix(np.zeros([A.shape[0],A.shape[1]]))
C = C.astype(float)
G = np.copy(B)
for i in range(C.shape[0]):
for j in range(C.shape[1]):
if i != j:
C[i,j] = (np.copy(A[i,j])/np.copy(A[i,i]))*(-1)
G[i,0] = (np.copy(G[i,0]))/(np.copy(A[i,i]))
C[i,i] = 0
Xn = None
z = True
print("Matriz C:\n",C)
print("Matriz G:\n",G)
start = timer()
while(z):
Xn = (np.copy(C) @ np.copy(X)) + np.copy(G)
d = maxXi(np.copy(Xn),np.copy(X))
if(d < e):
z = False
else:
X = np.copy(Xn)
end = timer()
print("Resposta de Gauss-Jacobi: ")
print(Xn)
print("Tempo de execucao total: %e segundos" % (end - start))
| 18.621622
| 78
| 0.523948
| 262
| 1,378
| 2.751908
| 0.248092
| 0.149792
| 0.066574
| 0.049931
| 0.230236
| 0.122053
| 0.041609
| 0
| 0
| 0
| 0
| 0.019666
| 0.261974
| 1,378
| 73
| 79
| 18.876712
| 0.689282
| 0
| 0
| 0
| 0
| 0
| 0.097242
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.081633
| 0
| 0.122449
| 0.102041
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd8770a9a9b49ceb88698ef2075f53487bd2aca7
| 8,139
|
py
|
Python
|
custom_libs/Project2/plotter.py
|
drkostas/COSC522
|
5731576301daf99ca7c3d382fe3ea8b1398008ff
|
[
"MIT"
] | 1
|
2021-12-22T14:29:42.000Z
|
2021-12-22T14:29:42.000Z
|
custom_libs/Project2/plotter.py
|
drkostas/COSC522
|
5731576301daf99ca7c3d382fe3ea8b1398008ff
|
[
"MIT"
] | 3
|
2021-10-13T02:14:30.000Z
|
2021-11-24T05:28:32.000Z
|
custom_libs/Project2/plotter.py
|
drkostas/COSC522
|
5731576301daf99ca7c3d382fe3ea8b1398008ff
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
class Plotter:
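# Plotting routines for Project 2: dataset overview plots, kNN accuracy
# curves, decision boundaries, and clustering membership-change curves.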
synth_tr: np.ndarray
synth_te: np.ndarray
pima_tr: np.ndarray
pima_te: np.ndarray
def __init__(self, synth_tr: np.ndarray, synth_te: np.ndarray, pima_tr: np.ndarray,
pima_te: np.ndarray):
self.synth_tr = synth_tr
self.synth_te = synth_te
self.pima_tr = pima_tr
self.pima_te = pima_te
def plot_dataset(self):
fig, ax = plt.subplots(1, 3, figsize=(11, 4))
plot_color = 'dodgerblue'
# synth_tr f1-f2 Scatter Plot
ax[0].scatter(self.synth_tr[:, 0][self.synth_tr[:, -1] == 0],
self.synth_tr[:, 1][self.synth_tr[:, -1] == 0],
color='royalblue', s=12, marker='o', label="Class 0")
ax[0].scatter(self.synth_tr[:, 0][self.synth_tr[:, -1] == 1],
self.synth_tr[:, 1][self.synth_tr[:, -1] == 1],
color='red', s=12, marker='o', label="Class 1")
ax[0].margins(0.1)  # 10% padding in all directions
ax[0].set_title("Synth Dataset Scatter Plot")
ax[0].set_xlabel("Feature 1")
ax[0].set_ylabel("Feature 2")
ax[0].legend()
ax[0].grid(True)
# f1 Hist
hist, bins, patches = ax[1].hist(self.synth_tr[:, 0], density=True, bins=20, color=plot_color,
edgecolor='black',
linewidth=0.5) # density=False would make counts
ax[1].set_title("Synth Dataset Density Histogram")
ax[1].set_xlabel("Feature 1")
ax[1].set_ylabel("Density")
ax[1].margins(0.1)  # 10% padding in all directions
# f2 Hist
hist, bins, patches = ax[2].hist(self.synth_tr[:, 1], density=True, bins=20, color=plot_color,
edgecolor='black',
linewidth=0.5) # density=False would make counts
ax[2].set_title("Synth Dataset Density Histogram")
ax[2].set_xlabel("Feature 2")
ax[2].set_ylabel("Density")
ax[2].margins(0.1)  # 10% padding in all directions
fig.tight_layout()
fig.show()
@staticmethod
def plot_knn_overall_accuracies(synth_k_range, synth_accuracies, pima_k_range, pima_accuracies):
fig, ax = plt.subplots(2, 1, figsize=(9, 9))
# Synth Dataset
ax[0].plot(synth_k_range, synth_accuracies, label='Synthetic Dataset', color='deepskyblue')
ax[0].set_title('Overall Classification accuracy vs k for the Synthetic Dataset')
ax[0].set_xlabel('k')
ax[0].set_ylabel('Overall Classification Accuracy')
_ = ax[0].set_xticks(synth_k_range)
ax[0].legend()
# Pima Dataset
ax[1].plot(pima_k_range, pima_accuracies, label='Pima Dataset', color='orange')
ax[1].set_title('Overall Classification accuracy vs k for the Pima Dataset')
ax[1].set_xlabel('k')
ax[1].set_ylabel('Overall Classification Accuracy')
_ = ax[1].set_xticks(pima_k_range)
ax[1].legend()
# Show plot
fig.tight_layout()
fig.show()
@staticmethod
def plot_decision_boundaries(knn, h: float = 0.2):
# Init values statically from Project 1
a_eucl = -0.8326229483927666
b_eucl = 0.44378197841356054
a_maha = -0.13486408662390306
b_maha = 0.49454949088419903
A = -2.9353736949690252
B = -7.122064910873636
C = -9.131232270572491
D = -4.023021305932989
E = 29.777685196099192
F = -14.251862334038359
means = np.array([[-0.22147024, 0.32575494], [0.07595431, 0.68296891]])
means_center = np.array([-0.07275796159999995, 0.5043619269200001])
a_m = 1.2010238270880302
b_m = 0.591745972411956
# Plot the Decision Boundaries
fig, ax = plt.subplots(1, 1, figsize=(11, 9))
eucl_x_range = np.linspace(-0.8, 0.9, 50)
maha_x_range = np.linspace(-1, 1, 50)
quadr_x_range = np.linspace(-1.1, 1.1, 50)
quadr_y_range = np.linspace(-0.2, 1.1, 50)
# KNN Decision Boundaries
cmap_light = ListedColormap(['lightblue', 'moccasin'])
# KNN Decision Boundaries
x, y = knn.train_x, knn.train_y
x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
x_target = np.c_[xx.ravel(), yy.ravel()]
Z = knn.predict(x_target, only_x=True)
Z = Z.reshape(xx.shape)
knn_contour_plot = ax.contourf(xx, yy, Z, cmap=cmap_light)
# Class 0 Scatter plot
ax.scatter(x[:, 0][y == 0], x[:, 1][y == 0],
color='royalblue', s=10, label='Class 0')
# Class 1 Scatter plot
ax.scatter(x[:, 0][y == 1], x[:, 1][y == 1],
color='red', s=10, label='Class 1')
# Decision Boundaries
# Euclidean
ax.plot(eucl_x_range, a_eucl * eucl_x_range + b_eucl, color='orange',
label=f'Euclidean Decision Boundary')
# Mahalanobis
ax.plot(maha_x_range, a_maha * maha_x_range + b_maha, color='deepskyblue',
label=f'Mahalanobis Decision Boundary')
# Quadratic
x_quad, y_quad = np.meshgrid(quadr_x_range, quadr_y_range)
quadr_equation = A * x_quad ** 2 + B * y_quad ** 2 + C * x_quad * y_quad + D * x_quad + E * y_quad + F
quad_contour_plt = ax.contour(x_quad, y_quad, quadr_equation, [0],
colors='limegreen')
ax.clabel(quad_contour_plt, inline=1, fontsize=10)
quad_contour_plt.collections[0].set_label('Quadratic Decision Boundary')
# Line that links the means of the two classes
mline_x_range = np.linspace(means[0][0], means[1][0], 5)
ax.plot(mline_x_range, a_m * mline_x_range + b_m,
color='m', linestyle='dashed', label='Line linking the two means')
# Class 0 Mean value
ax.plot(means[0][0], means[0][1],
'bo', markersize=11, markeredgecolor='w', label='Class 0 Mean value')
# Class 1 Mean value
ax.plot(means[1][0], means[1][1],
'ro', markersize=11, markeredgecolor='w', label='Class 1 Mean value')
# Center of the linking line
ax.plot(means_center[0], means_center[1],
'mo', markersize=11, markeredgecolor='w',
label=f'Center of the linking line')
# Show figure
ax.set_title(
"The three Decision Boundaries plotted against the scatter plot of the two features")
# ax.axis('equal')
ax.set_xlim(-1.35, 1.3)
ax.set_ylim(-0.35, 1.15)
ax.set_xlabel("Feature 1")
ax.set_ylabel("Feature 2")
ax.legend(loc='upper left')
# ax.margins(0.1)
fig.show()
@staticmethod
def plot_membership_changes(kmeans_membership_changes, wta_membership_changes, epsilon):
fig, ax = plt.subplots(2, 1, figsize=(9, 9))
# Pima, Kmeans
kmeans_range = range(2, len(kmeans_membership_changes)+2)
ax[0].plot(kmeans_range, kmeans_membership_changes,
label=f'Kmeans', color='deepskyblue')
ax[0].set_title('Membership Changes per epoch for Kmeans on Pima Dataset')
ax[0].set_xlabel('Epoch')
ax[0].set_ylabel('Membership Changes')
_ = ax[0].set_xticks(kmeans_range)
ax[0].legend()
# Pima, WTA
wta_range = range(2, len(wta_membership_changes) + 2)
ax[1].plot(wta_range, wta_membership_changes,
label=f'WTA: epsilon={epsilon}', color='orange')
ax[1].set_title('Membership Changes per epoch for WTA on Pima Dataset')
ax[1].set_xlabel('Epoch')
ax[1].set_ylabel('Membership Changes')
_ = ax[1].set_xticks(wta_range)
ax[1].legend()
# Show plot
fig.tight_layout()
fig.show()
| 44.966851
| 110
| 0.578327
| 1,128
| 8,139
| 4.003546
| 0.185284
| 0.013286
| 0.029229
| 0.018601
| 0.416962
| 0.295616
| 0.219442
| 0.176484
| 0.131532
| 0.099203
| 0
| 0.082672
| 0.288119
| 8,139
| 181
| 111
| 44.966851
| 0.696755
| 0.077159
| 0
| 0.143836
| 0
| 0
| 0.130882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034247
| false
| 0
| 0.020548
| 0
| 0.089041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd89017afbf663624d11e9b8f48f90440b465747
| 27,270
|
py
|
Python
|
connector/binance/websockets.py
|
firebird631/siis
|
8d64e8fb67619aaa5c0a62fda9de51dedcd47796
|
[
"PostgreSQL"
] | null | null | null |
connector/binance/websockets.py
|
firebird631/siis
|
8d64e8fb67619aaa5c0a62fda9de51dedcd47796
|
[
"PostgreSQL"
] | null | null | null |
connector/binance/websockets.py
|
firebird631/siis
|
8d64e8fb67619aaa5c0a62fda9de51dedcd47796
|
[
"PostgreSQL"
] | null | null | null |
# @date 2020-01-31
# @author Frederic Scherma, All rights reserved without prejudices.
# @license Copyright (c) 2020 Dream Overflow
# Binance Websocket connector.
import json
import threading
import traceback
from autobahn.twisted.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
from twisted.internet import ssl, reactor
from twisted.internet.protocol import ReconnectingClientFactory
from connector.binance.client import Client
from monitor.service import MonitorService
import logging
logger = logging.getLogger('siis.connector.binance.ws')
error_logger = logging.getLogger('siis.error.connector.binance.ws')
traceback_logger = logging.getLogger('siis.traceback.connector.binance.ws')
class BinanceClientProtocol(WebSocketClientProtocol):
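# On every (re)connect, onConnect re-sends one SUBSCRIBE frame covering all
# registered <pair>@<stream> topics, so subscriptions survive reconnects.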
def __init__(self, factory):
super().__init__()
self.factory = factory
def onOpen(self):
self.factory.protocol_instance = self
def onConnect(self, response):
subscriptions = self.factory.subscriptions
if subscriptions:
params = []
rid = 1
for subscription, pair in subscriptions.items():
if pair:
params += ["%s@%s" % (p.lower(), subscription) for p in pair]
# else:
# params.append(subscription)
data = {
"method": "SUBSCRIBE",
"params": params,
"id": rid
}
if params:
logger.debug("onConnect %s" % data)
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
self.sendMessage(payload, isBinary=False)
else:
logger.debug("onConnect %s" % '/'.join(subscriptions.keys()))
# reset the delay after reconnecting
self.factory.resetDelay()
def onMessage(self, payload, isBinary):
if not isBinary:
try:
payload_obj = json.loads(payload.decode('utf8'))
except ValueError:
pass
else:
try:
self.factory.callback(payload_obj)
except Exception as e:
error_logger.error(repr(e))
traceback_logger.error(traceback.format_exc())
# def connectionLost(self, reason):
# WebSocketClientProtocol.connectionLost(self, reason)
# subs = '/'.join(self.factory.subscriptions.keys())
# error_logger.error("Binance WS public connection lost for %s: Reason is %s" % (subs, reason))
class BinanceReconnectingClientFactory(ReconnectingClientFactory):
# set initial delay to a short time
initialDelay = 0.1
maxDelay = 10
maxRetries = 30
class BinanceClientFactory(WebSocketClientFactory, BinanceReconnectingClientFactory):
protocol = BinanceClientProtocol
_reconnect_error_payload = {
'e': 'error',
'm': 'Max reconnect retries reached'
}
def __init__(self, *args, subscription=None, pair=None, **kwargs):
WebSocketClientFactory.__init__(self, *args, **kwargs)
self.protocol_instance = None
self.base_client = None
# active pairs
self.subscriptions = {}
if subscription:
self.subscriptions[subscription] = set(pair or [])
def clientConnectionFailed(self, connector, reason):
if not self.reconnect:
return
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def clientConnectionLost(self, connector, reason):
if not self.reconnect:
return
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def buildProtocol(self, addr):
return BinanceClientProtocol(self)
class BinanceSocketManager(threading.Thread):
"""
Binance spot and futures WS socket and subscription manager.
@todo Reuse the same connection for multiplex to avoid multiple sockets (have to do like in the kraken WS).
Also have to be sure to stay connected after 24h.
"""
STREAM_URL = 'wss://stream.binance.com:9443/'
FUTURES_STREAM_URL = 'wss://fstream.binance.com/'
# FUTURES_STREAM_URL = 'wss://fstream3.binance.com'
WEBSOCKET_DEPTH_5 = '5'
WEBSOCKET_DEPTH_10 = '10'
WEBSOCKET_DEPTH_20 = '20'
DEFAULT_USER_TIMEOUT = 30 * 60 # 30 minutes
def __init__(self, client, user_timeout=DEFAULT_USER_TIMEOUT, futures=False):
"""Initialise the BinanceSocketManager
:param client: Binance API client
:type client: binance.Client
:param user_timeout: Custom websocket timeout
:type user_timeout: int
"""
threading.Thread.__init__(self, name="binance-ws")
self._next_id = 2 # 1 is for connect
self.factories = {}
self._conns = {}
self._user_timer = None
self._user_listen_key = None
self._user_callback = None
self._client = client
self._user_timeout = user_timeout
self._future = futures
self._url = BinanceSocketManager.FUTURES_STREAM_URL if futures else BinanceSocketManager.STREAM_URL
def _start_socket(self, id_, path, callback, prefix='ws/', subscription=None, pair=None):
try:
if id_ in self._conns: # path in self._conns:
return False
factory_url = self._url + prefix + path
factory = BinanceClientFactory(factory_url, subscription=subscription, pair=pair)
factory.base_client = self
factory.protocol = BinanceClientProtocol
factory.callback = callback
factory.reconnect = True
self.factories[id_] = factory
context_factory = ssl.ClientContextFactory()
# self._conns[path] = reactor.connectSSL(factory_url, 443 if self._future else 9443, factory,
# context_factory, 5.0)
# self._conns[path] = connectWS(factory, context_factory)
self._conns[id_] = connectWS(factory, context_factory)
except Exception as e:
logger.error(repr(e))
return path
def start_depth_socket(self, symbol, callback, depth=None):
"""Start a websocket for symbol market depth returning either a diff or a partial book
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
        :param depth: optional number of depth entries to return (default None). If given, a partial book is returned instead of a diff
:type depth: str
:returns: connection key string if successful, False otherwise
Partial Message Format
.. code-block:: python
{
"lastUpdateId": 160, # Last update ID
"bids": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"asks": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
Diff Message Format
.. code-block:: python
{
"e": "depthUpdate", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"U": 157, # First update ID in event
"u": 160, # Final update ID in event
"b": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"a": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
"""
socket_name = symbol.lower() + '@depth'
if depth and depth != '1':
socket_name = '{}{}'.format(socket_name, depth)
        # pair must be an iterable of symbols: the factory builds a set from it,
        # and a bare string would be split into single characters
        return self._start_socket(socket_name, socket_name, callback, subscription='depth', pair=[symbol.lower()])
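    # Usage sketch (editor's addition): a depth callback can tell the two
    # payload shapes above apart by the event-type field. `bsm` stands for a
    # running BinanceSocketManager and the helpers are hypothetical:
    #
    #     def on_depth(msg):
    #         if msg.get('e') == 'depthUpdate':    # diff stream
    #             apply_diff(msg['b'], msg['a'])   # hypothetical helper
    #         else:                                # partial book snapshot
    #             rebuild_book(msg['bids'], msg['asks'])
    #
    #     bsm.start_depth_socket('BNBBTC', on_depth, depth=BinanceSocketManager.WEBSOCKET_DEPTH_5)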
def start_kline_socket(self, symbol, callback, interval=Client.KLINE_INTERVAL_1MINUTE):
"""Start a websocket for symbol kline data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#klinecandlestick-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:param interval: Kline interval, default KLINE_INTERVAL_1MINUTE
:type interval: str
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "kline", # event type
"E": 1499404907056, # event time
"s": "ETHBTC", # symbol
"k": {
"t": 1499404860000, # start time of this bar
"T": 1499404919999, # end time of this bar
"s": "ETHBTC", # symbol
"i": "1m", # interval
"f": 77462, # first trade id
"L": 77465, # last trade id
"o": "0.10278577", # open
"c": "0.10278645", # close
"h": "0.10278712", # high
"l": "0.10278518", # low
"v": "17.47929838", # volume
"n": 4, # number of trades
"x": false, # whether this bar is final
"q": "1.79662878", # quote volume
"V": "2.34879839", # volume of active buy
"Q": "0.24142166", # quote volume of active buy
"B": "13279784.01349473" # can be ignored
}
}
"""
socket_name = '{}@kline_{}'.format(symbol.lower(), interval)
        return self._start_socket(socket_name, socket_name, callback, subscription='kline', pair=[symbol.lower()])
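    # Usage sketch (editor's addition): the 'x' flag in the payload marks a
    # finished bar, so a typical callback only persists closed candles. `bsm`
    # is a running BinanceSocketManager and `store_candle` is hypothetical:
    #
    #     def on_kline(msg):
    #         k = msg['k']
    #         if k['x']:  # bar is final
    #             store_candle(k['t'], k['o'], k['h'], k['l'], k['c'], k['v'])
    #
    #     bsm.start_kline_socket('ETHBTC', on_kline, interval=Client.KLINE_INTERVAL_1MINUTE)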
def start_miniticker_socket(self, callback, update_time=1000):
"""Start a miniticker websocket for all trades
        This is not in the official Binance API docs, but this is what
        feeds the right-hand column of a ticker page on Binance.
:param callback: callback function to handle messages
:type callback: function
:param update_time: time between callbacks in milliseconds, must be 1000 or greater
:type update_time: int
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
'e': '24hrMiniTicker', # Event type
'E': 1515906156273, # Event time
's': 'QTUMETH', # Symbol
'c': '0.03836900', # close
'o': '0.03953500', # open
'h': '0.04400000', # high
'l': '0.03756000', # low
'v': '147435.80000000', # volume
'q': '5903.84338533' # quote volume
}
]
"""
return self._start_socket('!miniTicker', '!miniTicker@arr@{}ms'.format(update_time), callback,
subscription='!miniTicker')
def start_trade_socket(self, symbol, callback):
"""Start a websocket for symbol trade data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#trade-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "trade", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"t": 12345, # Trade ID
"p": "0.001", # Price
"q": "100", # Quantity
"b": 88, # Buyer order Id
"a": 50, # Seller order Id
"T": 123456785, # Trade time
"m": true, # Is the buyer the market maker?
"M": true # Ignore.
}
"""
        return self._start_socket(symbol.lower() + '@trade', symbol.lower() + '@trade', callback,
                                  subscription='trade', pair=[symbol.lower()])
def start_aggtrade_socket(self, symbol, callback):
"""Start a websocket for symbol trade data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#aggregate-trade-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "aggTrade", # event type
"E": 1499405254326, # event time
"s": "ETHBTC", # symbol
"a": 70232, # aggregated tradeid
"p": "0.10281118", # price
"q": "8.15632997", # quantity
"f": 77489, # first breakdown trade id
"l": 77489, # last breakdown trade id
"T": 1499405254324, # trade time
"m": false, # whether buyer is a maker
"M": true # can be ignored
}
"""
        return self._start_socket(symbol.lower() + '@aggTrade', symbol.lower() + '@aggTrade', callback,
                                  subscription='aggTrade', pair=[symbol.lower()])
def start_symbol_ticker_socket(self, symbol, callback):
"""Start a websocket for a symbol's ticker data
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#individual-symbol-ticker-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
{
"e": "24hrTicker", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"p": "0.0015", # Price change
"P": "250.00", # Price change percent
"w": "0.0018", # Weighted average price
"x": "0.0009", # Previous day's close price
"c": "0.0025", # Current day's close price
"Q": "10", # Close trade's quantity
"b": "0.0024", # Best bid price
"B": "10", # Bid bid quantity
"a": "0.0026", # Best ask price
"A": "100", # Best ask quantity
"o": "0.0010", # Open price
"h": "0.0025", # High price
"l": "0.0010", # Low price
"v": "10000", # Total traded base asset volume
"q": "18", # Total traded quote asset volume
"O": 0, # Statistics open time
"C": 86400000, # Statistics close time
"F": 0, # First trade ID
"L": 18150, # Last trade Id
"n": 18151 # Total number of trades
}
"""
        return self._start_socket(symbol.lower() + '@ticker', symbol.lower() + '@ticker', callback,
                                  subscription='ticker', pair=[symbol.lower()])
def start_ticker_socket(self, callback):
"""Start a websocket for all ticker data
By default all markets are included in an array.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#all-market-tickers-stream
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
[
{
'F': 278610,
'o': '0.07393000',
's': 'BCCBTC',
'C': 1509622420916,
'b': '0.07800800',
'l': '0.07160300',
'h': '0.08199900',
'L': 287722,
'P': '6.694',
'Q': '0.10000000',
'q': '1202.67106335',
'p': '0.00494900',
'O': 1509536020916,
'a': '0.07887800',
'n': 9113,
'B': '1.00000000',
'c': '0.07887900',
'x': '0.07399600',
'w': '0.07639068',
'A': '2.41900000',
'v': '15743.68900000'
}
]
"""
return self._start_socket('!ticker@arr', '!ticker@arr', callback, subscription='!ticker@arr')
def start_book_ticker_socket(self, callback):
"""Start a websocket for all book ticker data
By default all markets are included in an array.
https://binance-docs.github.io/apidocs/futures/en/#all-market-tickers-streams
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format
.. code-block:: python
        [
            {
                "u": 400900217,     # order book updateId
                "s": "BNBUSDT",     # symbol
                "b": "25.35190000", # best bid price
                "B": "31.21000000", # best bid qty
                "a": "25.36520000", # best ask price
                "A": "40.66000000"  # best ask qty
            }
        ]
"""
return self._start_socket('!bookTicker', '!bookTicker', callback, prefix="stream?streams=",
subscription='!bookTicker')
# def start_multiplex_socket(self, streams, callback):
# """Start a multiplexed socket using a list of socket names.
# User stream sockets can not be included.
#
# Symbols in socket name must be lowercase i.e bnbbtc@aggTrade, neobtc@ticker
#
# Combined stream events are wrapped as follows: {"stream":"<streamName>","data":<rawPayload>}
#
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md
#
# :param streams: list of stream names in lower case
# :type streams: list
# :param callback: callback function to handle messages
# :type callback: function
#
# :returns: connection key string if successful, False otherwise
#
# Message Format - see Binance API docs for all types
#
# """
# stream_path = 'streams={}'.format('/'.join(streams))
# return self._start_socket('multiplex', stream_path, callback, subscription='stream?')
def send_subscribe(self, id_, subscription, pair):
try:
factory = self.factories.get(id_)
if subscription and pair and factory:
if subscription not in factory.subscriptions:
factory.subscriptions[subscription] = set()
factory.subscriptions[subscription].update(pair)
# logger.info("send_subscribe %s / %s" % (id_, factory.protocol_instance))
if factory.protocol_instance:
rid = self._next_id
self._next_id += 1
# logger.info("2 send_subscribe %s" % id_)
data = {
"method": "SUBSCRIBE",
"params": ["%s@%s" % (p.lower(), subscription) for p in pair],
"id": rid
}
# logger.info("send_subscribe %s" % data)
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
factory.protocol_instance.sendMessage(payload, isBinary=False)
except Exception as e:
error_logger.error("%s : %s" % (subscription, repr(e)))
traceback_logger.error(traceback.format_exc())
def send_unsubscribe(self, id_, subscription, pair):
try:
factory = self.factories.get(id_)
if subscription and pair and factory:
if subscription not in factory.subscriptions:
factory.subscriptions[subscription] = set()
factory.subscriptions[subscription] = factory.subscriptions[subscription].difference(pair)
if factory.protocol_instance:
rid = self._next_id
self._next_id += 1
data = {
"method": "UNSUBSCRIBE",
"params": ["%s@%s" % (p.lower(), subscription) for p in pair],
"id": rid
}
payload = json.dumps(data, ensure_ascii=False).encode('utf8')
factory.protocol_instance.sendMessage(payload, isBinary=False)
except Exception as e:
error_logger.error("%s : %s" % (subscription, repr(e)))
traceback_logger.error(traceback.format_exc())
def subscribe_public(self, subscription, pair, callback):
id_ = "_".join([subscription])
if id_ not in self._conns:
# stream_path = 'streams={}'.format('/'.join(subscription))
stream_path = 'streams={}'.format(subscription)
return self._start_socket(subscription, stream_path, callback, subscription=subscription, pair=pair)
else:
reactor.callFromThread(self.send_subscribe, id_, subscription, pair)
def unsubscribe_public(self, subscription, pair):
id_ = "_".join([subscription])
if id_ in self._conns:
reactor.callFromThread(self.send_unsubscribe, id_, subscription, pair)
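    # Usage sketch (editor's addition): subscribe_public() opens one socket per
    # subscription id and then reuses it, sending SUBSCRIBE/UNSUBSCRIBE frames
    # for later pair changes instead of opening new connections. `bsm` and
    # `on_trade` are hypothetical:
    #
    #     bsm.subscribe_public('aggTrade', ['btcusdt'], on_trade)   # opens the socket
    #     bsm.subscribe_public('aggTrade', ['ethusdt'], on_trade)   # SUBSCRIBE frame only
    #     bsm.unsubscribe_public('aggTrade', ['btcusdt'])           # UNSUBSCRIBE frame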
def start_user_socket(self, callback):
"""Start a websocket for user data
https://www.binance.com/restapipub.html#user-wss-endpoint
:param callback: callback function to handle messages
:type callback: function
:returns: connection key string if successful, False otherwise
Message Format - see Binance API docs for all types
"""
# Get the user listen key
user_listen_key = self._client.future_stream_get_listen_key() if self._future else self._client.stream_get_listen_key()
# and start the socket with this specific key
conn_key = self._start_user_socket(user_listen_key, callback)
return conn_key
def _start_user_socket(self, user_listen_key, callback):
# With this function we can start a user socket with a specific key
        if self._user_listen_key:
            # clean up any previous user socket; user sockets are stored
            # under the fixed 'user' id in self._conns
            if 'user' in self._conns:
                self.stop_socket('user')
self._user_listen_key = user_listen_key
self._user_callback = callback
conn_key = self._start_socket('user', self._user_listen_key, callback)
if conn_key:
# start timer to keep socket alive
self._start_user_timer()
return conn_key
def _start_user_timer(self):
self._user_timer = threading.Timer(self._user_timeout, self._keepalive_user_socket)
        self._user_timer.daemon = True
self._user_timer.start()
def _keepalive_user_socket(self):
try:
user_listen_key = self._client.future_stream_get_listen_key() if self._future else self._client.stream_get_listen_key()
except Exception as e:
# very rare exception ConnectTimeout
error_logger.error(repr(e))
# assume unchanged
user_listen_key = self._user_listen_key
        # check whether the key changed
        if user_listen_key != self._user_listen_key:
            # Start a new socket with the received key;
            # `_start_user_socket` automatically cleans up open sockets
            # and restarts the keep-alive timer
self._start_user_socket(user_listen_key, self._user_callback)
else:
            # Restart the timer only if the user listen key has not changed
self._start_user_timer()
def stop_socket(self, conn_key):
"""Stop a websocket given the connection key
:param conn_key: Socket connection key
:type conn_key: string
:returns: connection key string if successful, False otherwise
"""
if conn_key not in self._conns:
return
# disable reconnecting if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(self._url + 'tmp_path')
self._conns[conn_key].disconnect()
del self._conns[conn_key]
        # check whether we just closed the user stream socket
        if conn_key == 'user':
            self._stop_user_socket()
def _stop_user_socket(self):
if not self._user_listen_key:
return
# stop the timer
self._user_timer.cancel()
self._user_timer = None
self._user_listen_key = None
def run(self):
MonitorService.use_reactor(installSignalHandlers=False)
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {}
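# --- Usage sketch (editor's addition, not part of the original module) ------
# A minimal, hypothetical wiring of the manager; the `Client` constructor
# signature and the API credentials are assumptions.
if __name__ == "__main__":
    _client = Client("API_KEY", "API_SECRET")
    _bsm = BinanceSocketManager(_client)
    _bsm.start()  # runs the Twisted reactor in the manager thread
    _bsm.start_kline_socket("BTCUSDT", print)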
| 37.510316
| 136
| 0.541584
| 2,770
| 27,270
| 5.202166
| 0.182671
| 0.01499
| 0.018043
| 0.012977
| 0.402776
| 0.368425
| 0.354337
| 0.338793
| 0.327412
| 0.300139
| 0
| 0.043691
| 0.36212
| 27,270
| 726
| 137
| 37.561983
| 0.784708
| 0.467253
| 0
| 0.333333
| 0
| 0
| 0.047314
| 0.01191
| 0
| 0
| 0
| 0.001377
| 0
| 1
| 0.123457
| false
| 0.004115
| 0.037037
| 0.004115
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd8b45d655ef0b191b537030a3d9f0b1784aa23f
| 772
|
py
|
Python
|
kolibri/core/public/utils.py
|
FollonSaxBass/kolibri
|
4cf820b14386aecc228fecff64c847bad407cbb1
|
[
"MIT"
] | 2
|
2021-05-13T10:20:46.000Z
|
2021-11-15T12:31:03.000Z
|
kolibri/core/public/utils.py
|
camellia26/kolibri
|
7f1cb794c93f37e039be22f56a5ac1989ed22bde
|
[
"MIT"
] | 8
|
2021-05-21T15:31:24.000Z
|
2022-02-24T15:02:14.000Z
|
kolibri/core/public/utils.py
|
camellia26/kolibri
|
7f1cb794c93f37e039be22f56a5ac1989ed22bde
|
[
"MIT"
] | 1
|
2019-10-05T11:14:40.000Z
|
2019-10-05T11:14:40.000Z
|
import platform
from django.core.exceptions import ObjectDoesNotExist
from morango.models import InstanceIDModel
import kolibri
def get_device_info():
"""Returns metadata information about the device"""
instance_model = InstanceIDModel.get_or_create_current_instance()[0]
try:
device_name = kolibri.core.device.models.DeviceSettings.objects.get().name
    # When Kolibri starts for the first time, the device settings may not have been created yet
except ObjectDoesNotExist:
device_name = instance_model.hostname
info = {
"application": "kolibri",
"kolibri_version": kolibri.__version__,
"instance_id": instance_model.id,
"device_name": device_name,
"operating_system": platform.system(),
}
return info
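# Usage sketch (editor's addition): printing the collected metadata when the
# module is run directly. This assumes Django settings are already configured,
# which Kolibri normally does before this module is imported.
if __name__ == "__main__":
    from pprint import pprint
    pprint(get_device_info())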
| 28.592593
| 82
| 0.715026
| 87
| 772
| 6.114943
| 0.563218
| 0.075188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001621
| 0.200777
| 772
| 26
| 83
| 29.692308
| 0.860616
| 0.148964
| 0
| 0
| 0
| 0
| 0.109063
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd8c005ad2ae492334e75e29d8ea3fae95bca95b
| 1,372
|
py
|
Python
|
mcpipy/cellcraft/config.py
|
cellcraft/cellcraft
|
1cb2b152bb6433250cec43e2586f1b5d093ec6e5
|
[
"MIT"
] | 2
|
2016-01-21T12:05:36.000Z
|
2016-04-18T09:50:03.000Z
|
mcpipy/cellcraft/config.py
|
cellcraft/cellcraft
|
1cb2b152bb6433250cec43e2586f1b5d093ec6e5
|
[
"MIT"
] | 1
|
2016-05-13T13:08:28.000Z
|
2016-05-13T13:08:28.000Z
|
mcpipy/cellcraft/config.py
|
cellcraft/cellcraft
|
1cb2b152bb6433250cec43e2586f1b5d093ec6e5
|
[
"MIT"
] | 3
|
2015-12-14T19:28:42.000Z
|
2020-11-29T12:53:12.000Z
|
import os
import json
import logging
# cellcraft node
CELLCRAFT_NODE_URL="http://192.168.178.29:4534"
# path to cache where pickle files will be stored
PATH_RESOURCES='cellcraft/resources'
PATH_CACHE='cellcraft/resources/cache/'
PATH_TEST_CACHE='test/fixtures/cache/'
# path to fixtures
PATH_TO_FIXTURES="test/fixtures"
# path to cellpack structures after processing them
PATH_CELLPACK = 'cellcraft/resources/cellpack/'
# cellpack parameters
envelop_id = 22
# database name to store biological information and coordinates of structures
DB='cellcraft'
TEST_DB='test'
# maximum number of structures kept in the cache
MAXIMUM_NUM_STRUCTURES_CACHE = 8
# load block appearance json
def load_block_appearance():
with open(os.path.join(PATH_RESOURCES, "block_appearance.json")) as appearance_json:
block_appearance = json.load(appearance_json)
return block_appearance
current_env = os.environ.get('app_env')
root_logger = logging.getLogger()
# NOTE: hardcoded override -- this forces the 'test' environment regardless of
# the 'app_env' value read above, and looks like a debugging leftover
current_env = 'test'
if current_env == 'cellcraft':
DB_HOST = '127.0.0.1'
DB_PORT = 27017
root_logger.setLevel(logging.INFO)
elif current_env == 'test':
DB_HOST = '127.0.0.1'
DB_PORT = 27017
root_logger.setLevel(logging.DEBUG)
else:
    logging.warning('Please configure an environment; falling back to the default dev environment for config')
root_logger.setLevel(logging.DEBUG)
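# Usage sketch (editor's addition): loading the block appearance mapping.
# Assumes 'block_appearance.json' exists under PATH_RESOURCES.
if __name__ == "__main__":
    block_appearance = load_block_appearance()
    root_logger.info("Loaded %d block appearance entries", len(block_appearance))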
| 24.070175
| 98
| 0.764577
| 194
| 1,372
| 5.226804
| 0.469072
| 0.069034
| 0.053254
| 0.073965
| 0.129191
| 0.094675
| 0.094675
| 0.094675
| 0.094675
| 0.094675
| 0
| 0.034043
| 0.143586
| 1,372
| 56
| 99
| 24.5
| 0.828936
| 0.223761
| 0
| 0.193548
| 0
| 0
| 0.268939
| 0.07197
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.096774
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
cd8c4a556bdf6a751d59f1d67ef4d0688f0e6844
| 9,123
|
py
|
Python
|
ftpsync/pyftpsync.py
|
wengzy/pyftpsync
|
db6decb02bf3535fe87d90b45a6cc974dd356b04
|
[
"MIT"
] | 86
|
2015-03-02T17:40:03.000Z
|
2022-03-14T03:41:40.000Z
|
ftpsync/pyftpsync.py
|
wengzy/pyftpsync
|
db6decb02bf3535fe87d90b45a6cc974dd356b04
|
[
"MIT"
] | 63
|
2015-04-12T19:01:52.000Z
|
2022-01-19T00:57:51.000Z
|
ftpsync/pyftpsync.py
|
wengzy/pyftpsync
|
db6decb02bf3535fe87d90b45a6cc974dd356b04
|
[
"MIT"
] | 25
|
2015-04-12T18:07:25.000Z
|
2021-04-25T15:20:24.000Z
|
# -*- coding: utf-8 -*-
"""
Simple folder synchronization using FTP.
(c) 2012-2021 Martin Wendt; see https://github.com/mar10/pyftpsync
Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php
Usage examples:
> pyftpsync.py --help
> pyftpsync.py upload . ftps://example.com/myfolder
"""
import argparse
import platform
import sys
from pprint import pprint
from ftpsync import __version__
from ftpsync.cli_common import (
common_parser,
creds_parser,
matcher_parser,
verbose_parser,
)
from ftpsync.run_command import add_run_parser, handle_run_command
from ftpsync.scan_command import add_scan_parser
from ftpsync.synchronizers import (
BiDirSynchronizer,
DownloadSynchronizer,
UploadSynchronizer,
)
from ftpsync.targets import FsTarget, make_target
from ftpsync.tree_command import add_tree_parser
from ftpsync.util import (
DEBUG_FLAGS,
PYTHON_VERSION,
check_cli_verbose,
namespace_to_dict,
set_pyftpsync_logger,
)
# ===============================================================================
# run
# ===============================================================================
def run():
"""CLI main entry point."""
# Use print() instead of logging when running in CLI mode:
set_pyftpsync_logger(None)
parser = argparse.ArgumentParser(
description="Synchronize folders over FTP.",
epilog="See also https://github.com/mar10/pyftpsync",
parents=[verbose_parser],
)
# Note: we want to allow --version to be combined with --verbose. However
# on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
if check_cli_verbose(3) > 3:
version_info = "pyftpsync/{} Python/{} {}".format(
__version__, PYTHON_VERSION, platform.platform()
)
else:
version_info = "{}".format(__version__)
parser.add_argument("-V", "--version", action="version", version=version_info)
subparsers = parser.add_subparsers(help="sub-command help")
# --- Create the parser for the "upload" command ---------------------------
sp = subparsers.add_parser(
"upload",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files to remote folder",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite remote files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["local", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove remote files if they don't exist locally",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove remote files if they don't exist locally "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="upload")
# --- Create the parser for the "download" command -------------------------
sp = subparsers.add_parser(
"download",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files from remote folder to local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite local files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove local files if they don't exist on remote target",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove local files if they don't exist on remote target "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="download")
# --- Create the parser for the "sync" command -----------------------------
sp = subparsers.add_parser(
"sync",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="synchronize new and modified files between remote folder and local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--resolve",
default="ask",
choices=["old", "new", "local", "remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.set_defaults(command="sync")
# --- Create the parser for the "run" command -----------------------------
add_run_parser(subparsers)
# --- Create the parser for the "scan" command -----------------------------
add_scan_parser(subparsers)
# --- Create the parser for the "tree" command -----------------------------
add_tree_parser(subparsers)
# --- Parse command line ---------------------------------------------------
args = parser.parse_args()
args.verbose -= args.quiet
del args.quiet
# print("verbose", args.verbose)
ftp_debug = 0
if args.verbose >= 6:
ftp_debug = 1
if args.debug:
if args.verbose < 4:
parser.error("'--debug' requires verbose level >= 4")
DEBUG_FLAGS.update(args.debug)
# Modify the `args` from the `pyftpsync.yaml` config:
if getattr(args, "command", None) == "run":
handle_run_command(parser, args)
if callable(getattr(args, "command", None)):
# scan_handler
try:
return args.command(parser, args)
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
elif not hasattr(args, "command"):
        parser.error(
            "missing command (choose from 'upload', 'download', 'run', 'sync', 'scan', 'tree')"
        )
# Post-process and check arguments
if hasattr(args, "delete_unmatched") and args.delete_unmatched:
args.delete = True
args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})
if args.remote == ".":
parser.error("'.' is expected to be the local target (not remote)")
args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
if not isinstance(args.local_target, FsTarget) and isinstance(
args.remote_target, FsTarget
):
parser.error("a file system target is expected to be local")
# Let the command handler do its thing
opts = namespace_to_dict(args)
if args.command == "upload":
s = UploadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "download":
s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "sync":
s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
else:
parser.error("unknown command '{}'".format(args.command))
s.is_script = True
try:
s.run()
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
finally:
# Prevent sporadic exceptions in ftplib, when closing in __del__
s.local.close()
s.remote.close()
stats = s.get_stats()
if args.verbose >= 5:
pprint(stats)
elif args.verbose >= 1:
if args.dry_run:
print("(DRY-RUN) ", end="")
print(
"Wrote {}/{} files in {} directories, skipped: {}.".format(
stats["files_written"],
stats["local_files"],
stats["local_dirs"],
stats["conflict_files_skipped"],
),
end="",
)
if stats["interactive_ask"]:
print()
else:
print(" Elap: {}.".format(stats["elap_str"]))
return
# Script entry point
if __name__ == "__main__":
# Just in case...
from multiprocessing import freeze_support
freeze_support()
run()
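# Example invocations (editor's addition); the sub-commands and flags are the
# ones defined above, while hosts and paths are placeholders:
#
#   pyftpsync upload . ftps://example.com/myfolder --delete
#   pyftpsync download . ftp://example.com/myfolder --resolve skip
#   pyftpsync sync . ftps://example.com/myfolder --resolve ask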
| 31.350515
| 90
| 0.574044
| 998
| 9,123
| 5.103206
| 0.247495
| 0.034557
| 0.038288
| 0.021206
| 0.428038
| 0.376988
| 0.374239
| 0.348125
| 0.348125
| 0.348125
| 0
| 0.003753
| 0.269758
| 9,123
| 290
| 91
| 31.458621
| 0.760733
| 0.163872
| 0
| 0.328571
| 0
| 0
| 0.268483
| 0.003012
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004762
| false
| 0
| 0.061905
| 0
| 0.07619
| 0.038095
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|