seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
34870614930 | from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
class TestCategoricalConstructors:
def test_fastpath_deprecated(self):
codes = np.array([1, 2, 3])
dtype = CategoricalDtype(categories=["a", "b", "c", "d"], ordered=False)
msg = "The 'fastpath' keyword in Categorical is deprecated"
with tm.assert_produces_warning(DeprecationWarning, match=msg):
Categorical(codes, dtype=dtype, fastpath=True)
def test_categorical_from_cat_and_dtype_str_preserve_ordered(self):
# GH#49309 we should preserve orderedness in `res`
cat = Categorical([3, 1], categories=[3, 2, 1], ordered=True)
res = Categorical(cat, dtype="category")
assert res.dtype.ordered
def test_categorical_disallows_scalar(self):
# GH#38433
with pytest.raises(TypeError, match="Categorical input must be list-like"):
Categorical("A", categories=["A", "B"])
def test_categorical_1d_only(self):
# ndim > 1
msg = "> 1 ndim Categorical are not supported at this time"
with pytest.raises(NotImplementedError, match=msg):
Categorical(np.array([list("abcd")]))
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
exp_err = TypeError
# This should be a boolean.
ordered = np.array([0, 1, 2])
with pytest.raises(exp_err, match=exp_msg):
Categorical([1, 2, 3], ordered=ordered)
with pytest.raises(exp_err, match=exp_msg):
Categorical.from_codes(
[0, 0, 1], categories=["a", "b", "c"], ordered=ordered
)
def test_constructor_empty(self):
# GH 17248
c = Categorical([])
expected = Index([])
tm.assert_index_equal(c.categories, expected)
c = Categorical([], categories=[1, 2, 3])
expected = Index([1, 2, 3], dtype=np.int64)
tm.assert_index_equal(c.categories, expected)
def test_constructor_empty_boolean(self):
# see gh-22702
cat = Categorical([], categories=[True, False])
categories = sorted(cat.categories.tolist())
assert categories == [False, True]
def test_constructor_tuples(self):
values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
result = Categorical(values)
expected = Index([(1,), (1, 2)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
assert result.ordered is False
def test_constructor_tuples_datetimes(self):
# numpy will auto reshape when all of the tuples are the
# same len, so add an extra one with 2 items and slice it off
values = np.array(
[
(Timestamp("2010-01-01"),),
(Timestamp("2010-01-02"),),
(Timestamp("2010-01-01"),),
(Timestamp("2010-01-02"),),
("a", "b"),
],
dtype=object,
)[:-1]
result = Categorical(values)
expected = Index(
[(Timestamp("2010-01-01"),), (Timestamp("2010-01-02"),)],
tupleize_cols=False,
)
tm.assert_index_equal(result.categories, expected)
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype="O")
factor = Categorical(arr, ordered=False)
assert not factor.ordered
# this however will raise as cannot be sorted
msg = (
"'values' is not ordered, please explicitly specify the "
"categories order by passing in a categories argument."
)
with pytest.raises(TypeError, match=msg):
Categorical(arr, ordered=True)
def test_constructor_interval(self):
result = Categorical(
[Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True
)
ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
msg = "Categorical categories must be unique"
with pytest.raises(ValueError, match=msg):
Categorical([1, 2], [1, 2, 2])
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ["a", "b", "b"])
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
assert not c1.ordered
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = Categorical([np.nan, 1, 2, 3])
assert is_integer_dtype(cat.categories)
# this should result in floats
cat = Categorical([np.nan, 1, 2.0, 3])
assert is_float_dtype(cat.categories)
cat = Categorical([np.nan, 1.0, 2.0, 3.0])
assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notna()])
# assert is_integer_dtype(vals)
# corner cases
cat = Categorical([1])
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
cat = Categorical(["a"])
assert len(cat.categories) == 1
assert cat.categories[0] == "a"
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# two arrays
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(None):
Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"])
with tm.assert_produces_warning(None):
Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5])
# the next one are from the old docs
with tm.assert_produces_warning(None):
Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
Categorical(np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True)
def test_constructor_with_existing_categories(self):
# GH25318: constructing with pd.Series used to bogusly skip recoding
# categories
c0 = Categorical(["a", "b", "c", "a"])
c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"])
c2 = Categorical(c0, categories=c1.categories)
tm.assert_categorical_equal(c1, c2)
c3 = Categorical(Series(c0), categories=c1.categories)
tm.assert_categorical_equal(c1, c3)
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
msg = r"^Parameter 'categories' must be list-like, was"
with pytest.raises(TypeError, match=msg):
Categorical(["a", "b"], categories="a")
def test_constructor_with_null(self):
# Cannot have NaN in categories
msg = "Categorical categories cannot be null"
with pytest.raises(ValueError, match=msg):
Categorical([np.nan, "a", "b", "c"], categories=[np.nan, "a", "b", "c"])
with pytest.raises(ValueError, match=msg):
Categorical([None, "a", "b", "c"], categories=[None, "a", "b", "c"])
with pytest.raises(ValueError, match=msg):
Categorical(
DatetimeIndex(["nat", "20160101"]),
categories=[NaT, Timestamp("20160101")],
)
def test_constructor_with_index(self):
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
tm.assert_categorical_equal(
ci.values, Categorical(ci.astype(object), categories=ci.categories)
)
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
exp = Categorical([0, 1, 2])
cat = Categorical(x for x in [0, 1, 2])
tm.assert_categorical_equal(cat, exp)
cat = Categorical(range(3))
tm.assert_categorical_equal(cat, exp)
MultiIndex.from_product([range(5), ["a", "b", "c"]])
# check that categories accept generators and sequences
cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical([0, 1, 2], categories=range(3))
tm.assert_categorical_equal(cat, exp)
def test_constructor_with_rangeindex(self):
# RangeIndex is preserved in Categories
rng = Index(range(3))
cat = Categorical(rng)
tm.assert_index_equal(cat.categories, rng, exact=True)
cat = Categorical([1, 2, 0], categories=rng)
tm.assert_index_equal(cat.categories, rng, exact=True)
@pytest.mark.parametrize(
"dtl",
[
date_range("1995-01-01 00:00:00", periods=5, freq="s"),
date_range("1995-01-01 00:00:00", periods=5, freq="s", tz="US/Eastern"),
timedelta_range("1 day", periods=5, freq="s"),
],
)
def test_constructor_with_datetimelike(self, dtl):
# see gh-12077
# constructor with a datetimelike and NaT
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected._data.freq = None
tm.assert_index_equal(c.categories, expected)
tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8"))
# with NaT
s2 = s.copy()
s2.iloc[-1] = NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected._data.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
assert "NaT" in result
def test_constructor_from_index_series_datetimetz(self):
idx = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
idx = idx._with_freq(None) # freq not preserved in result.categories
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_date_objects(self):
# we dont cast date objects to timestamps, matching Index constructor
v = date.today()
cat = Categorical([v, v])
assert cat.categories.dtype == object
assert type(cat.categories[0]) is date
def test_constructor_from_index_series_timedelta(self):
idx = timedelta_range("1 days", freq="D", periods=3)
idx = idx._with_freq(None) # freq not preserved in result.categories
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = period_range("2015-01-01", freq="D", periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
@pytest.mark.parametrize(
"values",
[
np.array([1.0, 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype="int64"),
["a", "b", "c", np.nan],
[pd.Period("2014-01"), pd.Period("2014-02"), NaT],
[Timestamp("2014-01-01"), Timestamp("2014-01-02"), NaT],
[
Timestamp("2014-01-01", tz="US/Eastern"),
Timestamp("2014-01-02", tz="US/Eastern"),
NaT,
],
],
)
def test_constructor_invariant(self, values):
# GH 14190
c = Categorical(values)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
@pytest.mark.parametrize("ordered", [True, False])
def test_constructor_with_dtype(self, ordered):
categories = ["b", "a", "c"]
dtype = CategoricalDtype(categories, ordered=ordered)
result = Categorical(["a", "b", "a", "c"], dtype=dtype)
expected = Categorical(
["a", "b", "a", "c"], categories=categories, ordered=ordered
)
tm.assert_categorical_equal(result, expected)
assert result.ordered is ordered
def test_constructor_dtype_and_others_raises(self):
dtype = CategoricalDtype(["a", "b"], ordered=True)
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], categories=["a", "b"], dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ordered=True, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ordered=False, dtype=dtype)
@pytest.mark.parametrize("categories", [None, ["a", "b"], ["a", "c"]])
@pytest.mark.parametrize("ordered", [True, False])
def test_constructor_str_category(self, categories, ordered):
result = Categorical(
["a", "b"], categories=categories, ordered=ordered, dtype="category"
)
expected = Categorical(["a", "b"], categories=categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_constructor_str_unknown(self):
with pytest.raises(ValueError, match="Unknown dtype"):
Categorical([1, 2], dtype="foo")
def test_constructor_np_strs(self):
# GH#31499 Hashtable.map_locations needs to work on np.str_ objects
cat = Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")])
assert all(isinstance(x, np.str_) for x in cat.categories)
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(["a", "b", "c"], ordered=True)
values = Categorical(["a", "b", "d"])
result = Categorical(values, dtype=dtype)
# We use dtype.categories, not values.categories
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "c"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_with_unknown_dtype(self):
dtype = CategoricalDtype(None, ordered=True)
values = Categorical(["a", "b", "d"])
result = Categorical(values, dtype=dtype)
# We use values.categories, not dtype.categories
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "d"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_string(self):
values = Categorical(["a", "b", "d"])
# use categories, ordered
result = Categorical(
values, categories=["a", "b", "c"], ordered=True, dtype="category"
)
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "c"], ordered=True
)
tm.assert_categorical_equal(result, expected)
# No string
result = Categorical(values, categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_with_categorical_categories(self):
# GH17884
expected = Categorical(["a", "b"], categories=["a", "b", "c"])
result = Categorical(["a", "b"], categories=Categorical(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
result = Categorical(["a", "b"], categories=CategoricalIndex(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("klass", [lambda x: np.array(x, dtype=object), list])
def test_construction_with_null(self, klass, nulls_fixture):
# https://github.com/pandas-dev/pandas/issues/31927
values = klass(["a", nulls_fixture, "b"])
result = Categorical(values)
dtype = CategoricalDtype(["a", "b"])
codes = [0, -1, 1]
expected = Categorical.from_codes(codes=codes, dtype=dtype)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("validate", [True, False])
def test_from_codes_nullable_int_categories(self, any_numeric_ea_dtype, validate):
# GH#39649
cats = pd.array(range(5), dtype=any_numeric_ea_dtype)
codes = np.random.default_rng(2).integers(5, size=3)
dtype = CategoricalDtype(cats)
arr = Categorical.from_codes(codes, dtype=dtype, validate=validate)
assert arr.categories.dtype == cats.dtype
tm.assert_index_equal(arr.categories, Index(cats))
def test_from_codes_empty(self):
cat = ["a", "b", "c"]
result = Categorical.from_codes([], categories=cat)
expected = Categorical([], categories=cat)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("validate", [True, False])
def test_from_codes_validate(self, validate):
# GH53122
dtype = CategoricalDtype(["a", "b"])
if validate:
with pytest.raises(ValueError, match="codes need to be between "):
Categorical.from_codes([4, 5], dtype=dtype, validate=validate)
else:
# passes, though has incorrect codes, but that's the user responsibility
Categorical.from_codes([4, 5], dtype=dtype, validate=validate)
def test_from_codes_too_few_categories(self):
dtype = CategoricalDtype(categories=[1, 2])
msg = "codes need to be between "
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], dtype=dtype)
def test_from_codes_non_int_codes(self):
dtype = CategoricalDtype(categories=[1, 2])
msg = "codes need to be array-like integers"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], dtype=dtype)
def test_from_codes_non_unique_categories(self):
with pytest.raises(ValueError, match="Categorical categories must be unique"):
Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"])
def test_from_codes_nan_cat_included(self):
with pytest.raises(ValueError, match="Categorical categories cannot be null"):
Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan])
def test_from_codes_too_negative(self):
dtype = CategoricalDtype(categories=["a", "b", "c"])
msg = r"codes need to be between -1 and len\(categories\)-1"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], dtype=dtype)
def test_from_codes(self):
dtype = CategoricalDtype(categories=["a", "b", "c"])
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], categories=dtype.categories)
tm.assert_categorical_equal(exp, res)
res = Categorical.from_codes([0, 1, 2], dtype=dtype)
tm.assert_categorical_equal(exp, res)
@pytest.mark.parametrize("klass", [Categorical, CategoricalIndex])
def test_from_codes_with_categorical_categories(self, klass):
# GH17884
expected = Categorical(["a", "b"], categories=["a", "b", "c"])
result = Categorical.from_codes([0, 1], categories=klass(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("klass", [Categorical, CategoricalIndex])
def test_from_codes_with_non_unique_categorical_categories(self, klass):
with pytest.raises(ValueError, match="Categorical categories must be unique"):
Categorical.from_codes([0, 1], klass(["a", "b", "a"]))
def test_from_codes_with_nan_code(self):
# GH21767
codes = [1, 2, np.nan]
dtype = CategoricalDtype(categories=["a", "b", "c"])
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, categories=dtype.categories)
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
@pytest.mark.parametrize("codes", [[1.0, 2.0, 0], [1.1, 2.0, 0]])
def test_from_codes_with_float(self, codes):
# GH21767
# float codes should raise even if values are equal to integers
dtype = CategoricalDtype(categories=["a", "b", "c"])
msg = "codes need to be array-like integers"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(codes, dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(codes, dtype=dtype)
def test_from_codes_with_dtype_raises(self):
msg = "Cannot specify"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(
[0, 1], categories=["a", "b"], dtype=CategoricalDtype(["a", "b"])
)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(
[0, 1], ordered=True, dtype=CategoricalDtype(["a", "b"])
)
def test_from_codes_neither(self):
msg = "Both were None"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([0, 1])
def test_from_codes_with_nullable_int(self):
codes = pd.array([0, 1], dtype="Int64")
categories = ["a", "b"]
result = Categorical.from_codes(codes, categories=categories)
expected = Categorical.from_codes(codes.to_numpy(int), categories=categories)
tm.assert_categorical_equal(result, expected)
def test_from_codes_with_nullable_int_na_raises(self):
codes = pd.array([0, None], dtype="Int64")
categories = ["a", "b"]
msg = "codes cannot contain NA values"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(codes, categories=categories)
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories(self, dtype):
cats = ["a", "b"]
codes = np.array([0, 0, 1, 1], dtype="i8")
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes(codes, cats)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories_sorts(self, dtype):
cats = ["b", "a"]
codes = np.array([0, 1, 1, 1], dtype="i8")
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes([1, 0, 0, 0], ["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_dtype(self):
cats = ["a", "b", "d"]
codes = np.array([0, 1, 0, 2], dtype="i8")
dtype = CategoricalDtype(["c", "b", "a"], ordered=True)
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical(
["a", "b", "a", "d"], categories=["c", "b", "a"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_coerces(self):
cats = ["1", "2", "bad"]
codes = np.array([0, 0, 1, 2], dtype="i8")
dtype = CategoricalDtype([1, 2])
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical([1, 1, 2, np.nan])
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("ordered", [None, True, False])
def test_construction_with_ordered(self, ordered):
# GH 9347, 9190
cat = Categorical([0, 1, 2], ordered=ordered)
assert cat.ordered == bool(ordered)
def test_constructor_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = Categorical(values)
tm.assert_index_equal(c1.categories, Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
def test_constructor_string_and_tuples(self):
# GH 21416
c = Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object))
expected_index = Index([("a", "b"), ("b", "a"), "c"])
assert c.categories.equals(expected_index)
def test_interval(self):
idx = pd.interval_range(0, 10, periods=10)
cat = Categorical(idx, categories=idx)
expected_codes = np.arange(10, dtype="int8")
tm.assert_numpy_array_equal(cat.codes, expected_codes)
tm.assert_index_equal(cat.categories, idx)
# infer categories
cat = Categorical(idx)
tm.assert_numpy_array_equal(cat.codes, expected_codes)
tm.assert_index_equal(cat.categories, idx)
# list values
cat = Categorical(list(idx))
tm.assert_numpy_array_equal(cat.codes, expected_codes)
tm.assert_index_equal(cat.categories, idx)
# list values, categories
cat = Categorical(list(idx), categories=list(idx))
tm.assert_numpy_array_equal(cat.codes, expected_codes)
tm.assert_index_equal(cat.categories, idx)
# shuffled
values = idx.take([1, 2, 0])
cat = Categorical(values, categories=idx)
tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="int8"))
tm.assert_index_equal(cat.categories, idx)
# extra
values = pd.interval_range(8, 11, periods=3)
cat = Categorical(values, categories=idx)
expected_codes = np.array([8, 9, -1], dtype="int8")
tm.assert_numpy_array_equal(cat.codes, expected_codes)
tm.assert_index_equal(cat.categories, idx)
# overlapping
idx = IntervalIndex([Interval(0, 2), Interval(0, 1)])
cat = Categorical(idx, categories=idx)
expected_codes = np.array([0, 1], dtype="int8")
tm.assert_numpy_array_equal(cat.codes, expected_codes)
tm.assert_index_equal(cat.categories, idx)
def test_categorical_extension_array_nullable(self, nulls_fixture):
# GH:
arr = pd.arrays.StringArray._from_sequence([nulls_fixture] * 2)
result = Categorical(arr)
assert arr.dtype == result.categories.dtype
expected = Categorical(Series([pd.NA, pd.NA], dtype=arr.dtype))
tm.assert_categorical_equal(result, expected)
def test_from_sequence_copy(self):
cat = Categorical(np.arange(5).repeat(2))
result = Categorical._from_sequence(cat, dtype=None, copy=False)
# more generally, we'd be OK with a view
assert result._codes is cat._codes
result = Categorical._from_sequence(cat, dtype=None, copy=True)
assert not tm.shares_memory(result, cat)
def test_constructor_datetime64_non_nano(self):
categories = np.arange(10).view("M8[D]")
values = categories[::2].copy()
cat = Categorical(values, categories=categories)
assert (cat == values).all()
def test_constructor_preserves_freq(self):
# GH33830 freq retention in categorical
dti = date_range("2016-01-01", periods=5)
expected = dti.freq
cat = Categorical(dti)
result = cat.categories.freq
assert expected == result
| pandas-dev/pandas | pandas/tests/arrays/categorical/test_constructors.py | test_constructors.py | py | 30,508 | python | en | code | 40,398 | github-code | 90 |
19405683955 | x = input('Insira um valor inteiro:')
tamanho = len(x)
verifica = False
i = 0
while i < tamanho - 1:
if x[i] == x[i + 1]:
verifica = True
i += 1
if verifica:
print("sim")
else:
print("não")
| marcelomiky/python_code | Coursera/CICCP1/digitos_adjacentes.py | digitos_adjacentes.py | py | 222 | python | pt | code | 4 | github-code | 90 |
38296757641 | import constant
from utils.shortcuts import render
from django.conf import settings
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.contrib.auth import authenticate as auth_authenticate
from django.http import HttpResponse, HttpResponseRedirect
from decorator import login_required
from services.token_service import TokenService
from services.ms_graph_service import MSGraphService
from services.user_service import UserService
from services.auth_service import AuthService
from .forms import UserInfo, UserRegInfo
user_service = UserService()
token_service = TokenService()
def index(request):
user = AuthService.get_current_user(request)
if not user.is_authenticated:
return HttpResponseRedirect('/Account/Login')
if not user.are_linked:
return HttpResponseRedirect('/Link')
if user.is_admin and not user_service.is_tenant_consented(user.tenant_id):
return HttpResponseRedirect('/Admin')
else:
return HttpResponseRedirect('/Schools')
def login(request):
# get /Account/Login
if request.method == 'GET':
o365_username = request.COOKIES.get(constant.o365_username_cookie)
o365_email = request.COOKIES.get(constant.o365_email_cookie)
if o365_username and o365_email:
context = {
'username': o365_username,
'email': o365_email
}
return render(request, 'account/O365login.html', context)
else:
user_form = UserInfo()
return render(request, 'account/login.html', { 'user_form': user_form })
# post /Account/Login
else:
return login_post(request)
def login_post(request):
email = ''
password = ''
errors = []
user_form = UserInfo(request.POST)
if user_form.is_valid():
data = user_form.clean()
email = data['Email']
password = data['Password']
rememberme = data['RememberMe']
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = not rememberme
user = auth_authenticate(username=email, password=password)
if user is not None:
auth_login(request, user)
o365_user = user_service.get_o365_user(user)
if o365_user:
AuthService.set_o365_user(request, o365_user)
return HttpResponseRedirect('/')
errors.append('Invalid login attempt.')
context = {
'user_form': user_form,
'errors': errors
}
return render(request, 'account/login.html', context)
def o365_login(request):
extra_params = {
'nonce': AuthService.get_random_string()
}
o365_email = request.COOKIES.get(constant.o365_email_cookie)
if o365_email:
extra_params['login_hint'] = o365_email
else:
extra_params['prompt'] = 'login'
o365_login_url = AuthService.get_authorization_url(request, 'code', 'Auth/O365/Callback', AuthService.get_random_string(), extra_params)
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = True
return HttpResponseRedirect(o365_login_url)
def reset(request):
response = HttpResponseRedirect('/Account/Login')
response.set_cookie(constant.o365_username_cookie, '', expires=0)
response.set_cookie(constant.o365_email_cookie, '', expires=0)
return response
def o365_auth_callback(request):
AuthService.validate_state(request)
code = request.POST.get('code')
redirect_uri = AuthService.get_redirect_uri(request, 'Auth/O365/Callback')
auth_result = token_service.get_token_with_code(code, redirect_uri, constant.Resources.MSGraph)
o365_user_id = auth_result.get('oid')
tenant_id = auth_result.get('tenantId')
token_service.cache_tokens(auth_result, o365_user_id)
ms_graph_service = MSGraphService(auth_result.get('accessToken'))
o365_user = ms_graph_service.get_o365_user(tenant_id)
AuthService.set_o365_user(request, o365_user)
for role in o365_user.roles:
user_service.update_role(o365_user.id, role)
user_service.create_or_update_organization(tenant_id, o365_user.tenant_name)
local_user = user_service.get_user_by_o365_email(o365_user.email)
if local_user:
auth_login(request, local_user)
response = HttpResponseRedirect('/')
response.set_cookie(constant.o365_username_cookie, o365_user.display_name)
response.set_cookie(constant.o365_email_cookie, o365_user.email)
return response
@login_required
def photo(request, user_object_id):
user = AuthService.get_current_user(request)
token = token_service.get_access_token(constant.Resources.MSGraph, user.o365_user_id)
ms_graph_service = MSGraphService(token)
user_photo = ms_graph_service.get_photo(user_object_id)
if not user_photo:
local_photo_path = settings.STATICFILES_DIRS[0] + '/Images/DefaultUserPhoto.jpg'
local_photo_file = open(local_photo_path, 'rb')
user_photo = local_photo_file.read()
return HttpResponse(user_photo, content_type='image/jpeg')
def register(request):
user_reg_form = UserRegInfo()
# post /Account/Register
if request.method == 'POST':
errors = []
user_reg_form = UserRegInfo(request.POST)
if user_reg_form.is_valid():
data = user_reg_form.clean()
user = user_service.register(data['Email'], data['Password'], data['FavoriteColor'])
if user:
auth_login(request, user)
return HttpResponseRedirect('/')
else:
errors.append('Name %s is already taken.' % data['Email'])
errors.append("Email '%s' is already taken." % data['Email'])
return render(request, 'account/register.html', {'user_reg_form':user_reg_form, 'errors':errors})
# get /Account/Register
else:
return render(request, 'account/register.html', {'user_reg_form':user_reg_form})
@login_required
def logoff(request):
    """Log the user out locally; linked accounts also sign out of O365."""
    user = AuthService.get_current_user(request)
    AuthService.clear_o365_user(request)
    auth_logout(request)
    if not user.are_linked:
        return HttpResponseRedirect('/')
    # Linked account: bounce through the O365 sign-out URL and blank the
    # identity cookies on the way out.
    redirect_uri = '%s://%s' % (request.scheme, request.get_host())
    response = HttpResponseRedirect(constant.log_out_url % (redirect_uri, redirect_uri))
    response.set_cookie(constant.username_cookie, '')
    response.set_cookie(constant.email_cookie, '')
    return response
| TylerLu/EDUGraphAPI-Python | account/views.py | views.py | py | 6,540 | python | en | code | 1 | github-code | 90 |
18430863009 | from collections import Counter
# Count non-empty multisets pickable from S: product of (freq + 1) over all
# distinct characters, minus one for the empty choice, modulo 1e9+7.
_ = int(input())
chars = Counter(input())
MOD = 10 ** 9 + 7
result = 1
for freq in chars.values():
    result = result * (freq + 1) % MOD
print(result - 1)
| Aasthaengg/IBMdataset | Python_codes/p03095/s295363983.py | s295363983.py | py | 197 | python | en | code | 0 | github-code | 90 |
18320460029 | import sys
input = sys.stdin.readline
import math
from collections import defaultdict, deque
from itertools import permutations
# One-line stdin helpers (contest template).
ml = lambda: map(int, input().split())
ll = lambda: list(map(int, input().split()))
ii = lambda: int(input())
ip = lambda: list(input())
ips = lambda: input().split()
"""========main code==============="""
# Average total Euclidean tour length over every visiting order of t points.
t = ii()
points = []
for _ in range(t):
    a, b = ml()
    points.append([a, b])
total = 0.0
cnt = 0
# Fix: iterate the permutations generator directly.  Wrapping it in list()
# materialized all t! orderings in memory at once for no benefit.
for order in permutations(points):
    for j in range(1, len(order)):
        dx = order[j][0] - order[j - 1][0]
        dy = order[j][1] - order[j - 1][1]
        total += math.sqrt(dx * dx + dy * dy)
    cnt += 1
print(total / cnt)
19586728954 | #!/usr/bin/env python3
import time
import pickle2reducer
import multiprocessing as mp
# Port and shared secret for the robot command channel; must match the server.
ROBOT_CMD_PORT=6000
ROBOT_SECRET_KEY=b"Friggin Lazer!"
# Install the custom pickle reducer on the default multiprocessing context.
ctx = mp.get_context()
ctx.reducer = pickle2reducer.Pickle2Reducer()
from multiprocessing.connection import Client
run=1
address = ('localhost', ROBOT_CMD_PORT)
# Keep retrying once per second until the server accepts the connection,
# then send the scripted messages once and stop.
while run:
    try:
        conn = Client(address, authkey=ROBOT_SECRET_KEY)
        msgs=['Does that shark have a friggin lazer?!', 'close' ]
        for msg in msgs:
            print("SENDING: %s" % msg)
            conn.send(msg)
        # can also send arbitrary objects:
        # conn.send(['a', 2.5, None, int, sum])
        conn.close()
        run=0
    except ConnectionRefusedError:
        # Server not up yet — back off briefly and retry.
        time.sleep(1)
| rhazzed/potatoCHIP | archive/sender.py | sender.py | py | 723 | python | en | code | 0 | github-code | 90 |
42586161703 |
import requests
from utils.datetime_tools import DATE_TIME_FORMAT
from utils.gibber import logger
class eastmoneyFutureScrapper:
    """Scraper for Eastmoney's futures fast-news JSON feed."""
    def __init__(self):
        self.base_url = "https://np-futurelist.eastmoney.com/comm/future/fastNews"
        # Browser-like headers; the endpoint rejects requests without them.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:102.0) Gecko/20100101 Firefox/102.0",
            "Accept": "application/json, text/plain, */*",
            "Host": "np-futurelist.eastmoney.com",
            "Origin": "https://qhweb.eastmoney.com",
            "Referer": "https://qhweb.eastmoney.com/",
        }
        self.timestamp_format = DATE_TIME_FORMAT
    def _data_cleaner(self, news_dict):
        """Map one raw feed item onto the project's standard news record."""
        fid = news_dict['newsId']
        content = news_dict['digest'].strip()
        timestamp = news_dict['showTime']  # already DATE_TIME_FORMAT
        return {'fid': fid,
                'source': 'eastmoney_future',
                'content': content,
                'timestamp': timestamp,
                'industry': '',
                'comment': ''}
    def get_params(self, sortEnd="", pageSize=20):
        """Build the POST payload; pass a previous batch's sort key as
        *sortEnd* to page backwards through the feed."""
        params = {
            "biz": "future_724",
            "client": "future_web",
            "impt": False,
            "pageSize": pageSize,
            "req_trace": "litu37wg-r2c6qqjm",
            "sortEnd": sortEnd,
            "version": "1.0.0"
        }
        return params
    def get_news(self, params, retry=0, standard=True):
        """Fetch one page of news.

        Returns (items, last_sort_key); *items* are cleaned records when
        *standard* is true, otherwise the raw JSON payload.  Retries up to
        3 times on failure, then returns ([], "").
        """
        if retry > 3:
            logger.error(f"from {__file__}: network error and exceed max retry.")
            return [], ""
        try:
            r = requests.post(
                url=self.base_url,
                json=params,
                headers=self.headers,
                timeout=30)
            if r.status_code == 200:
                content = r.json()
                last_timestamp = content["data"][-1]["sort"]
                if standard:
                    content = [self._data_cleaner(i) for i in content["data"]]
                return content, last_timestamp
            else:
                logger.fatal(f'from {__file__}: Requesting failed! check url \n{r.url}')
                return [], ""
        except Exception:
            # Fix: the original bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; Exception still covers request errors and
            # malformed payloads (KeyError/IndexError on "data").
            return self.get_news(params, retry+1, standard=standard)
if __name__ == "__main__":
    # Smoke test: fetch one page of fast news and report how many items
    # came back (requires network access).
    t = eastmoneyFutureScrapper()
    p = t.get_params()
    n, _t = t.get_news(p)
    print(len(n))
69795114857 | import datetime
import os
import uuid
# Group uploaded files into per-day folders: YYYY/MM/DD/<uuid>_<timestamp>.<ext>
def file_upload_path(instance, filename):
    """Return a unique, date-partitioned storage path for an uploaded file."""
    ext = filename.split('.')[-1]
    # Fix: the original called datetime.datetiem.now() (typo), which raised
    # AttributeError on every upload.
    d = datetime.datetime.now()
    filepath = d.strftime("%Y/%m/%d")
    suffix = d.strftime("%Y%m%d%H%M%S")
    filename = "%s_%s.%s" % (uuid.uuid4().hex, suffix, ext)
    return os.path.join(filepath,filename)
38958024276 | ''' Basic Reader and Writer tests.
'''
import c3d
import importlib
import io
import unittest
import numpy as np
from test.base import Base
from test.zipload import Zipload
# Optional dependency: configure `climate` logging only when it is installed.
climate_spec = importlib.util.find_spec("climate")
if climate_spec:
    import climate
    logging = climate.get_logger('test')
    climate.enable_default_logging()
class ReaderTest(Base):
    ''' Test basic Reader functionality
    '''
    def test_format_pi(self):
        # Integer-format file from the standard C3D sample set.
        r = c3d.Reader(Zipload._get('sample01.zip', 'Eb015pi.c3d'))
        self._log(r)
        assert r.point_used == 26
        assert r.point_rate == 50
    def test_format_pr(self):
        # Real-format variant of the same capture.
        r = c3d.Reader(Zipload._get('sample01.zip', 'Eb015pr.c3d'))
        self._log(r)
        assert r.point_used == 26
        assert r.point_rate == 50
    def test_paramsa(self):
        # Parameter-block parsing, variant A.
        r = c3d.Reader(Zipload._get('sample08.zip', 'TESTAPI.c3d'))
        self._log(r)
        assert r.point_used == 26
        assert r.point_rate == 50
    def test_paramsb(self):
        # Variant B: also walks every group/parameter, decoding scalars by
        # byte width (2 -> int16, 4 -> float, otherwise int8).
        r = c3d.Reader(Zipload._get('sample08.zip', 'TESTBPI.c3d'))
        self._log(r)
        for g in r.group_values():
            for p in g.param_values():
                if len(p.dimensions) == 0:
                    val = None
                    width = len(p.bytes)
                    if width == 2:
                        val = p.int16_value
                    elif width == 4:
                        val = p.float_value
                    else:
                        val = p.int8_value
                    print('{0.name}.{1.name} = {2}'.format(g, p, val))
        assert r.point_used == 26
        assert r.point_rate == 50
        assert r.analog_used == 16
        assert r.get_float('POINT:RATE') == 50
        assert r.get_float('ANALOG:RATE') == 200
    def test_paramsc(self):
        r = c3d.Reader(Zipload._get('sample08.zip', 'TESTCPI.c3d'))
        self._log(r)
        assert r.point_used == 26
        assert r.point_rate == 50
    def test_paramsd(self):
        r = c3d.Reader(Zipload._get('sample08.zip', 'TESTDPI.c3d'))
        self._log(r)
        assert r.point_used == 26
        assert r.point_rate == 50
    def test_frames(self):
        # Frame iteration: 450 frames, 1-based numbering, and per-frame
        # point/analog array shapes derived from the header.
        r = c3d.Reader(Zipload._get('sample08.zip', 'TESTDPI.c3d'))
        self._log(r)
        frames = list(r.read_frames())
        assert len(frames) == 450
        frame_no, points, analog = frames[0]
        assert frame_no == 1, frame_no
        expected = (r.point_used, 5)
        assert points.shape == expected, \
            'point shape: got {}, expected {}'.format(points.shape, expected)
        expected = (r.analog_used, r.header.analog_per_frame)
        assert analog.shape == expected, \
            'analog shape: got {}, expected {}'.format(analog.shape, expected)
class WriterTest(Base):
    ''' Test basic writer functionality
    '''
    def test_add_frames(self):
        # Round-trip: copy frames from a reader into a writer (appending and
        # inserting at an index), then serialise to an in-memory buffer.
        r = c3d.Reader(Zipload._get('sample08.zip', 'TESTDPI.c3d'))
        w = c3d.Writer(
            point_rate=r.point_rate,
            analog_rate=r.analog_rate,
            point_scale=r.point_scale,
            gen_scale=r.get_float('ANALOG:GEN_SCALE'),
        )
        w.add_frames([(p, a) for _, p, a in r.read_frames()])
        w.add_frames([(p, a) for _, p, a in r.read_frames()], index=5)
        h = io.BytesIO()
        w.set_point_labels(r.point_labels)
        w.set_analog_labels(r.analog_labels)
        w.write(h)
    def test_set_params(self):
        # Setter coverage: start frame, labels, and screen-axis get/set
        # (default call, then explicit values) before writing.
        r = c3d.Reader(Zipload._get('sample08.zip', 'TESTDPI.c3d'))
        w = c3d.Writer(
            point_rate=r.point_rate,
            analog_rate=r.analog_rate,
            point_scale=r.point_scale,
            gen_scale=r.get_float('ANALOG:GEN_SCALE'),
        )
        w.add_frames([(p, a) for _, p, a in r.read_frames()])
        h = io.BytesIO()
        w.set_start_frame(255)
        w.set_point_labels(r.point_labels)
        w.set_analog_labels(r.analog_labels)
        # Screen axis
        X, Y = '-Y', '+Z'
        w.set_screen_axis()
        w.set_screen_axis(X, Y)
        X_v, Y_v = w.get_screen_axis()
        assert X_v == X and Y == Y_v, 'Mismatch between set & get screen axis.'
        assert np.all(np.equal(r.point_labels, w.point_labels)), 'Expected labels to be equal.'
        w.write(h)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| EmbodiedCognition/py-c3d | test/test_c3d.py | test_c3d.py | py | 4,350 | python | en | code | 94 | github-code | 90 |
35968872063 | import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# # from keras.backend.tensorflow_backend import set_session
#
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import argparse
import scipy.io as sio
# import matlab.engine
import torch
from utils import *
from data_preprocessing.CRBP.getSequence import *
from data_preprocessing.CRBP.BertDealEmbedding import *
from data_preprocessing.CRBP.getCircRNA2Vec import *
def get_data(protein):
    """Build and persist an 80/20 train/test split of the three feature views
    (k-mer, circRNA2Vec, BERT) for *protein*, saved as torch .pt files.

    Returns (train_dataset, test_dataset) dicts of tensors.
    """
    Kmer, dataY = dealwithdata1(protein)  # X
    Embedding = dealwithCircRNA2Vec(protein)
    Embedding1 = circRNABert(protein, 3)
    # Fixed seed so the shuffled split is reproducible across runs.
    np.random.seed(4)
    indexes = np.random.choice(Kmer.shape[0], Kmer.shape[0], replace=False)
    training_idx, test_idx = indexes[:round(((Kmer.shape[0])/10)*8)], indexes[round(((Kmer.shape[0])/10)*8):] #8:2
    X_train_1, X_test_1 = Kmer[training_idx, :, :], Kmer[test_idx, :, :]
    X_train_2, X_test_2 = Embedding[training_idx, :, :], Embedding[test_idx, :, :]
    # NOTE(review): shape comment below is from the original author —
    # presumably (samples, 101, 24); verify against circRNABert's output.
    X_train_3, X_test_3 = Embedding1[training_idx, :, :], Embedding1[test_idx, :, :] #(892,101,24)
    y_train, y_test = dataY[training_idx], dataY[test_idx]
    train_dataset = dict()
    train_dataset["samples1"] = torch.from_numpy(X_train_1)
    train_dataset["samples2"] = torch.from_numpy(X_train_2)
    train_dataset["samples3"] = torch.from_numpy(X_train_3)
    train_dataset["labels"] = torch.from_numpy(y_train)
    test_dataset = dict()
    test_dataset["samples1"] = torch.from_numpy(X_test_1)
    test_dataset["samples2"] = torch.from_numpy(X_test_2)
    test_dataset["samples3"] = torch.from_numpy(X_test_3)
    test_dataset["labels"] = torch.from_numpy(y_test)
    torch.save(train_dataset,'data/{}_train.pt'.format(protein))
    torch.save(test_dataset,'data/{}_test.pt'.format(protein))
    return train_dataset, test_dataset
| cc646201081/CircSSNN | data_preprocessing/CRBP/getDataView.py | getDataView.py | py | 1,794 | python | en | code | 0 | github-code | 90 |
17930966859 | #-*-coding:utf-8-*-
import sys
input=sys.stdin.readline
def main():
    """Read N from stdin and print the N-th term of the sequence
    2, 1, 3, 4, 7, ... (each term is the sum of the previous two)."""
    terms = [2, 1]
    target = int(input())
    for _ in range(target - 1):
        terms.append(terms[-1] + terms[-2])
    print(terms[-1])
if __name__=="__main__":
    main()
def sum_neg(lst):
    """Return [count of positive elements, sum of negative elements].

    Zeros contribute to neither figure.  An empty input returns []
    (not [0, 0]), matching the original contract.
    """
    if not lst:
        return []
    positives = sum(1 for x in lst if x > 0)
    negative_total = sum(x for x in lst if x < 0)
    return [positives, negative_total]
print(sum_neg([92, 6, 73, -77, 81, -90, 99, 8, -85, 34])) # [7, -252]
26696304801 | # reader.py
# 20161221 by Yong Wang
import numpy as np
inputDir = "Input/"
caseDir = "theta-0/"
# Read CFD Pressure Data (x[m], y[m], z[m], pres[Pa])
cfdPresFile = inputDir + caseDir + "cfd_pressure.csv"
cfdPresM = np.genfromtxt(cfdPresFile, delimiter=',', skip_header=17)
# Sign convention: +1 keeps pressure pointing into the surface.
cfdPresM[:,3] = 1.0*cfdPresM[:,3] # into the surface pressure +
# Read CFD velocity Data
# (nodeNum, x[m], y[m], z[m], pres[Pa], u[m/s], v[m/s], w[m/s])
cfdVeloFile = inputDir + caseDir + "cfd_velocity2.out"
cfdVeloM = np.genfromtxt(cfdVeloFile, delimiter=',')
# Remove the zero lines
cfdV = cfdVeloM[~np.all(cfdVeloM == 0.0, axis = 1)]
# Keep node number and the three velocity components only.
cfdV = cfdV[:, [0,5,6,7]]
# Read Node List File (nodeNum, nodeX[m], nodeY[m], nodeZ[m])
nodeFile = inputDir+"node_list.txt"
nodeM = np.genfromtxt(nodeFile, delimiter=',')
# Read Element List File (eleNum, eleI, eleJ, eleSecn)
eleFile = inputDir+"ele_list.txt"
eleM = np.genfromtxt(eleFile, delimiter=',')
# Read Section List File (secNum, Ro[m])
secFile = inputDir+"sec_list.txt"
secM = np.genfromtxt(secFile, delimiter=',')
# Row counts of each table, used by downstream load-transfer code.
presCount = np.size(cfdPresM, 0) # total num. of cfd pressure points
nodeCount = np.size(nodeM, 0) # total num. of nodes
elemCount = np.size(eleM, 0) # total num. of elements
secCount = np.size(secM, 0) # total num. of sections
veloCount = np.size(cfdV, 0)
| windstriver/tornado-thesis | towerStatic-Para/LoadTransfer/reader.py | reader.py | py | 1,315 | python | en | code | 1 | github-code | 90 |
70760089578 | from io import open
import pathlib
import shutil
# Abrir archivo
route = str(pathlib.Path().absolute())+"/ficheros_texto.txt"
archive = open(route, "+a")
#print(f"Ruta absoluta: {route}")
#Escribir dentro de un archivo
#archive.write("##### Texto ingresado desde Python #####\n")
#Cerrar archivo
archive.close()
route = str(pathlib.Path().absolute()) + "/ficheros_texto.txt"
read_file = open(route, "r")
# Leer contenido
#content = read_file.read()
#print(content)
# Leer contenido y guardarlo en lista
list_content = read_file.readlines()
read_file.close()
for line_text in list_content:
#list_text = line_text.split()
print(f".- {line_text.center(100)}")
# Copiar un archivo
"""
original_route = str(pathlib.Path().absolute()) + "/ficheros_texto.txt"
new_route = str(pathlib.Path().absolute())+"/fichero_copiado.txt"
shutil.copyfile(original_route, new_route)
"""
# Mover un archivo
"""
original_route = str(pathlib.Path().absolute())+"/fichero_copiado.txt"
new_route = str(pathlib.Path().absolute())+"/fichero_renombrado.txt"
shutil.move(original_route, new_route)
"""
# Eliminar archivos
import os
#new_route = str(pathlib.Path().absolute())+"/fichero_renombrado.txt"
#os.remove(new_route)
# Comprobar si existe un fichero
import os.path
absolute_path = os.path.abspath("./")
#print(absolute_path)
check_route = os.path.abspath("./")+"/ficheros_texto.txt"
print(check_route)
if os.path.isfile(check_route):
print("El archivo existe.")
else:
print("El fichero no existe.") | AlexSR2590/master_python | 14-sistema_archivos/ficheros.py | ficheros.py | py | 1,501 | python | es | code | 0 | github-code | 90 |
37513063800 | """ Constants Module
----------------
Defines constants used in the method.
"""
K = 5 # size of user and review latent vectors
EM_ITER = [10]  # EM iterations (list form — presumably for sweeps; verify)
BURN_IN = [0]  # Gibbs burn-in samples to discard
SAMPLES = [20]  # Gibbs samples to keep per EM iteration
NR_ITER = 50  # Newton-Raphson max iterations
NR_TOL = 1e-4  # Newton-Raphson convergence tolerance
NR_STEP = 0.1 # In paper: 1
ETA = 0  # NOTE(review): semantics not evident from this module; see method paper
| lucianamaroun/review_recommendation | algo/cap/const.py | const.py | py | 253 | python | en | code | 5 | github-code | 90 |
5381105338 | __author__ = 'Jakub Wojtanek, Kwojtanek@gmail.com'
import urlparse
import string
from django.shortcuts import render_to_response
from zorya.models import StellarObject
from zorya.appviews.mapviews import mapapistatic
from zorya.appviews.similarviews import SimilarViewStatic
# User-Agent strings of known search-engine crawlers (Google, Yahoo, Bing,
# Baidu, iask).  Matched EXACTLY against HTTP_USER_AGENT by the middleware
# below, so variants not listed here fall through to the normal views.
BotsUserAgents = [
    'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
    'Googlebot/2.1 (+http://www.googlebot.com/bot.html)',
    'Googlebot/2.1 (+http://www.google.com/bot.html)',
    'Mozilla/5.0 (compatible; Yahoo! Slurp China; http://misc.yahoo.com.cn/help.html)',
    'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
    'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
    'Mozilla/5.0 (compatible; bingbot/2.0 +http://www.bing.com/bingbot.htm)',
    'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
    'Mediapartners-Google',
    'Baiduspider',
    'Baiduspider+(+http://www.baidu.com/search/spider_jp.html)',
    'Baiduspider+(+http://www.baidu.com/search/spider.htm)',
    'BaiDuSpider',
    'iaskspider/2.0(+http://iask.com/help/help_index.html)',
    'iaskspider']
class CrawlerMiddleware(object):
    """Serve known crawlers a pre-rendered static page for /object/<pk> URLs.

    Non-crawler requests (and non-object URLs) fall through to normal view
    resolution by returning None.
    """
    def process_request(self,request):
        # Fix: dict.has_key() is Python-2-only (removed in Python 3); the
        # `in` operator is equivalent and works on both.
        if 'HTTP_USER_AGENT' in request.META and request.META['HTTP_USER_AGENT'] in BotsUserAgents:
            # Fix: string.split(s, sep) is the legacy Python-2 module
            # function; the str method form is portable.
            urlpath = urlparse.urlsplit(request.path).path.split('/')
            if urlpath[-2] == 'object':
                MainObject = StellarObject.objects.get(pk=urlpath[-1])
                return render_to_response('CrawlersTemplate/SingleView.html',
                                          {'MainObject':MainObject,
                                           'charts':mapapistatic(MainObject.rightAsc,
                                                                 MainObject.declination,
                                                                 MainObject.magnitudo),
                                           'similar': SimilarViewStatic(**{'type':MainObject.type_shortcut,
                                                                           'constellation': MainObject.constelation,
                                                                           'catalogue': MainObject.catalogues.first().object_catalogue,
                                                                           'pk': MainObject.pk})
                                           },
                                          )
            else:
                return None
16305354052 | from django.shortcuts import render,redirect
from driver.database import mongo_test
from django.http import HttpResponseRedirect
# Create your views here.
def dashboard(request):
    """Show the dashboard when the DB is reachable and a session cookie exists."""
    if not mongo_test():
        # Database unreachable — show the error page instead.
        return render(request, 'db_error.html')
    if not request.COOKIES.get('SessionAuth'):
        # No session cookie — send the visitor back to the login page.
        return redirect('/')
    return render(request, 'dashboard.html')
44360816727 | from base_analyzer import BaseAnalyzer
from models.protoss_csv import ProtossCSV
class ProtossAnalyzer(BaseAnalyzer):
    """Analyzer for Protoss replays; fills build-order columns in the CSV."""
    def __init__(self):
        self.csv = ProtossCSV()
        self.race = "protoss"
        self.csv_filename = f'{self.race}_all_replays.csv'
    def getBuildOrderRow(self, filename, playerName, buildOrder):
        """Record the frame of each build command into the first free
        "<command> N" column (N in 1..5) of the player's CSV row."""
        row = self.csv.data[filename][playerName]
        for cmd in buildOrder:
            for slot in range(1, 6):
                key = cmd['command'] + " " + str(slot)
                if key in row:
                    # Only overwrite the -1 sentinel; either way, stop at the
                    # first slot that exists for this command.
                    if row[key] == -1:
                        row[key] = cmd['frame']
                    break
39129519849 | import pandas as pd
import numpy as np
import string
import pickle
import math
def transform_sentences(sentences):
    """Normalise raw sentences: strip punctuation, trim, lowercase, collapse
    double spaces (two passes), then split on single spaces.

    Note: runs of more than four spaces can still leave empty tokens, same
    as the original two-pass replace.
    """
    table = str.maketrans('', '', string.punctuation)
    normalised = []
    for sentence in sentences:
        sentence = sentence.translate(table).strip().lower()
        sentence = sentence.replace('  ', ' ').replace('  ', ' ')
        normalised.append(sentence.split(' '))
    return normalised
def prepare_data(input_path):
    """Load the clean/corrupted sentence pairs from *input_path* (CSV with
    Clean_Text / Corrupted_Text columns) and return (corrupt, labels), where
    labels[i][j] == 1 iff token j of sentence i was corrupted.

    Pairs whose tokenisations differ in length are dropped; overlong
    sentences (and their labels) are chunked to at most 100 tokens.
    """
    df = pd.read_csv(input_path)
    df = df[['Clean_Text', 'Corrupted_Text']]
    clean = df['Clean_Text'].tolist()
    corrupt = df['Corrupted_Text'].tolist()
    del df
    clean = transform_sentences(clean)
    corrupt = transform_sentences(corrupt)
    # Token-level labels require aligned tokenisations, so keep only pairs
    # whose clean and corrupted versions have the same length.
    clean_lens = [len(c) for c in clean]
    corrupt_lens = [len(c) for c in corrupt]
    same_lens = [ix for ix, (clean_l, corrupt_l) in enumerate(zip(clean_lens, corrupt_lens))
                 if clean_l == corrupt_l]
    print(f'Pct of maintained sentences: {len(same_lens) / len(corrupt)}')
    clean = np.array(clean)[same_lens].tolist()
    corrupt = np.array(corrupt)[same_lens].tolist()
    # 1 where the word was altered by the corruption, 0 where unchanged.
    labels = [[1 if clean_word != corrupt_word else 0
               for clean_word, corrupt_word in zip(clean_sentence, corrupt_sentence)]
              for clean_sentence, corrupt_sentence in zip(clean, corrupt)]
    print(len(corrupt))
    print(len(labels))
    # Chunk sentences and labels identically so alignment is preserved.
    corrupt = shorten_sentences(corrupt, long_len=100)
    labels = shorten_sentences(labels, long_len=100)
    print(sum([len(c) for c in corrupt]) == sum([len(c) for c in labels]))
    return corrupt, labels
def len_pct(sentences,
            long_len = 100):
    """Print the fraction of sentences longer than *long_len* tokens.

    Robustness fix: an empty corpus previously raised ZeroDivisionError;
    it now reports 0.0.
    """
    if not sentences:
        print("Percentage of long sentences in dataset: 0.0")
        return
    lens = [len(sentence) for sentence in sentences]
    long_lens = [l for l in lens if l > long_len]
    print(f"Percentage of long sentences in dataset: {round(len(long_lens) / len(lens), 4)}")
def partition(lst, n):
    """Split *lst* into *n* contiguous chunks of near-equal length."""
    division = len(lst) / n
    # Same rounded boundaries as the original slice expressions.
    bounds = [round(division * i) for i in range(n + 1)]
    return [lst[bounds[i]:bounds[i + 1]] for i in range(n)]
def shorten_sentences(sentences,
                      long_len = 100):
    """Break every sentence longer than *long_len* into near-equal chunks.

    Short sentences come first (original order), followed by the chunks of
    the long ones.  Prints the long-sentence ratio before and after.
    """
    len_pct(sentences, long_len)
    kept = [sentence for sentence in sentences if len(sentence) <= long_len]
    chopped = []
    for sentence in sentences:
        if len(sentence) > long_len:
            chopped.extend(partition(sentence, math.ceil(len(sentence) / long_len)))
    sentences = kept + chopped
    len_pct(sentences, long_len)
    return sentences
# Corpus selection: Amazon reviews, "medium" corruption level.
source = 'amazon'
input_path = f'../../data/TypoDatasetCSV/{source}/medium/train.csv'
sentences, labels = prepare_data(input_path)
# Fix: write through context managers so the pickle files are flushed and
# closed deterministically (the original leaked bare open(...) handles).
with open(f'../../data/files_pickle/words_{source}_medium.pickle', 'wb') as words_file:
    pickle.dump(sentences, words_file)
with open(f'../../data/files_pickle/labels_{source}_medium.pickle', 'wb') as labels_file:
    pickle.dump(labels, labels_file)
| YeonwooSung/ai_book | Experiments/CV/ocr_with_bert/src/modules/typo_detection/dataset_ready.py | dataset_ready.py | py | 3,035 | python | en | code | 17 | github-code | 90 |
18583980499 | s = input().split()
# Sum of all i in [1, n] whose decimal digit sum lies in [a, b].
n = int(s[0])
a = int(s[1])
b = int(s[2])
def digit_sum(value):
    """Return the sum of the decimal digits of a non-negative int."""
    total = 0
    while value > 0:
        total += value % 10
        value //= 10
    return total
print(sum(i for i in range(1, n + 1) if a <= digit_sum(i) <= b))
16503039535 | import tensorflow as tf
import os
import cv2
import numpy as np
import random
from tqdm import tqdm
import io
import logging
from xml.dom import minidom
import tensorflow.gfile as tf_reader
from tensorflow.python.keras.preprocessing.image import img_to_array
def _bytes_feature(value):
    # Wrap raw bytes in a tf.train bytes feature for TFRecord serialisation.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _load_image(image_path, height, width, ann_dir):
    """Read an image, optionally crop it to its XML annotation box, resize
    to (width, height) and scale pixel values into [0, 1].

    Returns None (implicit) and prints a message if anything fails.
    """
    try:
        with tf_reader.GFile(image_path, 'rb') as fl:
            image_bytes = fl.read()
        image = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), -1)
        if ann_dir != "":
            # Fix: the original referenced an undefined name `path` here,
            # raising NameError whenever an annotation dir was supplied.
            image = _get_ann_images(image_path, image, ann_dir)
        image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
        im = np.array(img_to_array(image) / 255.)
        return im
    except Exception as e:
        print("Error Processing Image: %s\n %s" % (image_path, str(e)))
        return
def _get_ann_images(filepath, img_arr, ann_dir):
    """Crop *img_arr* to the bounding box from its XML annotation.

    The annotation is looked up as <ann_dir>/<breed_folder>/<image_name>
    (image name without extension), mirroring the image directory layout.
    """
    image_name = (filepath.split('/')[-1]).split(".")[-2]
    breed_folder = filepath.split('/')[-2]
    ann_filepath = os.path.join(ann_dir, breed_folder, image_name)
    ann_xml = minidom.parse(ann_filepath)
    # First xmin/ymin/xmax/ymax tags in the file win; any additional
    # objects in the annotation are ignored.
    xmin = int(ann_xml.getElementsByTagName('xmin')[0].firstChild.nodeValue)
    ymin = int(ann_xml.getElementsByTagName('ymin')[0].firstChild.nodeValue)
    xmax = int(ann_xml.getElementsByTagName('xmax')[0].firstChild.nodeValue)
    ymax = int(ann_xml.getElementsByTagName('ymax')[0].firstChild.nodeValue)
    new_img_arr = img_arr[ymin:ymax, xmin:xmax, :]
    return new_img_arr
def build_example_list_tf(input_dir, seed):
    """Scan <input_dir>/<class>/<file> and return (shuffled examples, labels).

    Each example is {'classname', 'path', 'label'}; integer labels are
    assigned in directory-listing order, and *labels* maps label -> name.
    The shuffle is seeded for reproducible splits.
    """
    examples = []
    labels = {}
    class_cnt = 0
    for classname in tf.gfile.ListDirectory(input_dir):
        # GCS-style listings may include a trailing slash on directories.
        if classname.endswith("/"):
            classname = classname[:-1]
        class_dir = os.path.join(input_dir, classname)
        if tf.gfile.IsDirectory(class_dir):
            for filename in tf.gfile.ListDirectory(class_dir):
                filepath = os.path.join(class_dir, filename)
                example = {
                    'classname': classname,
                    'path': filepath,
                    'label': class_cnt
                }
                examples.append(example)
            labels[class_cnt] = classname
            class_cnt = class_cnt + 1
    random.seed(seed)
    random.shuffle(examples)
    return examples, labels
def get_example_share(examples, train_split):
    """Split *examples* into [train, test], the first *train_split* fraction
    going to train."""
    cutoff = int(len(examples) * train_split)
    return np.split(examples, [cutoff])
def split_list(tar_list, wanted_parts=1):
    """Split *tar_list* into *wanted_parts* contiguous slices whose sizes
    differ by at most one."""
    length = len(tar_list)
    return [tar_list[(length * i) // wanted_parts:(length * (i + 1)) // wanted_parts]
            for i in range(wanted_parts)]
def _write_tf_records(examples, output_filename, image_dims, ann_dir):
    """Serialise *examples* into one TFRecord file of (filename, image,
    label, classname) features.

    Per-example failures are printed and swallowed so a single bad image
    does not abort the whole export; images that fail to load are skipped.
    """
    writer = tf.python_io.TFRecordWriter(output_filename)
    cnt = 0
    for example in tqdm(examples):
        try:
            cnt += 1
            image = _load_image(example['path'], height=image_dims[0], width=image_dims[1], ann_dir=ann_dir)
            if image is not None:
                # Raw float array bytes; the reader must know dims/dtype.
                im_str = image.tostring()
                g_label = example['label']
                tf_example = tf.train.Example(features = tf.train.Features(feature = {
                    'filename': tf.train.Feature(bytes_list = tf.train.BytesList(value = [example['path'].encode('utf-8')])),
                    'image': tf.train.Feature(bytes_list = tf.train.BytesList(value = [im_str])),
                    'label': tf.train.Feature(int64_list = tf.train.Int64List(value = [g_label])),
                    'classname': tf.train.Feature(bytes_list = tf.train.BytesList(value = [example['classname'].encode('utf-8')]))
                }))
                writer.write(tf_example.SerializeToString())
        except Exception as e:
            print(e)
            pass
    writer.close()
# not being used
def _write_sharded_tfrecords(examples, num_shards, output_dir, image_dims, is_training=True, ann_dir=""):
    """Write *examples* across *num_shards* numbered TFRecord files."""
    # Fix: the original called the undefined name `_split_list` (the helper
    # above is `split_list`), so this always raised NameError.
    sharded_examples = split_list(examples, num_shards)
    for count, shard in tqdm(enumerate(sharded_examples, start=1)):
        output_filename = '{0}_{1:02d}_of_{2:02d}.tfrecord'.format('train' if is_training else 'test', count, num_shards)
        out_filepath = os.path.join(output_dir, output_filename)
        # Fix: _write_tf_records requires an ann_dir argument; surfaced here
        # as a new keyword with a backward-compatible default.
        _write_tf_records(shard, out_filepath, image_dims, ann_dir)
18455705159 | import sys
input = lambda: sys.stdin.readline().rstrip()
from collections import defaultdict
from itertools import accumulate
# Choose K sushi maximising total deliciousness + (distinct types)^2.
N, K = map(int, input().split())
sushi = defaultdict(list)
for i in range(N):
    t, d = map(int, input().split())
    sushi[t].append(d)
# `first` holds the best item of each type (candidates for new types);
# `second` holds every remaining item (pure deliciousness filler).
first = []
second = []
for key in sushi:
    sushi[key].sort(reverse=True)
    first.append(sushi[key][0])
    second += sushi[key][1:]
first.sort(reverse=True)
second.sort(reverse=True)
# Prefix sums so "top i of first + top K-i of second" is O(1) per i.
first_cum = [0] + list(accumulate(first))
second_cum = [0] + list(accumulate(second))
ans = 0
# Try every feasible count i of distinct types; bonus term is i*i.
for i in range(1, K+1):
    if K - i <= len(second_cum)-1 and i <= len(first_cum)-1:
        ans = max(ans, first_cum[i] + second_cum[K-i] + i*i)
print(ans)
40342096449 | import socket
import threading
import struct
import time
def encode_servers_dict():
    """Serialise the global SERVERS_ADDRESSES map as b"port:addr\\0port:addr..."."""
    # str.join replaces the original build-then-trim string loop (quadratic
    # concatenation plus a [:-1] slice to drop the trailing separator).
    return '\0'.join('%s:%s' % (port, addr)
                     for port, addr in SERVERS_ADDRESSES.items()).encode()
def decode_servers_dict(encoded_dict):
    """Parse a server-list message: skip the 6-byte header, then split the
    NUL-separated "port:addr" pairs into {port: addr}."""
    body = encoded_dict.decode("utf-8")[6:].replace('\0', ',')
    return {int(entry.split(':')[0]): entry.split(':')[1]
            for entry in body.split(',')}
def encode_users_list():
    """Serialise the global USERS list as b"name\\0name..."."""
    # str.join replaces the original build-then-trim loop ([:-1] slice).
    return '\0'.join(USERS).encode()
def decode_users_list(encoded_list):
    """Parse a user-list message: skip the 6-byte header and split the
    NUL-separated user names into a list."""
    return encoded_list.decode("utf-8")[6:].replace('\0', ',').split(',')
def broadcast(data):
    """Forward a routed message (type 4) to every known peer server.

    Per-server send failures are printed and ignored so one dead peer
    does not stop the broadcast.
    """
    data = data.encode()
    msg_type = 4
    sub_type = 0
    sub_len = 0
    msg_len = len(data)
    for server in SERVERS_ADDRESSES:
        if server != 0:
            try:
                print("broadcast msg to Server [" + str(SERVERS_PORTS.index(server)) + "] ")
                conn_socket = SERVERS_SOCKETS[server]
                conn_socket.send(struct.pack(">bbhh{}s".format(msg_len), msg_type, sub_type, msg_len, sub_len, data))
            except Exception as e:
                print(e)
def send_msg_to_user(sender, rcv, msg):
    """Deliver a chat message (type 3) to a locally connected user."""
    msg = "[{0}:\0{1}]".format(sender, msg).encode()
    msg_type = 3
    sub_type = 0
    msg_len = len(msg)
    sub_len = 0
    # NOTE(review): respond_to_client registers sockets under
    # client_address[1] (the peer port), but this looks up by user name —
    # one of the two keyings looks wrong; verify against the client code.
    conn_socket = USERS_ADDRESSES[rcv]
    data = struct.pack('>bbhh{}s'.format(msg_len), msg_type, sub_type, msg_len, sub_len, msg)
    conn_socket.send(data)
def respond_to_client(conn_socket, client_address, client_name="client_name"):
    """Per-connection message loop (one thread per peer/client).

    Wire format: 6-byte big-endian header (msg_type:int8, sub_type:int8,
    msg_len:int16, sub_len:int16) optionally followed by msg_len payload
    bytes.  Message types handled:
      0 - info request (sub 0: server list, sub 1: user list) -> reply type 1
      2 - registration (sub 0: peer server, sub 1: user)
      3 - chat message "sender\\0msg\\0rcv"; deliver locally or broadcast
      4 - broadcast received from a peer; deliver if the target is local
      5 - RTT probe; echo the header back
      6 - close the connection and end this thread
    """
    while True:
        try:
            header = conn_socket.recv(6)
            msg_type, sub_type, msg_len, sub_len = struct.unpack('>bbhh', header)
        except struct.error as se:
            # Short/empty read — keep waiting for a complete header.
            continue
        if msg_type == 0:
            if sub_type == 0:
                msg_type = 1
                sub_len = 0
                data = encode_servers_dict()
                msg_len = len(data)
                conn_socket.send(struct.pack(">bbhh{}s".format(msg_len), msg_type, sub_type, msg_len, sub_len, data))
            elif sub_type == 1:
                msg_type = 1
                sub_len = 0
                data = encode_users_list()
                msg_len = len(data)
                conn_socket.send(struct.pack(">bbhh{}s".format(msg_len), msg_type, sub_type, msg_len, sub_len, data))
        elif msg_type == 2:
            if sub_type == 0:
                # Peer server registration, keyed by its source port.
                print("Server [" + str(SERVERS_PORTS.index(client_address[1])) + "] been added")
                SERVERS_ADDRESSES[client_address[1]] = '127.0.0.1'
                SERVERS_SOCKETS[client_address[1]] = conn_socket
            elif sub_type == 1:
                # User registration: payload is the user name; greet back.
                try:
                    data = conn_socket.recv(msg_len).decode()
                    USERS.append(data)
                    USERS_ADDRESSES[client_address[1]] = conn_socket
                    print("User " + data + " been added")
                    data = ("Hello " + data).encode()
                    msg_len = len(data)
                    conn_socket.send(struct.pack(">bbhh{}s".format(msg_len), msg_type, 0, msg_len, sub_len, data))
                except Exception as e:
                    print(e)
        elif msg_type == 3:
            data = conn_socket.recv(msg_len).decode()
            sender, msg, rcv = data.split('\0')
            if rcv in USERS:
                send_msg_to_user(sender, rcv, msg)
            else:
                # Target not local: flood to peer servers to find them.
                broadcast(data)
        elif msg_type == 4:
            sender, msg, rcv = conn_socket.recv(msg_len).decode().split('\0')
            print("beed broadcasted = " + str(msg))
            if rcv in USERS:
                send_msg_to_user(sender, rcv, msg)
        elif msg_type == 5:
            # RTT probe: echo the raw header straight back.
            conn_socket.send(header)
            print("rtt check")
        elif msg_type == 6:
            print("Closing connection with: " + str(client_address))
            conn_socket.close()
            return
def listen():
    """Accept loop: spawn one respond_to_client thread per connection."""
    while True:
        conn, client_address = sock.accept()
        threading.Thread(target=respond_to_client, args=(conn, client_address)).start()
def connect_to_server(sock, port):
    """Announce this server (type 2/sub 0) to a peer over *sock* and record
    the socket, unless the port is 0 or already registered."""
    if port == 0 or port in SERVERS_SOCKETS:
        return
    try:
        sock.send(struct.pack('>bbhh', 2, 0, 0, 0))
        SERVERS_SOCKETS[port] = sock
        print("Server [" + str(SERVERS_PORTS.index(port)) + "] been added")
    except Exception as e:
        print(e)
def init_connection_with_server(other_index):
    """Bootstrap state from peer server *other_index*.

    Connects from this server's own port, registers with the peer, then
    pulls the peer's server map and user list and merges both into the
    globals.  Any failure is reported and the peer is skipped.
    """
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        # SO_REUSEADDR lets us bind our well-known port for the outgoing
        # connection even while the listener holds it.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('127.0.0.1', SERVERS_PORTS[my_index]))
        sock.connect(('127.0.0.1', SERVERS_PORTS[other_index]))
        if SERVERS_PORTS[other_index] not in SERVERS_SOCKETS:
            sock.send(struct.pack('>bbhh', 2, 0, 0, 0))
            SERVERS_SOCKETS[SERVERS_PORTS[other_index]] = sock
            print("Server [" + str(other_index) + "] been added")
        # servers
        data = struct.pack('>bbhh', 0, 0, 0, 0)
        sock.send(data)
        servers = decode_servers_dict(sock.recv(1024))
        print('server [' + str(other_index) + '] reply: ', servers)
        global SERVERS_ADDRESSES
        SERVERS_ADDRESSES[SERVERS_PORTS[other_index]] = '127.0.0.1'
        SERVERS_ADDRESSES.update(servers)
        for port in SERVERS_ADDRESSES:
            connect_to_server(sock, port)
        # clients
        data = struct.pack('>bbhh', 0, 1, 0, 0)
        sock.send(data)
        users = decode_users_list(sock.recv(1024))
        print('server [' + str(other_index) + '] reply: ', users)
        global USERS
        USERS = list(set(USERS + users))
    except Exception as e:
        print('server ' + str(other_index) + ' did not respond')
# Fixed port assignments for the five-server mesh; index selects identity.
SERVERS_PORTS = [5555, 6666, 7777, 8888, 9999]
SERVERS_ADDRESSES = {}   # port -> address of known peer servers
SERVERS_SOCKETS = {}     # port -> open socket to that peer
USERS = []               # names of users known across the mesh
USERS_ADDRESSES = {}     # registration key -> socket of a local user
my_index = int(input("Enter server index [0-4]: "))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.bind(('0.0.0.0', SERVERS_PORTS[my_index]))
sock.listen()
# Accept connections in the background while we dial the other peers.
threading.Thread(target=listen).start()
# connect to all other servers
for index in range(5):
    if index != my_index:
        threading.Thread(target=init_connection_with_server, args=([index])).start()
# Give the bootstrap threads time to finish before printing the state.
time.sleep(4)
print("SERVERS = " + str(SERVERS_ADDRESSES))
print("USERS = " + str(USERS))
| RaphaelBenoliel/CCNetworks | task_4/Server.py | Server.py | py | 6,615 | python | en | code | 0 | github-code | 90 |
17970512549 | H,W=map(int,input().split())
N=int(input())
A=list(map(int,input().split()))
# Map colour k (1-based) to the number of cells it must fill.
A_dic={i+1:A[i] for i in range(N)}
Squares=[[0 for i in range(W)] for j in range(H)]
# Boustrophedon (snake) fill: left-to-right, then right-to-left on the
# next row, flipping direction each time a row completes.
step_w=1
w,h,cnt=0,0,0
for k,v in A_dic.items():
    for i in range(v):
        cnt+=1
        Squares[h][w]=str(k)
        if cnt%W==0:
            # Row full: move down and reverse the horizontal direction.
            h+=1
            step_w*=(-1)
        else:
            w+=step_w
for l in Squares:
    print(' '.join(l))
18021859269 | N,Ma,Mb=map(int,input().split())
abc=[]
INF=float("inf")
for i in range(N):
    a,b,c=map(int,input().split())
    abc.append((a,b,c))
# dp[i][x][y] = min cost to reach exactly x units of A and y of B using the
# first i packages; `stack` tracks the reachable (x, y) states so only they
# are relaxed (totals are bounded by 400 per the problem constraints).
stack=set([(0,0)])
dp=[[[INF]*(401) for j in range(401)] for i in range(N+1)]
dp[0][0][0]=0
for i in range(N):
    dp[i+1][0][0]=0
    a,b,c=abc[i]
    stack_=set()
    for x,y in stack:
        # Skip package i, or take it (whichever is cheaper).
        dp[i+1][x][y]=min(dp[i][x][y],dp[i+1][x][y])
        dp[i+1][x+a][y+b]=min(dp[i+1][x+a][y+b],dp[i][x+a][y+b],dp[i][x][y]+c)
        stack_.add((x+a,y+b))
        stack_.add((x,y))
    stack=stack_
# Any (Ma*k, Mb*k) mixture has the required A:B ratio; take the cheapest.
k=1
ans=INF
while(Ma*k<=400 and Mb*k<=400):
    ans=min(dp[N][Ma*k][Mb*k],ans)
    k+=1
if ans==INF:
    print(-1)
else:
    print(ans)
# A quick and simple walkthrough of some file-handling techniques in Python.
# https://www.w3schools.com/python/python_ref_file.asp
# See also the Python documentation:
# https://docs.python.org/3/library/io.html
# Write four lines to fil.txt ("w" truncates any existing content).
dokument = open("fil.txt", "w")
dokument.write("Dette er linje 1\n")
dokument.write("Dette er linje 2\n")
dokument.write("Dette er linje 3\n")
dokument.write("Dette er linje 4\n")
dokument.close()
# read(): the whole file as one string.
dokument = open("fil.txt", "r")
print("dokument.read():")
print(dokument.read())
dokument.close()
# readline(): just the first line.
dokument = open("fil.txt", "r")
print("dokument.readline():")
print(dokument.readline())
dokument.close()
# readlines(hint): lines until ~hint characters have been read.
# NOTE(review): the label says readlines([0,2]) but the call passes 1 —
# with hint=1 only the first line is returned.
dokument = open("fil.txt", "r")
print("dokument.readlines([0,2]):")
print(dokument.readlines(1))
dokument.close()
9404737327 | from app.models import base
from sqlalchemy import (
Column,
String,
INTEGER,
)
class UserTeam(base):
    """Association row linking a user to a team with an integer role code."""

    __tablename__ = 'user-teams'
    prefix = 'UT'  # unid prefix for rows of this table (presumably consumed by base.init)

    user_unid = Column(String(34))
    team_unid = Column(String(34))
    member_type = Column(INTEGER)  # role code; see member_mappings below

    # Human-readable names for the member_type codes.
    member_mappings = {
        1: 'Participant',
        2: 'Team Captain'
    }

    def __init__(self, user_unid, team_unid, member_type):
        self.user_unid = user_unid
        self.team_unid = team_unid
        self.member_type = member_type
        self.init()

    @classmethod
    def get_user_team_by_user_and_team(cls, user_unid, team_unid):
        """Return the single membership row for (user, team), if any."""
        return UserTeam.get_single(user_unid=user_unid, team_unid=team_unid)

    @classmethod
    def add_user_to_team(cls, user_unid, team_unid, member_type):
        """Create and return a new membership row."""
        return UserTeam(user_unid, team_unid, member_type)

    @classmethod
    def get_user_unids_by_team(cls, team_unid):
        """Return the unids of every user on the given team."""
        users = UserTeam.get_list(team_unid=team_unid).all()
        return [u.user_unid for u in users]

    @classmethod
    def get_users_teams_by_team(cls, team_unid):
        """Return every membership row for the given team."""
        return UserTeam.get_list(team_unid=team_unid).all()

    @classmethod
    def get_team_by_user(cls, user_unid):
        """Return one membership row for the user.

        NOTE(review): assumes a user belongs to at most one team — confirm.
        """
        return UserTeam.get_single(user_unid=user_unid)

    @classmethod
    def get_oldest_team_member(cls, team_unid):
        """Return the earliest-created membership row on the team."""
        users = UserTeam.get_list(team_unid=team_unid)
        return UserTeam.oldest(users).first()
| mitchfriedman/SatedSolutions | app/models/user_team.py | user_team.py | py | 1,446 | python | en | code | 1 | github-code | 90 |
18116458499 | import math
n = int(input())
def three(p1x, p1y, p2x, p2y):
    """Return the three Koch subdivision points [sx, sy, ux, uy, tx, ty].

    s and t trisect the segment p1->p2; u is the apex of the equilateral
    bump, obtained by rotating the vector s->t by +60 degrees about s.
    """
    dx, dy = p2x - p1x, p2y - p1y
    sx, sy = p1x + dx / 3, p1y + dy / 3
    tx, ty = p2x - dx / 3, p2y - dy / 3
    rad = math.radians(60)
    cos60, sin60 = math.cos(rad), math.sin(rad)
    vx, vy = tx - sx, ty - sy
    ux = sx + (vx * cos60 - vy * sin60)
    uy = sy + (vx * sin60 + vy * cos60)
    return [sx, sy, ux, uy, tx, ty]
def koch(p1x, p1y, p2x, p2y, n):
    """Recursively print the Koch-curve vertices from p1 toward p2 at depth n.

    Each call emits only its starting point (at depth 0); the final endpoint
    is printed separately by the top-level caller.
    """
    if 0 == n:
        # Base case: emit this segment's start point.
        print('{:.8f} {:.8f}'.format(p1x, p1y))
    if 0 < n:
        # Split the segment at the two trisection points (s, t) and the
        # equilateral apex u, then recurse over the four pieces left to right.
        sx, sy, ux, uy, tx, ty = three(p1x, p1y, p2x, p2y)
        koch(p1x, p1y, sx, sy, n - 1)
        koch(sx, sy, ux, uy, n - 1)
        koch(ux, uy, tx, ty, n - 1)
        koch(tx, ty, p2x, p2y, n - 1)
koch(0.00000000, 0.00000000, 100.00000000, 0.00000000, n)
print('{:.8f} {:.8f}'.format(100.00000000, 0.00000000)) | Aasthaengg/IBMdataset | Python_codes/p02273/s691779051.py | s691779051.py | py | 874 | python | en | code | 0 | github-code | 90 |
73067927338 | import unittest
from fimutil.netam.nso import NsoClient
from fimutil.netam.sr_pce import SrPceClient
from fimutil.netam.arm import NetworkARM
class NetAmTest(unittest.TestCase):
    """Smoke tests for the NSO / SR-PCE clients and the network ARM builder.

    NOTE(review): these talk to live services and write to /tmp, so they are
    integration tests rather than isolated unit tests.
    """

    def setUp(self) -> None:
        pass

    @unittest.skip
    def testNsoClient(self):
        # Enumerate devices and fetch the ISIS interfaces of each one.
        nso = NsoClient()
        devs = nso.devices()
        for dev in devs:
            dev_name = dev['name']
            ifaces = nso.isis_interfaces(dev_name)
            l = len(ifaces)

    def testSrPceClient(self):
        sr_pce = SrPceClient()
        sr_pce.get_topology_json()
        links_json = sr_pce.get_ipv4_links()
        # Links are reported directionally, so they should come in pairs.
        assert len(links_json) >= 1 and len(links_json) % 2 == 0

    def testBuildNetworkARM(self):
        # Build, delegate, and serialize the aggregated resource model.
        arm = NetworkARM()
        arm.build_topology()
        arm.delegate_topology("primary")
        arm.write_topology(file_name="/tmp/network-arm.graphml")

    def testBuildNetworkARMwithISISvalidattion(self):
        # Same build, but cross-checking links against live ISIS state.
        arm = NetworkARM(isis_link_validation=True)
        arm.build_topology()
        arm.delegate_topology("primary")
        arm.write_topology(file_name="/tmp/network-arm.graphml")
| fabric-testbed/information-model-utils | test/netam_test.py | netam_test.py | py | 1,114 | python | en | code | 2 | github-code | 90 |
from setuptools import setup, find_packages

# Release version; also used to derive the GitHub download URL below.
__version__ = '1.0.0'
url = 'https://github.com/rusty1s/pytorch_geometric'

# Runtime dependencies. NOTE(review): torch itself is not listed here,
# presumably because it must be installed separately — confirm.
install_requires = [
    'numpy',
    'scipy',
    'networkx',
    'plyfile',
]
setup_requires = ['pytest-runner']
tests_require = ['pytest', 'pytest-cov']

setup(
    name='torch_geometric',
    version=__version__,
    description='Geometric Deep Learning Extension Library for PyTorch',
    author='Matthias Fey',
    author_email='matthias.fey@tu-dortmund.de',
    url=url,
    # Tarball of the tagged release matching __version__.
    download_url='{}/archive/{}.tar.gz'.format(url, __version__),
    keywords=[
        'pytorch', 'geometric-deep-learning', 'graph', 'mesh',
        'neural-networks', 'spline-cnn'
    ],
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    packages=find_packages())
| Cyanogenoid/fspool | graphs/setup.py | setup.py | py | 827 | python | en | code | 44 | github-code | 90 |
import sys
from collections import defaultdict, deque
import bisect
from heapq import *
from math import factorial, ceil, floor

sys.setrecursionlimit(200000)
input = sys.stdin.readline

# (Commented-out input templates kept from the author's contest scaffold.)
# N, M, = map(int, input().split())
# N = int(input())
# L = [int(v) for v in input().split()]
# L = [[int(v) for v in input().split()] for _ in range(N)]
# L = [int(input()) for _ in range(N)]
# L = [list(input().strip()) for _ in range(N)]
# S = input().strip()
N, K, = map(int, input().split())
# m[r] = how many of 1..N have residue r mod K.
m = [0] * K
for v in range(1, N + 1):
    m[v % K] += 1
# Count ordered triples (a, b, c) from 1..N whose pairwise sums a+b, b+c, c+a
# are all divisible by K: fixing a ≡ v (mod K) forces b ≡ c ≡ K-v, and the
# remaining constraint is that b + c is itself 0 mod K.
ans = 0
for v in range(1, K + 1):
    b = (K - v) % K
    c = (K - v) % K
    if (b + c) % K != 0:
        continue
    ans += m[v % K] * m[b] * m[c]
print(ans)
import cv2
import os

# Contrast Limited Adaptive Histogram Equalization with 16x16 tiles.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(16, 16))
basedir = 'chest_xray'
# Walk chest_xray/<split>/<class>/<image> and write the equalized copy into
# the mirrored tree under clahe_chest_xray/ (NOTE(review): the destination
# directories must already exist — cv2.imwrite does not create them).
for subdir in os.listdir(basedir):
    for _class in os.listdir(os.path.join(basedir,subdir)):
        for image_dir in os.listdir(os.path.join(basedir,subdir,_class)):
            image_path = os.path.join(basedir,subdir,_class,image_dir)
            image = cv2.imread(image_path,0)  # 0 = load as grayscale
            img_clahe = clahe.apply(image)
            cv2.imwrite(os.path.join('clahe_chest_xray',subdir,_class,image_dir),img_clahe)
15802221825 | # -*- coding: utf-8 -*-
"""
606. Construct String from Binary Tree
You need to construct a string consists of parenthesis and integers from a binary tree with the preorder
traversing way.
The null node needs to be represented by empty parenthesis pair "()".
And you need to omit all the empty parenthesis pairs that don't affect the one-to-one mapping relationship between
the string and the original binary tree.
"""
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def tree2str(self, t: "TreeNode") -> str:
        """Serialize *t* to its preorder parenthesized string.

        An empty "()" is kept for a missing left child when a right child
        exists (required for a one-to-one mapping); every other empty
        parenthesis pair is omitted.
        """
        if t is None:
            return ""
        left = self.tree2str(t.left)
        right = self.tree2str(t.right)
        if right:
            return f"{t.val}({left})({right})"
        if left:
            return f"{t.val}({left})"
        return str(t.val)
| tjyiiuan/LeetCode | solutions/python3/problem606.py | problem606.py | py | 996 | python | en | code | 0 | github-code | 90 |
29037549150 | #
# Nathan Lay
# AI Resource at National Cancer Institute
# National Institutes of Health
# January 2021
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import argparse
import random
import pickle
import numpy as np
import hashlib
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from RandomHingeForest import RandomHingeForest, RandomHingeFern
from deterministic import set_deterministic
import datasets
class Net(nn.Module):
    """Linear feature map -> batch norm -> hinge forest/fern -> linear vote.

    `forestType` is RandomHingeForest or RandomHingeFern; the single output
    is a scalar regression value.
    """

    def __init__(self, forestType, numTrees, depth):
        super(Net, self).__init__()
        # 8 input features projected to a 100-dim representation.
        self.features = nn.Linear(in_features=8, out_features=100, bias=False)
        self.bn = nn.BatchNorm1d(num_features=100, affine=False)
        self.forest= forestType(in_channels=100, out_channels=numTrees, depth=depth)
        # Learned weighted aggregation over the trees' outputs.
        self.agg = nn.Linear(in_features=numTrees, out_features=1)

    def forward(self, x):
        x = self.features(x)
        x = self.forest(self.bn(x))
        x = self.agg(x)
        return x
def seed(seedStr):
    """Derive a deterministic 32-bit seed from *seedStr* and seed all RNGs."""
    digest = hashlib.md5(seedStr.encode("utf-8")).hexdigest()
    value = int(digest[24:], 16)  # low 8 hex digits -> fits in 32 bits

    random.seed(value)
    np.random.seed(value)  # Bad way to do this!
    torch.manual_seed(value)
def shuffle(data, target, numTrain):
    """Randomly split (data, target) into numTrain train rows and the rest.

    Returns (xtrain, ytrain, xtest, ytest), or None when target is empty or
    numTrain is out of range.
    """
    n = target.size
    if n <= 0 or not (0 <= numTrain <= n):
        return None

    order = np.arange(n)
    np.random.shuffle(order)

    train_idx = order[:numTrain]
    test_idx = order[numTrain:]

    return data[train_idx, :], target[train_idx], data[test_idx, :], target[test_idx]
def batches(x, y, batchSize):
    """Yield consecutive (x, y) mini-batches of exactly batchSize rows.

    A trailing partial batch is padded by wrapping around to rows from the
    front of the tensors, so every yielded batch has batchSize rows.
    """
    full = x.shape[0] // batchSize
    for b in range(full):
        lo = b * batchSize
        hi = lo + batchSize
        yield x[lo:hi, :], y[lo:hi]

    tail = x.shape[0] - full * batchSize
    if tail:
        pad = batchSize - tail
        yield (torch.cat((x[-tail:, :], x[:pad, :]), dim=0),
               torch.cat((y[-tail:], y[:pad])))
def train(snapshotroot, device, forestType, numTrees, depth):
    """Train one model on the abalone regression task.

    Snapshots the weights after every epoch under *snapshotroot*, reloads the
    epoch with the lowest validation loss, and returns
    (test MSE, test R^2, per-epoch validation losses).
    """
    xtrain, ytrain, xtest, ytest = datasets.load_abalone_reg()

    # Carve a validation set out of the training data (2089 train / rest val).
    xtrain, ytrain, xval, yval = shuffle(xtrain, ytrain, 2089)

    #xval = xtrain[2089:]
    #yval = ytrain[2089:]

    #xtrain = xtrain[:2089]
    #ytrain = ytrain[:2089]

    # Column-vector targets for nn.MSELoss.
    ytrain = np.reshape(ytrain, [-1, 1])
    yval = np.reshape(yval, [-1, 1])
    ytest = np.reshape(ytest, [-1, 1])

    # Transfer this data to the device
    xtrain = torch.from_numpy(xtrain).type(torch.float32).to(device)
    ytrain = torch.from_numpy(ytrain).type(torch.float32).to(device)
    xval = torch.from_numpy(xval).type(torch.float32).to(device)
    yval = torch.from_numpy(yval).type(torch.float32).to(device)
    xtest = torch.from_numpy(xtest).type(torch.float32).to(device)
    ytest = torch.from_numpy(ytest).type(torch.float32).to(device)

    net = Net(forestType, numTrees, depth).to(device)

    criterion = nn.MSELoss().to(device)
    optimizer = optim.Adam(net.parameters(), lr = 0.001)

    numEpochs=1000
    batchSize=200

    indices = [ i for i in range(xtrain.shape[0]) ]

    bestEpoch=numEpochs-1
    bestLoss=1000.0
    valLosses = np.zeros([numEpochs])

    for epoch in range(numEpochs):
        # Reshuffle the training rows each epoch.
        random.shuffle(indices)

        xtrain = xtrain[indices, :]
        ytrain = ytrain[indices]

        runningLoss = 0.0
        count = 0
        for xbatch, ybatch in batches(xtrain, ytrain, batchSize):
            optimizer.zero_grad()

            outputs = net(xbatch)
            loss = criterion(outputs, ybatch)

            loss.backward()

            optimizer.step()

            runningLoss += loss
            count += 1

        meanLoss = runningLoss/count

        # Snapshot this epoch's weights so the best one can be reloaded later.
        snapshotFile = os.path.join(snapshotroot, f"epoch_{epoch}")
        torch.save(net.state_dict(), snapshotFile)

        runningLoss = 0.0
        count = 0

        # Validation pass: eval mode, no gradients, whole set as one batch.
        with torch.no_grad():
            net.train(False)
            #for xbatch, ybatch in batches(xval, yval, batchSize):
            for xbatch, ybatch in zip([xval], [yval]):
                outputs = net(xbatch)
                loss = criterion(outputs, ybatch)

                runningLoss += loss
                count += 1

            net.train(True)

        valLoss = runningLoss / count

        if valLoss < bestLoss:
            bestLoss = valLoss
            bestEpoch = epoch

        valLosses[epoch] = valLoss

        #print(f"Info: Epoch = {epoch}, loss = {meanLoss}, validation loss = {valLoss}")

    # Reload the snapshot with the lowest validation loss and score the test set.
    snapshotFile = os.path.join(snapshotroot, f"epoch_{bestEpoch}")

    net = Net(forestType, numTrees, depth)
    net.load_state_dict(torch.load(snapshotFile, map_location="cpu"))
    net = net.to(device)

    runningLoss = 0.0
    count = 0

    with torch.no_grad():
        net.train(False)
        #for xbatch, ybatch in batches(xtest, ytest, batchSize):
        for xbatch, ybatch in zip([xtest], [ytest]):
            outputs = net(xbatch)
            loss = criterion(outputs, ybatch)

            runningLoss += loss
            count += 1

    testLoss = float(runningLoss) / float(count)
    # R^2 = 1 - MSE / Var(y_test).
    testR2 = 1.0 - testLoss / ytest.var()

    print(f"Info: Best epoch = {bestEpoch}, test loss = {testLoss}, test R2 = {testR2}", flush=True)

    return testLoss, testR2, valLosses
def main(device, **kwargs):
    """Grid-search forest type x tree count x depth; 100 seeded runs each.

    Per-configuration aggregate results are pickled under abalone_reg/.
    """
    snapshotroot = "abalone_reg"

    if not os.path.exists(snapshotroot):
        os.mkdir(snapshotroot)

    set_deterministic(True)

    numExperiments = 100

    #torch.autograd.set_detect_anomaly(True)

    for forestType in [ RandomHingeForest, RandomHingeFern ]:
    #for forestType in [ RandomHingeForest ]:
        forestTypeName = "RandomHingeForest"
        if forestType == RandomHingeFern:
            forestTypeName = "RandomHingeFern"

        for numTrees in [ 1, 10, 50, 100 ]:
        #for numTrees in [ 100 ]:
            for depth in [ 1, 3, 5, 7, 10 ]:
            #for depth in [ 10 ]:
                print(f"Info: Running {forestTypeName}, numTrees = {numTrees}, depth = {depth} ...", flush=True)
                pickleFileName=os.path.join(snapshotroot, f"{forestTypeName}_{numTrees}_{depth}.pkl")
                allTestLosses = np.zeros(numExperiments, dtype=np.float32)
                allTestR2 = np.zeros(numExperiments, dtype=np.float32)
                allValLosses = []

                for i in range(numExperiments):
                    #snapshotdir = os.path.join(snapshotroot, forestTypeName, str(numTrees), str(depth), str(i))
                    # All runs share one snapshot dir; epoch files are overwritten per run.
                    snapshotdir = snapshotroot

                    if not os.path.exists(snapshotdir):
                        os.makedirs(snapshotdir)

                    # Re-seed all RNGs so each experiment index is reproducible.
                    seed(f"abalone{i}")

                    print(f"Training {snapshotdir} ...", flush=True)
                    testLoss, testR2, valLosses = train(snapshotroot=snapshotdir, device=device, forestType=forestType, numTrees=numTrees, depth=depth)

                    allTestLosses[i] = testLoss
                    allTestR2[i] = testR2
                    allValLosses.append(valLosses)

                print(f"Info: Mean test loss = {allTestLosses.mean()}, std = {allTestLosses.std()}, mean test R2 = {allTestR2.mean()}, std = {allTestR2.std()}", flush=True)
                print(f"Info: Saving results to {pickleFileName} ...", flush=True)

                with open(pickleFileName, "wb") as f:
                    pickle.dump({"allTestLosses": allTestLosses, "allTestR2": allTestR2, "allValLosses": allValLosses, "depth": depth, "numTrees": numTrees, "forestTypeName": forestTypeName}, f)
if __name__ == "__main__":
    # Command-line entry point: only the torch device is configurable.
    parser = argparse.ArgumentParser(description="abalone_reg experiment")
    parser.add_argument("--device", type=str, default="cpu", help="Torch device name to train/test (e.g. cuda:1)")

    args = parser.parse_args()

    main(**vars(args))
| nslay/HingeTreeForTorch | experiments/run_abalone_reg.py | run_abalone_reg.py | py | 9,312 | python | en | code | 0 | github-code | 90 |
from collections import deque

l = [2,-1,-7,8,-15,30,24,6]

def printFN(l,k):
    """Print the first negative number of every length-k window of l (0 if none).

    NOTE(review): assumes len(l) >= k; a shorter list raises IndexError.
    """
    # dq holds the indices of negative elements in the current window, oldest first.
    dq = deque()
    # Seed the deque from the first window.
    for i in range(k):
        if l[i] < 0:
            dq.append(i)
    # print(dq)
    if len(dq) == 0:
        print(0,end = " ")
    else:
        print(l[dq[0]],end= " ")
    # Slide the window one element at a time.
    for i in range(k,len(l)):
        if l[i] < 0:
            dq.append(i)
        # Evict the front index once it has fallen out of the window.
        if len(dq)!=0 and dq[0] <= i-k:
            dq.popleft()
        # print(dq)
        if len(dq) == 0:
            print(0,end = " ")
        else:
            print(l[dq[0]],end= " ")

printFN(l,3)
print()
from collections import defaultdict

# Bridge counting via low-link DFS (Tarjan): edge (u, v) is a bridge when the
# subtree rooted at v has no back edge reaching u or above, i.e. lt[v] > dis[u].
vis = [0 for i in range(0 , 2000)]  # visited flags
lt = [0 for i in range(0 , 2000)]   # low-link times
dis = [0 for i in range(0 , 2000)]  # discovery times
adj = defaultdict(list)             # adjacency lists, 1-indexed vertices
time = 0
ans = 0                             # number of bridges found

def dfs(u , p):
    """DFS from u with parent p; increments global `ans` once per bridge.

    NOTE(review): the v==p test skips every edge back to the parent, so
    parallel edges between u and p would be mishandled — confirm inputs are
    simple graphs.
    """
    global time , ans
    time = time+1
    lt[u]= dis[u]=time
    vis[u]=1
    for v in adj[u]:
        if v==p:
            continue
        if vis[v]==0:
            dfs(v , u)
            lt[u]=min(lt[u] , lt[v])# if back edge through child v
            if lt[v]>dis[u]:
                # No back edge from v's subtree climbs above u: (u, v) is a bridge.
                ans = ans+1
        else:
            lt[u]=min(lt[u] , dis[v])# if there is a back edge to a child v

def main():
    # Read n vertices / m undirected edges, run DFS from vertex 1, print count.
    n , m = map(int , input().split())
    for i in range(0 , m):
        u, v= map(int , input().split())
        adj[u].append((v))
        adj[v].append((u))
    dfs(1, 0)
    print(ans)

if __name__=="__main__":
    main()
34870824750 | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import Timedelta
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
class TestNonNano:
    """Behavior of TimedeltaArray backed by non-nanosecond (s/ms/us) units."""

    @pytest.fixture(params=["s", "ms", "us"])
    def unit(self, request):
        # Each supported non-nano resolution.
        return request.param

    @pytest.fixture
    def tda(self, unit):
        # TimedeltaArray holding [0..4] in the requested unit.
        arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
        return TimedeltaArray._simple_new(arr, dtype=arr.dtype)

    def test_non_nano(self, unit):
        arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
        tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)

        assert tda.dtype == arr.dtype
        assert tda[0].unit == unit

    def test_as_unit_raises(self, tda):
        # GH#50616
        with pytest.raises(ValueError, match="Supported units"):
            tda.as_unit("D")

        tdi = pd.Index(tda)
        with pytest.raises(ValueError, match="Supported units"):
            tdi.as_unit("D")

    @pytest.mark.parametrize("field", TimedeltaArray._field_ops)
    def test_fields(self, tda, field):
        # Field accessors must agree with the nanosecond-backed equivalent.
        as_nano = tda._ndarray.astype("m8[ns]")
        tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)

        result = getattr(tda, field)
        expected = getattr(tda_nano, field)
        tm.assert_numpy_array_equal(result, expected)

    def test_to_pytimedelta(self, tda):
        as_nano = tda._ndarray.astype("m8[ns]")
        tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)

        result = tda.to_pytimedelta()
        expected = tda_nano.to_pytimedelta()
        tm.assert_numpy_array_equal(result, expected)

    def test_total_seconds(self, unit, tda):
        as_nano = tda._ndarray.astype("m8[ns]")
        tda_nano = TimedeltaArray._simple_new(as_nano, dtype=as_nano.dtype)

        result = tda.total_seconds()
        expected = tda_nano.total_seconds()
        tm.assert_numpy_array_equal(result, expected)

    def test_timedelta_array_total_seconds(self):
        # GH34290
        expected = Timedelta("2 min").total_seconds()

        result = pd.array([Timedelta("2 min")]).total_seconds()[0]
        assert result == expected

    def test_total_seconds_nanoseconds(self):
        # issue #48521
        start_time = pd.Series(["2145-11-02 06:00:00"]).astype("datetime64[ns]")
        end_time = pd.Series(["2145-11-02 07:06:00"]).astype("datetime64[ns]")
        expected = (end_time - start_time).values / np.timedelta64(1, "s")
        result = (end_time - start_time).dt.total_seconds().values
        assert result == expected

    @pytest.mark.parametrize(
        "nat", [np.datetime64("NaT", "ns"), np.datetime64("NaT", "us")]
    )
    def test_add_nat_datetimelike_scalar(self, nat, tda):
        # Adding datetime-NaT produces an all-NaT DatetimeArray at tda's reso.
        result = tda + nat
        assert isinstance(result, DatetimeArray)
        assert result._creso == tda._creso
        assert result.isna().all()

        result = nat + tda
        assert isinstance(result, DatetimeArray)
        assert result._creso == tda._creso
        assert result.isna().all()

    def test_add_pdnat(self, tda):
        # pd.NaT keeps the result a TimedeltaArray (NaT is ambiguous in type).
        result = tda + pd.NaT
        assert isinstance(result, TimedeltaArray)
        assert result._creso == tda._creso
        assert result.isna().all()

        result = pd.NaT + tda
        assert isinstance(result, TimedeltaArray)
        assert result._creso == tda._creso
        assert result.isna().all()

    # TODO: 2022-07-11 this is the only test that gets to DTA.tz_convert
    #  or tz_localize with non-nano; implement tests specific to that.
    def test_add_datetimelike_scalar(self, tda, tz_naive_fixture):
        ts = pd.Timestamp("2016-01-01", tz=tz_naive_fixture).as_unit("ns")

        expected = tda.as_unit("ns") + ts
        res = tda + ts
        tm.assert_extension_array_equal(res, expected)
        res = ts + tda
        tm.assert_extension_array_equal(res, expected)

        ts += Timedelta(1)  # case where we can't cast losslessly

        exp_values = tda._ndarray + ts.asm8
        expected = (
            DatetimeArray._simple_new(exp_values, dtype=exp_values.dtype)
            .tz_localize("UTC")
            .tz_convert(ts.tz)
        )

        result = tda + ts
        tm.assert_extension_array_equal(result, expected)

        result = ts + tda
        tm.assert_extension_array_equal(result, expected)

    def test_mul_scalar(self, tda):
        other = 2
        result = tda * other
        expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_mul_listlike(self, tda):
        other = np.arange(len(tda))
        result = tda * other
        expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_mul_listlike_object(self, tda):
        other = np.arange(len(tda))
        result = tda * other.astype(object)
        expected = TimedeltaArray._simple_new(tda._ndarray * other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_div_numeric_scalar(self, tda):
        other = 2
        result = tda / other
        expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_div_td_scalar(self, tda):
        # timedelta / timedelta yields a plain float ndarray.
        other = timedelta(seconds=1)
        result = tda / other
        expected = tda._ndarray / np.timedelta64(1, "s")
        tm.assert_numpy_array_equal(result, expected)

    def test_div_numeric_array(self, tda):
        other = np.arange(len(tda))
        result = tda / other
        expected = TimedeltaArray._simple_new(tda._ndarray / other, dtype=tda.dtype)
        tm.assert_extension_array_equal(result, expected)
        assert result._creso == tda._creso

    def test_div_td_array(self, tda):
        other = tda._ndarray + tda._ndarray[-1]
        result = tda / other
        expected = tda._ndarray / other
        tm.assert_numpy_array_equal(result, expected)

    def test_add_timedeltaarraylike(self, tda):
        # Mixed-reso td + td arithmetic matches the all-nano computation.
        tda_nano = tda.astype("m8[ns]")

        expected = tda_nano * 2
        res = tda_nano + tda
        tm.assert_extension_array_equal(res, expected)
        res = tda + tda_nano
        tm.assert_extension_array_equal(res, expected)

        expected = tda_nano * 0
        res = tda - tda_nano
        tm.assert_extension_array_equal(res, expected)

        res = tda_nano - tda
        tm.assert_extension_array_equal(res, expected)
class TestTimedeltaArray:
    """astype, setitem, and searchsorted validation for TimedeltaArray."""

    @pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
    def test_astype_int(self, dtype):
        arr = TimedeltaArray._from_sequence([Timedelta("1h"), Timedelta("2h")])

        if np.dtype(dtype) != np.int64:
            # Only a lossless view as int64 is allowed.
            with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
                arr.astype(dtype)
            return

        result = arr.astype(dtype)
        expected = arr._ndarray.view("i8")
        tm.assert_numpy_array_equal(result, expected)

    def test_setitem_clears_freq(self):
        # Any __setitem__ invalidates the inferred frequency.
        a = TimedeltaArray(pd.timedelta_range("1h", periods=2, freq="h"))
        a[0] = Timedelta("1h")
        assert a.freq is None

    @pytest.mark.parametrize(
        "obj",
        [
            Timedelta(seconds=1),
            Timedelta(seconds=1).to_timedelta64(),
            Timedelta(seconds=1).to_pytimedelta(),
        ],
    )
    def test_setitem_objects(self, obj):
        # make sure we accept timedelta64 and timedelta in addition to Timedelta
        tdi = pd.timedelta_range("2 Days", periods=4, freq="h")
        arr = TimedeltaArray(tdi, freq=tdi.freq)

        arr[0] = obj
        assert arr[0] == Timedelta(seconds=1)

    @pytest.mark.parametrize(
        "other",
        [
            1,
            np.int64(1),
            1.0,
            np.datetime64("NaT"),
            pd.Timestamp("2021-01-01"),
            "invalid",
            np.arange(10, dtype="i8") * 24 * 3600 * 10**9,
            (np.arange(10) * 24 * 3600 * 10**9).view("datetime64[ns]"),
            pd.Timestamp("2021-01-01").to_period("D"),
        ],
    )
    @pytest.mark.parametrize("index", [True, False])
    def test_searchsorted_invalid_types(self, other, index):
        # Non-timedelta search values must raise TypeError on both the array
        # and its Index wrapper.
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = TimedeltaArray(data, freq="D")
        if index:
            arr = pd.Index(arr)

        msg = "|".join(
            [
                "searchsorted requires compatible dtype or scalar",
                "value should be a 'Timedelta', 'NaT', or array of those. Got",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            arr.searchsorted(other)
class TestUnaryOps:
    """abs / +x / -x on TimedeltaArray, including NaT propagation and freq."""

    def test_abs(self):
        vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
        arr = TimedeltaArray(vals)

        evals = np.array([3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
        expected = TimedeltaArray(evals)

        result = abs(arr)
        tm.assert_timedelta_array_equal(result, expected)

        result2 = np.abs(arr)
        tm.assert_timedelta_array_equal(result2, expected)

    def test_pos(self):
        vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
        arr = TimedeltaArray(vals)

        # Unary + is a no-op value-wise but must return a fresh copy.
        result = +arr
        tm.assert_timedelta_array_equal(result, arr)
        assert not tm.shares_memory(result, arr)

        result2 = np.positive(arr)
        tm.assert_timedelta_array_equal(result2, arr)
        assert not tm.shares_memory(result2, arr)

    def test_neg(self):
        vals = np.array([-3600 * 10**9, "NaT", 7200 * 10**9], dtype="m8[ns]")
        arr = TimedeltaArray(vals)

        evals = np.array([3600 * 10**9, "NaT", -7200 * 10**9], dtype="m8[ns]")
        expected = TimedeltaArray(evals)

        result = -arr
        tm.assert_timedelta_array_equal(result, expected)

        result2 = np.negative(arr)
        tm.assert_timedelta_array_equal(result2, expected)

    def test_neg_freq(self):
        # Negation flips the inferred frequency too.
        tdi = pd.timedelta_range("2 Days", periods=4, freq="h")
        arr = TimedeltaArray(tdi, freq=tdi.freq)

        expected = TimedeltaArray(-tdi._data, freq=-tdi.freq)

        result = -arr
        tm.assert_timedelta_array_equal(result, expected)

        result2 = np.negative(arr)
        tm.assert_timedelta_array_equal(result2, expected)
| pandas-dev/pandas | pandas/tests/arrays/test_timedeltas.py | test_timedeltas.py | py | 10,643 | python | en | code | 40,398 | github-code | 90 |
40886684262 | import xml.etree.ElementTree as etree
import codecs
import csv
import time
import os
import re
import json
import nltk
from nltk import word_tokenize
from nltk.stem import SnowballStemmer
import pickle as pkl
from gibberish_detector import detector
from tqdm import tqdm
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
PATH_WIKI_XML = '/home/starc/Downloads/'
PATH_CSV='/home/starc/IRE-Stuff/'
FILENAME_WIKI = 'data'
FILENAME_ARTICLES = 'articles.csv'
FILENAME_REDIRECT = 'articles_redirect.csv'
FILENAME_TEMPLATE = 'articles_template.csv'
ENCODING = "utf-8"
# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format an elapsed duration in seconds as ``H:MM:SS.ss``."""
    hours, rem = divmod(sec_elapsed, 3600)
    minutes = int(rem // 60)
    seconds = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(int(hours), minutes, seconds)
def strip_tag_name(t):
    """Return the local part of an XML tag, dropping any '{namespace}' prefix.

    ElementTree spells namespaced tags as '{uri}local'; everything up to and
    including the closing brace is removed.

    Bug fix: the original ignored its *t* argument and re-read the global
    ``elem.tag`` (identical in practice only because callers always passed
    ``elem.tag``); it also bound an unused ``k``. Now a pure function of *t*.
    """
    idx = t.rfind("}")
    return t[idx + 1:] if idx != -1 else t
pathWikiXML = os.path.join(PATH_WIKI_XML, FILENAME_WIKI)
pathArticles = os.path.join(PATH_CSV, FILENAME_ARTICLES)
pathArticlesRedirect = os.path.join(PATH_CSV, FILENAME_REDIRECT)
pathTemplateRedirect = os.path.join(PATH_CSV, FILENAME_TEMPLATE)
totalCount = 0
articleCount = 0
redirectCount = 0
templateCount = 0
title = None
start_time = time.time()
count_ns=[]
inverted_index={}
ss = SnowballStemmer(language='english')
id_correspondence={}
Detector = detector.create_from_model('big.model')
with open('stopwords.pkl', 'rb') as f:
stopword_set = pkl.load(f)
f.close()
# with codecs.open(pathArticles, "w", ENCODING) as articlesFH, \
# codecs.open(pathArticlesRedirect, "w", ENCODING) as redirectFH, \
# codecs.open(pathTemplateRedirect, "w", ENCODING) as templateFH:
# articlesWriter = csv.writer(articlesFH, quoting=csv.QUOTE_MINIMAL)
# redirectWriter = csv.writer(redirectFH, quoting=csv.QUOTE_MINIMAL)
# templateWriter = csv.writer(templateFH, quoting=csv.QUOTE_MINIMAL)
# articlesWriter.writerow(['id', 'title', 'redirect', 'text'])
# redirectWriter.writerow(['id', 'title', 'redirect'])
# templateWriter.writerow(['id', 'title'])
for event, elem in etree.iterparse(pathWikiXML, events=('start', 'end')):
tname = strip_tag_name(elem.tag)
temp_title_count={}
temp_redirect_count={}
temp_text_count={}
if event == 'start':
if tname == 'page':
title = ''
id = -1
redirect = ''
text=''
inrevision = False
ns = 0
elif tname == 'revision':
# Do not pick up on revision id's
inrevision = True
else:
if tname == 'title':
title = elem.text
# title = re.sub(r'#REDIRECT |#redirect ', '', str(title))
elif tname == 'id' and not inrevision:
id = int(elem.text)
elif tname == 'redirect':
redirect = elem.attrib['title']
# redirect = re.sub(r'#REDIRECT |#redirect ', '', str(redirect))
elif tname == 'ns':
ns = int(elem.text)
elif tname == 'text':
text = elem.text
text = re.sub(r'#REDIRECT|#redirect', '', str(text))
elif tname == 'page':
totalCount += 1
# text = elem.text
count_ns.append(ns)
# if len(text)>30:
# stringOfRe = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
title = re.sub(r'([^a-zA-Z0-9 ])', '', title)
text = re.sub(r'([^a-zA-Z0-9 ])', '', text)
# redirect = re.sub(r'([^a-zA-Z0-9 ])', '', redirect)
temp_title = title.split(" ") #word_tokenize(title)
for i in temp_title:
if i not in stopword_set and len(i)<30:
stemmed_i = ss.stem(i)
if stemmed_i in temp_title_count.keys():
temp_title_count[stemmed_i]+=1
else:
temp_title_count[stemmed_i]=1
set_temp_title = list(temp_title_count)
# temp_redirect = word_tokenize(redirect)
# for i in temp_redirect:
# if i not in stopword_set and len(i)<30:
# stemmed_i=ss.stem(i)
# if stemmed_i in temp_redirect_count.keys():
# temp_redirect_count[stemmed_i]+=1
# else:
# temp_redirect_count[stemmed_i]=1
# set_temp_redirect = list(temp_redirect_count)
temp_text = text.split(" ") #word_tokenize(text)
for i in temp_text:
if i not in stopword_set and len(i)<30:
stemmed_i=ss.stem(i)
if stemmed_i in temp_text_count.keys():
temp_text_count[stemmed_i]+=1
else:
temp_text_count[stemmed_i]=1
set_temp_text = list(temp_text_count)
all_keys = set(list(temp_text_count)).union(set(list(temp_title_count)))
# all_keys = set(list(temp_text_count)).union(set(list(temp_title_count)).union(set(list(temp_redirect_count))))
all_docStr = ['' for i in range(len(all_keys))]
id_correspondence[totalCount]=id
for key in all_keys:
docID_string=''
if key in temp_title_count.keys():
docID_string=str(temp_title_count[key])+','
if key in temp_text_count.keys():
docID_string=docID_string+str(temp_text_count[key])
# if key in temp_redirect_count.keys():
# docID_string=docID_string+'r'+str(temp_redirect_count[key])
docID_string=docID_string+'-'+str(totalCount)
if key in inverted_index.keys():
inverted_index[key]=inverted_index[key]+"|"+docID_string
else:
inverted_index[key]=docID_string
if totalCount%1000==0:
print(totalCount)
elem.clear()
count=0
for i in inverted_index.keys():
if Detector.is_gibberish(i):
count+=1
print(count)
print(len(inverted_index.keys()))
with open("inverted_index.json", "w") as f:
json.dump(inverted_index, f, indent=0)
f.close()
with open("id_correspondence.json", "w") as f:
json.dump(id_correspondence, f, indent=0)
f.close()
elapsed_time = time.time() - start_time
print(set(count_ns))
print("Total pages: {:,}".format(totalCount))
print("Template pages: {:,}".format(templateCount))
print("Article pages: {:,}".format(articleCount))
print("Redirect pages: {:,}".format(redirectCount))
print("Elapsed time: {}".format(hms_string(elapsed_time))) | starc52/Wikipedia-Search-Engine | index_old.py | index_old.py | py | 6,947 | python | en | code | 0 | github-code | 90 |
3873944150 | import pygame
class Column:
    """Column spec for a Listbox: pixel width plus header title."""

    def __init__(self, width, title):
        self.title = title
        self.width = width
class Listbox:
    """A multi-column list widget rendered onto game.screen.

    Callbacks:
      ondraw(game, row_idx, col_idx) -> str : cell text to render
      onclick                               : stored; click() is still a stub
      onupdate(game, listbox)               : invoked once per update() call
    """

    def __init__(self, game, x, y, width, max_items, columns, ondraw, onclick, onupdate):
        self.game = game
        self.x = x
        self.y = y
        self.width = width
        self.height = max_items * 20  # pixel height: 20 px per visible row
        self.item_count = 0
        self.columns = columns  # list of Column(width, title)
        self.ondraw = ondraw
        self.onclick = onclick
        self.onupdate = onupdate
        self.start_idx = 0 # for future scrollbars

    def set_rows(self, cnt):
        # Number of data rows the ondraw callback can supply.
        self.item_count = cnt

    def draw(self):
        """Render each column's header and up to 15 visible rows."""
        # column header font
        hdrfont = pygame.font.Font(None, 36)
        # column text font
        colfont = pygame.font.Font(None, 20)
        column_idx = 0
        column_width_start = self.x
        for col in self.columns:
            # render header
            name = hdrfont.render(col.title, 1, (255,255,255))
            self.game.screen.blit(name, (column_width_start, self.y))
            # draw items in list
            for idx in range(self.start_idx + self.item_count):
                # show a maximum of 15 values in the list
                if idx >= 15:
                    break
                # draw column data
                colval = self.ondraw(self.game, idx, column_idx)
                collabel = colfont.render(colval, 1, (255,255,255))
                # Rows start 24 px below the header, 22 px apart.
                self.game.screen.blit(collabel, (column_width_start, self.y + 24 + (22 * idx)))
                # print("Entry {} is {}".format(idx, colval))
            # increment values
            column_width_start += col.width
            column_idx += 1

    def click(self, pos):
        # TODO: row hit-testing / selection is not implemented yet.
        pass

    def update(self):
        self.onupdate(self.game, self)
# All live listboxes; drawn, updated, and hit-tested each frame.
listboxes = []

def remove(game):
    # Drop every registered listbox (the game argument is unused).
    listboxes.clear()

def update(game):
    # Per-frame update hook: forwards to each listbox's onupdate callback.
    for listbox in listboxes:
        listbox.update()

def create(game, x, y, width, item_count, columns, ondraw, onclick, onupdate):
    # Construct a Listbox and register it for drawing/updating.
    _box = Listbox(game, x, y, width, item_count, columns, ondraw, onclick, onupdate)
    listboxes.append(_box)

def draw(game):
    for listbox in listboxes:
        listbox.draw()

# this function is fired when a mouse button is clicked
def click(pos):
    # loop through buttons
    for btn in listboxes:
        # check if mouse position is within our range of interest
        if pos[0] > btn.x and pos[0] < btn.width + btn.x:
            if pos[1] > btn.y and pos[1] < btn.height + btn.y:
                # execute button callback
                btn.click(pos)
                # Buttons shouldn't overlap. Break loop to increase performance
                break
72207853418 | """
新建一个链表
"""
class ListNode:
    """Node of a singly linked list: a value plus a next pointer."""
    def __init__(self, x):
        self.val = x
        self.next = None

def stringToListNode(input):
    """Build a linked list from a comma-separated string of integers.

    Accepts plain "1,2,3" as well as bracketed "[1, 2, 3]" input and
    tolerates surrounding whitespace.  Returns the head node, or None
    for an empty input (the original raised ValueError on "").
    """
    dummyRoot = ListNode(0)
    ptr = dummyRoot
    for token in input.strip().strip('[]').split(','):
        token = token.strip()
        if token:  # skip empty tokens so "" and "[]" yield an empty list
            ptr.next = ListNode(int(token))
            ptr = ptr.next
    return dummyRoot.next

def listNodeToString(node):
    """Render a linked list as "[v1, v2, ...]" ("[]" when node is None)."""
    vals = []
    while node:
        vals.append(str(node.val))
        node = node.next
    # join avoids the quadratic string concatenation of the original
    return "[" + ", ".join(vals) + "]"
| Asunqingwen/LeetCode | AC组/链表.py | 链表.py | py | 555 | python | en | code | 0 | github-code | 90 |
73575666217 | def main():
lista = insertion([6,5,3,1,8,7,2,4])
print(lista)
def insertion(lista):
    """Sort ``lista`` in place with insertion sort and return it.

    Replaces the original pairwise-swap variant with the standard
    shift-then-place form: each element is moved left with one write per
    slot instead of a full swap, halving the writes.
    """
    for i in range(1, len(lista)):
        key = lista[i]
        j = i - 1
        # Shift elements greater than key one slot to the right.
        while j >= 0 and key < lista[j]:
            lista[j + 1] = lista[j]
            j -= 1
        lista[j + 1] = key
    return lista
main() | Algoritmos-y-Programacion-2223-2-S5/Ejercicios-Clase | Semana 10/Lunes (1)/Lunes/insertion.py | insertion.py | py | 314 | python | en | code | 0 | github-code | 90 |
28111877027 | from db import *
from tabulate import tabulate
def addStudent(name, phno, address):
    """Insert a student row and print the ID assigned to it.

    NOTE(review): the new ID is inferred from information_schema's
    AUTO_INCREMENT counter minus one, which is racy under concurrent
    inserts; cursor.lastrowid would be safer — confirm.
    """
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'INSERT INTO STUDENT(name, phno, address) VALUES(%s, %s, %s);'
        args = (name, phno, address)
        cursor.execute(query, args)
        conn.commit()
        query = 'SELECT AUTO_INCREMENT FROM information_schema.Tables WHERE TABLE_SCHEMA = "lms" AND TABLE_NAME = "student";'
        cursor.execute(query)
        res = cursor.fetchone()
        print('Student Successfully added with ID', res[0] - 1)
        conn.close()
def addBook(name, author):
    """Insert a book row (availability "Yes") and print the new book ID.

    NOTE(review): same racy AUTO_INCREMENT lookup as addStudent — confirm.
    """
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'INSERT INTO BOOK(name, author, availability) VALUES(%s, %s, %s);'
        args = (name, author, 'Yes')
        cursor.execute(query, args)
        conn.commit()
        query = 'SELECT AUTO_INCREMENT FROM information_schema.Tables WHERE TABLE_SCHEMA = "lms" AND TABLE_NAME = "book";'
        cursor.execute(query)
        res = cursor.fetchone()
        print('Book Successfully added with ID', res[0] - 1)
        conn.close()
def removeStudent(stud_id):
    """Delete the student with the given id (no-op if it does not exist)."""
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'DELETE FROM STUDENT WHERE stud_id = %s;'
        args = (stud_id,)
        cursor.execute(query, args)
        conn.commit()
        print('Student removed successfully!!.')
        conn.close()
def removeBook(book_id):
    """Delete the book with the given id (no-op if it does not exist)."""
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'DELETE FROM BOOK WHERE book_id = %s;'
        args = (book_id,)
        cursor.execute(query, args)
        conn.commit()
        print('Book removed successfully!!.')
        conn.close()
def students():
    """Print every student row as a table (ID, Name, Mobile No, Address)."""
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'SELECT * FROM student'
        cursor.execute(query)
        row = cursor.fetchall()
        print(tabulate(row, headers=['ID', 'Name', 'Mobile No', 'Address']))
        conn.close()
def books():
    """Print every book row as a table (ID, Name, Author, Availability)."""
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'SELECT * FROM book'
        cursor.execute(query)
        row = cursor.fetchall()
        print(tabulate(row, headers=['ID', 'Name', 'Author', 'Availability']))
        conn.close()
def issue(book_id, stud_id, issue_date, return_date):
    """Record a book loan and mark the book unavailable.

    Inserts an issue_book row with status "In Issue", flips the book's
    availability to "No", then prints the new issue ID.
    NOTE(review): the ID comes from information_schema's AUTO_INCREMENT
    counter minus one, which is racy — cursor.lastrowid would be safer.
    """
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'INSERT INTO issue_book (book_id, stud_id, issue_date, return_date, status) VALUES(%s, %s, %s, %s, %s);'
        args = (book_id, stud_id, issue_date, return_date, 'In Issue')
        cursor.execute(query, args)
        conn.commit()
        query = 'UPDATE book set availability = "No" WHERE book_id = %s'
        args = (book_id,)
        cursor.execute(query, args)
        conn.commit()
        query = 'SELECT AUTO_INCREMENT FROM information_schema.Tables WHERE TABLE_SCHEMA = "lms" AND TABLE_NAME = "issue_book";'
        cursor.execute(query)
        res = cursor.fetchone()
        print('Book issued Successfully added with ID', res[0] - 1)
        conn.close()
def return_book(book_id, stud_id):
    """Mark a loan returned: book becomes available, loan status "Returned"."""
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'UPDATE book set availability = "Yes" WHERE book_id = %s'
        args = (book_id,)
        cursor.execute(query, args)
        conn.commit()
        query = 'UPDATE issue_book set status = "Returned" WHERE book_id = %s and stud_id = %s'
        args = (book_id, stud_id)
        cursor.execute(query, args)
        conn.commit()
        conn.close()
        print('Book Returned Successfully!!.')
def issued_books():
    """Print every currently-issued book with its borrower as a table."""
    conn = connectDB()
    if conn:
        cursor = conn.cursor()
        query = 'select book_id, book.name, stud_id, student.name from book, student where ROW(book_id, stud_id) in (select book_id, stud_id from issue_book where status="In Issue");'
        cursor.execute(query)
        row = cursor.fetchall()
        print(tabulate(row, headers=['Book Id', 'Book Name', 'Student Id', 'Student Name']))
        conn.close()
| RamanaMenda/library-management-system | method.py | method.py | py | 4,199 | python | en | code | 0 | github-code | 90 |
22029620172 | import os
import sys
import configparser
from rbnics.utils.decorators import overload, set_of
from rbnics.utils.mpi import parallel_io
class Config(object):
    """RBniCS runtime configuration.

    Holds defaults for backends and cache limits, mirrors them in both a
    ConfigParser (for file round-trips) and a plain dict (for typed
    lookups), and merges user overrides from .rbnicsrc files.
    """
    # Absolute path of the RBniCS package root (two levels above this file).
    rbnics_directory = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir))
    # Set class defaults
    defaults = {
        "backends": {
            "online backend": "numpy",
            "required backends": None
        },
        "EIM": {
            "cache": {"disk", "RAM"},
            "disk cache limit": "unlimited",
            "RAM cache limit": "1"
        },
        "problems": {
            "cache": {"disk", "RAM"},
            "disk cache limit": "unlimited",
            "RAM cache limit": "1"
        },
        "reduced problems": {
            "cache": {"RAM"},
            "RAM cache limit": "unlimited"
        },
        "SCM": {
            "cache": {"disk", "RAM"},
            "disk cache limit": "unlimited",
            "RAM cache limit": "1"
        }
    }
    # Read in required backends: any backends/* subpackage already imported
    # (present in sys.modules) is considered required.
    required_backends = set()
    for root, dirs, files in os.walk(os.path.join(rbnics_directory, "backends")):
        for dir_ in dirs:
            if dir_ in sys.modules:
                required_backends.add(dir_)
        break  # prevent recursive exploration
    defaults["backends"]["required backends"] = required_backends
    del required_backends
    def __init__(self):
        """Populate the parser from the class defaults and mirror it into a dict."""
        # Setup configparser from defaults
        self._config_as_parser = configparser.ConfigParser()
        # keep option names case-sensitive (default optionxform lowercases)
        self._config_as_parser.optionxform = str
        for (section, options_and_values) in self.defaults.items():
            self._config_as_parser.add_section(section)
            for (option, value) in options_and_values.items():
                self._config_as_parser.set(section, option, self._value_to_parser(section, option, value))
        # Setup dict
        self._config_as_dict = dict()
        self._parser_to_dict()
    def read(self, directory=None):
        """Merge .rbnicsrc files found from the filesystem root down to
        *directory* (defaults to the main script's directory or the cwd)."""
        # Read from configparser
        config_files_list = list()
        config_files_list.append(os.path.join(self.rbnics_directory, ".rbnicsrc"))
        if directory is None:
            if hasattr(sys.modules["__main__"], "__file__") and "pytest" not in sys.modules:  # from script
                main_directory = os.path.dirname(os.path.realpath(sys.modules["__main__"].__file__))
            else:  # interactive or pytest
                main_directory = os.getcwd()
        else:
            main_directory = directory
        main_directory_split = main_directory.split(os.path.sep)
        # Walk from the deepest directory up to the root, collecting every
        # candidate .rbnicsrc path; later reads override earlier ones.
        for p in range(len(main_directory_split), 0, -1):
            new_config_file_list = list()
            new_config_file_list.append(os.path.sep)
            new_config_file_list.extend(main_directory_split[:p])
            new_config_file_list.append(".rbnicsrc")
            new_config_file = os.path.join(*new_config_file_list)
            if new_config_file not in config_files_list:
                config_files_list.append(new_config_file)
        self._config_as_parser.read(config_files_list)
        # Update dict
        self._parser_to_dict()
    def write(self, file_or_file_object):
        """Write the current configuration to a file name or to sys.stdout."""
        assert isinstance(file_or_file_object, str) or file_or_file_object is sys.stdout, (
            "Please provide a file name and not a file object (except for sys.stdout)")
        if isinstance(file_or_file_object, str):
            def write_config_parser():
                with open(file_or_file_object, "w") as file_:
                    self._config_as_parser.write(file_)
        else:
            assert file_or_file_object is sys.stdout

            def write_config_parser():
                self._config_as_parser.write(file_or_file_object)
        # write once across MPI ranks
        parallel_io(write_config_parser)
    def get(self, section, option):
        """Return the typed value stored for (section, option)."""
        return self._config_as_dict[section][option]
    def set(self, section, option, value):
        """Set (section, option) in both the parser and the typed dict."""
        self._config_as_parser.set(section, option, self._value_to_parser(section, option, value))
        self._config_as_dict[section][option] = value
    # The three _value_to_parser variants below are dispatched on the type
    # of *value* by the @overload decorator.
    @overload(str, str, str)
    def _value_to_parser(self, section, option, value):
        """Serialize a plain string value (default must also be a str)."""
        assert isinstance(self.defaults[section][option], str)
        return value
    @overload(str, str, bool)
    def _value_to_parser(self, section, option, value):
        """Serialize a boolean value as its str() form."""
        assert isinstance(self.defaults[section][option], bool)
        return str(value)
    @overload(str, str, set_of(str))
    def _value_to_parser(self, section, option, value):
        """Serialize a set of strings as a comma-separated, sorted list."""
        default = self.defaults[section][option]
        assert isinstance(default, set)
        assert value.issubset(default)
        value_str = ", ".join(str(v) for v in sorted(value))
        if len(value) < 2:
            value_str += ","  # to differentiate between str and a set with one element
        return value_str
    def _value_from_parser(self, section, option, value):
        """Parse a stored string back into a set, bool or plain string,
        using the type of the corresponding default as the arbiter."""
        assert isinstance(value, str)
        if "," in value:
            assert isinstance(self.defaults[section][option], set)
            # strip trailing comma which has been possibly added to differentiate between str and set
            value = value.strip(",")
            return set([v.strip() for v in value.split(",")]).difference(("", ))
        else:
            if value.lower() in ("yes", "true", "on"):
                assert isinstance(self.defaults[section][option], bool)
                return True
            elif value.lower() in ("no", "false", "off"):
                assert isinstance(self.defaults[section][option], bool)
                return False
            else:
                assert isinstance(self.defaults[section][option], str)
                return value
    def _parser_to_dict(self):
        """Rebuild the typed dict mirror from the parser's current state."""
        for section in self._config_as_parser.sections():
            self._config_as_dict[section] = dict()
            for (option, value) in self._config_as_parser.items(section):
                self._config_as_dict[section][option] = self._value_from_parser(section, option, value)
    def __eq__(self, other):
        """Two configs are equal when both mirrors match."""
        return (self._config_as_parser == other._config_as_parser
                and self._config_as_dict == other._config_as_dict)
# Module-level singleton: build the defaults, then merge any .rbnicsrc
# overrides found on disk.
config = Config()
config.read()
| RBniCS/RBniCS | rbnics/utils/config/config.py | config.py | py | 6,211 | python | en | code | 83 | github-code | 90 |
41849828080 | #Write a program that reads a list of numbers list from the first line and a number x from the second line, which prints out all the positions where the number x occurs
#in the given list.
#Positions are numbered from zero; if the number x is not found in the list,
#output the string "None" (without quotes, with a capital letter).
#Positions are printed on one line, in ascending order.
a = [int(i) for i in input().split()]
b = int(input())
# enumerate yields zero-based positions, as required by the spec above
# (the original printed 1-based positions, one per line).
positions = [str(i) for i, value in enumerate(a) if value == b]
if positions:
    print(*positions)
else:
    print("None")
| Lavliet90/if_for_input_split.py | if_for_input_split.py | if_for_input_split.py | py | 573 | python | en | code | 0 | github-code | 90 |
10256157367 | # =============================================================================
# Programming Project 9
# Algorithm:
# read a file with information on pokemon video game
# build a nested dictionary
# loop prompting for a valid option
# call the specific function to display the data corresponding to the option
#
# =============================================================================
import csv,copy
# Multiplier -> human-readable matchup label.
# NOTE(review): appears unused in this module — confirm before removing.
EFFECTIVENESS = {0.25: "super effective", 0.5: "effective", 1:"normal",
                 2:"weak", 4:"super weak", 0:"resistant"}
# All matchup labels understood by find_matchups.
# NOTE(review): appears unused in this module — confirm before removing.
MATCHUP_TYPES = {"resistant", "super effective", "effective", "normal",
                 "weak", "super weak"}
# Menu text shown by main() before each selection.
PROMPT = '''
\nTo make a selection, please enter an option 1-3:\n
\tOPTION 1: Find Pokemon
\tOPTION 2: Find Pokemon From Abilities
\tOPTION 3: Find Matchups
\nEnter an option: '''
def open_file(s):
    """
    Keep prompting for a filename of kind *s* until one opens successfully.
    s: Label describing the kind of file being requested (str)
    Returns: fp (file pointer, utf-8 text mode)
    """
    while True:
        filename = input('Please enter a {} filename: '.format(s))
        try:
            return open(filename, encoding="utf-8")
        except FileNotFoundError:
            print('This {} file does not exist. Please try again.'.format(s))
def read_file(fp):
    """
    Reads data from the the file, organises data into a master dictionary
    fp: File pointer
    name_dict: A dictionary with pokemon names as keys and their values in a
    list as values (dict)
    against: List of all type values to find the one corresponding to pokemon
    effectiveness (list)
    Returns: Master dictionary (D), nested as
             generation -> (type1, type2) -> name -> info list
    """
    reader = csv.reader(fp)
    D={}
    name_dict={}
    next(reader,None) #skipping header line
    # Type names aligned with CSV columns 1..18 ("against_*" multipliers).
    against=['bug', 'dark', 'dragon', 'electric', 'fairy', 'fight',
            'fire', 'flying', 'ghost', 'grass', 'ground', 'ice',
            'normal', 'poison', 'psychic', 'rock', 'steel', 'water']
    #loop to go through the lines in a file
    # NOTE(review): each "column" is actually one full CSV row; the magic
    # indices below presumably match the Kaggle pokemon dataset — confirm.
    for column in reader:
        gen=int(column[39])
        abilities1=column[0]
        abilities1=abilities1.strip('[]').replace("'","").split(',')
        abilities2=[]
        for word in abilities1:
            word=word.strip(' ')
            abilities2.append(word)
        #assigning pokemon values according to the given column number
        abilities2=set(abilities2)
        hp=int(column[28])
        capture_rate=int(column[23])
        base_weight=float(column[38])
        base_speed=int(column[35])
        #if statement to make legendary value true if value of the column is 1
        #or false if value of the column is 0
        # NOTE(review): if column[40] is neither '0' nor '1', the previous
        # row's legendary value leaks through — confirm the data guarantees it.
        if column[40]=='1':
            legendary=True
        elif column[40]=='0':
            legendary=False
        name = column[30]
        type1=column[36]
        type2=column[37]
        #if the second type value is empty, make a tuple with None value
        if type2=='':
            types=(type1,None)
        else:
            types=(type1,type2)
        #effectiveness part
        # NOTE(review): effect_dict is assigned but never used — confirm
        # it (and name_dict below) can be removed.
        effect_dict={}
        eff_values = column[1:19]
        #making new list for every matchup type
        sup_eff=[]
        eff=[]
        normal=[]
        weak=[]
        sup_weak=[]
        resistant=[]
        #assigning index value to go through the values of against list
        index=0
        for value in eff_values:
            if value=="0.25": #super effective value
                sup_eff.append(against[index])
            elif value=="0.5": #effective value
                eff.append(against[index])
            elif value=="1": #normal value
                normal.append(against[index])
            elif value=="2": #weak value
                weak.append(against[index])
            elif value=="4": #super weak value
                sup_weak.append(against[index])
            elif value=="0": #resistant value
                resistant.append(against[index])
            #adding +1 to index so that it iterates through every element of against list
            index+=1
        #transforming all effectiveness lists into sets
        sup_eff=set(sup_eff)
        eff=set(eff)
        normal=set(normal)
        weak=set(weak)
        sup_weak=set(sup_weak)
        resistant=set(resistant)
        #creating a dictionary with matchup types as keys and effectiveness
        #sets as values
        dict3={"super effective":sup_eff, "effective":eff, "normal":normal,
               "weak":weak, "super weak":sup_weak, "resistant":resistant}
        #the list of all values of a pokemon
        L=[dict3, abilities2,hp,capture_rate,base_weight,
           base_speed,legendary]
        #updating dictionary with pokemon name and its list of values
        name_dict[name]=L
        #building a nested dictionary generation->types->name of pokemon->
        # ->list of pokemon values
        if gen not in D: #if generation key is not in a dictionary yet
            D[gen]={} #creating an empty dict
            if types not in D[gen]: #if type key is not in a dictionary yet
                D[gen][types]={} #creating an empty dict
                D[gen][types][name]=L
            else:
                D[gen][types][name]=L
        else:
            #if type key is not in a dictionary yet
            if types not in D[gen]:
                D[gen][types]={} #creating an empty dict
                D[gen][types][name]=L
            else:
                D[gen][types][name]=L
    return D
def find_pokemon(pokedex, names):
    """
    Look up each requested pokemon in the master dictionary.
    pokedex: nested dict generation -> types tuple -> name -> info list
    names: Pokemon names to search for (set)
    Returns: dict mapping each found name to info[1:] + [generation, types]
             (the matchup dict at info[0] is dropped; missing names are
             simply absent from the result)
    """
    found = {}
    for gen, by_types in pokedex.items():
        for types, members in by_types.items():
            # Membership test per requested name replaces the original's
            # redundant inner loop over every pokemon in the branch.
            for name in names:
                if name in members:
                    found[name] = members[name][1:] + [gen, types]
    return found
def display_pokemon(name, info):
    """
    Build the multi-line pokedex entry string for a single pokemon.
    name: Pokemon name (str)
    info: list as returned by find_pokemon — abilities set, hp,
          capture rate, weight, speed, legendary flag, generation, types
    Returns: formatted entry (str)
    """
    generation = info[-2]
    type_pair = info[-1]
    # A second type of None means the pokemon is single-typed.
    if type_pair[1] is None:
        type_text = type_pair[0]
    else:
        type_text = ", ".join(type_pair)
    ability_text = ", ".join(sorted(info[0]))
    legend_text = "Legendary" if info[5] else "Not Legendary"
    return ("\n{}\n\tGen: {}\n\tTypes: {}\n\tAbilities: {}\n\tHP: {}"
            "\n\tCapture Rate: {}\n\tWeight: {}\n\tSpeed: {}\n\t{}".format(
                name, generation, type_text, ability_text, info[1],
                info[2], info[3], info[4], legend_text))
def find_pokemon_from_abilities(pokedex, abilities):
    """
    Collect every pokemon whose ability set contains all requested abilities.
    pokedex: the master dictionary (generation -> types -> name -> info list)
    abilities: abilities that must all be present (set)
    Returns: matching pokemon names (set)
    """
    matches = set()
    for by_types in pokedex.values():
        for members in by_types.values():
            for name, info in members.items():
                # info[1] holds the pokemon's ability set.
                if info[1].issuperset(abilities):
                    matches.add(name)
    return matches
def find_matchups(pokedex, name, matchup_type):
    """
    Find all pokemon sharing a type with *name*'s matchup set.

    Looks up the set of types filed under *matchup_type* (e.g. "weak",
    "super effective") for the named pokemon, then collects every pokemon
    in the pokedex that has at least one of those types.

    pokedex: nested dict generation -> types tuple -> name -> info list
    name: pokemon whose matchup set is consulted (str)
    matchup_type: effectiveness category to look up (str)
    Returns: sorted list of (pokemon name, types tuple) with a trailing
             None type stripped, or None when the pokemon or the matchup
             category cannot be found.  An empty matchup set yields [].
    """
    # Locate the requested matchup set; as in the original full scan, the
    # last occurrence of the name wins.
    matchup_values = None
    for by_types in pokedex.values():
        for members in by_types.values():
            if name in members:
                type_map = members[name][0]
                if matchup_type in type_map:
                    matchup_values = type_map[matchup_type]
    if matchup_values is None:
        # Unknown pokemon or invalid matchup category.
        return None
    # Collect each pokemon once (a seen-set replaces the original's O(n)
    # rescan of the result list per candidate).
    seen = set()
    matchups = []
    for by_types in pokedex.values():
        for types, members in by_types.items():
            # Skip branches sharing no type with the matchup set.
            if not any(t in matchup_values for t in types):
                continue
            for pok in members:
                if pok not in seen:
                    seen.add(pok)
                    matchups.append((pok, tuple(t for t in types if t is not None)))
    return sorted(matchups)
def main():
    """Drive the interactive pokedex menu until the user enters Q/q."""
    print("Welcome to your personal Pokedex!\n")
    fp = open_file("pokemon") #opening the file
    pokedex = read_file(fp) #building a dict with the data from the file
    option=''
    while option.lower()!='q': #loop that quits only if option is Q or q
        option =input(PROMPT)
        if option =='1' or option == '2' or option == '3':
            if option =='1':
                names_string = input (("\nEnter a list of pokemon names,"
                " separated by commas: "))
                #edits the string,strips it of commas and spaces, splits into
                #the list
                names_list=names_string.strip(",").replace(" ", "").split(",")
                #transforming list into set
                names_set=set(names_list)
                #dictionary that is returned from a function
                pok_dict=find_pokemon(pokedex, names_set)
                print_list=[]
                #loop to go through the dictionary
                for name in pok_dict:
                    #returns string from a function
                    pok_string=display_pokemon(name, pok_dict[name])
                    #appends string to the list to print it later
                    print_list.append (pok_string)
                print_list=sorted(print_list)#sorts the list
                for item in print_list:
                    print (item)
            elif option =='2':
                ab=input('Enter a list of abilities, separated by commas: ')
                #edits the string,strips it of commas and spaces, splits into
                #the list
                ab_list = ab.strip(",").strip(" ").split(",")
                ab_strip=[]
                #strips strings of whitespaces and adds to the list
                for word in ab_list:
                    word=word.strip(' ')
                    ab_strip.append(word)
                abilities=set(ab_strip)
                #calling a function to get a set
                pok_set=find_pokemon_from_abilities(pokedex, abilities)
                #turns set into the list
                pok_list=list(pok_set)
                #sorts the list
                pok_list=sorted(pok_list)
                #.join method to turn a list into a string for printing
                print("Pokemon:",", ".join(pok_list))
            elif option =='3':
                pok_name=input('Enter a pokemon name: ')
                matchup_type=input("Enter a matchup type: ")
                #getting a list from a function
                matchup_list=find_matchups(pokedex, pok_name, matchup_type)
                #if the return is empty (None or []), the input was invalid,
                #so it prints the error message
                if not matchup_list:
                    print ("Invalid input")
                else:
                    for tup in matchup_list:
                        #.join method to turn list into a string for printing
                        print ("{}:".format(tup[0]), ", ".join(tup[1]))
        #if option is not 1-3 or 'Q' or 'q', prints error message
        elif option.lower()!='q':
            print ('Invalid option {}'.format(option))
if __name__ == "__main__":
main() | palinaskakun/Pokedex | proj09.py | proj09.py | py | 16,462 | python | en | code | 0 | github-code | 90 |
34762866985 | import tensorflow as tf
def _float_feature(value):
    """Wrap a single float in a tf.train.Feature (float_list of length 1)."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature (bytes_list of length 1)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def build_estimator_request(request, data):
    """Serialize one tf.train.Example from *data* into *request*.

    data: dict with keys "COLUMNS" (ordered column names), "FIELD_TYPES"
    (column name -> "string" | "number") and "features" (values aligned
    with COLUMNS).  The serialized Example is copied into
    request.inputs['inputs'] as a 1-element string tensor.
    Returns: the mutated request (for chaining).
    """
    COLUMNS=data["COLUMNS"]
    FIELD_TYPES=data["FIELD_TYPES"]
    feats=data["features"]
    feature_dict={}
    for i,c in enumerate(COLUMNS):
        # Columns with any other FIELD_TYPES value are silently skipped.
        if FIELD_TYPES[c]=="string":
            feature_dict[c]=_bytes_feature(value=feats[i].encode())
        if FIELD_TYPES[c]=="number":
            feature_dict[c]=_float_feature(value=feats[i])
    example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
    serialized = example.SerializeToString()
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(serialized, shape=[1]))
    return request
class EstimatorRequest:
    """Callable that binds a fixed column schema and fills requests from it."""
    def __init__(self,COLUMNS,FIELD_TYPES):
        # Ordered column names and their name -> type mapping.
        self.columns=COLUMNS
        self.field_types=FIELD_TYPES
    def __call__(self,request, data):
        # Delegate to build_estimator_request, pairing the stored schema
        # with the caller's feature values.
        return build_estimator_request(request,{
            "COLUMNS":self.columns,
            "FIELD_TYPES":self.field_types,
            "features":data
}) | Mirco-Nani/tensorflow_model_server | inout/requests.py | requests.py | py | 1,238 | python | en | code | 0 | github-code | 90 |
8842637771 | from pymysql import *
class MysqlClient:
    """Thin PyMySQL wrapper around a single `goods` table.

    WARNING(review): every statement below is assembled with str.format /
    f-strings, so all arguments are injectable — parameterized queries
    (execute(sql, args)) should be used for untrusted input.
    """
    def __init__(self,user,password,database,host='localhost',port=3306):
        # Open the connection and keep one cursor for the client's lifetime.
        self.conn = connect(host=host,port=port,user=user,password=password,database=database,charset='utf8')
        self.cs = self.conn.cursor()
    def close(self):
        """Close the cursor and the underlying connection."""
        self.cs.close()
        self.conn.close()
    def run_sql(self,sql,*args):
        """Execute *sql*, committing on success and rolling back on error.

        NOTE(review): the literal text 'None' anywhere in the statement is
        blindly rewritten to NULL — this also mangles legitimate values
        containing "None"; confirm before relying on it.
        """
        try:
            # execute the sql statement
            sql = sql.replace('None','NULL')
            self.cs.execute(sql,args)
            self.conn.commit() # commit the change to the database
        except Exception as e:
            print(e)
            self.conn.rollback()
    def show(self,type='all'):
        """Fetch pending result rows: 'all', 'one', or an int row count."""
        if type=='all':
            return self.cs.fetchall()
        elif type=='one':
            return self.cs.fetchone()
        elif isinstance(type,int):
            return self.cs.fetchmany(type)
    def insert_one(self,*args):
        '''Insert one row; args: id(default 0),goods_id,tags_id,price,goods_url,query_url'''
        sql = "insert into goods values({},{},{},{},'{}','{}');".format(*args)
        self.run_sql(sql)
    def update_one(self,*args,**kwargs):
        """Update columns given as kwargs, keyed by goods_id (and tag_ids
        when a second positional arg is supplied)."""
        set_content = ''
        for key in kwargs:
            # String values get quoted; everything else is interpolated raw.
            if isinstance(kwargs[key],str):
                set_content += f"{key}='{kwargs[key]}',"
            else:
                set_content+=f"{key}={kwargs[key]},"
        if len(args)==1:
            sql = "update goods set "+set_content[:-1]+f" where goods_id={args[0]};"
        else:
            sql = "update goods set "+set_content[:-1]+f" where goods_id={args[0]} and tag_ids={args[1]};"
        self.run_sql(sql)
    def delete_one(self,*args):
        """Delete by goods_id, or by (goods_id, tag_ids) when both given."""
        if len(args)==1:
            sql = f"delete from goods where goods_id={args[0]};"
        else:
            sql = f"delete from goods where goods_id={args[0]} and tag_ids={args[1]};"
        self.run_sql(sql)
    def find(self,*args,col_list=[],**kwargs):
        """Select rows, optionally restricting columns (col_list) and
        filtering either by kwargs equality or positional ids.

        NOTE(review): col_list=[] is a mutable default; it is only read
        here, but replacing it with None would be safer — confirm.
        """
        col_content = ''
        if col_list:
            for i in col_list:
                col_content += i+','
        else:
            col_content='* '
        if kwargs:
            query_content = ''
            for key in kwargs:
                if isinstance(kwargs[key], str):
                    query_content += f"{key}='{kwargs[key]}' and "
                else:
                    query_content += f"{key}={kwargs[key]} and "
            sql = "select " + col_content[:-1] + " from goods where "+query_content[:-4]
        else:
            if not args:
                sql = "select " + col_content[:-1] + f" from goods;"
            elif len(args) == 1:
                sql = "select " + col_content[:-1] + f" from goods where goods_id={args[0]};"
            else:
                sql = "select " + col_content[:-1] + f" from goods where goods_id={args[0]} and tag_ids={args[1]};"
        self.cs.execute(sql)
        return self.show()
    def create_table(self):
        """Create the goods table (fails if it already exists)."""
        sql = '''create table goods(id int unsigned primary key auto_increment not null,
        goods_id int unsigned,
        tag_ids int unsigned,
        price float ,
        goods_url varchar(300),
        query_url varchar(300));'''
        self.run_sql(sql)
    def drop_table(self):
        """Drop the goods table."""
        sql = 'drop table goods;'
        self.run_sql(sql)
if __name__ == '__main__':
    # Ad-hoc smoke test: insert one sample row, then close the connection.
    s = MysqlClient('root','312429','goods')
    # print(s.create_table())
    #
    s.insert_one(0,12345,None,9.6,'qeqeweqwew','qweqeqwewe')
    # s.update_one(12345,price=8,goods_url='aaaa')
    # s.delete_one(12345)
    # print(s.find(12345, col_list=['goods_url', 'query_url'],))
    s.close()
| Clinkz-1/notes | buff/utils/mysql_test.py | mysql_test.py | py | 3,579 | python | en | code | 1 | github-code | 90 |
1879916936 | from unittest import TestCase
import requests
from acceptance_tests import DASHBOARD_SERVER_URL
class StatusTests(TestCase):
POSITIVE_STATUS = 'OK'
def test_health(self):
response = requests.get(f'{DASHBOARD_SERVER_URL}/health')
self.assertEqual(response.status_code, 200)
expected_status = {
'overall_status': self.POSITIVE_STATUS,
'detailed_status': {
'database_connection': self.POSITIVE_STATUS,
}
}
self.assertDictEqual(response.json(), expected_status)
def test_status(self):
response = requests.get(f'{DASHBOARD_SERVER_URL}/status')
self.assertEqual(response.status_code, 200)
| openedx/edx-analytics-dashboard | acceptance_tests/test_status.py | test_status.py | py | 712 | python | en | code | 72 | github-code | 90 |
17800899976 | import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# Load movie metadata and per-user ratings, then average ratings per movie.
df_movies = pd.read_csv("data/movies_metadata.csv", usecols = ["id", "original_title", "revenue"])
df_rating = pd.read_csv("data/ratings_small.csv", usecols = ["movieId", "rating"])
df_rating.rename(columns = {"movieId": "id"}, inplace=True)
df_rating["id"] = df_rating["id"].astype(str)
df_average_ratings = df_rating.groupby("id").mean()

df = df_movies.merge(df_average_ratings, on = "id").dropna()
data = df[["rating", "revenue"]].values

# Elbow method: plot inertia for k = 1..10 to pick a cluster count.
inertias = []
for i in range(1,11):
    # fix: the original passed init=auto — `auto` is an undefined name
    # (NameError); n_init="auto" is the intended scikit-learn keyword.
    kmeans = KMeans(n_clusters=i, n_init="auto")
    kmeans.fit(data)
    inertias.append(kmeans.inertia_)

plt.plot(range(1,11), inertias, marker='o')
plt.title('Elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('Inertia')
plt.show()

# Final clustering with the chosen k; color each movie by its cluster.
kmeans = KMeans(n_clusters=3)
kmeans.fit(data)
plt.scatter(df["rating"], df["revenue"], c=kmeans.labels_)
plt.show() | giovanni-cutri/clustering-experiments | movies/cluster-analysis.py | cluster-analysis.py | py | 927 | python | en | code | 0 | github-code | 90 |
31423113693 | #! usr/bin/env/python3
# coding:utf-8
# @Time: 2019-10-10 16:12
# Author: turpure
from src.services.base_service import BaseService
import requests
import json
class Worker(BaseService):
    """Pulls eBay product stats from the HaiYing API and stores them in
    the proEngine.ebay_products warehouse table.

    NOTE(review): the request URL embeds fixed u_name/time/sign query
    parameters — the signature presumably expires; confirm how it is
    refreshed.
    """
    def get_products(self):
        """Fetch product rows from the API and yield them as tuples
        ordered to match save_products' column list (station is 'US')."""
        base_url = 'http://111.231.88.85:38080/hysj_v2/ebay_api/item_infos?u_name=youran&time=1570688732&sign=2d0a3f02e005e56f65f65810efb01bff&station=america'
        res = requests.post(base_url)
        ret = res.json()['result']
        for ele in ret:
            yield (ele['item_id'], ele['main_image'], ele['title'], ele['cids'], ele['price'], ele['sold'],
                   ele['sold_the_previous_day'], ele['payment_the_previous_day'], ele['sold_the_previous_growth'],
                   ele['sales_week1'], ele['sales_week2'], ele['sales_week_growth'], ele['payment_week1'], ele['payment_week2'],
                   ele['item_location'], ele['watchers'], ele['last_modi_time'], ele['stat_time'],
                   ele['gen_time'], ele['seller'], ele['store'], ele['store_location'], ele['category_structure'],
                   ele['sales_three_day1'], ele['sales_three_day2'], ele['sales_three_day_growth'], ele['payment_three_day1'],
                   ele['payment_three_day2'], ele['visit'], ele['sales_three_day_flag'], ele['item_url'], ele['marketplace'],
                   ele['popular'], 'US')
        # NOTE(review): a generator's return value is only visible via
        # StopIteration, so this return is effectively dead — confirm.
        return ret
    def save_products(self, rows):
        """Bulk-insert the fetched rows into proEngine.ebay_products."""
        sql = ('insert into proEngine.ebay_products (item_id,main_image,title,cids,'
               'price,sold,sold_the_previous_day,payment_the_previous_day,sold_the_previous_growth,'
               'sales_week1,sales_week2,sales_week_growth,payment_week1,payment_week2,item_location,'
               'watchers,last_modi_time,stat_time,gen_time,seller,store,store_location,category_structure,'
               'sales_three_day1,sales_three_day2,sales_three_day_growth,payment_three_day1,payment_three_day2,'
               'visit,sales_three_day_flag,item_url,marketplace,popular, station) values ('
               '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
        self.warehouse_cur.executemany(sql, rows)
        self.warehouse_con.commit()
    def run(self):
        """Fetch then persist products, logging success or failure, and
        always release connections via close()."""
        try:
            rows = self.get_products()
            self.save_products(rows)
            self.logger.info('success to get ebay products from haiYing')
        except Exception as why:
            self.logger.error(f'fail to get ebay products from haiYing cause of {why}')
        finally:
            self.close()
if __name__ == '__main__':
worker = Worker()
worker.run()
| yourant/ur_cleaner | sync/haiying/ebay_products.py | ebay_products.py | py | 2,615 | python | en | code | 0 | github-code | 90 |
5719628386 | #-------------------------------------------------------------------------------
# Name: parseHTMLimages.py
# Purpose: Displays the number of images in given url
# Usage: Requires one argument: a url.
#
# Author: Johanson Onyegbula
#
# Created: 25/06/2020
#-------------------------------------------------------------------------------
import sys, os, BeautifulSoup, urllib2
def main():
    """Fetch the URL given as argv[1] and report its <img> tags.

    NOTE(review): urllib2/BeautifulSoup (module-level import style) make
    this Python 2 code — confirm before running under Python 3.
    """
    url = sys.argv[1]
    response = urllib2.urlopen(url)
    soup = BeautifulSoup.BeautifulSoup(response.read())
    imgs = soup.findAll('img')
    noImg = len(imgs)
    print('{} images found.'.format(noImg))
    for img in imgs:
        print('image src:{};'.format(img['src']))
if __name__ == '__main__':
main()
| Johanson20/Python4Geoprocessing | ch20/parseHTMLimages.py | parseHTMLimages.py | py | 736 | python | en | code | 0 | github-code | 90 |
34709470554 | from pynboids import Boid
from random import randint
import pygame as pg
'''
Multilayer Boids test
Copyright (c) 2021 Nikolaus Stromberg
'''
BPL = 12 # How many boids per layer
WRAP = False # False avoids edges, True wraps boids to other side.
BGCOLOR = (0, 0, 42) # Background color in RGB.
FPS = 48 # 30-90
def main():
    """Run three independent flocks of boids, each drawn on its own transparent layer.

    Layers 1..3 use progressively brighter/larger color values to fake depth;
    each frame every layer surface is cleared, updated, and composited onto the
    screen back-to-front.
    """
    pg.init() # prepare window
    pg.display.set_caption("Multilayer Test")
    currentRez = (pg.display.Info().current_w, pg.display.Info().current_h)
    screen = pg.display.set_mode(currentRez, pg.FULLSCREEN | pg.SCALED) #pg.HWSURFACE | pg.DOUBLEBUF |
    pg.display.toggle_fullscreen() # linux workaround
    pg.mouse.set_visible(False)
    # One off-screen surface per layer; colorkey 0 makes black pixels transparent
    # so the layers can be blitted over each other.
    layer1_surf = pg.Surface(currentRez)
    layer2_surf = pg.Surface(currentRez)
    layer3_surf = pg.Surface(currentRez)
    layer1_surf.set_colorkey(0)
    layer2_surf.set_colorkey(0)
    layer3_surf.set_colorkey(0)
    layer1_Boids = pg.sprite.Group()
    layer2_Boids = pg.sprite.Group()
    layer3_Boids = pg.sprite.Group()
    for n in range(BPL):
        #randColor.hsva = (((randint(120,300) + 180) % 360),85,85) # randint(10,60) goldfish
        # HSV tuples: same random hue formula per layer, but saturation/value
        # increase with layer number (50/33 -> 64/66 -> 80/99).
        layer1_Boids.add(Boid(layer1_surf, True, (((randint(120,300) + 180) % 360),50,33)))
        layer2_Boids.add(Boid(layer2_surf, True, (((randint(120,300) + 180) % 360),64,66)))
        layer3_Boids.add(Boid(layer3_surf, True, (((randint(120,300) + 180) % 360),80,99)))
    # Each flock only sees its own layer's sprites (boids do not interact across layers).
    lyr1Boids = layer1_Boids.sprites()
    lyr2Boids = layer2_Boids.sprites()
    lyr3Boids = layer3_Boids.sprites()
    clock = pg.time.Clock()
    # main loop
    while True:
        events = pg.event.get()
        for e in events:
            if e.type == pg.QUIT or e.type == pg.KEYDOWN and e.key == pg.K_ESCAPE:
                return
        dt = clock.tick(FPS) / 1000  # frame time in seconds, capped at FPS
        screen.fill(BGCOLOR)
        layer1_surf.fill(0)
        layer2_surf.fill(0)
        layer3_surf.fill(0)
        layer1_Boids.update(lyr1Boids, dt, WRAP)
        layer2_Boids.update(lyr2Boids, dt, WRAP)
        layer3_Boids.update(lyr3Boids, dt, WRAP)
        layer1_Boids.draw(layer1_surf)
        layer2_Boids.draw(layer2_surf)
        layer3_Boids.draw(layer3_surf)
        # Composite back-to-front: layer 1 is the farthest, layer 3 the nearest.
        pg.Surface.blit(screen, layer1_surf, (0,0))
        pg.Surface.blit(screen, layer2_surf, (0,0))
        pg.Surface.blit(screen, layer3_surf, (0,0))
        pg.display.update()
if __name__ == '__main__':
main() # by Nik
pg.quit()
| TrendingTechnology/PyNBoids | multilayertest.py | multilayertest.py | py | 2,445 | python | en | code | null | github-code | 90 |
25020671528 | #!/usr/bin/env python
# coding=utf-8
import numpy as np
from cycler import cycler
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as mpatches
import random
import crc16
from zlib import crc32
import hashlib
import pickle
import time
from abc import ABCMeta, abstractmethod
import sys
sys.path.append("..")
import plot_config
IP_MAX = 0xFFFFFFFF
SWITCH_CACHE_SIZE = 1000
class Switch(metaclass=ABCMeta):
    """Abstract base class for the simulated switches.

    Tracks two hit-count tables: `ip_list` (every legitimate ip seen) and
    `cache_list` (the ips currently cached, bounded by CACHE_SIZE).

    Fix: the original used the Python 2 ``__metaclass__ = ABCMeta`` attribute,
    which is silently ignored by Python 3, so ``@abstractmethod`` was never
    enforced; ``metaclass=ABCMeta`` restores the intended behaviour.
    """
    CACHE_SIZE = SWITCH_CACHE_SIZE
    # Class-level defaults only document the attribute names; per-instance
    # dicts are created in __init__.
    ip_list = None
    cache_list = None
    def __init__(self):
        self.cache_list = {}
        self.ip_list = {}
    @abstractmethod
    def receive(self, ip, spoofing):
        """Process one packet from `ip`; `spoofing` marks spoofed traffic."""
    def cache_decay(self):
        """Exponentially age every hit counter (factor 0.8 per tick)."""
        for (ip, hit_count) in self.ip_list.items():
            self.ip_list[ip] = int(hit_count * 0.8)
        for (ip, hit_count) in self.cache_list.items():
            self.cache_list[ip] = int(hit_count * 0.8)
    def calculate_hop_percentage(self, hot_ip_list):
        """Return the fraction of cached entries that belong to `hot_ip_list`."""
        hot_count = sum(1 for ip in hot_ip_list if ip in self.cache_list)
        return hot_count / float(len(self.cache_list))
class NetHCFSwitch(Switch):
    """Switch with the NetHCF update policy.

    Spoofed packets are dropped before they touch any counter, and a cached
    entry is evicted only when a non-cached ip has accumulated strictly more
    hits than the coldest cached entry.
    """
    def __init__(self):
        Switch.__init__(self)
    def receive(self, ip, spoofing):
        # NetHCF filters spoofed traffic: it never influences the counters.
        if spoofing:
            return
        self.ip_list[ip] = self.ip_list.get(ip, 0) + 1
        if ip in self.cache_list:
            # Cache hit: just bump the cached counter.
            self.cache_list[ip] += 1
            return
        if len(self.cache_list) < self.CACHE_SIZE:
            # Cache has free room: insert unconditionally.
            self.cache_list[ip] = self.ip_list[ip]
            return
        # Cache full: evict the coldest entry only if the newcomer is hotter.
        # (min() keeps the first minimum on ties, same as the original scan.)
        min_ip = min(self.cache_list, key=self.cache_list.get)
        if self.cache_list[min_ip] < self.ip_list[ip]:
            del self.cache_list[min_ip]
            self.cache_list[ip] = self.ip_list[ip]
class NetCacheSwitch(Switch):
    """Switch with the NetCache-style update policy.

    Differences from NetHCFSwitch: spoofed packets also update the counters,
    the decay factor is 0.9 instead of the base class's 0.8, and a cache miss
    always triggers an eviction of the coldest entry.
    """
    def __init__(self):
        Switch.__init__(self)
    def cache_decay(self):
        # Overrides the base class to decay more slowly (keeps 90% per tick).
        for (ip, hit_count) in self.ip_list.items():
            self.ip_list[ip] = int(hit_count * 0.9)
        for (ip, hit_count) in self.cache_list.items():
            self.cache_list[ip] = int(hit_count * 0.9)
    def receive(self, ip, spoofing):
        # update value (note: spoofed traffic is counted too -- no filtering here)
        if ip in self.ip_list:
            self.ip_list[ip] += 1
        else:
            self.ip_list[ip] = 1
        if ip in self.cache_list:
            # if ip already in cache
            self.cache_list[ip] += 1
            return
        # ip is not in cache
        # decide update or not
        if len(self.cache_list) < self.CACHE_SIZE:
            # if cache is not full
            # just insert
            self.cache_list[ip] = self.ip_list[ip]
        # NOTE(review): unlike NetHCFSwitch.receive there is no `return` after
        # the insert above, so the eviction below also runs when the cache was
        # not yet full -- confirm this is the intended NetCache behaviour.
        # if cache is full
        # see if we need to replace some
        # get the lowest value in cache
        min_hit = 0xFFFFFFFF
        for (ip_int, hit_count) in self.cache_list.items():
            if hit_count <= min_hit:
                min_hit = hit_count
                min_ip = ip_int
        del self.cache_list[min_ip]
        self.cache_list[ip] = self.ip_list[ip]
class NoUpdateSwitch(Switch):
    """Baseline switch: the cache fills once and is never updated afterwards."""
    def __init__(self):
        Switch.__init__(self)
    def receive(self, ip, spoofing):
        # Insert only while there is free room; a full cache is left untouched.
        if len(self.cache_list) < self.CACHE_SIZE:
            self.cache_list[ip] = 1
def generate_ip():
    """Return a random IPv4 address as an integer in [0, IP_MAX]."""
    return random.randint(0, IP_MAX)
def simulate():
    """Run the 400-second cache-update simulation on the three switch variants.

    Returns a list of three time series (NetHCF, NetCache, NoUpdate); each
    entry is the fraction of cache occupied by hot/legitimate ips that second.
    """
    HOT_IP_SIZE = 850
    FIX_IP_SIZE = 50
    ATTACK_START_TIME = 200
    HOT_IP_PKT_PER_SEC = 2
    RAND_IP_PER_SEC = 100
    ATTACK_IP_PER_SEC = 200
    ATTACK_PKT_PER_IP_PER_SEC = 15
    # NOTE(review): RAND_IP_PER_SEC, ATTACK_IP_PER_SEC and
    # ATTACK_PKT_PER_IP_PER_SEC are defined but unused; the loops below use
    # hard-coded randint ranges instead -- confirm which values are canonical.
    # simulate
    s1 = NetHCFSwitch()
    s2 = NetCacheSwitch()
    s3 = NoUpdateSwitch()
    hot_ips = []
    fix_ips = []
    hot_precentage = [[], [], []]
    # first generate a set of ips to fill up the cache
    for i in range(SWITCH_CACHE_SIZE):
        ip = generate_ip()
        s1.receive(ip, False)
        s2.receive(ip, False)
        s3.receive(ip, False)
        if len(hot_ips) < HOT_IP_SIZE:
            hot_ips.append(ip)
        elif len(fix_ips) < FIX_IP_SIZE:
            fix_ips.append(ip)
    # `time` here is the simulated second counter; it shadows the imported
    # `time` module inside this function.
    time = -1
    while True:
        time += 1
        print("Time: %d" % time)
        # replace part of hot ips (churn: 10-30 hot ips leave, 10-30 arrive)
        for i in range(random.randint(10, 30)):
            rand_index = random.randint(0, len(hot_ips) - 1)
            del hot_ips[rand_index]
        for i in range(random.randint(10, 30)):
            rand_ip = generate_ip()
            hot_ips.append(rand_ip)
        # send hot ips
        for ip in hot_ips:
            for i in range(random.randint(3, 5)):
                s1.receive(ip, False)
                s2.receive(ip, False)
                s3.receive(ip, False)
        # send fix ips
        for ip in fix_ips:
            for i in range(HOT_IP_PKT_PER_SEC):
                s1.receive(ip, False)
                s2.receive(ip, False)
                s3.receive(ip, False)
        # randomly send some ips (background legitimate traffic)
        for i in range(random.randint(80, 130)):
            ip = generate_ip()
            s1.receive(ip, False)
            s2.receive(ip, False)
            s3.receive(ip, False)
        # attack: spoofed bursts start after ATTACK_START_TIME seconds
        if time > ATTACK_START_TIME:
            for i in range(random.randint(100, 300)):
                ip = generate_ip()
                for j in range(random.randint(10, 40)):
                    s1.receive(ip, True)
                    s2.receive(ip, True)
                    s3.receive(ip, True)
        hot_precentage[0].append(s1.calculate_hop_percentage(hot_ips + fix_ips))
        hot_precentage[1].append(s2.calculate_hop_percentage(hot_ips + fix_ips))
        hot_precentage[2].append(s3.calculate_hop_percentage(hot_ips + fix_ips))
        print("%f, %f, %f" % (hot_precentage[0][time], hot_precentage[1][time], hot_precentage[2][time]))
        s1.cache_decay()
        s2.cache_decay()
        s3.cache_decay()
        if time > 400:
            break
    return hot_precentage
# Either restore the previously computed series from the pickle file, or run
# the simulation and cache its result for later plotting-only runs.
restore_from_file = True
pickle_filename = "./simulation_data.pickle"
if restore_from_file:
    print("Restoring data from file " + pickle_filename)
    with open(pickle_filename, "rb") as f:
        hot_precentage = pickle.load(f)
else:
    hot_precentage = simulate()
    # save data into file
    print("Storing data into file " + pickle_filename)
    with open(pickle_filename, "wb") as f:
        pickle.dump(hot_precentage, f)
# Number of simulated seconds recorded (all three series have equal length).
period = len(hot_precentage[0])
# plotting
plt.figure(figsize=plot_config.fig_size)
ax = plt.subplot(111)
# Major/minor tick spacing: 8 major x ticks over the period, y ticks every 20%.
xmajor = int(period / 8)
xmajorLocator = MultipleLocator(xmajor)
xmajorFormatter = FormatStrFormatter('%1d')
xminorLocator = MultipleLocator(xmajor / 2.0)
ymajor = int(1.0 / 5 * 100)
ymajorLocator = MultipleLocator(ymajor)
ymajorFormatter = FormatStrFormatter('%4d')
yminorLocator = MultipleLocator(ymajor / 2.0)
ax.xaxis.set_major_locator(xmajorLocator)
ax.xaxis.set_major_formatter(xmajorFormatter)
ax.yaxis.set_major_locator(ymajorLocator)
ax.yaxis.set_major_formatter(ymajorFormatter)
ax.xaxis.set_minor_locator(xminorLocator)
ax.yaxis.set_minor_locator(yminorLocator)
ax.xaxis.grid(True, which='major', ls='dotted')
ax.yaxis.grid(True, which='major', ls='dotted')
# NOTE(review): `color` appears unused below -- confirm before removing.
color = 0
# Series are fractions; scale to percent for display.
plt.plot(range(period), [j * 100 for j in hot_precentage[0]], '-', label="NetHCF Update")
plt.plot(range(period), [j * 100 for j in hot_precentage[1]], '-', label="NetCache Update")
plt.plot(range(period), [j * 100 for j in hot_precentage[2]], '-', label="No Update")
plt.ylim(0, 100)
plt.xlim(0, period)
plt.legend(loc='lower right', bbox_to_anchor=(1, 0.2), fontsize=plot_config.font_size, shadow=False)
# Mark the attack start (t=200, matching ATTACK_START_TIME in simulate()).
ax.annotate('Attack Starts', xy=(200, 82),
            xytext=(50, 70), fontsize=plot_config.font_size,
            arrowprops=dict(facecolor=plot_config.colors[0], arrowstyle='fancy',
                            connectionstyle="arc3,rad=0.5"),
            )
line0 = mpl.lines.Line2D([200, 200], [-2, 102], lw=1, color='grey', linestyle='-.', alpha=0.8)
line0.set_clip_on(False)
ax.add_line(line0)
for label in ax.xaxis.get_ticklabels():
    label.set_fontsize(plot_config.font_size)
# plt.tick_params(
# axis='x', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# bottom=False, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
# labelbottom=False) # labels along the bottom edge are off
for label in ax.yaxis.get_ticklabels():
    label.set_fontsize(plot_config.font_size)
plt.xlabel('Time(s)', fontsize=plot_config.font_size)
plt.ylabel('Per of Hot & Legitimate IPs (%)', fontsize=plot_config.font_size)
plt.tight_layout(rect=[0, 0, 1, 1])
plt.subplots_adjust(wspace=0, hspace=0.05)
plt.savefig('cache_update.pdf')
plt.show() | goodnighthy/MTNF | traffic_analysis/line/cache_update_method/simulation.py | simulation.py | py | 9,231 | python | en | code | 0 | github-code | 90 |
73496877415 | import json
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from rest_framework import serializers, viewsets
from backend.exception import ErrorCode, PlatformError
from backend.models import Report
from backend.util import UserHolder, Response, parse_data, page_params, save
class ReportSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the basic fields of a Report."""
    class Meta:
        model = Report
        fields = ['id', 'name', 'created_at', 'updated_at']
class ReportViewSet(viewsets.ModelViewSet):
    """REST viewset for case reports, scoped to the current user."""
    # NOTE(review): DRF normally expects a QuerySet here (Report.objects.all());
    # `queryset = Report` assigns the model class -- confirm this is intended.
    queryset = Report
    serializer_class = ReportSerializer
    def list(self, request, *args, **kwargs):
        """
        Paged query of reports.

        Only the current user's data is returned. `record_id` is required;
        `name` is matched fuzzily (contains) and `status` exactly.
        """
        data = parse_data(request, 'GET')
        page, page_size, name, record_id, status = page_params(data, 'name', 'record_id', 'status').values()
        if record_id is None:
            raise PlatformError.error_args(ErrorCode.MISSING_NECESSARY_KEY, 'record_id')
        projects = Report.objects.filter(owner=UserHolder.current_user()).exact(record_id=record_id)\
            .contains(name=name).exact(status=status)
        page_projects = Paginator(projects, page_size)
        result = page_projects.page(page)
        return Response.success(result)
    def retrieve(self, request, *args, **kwargs):
        """
        Fetch the detail of a single report by id.
        """
        parse_data(request, 'GET')
        report = get_by_id(kwargs['pk'])
        # JSON-encoded fields are decoded in place before returning.
        decoding(report)
        return Response.success(report)
# -------------------------------------------- RESTful endpoints above, internal helper functions below -----------------------------------------
def get_by_id(id):
    """Return the report with the given id owned by the current user.

    Raises a PlatformError (DATA_NOT_EXISTED) when no such report exists.
    """
    try:
        return Report.objects.get(owner=UserHolder.current_user(), id=id)
    except ObjectDoesNotExist:
        raise PlatformError.error_args(ErrorCode.DATA_NOT_EXISTED, '用例报告', 'id')
def create(report):
    """
    Persist a new case report and return it.
    """
    save(report)
    return report
def decoding(report):
    """Decode the JSON-encoded fields of `report` in place.

    Each of the four extend/expected attributes may hold a JSON string; truthy
    values are replaced by their parsed objects, falsy values (None, '') are
    left untouched. Replaces four copy-pasted if-blocks with one loop.
    """
    for attr in ('extend_keys', 'extend_values', 'expected_keys', 'expected_values'):
        raw = getattr(report, attr)
        if raw:
            setattr(report, attr, json.loads(raw))
| felixu1992/testing-platform | backend/handler/record/report.py | report.py | py | 2,593 | python | en | code | 0 | github-code | 90 |
18524173729 | n, m = map(int, input().split())
l = [list(map(int, input().split())) for i in range(n)]
from itertools import product

# For a fixed choice of per-axis signs (sx, sy, sz) in {+1, -1}^3, the best
# selection of m vectors is simply the m largest values of sx*x + sy*y + sz*z.
# Trying all 8 sign patterns covers every way the absolute values can resolve,
# replacing the eight copy-pasted list/sort pipelines of the original.
sum_list = [
    sum(sorted((sx * x + sy * y + sz * z for x, y, z in l), reverse=True)[:m])
    for sx, sy, sz in product((1, -1), repeat=3)
]
print(max(sum_list)) | Aasthaengg/IBMdataset | Python_codes/p03326/s944296622.py | s944296622.py | py | 1,157 | python | en | code | 0 | github-code | 90 |
16646026404 | from urllib import request
google_url_loc = 'http://samplecsvs.s3.amazonaws.com/TechCrunchcontinentalUSA.csv'
def download_csv_file(csv_url):
    """Download the CSV at `csv_url` and save it locally as 'online.csv'.

    Bug fixed: the original converted the response bytes with str(), which in
    Python 3 produces the bytes repr ("b'...'"), and then split on the literal
    two-character sequence '\\n' -- writing garbled output. The payload is now
    decoded and written as-is, and both the response and the file are closed
    via context managers.
    """
    with request.urlopen(csv_url) as response:
        text = response.read().decode('utf-8')
    dst = r'online.csv'
    with open(dst, 'w') as fp:
        fp.write(text)
download_csv_file(google_url_loc)
| kusuma-bharath/Python | ex_dwnld_f_web.py | ex_dwnld_f_web.py | py | 421 | python | en | code | 0 | github-code | 90 |
72207987498 | # -*- coding: utf-8 -*-
# @Time : 2019/11/18 0018 9:56
# @Author : 没有蜡笔的小新
# @E-mail : sqw123az@sina.com
# @FileName: Integer Break.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
"""
Given a positive integer n, break it into the sum of at least two positive integers and maximize the product of those integers. Return the maximum product you can get.
Example 1:
Input: 2
Output: 1
Explanation: 2 = 1 + 1, 1 × 1 = 1.
Example 2:
Input: 10
Output: 36
Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.
Note: You may assume that n is not less than 2 and not larger than 58.
"""
def integerBreak(n: int) -> int:
    """Return the maximum product obtainable by splitting n (n >= 2) into a
    sum of at least two positive integers.

    Uses the closed-form solution: split into as many 3s as possible and fix
    up the remainder (remainder 1 -> trade a 3+1 for 2+2, i.e. a factor 4;
    remainder 2 -> keep a single factor 2). This replaces the O(n) DP and is
    correct for arbitrarily large n, not just n <= 58.
    """
    if n == 2:
        return 1
    if n == 3:
        return 2
    q, r = divmod(n, 3)
    if r == 0:
        return 3 ** q
    if r == 1:
        # 3 * 1 < 2 * 2: replace one 3 and the leftover 1 with a 4.
        return 3 ** (q - 1) * 4
    return 3 ** q * 2
if __name__ == '__main__':
n = 2
result = integerBreak(n)
print(result)
| Asunqingwen/LeetCode | medium/Integer Break.py | Integer Break.py | py | 954 | python | en | code | 0 | github-code | 90 |
18490607909 | n = int(input())
v = list(map(int, input().split()))
from collections import Counter
# Split the sequence by index parity; the goal is to make all even-indexed
# elements equal and all odd-indexed elements equal (with different values),
# minimising the number of replacements.
odd_ary = v[::2]
even_ary = v[1::2]
# top-2 most frequent values (with counts) in each parity class
oc = Counter(odd_ary).most_common(2)
ec = Counter(even_ary).most_common(2)
# Pad with a sentinel so oc[1]/ec[1] are always indexable.
if len(oc) == 1:
    oc.append((0, 0))
if len(ec) == 1:
    ec.append((0, 0))
if oc[0][0] != ec[0][0]:
    # Distinct modes: keep both, replace everything else.
    print(n - (oc[0][1] + ec[0][1]))
else:
    # Same mode on both sides: one side must fall back to its runner-up.
    print(n - max(oc[0][1] + ec[1][1], oc[1][1] + ec[0][1]))
| Aasthaengg/IBMdataset | Python_codes/p03244/s387851570.py | s387851570.py | py | 422 | python | en | code | 0 | github-code | 90 |
7544954871 | from datetime import datetime
import logging
import tempfile
import os
# import logger
log = logging.getLogger(__name__)
def print_status(status):
    """Print a timestamped status line and mirror the message to the logger."""
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print('--[' + timestamp + '] ' + status + '--')
    log.info(status)
def create_temp_folder():
    """Create a fresh temporary directory and return its path."""
    return tempfile.mkdtemp()
def close_temp_folder(tmp_dir):
    """Remove the (empty) temporary directory once it is no longer needed."""
    os.removedirs(tmp_dir)
def create_output_folder(cwd):
    """Create (if needed) a timestamped output folder under `cwd` and return its path."""
    # fetch current time
    stamp = datetime.now().strftime("%Y%m%d%H%M%S")
    # path to output folder
    output_folder = cwd + '/' + stamp
    # create folder if it doesn't already exist
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    # print output folder name
    print_status('Output folder: ' + output_folder)
    return output_folder
def run_command(cmd_list, stdout_file=None, stderr_file=None, shell=True):
    '''run command with subprocess.call
    if stdout or stderr arguments are passed, save to specified file

    Refactor: the four duplicated branches (one per combination of
    stdout_file/stderr_file) are collapsed with contextlib.nullcontext,
    whose __enter__ yields None -- and subprocess.call treats stdout=None /
    stderr=None as "inherit the parent's streams". The stray debug
    `print(cmd_list)` from the no-redirection branch was removed.
    '''
    import subprocess
    from contextlib import nullcontext
    print_status(' '.join(cmd_list))  # print status
    out_cm = open(stdout_file, 'w') if stdout_file is not None else nullcontext()
    err_cm = open(stderr_file, 'w') if stderr_file is not None else nullcontext()
    with out_cm as so, err_cm as se:
        subprocess.call(cmd_list, stdout=so, stderr=se, shell=shell)
def test_executable(cmd):
'''check that a particular command can be run as an executable'''
import shutil
assert shutil.which(cmd) is not None, 'Executable ' + cmd + ' cannot be run'
def get_notebook_path(out_dir, main_dir, notebook_name,experiment):
    '''Copy the notebook `<main_dir>/<notebook_name>.ipynb` into `out_dir`,
    renaming it to `<experiment>.ipynb`.

    NOTE(review): the original docstring referred to a hard-coded
    "20835-genotyping.ipynb" in the current working directory; the code
    actually uses `main_dir`/`notebook_name`, and `cwd` below is unused.
    '''
    import os
    import shutil
    cwd = os.getcwd() # get working directory (unused -- kept for compatibility)
    #notebook_path = cwd + '/20835-miseq-genotyping.ipynb'
    notebook_path = main_dir + '/' + notebook_name + '.ipynb'
    # copy to output folder
    shutil.copy2(notebook_path, out_dir + '/' + str(experiment) + '.ipynb')
    print('Copied Jupyter Notebook to : ' + out_dir + '/' + str(experiment) + '.ipynb')
def copy_file_to_results(out_dir, filepath_list):
    '''Copy each file in `filepath_list` into `out_dir`, keeping base names.

    Fixes: the docstring was copy-pasted from get_notebook_path, destination
    paths are now built with os.path.join, and the success message prints the
    actual destination (it previously printed the *source* path after "to").
    '''
    import os
    import shutil
    for filepath in filepath_list:
        base_name = os.path.basename(filepath)
        destination = os.path.join(out_dir, base_name)
        shutil.copy2(filepath, destination)
        print('Copied ' + base_name + ' file to : ' + destination)
def file_size(f):
    '''Return the size in bytes of the file at path `f`.'''
    import os
    return os.path.getsize(f)
def create_folder(directory):
    '''Create `directory` (including parents) unless it already exists.'''
    if os.path.exists(directory):
        print('Results Foler already exists: ' + directory)
    else:
        os.makedirs(directory)
        print('Created_Folder: ' + directory)
    return()
def test_dir(directory,shortname):
if not os.path.exists(directory):
print( directory + 'directory does not exist for variable: ' +shortname+' ;. Script has terminated')
raise RuntimeError(directory + 'directory does not exist for variable: ' +shortname+' ;. Script has terminated')
return
| DABAKER165/pepMeld | pepMeld/utils.py | utils.py | py | 3,735 | python | en | code | 2 | github-code | 90 |
27353926491 | import logging
from flask import Flask
from flask import request, Response
from viberbot.api.viber_requests import ViberMessageRequest, \
ViberConversationStartedRequest, ViberSubscribedRequest, \
ViberFailedRequest
from messages.messages import send_text_message, send_next_block
from bot.bot import viber
from config import LOCAL_PORT
app = Flask(__name__)
@app.route('/', methods=['POST'])
def incoming():
    """Viber webhook endpoint: verify the signature, then dispatch by request type."""
    logging.debug("received request. post data: {0}".format(request.get_data()))
    # every viber message is signed, you can verify the signature using this method
    if not viber.verify_signature(request.get_data(),
                                  request.headers.get('X-Viber-Content-Signature')):
        return Response(status=403)
    # this utils supplies a simple way to receive a request object
    viber_request = viber.parse_request(request.get_data())
    if isinstance(viber_request, ViberConversationStartedRequest):
        # First contact: prompt the user to send the start keyword.
        welcome_text = "Щоб почати спілкування, надішліть \"Старт\""
        send_text_message(viber_request.user.id, welcome_text)
    elif isinstance(viber_request, ViberMessageRequest):
        user_message = viber_request.message.text
        user_id = viber_request.sender.id
        # The start keyword maps to option id "0" (the dialogue root).
        if user_message == "Старт":
            user_message = "0"
        send_next_block(user_id, current_option_id=user_message)
    elif isinstance(viber_request, ViberSubscribedRequest):
        subscribe_text = "Дякуємо за підписку!"
        send_text_message(viber_request.user.id, subscribe_text)
    elif isinstance(viber_request, ViberFailedRequest):
        logging.warning(
            "client failed receiving message. failure: {0}".format(viber_request))
    # Always acknowledge with 200 so Viber does not retry the delivery.
    return Response(status=200)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=(LOCAL_PORT or 443), debug=True)
| UAWarDevelopers/-viber_first_aid | main.py | main.py | py | 1,903 | python | en | code | 0 | github-code | 90 |
28449966986 | from engine.engine_template import EngineTemplate
from wifuxlogger import WifuxLogger as LOG
import network
sta_if = network.WLAN(network.STA_IF)
def run(cmds):
    """Dispatch cmds[1] as a function call in this module with formatted args.

    NOTE(review): built on eval() over command input -- only safe if `cmds`
    comes from a trusted source; a dict-based dispatch would be safer.
    """
    return eval("{}({})".format(cmds[1],EngineTemplate.exec_formatter_api(cmds)))
def connect(cmds):
blueprint = EngineTemplate.parameter_parser(cmds)
if not sta_if.isconnected():
LOG.debug('connecting to network...')
try:
sta_if.active(True)
sta_if.connect(blueprint["--name"], blueprint["--password"])
except Exception as ex:
return LOG.error(ex)
return LOG.info('Connection Successfull')
def disconnect(cmds):
    """Drop the current connection by toggling the interface off and back on.

    Logs the post-toggle connection state; `cmds` is unused.
    """
    sta_if.active(False)
    sta_if.active(True)
    return LOG.debug(sta_if.isconnected())
def ifconfig(cmds):
    """Log a summary of the current WLAN configuration; `cmds` is unused.

    Bug fixed: the access-point name and MAC expressions were embedded
    literally inside the string ("str(sta_if.config('essid'))") instead of
    being interpolated; they are now passed through format() like the
    ifconfig fields.
    """
    info = sta_if.ifconfig()
    message = ""
    if sta_if.isconnected():
        message = "<BROADCASTING>"
    else:
        message = "<>"
    return LOG.info("""
    --------------------------
        Network Information
    --------------------------
    {}
    --------------------------
    Access Point Name: {}
    Access Point MAC Address: {}
    --------------------------
    Your Device IP: {}
    Subnet Mask: {}
    Gateway: {}
    DNS: {}""".format(message, str(sta_if.config('essid')), str(sta_if.config('mac')),
                      info[0], info[1], info[2], info[3]))
def ls(cmds):
    """Log the SSIDs of all access points visible to the station interface."""
    # Matches the original format: a leading newline, then one ssid per line.
    listing = "\n" + "".join(str(net[0]) + "\n" for net in sta_if.scan())
    return LOG.debug("Available WLANs") + listing
def on(cmds):
    """Enable the Wi-Fi station interface; `cmds` is unused."""
    sta_if.active(True)
    return LOG.info("WiFi ON")
def off(cmds):
    """Disable the Wi-Fi station interface; `cmds` is unused."""
    sta_if.active(False)
    return LOG.info("WiFi OFF")
def status(cmds):
    """Log a human-readable description of the station connection status.

    Replaces the long if/elif chain with a table mapping the MicroPython
    network status codes to (log level, message). Unknown codes return None,
    matching the original fall-through behaviour.
    """
    handlers = {
        1000: (LOG.info, "No Connection and No Activities"),
        1001: (LOG.info, "Connecting"),
        202: (LOG.error, "Failed due to password error"),
        201: (LOG.warning, "Failed, because there is no access point reply"),
        1010: (LOG.info, "Connected"),
        203: (LOG.error, "Failed"),
        200: (LOG.error, "Timeout"),
        204: (LOG.error, "Handshake timeout"),
    }
    entry = handlers.get(sta_if.status())
    if entry is None:
        return None
    log_fn, message = entry
    return log_fn(message)
| gooz-project/gooz-os-v1.0.0 | dev/wifi/core.py | core.py | py | 2,260 | python | en | code | 5 | github-code | 90 |
73402688616 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="link-crab",
version="0.2.1",
author="Krisztián Pál Klucsik",
author_email="klucsik.krisztian@gmail.com",
description="A link crawler and permission testing tool for websites",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/klucsik/link-crab",
packages=setuptools.find_packages(),
install_requires=[
'beautifulsoup4',
'colorama',
'Flask',
'Flask-user',
'Flask-babelex',
'email_validator',
'PyYAML',
'requests',
'selenium',
'pytest'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
) | klucsik/link-crab | setup.py | setup.py | py | 951 | python | en | code | 0 | github-code | 90 |
28221838843 | s1 = input()
s2 = input()
len1, len2 = len(s1), len(s2)
# DP table: lst_lst[i][j] is the edit distance between s1[:i] and s2[:j]
lst_lst = list()
# initialise the table: first row = j deletions, first column = i insertions
for i in range(len1+1):
    tmp_lst = list()
    for j in range(len2+1):
        if i == 0:
            tmp_lst.append(j)
        elif j == 0:
            tmp_lst.append(i)
        else:
            tmp_lst.append(0)
    lst_lst.append(tmp_lst)
# fill the table: match carries the diagonal, mismatch takes the cheapest of
# delete / insert / substitute plus one
for i in range(0, len1):
    for j in range(0, len2):
        if s1[i] == s2[j]:
            lst_lst[i+1][j+1] = lst_lst[i][j]
        else:
            lst_lst[i+1][j+1] = min(lst_lst[i][j+1],
                                    lst_lst[i+1][j],
                                    lst_lst[i][j]) + 1
# debug output: print the whole DP matrix
for i in lst_lst:
    for j in i:
        print("%5d" % j, end="")
    print("")
print("最小编辑长度为:", lst_lst[len1][len2]) | HandsomeLuoyang/LuoGuProblems | 最小编辑距离-动态规划.py | 最小编辑距离-动态规划.py | py | 783 | python | en | code | 0 | github-code | 90 |
28770149543 | import random
from time import sleep
from observer import Observer
from data import fire_departments
from fire_units import FireDepartment, FireTruck, FreeTruck, BusyTruck
from event import Event, MZ, PZ
from iterator import Iterator
from strategies import DefaultStrategies, StrategyMZ, StrategyPZ
class Manager:
    """Drives the fire-dispatch simulation: builds the stations, then loops
    forever generating a random event per second and dispatching trucks.

    NOTE(review): the infinite `while True` loop lives inside __init__, so
    constructing a Manager never returns -- confirm this is intentional
    (a separate `run()` method would be the conventional shape).
    """
    def __init__(self):
        self._fire_stations = []
        self._observer = Observer()
        self._sorted_list = []
        # Build one FireDepartment per (name, x, y) tuple from the data module.
        for department in fire_departments:
            station = FireDepartment(department[0],department[1],department[2])
            self._fire_stations.append(station)
        while True:
            sleep(1)
            self._sorted_list = []
            # Random incident location within the configured bounding box.
            cord_x = random.uniform(49.95855025648944, 50.154564013341734)
            cord_y = random.uniform(19.688292482742394, 20.02470275868903)
            # 70% of events are MZ (local threat), 30% PZ (fire).
            if random.uniform(0, 1) < 0.7:
                event = MZ(cord_x, cord_y)
                print("nowa akcja MZ")
            else:
                event = PZ(cord_x, cord_y)
                print("nowa akcja PZ")
            # Observer returns the available trucks sorted for this event.
            self._sorted_list = self._observer.collect_info(self._fire_stations, event)
            print("dostepne wozy",len(self._sorted_list))
            iterator = iter(Iterator(self._sorted_list))
            """
            for i in self._sorted_list:
                print(i)
            """
            # Pick the dispatch strategy matching the event type.
            if isinstance(event, MZ) == True:
                StrategyMZ(event, iterator)
            else:
                StrategyPZ(event, iterator)
| mzkaoq/simulation_of_deploying_firetrucks_for_emergency_situation | manager.py | manager.py | py | 1,521 | python | en | code | 0 | github-code | 90 |
22775678379 | import copy
import os
import jsonlines
from tqdm import tqdm
from typing import List, Optional, Tuple
import parlai.utils.logging as logging
from parlai.core.teachers import ChunkTeacher
from .build import build, DATASET_NAME_LOCAL
from .utils.text_utils import simplify_nq_example
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
def _count_lines_in_file(fname):
num_lines = 0
with open(fname, 'r') as fi:
for _ in fi:
num_lines += 1
return num_lines
def _context_type_key(is_html):
return 'document_html' if is_html else 'document_text'
def _create_long_answer_from_span_html(example):
"""
Creates a list of long answer candidates, from their spans on the document.
This function gets the full article from the input example dictionary (using
key 'document_html'), then iterates through the long answer spans (from
'long_answer_candidates' key) and creates a list of slices from the article,
using the 'start_byte' and 'end_byte' values in the list of long answer
candidate spans.
Returns a list of long answers. Each long answer is a substring from the
original HTML text.
:param example: a dict that contains one example/entry from NQ dataset.
"""
context_text = example[_context_type_key(is_html=True)].encode()
candidate_long_answers = []
for long_answer_span in example['long_answer_candidates']:
start_index = long_answer_span['start_byte']
end_index = long_answer_span['end_byte']
answer = context_text[start_index:end_index].decode()
candidate_long_answers.append(answer)
return candidate_long_answers
def _create_long_answer_from_span_text(simplified_example):
"""
Creates a list of long answer candidates, from their spans on the document.
This function gets the full article from the input simplified example
dictionary (using key 'document_text'), then iterates through the long
answer spans (from 'long_answer_candidates' key) and creates a list of
slices from the article, using the 'start_token' and 'end_token' values in
the list of long answer candidate spans.
Returns a list of long answers. Each long answer is a substring from the
simplified HTML text.
:param simplified_example: a dict that contains one simplified example/entry
from NQ dataset.
"""
context_text = simplified_example[_context_type_key(is_html=False)]
candidate_long_answers = []
splitted_tokens = context_text.split(' ')
for long_answer_span in simplified_example['long_answer_candidates']:
start_index = long_answer_span['start_token']
end_index = long_answer_span['end_token']
answer = ' '.join(splitted_tokens[start_index:end_index])
candidate_long_answers.append(answer)
return candidate_long_answers
class NaturalQuestionsTeacher(ChunkTeacher):
"""
The base teacher class for Natural Questions dataset challenge.
This class implements the core functionalities for other teachers. The other four
variations of teachers are made by setting two object attributes (use_html,
use_long_answer) to either True or False.
"""
@classmethod
def add_cmdline_args(
cls, parser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
nq = parser.add_argument_group('Natural Questions Teacher')
nq.add_argument(
'--use-html',
type='bool',
default=False,
help='Use HTML for the context (does nothing if `use-context` is False)',
)
nq.add_argument(
'--use-long-answer', type='bool', default=False, help='Use long answers'
)
nq.add_argument(
'--use-context',
type='bool',
default=True,
help='Include context blurb or not',
)
def __init__(self, opt, shared=None):
build(opt)
self.use_html = opt.get('use_html', False)
self.use_long_answer = opt.get('use_long_answer', False)
self.use_context = opt.get('use_context', False)
self.id = 'natural_questions'
self.opt = copy.deepcopy(opt)
self.dtype = self.opt['datatype'].split(':')[0]
self.dpath = os.path.join(self.opt['datapath'], DATASET_NAME_LOCAL, self.dtype)
self.n_samples = None
super().__init__(self.opt, shared)
def _simplify(self, example):
if self.use_html:
return example
return simplify_nq_example(example)
def _get_data_folder(self):
return self.dpath
def get_fold_chunks(self, opt) -> List[int]:
if 'train' == self.dtype:
return list(range(50))
elif 'valid' == self.dtype:
return list(range(5))
raise ValueError(f'Invalid data type: "{self.dtype}"')
def get_num_samples(self, opt) -> Tuple[int, int]:
if self.n_samples:
return self.n_samples
logging.log(f'Counting the number of samples in {self.dtype}')
files = os.listdir(self.dpath)
n_samples = 0
for fname in tqdm(files):
if fname.startswith('.'): # some of the OS specific files
continue
n_samples += _count_lines_in_file(os.path.join(self.dpath, fname))
logging.info(f'{n_samples} examples found in {self.dtype} dataset.')
self.n_samples = (n_samples, n_samples)
return self.n_samples
def _get_candidate_long_answers(self, example):
if self.use_html:
return _create_long_answer_from_span_html(example)
else:
return _create_long_answer_from_span_text(example)
def _get_short_answers(self, example):
context = example[_context_type_key(self.use_html)]
if self.use_html:
offset_unit = 'byte'
context = context.encode()
else:
offset_unit = 'token'
context = context.split(' ')
short_answers = []
for annotation in example['annotations']:
if 'short_answers' in annotation and annotation['short_answers']:
for sa in annotation['short_answers']:
start_ind = sa[f'start_{offset_unit}']
end_ind = sa[f'end_{offset_unit}']
ans = context[start_ind:end_ind]
if self.use_html:
short_answers.append(ans.decode())
else:
short_answers.append(' '.join(ans))
elif (
'yes_no_answer' in annotation
and annotation['yes_no_answer']
and annotation['yes_no_answer'] != 'NONE'
):
short_answers.append(annotation['yes_no_answer'])
return short_answers
def load_from_chunk(self, chunk_idx: int):
    """
    Loads from a chunk of the dataset, given the chunk index.
    Returns a list of dictionaries. Each dictionary is an example from the
    main dataset and stores the components of that example (e.g.,
    context, question, candidate answers etc.) as key-value pairs.
    :param chunk_idx: the index of the chunk dataset chunk file.
    """

    def _extract_labels_indices(example, candidate_labels):
        # Map each annotated long answer's candidate index to its string.
        labels = []
        for label in example['annotations']:
            label_ind = label['long_answer']['candidate_index']
            labels.append(candidate_labels[label_ind])
        return labels

    # Chunk files are named e.g. nq-train-07.jsonl (zero-padded index).
    fname = f'nq-{self.dtype}-{str(chunk_idx).zfill(2)}.jsonl'
    fpath = os.path.join(self.dpath, fname)
    output = []
    with jsonlines.open(fpath, 'r') as fi:
        for example in fi:
            example_components = dict()
            example = self._simplify(example)
            question = example['question_text']
            if self.use_context:
                context = example[_context_type_key(self.use_html)]
                # NQ question text has no trailing question mark; add one.
                example_components['text'] = f'{context}\n{question}?'
            else:
                example_components['text'] = f'{question}?'
            if self.use_long_answer:
                example_components[
                    'long_answers_candidate'
                ] = self._get_candidate_long_answers(example)
                example_components['long_answers'] = _extract_labels_indices(
                    example, example_components['long_answers_candidate']
                )
            else:
                example_components['short_answers'] = self._get_short_answers(
                    example
                )
            output.append(example_components)
    return output
def create_message(self, example_components, entry_idx=0):
    """Assemble a ParlAI-style message dict from precomputed components."""
    if self.use_long_answer:
        label_key = 'long_answers'
    else:
        label_key = 'short_answers'
    # Fall back to a single empty label when the example has no answers.
    labels = example_components[label_key] or ['']
    message_dict = {
        'id': self.id,
        'text': example_components['text'],
        'labels': labels,
        'episode_done': True,
    }
    if self.use_long_answer:
        message_dict['label_candidates'] = example_components['long_answers_candidate']
    return message_dict
class DefaultTeacher(NaturalQuestionsTeacher):
    """Alias exposing NaturalQuestionsTeacher as this task's default teacher."""
    pass
| Seagate/cortx | doc/integrations/parlAI/parlai/tasks/natural_questions/agents.py | agents.py | py | 9,445 | python | en | code | 631 | github-code | 90 |
30572847665 | #https://blog.csdn.net/qq_47233366/article/details/122611672
import torch
import torchvision
import torch.nn as nn
import numpy as np
# Shape of a single MNIST image: (channels, height, width).
image_size = [1, 28, 28]
# Dimensionality of the generator's input noise vector.
latent_dim = 100
batch_size = 4
# 1. Prepare the dataset (MNIST training split, resized to 28x28 tensors).
dataset = torchvision.datasets.MNIST("mnist_data", train=True, download=True,
                                     transform=torchvision.transforms.Compose(
                                         [
                                             torchvision.transforms.Resize(28),
                                             torchvision.transforms.ToTensor(),
                                         ]
                                     )
                                     )
# 2. Wrap the dataset in a DataLoader; drop_last keeps every batch full,
# which the BatchNorm1d layers in the generator rely on.
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)
# 3. Build the networks (generator below, discriminator further down).
class Generator(nn.Module):
    """MLP generator: latent noise vector -> 28x28 MNIST-like image."""

    def __init__(self):
        super().__init__()
        layers = []
        in_dim = latent_dim
        # Four widening hidden blocks: Linear -> BatchNorm1d -> GELU.
        for out_dim in (128, 256, 512, 1024):
            layers.append(nn.Linear(in_dim, out_dim))
            layers.append(torch.nn.BatchNorm1d(out_dim))
            layers.append(torch.nn.GELU())
            in_dim = out_dim
        # Project to the flattened image size; Sigmoid keeps pixels in [0, 1].
        layers.append(nn.Linear(in_dim, np.prod(image_size, dtype=np.int32)))
        layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        """Map z of shape (batch, latent_dim) to images of shape (batch, 1, 28, 28)."""
        flat = self.model(z)
        return flat.reshape(z.shape[0], *image_size)
## Build the discriminator
class Discriminator(nn.Module):
    """MLP discriminator: flattened image -> probability of being real."""

    def __init__(self):
        super().__init__()
        layers = []
        in_features = np.prod(image_size, dtype=np.int32)
        # Narrowing hidden blocks: Linear -> GELU.
        for out_features in (512, 256, 128, 64, 32):
            layers.append(nn.Linear(in_features, out_features))
            layers.append(torch.nn.GELU())
            in_features = out_features
        # Final scalar logit squashed to [0, 1] for BCELoss.
        layers.append(nn.Linear(in_features, 1))
        layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, image):
        """image: (batch, 1, 28, 28) -> real/fake probability of shape (batch, 1)."""
        flattened = image.reshape(image.shape[0], -1)
        return self.model(flattened)
# 4. Instantiate the networks.
generator = Generator()
discriminator = Discriminator()
# 5. Loss function and optimizers (Adam with identical hyperparameters).
g_optimizer = torch.optim.Adam(generator.parameters(), lr=0.0003, betas=(0.4, 0.8), weight_decay=0.0001)
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=0.0003, betas=(0.4, 0.8), weight_decay=0.0001)
loss_fn = nn.BCELoss()
# Target labels: 1 for "real", 0 for "fake".
labels_one = torch.ones(batch_size, 1)
labels_zero = torch.zeros(batch_size, 1)
# 6. Adversarial training loop.
num_epoch = 200
for epoch in range(num_epoch):
    for i, mini_batch in enumerate(dataloader):
        gt_images, _ = mini_batch
        z = torch.randn(batch_size, latent_dim)
        fake_images = generator(z)
        # Generator step: push the discriminator to call fakes "real".
        g_loss = loss_fn(discriminator(fake_images), labels_one)
        g_optimizer.zero_grad()
        g_loss.backward()
        g_optimizer.step()
        # Discriminator step: real images -> 1, detached fakes -> 0
        # (detach so this step does not backprop into the generator).
        real_loss = loss_fn(discriminator(gt_images), labels_one)
        fake_loss = loss_fn(discriminator(fake_images.detach()), labels_zero)
        d_loss = (real_loss + fake_loss)
        d_optimizer.zero_grad()
        d_loss.backward()
        d_optimizer.step()
        # 7. Periodic logging and sample image dumps.
        if i % 50 == 0:
            print(f"step:{len(dataloader) * epoch + i}, g_loss:{g_loss.item()}, d_loss:{d_loss.item()}, real_loss:{real_loss.item()}, fake_loss:{fake_loss.item()}")
        if i % 500 == 0:
            # NOTE: batch_size is 4, so at most 4 samples are saved despite
            # the [:16] slice.
            image = fake_images[:16].data
            torchvision.utils.save_image(image, f"image_{len(dataloader)*epoch+i}.png", nrow=4)
| wsj-create/GAN | test_gan.py | test_gan.py | py | 4,167 | python | en | code | 3 | github-code | 90 |
31779032908 | from django.contrib import admin
from . import models
class SectionInline(admin.TabularInline):
    """Inline editor so a Course's Sections can be edited on the Course page."""
    model = models.Section
class PageInline(admin.TabularInline):
    """Inline editor so a Section's Pages can be edited on the Section page."""
    model = models.Page
class CourseAdmin(admin.ModelAdmin):
    """Admin for Course: the author field is stamped automatically."""
    readonly_fields = ('author', )
    inlines = [SectionInline]

    def save_model(self, request, obj, form, change):
        """Set the logged-in user as author on first save, never on edits.

        Calls super().save_model() (the Django-recommended way) instead of
        a bare obj.save(), so any future ModelAdmin save hooks still run.
        """
        if obj.author_id is None:
            obj.author = request.user
        super().save_model(request, obj, form, change)
class SectionAdmin(admin.ModelAdmin):
    """Admin for Section, listing pk/name with inline editing of its Pages."""
    list_display = ('pk', 'name', 'course')
    list_display_links = ('name',)
    inlines = [PageInline]
class PageAdmin(admin.ModelAdmin):
    """Admin for Page, showing its parent section and course in the list."""
    list_display = ('pk', 'name', 'order', 'section_info')

    def section_info(self, obj):
        """Human-readable 'Section | Course' label for the change list."""
        section = obj.section
        return 'Section: {} | Course: {}'.format(section.name, section.course.name)
# Register the models with their customized admin classes.
admin.site.register(models.Course, CourseAdmin)
admin.site.register(models.Section, SectionAdmin)
admin.site.register(models.Page, PageAdmin)
| Topliyak/teach-service | server/apps/courses/admin.py | admin.py | py | 886 | python | en | code | 0 | github-code | 90 |
35642698886 | from datetime import timedelta
from IPlugin import IPlugin
import os
# add proper error handling
class Linux(IPlugin):
    """Plugin exposing host uptime, reboot and shutdown via chat commands."""

    def __init__(self, config, dispatcher):
        # Reboot/shutdown are destructive, so they are gated by config.
        self.allow_reboot_shutdown = config['allow_reboot_shutdown']

    def handlemessage(self, bot, msg):
        """Handle a chat message; return True when it was consumed."""
        text = msg.text.lower()
        if text == 'awake?':
            td = self.uptime()
            hours, remainder = divmod(td.seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            msg.reply_text("Yeah man, for " + str(td.days) + " days, " + str(hours) + " hours, and " + str(minutes) + " minutes now!")
            return True
        if text == 'reboot dude' and self.allow_reboot_shutdown:
            msg.reply_text("Gonna reboot now, pal!")
            os.system("sudo shutdown -r now")
            return True
        if text == 'go to sleep' and self.allow_reboot_shutdown:
            msg.reply_text("Alright, good night, man!")
            os.system("sudo shutdown now")
            return True
        return False

    def uptime(self):
        """Return the host uptime as a timedelta (reads Linux /proc/uptime)."""
        with open('/proc/uptime', 'r') as f:
            return timedelta(seconds=float(f.readline().split()[0]))

    def helpmessage(self):
        """Map each supported command to its description."""
        return {"awake?": "ask Pi for his uptime",
                "reboot dude": "make Pi reboot",
                "go to sleep": "make Pi shut down"}
__export__ = Linux
| BerndAmend/minion_bot | plugins/linux.py | linux.py | py | 1,433 | python | en | code | 1 | github-code | 90 |
def sum_rows(row1: list, row2: list):
    """
    Summarizes two rows of the current simplex table, element-wise.
    The rows are expected to have equal length.
    :param row1: the first line (summand)
    :param row2: the second line (summand)
    :return: list with the element-wise sums
    """
    # Comprehension over zip replaces the manual index loop.
    return [a + b for a, b in zip(row1, row2)]
def multiply_const_row(const: float, row: list):
    """
    Multiplies the row by a constant, returning a new list.
    :param const: the constant by which the row is multiplied
    :param row: row to be multiplied by the constant
    :return: new list with the scaled values
    """
    # Comprehension replaces the manual append loop.
    return [const * item for item in row]
| AndreyRysistov/GomoryMethod | FunctionalApproach/table_tools.py | table_tools.py | py | 890 | python | en | code | 2 | github-code | 90 |
4465448172 | import os
from termcolor import colored
import yaml
from ..transformation.load_ground_truth import GroundTruthLoad
from ..classification.classification_task_manager import ClassificationTaskManager
from ..transformation.load_ground_truth import DatasetExporter
from ..helper_functions.logging_tool import LoggerSetup
def train_class(config, gt_file, exports_directory, c_values, gamma_values, preprocessing_values, log_level):
    """Train a model for one ground-truth class.

    Loads the ground truth, derives the class name and export directory,
    applies the user's SVM parameter overrides, exports the feature/label
    dataset, and runs the classification task manager.

    :param config: project configuration dict (mutated in place)
    :param gt_file: ground-truth file to load
    :param exports_directory: optional name for the exports directory
    :param c_values: SVM C grid override (falsy keeps config value)
    :param gamma_values: SVM gamma grid override (falsy keeps config value)
    :param preprocessing_values: preprocessing override (falsy keeps config value)
    :param log_level: logging verbosity passed to all components
    """
    exports_path = config["exports_path"]
    gt_data = GroundTruthLoad(config, gt_file, exports_path, log_level)
    # tracks shuffled and exported
    tracks_listed_shuffled = gt_data.export_gt_tracks()
    # class to train
    class_name = gt_data.export_train_class()
    config["class_name"] = class_name
    # project directory where the models and outputs will be saved
    if exports_directory is None:
        prefix_exports_dir = "exports"
        config["exports_directory"] = "{}_{}".format(prefix_exports_dir, class_name)
    else:
        config["exports_directory"] = exports_directory
    config = update_parameters(config=config,
                               c_values=c_values,
                               gamma_values=gamma_values,
                               preprocessing_values=preprocessing_values)
    logger = LoggerSetup(config=config,
                         exports_path=exports_path,
                         name="train_model_{}".format(class_name),
                         train_class=class_name,
                         mode="w",
                         level=log_level).setup_logger()
    logger.info("---- TRAINING FOR THE {} MODEL HAS JUST STARTED ----".format(class_name))
    logger.debug("Type of exported GT data exported: {}".format(type(tracks_listed_shuffled)))
    # name the project file
    if config["project_file"] is None:
        prefix_project_file = "project"
        project_file_name_save = "{}_{}.yaml".format(prefix_project_file, class_name)
    else:
        project_file_name_save = "{}.yaml".format(config["project_file"])
    logger.info("Project yaml file name: {}".format(project_file_name_save))
    # save the project file
    project_file_save_path = os.path.join(exports_path, project_file_name_save)
    with open(os.path.join(project_file_save_path), "w") as template_file:
        # NOTE(review): yaml.dump writes straight to template_file; the
        # assigned name is never used afterwards.
        template_data_write = yaml.dump(config, template_file)
    print("First N sample of shuffled tracks: \n{}".format(tracks_listed_shuffled[:4]))
    # create the exports with the features DF, labels, and tracks together
    features, labels, tracks = DatasetExporter(config=config,
                                               tracks_list=tracks_listed_shuffled,
                                               train_class=class_name,
                                               exports_path=exports_path,
                                               log_level=log_level
                                               ).create_df_tracks()
    logger.debug("Types of exported files from GT:")
    logger.debug("Type of features: {}".format(type(features)))
    logger.debug("Type of labels: {}".format(type(labels)))
    logger.debug("Type of Tracks: {}".format(type(tracks)))
    model_manage = ClassificationTaskManager(config=config,
                                             train_class=class_name,
                                             X=features,
                                             y=labels,
                                             tracks=tracks,
                                             exports_path=exports_path,
                                             log_level=log_level)
    classification_time = model_manage.apply_processing()
    print(colored("Classification ended successfully in {} minutes.".format(classification_time), "green"))
    logger.info("Classification ended successfully in {} minutes.".format(classification_time))
def update_parameters(config, c_values, gamma_values, preprocessing_values):
    """Update the project file with user-provided preferences.

    Args:
        config: The config data to be updated (mutated and returned).
        c_values: C value to be updated (falsy values are ignored).
        gamma_values: gamma value to be updated (falsy values are ignored).
        preprocessing_values: preprocessing values to be updated (falsy ignored).
    """
    overrides = {
        'C': c_values,
        'gamma': gamma_values,
        'preprocessing': preprocessing_values,
    }
    for pref in config['classifiers']['svm']:
        for key, value in overrides.items():
            if value:
                pref[key] = value
    return config
| tzamalisp/gsoc-music-classification-sklearn | classification/train_class.py | train_class.py | py | 4,524 | python | en | code | 0 | github-code | 90 |
21156333267 | from pages.locators import SupportPageLocators
class TestData:
    """Static fixtures for the reqres.in API tests and checkout-UI tests."""

    # Pagination page numbers / user counts exercised by the tests.
    number_of_users = [2, 4, 8, 16]
    # User ids that exist on reqres.in.
    list_of_correct_ids = [2, 4, 6, 8]
    # User ids that return 404.
    list_of_incorrect_ids = [23, 44, 56]
    # Values for the ?delay= query parameter.
    number_of_delays = [2, 3, 4]
    # Request payloads for create/update endpoints.
    create_user_data = {
        "name": "morpheus",
        "job": "leader"
    }
    update_user_data = {
        "name": "morpheus",
        "job": "zion resident"
    }
    # Credentials accepted by the login/register endpoints.
    list_login_user_correct_data = [
        {
            "email": "eve.holt@reqres.in",
            "password": "pistol"
        },
        {
            "email": "eve.holt@reqres.in",
            "password": "cityslicka"
        }
    ]
    # Payloads missing the password field (negative tests).
    login_user_without_password_data = {
        "email": "peter@klaven"
    }
    list_login_user_without_password_data = [
        {
            'email': 'peter@klaven'
        },
        {
            'email': 'eve.holt@reqres.in'
        },
        {
            'email': 'charles.morris@reqres.in'
        }
    ]
    # Payloads missing the email field (negative tests).
    list_login_user_without_email_data = [
        {
            "password": "cityslicka"
        },
        {
            "name": "morpheus",
            "job": "zion resident"
        },
        {
        }
    ]
    # Parametrization tables: endpoint path + human-readable request name
    # (+ payload for write requests).
    list_of_web_request_type_get = [
        {
            'url_for_api': 'api/users?page=2',
            'request_name': 'list users'
        },
        {
            'url_for_api': 'api/users/2',
            'request_name': 'single user'
        },
        {
            'url_for_api': 'api/users/23',
            'request_name': 'single user not found'
        },
        {
            'url_for_api': 'api/unknown',
            'request_name': 'list resource'
        },
        {
            'url_for_api': 'api/unknown/2',
            'request_name': 'single resource'
        },
        {
            'url_for_api': 'api/unknown/23',
            'request_name': 'single resource not found'
        },
        {
            'url_for_api': 'api/users?delay=3',
            'request_name': 'delayed_response'
        }
    ]
    list_of_web_request_type_post = [
        {
            'url_for_api': 'api/users',
            'request_name': 'create',
            'data': create_user_data
        },
        {
            'url_for_api': 'api/register',
            'request_name': 'register_successful',
            'data': list_login_user_correct_data[1]
        },
        {
            'url_for_api': 'api/register',
            'request_name': 'register_unsuccessful',
            'data': login_user_without_password_data
        },
        {
            'url_for_api': 'api/login',
            'request_name': 'login_successful',
            'data': list_login_user_correct_data[1]
        },
        {
            'url_for_api': 'api/login',
            'request_name': 'login_unsuccessful',
            'data': login_user_without_password_data
        },
    ]
    list_of_web_request_type_put = [
        {
            'url_for_api': 'api/users/2',
            'request_name': 'update put',
            'data': update_user_data
        }
    ]
    list_of_web_request_type_patch = [
        {
            'url_for_api': 'api/users/2',
            'request_name': 'update patch',
            'data': update_user_data
        }
    ]
    list_of_web_request_type_delete = [
        {
            'url_for_api': 'api/users/2',
            'request_name': 'delete'
        }
    ]
    # Stripe checkout negative-test conditions for card entry.
    list_of_conditions_for_wrong_card_data = ['invalid number', 'past expiry', 'incomplete number']
    # Locator tuples unpacked into [how, what] pairs for the footer links.
    list_powered_text = [*SupportPageLocators.POWERED_BY_BUTTON]
    list_terms_text = [*SupportPageLocators.TERMS_BUTTON]
    list_privacy_text = [*SupportPageLocators.PRIVACY_BUTTON]
    list_opened_link_title = [*SupportPageLocators.OPENED_LINK_TITLE]
    # Footer-link fixtures: locator pair, expected link text, and locator
    # pair for the title of the page the link opens.
    list_for_footer_links = [
        {
            'how_link': list_powered_text[0],
            'what_link': list_powered_text[1],
            'expected_text': 'Powered by Stripe',
            'opened_how': list_opened_link_title[0],
            'opened_what': list_opened_link_title[1]
        },
        {
            'how_link': list_terms_text[0],
            'what_link': list_terms_text[1],
            'expected_text': 'Terms',
            'opened_how': list_opened_link_title[0],
            'opened_what': list_opened_link_title[1]
        },
        {
            'how_link': list_privacy_text[0],
            'what_link': list_privacy_text[1],
            'expected_text': 'Privacy',
            'opened_how': list_opened_link_title[0],
            'opened_what': list_opened_link_title[1]
        }
    ]
| bulatshuh/reqres_test | lib/test_data.py | test_data.py | py | 4,541 | python | en | code | 0 | github-code | 90 |
40580928256 | """
207. Course Schedule
There are a total of numCourses courses you have to take, labeled from 0 to numCourses - 1. You are given an array prerequisites where prerequisites[i] = [ai, bi] indicates that you must take course bi first if you want to take course ai.
For example, the pair [0, 1], indicates that to take course 0 you have to first take course 1.
Return true if you can finish all courses. Otherwise, return false.
Example 1:
Input: numCourses = 2, prerequisites = [[1,0]]
Output: true
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0. So it is possible.
Example 2:
Input: numCourses = 2, prerequisites = [[1,0],[0,1]]
Output: false
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0, and to take course 0 you should also have finished course 1. So it is impossible.
Constraints:
1 <= numCourses <= 2000
0 <= prerequisites.length <= 5000
prerequisites[i].length == 2
0 <= ai, bi < numCourses
All the pairs prerequisites[i] are unique.
"""
class Solution(object):
    """Course-schedule feasibility via Kahn's topological sort (BFS)."""

    def canFinish(self, numCourses, prerequisites):
        """
        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: bool
        """
        # indegree[c] = number of unfinished prerequisites of course c;
        # adjacency maps a prerequisite to the courses depending on it.
        indegree = [0] * numCourses
        adjacency = collections.defaultdict(list)
        for course, prereq in prerequisites:
            indegree[course] += 1
            adjacency[prereq].append(course)
        # Seed the queue with every course that has no prerequisites.
        ready = deque(c for c in range(numCourses) if indegree[c] == 0)
        taken = 0
        while ready:
            course = ready.popleft()
            taken += 1
            # Finishing this course unlocks its dependents.
            for dependent in adjacency[course]:
                indegree[dependent] -= 1
                if indegree[dependent] == 0:
                    ready.append(dependent)
        # All courses can be taken iff the prerequisite graph is acyclic.
        return taken == numCourses
28191315347 | from django.conf.urls import url
from . import views
# URL routes for the trip-scheduling app. Commented-out entries are
# disabled/legacy routes kept for reference.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^process_registration$', views.process_registration),
    url(r'^logincheck$', views.loginchk),
    url(r'^travels/add$', views.newtrippage),
    url(r'^travels/process_trip$', views.processtrip),
    url(r'^travels/destination/(?P<number>\d+)$', views.destinationpage),
    url(r'^travels/join/(?P<number>\d+)$', views.processjoin),
    #url(r'^users/adminupdateinfo$', views.adminupdateinfo),
    #url(r'^users/normalupdateinfo$', views.normalupdateinfo),
    #url(r'^users/adminupdatepassword$', views.adminupdatepassword),
    #url(r'^users/editprofile/(?P<number>\d+)$', views.editprofilepage),
    url(r'^logout$', views.logout),
    ##url(r'^users/show/(?P<number>\d+)$', views.showuserpage),
    #url(r'^users/send_message$', views.sendmessage),
    #url(r'^users/post_comment$', views.postcomment),
    #url(r'^users/updatepassword$', views.updatepassword)
    #url(r'^users/(?P<number>\d+)$', views.userview),
    #url(r'^books/(?P<number>\d+)$', views.bookview),
    #url(r'^delete_review/(?P<number>\d+)$', views.deletereview),
    #url(r'^add_review/(?P<number>\d+)$', views.addreview)
]
6661967750 |
from io import BytesIO
import matplotlib.pyplot as plt
import seaborn as sns
# Global matplotlib/seaborn styling shared by every report figure.
plt.style.use('ggplot')
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so Chinese labels render
plt.rcParams['figure.facecolor']='w'
plt.rcParams['savefig.facecolor']='w'
plt.rcParams['text.color']='b'
plt.rcParams['xtick.labelsize']=16
plt.rcParams['ytick.labelsize']=16
import pandas as pd
from collections import namedtuple
# Aggregation spec: source column, pandas aggregation function name, and
# the output column name.
CalFunc=namedtuple('CalFunc','col func name')
# One report sheet: the DataFrame, the matplotlib axes, and the sheet name.
Report_msg=namedtuple('Report_msg','df axes sheet_name')
def desc_ays_columns_msgs(x_df):
    '''
    Per-column summary: describe() statistics plus NA counts and dtypes.
    Returns (summary DataFrame, worksheet name).
    '''
    # Per-column missing-value counts.
    na_counts = x_df.isna().sum()
    na_counts.name = '缺失值'
    # Per-column dtypes.
    col_types = x_df.dtypes
    col_types.name = '列类型'
    extra_rows = pd.DataFrame([na_counts, col_types])
    # Standard descriptive statistics over all columns.
    stats = x_df.describe(include='all')
    return pd.concat([stats, extra_rows]), 'describe'
def desc_ays_na_rows(x_df):
    '''
    Rows containing at least one missing value.
    Returns (filtered DataFrame, worksheet name).
    '''
    has_na = x_df.isna().any(axis=1)
    return x_df[has_na], '缺失值记录'
def desc_ays_corr(x_df):
    '''
    Pairwise column correlations.
    Returns (correlation DataFrame, worksheet name).
    '''
    correlation = x_df.corr()
    return correlation, 'corr'
# Analyses run by default for every report; callers can extend this via
# the `methods` argument of create_desc_ays.
g_default_methods=[
    desc_ays_columns_msgs,
    desc_ays_na_rows,
    desc_ays_corr
]
def create_desc_ays(x_df, methods=None, file_name='desc_ays.xlsx'):
    '''
    Run descriptive analyses on x_df and write each result to one
    worksheet of an Excel workbook.
    :param x_df: DataFrame to analyse
    :param methods: optional extra callables; each takes the DataFrame and
        returns (result DataFrame, sheet name). They run after the defaults.
    :param file_name: path of the Excel workbook to create
    '''
    # Default analyses first, then any caller-supplied ones.
    if methods:
        methods = g_default_methods + methods
    else:
        methods = g_default_methods.copy()
    # BUG FIX: the explicit ew.save() inside the `with` block was removed —
    # ExcelWriter.save() no longer exists in pandas >= 2.0, and the context
    # manager already closes (and saves) the workbook on exit.
    with pd.ExcelWriter(file_name) as ew:
        for m in methods:
            res, wrk_name = m(x_df)
            res.to_excel(ew, sheet_name=wrk_name)
def cal(df, keys, *cal_funcs):
    '''
    Group df by keys and apply named aggregations.
    cal_funcs: callables whose return value must be a
    namedtuple('CalFunc','col func name'),
    e.g. helper.CalFunc(col='销售数量', func='sum', name='总销量')
    '''
    # Translate each spec into pandas "named aggregation" form:
    # output_name=(source_column, aggregation_function).
    agg_dicts = {}
    for factory in cal_funcs:
        spec = factory()
        agg_dicts[spec.name] = (spec.col, spec.func)
    return df.groupby(keys).agg(**agg_dicts)
def df_top_n(x_df, col, n, by=None):
    '''
    Return the top n rows by `col`; with `by`, the top n per group.
    col: numeric column to rank by
    n: number of rows to keep
    by: optional grouping key(s); falsy means no grouping
    '''
    if not by:
        return x_df.nlargest(n, col)
    per_group = x_df.groupby(by, as_index=False).apply(
        lambda group: group.nlargest(n, col)
    )
    # Drop the synthetic outer group level added by apply().
    return per_group.reset_index(0, drop=True)
def export_excel(report_msgs, file_name='anl_report.xlsx'):
    '''
    Write each Report_msg to its own worksheet, embedding the rendered
    matplotlib figure next to the DataFrame.
    :param report_msgs: iterable of Report_msg(df, axes, sheet_name)
    :param file_name: path of the Excel workbook to create
    '''
    # BUG FIX: the trailing ew.save() was removed — ExcelWriter.save() no
    # longer exists in pandas >= 2.0; the context manager closes (and
    # saves) the workbook on exit.
    with pd.ExcelWriter(file_name) as ew:
        for r in report_msgs:
            df, ax, wrk_name = r.df, r.axes, r.sheet_name
            # Render the figure to an in-memory PNG for embedding.
            imgdata = BytesIO()
            fig = ax.get_figure()
            fig.patch.set_alpha(0.3)
            fig.savefig(imgdata, format="png")
            imgdata.seek(0)
            df.to_excel(ew, sheet_name=wrk_name)
            # NOTE(review): insert_image is specific to the xlsxwriter
            # engine — confirm that engine is used/installed.
            wrk = ew.sheets[wrk_name]
            # Place the image one row down, just right of the table.
            cols = len(df.columns) + df.index.nlevels + 1
            wrk.insert_image(
                1, cols, "",
                {'image_data': imgdata}
            )
19197547894 | from time import sleep
from .StepperDriver import *
try:
import RPi.GPIO as GPIO
except:
import Mock.GPIO as GPIO
class A4988Driver(StepperDriver):
    """Stepper-motor driver for the A4988 board, pulsing STEP/DIR GPIO pins."""

    # Full steps per shaft revolution (typical 1.8-degree stepper).
    __STEPS_PER_REVOLUTION = 200

    def __init__(self, step_pin, dir_pin):
        """
        :param step_pin: GPIO pin wired to the A4988 STEP input
        :param dir_pin: GPIO pin wired to the A4988 DIR input
        """
        StepperDriver.__init__(self)
        self.__step_pin = step_pin
        self.__dir_pin = dir_pin
        self.start_process()

    def step(self, steps: int, velocity=None, queue=False):
        """Move the motor by ``steps`` (sign selects the direction).

        :param steps: number of steps; positive -> CW, negative -> CCW
        :param velocity: optional StepperVelocity; defaults to self._velocity
        :param queue: when True, enqueue the move instead of stepping now
        """
        if steps == 0:
            return
        if queue:
            self.add_step_to_queue(steps, velocity)
            return
        dir = StepperDirection.CW if steps > 0 else StepperDirection.CCW
        self.__set_direction(dir)
        if velocity is None:
            velocity = self._velocity
        delay = self.__get_delay(velocity)
        steps = abs(steps)
        for _ in range(steps):
            # One full step pulse: high then low, equal half-period delays.
            GPIO.output(self.__step_pin, GPIO.HIGH)
            sleep(delay)
            GPIO.output(self.__step_pin, GPIO.LOW)
            sleep(delay)

    def __set_direction(self, dir: StepperDirection):
        # DIR pin high selects clockwise, low counter-clockwise.
        GPIO.output(self.__dir_pin, True if dir ==StepperDirection.CW else False)

    def __get_delay(self, velocity: StepperVelocity):
        """Half-period in seconds between pin toggles for the velocity."""
        if velocity == StepperVelocity.NORMAL:
            return 0.000500  # 500us
        elif velocity == StepperVelocity.FAST:
            return 0.000400  # 400us
        else:
            return 0.000700  # 700us (slow / default)
| MatheusKunnen/integration-workshop-3 | vending-machine/motion-system/MotionSystemController/A4988Driver.py | A4988Driver.py | py | 1,442 | python | en | code | 0 | github-code | 90 |
5438103749 | import argparse
import re
import pandas as pd
def get_commandline_args():
    """Parse the command line for this script.

    Returns (source_filename, output_filename): the Lebedev C source file
    to read and the CSV table file to write.
    """
    description = ('gets table of Lebedev coordinates and weights from the '
                   'source code from John Burkardt')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--source_filename',
                        help='filename of Lebedev source code')
    parser.add_argument('--output_filename',
                        help='name of file which table will be written to')
    parsed = parser.parse_args()
    return parsed.source_filename, parsed.output_filename
def match_lebedev_functions(file_text):
    """Find every ld#### quadrature function in the C source.

    Each returned tuple is (order digits as written in the function name,
    body text between the 'n = 0;' line and the 'return' statement).
    """
    # First group: the Lebedev order from the function title.
    # Second group: everything between `n = 0;` and `return`.
    pattern = ('void ld(\d+)'
               ' \( double \*x, double \*y, double \*z, double \*w \)'
               '(?:.|\n)*?(?:n = 0;\n)((?:.|\n)*?)(?:return)')
    return re.findall(pattern, file_text)
def parse_lebedev_line(function_line):
    """Classify one C source line from an ld#### function body.

    Returns a (kind, value) tuple where kind is one of 'rule', 'weight',
    'a' or 'b', or None when the line carries no generator-point data.
    """
    # Order matters: gen_oh(...) lines are checked before assignments.
    classifiers = [
        ('rule', r'gen_oh \( (\d+), .*?\)\s*;'),
        ('weight', r'v = ((?:\d|\.|e|-)+?)\s*;'),
        ('a', r'a = ((?:\d|\.|e|-)+?)\s*;'),
        ('b', r'b = ((?:\d|\.|e|-)+?)\s*;'),
    ]
    for kind, pattern in classifiers:
        found = re.findall(pattern, function_line)
        if found:
            return kind, found[0]
    return None


def parse_lebedev_function(function_text):
    """Walk one ld#### function body and collect generator-point records.

    'v =', 'a =' and 'b =' assignments accumulate into the current record;
    each gen_oh(...) call completes a record, which is appended. Values
    deliberately carry over into subsequent records, mirroring the C code.
    """
    records = []
    current = {}
    for line in function_text.splitlines():
        parsed = parse_lebedev_line(line)
        if parsed is None:
            continue
        key, value = parsed
        current[key] = value
        if key == 'rule':
            records.append(current.copy())
    return records
def main():
    """Read the Lebedev C source, parse every ld#### function into a table
    of generator points, and write the combined table as CSV."""
    source_filename, output_filename = get_commandline_args()
    # Read the whole C source into memory.
    f = open(source_filename)
    file_text = f.read()
    f.close()
    matches = match_lebedev_functions(file_text)
    generator_point_main_dataframe = pd.DataFrame()
    for match in matches:
        # match = (order string, function body); parse body into records.
        generator_point_data_list = parse_lebedev_function( match[1] )
        generator_point_dataframe = pd.DataFrame.from_dict(generator_point_data_list)
        # Strip leading zeros from the order as written in the name.
        generator_point_dataframe['order'] = match[0].lstrip('0')
        generator_point_main_dataframe = pd.concat([generator_point_main_dataframe, generator_point_dataframe])
    generator_point_main_dataframe.to_csv(output_filename, index=False)


if __name__ == '__main__':
    main()
| lucasmyers97/lebedev-quadrature | scripts/get_lebedev_table_from_burkardt.py | get_lebedev_table_from_burkardt.py | py | 3,016 | python | en | code | 2 | github-code | 90 |
13674356510 | """
This script computes all results and plots concerning the flexibility potential of EV that were published in the paper
"Quantifying the Flexibility of Electric Vehicles in Germany and California – A Case Study".
"""
__author__ = "Michel Zadé"
__copyright__ = "2020 TUM-EWK"
__credits__ = []
__license__ = "GPL v3.0"
__version__ = "1.0"
__maintainer__ = "Michel Zadé"
__email__ = "michel.zade@tum.de"
__status__ = "Complete"
from joblib import Parallel, delayed
from tqdm import tqdm
import multiprocessing
import pandas as pd
import itertools
import analysis.ev_case_study as ev_case_study
import os
# Define input and output paths
output_path = 'output/'
input_path = 'input/'
figure_path = 'figures/'
rtp_input_path = 'input/RTP/'
# Read veh availabilities from file
veh_availabilities = pd.read_csv('input/chts_veh_availability.csv')
print('1. Prepare input data.')
# Extract a subsample for testing ([:] currently keeps all rows)
veh_availabilities = veh_availabilities[:]
veh_availabilities = veh_availabilities.reset_index()
# Define case study details: charging power levels (kW), pricing schemes,
# and one entry per vehicle availability record.
params = {'power_levels': [3.7, 11, 22],
          'pricing': ['ToU', 'Constant', 'Con_mi', 'ToU_mi', 'RTP'],
          'veh_availability': veh_availabilities.values.tolist()}
# Create output folder
ev_case_study.create_output_folder(output_path=output_path,
                                   power_levels=params['power_levels'],
                                   pricing_strategies=params['pricing'])
# Create all possible combinations of params (cartesian product)
keys = list(params)
param_variations = list()
# Constant parameters shared by every variation.
param_con = {'conversion_distance_2_km': 1.61,
             'conversion_km_2_kwh': 0.2,
             'rtp_input_data_path': rtp_input_path,
             'output_path': output_path,
             'pricing_strategies': ['ToU', 'Constant', 'Con_mi', 'ToU_mi', 'RTP'],
             'plotting': False,
             'info': False}
for values in itertools.product(*map(params.get, keys)):
    # Store in list
    param_variations.append(list(values))
print('2. Calculate flexibility offers.')
# Run flex calculation in parallel (one job per CPU core)
Parallel(n_jobs=int(multiprocessing.cpu_count()))(
    delayed(ev_case_study.calc_ev_flex_offers_parallel)(i, param_con) for i in tqdm(param_variations))
print('3. Aggregate optimal charging schedules, costs, and flexibility offers.')
# Aggregate single offers
ev_case_study.aggregate_ev_flex(veh_availabilities,
                                output_path=output_path,
                                rtp_input_data_path=rtp_input_path)
print('4. Plot results.')
# Create empty figures folder
ev_case_study.create_figures_folder(figure_folder_path=figure_path)
# Plot number of available vehicles at home over a week (only for one power level, since it won't change)
ev_case_study.plot_n_avail_veh(output_path=output_path + str(params['power_levels'][0]) + '/',
                               figure_path=figure_path)
# Plot aggregated flexibility offers in a heat map
ev_case_study.plot_flex_heatmap(output_path=output_path)
# List all power levels (one output subfolder per level)
power_levels = os.listdir(output_path)
# df for overall costs, indexed by pricing scheme x power level
overall_costs = pd.DataFrame(columns=power_levels, index=params['pricing'])
for power in power_levels:
    # Read and sum up overall costs from aggregated files
    overall_costs[power]['ToU'] = pd.read_hdf(output_path + str(power) + '/Aggregated Data/opt_sum_data.h5')['c_tou_energy'].sum()
    overall_costs[power]['ToU_mi'] = pd.read_hdf(output_path + str(power) + '/Aggregated Data/opt_sum_data.h5')['c_tou_mi_energy'].sum()
    overall_costs[power]['Constant'] = pd.read_hdf(output_path + str(power) + '/Aggregated Data/opt_sum_data.h5')['c_con_energy'].sum()
    overall_costs[power]['Con_mi'] = pd.read_hdf(output_path + str(power) + '/Aggregated Data/opt_sum_data.h5')['c_con_mi_energy'].sum()
    overall_costs[power]['RTP'] = pd.read_hdf(output_path + str(power) + '/Aggregated Data/opt_sum_data.h5')['c_rtp_energy'].sum()
    # Plot aggregated flexibility offers over time
    ev_case_study.plot_opt_flex_timeseries(power, output_path=output_path + str(power) + '/', figure_path=figure_path)
# Plot overall cost
ev_case_study.plot_overall_cost(overall_costs=overall_costs, figure_path=figure_path)
| tum-ewk/OpenTUMFlex | analysis/run_ev_case_study.py | run_ev_case_study.py | py | 4,191 | python | en | code | 20 | github-code | 90 |
239205234 | #!/usr/bin/env python
import time
import rospy
import math
import pandas as pd
import numpy as np
from geometry_msgs.msg import Twist,PoseStamped
from nav_msgs.msg import Odometry,Path
from array import *
import tf
import os
import rospy
import pickle
global x,y
class Test1():
def __init__(self):
self.goal_x = 3
self.goal_y = 6
self.goal_theta = -math.pi/2
#### to change
self.init_x = 0
self.init_y = 0
self.init_theta = 0
#################
rospy.init_node('Test1', anonymous=False)
rospy.on_shutdown(self.shutdown)
# do not change
self.world_x = 0
self.world_y = 0
self.world_theta = 0
self.x, self.y, self.theta = 0, 0, 0 # self coordinate
self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
rospy.Subscriber('/odom', Odometry, self.callback)
move_cmd = Twist()
s = [self.init_x, self.init_y, self.init_theta]
# initial action
# set initial action to move towards goal
a = self.get_next_action(s, [self.goal_x, self.goal_y, self.goal_theta])
move_cmd.linear.x = a[0]
move_cmd.angular.z = a[1]
rate = rospy.Rate(10)
start = time.time()
init_time = time.time()
while not rospy.is_shutdown():
# publish speed to robot
self.cmd_vel.publish(move_cmd)
# check if goal has been reached
if self.is_goal_reached():
print("Goal reached!")
self.cmd_vel.publish(Twist()) # stop the robot
break
# get next action
s = [self.world_x, self.world_y, self.world_theta]
a = self.get_next_action(s, [self.goal_x, self.goal_y, self.goal_theta])
move_cmd.linear.x = a[0]
move_cmd.angular.z = a[1]
# what to do every 0.1 s
self.world_x = self.x*math.cos(self.init_theta) - self.y*math.sin(self.init_theta) + self.init_x
self.world_y = self.x*math.sin(self.init_theta) + self.y*math.cos(self.init_theta) + self.init_y
self.world_theta = self.theta + self.init_theta
print(self.world_x, self.world_y, self.world_theta)
rate.sleep()
def is_goal_reached(self):
# check if robot has reached the goal
return abs(self.world_x - self.goal_x) < 0.1 and abs(self.world_y - self.goal_y) < 0.1 and abs(self.world_theta - self.goal_theta) < 0.1
# def get_next_action(self, s, goal):
# # calculate next action based on current state and goal
# x, y, theta = s
# goal_x, goal_y, goal_theta = goal
# # calculate distance and angle to goal
# dx = goal_x - x
# dy = goal_y - y
# goal_dist = math.sqrt(dx**2 + dy**2)
# goal_angle = math.atan2(dy, dx)
# # calculate angular velocity
# angle_error = goal_angle - theta
# while angle_error > math.pi:
# angle_error -= 2 * math.pi
# while angle_error < -math.pi:
# angle_error += 2 * math.pi
# angular_vel = angle_error
# # calculate linear velocity
# linear_vel = 0.5 * goal_dist
# # limit linear and angular velocities
# if linear_vel > 0.5:
# linear_vel = 0.5
# elif linear_vel < -0.5:
# linear_vel = -0.5
# if angular_vel > 1.0:
# angular_vel = 1.0
# elif angular_vel < -1.0:
# angular_vel = -1.0
# return [linear_vel, angular_vel]
def get_next_action(self, s, goal):
# calculate next action based on current state and goal
x, y, theta = s
goal_x, goal_y, goal_theta = goal
# calculate distance and angle to goal
dx = goal_x - x
dy = goal_y - y
goal_dist = math.sqrt(dx**2 + dy**2)
goal_angle = math.atan2(dy, dx)
# calculate angular velocity
angle_error = goal_angle - theta
while angle_error > math.pi:
angle_error -= 2 * math.pi
while angle_error < -math.pi:
angle_error += 2 * math.pi
if abs(angle_error) > 0.1: # if the robot is not facing the goal
angular_vel = angle_error
linear_vel = 0.0
else: # if the robot is facing the goal
angular_vel = 0.0
linear_vel = 0.5 * goal_dist
# limit linear and angular velocities
if linear_vel > 0.5:
linear_vel = 0.5
elif linear_vel < -0.5:
linear_vel = -0.5
if angular_vel > 1.0:
angular_vel = 1.0
elif angular_vel < -1.0:
angular_vel = -1.0
return [linear_vel, angular_vel]
    def shutdown(self):
        """Stop the robot and give it time to receive the command before exiting."""
        # stop turtlebot
        rospy.loginfo("Stop TurtleBot")
        # a default Twist has linear.x of 0 and angular.z of 0. So it'll stop TurtleBot
        self.cmd_vel.publish(Twist())
        # sleep just makes sure TurtleBot receives the stop command prior to shutting down the script
        rospy.sleep(1)
    def callback(self,msg):
        """Odometry subscriber callback: cache the latest pose as x, y, theta.

        msg is presumably a nav_msgs/Odometry message -- confirm against the
        subscriber registration (not visible in this chunk).
        """
        # self coordinate
        x = msg.pose.pose.position.x
        y = msg.pose.pose.position.y
        # convert the orientation quaternion to Euler angles; only yaw is used
        (_, _, yaw) = tf.transformations.euler_from_quaternion([msg.pose.pose.orientation.x,
                                                                msg.pose.pose.orientation.y,
                                                                msg.pose.pose.orientation.z,
                                                                msg.pose.pose.orientation.w])
        self.x = x
        self.y = y
        self.theta = yaw
if __name__ == '__main__':
    # Test1 is presumably the controller class defined above (outside this
    # chunk); constructing it runs the navigation loop, then the interpreter
    # is terminated explicitly.
    Test1()
    quit()
| LiYifei1218/turtlebot-motion-planning | turtlebot.py | turtlebot.py | py | 5,781 | python | en | code | 0 | github-code | 90 |
319854124 | import argparse
import os
from net.initialization.folders.default_folders import default_folders_dict
from net.initialization.folders.experiment_complete_folders import experiment_complete_folders_dict
from net.initialization.path.experiment_complete_result_path import experiment_complete_result_path_dict
from net.initialization.utility.create_folder_and_subfolder import create_folder_and_subfolder
def initialization_complete(network_name: str,
                            experiment_complete_ID: str,
                            parser: argparse.Namespace,
                            debug: bool) -> dict:
    """
    Initialization of experiment complete results folder

    :param network_name: network name
    :param experiment_complete_ID: experiment complete ID
    :param parser: parser of parameters-parsing
    :param debug: debug option
    :return: path dictionary mapping every result category to its file path
    """

    # ------------ #
    # FOLDERS DICT #
    # ------------ #
    # default folders
    default_folders = default_folders_dict(where=parser.where)

    # experiment complete folders
    experiment_complete_folders = experiment_complete_folders_dict()

    # --------- #
    # PATH DICT #
    # --------- #
    # experiment complete root: "<network name>|<experiment complete ID>"
    experiment_complete_name = network_name + "|" + experiment_complete_ID
    experiment_complete_path = os.path.join(default_folders['experiments_complete'], experiment_complete_name)

    # experiment complete subfolder paths
    experiment_complete_results_path = experiment_complete_result_path_dict(experiment_path=experiment_complete_path,
                                                                            experiment_complete_folders=experiment_complete_folders)

    # ------------- #
    # CREATE FOLDER #
    # ------------- #
    # create the result folder tree only for real (non-debug) runs
    if not debug:
        if parser.mode in ['script_test_complete', 'script_detections']:
            create_folder_and_subfolder(main_path=experiment_complete_path,
                                        subfolder_path_dict=experiment_complete_results_path)
            print("Experiment Complete result folder: COMPLETE")
        else:
            print("Experiment Complete result folder: ALREADY COMPLETE")
    else:
        print("Debug Initialization")

    # ----------- #
    # RESULT PATH #
    # ----------- #
    # NMS tag "RxR" shared by every NMS-related filename below; hoisted so the
    # identical format() call is not repeated a dozen times
    nms = "{}x{}".format(parser.NMS_box_radius, parser.NMS_box_radius)

    # detections
    detections_test_complete_filename = "detections-test|" + experiment_complete_ID + ".csv"
    detections_test_complete_path = os.path.join(experiment_complete_results_path['detections'], detections_test_complete_filename)

    detections_test_NMS_complete_filename = "detections-test-NMS={}|".format(nms) + experiment_complete_ID + ".csv"
    detections_test_NMS_complete_path = os.path.join(experiment_complete_results_path['detections'], detections_test_NMS_complete_filename)

    # metrics-test
    metrics_test_complete_filename = "metrics-test|" + experiment_complete_ID + ".csv"
    metrics_test_complete_path = os.path.join(experiment_complete_results_path['metrics_test'], metrics_test_complete_filename)

    metrics_test_NMS_complete_filename = "metrics-test-NMS={}|".format(nms) + experiment_complete_ID + ".csv"
    metrics_test_NMS_complete_path = os.path.join(experiment_complete_results_path['metrics_test'], metrics_test_NMS_complete_filename)

    # plots-test
    FROC_test_complete_filename = "FROC|" + experiment_complete_ID + ".png"
    FROC_test_complete_path = os.path.join(experiment_complete_results_path['plots_test'], FROC_test_complete_filename)

    FROC_linear_test_complete_filename = "FROC-Linear|" + experiment_complete_ID + ".png"
    FROC_linear_test_complete_path = os.path.join(experiment_complete_results_path['plots_test'], FROC_linear_test_complete_filename)

    ROC_test_complete_filename = "ROC|" + experiment_complete_ID + ".png"
    ROC_test_complete_path = os.path.join(experiment_complete_results_path['plots_test'], ROC_test_complete_filename)

    score_distribution_test_complete_filename = "Score-Distribution|" + experiment_complete_ID + ".png"
    score_distribution_test_complete_path = os.path.join(experiment_complete_results_path['plots_test'], score_distribution_test_complete_filename)

    # coords test
    FROC_test_complete_coords_filename = "FROC-coords|" + experiment_complete_ID + ".csv"
    FROC_test_complete_coords_path = os.path.join(experiment_complete_results_path['coords_test'], FROC_test_complete_coords_filename)

    ROC_test_complete_coords_filename = "ROC-coords|" + experiment_complete_ID + ".csv"
    ROC_test_complete_coords_path = os.path.join(experiment_complete_results_path['coords_test'], ROC_test_complete_coords_filename)

    # plots-test NMS
    FROC_test_NMS_complete_filename = "FROC-NMS={}|".format(nms) + experiment_complete_ID + ".png"
    FROC_test_NMS_complete_path = os.path.join(experiment_complete_results_path['plots_test_NMS'], FROC_test_NMS_complete_filename)

    FROC_linear_test_NMS_complete_filename = "FROC-Linear-NMS={}|".format(nms) + experiment_complete_ID + ".png"
    FROC_linear_test_NMS_complete_path = os.path.join(experiment_complete_results_path['plots_test_NMS'], FROC_linear_test_NMS_complete_filename)

    ROC_test_NMS_complete_filename = "ROC-NMS={}|".format(nms) + experiment_complete_ID + ".png"
    ROC_test_NMS_complete_path = os.path.join(experiment_complete_results_path['plots_test_NMS'], ROC_test_NMS_complete_filename)

    score_distribution_test_NMS_complete_filename = "Score-Distribution-NMS={}|".format(nms) + experiment_complete_ID + ".png"
    score_distribution_test_NMS_complete_path = os.path.join(experiment_complete_results_path['plots_test_NMS'], score_distribution_test_NMS_complete_filename)

    # coords test NMS
    FROC_test_NMS_complete_coords_filename = "FROC-NMS={}-coords|".format(nms) + experiment_complete_ID + ".csv"
    FROC_test_NMS_complete_coords_path = os.path.join(experiment_complete_results_path['coords_test_NMS'], FROC_test_NMS_complete_coords_filename)

    ROC_test_NMS_complete_coords_filename = "ROC-NMS={}-coords|".format(nms) + experiment_complete_ID + ".csv"
    ROC_test_NMS_complete_coords_path = os.path.join(experiment_complete_results_path['coords_test_NMS'], ROC_test_NMS_complete_coords_filename)

    path = {
        'detections': {
            'test': detections_test_complete_path,
            'test_NMS': {
                nms: detections_test_NMS_complete_path,
            },
        },

        'detections_rocalc': experiment_complete_results_path['detections_rocalc'],
        'rocalc_cases': experiment_complete_results_path['detections_rocalc_subfolder']['cases'],
        'rocalc_detections': experiment_complete_results_path['detections_rocalc_subfolder']['detections'],

        'metrics': {
            'test': metrics_test_complete_path,
            'test_NMS': {
                nms: metrics_test_NMS_complete_path,
            }
        },

        'plots_test': {
            'FROC': FROC_test_complete_path,
            'FROC_linear': FROC_linear_test_complete_path,
            'ROC': ROC_test_complete_path,

            'coords': {
                'FROC': FROC_test_complete_coords_path,
                'ROC': ROC_test_complete_coords_path,
            },

            'score_distribution': score_distribution_test_complete_path,
        },

        'plots_test_NMS': {
            nms: {
                'FROC': FROC_test_NMS_complete_path,
                'FROC_linear': FROC_linear_test_NMS_complete_path,
                'ROC': ROC_test_NMS_complete_path,

                'coords': {
                    'FROC': FROC_test_NMS_complete_coords_path,
                    'ROC': ROC_test_NMS_complete_coords_path,
                },

                'score_distribution': score_distribution_test_NMS_complete_path,
            }
        }
    }

    return path
| cirorusso2910/GravityNet | net/initialization/init_complete.py | init_complete.py | py | 8,363 | python | en | code | 7 | github-code | 90 |
38416352358 | from flask import render_template, request, jsonify, abort, redirect, url_for, flash
from mdurocherart.contact import bp
from mdurocherart.contact.models import send_email, format_email, send_email_with_attachments
from mdurocherart.utils import _validate_file
@bp.route("/", methods=["GET"])
def homepage():
    """Render the static contact page."""
    return render_template("contact/homepage.html")
@bp.route("/process_contact", methods=["POST"])
def process_contact():
    """Handle the contact form: validate the upload, build and send the email.

    Always redirects back to the contact page with a flash message describing
    the outcome.  Only png/jpeg attachments are accepted.
    """
    req = request.form.to_dict()
    image_file = request.files['input-upload']

    # reject anything that is not a png/jpeg before touching the mail backend
    # (the second return value of _validate_file is unused here)
    accept_file, _ = _validate_file(image_file)
    if not accept_file:
        flash('Please only attach png or jpeg files.')
        return redirect(url_for('contact.homepage'))

    email_obj = format_email(req, image_file)
    # quote requests carry the uploaded image as an attachment
    if email_obj.subject == 'quote':
        status = send_email_with_attachments(email_obj)
    else:
        status = send_email(email_obj)

    if status is not True:
        # fixed word order of the user-facing error message
        flash('There seems to be an error with your request, please try again later!')
        return redirect(url_for('contact.homepage'))

    flash('Thank you for reaching out!')
    return redirect(url_for('contact.homepage'))
| DKasonde/art_portfolio_site | src/mdurocherart/contact/routes.py | routes.py | py | 1,146 | python | en | code | 0 | github-code | 90 |
16363856248 | #!/usr/bin/env python
import unittest
from ct.crypto.asn1 import tag
class TagTest(unittest.TestCase):
    """Test tag encoding."""

    def test_encode_read(self):
        """Round-trip every tag class/encoding combination through Tag and
        Tag.read, and check that tags from distinct initializers never
        compare equal."""
        valid_tags = (
            # (initializers, encoding)
            ((0, tag.UNIVERSAL, tag.PRIMITIVE), "\x00"),
            ((1, tag.UNIVERSAL, tag.PRIMITIVE), "\x01"),
            ((16256, tag.UNIVERSAL, tag.PRIMITIVE), "\x1f\xff\x00"),
            ((16, tag.UNIVERSAL, tag.CONSTRUCTED), "\x30"),
            ((17, tag.UNIVERSAL, tag.CONSTRUCTED), "\x31"),
            ((16256, tag.UNIVERSAL, tag.CONSTRUCTED), "\x3f\xff\x00"),
            ((0, tag.APPLICATION, tag.PRIMITIVE), "\x40"),
            ((1, tag.APPLICATION, tag.PRIMITIVE), "\x41"),
            ((16256, tag.APPLICATION, tag.PRIMITIVE), "\x5f\xff\x00"),
            ((0, tag.APPLICATION, tag.CONSTRUCTED), "\x60"),
            ((1, tag.APPLICATION, tag.CONSTRUCTED), "\x61"),
            ((16256, tag.APPLICATION, tag.CONSTRUCTED), "\x7f\xff\x00"),
            ((0, tag.CONTEXT_SPECIFIC, tag.PRIMITIVE), "\x80"),
            ((1, tag.CONTEXT_SPECIFIC, tag.PRIMITIVE), "\x81"),
            ((16256, tag.CONTEXT_SPECIFIC, tag.PRIMITIVE), "\x9f\xff\x00"),
            ((0, tag.CONTEXT_SPECIFIC, tag.CONSTRUCTED), "\xa0"),
            ((1, tag.CONTEXT_SPECIFIC, tag.CONSTRUCTED), "\xa1"),
            ((16256, tag.CONTEXT_SPECIFIC, tag.CONSTRUCTED), "\xbf\xff\x00"),
            ((0, tag.PRIVATE, tag.PRIMITIVE), "\xc0"),
            ((1, tag.PRIVATE, tag.PRIMITIVE), "\xc1"),
            ((16256, tag.PRIVATE, tag.PRIMITIVE), "\xdf\xff\x00"),
            ((0, tag.PRIVATE, tag.CONSTRUCTED), "\xe0"),
            ((1, tag.PRIVATE, tag.CONSTRUCTED), "\xe1"),
            ((16256, tag.PRIVATE, tag.CONSTRUCTED), "\xff\xff\x00"),
            )

        for init, enc in valid_tags:
            number, tag_class, encoding = init
            t = tag.Tag(number, tag_class, encoding)
            self.assertEqual(t.number, number)
            self.assertEqual(t.tag_class, tag_class)
            self.assertEqual(t.encoding, encoding)
            self.assertEqual(t.value, enc)
            # read() must consume exactly the encoded bytes and return the rest
            self.assertEqual((t, ""), tag.Tag.read(enc))
            self.assertEqual((t, "rest"), tag.Tag.read(enc + "rest"))

        # tags built from different initializers must never compare equal
        for i in range(len(valid_tags)):
            for j in range(i+1, len(valid_tags)):
                self.assertNotEqual(tag.Tag(*valid_tags[i][0]),
                                    tag.Tag(*valid_tags[j][0]))
if __name__ == '__main__':
    # run the test suite when this module is executed directly
    unittest.main()
| google/certificate-transparency | python/ct/crypto/asn1/tag_test.py | tag_test.py | py | 2,505 | python | en | code | 862 | github-code | 90 |
38090004240 | #encoding=utf-8
import cv2
import numpy as np
import pickle
import matplotlib.pyplot as plt
import sys,os
from PIL import Image
## load the previously saved crop-box list
totalList = pickle.load(open("./totalList.txt",'rb'))
# patchInfo records, per scene, how many images it has and how many patches were cut
patchInfo = []
imageN = 0
patchN = 0
## crop every image of every scene and save the resulting patches to ./stored/
# NOTE(review): the open() handles here and below are never closed explicitly;
# consider "with open(...)" if this is reused.
fileSrc = "D:/PythonSpace/imgaeCutTest/data/"
fileList = os.listdir(fileSrc)
for fileN,f in enumerate(fileList):
    imgSrc = fileSrc+f+'/'
    cropList = totalList[fileN]
    for imgN,img in enumerate(os.listdir(imgSrc)):
        imgDir = imgSrc+img
        # normalise every image to 640x640 before cropping
        imgTemp = Image.open(imgDir).resize((640,640))
        imageN+=1
        for ptcN, t in enumerate(cropList):
            imgTemp.crop(t).save("./stored/"+f+'_'+str(imgN)+'_'+str(ptcN)+'.jpg')
            patchN+=1
    # counters reset per scene after recording them
    patchInfo.append((imageN,patchN))
    imageN = 0
    patchN = 0
    print("-->finish "+f)
# persist the (image count, patch count) bookkeeping for later use
pickle.dump(patchInfo,open("./patchInfo.txt","wb"),True)
print(patchInfo)
| hongge831/scene_change_detection | tools/imgaeCutTest/makeData.py | makeData.py | py | 1,049 | python | en | code | 0 | github-code | 90 |
32409714161 | # This is a sample Python script.
# Press Maiusc+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import tkinter as tk
# NOTE(review): two independent tk.Tk() roots are created; tkinter normally
# expects one root plus tk.Toplevel for additional windows -- confirm intent.
home_gui = tk.Tk()
graph_gui = tk.Tk()
def home_button1_action():
    """Handler for the first home button: log the click and re-show both windows."""
    print('primo estratto!')
    home_gui.deiconify()
    graph_gui.deiconify()
def home_button2_action():
    """Handler for the second home button: only logs the click."""
    print('secondo estratto!')
def graph_button1_action():
    """Handler for the graph window's exit button: re-shows both windows."""
    graph_gui.deiconify()
    home_gui.deiconify()
# controls...
# NOTE(review): .pack() returns None, so these *_button variables hold None,
# not the Button widgets -- fine as long as they are never used as widgets.
home_button1 = tk.Button(home_gui, text='primo estratto', command=home_button1_action).pack()
home_button2 = tk.Button(home_gui, text='secondo estratto', command=home_button2_action).pack()
home_button3 = tk.Button(home_gui, text='Exit', command=quit).pack()
graph_button1 = tk.Button(graph_gui, text='Exit', command=graph_button1_action).pack()
# window titles and geometry (both windows share the same size/position)
home_gui.title('Home')
home_gui.geometry('300x200+10+10')
graph_gui.title('Graph')
graph_gui.geometry('300x200+10+10')
graph_gui.deiconify()
home_gui.mainloop()
| gamico001/regression_se | Test/test_More_GUI.py | test_More_GUI.py | py | 1,077 | python | en | code | 0 | github-code | 90 |
14227546868 | from turtle import Turtle, Screen
import random
#
#
#
class Food(Turtle):
    """A small green turtle-shaped marker that appears at random positions."""

    def __init__(self):
        """Create the food marker, style it, and place it at a random spot."""
        super().__init__()
        self.shape("turtle")
        self.penup()
        self.shapesize(stretch_wid=1, stretch_len=1)
        self.color("green")
        self.speed("fastest")
        self.refresh()

    def refresh(self):
        """Teleport the food to a random location within the playing field."""
        new_x = random.randint(-280, 280)
        new_y = random.randint(-280, 280)
        self.goto(x=new_x, y=new_y)
| fjpolo/Udemy100DaysOfCodeTheCompletePyhtonProBootcamp | Day020_021/food.py | food.py | py | 594 | python | en | code | 8 | github-code | 90 |
25112489446 | import pytesseract
from PIL import Image, ImageOps, ImageFilter
# Set the path to the tesseract executable if it's not in the PATH
# pytesseract.pytesseract.tesseract_cmd = '/usr/local/bin/tesseract' # Path to tesseract on macOS
# Function to preprocess and invert image colors
def preprocess_and_invert_image(image_path):
    """Load an image and prepare it for OCR.

    Pipeline: grayscale -> median filter (denoise) -> autocontrast ->
    colour inversion (negative to positive) -> sharpen.
    """
    img = Image.open(image_path).convert('L')      # grayscale
    img = img.filter(ImageFilter.MedianFilter())   # reduce noise
    img = ImageOps.autocontrast(img)               # improve contrast
    img = ImageOps.invert(img)                     # flip negative colours
    return img.filter(ImageFilter.SHARPEN)         # final sharpening pass
# Function to perform OCR on a preprocessed image
def ocr_on_preprocessed_image(image_path):
    """Run Tesseract OCR over the preprocessed, colour-inverted image and
    return the recognised text."""
    # Preprocess and invert the image colors
    img = preprocess_and_invert_image(image_path)
    # Use Tesseract to do OCR on the image
    text = pytesseract.image_to_string(img)
    return text
# Example usage
# Replace 'path_to_negative_image.jpg' with the actual path to your negative image file
extracted_text = ocr_on_preprocessed_image('image.jpg')
print(extracted_text)
| shmrymbd/ocr | untitled folder/new2.py | new2.py | py | 1,241 | python | en | code | 0 | github-code | 90 |
42130473014 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set Ukrainian verbose names on the Day model and drop its
    order_in_journey field."""

    dependencies = [
        ('vohni', '0005_auto_20170401_1306'),
    ]

    operations = [
        # verbose names translate to "Days" / "Day"
        migrations.AlterModelOptions(
            name='day',
            options={'verbose_name_plural': 'Дні', 'verbose_name': 'День'},
        ),
        migrations.RemoveField(
            model_name='day',
            name='order_in_journey',
        ),
    ]
| suchOK/saintelm_2017 | vohni/migrations/0006_auto_20170401_1316.py | 0006_auto_20170401_1316.py | py | 513 | python | en | code | 0 | github-code | 90 |
18314235369 | from collections import defaultdict
import sys
sys.setrecursionlimit(10**5)
def solve():
    """Read a tree from stdin and colour its edges so that no two edges
    incident to the same vertex share a colour.

    Input: N, then N-1 edges (1-based).  Prints the number of colours used,
    then one colour per edge in input order.
    """
    N = int(input())
    G = defaultdict(list)
    for i in range(N-1):
        a, b = map(int,input().split())
        a -= 1
        b -= 1
        # store the edge index so the answer can be reported in input order
        G[a].append((b,i))
        G[b].append((a,i))
    res = [0] * (N-1)
    dfs(0,-1,-1,G,res)
    print(max(res))
    print(*res, sep='\n')
def dfs(v, p, pc, G, res):
    """Greedily colour the edges of the subtree rooted at v.

    pc is the colour of the edge joining v to its parent p (never reused at
    v).  Colours are written into res, indexed by the edge id stored in G.
    """
    colour = 1
    for child, edge_id in G[v]:
        if child == p:
            continue
        if colour == pc:
            # skip the parent edge's colour
            colour += 1
        res[edge_id] = colour
        dfs(child, v, colour, G, res)
        colour += 1
if __name__ == '__main__':
solve() | Aasthaengg/IBMdataset | Python_codes/p02850/s766291151.py | s766291151.py | py | 686 | python | en | code | 0 | github-code | 90 |
24191592158 | import math
# Get two numbers from the user.
num_items = int(input(f"Enter the number of items: "))
items_per_box = int(input(f"Enter the number of items per box: "))
# Compute the number of boxes by dividing
# and then calling the math.ceil function.
# math.ceil rounds up so a partially filled final box is still counted.
num_boxes = math.ceil(num_items / items_per_box)
# Display a blank line.
print()
# Display the results for the user to see.
print(f"For {num_items} items, packing {items_per_box}"
      f" items in each box, you will need {num_boxes} boxes.")
| byui-cse/cse111-course | docs/lesson02/check_solution.py | check_solution.py | py | 497 | python | en | code | 2 | github-code | 90 |
18197876729 | x,n=[int(x) for x in input().split()]
# x is the preferred integer; n values are forbidden (read on the next line)
if n==0:
    # nothing is forbidden, so x itself is the answer
    print(x)
else:
    p=[int(x) for x in input().split()]
    ans=[]
    Flag=False
    while ans==[]:
        # scan outward from x; at each distance i the smaller candidate x-i
        # is preferred over x+i
        for i in range(105):
            if x-i not in p:
                ans.append(x-i)
                Flag=True
                break
            elif x+i not in p:
                ans.append(x+i)
                Flag=True
                break
        if Flag:
            break
print(min(ans)) | Aasthaengg/IBMdataset | Python_codes/p02641/s530230180.py | s530230180.py | py | 368 | python | en | code | 0 | github-code | 90 |
18218587039 | import sys
read = sys.stdin.readline
import time
import math
import itertools as it
def inp():
    """Read one line from stdin and return it as an int."""
    return int(input())
def inpl():
    """Read one line from stdin and return its whitespace-separated ints."""
    return list(map(int, input().split()))
start_time = time.perf_counter()
# ------------------------------
# N people, K handout events; count the people who never received anything
N, K = inpl()
dp = [False] * N          # dp[i]: person i+1 has received at least once
for i in range(K):
    d = inp()             # recipient count for this event (value unused)
    A = inpl()
    for a in A:
        dp[a-1] = True
cnt = 0
for bl in dp:
    if not bl:
        cnt += 1
print(cnt)
# -----------------------------
end_time = time.perf_counter()
print('time:', end_time-start_time, file=sys.stderr) | Aasthaengg/IBMdataset | Python_codes/p02688/s268638487.py | s268638487.py | py | 540 | python | en | code | 0 | github-code | 90 |
72558385258 | class ExternalError(Exception):
pass
def load_yaml(yml_txt, file_name=None, valid=None):
    """Load a YAML document and optionally apply a validator.

    If *file_name* is given, its contents replace *yml_txt*.  Any failure
    (I/O, parse, validation) is re-raised as ExternalError.
    """
    import yaml

    try:
        if file_name:
            with open(file_name) as f:
                yml_txt = f.read()
        parsed = yaml.safe_load(yml_txt)
        if valid:
            valid(parsed)
    except Exception as err:
        raise ExternalError(err)
    return parsed
# quick smoke test: parse an inline YAML document and show the result
yaml_txt = """
debug: False
log_file: /var/log/blah
retries: 5
"""
yaml_var = load_yaml(yaml_txt)
print(yaml_var.__repr__())
| briancamp/brlib | load_yaml.py | load_yaml.py | py | 557 | python | en | code | 0 | github-code | 90 |
18460259329 | H,W = list(map(int, input().split()))
S = [[1 if s=='#' else -1 for s in input()] for _ in range(H)]
def surround(matrix, fill=0):
    """Return a copy of the 2-D list *matrix* padded with a one-cell border.

    :param matrix: rectangular list of lists
    :param fill: value used for the border cells (default 0)
    :raises TypeError: if *matrix* is not a list of lists
    :raises ValueError: if the rows have differing lengths
    """
    # The original used bare ``raise`` statements, which outside an except
    # block raise ``RuntimeError: No active exception to re-raise`` --
    # replaced with explicit, descriptive exceptions.
    if not isinstance(matrix, list):
        raise TypeError("matrix must be a list")
    if not all(isinstance(rows, list) for rows in matrix):
        raise TypeError("every row of matrix must be a list")
    if matrix and not all(len(rows) == len(matrix[0]) for rows in matrix):
        raise ValueError("all rows of matrix must have the same length")
    # robustness: an empty matrix previously crashed with IndexError;
    # return just the two border rows instead (consistent with 0 columns)
    n_cols = len(matrix[0]) if matrix else 0
    border = [fill] * (n_cols + 2)
    # copy the border row so the top and bottom rows are distinct objects
    return [border[:]] + [[fill] + rows + [fill] for rows in matrix] + [border[:]]
S = surround(S, 0)
# grid encoding -- black: 1, white: -1, sentinel border: 0

from itertools import product
# every interior cell still awaiting a component assignment
to_visit = set(list(product(range(1,H+1), range(1, W+1))))

from collections import deque

answer = 0
while to_visit:
    # pick any unvisited cell and flood-fill its alternating-colour component
    h,w = to_visit.pop()
    dq = deque([(h,w)])
    reachable = set([(h,w)])
    blacks = 0
    whites = 0
    if S[h][w]==1:
        blacks += 1
    else:
        whites += 1
    while dq:
        h,w = dq.pop()
        nexts = [(h-1,w),(h+1,w),(h,w-1),(h,w+1)]
        for nxt in nexts:
            if nxt in reachable:
                continue
            else:
                # 1 + (-1) == 0: only cross edges that change colour
                # (the 0 border never satisfies this, so it acts as a wall)
                if S[h][w] + S[nxt[0]][nxt[1]]==0:
                    to_visit.remove(nxt)
                    dq.append(nxt)
                    reachable.add(nxt)
                    if S[nxt[0]][nxt[1]]==1:
                        blacks +=1
                    else:
                        whites +=1
    # every (black, white) pair within a component is one valid move
    answer += blacks * whites
print(answer)
40247486539 | """Crea una función llamada devolver_distintos() que reciba 3
integers como parámetros.
Si la suma de los 3 numeros es mayor a 15, va a devolver el
número mayor.
Si la suma de los 3 numeros es menor a 10, va a devolver el
número menor.
Si la suma de los 3 números es un valor entre 10 y 15
(incluidos) va a devolver el número de valorintermedio."""
def devolver_distintos(int1, int2, int3):
    """Return a Spanish sentence describing one of the three integers.

    If the sum exceeds 15 the largest is reported, if it is below 10 the
    smallest, otherwise (sum between 10 and 15 inclusive) the middle value.
    """
    valores = [int1, int2, int3]
    total = sum(valores)
    if total > 15:
        return f'el número mayor es {max(valores)}'
    if total < 10:
        return f'el número menor es {min(valores)}'
    _, medio, _ = sorted(valores)
    return f'el número de valor intermedio es {medio}'
| DARANCOG/Python-Projects | Día #5/Ejercicio_1.py | Ejercicio_1.py | py | 732 | python | es | code | 0 | github-code | 90 |
73514797738 | import itertools
import math
from collections import defaultdict, Counter
def sieve(x: int) -> list:
    """Build a least-prime-factor table for 0..x.

    The returned list ``table`` has length x+1; ``table[k]`` is the smallest
    prime factor of k when k is composite, and 0 when k is 0, 1 or prime.
    """
    table = [0] * (x + 1)
    for i in range(2, math.isqrt(x) + 1):
        if table[i]:
            continue  # i is composite; its multiples were marked already
        for k in range(i * i, x + 1, i):
            # record only the smallest prime dividing k
            if not table[k]:
                table[k] = i
    return table
class FactorsOfList:
    """Factor each element of a list of ints into prime-power components,
    memoising results via a least-prime-factor sieve over max(x)."""

    def __init__(self, x: list):
        # x: the list of positive ints to factor
        # discovered: memo of value -> set of components seen so far
        # least_primes[k]: smallest prime factor of k (0 for 0/1/primes)
        self.x = x
        self.discovered = defaultdict(set)
        self.least_primes = sieve(max(x))

    def decompose(self, x, primes=None):
        """Return a set containing x and prime-power components of x.

        NOTE(review): the memo is keyed on x alone while ``primes``
        accumulates exponents across the recursion, so a value first reached
        while decomposing a larger number can cache different prime powers
        than a fresh top-level call would -- the cached sets are
        order-dependent.  Confirm this is acceptable for solve()'s counting.
        """
        if x in self.discovered:
            return self.discovered[x]
        leprime = self.least_primes[x]
        if leprime == 0:
            # x is prime (or 0/1): it is its own only component
            self.discovered[x] = self.discovered[x].union({x})
            return {x}
        self.discovered[x]= self.discovered[x].union({x})
        if not primes:
            primes = defaultdict(lambda: 0)
        primes[leprime] += 1
        # add the accumulated power of the current least prime
        self.discovered[x] = self.discovered[x].union(
            {leprime ** primes[leprime]})
        # recurse on x with one factor of leprime removed
        self.discovered[x] = self.discovered[x].union({i for i in itertools.chain(
            self.decompose(int(x/leprime), primes))})
        return self.discovered[x]

    def factorize(self):
        """Yield decompose(x) for each x of the original list, in order."""
        for x in self.x:
            yield self.decompose(x)
def solve(A: list):
    """For each element of A, count the elements of A that are neither 1 nor
    one of its prime-power components.

    NOTE(review): depends on FactorsOfList's order-dependent memo cache
    documented above.
    """
    count_A = Counter(A)
    set_A = set(A)  # unused; kept as-is
    lnA = len(A)
    fktr = FactorsOfList(A)
    output = []
    for f in fktr.factorize():
        if f == {1}:
            # 1 divides everything, so exclude only the other 1s
            output += [lnA - count_A[1]]
            continue
        output += [lnA - sum([count_A[d] for d in f]) - count_A[1]]
    return output
if __name__ == '__main__':
A = [1, 2, 2, 2, 7, 14]
fktr = FactorsOfList(A)
print([f for f in fktr.factorize()])
print(solve(A)) | kerwei/treasure-trove | nondivisor.py | nondivisor.py | py | 1,953 | python | en | code | 0 | github-code | 90 |
21756340460 | import os
import re
import argparse
import subprocess
import shutil
def main():
    """Select the best NOVOPlasty option sequence by BLAST against a reference.

    A nucleotide BLAST database is built from the reference (once); every
    Option*.fasta is blasted against it, the aligned lengths of the longest
    fragments are summed, and the option with the greatest total is copied to
    the output file.
    """
    parser = argparse.ArgumentParser(description="基于Blast筛选Novoplasty产生的几个option中哪个是最佳序列")
    parser.add_argument("-i", "--input", required=False, default=r"D:\working\Develop\EasyMiner Develop\EasyMiner\bin\Debug\net6.0-windows\results\6_Bupleurum_hamiltonii_HQ_R\NOVOPlasty", help="选项文件夹的路径")
    parser.add_argument("-r", "--ref", required=False, default=r"Bupleurum_scorzonerifolium#MT534601.fasta", help="参考序列的路径")
    parser.add_argument("-o", "--output", required=False, default=r"best.fasta", help="结果文件的路径")
    args = parser.parse_args()

    folder_path = args.input
    pattern = re.compile(r'^Option.*\.fasta$')
    fasta_files = [file for file in os.listdir(folder_path) if pattern.match(file)]

    subject_file = args.ref
    output_db = "blast_db"  # name of the BLAST database
    output_file = "blast_output.txt"

    # build the BLAST database only if it does not exist yet
    if not os.path.exists(output_db + ".nhr"):
        makeblastdb_cmd = [r"..\analysis\makeblastdb.exe", "-in", subject_file, "-dbtype", "nucl", "-out", output_db]
        print(" ".join(makeblastdb_cmd))
        subprocess.run(makeblastdb_cmd, check=True)

    longest_total_length = 0
    longest_query_file = ""

    for query_file in fasta_files:
        query_file = os.path.join(r".\NOVOPlasty", query_file)
        blastn_cmd = [r"..\analysis\blastn.exe", "-query", query_file, "-db", output_db, "-out", output_file, "-outfmt", "6", "-evalue", "10"]
        subprocess.run(blastn_cmd, check=True)
        # print(query_file)
        # take the longest fragments and sum their aligned lengths
        top_fragments = get_top_fragments(output_file, 1000)
        total_length = 0
        for fragment in top_fragments:
            fields = fragment.split("\t")
            # -outfmt 6 columns 7/8 are the query start/end positions
            query_start = int(fields[6])
            query_end = int(fields[7])
            fragment_length = query_end - query_start
            total_length += fragment_length
            print(f"片段起始位置: {query_start}, 片段结束位置: {query_end}, 片段长度: {fragment_length}")

        # remember the option with the greatest total aligned length so far
        if total_length > longest_total_length:
            longest_total_length = total_length
            longest_query_file = query_file

    # copy the best-scoring option into the requested output file
    if longest_query_file:
        print(longest_query_file)
        shutil.copy(longest_query_file, os.path.join(folder_path,args.output))
def get_top_fragments(blast_output_file, num_top):
    """Return the num_top longest alignments from a tabular BLAST output file.

    Lines are sorted by descending alignment length (tab-separated column 4
    of -outfmt 6) and returned stripped of trailing whitespace.
    """
    with open(blast_output_file, "r") as handle:
        records = handle.readlines()
    records.sort(key=lambda rec: int(rec.strip().split("\t")[3]), reverse=True)
    return [rec.strip() for rec in records[:num_top]]
if __name__ == "__main__":
main()
| sculab/EasyMiner | scripts/check_option_blast.py | check_option_blast.py | py | 3,091 | python | en | code | 1 | github-code | 90 |
70807740457 | import sys
import os
import json
import tce_py.tce_report as report
def _read_conf(path):
with open(path, "r") as fp:
conf = json.load(fp)
p = os.path.abspath(path)
conf["tcedir"] = os.path.dirname(p)
return conf
def main(argv):
    """Merge one TCE coverage report into another.

    argv layout: [prog, merge-config, merge-report, other-config,
    other-report].  The second report is merged into the first, which is
    written back in place.  Returns 0 on success.
    """
    if len(argv) != 5:
        raise ValueError("invalid cmdline")
    mconf = _read_conf(argv[1])
    mrep = argv[2]
    conf = _read_conf(argv[3])
    rep = argv[4]
    mr = report.read(mconf, mrep)
    r = report.read(conf, rep)
    report.merge(mr, r)
    report.write(mr, mrep)
    return 0
if __name__ == "__main__":
    argv = sys.argv
    rc = 1
    try:
        rc = main(argv)
    except Exception as e:
        # report the failure on stderr; the non-zero rc signals the error
        sys.stderr.write("%s: fail to resove gcov: %s\n" % (argv[0], e))
    sys.exit(rc)
| oktetlabs/test-environment | tools/tce/tce_merge_report.py | tce_merge_report.py | py | 769 | python | en | code | 4 | github-code | 90 |
24381503317 | """
This file is part of Athena.
Athena is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Athena is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Athena. If not, see <http://www.gnu.org/licenses/>.
"""
import smtplib
from email.mime.text import MIMEText
from datetime import datetime
from elasticsearch import Elasticsearch
INDEX = 'bills'
from legislation.settings import EMAIL_SERVER
from legislation_models.models import Search
es = Elasticsearch('localhost:9200')
def send_notification(search_name, updates, to_address, hits, total):
    """Email the owner of a saved search about new/updated legislation.

    :param search_name: display name of the saved search
    :param updates: truthy when the search also notifies on updates
    :param to_address: recipient email address
    :param hits: search hits exposing id, number and summary attributes
    :param total: total number of matching bills (hits may be a subset)
    """
    subject = 'New Legislation - %s' % search_name
    msg_lines = ['New%s legislation for your saved search %s (showing %s/%s)' %
                 ('/updated' if updates else '', search_name, len(hits), total)]
    for hit in hits:
        msg_lines.append(
            '<a href="https://godsigma.zolnetwork.com/legis/bill/%(bill_id)s">%(number)s - %(summary)s</a>' % {
                'bill_id': hit.id,
                'number': hit.number,
                'summary': hit.summary
            })
    msg_text = '\n\n'.join(msg_lines)
    # NOTE(review): the body contains HTML anchors but is sent as text/plain
    # (MIMEText's default subtype) -- confirm whether 'html' was intended.
    msg = MIMEText(msg_text)
    msg['Subject'] = subject
    msg['From'] = 'athena@zolnetwork.com'
    msg['To'] = to_address
    s = smtplib.SMTP(EMAIL_SERVER)
    s.send_message(msg)
    s.quit()
def _send_notifications(notification_period):
    """Send notification emails for every saved search with the given period
    and advance each search's last_notify_date to now."""
    now = datetime.now()
    saved_searches = Search.objects.filter(notification=notification_period)
    for saved_search in saved_searches:
        es_search = saved_search.get_elasticsearch_query().using(es).index(INDEX)
        # update-subscribed searches match on modification time, otherwise
        # only newly indexed bills are reported
        if saved_search.notify_on_update:
            range_field = 'modified_date'
        else:
            range_field = 'index_date'
        es_search = es_search.query('range', **{range_field: {'gte': saved_search.last_notify_date}})
        total = es_search.count()
        # the emailed listing is capped at 10 bills; total is reported anyway
        send_notification(saved_search.name,
                          saved_search.notify_on_update,
                          saved_search.owner.email,
                          es_search.size(10),
                          total)
        saved_search.last_notify_date = now
        saved_search.save()
def send_daily_notifications():
    """Send notification emails for all daily-subscribed saved searches."""
    _send_notifications(Search.NOTIFICATION_DAILY)
def send_weekly_notifications():
    """Send notification emails for all weekly-subscribed saved searches."""
    _send_notifications(Search.NOTIFICATION_WEEKLY)
if __name__ == '__main__':
send_daily_notifications()
send_weekly_notifications() | magdalene/athena-legislation | legislation/data_processing/notify.py | notify.py | py | 2,867 | python | en | code | 0 | github-code | 90 |
18072358639 | # ABC042D - いろはちゃんとマス目 / Iroha and a Grid (ARC058D)
def comb(n: int, r: int) -> int:
    """n choose r via the module-level fact/inv tables built in main().

    NOTE(review): the product is not reduced mod 10**9+7 here; main() applies
    the modulus only to the final answer, relying on Python big ints.
    """
    return fact[n] * inv[n - r] * inv[r]
def main():
    """ABC042D: count H x W lattice paths that avoid the bottom-left
    A-rows-by-B-columns block; prints the count mod 10**9+7."""
    global fact, inv
    H, W, A, B = tuple(map(int, input().split()))
    MOD = 10 ** 9 + 7
    # table of factorials
    fact, x = [1] * (H + W + 1), 1
    for i in range(1, H + W + 1):
        x = (x * i) % MOD
        fact[i] = x
    # table of inverse factorials
    # inverse via Fermat's little theorem, then filled downward
    inv = [1] * (H + W + 1)
    inv[-1] = pow(fact[-1], MOD - 2, MOD)
    x = inv[-1]
    for i in range(H + W - 1, 0, -1):
        x = (x * (i + 1)) % MOD
        inv[i] = x
    ans = 0
    # sum over the column where the path first leaves the forbidden block
    for i in range(B, W):
        ans += comb(i + H - A - 1, i) * comb(W - i - 1 + A - 1, A - 1)
    print(ans % MOD)
if __name__ == "__main__":
main() | Aasthaengg/IBMdataset | Python_codes/p04046/s205926498.py | s205926498.py | py | 773 | python | en | code | 0 | github-code | 90 |
13307195719 | #!/usr/bin/env python3
import nice
import json
import re
from bs4 import BeautifulSoup
url_override = {
# Badly written disambiguation page:
'https://de.wikipedia.org/wiki/Christian_Petry': 'https://de.wikipedia.org/wiki/Christian_Petry_(Politiker)',
'https://de.wikipedia.org/wiki/Charles_Huber': 'https://de.wikipedia.org/wiki/Charles_M._Huber',
# Wikipedia doesn't list him with his 'name' as per bundestag.de:
'https://de.wikipedia.org/wiki/Karl-Heinz_Helmut_Wange': 'https://de.wikipedia.org/wiki/Karl-Heinz_Wange',
'https://de.wikipedia.org/wiki/Gerd_Müller': 'https://de.wikipedia.org/wiki/Gerd_M%C3%BCller_%28CSU%29',
'https://de.wikipedia.org/wiki/Christian_Frhr._von_Stetten': 'https://de.wikipedia.org/wiki/Christian_Freiherr_von_Stetten',
# Bad default:
'https://de.wikipedia.org/wiki/Andreas_Rimkus': 'https://de.wikipedia.org/wiki/Andreas_Rimkus_(Politiker)',
'https://de.wikipedia.org/wiki/Karl_Lamers': 'https://de.wikipedia.org/wiki/Karl_A._Lamers',
'https://de.wikipedia.org/wiki/Michael_Groß': 'https://de.wikipedia.org/wiki/Michael_Gro%C3%9F_%28Politiker%29',
'https://de.wikipedia.org/wiki/Peter_Stein': 'https://de.wikipedia.org/wiki/Peter_Stein_%28Politiker%29',
}
known_missing = {
'Iris Ripsam',
'Karl-Heinz Wange',
}
# Returns True or False
def is_not_found(soup):
    """Return True when the parsed page is Wikipedia's 'no article' stub."""
    marker = soup.find('div', id='noarticletext')
    return marker is not None
# Returns:
# - None: not a disambiguation page
# - some string: actual url
# If it is a disambiguation page, all implicit assumptions will be 'assert'-ed.
def get_disambiguated_url(soup, expect_party):
    """Resolve a disambiguation page to the URL of the living politician.

    Returns None when 'soup' is not a disambiguation page.  When several
    living candidates of the expected party remain, falls back to the unique
    entry marked as a member of the Bundestag ('MdB').
    """
    PARTY_TO_TEXT = {
        'SPD': 'SPD', 'CSU': 'CSU', 'CDU': 'CDU', 'DIE LINKE': 'Die Linke',
        'GRÜNE': 'Bündnis 90/Die Grünen',
    }
    if soup.find('table', id='Vorlage_Begriffsklaerung') is None:
        return None
    content = soup.find('div', id='mw-content-text')
    assert content is not None
    ul = content.find('ul')
    assert ul is not None
    print('[WARN] Hit disambiguation page')
    found_urls = []
    # Matches a life span like "(1901–1980)" anywhere in the entry text.
    # re.match anchors only at the start, hence the leading '.*'; the trailing
    # '.*' is redundant but harmless.  Raw string: '\(' and '\d' are invalid
    # escape sequences in a normal string literal.
    death_pattern = re.compile(r'.*\(\d{4}[-–—]\d{4}\).*')
    found_mdb = None
    for li in ul.find_all('li'):
        text = li.get_text()
        if 'Politiker' not in text:
            continue
        if 'MdB' in text or 'Bundestag' in text:
            assert found_mdb is None, found_mdb
            a = li.find('a')
            assert a is not None
            # Let's hope the href-scheme doesn't change too soon:
            found_mdb = 'https://de.wikipedia.org' + a['href']
        if PARTY_TO_TEXT[expect_party] not in text:
            # Don't just print 'text', as I might need that URL.
            print('[WARN] Found someone of wrong party: {}'.format(li))
            continue
        if death_pattern.match(text) is not None:
            # Don't just print 'text', as I might need that URL.
            print('[WARN] Ignore dead person: {}'.format(li))
            continue
        a = li.find('a')
        assert a is not None
        # Let's hope the href-scheme doesn't change too soon:
        found_urls.append('https://de.wikipedia.org' + a['href'])
    assert len(found_urls) >= 1, (found_urls, ul)
    if len(found_urls) == 1:
        return found_urls[0]
    assert found_mdb is not None, (found_urls, ul)
    print('[WARN] Using MdB override')
    return found_mdb
def as_soup(path):
    """Parse the cached file at *path* into a BeautifulSoup document."""
    # Loading directly from the response content might be faster, but reading
    # the file back guarantees that the "filesystem cache"-thing actually works.
    with open(path, 'r') as fp:
        markup = fp.read()
    return BeautifulSoup(markup, 'html.parser')
# Returns either the soup of the "real" document, or None if no such thing found.
def get_page_for(name, expect_party):
    """Fetch the Wikipedia page for *name*, applying overrides/disambiguation.

    Returns a (url, soup) tuple, or None when the article is missing and the
    name is whitelisted in known_missing.  Raises AssertionError on an
    unexpectedly missing article or an unresolved disambiguation.
    """
    # Minuses and Umlauts can stay. Hooray!
    urlish_name = name.replace(' ', '_')
    url = 'https://de.wikipedia.org/wiki/' + urlish_name
    if url in url_override:
        print('[WARN] Using override for ' + name)
        url = url_override[url]
    path = nice.get(url)
    soup = as_soup(path)
    if is_not_found(soup):
        if name in known_missing:
            print('[WARN] Not found (and whitelisted): ' + name)
        else:
            print('[ERR!] Unexpectedly not found: ' + name)
            raise AssertionError(name)
        return None
    disambig_url = get_disambiguated_url(soup, expect_party)
    if disambig_url is None:
        return url, soup
    url = disambig_url
    path = nice.get(url)
    soup = as_soup(path)
    if is_not_found(soup):
        # This really, really should not happen.
        # Let's hope that female politicians don't have 'Politikerin' as disambiguation.
        print('[ERR!] Confused about: ' + name)
        raise AssertionError(path)
    # This wouldn't even make sense, or at least there is hopefully only one
    # politician for each name. Note that other parts of this toolchain fail
    # horribly in this case anyway.
    # Bug fix: the assertion message used to be the literal string 'name';
    # report the actual offending name instead.
    assert get_disambiguated_url(soup, expect_party) is None, name
    return url, soup
def get_img_desc_link(name, page_soup):
    """Return the image-description page URL for *name*'s photo, or None.

    None means the article has no usable image.  AssertionError means the
    page structure did not look like a plain photo thumbnail.
    """
    thumb = page_soup.find('div', 'thumbinner')
    # Pages where the "image" is not usable (e.g., is actually a video)
    IMG_BLACKLIST = {
        'Carsten Träger', # is a video
        'Heiko Schmelzle', # is a video
        'Marina Kermer', # is a video
    }
    if thumb is None or name in IMG_BLACKLIST:
        # If there's no image at all, that's fine.
        return None
    # Pages where the image description is "unexpected"
    IMG_WHITELIST = {
        'Burkhard Lischka', # bullshit
        'Cajus Caesar', # Gajus
        'Charles Huber', # M.
        'Christian Frhr. von Stetten', # no 'Frhr.'
        'Christian Kühn', # Chris
        'Johann David Wadephul', # No 'David'
        'Karl Lamers', # A.
        'Karin Evers-Meyer', # Typo in description
        'Matthias Birkwald', # W.
        'Norbert Spinrath', # Typo in description
        'Philipp Graf Lerchenfeld', # Philipp Graf von und zu Lerchenfeld is a very special von und zu snowflake.
        'Ulli Nissen', # Ulrike vs. Ulli
        'Waltraud Wolff', # "Wahlkampfmotiv 2013" is actually quite sensible!
    }
    # Sanity check to see whether it's actually a photo of that person:
    assert name in thumb.get_text() or name in IMG_WHITELIST, page_soup.title
    # There should always be a "magnify" link. I hope.
    magnify = thumb.find('div', 'magnify')
    assert magnify is not None, page_soup.title
    # And it should have an internal link to the page that describes the image. I hope.
    link = magnify.find('a', 'internal')
    assert link is not None, page_soup.title
    # href="/wiki/Datei:Bahr,_Ulrike-9287.jpg"
    # becomes https://de.wikipedia.org/wiki/Datei:Bahr,_Ulrike-9287.jpg
    return 'https://de.wikipedia.org' + link['href']
# Retrieve the copyright *holder* information, or in German: "Urheber".
# This is orthogonal to the license/permissions information.
def parse_copyright(soup):
    """Return the copyright holder named on a file-description page.

    Raises AssertionError when no author row exists, or when the row's
    label is not one of the known German/English prefixes.
    """
    # Maps known verbose/boilerplate author cells to a clean holder name.
    COPYRIGHT_SANITIZE = {
        # Politicians
        "Achtung: Dieses Bild ist nicht gemeinfrei. Es ist zwar frei benutzbar aber gesetzlich"
        " gesch\u00fctzt.\n\n\nNote: this image is not in the Public Domain. It is free to use"
        " but protected by law.\n\n\n\n\n\n\n\nBitte benutzen sie nach M\u00f6glichkeit als"
        " Bildbeschreibung:\nBl\u00f6mke/Kosinsky/Tsch\u00f6pe\n": 'Blömke/Kosinsky/Tschöpe',
        "\u00a9\u00a0Ralf Roletschek\n": 'Ralf Roletschek',
        "Christine Buchholz (full rights of use)": 'Christine Buchholz',
        # Birds
        "English: Uploaded by Aelwyn with": "Aelwyn",
        "This illustration was made by Marek Szczepanek\n": "Marek Szczepanek",
        "Frank Liebig \n\n\n": "Frank Liebig",
        "Dave Menke (1946\u20132011) \u00a0\n\n": "Dave Menke",
        "Self: Commons user MichaelMaggs": "Michael Maggs",
    }
    # "_aut" means: author information. This is what we are looking for.
    author_td = soup.find(id='fileinfotpl_aut') # Sometimes td, sometimes th
    assert author_td is not None, "No copyright holder for file?!"
    assert author_td.name in ['td', 'th'], author_td
    # However, the author is stored in the adjacent HTML table-cell,
    # as author_td itself is non-informative.
    author_text = author_td.parent.get_text()
    # Known row labels; exactly one of them must lead the cell text.
    prefixes = ['\nUrheber\n',
                '\nUrheber bzw.\nNutzungsrechtinhaber\n',
                '\nFotograf\n',
                '\nAuthor\n'
                ]
    # Lead-in junk that sometimes precedes the actual holder name.
    DUMB_PREFIXES = ['Fotograf: ', 'Official White House Photo by ']
    # NOTE(review): purpose unclear from here -- presumably guards against a
    # specific known-bad author cell; confirm before removing.
    assert 'Susie' not in author_text, author_text
    for prefix in prefixes:
        if not author_text.startswith(prefix):
            continue
        found = author_text[len(prefix):].strip()
        # First replace known boilerplate cells, then strip lead-in junk.
        for expect_start, replacement in COPYRIGHT_SANITIZE.items():
            if found.startswith(expect_start):
                found = replacement
        for dumb in DUMB_PREFIXES:
            if found.startswith(dumb):
                found = found[len(dumb):]
        return found
    # Unknown row label: surface the raw text so a new prefix can be added.
    assert False, author_text
# Maps a distinctive snippet of license text, as it appears on the
# file-description page, to a short internal license id.  Every id used here
# must also appear in LICENSE_PREFERENCE_ORDER (checked by
# assert_license_sanity).
KNOWN_LICENSES = {
    'Der Urheberrechtsinhaber erlaubt es jedem, dieses Werk für jeglichen Zweck, inklusive uneingeschränkter Weiterveröffentlichung, kommerziellem Gebrauch und Modifizierung, zu nutzen.': 'public domain',
    'erlaubt es jedem, diese für jeden Zweck zu benutzen, vorausgesetzt, dass der Urheberrechtsinhaber ordnungsgemäß genannt wird.': 'public domain',
    'Der Urheber gestattet jedermann jede Form der Nutzung, unter der Bedingung der angemessenen Nennung seiner Urheberschaft.\nWeiterverbreitung, Bearbeitung und kommerzielle Nutzung sind gestattet.': 'custom: attribution',
    'Lizenz „Freie Kunst“': 'custom: attribution (FAL)',
    'GNU-Lizenz für freie Dokumentation, Version 1.2,': 'GFDL 1.2',
    'GNU-Lizenz für freie Dokumentation, Version 1.2 oder einer späteren Version': 'GFDL 1.2+',
    '\nPublic domainPublic domainfalsefalse\n\n': 'public domain',
    'Creative-Commons-Lizenz „CC0 1.0 Verzicht auf das Copyright“': 'CC0 1.0',
    # Include the closing quotation mark to ensure unambiguous identification.
    'Creative-Commons-Lizenz „Namensnennung 2.0 generisch“': 'CC-BY-2.0',
    'Creative-Commons-Lizenz „Namensnennung 2.0 Deutschland“': 'CC-BY-2.0 de',
    'Creative-Commons-Lizenz „Namensnennung 3.0 nicht portiert“': 'CC-BY-3.0 unported',
    'Creative-Commons-Lizenz „Namensnennung 3.0 Deutschland“': 'CC-BY-3.0 de',
    'Creative-Commons-Lizenz „Namensnennung 4.0 international“': 'CC-BY-4.0 int',
    'Creative-Commons-Lizenz „Namensnennung – Weitergabe unter gleichen Bedingungen 2.0 generisch“': 'CC-BY-SA-2.0',
    'Creative Commons Attribution-Share Alike 2.0 Generic license.': 'CC-BY-SA-2.0',
    'Creative-Commons-Lizenz „Namensnennung – Weitergabe unter gleichen Bedingungen 2.0 Deutschland“': 'CC-BY-SA-2.0 de',
    'http://creativecommons.org/licenses/by-sa/2.0/de/legalcode': 'CC-BY-SA-2.0 de',
    'Creative-Commons-Lizenz „Namensnennung – Weitergabe unter gleichen Bedingungen 2.5 generisch“': 'CC-BY-SA-2.5',
    'Creative-Commons-Lizenzen „Namensnennung – Weitergabe unter gleichen Bedingungen 2.5 generisch“': 'CC-BY-SA-2.5',
    # If multiple versions available, use the first one
    # NOTE(review): the stray '-' in this id looks like a typo, but it matches
    # LICENSE_PREFERENCE_ORDER exactly -- rename both together or not at all.
    'Creative-Commons-Lizenzen „Namensnennung – Weitergabe unter gleichen Bedingungen 3.0 nicht portiert“': 'CC-BY-SA-3.0- unported',
    'http://creativecommons.org/licenses/by-sa/3.0/legalcode': 'CC-BY-SA-3.0 unported',
    'Creative-Commons-Lizenz „Namensnennung – Weitergabe unter gleichen Bedingungen 3.0 nicht portiert“': 'CC-BY-SA-3.0 unported',
    'Creative-Commons-Lizenz „Namensnennung – Weitergabe unter gleichen Bedingungen 3.0 Deutschland“': 'CC-BY-SA-3.0 de',
    'Creative-Commons-Lizenz „Namensnennung – Weitergabe unter gleichen Bedingungen 3.0 Österreich“': 'CC-BY-SA-3.0 at',
    'Creative-Commons-Lizenz „Namensnennung – Weitergabe unter gleichen Bedingungen 4.0 international“': 'CC-BY-SA-4.0 int',
}
# Most-preferred license first: when a file is offered under several
# licenses, parse_license returns the earliest entry of this list.
LICENSE_PREFERENCE_ORDER = [
    'public domain',
    'CC0 1.0',
    'CC-BY-SA-4.0 int',
    'CC-BY-4.0 int',
    'CC-BY-SA-3.0 de',
    'CC-BY-SA-3.0 unported',
    'CC-BY-SA-3.0 at',
    'CC-BY-SA-3.0- unported',
    'CC-BY-3.0 de',
    'CC-BY-3.0 unported',
    'CC-BY-SA-2.5',
    'CC-BY-SA-2.0 de',
    'CC-BY-SA-2.0',
    'CC-BY-2.0 de',
    'CC-BY-2.0',
    'GFDL 1.2+',
    'GFDL 1.2',
    'custom: attribution (FAL)',
    'custom: attribution',
]
def assert_license_sanity():
    """Every KNOWN_LICENSES id must also appear in LICENSE_PREFERENCE_ORDER."""
    # With that function name, I'm very willing to just write "assert False"
    for license_id in KNOWN_LICENSES.values():
        assert license_id in LICENSE_PREFERENCE_ORDER, license_id
# Parse "the" license of the file.
def parse_license(soup):
    """Return the preferred license id among all licenses found on the page.

    Every 'licensetpl' paragraph must match exactly one KNOWN_LICENSES
    snippet; of the collected ids, the best per LICENSE_PREFERENCE_ORDER
    wins.  Raises AssertionError on unknown or missing license paragraphs.
    """
    all_licenses = []
    for license_table in soup.find_all(None, 'licensetpl'): # not always a table
        license_text = license_table.get_text()
        found = False
        for text, lid in KNOWN_LICENSES.items():
            if text in license_text:
                assert not found, 'Multiple contradicting licenses within same paragraph?!'
                all_licenses.append(lid)
                found = True
                assert lid in LICENSE_PREFERENCE_ORDER, lid
                # Don't break, check for duplicates!
        assert found, license_text # If this fails, add a new entry in KNOWN_LICENSES
    assert len(all_licenses) > 0
    # Pick the most-preferred of all licenses offered.
    for l in LICENSE_PREFERENCE_ORDER:
        if l in all_licenses:
            return l
    # Unreachable: every collected id was asserted to be in the order list.
    assert False, all_licenses
def parse_img_url(soup):
    """Return the absolute URL of the full-resolution image file."""
    # The full-image link is protocol-relative, e.g.
    # //upload.wikimedia.org/wikipedia/commons/8/88/Portr%C3%A4t_Wolfgang_Hellmich.jpg
    anchor = soup.find('div', 'fullImageLink').find('a')
    return 'https:' + anchor['href']
# Returns an 'imgs' entry.
def get_img_desc(img_desc_url):
    """Fetch the image-description page and extract copyright/license/url."""
    desc_soup = as_soup(nice.get(img_desc_url))
    return {
        'copyright': parse_copyright(desc_soup),
        'license': parse_license(desc_soup),
        'url': parse_img_url(desc_soup),
    }
# Names whose article carries Wikipedia's "ambiguous name" icon (bksicon)
# but where the default article is already the intended person -- run()
# suppresses its ambiguity warning for these.
WHITELIST_AMBIGUOUS = {
    'Angela Merkel',
    'Cajus Caesar',
    'Carsten Müller',
    'Cem Özdemir',
    'Christian Schmidt', # There's another politician "B" of this name. I don't mean B.
    'Joachim Pfeiffer',
    'Kristina Schröder',
    'Sascha Raabe',
    'Thomas Feist',
    'Gernot Erler',
    'Jens Koeppen',
    'Johannes Kahrs',
    'Klaus Ernst',
    'Manfred Grund',
    'Michael Brand',
    'Peter Altmaier',
    'Sibylle Pfeiffer',
    'Stephan Mayer',
    'Ulla Schmidt',
    'Volker Beck',
}
def run():
    """Enrich aggregate_each.json entries with Wikipedia URL and image info.

    Writes the augmented entries to wikify_each.json.
    """
    with open("aggregate_each.json", 'r') as fp:
        entries = json.load(fp)
    for e in entries:
        name = e['name']
        orig_name = name
        # Strip abbreviated middle initials, e.g. "Karl A. Lamers" -> "Karl Lamers".
        # Raw string: '\.' is an invalid escape sequence in a normal string literal.
        name = re.sub(r' [A-ZÖÄÜ]\. ', ' ', name)
        if name != orig_name:
            print('[WARN] Sanitized name {} to {}'.format(orig_name, name))
        findings = get_page_for(name, e['party'])
        if findings is None:
            continue
        page_url, page_soup = findings
        e['srcs']['wiki'] = page_url
        # Warn about ambiguous names we haven't explicitly whitelisted.
        if page_soup.find(id='bksicon') is not None and name not in WHITELIST_AMBIGUOUS:
            print('[WARN] Name {} is ambiguous, but wasn\'t asked to choose'.format(name))
        img_desc_url = get_img_desc_link(name, page_soup)
        if img_desc_url is None:
            # No image? Okay :(
            continue
        e['imgs']['wiki'] = get_img_desc(img_desc_url)
    with open("wikify_each.json", 'w') as fp:
        json.dump(entries, fp, sort_keys=True, indent=2)
if __name__ == '__main__':
    # Sanity-check the license tables before doing any (slow) network work.
    assert_license_sanity()
    run()
    print('Done.')
| Schwenger/House-Of-Tweets | tools/PhotoMiner/wikify_each.py | wikify_each.py | py | 15,868 | python | en | code | 0 | github-code | 90 |
27997923331 | from jinja2 import Environment, FileSystemLoader, select_autoescape, meta, Template
from sql_gen.template_source import TemplateSource
from sql_gen.prompter import Prompter
from sql_gen.filter_loader import load_filters
import filters.description
from filters.description import DescriptionFilter
import os,sys
# NOTE(review): debug leftovers -- prints PYTHONPATH and appends a hard-coded
# developer path; presumably only needed on the author's machine.  Confirm
# before shipping.
user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
print(user_paths)
sys.path.append("/home/dgarcia/dev/python/em_dev_tools/sql_gen")
class TemplateOption(object):
    """A selectable template: a numeric id paired with the template's name."""

    def __init__(self, id, name):
        self.id = id
        self.name = name
class TemplateSelector():
    """Interactively lets the user pick one of the environment's templates."""

    def select_template(self, env):
        """Prompt the user for a template and return its TemplateSource."""
        names = env.list_templates(".sql")
        self.create_options(names)
        self.show_options()
        choice = self.prompt_to_select_template()
        chosen_name = self.get_option_by_id(choice).name
        return self.build_template_source(chosen_name, env)

    def create_options(self, template_list):
        # One TemplateOption per template, numbered from 0.
        self.template_option_list = [
            TemplateOption(index, template_name)
            for index, template_name in enumerate(template_list)
        ]
        return self.template_option_list

    def show_options(self):
        for option in self.template_option_list:
            print('{}. {}'.format(option.id, option.name))

    def prompt_to_select_template(self):
        # Keep asking (re-printing the menu) until a valid option id is typed.
        # NOTE: raw_input -- this module targets Python 2.
        choice = raw_input("Please select template to parse: ")
        while self.get_option_by_id(choice) is None:
            choice = raw_input("Please select template to parse: ")
            self.show_options()
        return choice

    def get_option_by_id(self, template_number):
        """Return the option whose id matches the typed string, or None."""
        for option in self.template_option_list:
            if str(option.id) == template_number:
                return option
        return None

    def build_template_source(self, template_name, env):
        """Load and parse the named template into a TemplateSource."""
        raw_source = env.loader.get_source(env, template_name)[0]
        template_source = TemplateSource(env.parse(raw_source))
        template_source.template_name = template_name
        return template_source
##main
# Script entry point: build the Jinja2 environment, let the user pick a
# template, collect the values it needs, and print the rendered SQL.
env = Environment(
    loader=FileSystemLoader('/home/dgarcia/dev/python/em_dev_tools/sql_gen/templates'),
    autoescape=select_autoescape(['html', 'xml']))
# Register custom template filters on the environment.
load_filters(env)
template_selector = TemplateSelector()
template_source= template_selector.select_template(env)
# Interactively build the rendering context for the selected template.
prompter = Prompter(template_source)
context = prompter.build_context()
template = env.get_template(template_source.template_name)
print(template.render(context))
| vecin2/em-dev-tools | build/lib.linux-x86_64-2.7/sql_gen/sql_gen.py | sql_gen.py | py | 2,683 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.