content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Test the comparison of sets
l = [1,2,3,4,1,1]
print l
s = set(l)
print s
print '# equal'
eq = set(l)
print eq
print '# forwards'
print s.isdisjoint(eq)
print s > eq
print s.issuperset(eq)
print s >= eq
print s == eq
print s != eq
print s.issubset(eq)
print s <= eq
print s < eq
print '# backwards'
print eq.isdisjoint(s)
print eq > s
print eq.issuperset(s)
print eq >= s
print eq == s
print eq != s
print eq.issubset(s)
print eq <= s
print eq < s
| [
2,
6208,
262,
7208,
286,
5621,
198,
75,
796,
685,
16,
11,
17,
11,
18,
11,
19,
11,
16,
11,
16,
60,
198,
4798,
300,
198,
82,
796,
900,
7,
75,
8,
198,
4798,
264,
198,
198,
4798,
705,
2,
4961,
6,
198,
27363,
796,
900,
7,
75,
... | 2.366492 | 191 |
import socket
# create a socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# get local machine name
host = socket.gethostname()
port = 9999
# connection to hostname on the port.
s.connect((host, port))
# Receive no more than 1024 bytes
msg = s.recv(1024)
s.close()
print (msg.decode('ascii'))
| [
11748,
17802,
201,
198,
2,
2251,
257,
17802,
2134,
201,
198,
82,
796,
17802,
13,
44971,
7,
44971,
13,
8579,
62,
1268,
2767,
11,
17802,
13,
50,
11290,
62,
2257,
32235,
8,
201,
198,
2,
651,
1957,
4572,
1438,
201,
198,
4774,
796,
178... | 2.7 | 120 |
import argparse
import os
import sqlite3
import requests
ROUTES_URL = 'http://api.bus.southampton.ac.uk/dump/routes'
STOPS_URL = 'http://api.bus.southampton.ac.uk/dump/stops'
OPERATORS_URL = 'http://api.bus.southampton.ac.uk/dump/operators'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--create_db", action='store_true')
parser.add_argument("--database", "-d", default="bus_finder.db")
args = parser.parse_args()
main(args.database, args.create_db)
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
44161,
578,
18,
198,
198,
11748,
7007,
198,
198,
49,
12425,
1546,
62,
21886,
796,
705,
4023,
1378,
15042,
13,
10885,
13,
35782,
23427,
13,
330,
13,
2724,
14,
39455,
14,
81,
448,
274,
... | 2.623116 | 199 |
# coding=utf-8
from uuid import UUID
from django.test import TestCase
from django_mptt_admin.util import get_tree_queryset, get_tree_from_queryset, get_javascript_value, serialize_id
from ..models import Country
from .utils import read_testdata
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
334,
27112,
1330,
471,
27586,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
42625,
14208,
62,
76,
457,
83,
62,
28482,
13,
22602,
1330,
651,
62,
21048,
62,
10819,
8... | 2.988095 | 84 |
from contextlib import contextmanager
from typing import Optional, TypeVar
from fastapi.concurrency import contextmanager_in_threadpool
from fastapi.exceptions import HTTPException
from fastapi.requests import Request
from fastapi.responses import Response
from fastapi_login.fastapi_login import LoginManager
from watch_n_learn.database.main import create_session
from watch_n_learn.database.models import User
from watch_n_learn.helper.environment import FASTAPI_LOGIN_TOKEN_VALUE
FASTAPI_LOGIN_COOKIE_NAME = "watch_n_learn-authentication-token"
_BaseResponse = TypeVar("_BaseResponse", bound=Response)
login_manager = LoginManager(
FASTAPI_LOGIN_TOKEN_VALUE, "/internal/sign-in",
cookie_name=FASTAPI_LOGIN_COOKIE_NAME
)
@login_manager.user_loader()
| [
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
19720,
1330,
32233,
11,
5994,
19852,
198,
198,
6738,
3049,
15042,
13,
1102,
34415,
1330,
4732,
37153,
62,
259,
62,
16663,
7742,
198,
6738,
3049,
15042,
13,
1069,
11755,
1330,
14626,
16922,
... | 3.259574 | 235 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
RoBERTa Model
@article{liu2019roberta,
title = {RoBERTa: A Robustly Optimized BERT Pretraining Approach},
author = {Yinhan Liu and Myle Ott and Naman Goyal and Jingfei Du and
Mandar Joshi and Danqi Chen and Omer Levy and Mike Lewis and
Luke Zettlemoyer and Veselin Stoyanov},
journal={arXiv preprint arXiv:1907.11692},
year = {2019},
}
"""
__all__ = ['RobertaModel', 'list_pretrained_roberta', 'get_pretrained_roberta']
from typing import Tuple
import os
import mxnet as mx
from mxnet import use_np
from mxnet.gluon import nn, HybridBlock
from .transformer import TransformerEncoderLayer
from ..base import get_model_zoo_home_dir, get_repo_model_zoo_url, get_model_zoo_checksum_dir
from ..utils.config import CfgNode as CN
from ..utils.registry import Registry
from ..utils.misc import load_checksum_stats, download
from ..initializer import TruncNorm
from ..attention_cell import gen_self_attn_mask
from ..registry import BACKBONE_REGISTRY
from ..layers import PositionalEmbedding, get_activation
from ..data.tokenizers import HuggingFaceByteBPETokenizer
PRETRAINED_URL = {
'fairseq_roberta_base': {
'cfg': 'fairseq_roberta_base/model-565d1db7.yml',
'merges': 'fairseq_roberta_base/gpt2-396d4d8e.merges',
'vocab': 'fairseq_roberta_base/gpt2-f1335494.vocab',
'params': 'fairseq_roberta_base/model-98b4532f.params'
},
'fairseq_roberta_large': {
'cfg': 'fairseq_roberta_large/model-6e66dc4a.yml',
'merges': 'fairseq_roberta_large/gpt2-396d4d8e.merges',
'vocab': 'fairseq_roberta_large/gpt2-f1335494.vocab',
'params': 'fairseq_roberta_large/model-e3f578dc.params'
},
'fairseq_roberta_large_mnli': {
'cfg': 'fairseq_roberta_large_mnli/model-6e66dc4a.yml',
'merges': 'fairseq_roberta_large_mnli/gpt2-396d4d8e.merges',
'vocab': 'fairseq_roberta_large_mnli/gpt2-f1335494.vocab',
'params': 'fairseq_roberta_large_mnli/model-5288bb09.params'
}
}
FILE_STATS = load_checksum_stats(os.path.join(get_model_zoo_checksum_dir(), 'roberta.txt'))
roberta_cfg_reg = Registry('roberta_cfg')
@roberta_cfg_reg.register()
@roberta_cfg_reg.register()
@use_np
@use_np
@use_np
def get_pretrained_roberta(model_name: str = 'fairseq_roberta_base',
root: str = get_model_zoo_home_dir()) \
-> Tuple[CN, HuggingFaceByteBPETokenizer, str]:
"""Get the pretrained RoBERTa weights
Parameters
----------
model_name
The name of the RoBERTa model.
root
The downloading root
Returns
-------
cfg
Network configuration
tokenizer
The HuggingFaceByteBPETokenizer
params_path
Path to the parameters
"""
assert model_name in PRETRAINED_URL, '{} is not found. All available are {}'.format(
model_name, list_pretrained_roberta())
cfg_path = PRETRAINED_URL[model_name]['cfg']
merges_path = PRETRAINED_URL[model_name]['merges']
vocab_path = PRETRAINED_URL[model_name]['vocab']
params_path = PRETRAINED_URL[model_name]['params']
local_paths = dict()
for k, path in [('cfg', cfg_path), ('vocab', vocab_path),
('merges', merges_path), ('params', params_path)]:
local_paths[k] = download(url=get_repo_model_zoo_url() + path,
path=os.path.join(root, path),
sha1_hash=FILE_STATS[path])
tokenizer = HuggingFaceByteBPETokenizer(local_paths['merges'], local_paths['vocab'])
cfg = RobertaModel.get_cfg().clone_merge(local_paths['cfg'])
return cfg, tokenizer, local_paths['params']
BACKBONE_REGISTRY.register('roberta', [RobertaModel,
get_pretrained_roberta,
list_pretrained_roberta])
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 2.377755 | 1,951 |
import torch
from .volumetric_rendering import *
from .generators import ImplicitGenerator3d
| [
11748,
28034,
198,
6738,
764,
10396,
388,
19482,
62,
13287,
278,
1330,
1635,
198,
6738,
764,
8612,
2024,
1330,
34347,
3628,
8645,
1352,
18,
67,
198
] | 3.576923 | 26 |
"""
This module is in charge of providing the source code corresponding to a 'code id'.
In this prototype, it returns a hardcoded example.
In the future, it should connect to an Aleph node and retrieve the code from there.
"""
import asyncio
import json
import hashlib
import logging
import os
import re
from os.path import isfile, join, abspath
from shutil import make_archive
import aiohttp
from aleph_message.models import ProgramMessage
from aleph_message.models.program import (
Encoding,
MachineVolume,
ImmutableVolume,
PersistentVolume,
VolumePersistence,
)
from .conf import settings
from firecracker.models import FilePath
logger = logging.getLogger(__name__)
| [
37811,
198,
1212,
8265,
318,
287,
3877,
286,
4955,
262,
2723,
2438,
11188,
284,
257,
705,
8189,
4686,
4458,
198,
198,
818,
428,
14879,
11,
340,
5860,
257,
1327,
40976,
1672,
13,
198,
818,
262,
2003,
11,
340,
815,
2018,
284,
281,
930... | 3.487562 | 201 |
from flask_babel import lazy_gettext as _
from flask_login import current_user
from wtforms.fields import TextAreaField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired
from ..forms.base import BaseObjectForm
from ..models import TastingNote
| [
6738,
42903,
62,
65,
9608,
1330,
16931,
62,
1136,
5239,
355,
4808,
198,
6738,
42903,
62,
38235,
1330,
1459,
62,
7220,
198,
6738,
266,
83,
23914,
13,
25747,
1330,
8255,
30547,
15878,
198,
6738,
266,
83,
23914,
13,
25747,
13,
6494,
20,
... | 3.723684 | 76 |
import pytest
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_X_y
from imblearn.base import BaseSampler
from imblearn.utils.estimator_checks import check_estimator
from imblearn.utils import check_target_type
class BaseBadSampler(BaseEstimator):
"""Sampler without inputs checking."""
_sampling_type = 'bypass'
class NotFittedSampler(BaseBadSampler):
"""Sampler without target checking."""
class NoAcceptingSparseSampler(BaseBadSampler):
"""Sampler which does not accept sparse matrix."""
class NotTransformingTargetOvR(BaseBadSampler):
"""Sampler which does not transform OvR enconding."""
@pytest.mark.filterwarnings("ignore:'y' should be of types")
@pytest.mark.filterwarnings("ignore: Can't check dok sparse matrix for nan")
@pytest.mark.parametrize(
'Estimator, err_type, err_msg',
[(BaseBadSampler, AssertionError, "TypeError not raised by fit"),
(NotFittedSampler, AssertionError, "No fitted attribute"),
(NoAcceptingSparseSampler, TypeError, "A sparse matrix was passed"),
(NotTransformingTargetOvR, ValueError, "bad input shape"),
(NotPreservingDtypeSampler, AssertionError, "X dytype is not preserved")]
)
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1341,
35720,
13,
8692,
1330,
7308,
22362,
320,
1352,
198,
6738,
1341,
35720,
13,
26791,
1330,
2198,
62,
55,
62,
88,
198,
198,
6738,
545,
903,
1501,
13,
8692,
1330... | 3.032338 | 402 |
import tensorflow as tf
| [
11748,
11192,
273,
11125,
355,
48700,
628,
198
] | 3.25 | 8 |
import shutil
| [
11748,
4423,
346,
198
] | 3.5 | 4 |
"""The Operator class defines acceptable operators.
It will be used to identify dialogue act item operator
"""
__author__ = 'Javeria Habib'
from enum import Enum
class Operator(Enum):
"""The Operator class defines acceptable operators.
It will be used to identify dialogue act item operator"""
EQ = 1
NE = 2
LT = 3
LE = 4
GT = 5
GE = 6
AND = 7
OR = 8
NOT = 9
IN = 10
BETWEEN = 11
def __str__(self):
"""
Prints the Operator
Returns:
a string representation of the Operator
"""
opr = 'UNK'
if self.name == 'EQ':
opr = '='
elif self.name == 'NE':
opr = '!='
elif self.name == 'LT':
opr = '<'
elif self.name == 'LE':
opr = '<='
elif self.name == 'GT':
opr = '>'
elif self.name == 'GE':
opr = '>='
elif self.name in ['AND', 'OR', 'NOT', 'IN', 'BETWEEN']:
opr = self.name
return f'{opr}'
| [
37811,
464,
35946,
1398,
15738,
10909,
12879,
13,
198,
1026,
481,
307,
973,
284,
5911,
10721,
719,
2378,
10088,
198,
37811,
198,
198,
834,
9800,
834,
796,
705,
41,
8770,
544,
19654,
571,
6,
198,
198,
6738,
33829,
1330,
2039,
388,
628,... | 2.02907 | 516 |
from sebs.cache import Cache
from sebs.faas.config import Credentials, Resources, Config
from sebs.utils import LoggingHandlers
from sebs.storage.config import MinioConfig
from typing import cast, Optional
| [
6738,
384,
1443,
13,
23870,
1330,
34088,
198,
6738,
384,
1443,
13,
13331,
292,
13,
11250,
1330,
327,
445,
14817,
11,
13864,
11,
17056,
198,
6738,
384,
1443,
13,
26791,
1330,
5972,
2667,
12885,
8116,
198,
6738,
384,
1443,
13,
35350,
13... | 3.684211 | 57 |
# Copyright (C) 2019-2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from typing import Any
import logging as log
import os.path as osp
from attrs import define
import pycocotools.mask as mask_utils
from datumaro.components.annotation import (
AnnotationType, Bbox, Caption, CompiledMask, Label, LabelCategories, Mask,
Points, PointsCategories, Polygon, RleMask,
)
from datumaro.components.extractor import (
DEFAULT_SUBSET_NAME, DatasetItem, SourceExtractor,
)
from datumaro.components.media import Image
from datumaro.util import parse_json_file, take_by
from datumaro.util.image import lazy_image, load_image
from datumaro.util.mask_tools import bgr2index
from datumaro.util.meta_file_util import has_meta_file, parse_meta_file
from .format import CocoPath, CocoTask
class _CocoExtractor(SourceExtractor):
"""
Parses COCO annotations written in the following format:
https://cocodataset.org/#format-data
"""
@staticmethod
@define
| [
2,
15069,
357,
34,
8,
13130,
12,
1238,
1828,
8180,
10501,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
198,
6738,
19720,
1330,
4377,
198,
11748,
18931,
355,
2604,
198,
11748,
28686,
13,
6978,
355,
267,
2777... | 3.012232 | 327 |
import json
from pathlib import Path
| [
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198
] | 4.111111 | 9 |
import sys
sys.path.append('../..')
from typing import Set, Dict, Tuple, Sequence, List, Any
from src.array._118_Pascal import *
num = 5
print(pascalTriangle(num))
assert(2 == pascalTriangle(num))
print("All pass") | [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
40720,
492,
11537,
198,
6738,
19720,
1330,
5345,
11,
360,
713,
11,
309,
29291,
11,
45835,
11,
7343,
11,
4377,
198,
6738,
12351,
13,
18747,
13557,
16817,
62,
47,
27747,
1330,
1635,
... | 2.868421 | 76 |
# _filter_static_genes.py
__module_name__ = "_filter_static_genes.py"
__author__ = ", ".join(["Michael E. Vinyard"])
__email__ = ", ".join(["vinyard@g.harvard.edu",])
# package imports #
# --------------- #
import licorice
import scipy.optimize
import matplotlib.pyplot as plt
import numpy as np
import vinplots
from ._calculate_running_quantile import _calculate_running_quantile
def _calculate_variability_score(
X, min_mean=0, n_bins=50, fit_percentile=0.1, error_weight=1
):
"""
Calculate v-score (above-Poisson noise statistic) for genes in the input sparse counts matrix
Return v-scores and other stats
"""
mu_gene, filtered_var_gene, gene_idx = _get_variable_gene(X, min_mean)
data_x = np.log(mu_gene)
data_y = np.log(filtered_var_gene / mu_gene)
x, y = _calculate_running_quantile(data_x, data_y, fit_percentile, n_bins)
g_log = lambda input: np.log(input[1] * np.exp(-input[0]) + input[2])
hist, bins = np.histogram(np.log(filtered_var_gene[mu_gene > 0]), bins=200)
bins = bins[:-1] + np.diff(bins) / 2
max_idx = np.argmax(hist)
c = np.max((np.exp(bins[max_idx]), 1))
error_func = lambda b2: np.sum(abs(g_log([x, c, b2]) - y) ** error_weight)
b = scipy.optimize.fmin(func=error_func, x0=[0.1], disp=False)
a = c / (1 + b) - 1
var_scores = filtered_var_gene / ((1 + a) * (1 + b) + b * mu_gene)
CV_input, CV_eff = np.sqrt((1 + a) * (1 + b) - 1), np.sqrt(b)
return var_scores, CV_eff, CV_input, gene_idx, mu_gene, filtered_var_gene, a, b
def _filter_static_genes(
adata,
base_idx=[],
min_var_score_percentile=85,
min_counts=3,
min_cells=3,
plot=True,
sample_name="Variable genes",
return_hv_genes=False,
):
"""
Filter genes by expression level and variability
Return list of filtered gene indices
"""
n_cells = adata.X.shape[0]
base_idx = _get_base_idx(base_idx, n_cells)
(
var_scores,
CV_eff,
CV_input,
gene_idx,
mu_gene,
filtered_var_gene,
a,
b,
) = _calculate_variability_score(
adata.X[base_idx, :], min_mean=0, n_bins=50, fit_percentile=0.1, error_weight=1
)
gene_idx = gene_idx[var_scores > 0]
mu_gene = mu_gene[var_scores > 0]
filtered_var_gene = filtered_var_gene[var_scores > 0]
min_var_score = np.percentile(var_scores, min_var_score_percentile)
idx = ((adata.X[:, gene_idx] >= min_counts).sum(0).A.squeeze() >= min_cells) & (
var_scores >= min_var_score
)
if plot:
_plot_var_score(filtered_var_gene, mu_gene, idx, a, b, sample_name)
highly_variable_genes_idx = gene_idx[idx]
n_var_genes = licorice.font_format(str(len(highly_variable_genes_idx)), ["BOLD"])
print("{} variable genes identified".format(n_var_genes))
_annotate_adata_highvar_genes(adata, highly_variable_genes_idx)
if return_hv_genes:
return highly_variable_genes_idx | [
198,
2,
4808,
24455,
62,
12708,
62,
5235,
274,
13,
9078,
198,
198,
834,
21412,
62,
3672,
834,
796,
45434,
24455,
62,
12708,
62,
5235,
274,
13,
9078,
1,
198,
834,
9800,
834,
796,
33172,
27071,
22179,
7,
14692,
13256,
412,
13,
569,
... | 2.157401 | 1,385 |
# -*- coding: utf-8 -*-
# Time: 2021-10-20 11:17
# Copyright (c) 2021
# author: Euraxluo
import unittest
from loguru import logger
import logging
import HTMLReport.src.tools.result as test_result # type:ignore
import HTMLReport.src.test_runner as test_runner # type:ignore
logger.getLogger = logging.getLogger # type:ignore
test_result.logging = logger
test_runner.logging = logger
if __name__ == '__main__':
import uvicorn # type:ignore
uvicorn.run("test_job_api:app")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
198,
2,
3862,
25,
33448,
12,
940,
12,
1238,
1367,
25,
1558,
198,
2,
15069,
357,
66,
8,
33448,
198,
2,
1772,
25,
36554,
897,
2290,
78,
198,
11748,
555,
715,
395,
19... | 2.778409 | 176 |
from typing import Optional, Dict
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, Independent
from ding.torch_utils import fc_block, noise_block, NoiseLinearLayer, MLP
from ding.rl_utils import beta_function_map
from ding.utils import lists_to_dicts, SequenceType
class DiscreteHead(nn.Module):
"""
Overview:
The ``DiscreteHead`` used to output discrete actions logit. \
Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
output ``logit``.
Interfaces:
``__init__``, ``forward``.
"""
def __init__(
self,
hidden_size: int,
output_size: int,
layer_num: int = 1,
activation: Optional[nn.Module] = nn.ReLU(),
norm_type: Optional[str] = None,
noise: Optional[bool] = False,
) -> None:
"""
Overview:
Init the ``DiscreteHead`` layers according to the provided arguments.
Arguments:
- hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``DiscreteHead``.
- output_size (:obj:`int`): The number of outputs.
- layer_num (:obj:`int`): The number of layers used in the network to compute Q value output.
- activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
- norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
for more details. Default ``None``.
- noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \
Default ``False``.
"""
super(DiscreteHead, self).__init__()
layer = NoiseLinearLayer if noise else nn.Linear
block = noise_block if noise else fc_block
self.Q = nn.Sequential(
MLP(
hidden_size,
hidden_size,
hidden_size,
layer_num,
layer_fn=layer,
activation=activation,
norm_type=norm_type
), block(hidden_size, output_size)
)
def forward(self, x: torch.Tensor) -> Dict:
"""
Overview:
Use encoded embedding tensor to run MLP with ``DiscreteHead`` and return the prediction dictionary.
Arguments:
- x (:obj:`torch.Tensor`): Tensor containing input embedding.
Returns:
- outputs (:obj:`Dict`): Dict containing keyword ``logit`` (:obj:`torch.Tensor`).
Shapes:
- x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
- logit: :math:`(B, M)`, where ``M = output_size``.
Examples:
>>> head = DiscreteHead(64, 64)
>>> inputs = torch.randn(4, 64)
>>> outputs = head(inputs)
>>> assert isinstance(outputs, dict) and outputs['logit'].shape == torch.Size([4, 64])
"""
logit = self.Q(x)
return {'logit': logit}
class DistributionHead(nn.Module):
"""
Overview:
The ``DistributionHead`` used to output Q-value distribution. \
Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
outputs ``logit`` and ``distribution``.
Interfaces:
``__init__``, ``forward``.
"""
def __init__(
self,
hidden_size: int,
output_size: int,
layer_num: int = 1,
n_atom: int = 51,
v_min: float = -10,
v_max: float = 10,
activation: Optional[nn.Module] = nn.ReLU(),
norm_type: Optional[str] = None,
noise: Optional[bool] = False,
eps: Optional[float] = 1e-6,
) -> None:
"""
Overview:
Init the ``DistributionHead`` layers according to the provided arguments.
Arguments:
- hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``DistributionHead``.
- output_size (:obj:`int`): The number of outputs.
- layer_num (:obj:`int`): The number of layers used in the network to compute Q value distribution.
- n_atom (:obj:`int`): The number of atoms (discrete supports). Default is ``51``.
- v_min (:obj:`int`): Min value of atoms. Default is ``-10``.
- v_max (:obj:`int`): Max value of atoms. Default is ``10``.
- activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
- norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
for more details. Default ``None``.
- noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \
Default ``False``.
- eps (:obj:`float`): Small constant used for numerical stability.
"""
super(DistributionHead, self).__init__()
layer = NoiseLinearLayer if noise else nn.Linear
block = noise_block if noise else fc_block
self.Q = nn.Sequential(
MLP(
hidden_size,
hidden_size,
hidden_size,
layer_num,
layer_fn=layer,
activation=activation,
norm_type=norm_type
), block(hidden_size, output_size * n_atom)
)
self.output_size = output_size
self.n_atom = n_atom
self.v_min = v_min
self.v_max = v_max
self.eps = eps # for numerical stability
def forward(self, x: torch.Tensor) -> Dict:
"""
Overview:
Use encoded embedding tensor to run MLP with ``DistributionHead`` and return the prediction dictionary.
Arguments:
- x (:obj:`torch.Tensor`): Tensor containing input embedding.
Returns:
- outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`) and \
``distribution`` (:obj:`torch.Tensor`).
Shapes:
- x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
- logit: :math:`(B, M)`, where ``M = output_size``.
- distribution: :math:`(B, M, n_atom)`.
Examples:
>>> head = DistributionHead(64, 64)
>>> inputs = torch.randn(4, 64)
>>> outputs = head(inputs)
>>> assert isinstance(outputs, dict)
>>> assert outputs['logit'].shape == torch.Size([4, 64])
>>> # default n_atom is 51
>>> assert outputs['distribution'].shape == torch.Size([4, 64, 51])
"""
q = self.Q(x)
q = q.view(*q.shape[:-1], self.output_size, self.n_atom)
dist = torch.softmax(q, dim=-1) + self.eps
q = dist * torch.linspace(self.v_min, self.v_max, self.n_atom).to(x)
q = q.sum(-1)
return {'logit': q, 'distribution': dist}
class RainbowHead(nn.Module):
"""
Overview:
The ``RainbowHead`` used to output Q-value distribution. \
Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
outputs ``logit`` and ``distribution``.
Interfaces:
``__init__``, ``forward``.
"""
def __init__(
self,
hidden_size: int,
output_size: int,
layer_num: int = 1,
n_atom: int = 51,
v_min: float = -10,
v_max: float = 10,
activation: Optional[nn.Module] = nn.ReLU(),
norm_type: Optional[str] = None,
noise: Optional[bool] = True,
eps: Optional[float] = 1e-6,
) -> None:
"""
Overview:
Init the ``RainbowHead`` layers according to the provided arguments.
Arguments:
- hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``RainbowHead``.
- output_size (:obj:`int`): The number of outputs.
- layer_num (:obj:`int`): The number of layers used in the network to compute Q value output.
- n_atom (:obj:`int`): The number of atoms (discrete supports). Default is ``51``.
- v_min (:obj:`int`): Min value of atoms. Default is ``-10``.
- v_max (:obj:`int`): Max value of atoms. Default is ``10``.
- activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
- norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
for more details. Default ``None``.
- noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \
Default ``False``.
- eps (:obj:`float`): Small constant used for numerical stability.
"""
super(RainbowHead, self).__init__()
layer = NoiseLinearLayer if noise else nn.Linear
block = noise_block if noise else fc_block
self.A = nn.Sequential(
MLP(
hidden_size,
hidden_size,
hidden_size,
layer_num,
layer_fn=layer,
activation=activation,
norm_type=norm_type
), block(hidden_size, output_size * n_atom)
)
self.Q = nn.Sequential(
MLP(
hidden_size,
hidden_size,
hidden_size,
layer_num,
layer_fn=layer,
activation=activation,
norm_type=norm_type
), block(hidden_size, n_atom)
)
self.output_size = output_size
self.n_atom = n_atom
self.v_min = v_min
self.v_max = v_max
self.eps = eps
def forward(self, x: torch.Tensor) -> Dict:
"""
Overview:
Use encoded embedding tensor to run MLP with ``RainbowHead`` and return the prediction dictionary.
Arguments:
- x (:obj:`torch.Tensor`): Tensor containing input embedding.
Returns:
- outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`) and \
``distribution`` (:obj:`torch.Tensor`).
Shapes:
- x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
- logit: :math:`(B, M)`, where ``M = output_size``.
- distribution: :math:`(B, M, n_atom)`.
Examples:
>>> head = RainbowHead(64, 64)
>>> inputs = torch.randn(4, 64)
>>> outputs = head(inputs)
>>> assert isinstance(outputs, dict)
>>> assert outputs['logit'].shape == torch.Size([4, 64])
>>> # default n_atom is 51
>>> assert outputs['distribution'].shape == torch.Size([4, 64, 51])
"""
a = self.A(x)
q = self.Q(x)
a = a.view(*a.shape[:-1], self.output_size, self.n_atom)
q = q.view(*q.shape[:-1], 1, self.n_atom)
q = q + a - a.mean(dim=-2, keepdim=True)
dist = torch.softmax(q, dim=-1) + self.eps
q = dist * torch.linspace(self.v_min, self.v_max, self.n_atom).to(x)
q = q.sum(-1)
return {'logit': q, 'distribution': dist}
class QRDQNHead(nn.Module):
"""
Overview:
The ``QRDQNHead`` (Quantile Regression DQN) used to output action quantiles. \
Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
output ``logit``, ``q``, and ``tau``.
Interfaces:
``__init__``, ``forward``.
"""
def __init__(
self,
hidden_size: int,
output_size: int,
layer_num: int = 1,
num_quantiles: int = 32,
activation: Optional[nn.Module] = nn.ReLU(),
norm_type: Optional[str] = None,
noise: Optional[bool] = False,
) -> None:
"""
Overview:
Init the ``QRDQNHead`` layers according to the provided arguments.
Arguments:
- hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``QRDQNHead``.
- output_size (:obj:`int`): The number of outputs.
- layer_num (:obj:`int`): The number of layers used in the network to compute Q value output.
- num_quantiles (:obj:`int`): The number of quantiles. Default is ``32``.
- activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
- norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
for more details. Default ``None``.
- noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \
Default ``False``.
"""
super(QRDQNHead, self).__init__()
layer = NoiseLinearLayer if noise else nn.Linear
block = noise_block if noise else fc_block
self.Q = nn.Sequential(
MLP(
hidden_size,
hidden_size,
hidden_size,
layer_num,
layer_fn=layer,
activation=activation,
norm_type=norm_type
), block(hidden_size, output_size * num_quantiles)
)
self.num_quantiles = num_quantiles
self.output_size = output_size
def forward(self, x: torch.Tensor) -> Dict:
"""
Overview:
Use encoded embedding tensor to run MLP with ``QRDQNHead`` and return the prediction dictionary.
Arguments:
- x (:obj:`torch.Tensor`): Tensor containing input embedding.
Returns:
- outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`), \
``q`` (:obj:`torch.Tensor`), and ``tau`` (:obj:`torch.Tensor`).
Shapes:
- x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
- logit: :math:`(B, M)`, where ``M = output_size``.
- q: :math:`(B, M, num_quantiles)`.
- tau: :math:`(B, M, 1)`.
Examples:
>>> head = QRDQNHead(64, 64)
>>> inputs = torch.randn(4, 64)
>>> outputs = head(inputs)
>>> assert isinstance(outputs, dict)
>>> assert outputs['logit'].shape == torch.Size([4, 64])
>>> # default num_quantiles is 32
>>> assert outputs['q'].shape == torch.Size([4, 64, 32])
>>> assert outputs['tau'].shape == torch.Size([4, 32, 1])
"""
q = self.Q(x)
q = q.view(*q.shape[:-1], self.output_size, self.num_quantiles)
logit = q.mean(-1)
tau = torch.linspace(0, 1, self.num_quantiles + 1)
tau = ((tau[:-1] + tau[1:]) / 2).view(1, -1, 1).repeat(q.shape[0], 1, 1).to(q)
return {'logit': logit, 'q': q, 'tau': tau}
class QuantileHead(nn.Module):
"""
Overview:
The ``QuantileHead`` used to output action quantiles. \
Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
output ``logit``, ``q``, and ``quantiles``.
Interfaces:
``__init__``, ``forward``, ``quantile_net``.
"""
def __init__(
self,
hidden_size: int,
output_size: int,
layer_num: int = 1,
num_quantiles: int = 32,
quantile_embedding_size: int = 128,
beta_function_type: Optional[str] = 'uniform',
activation: Optional[nn.Module] = nn.ReLU(),
norm_type: Optional[str] = None,
noise: Optional[bool] = False,
) -> None:
"""
Overview:
Init the ``QuantileHead`` layers according to the provided arguments.
Arguments:
- hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``QuantileHead``.
- output_size (:obj:`int`): The number of outputs.
- layer_num (:obj:`int`): The number of layers used in the network to compute Q value output.
- num_quantiles (:obj:`int`): The number of quantiles.
- quantile_embedding_size (:obj:`int`): The embedding size of a quantile.
- beta_function_type (:obj:`str`): Type of beta function. See ``ding.rl_utils.beta_function.py`` \
for more details. Default is ``uniform``.
- activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
- norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
for more details. Default ``None``.
- noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \
Default ``False``.
"""
super(QuantileHead, self).__init__()
layer = NoiseLinearLayer if noise else nn.Linear
block = noise_block if noise else fc_block
self.Q = nn.Sequential(
MLP(
hidden_size,
hidden_size,
hidden_size,
layer_num,
layer_fn=layer,
activation=activation,
norm_type=norm_type
), block(hidden_size, output_size)
)
self.num_quantiles = num_quantiles
self.quantile_embedding_size = quantile_embedding_size
self.output_size = output_size
self.iqn_fc = nn.Linear(self.quantile_embedding_size, hidden_size)
self.beta_function = beta_function_map[beta_function_type]
def quantile_net(self, quantiles: torch.Tensor) -> torch.Tensor:
"""
Overview:
Deterministic parametric function trained to reparameterize samples from a base distribution. \
By repeated Bellman update iterations of Q-learning, the optimal action-value function is estimated.
Arguments:
- x (:obj:`torch.Tensor`): The encoded embedding tensor of parametric sample.
Returns:
- quantile_net (:obj:`torch.Tensor`): Quantile network output tensor after reparameterization.
Shapes:
- quantile_net :math:`(quantile_embedding_size, M)`, where ``M = output_size``.
Examples:
>>> head = QuantileHead(64, 64)
>>> quantiles = torch.randn(128,1)
>>> qn_output = head.quantile_net(quantiles)
>>> assert isinstance(qn_output, torch.Tensor)
>>> # default quantile_embedding_size: int = 128,
>>> assert qn_output.shape == torch.Size([128, 64])
"""
quantile_net = quantiles.repeat([1, self.quantile_embedding_size])
quantile_net = torch.cos(
torch.arange(1, self.quantile_embedding_size + 1, 1).to(quantiles) * math.pi * quantile_net
)
quantile_net = self.iqn_fc(quantile_net)
quantile_net = F.relu(quantile_net)
return quantile_net
def forward(self, x: torch.Tensor, num_quantiles: Optional[int] = None) -> Dict:
"""
Overview:
Use encoded embedding tensor to run MLP with ``QuantileHead`` and return the prediction dictionary.
Arguments:
- x (:obj:`torch.Tensor`): Tensor containing input embedding.
Returns:
- outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`), \
``q`` (:obj:`torch.Tensor`), and ``quantiles`` (:obj:`torch.Tensor`).
Shapes:
- x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
- logit: :math:`(B, M)`, where ``M = output_size``.
- q: :math:`(num_quantiles, B, M)`.
- quantiles: :math:`(quantile_embedding_size, 1)`.
Examples:
>>> head = QuantileHead(64, 64)
>>> inputs = torch.randn(4, 64)
>>> outputs = head(inputs)
>>> assert isinstance(outputs, dict)
>>> assert outputs['logit'].shape == torch.Size([4, 64])
>>> # default num_quantiles is 32
>>> assert outputs['q'].shape == torch.Size([32, 4, 64])
>>> assert outputs['quantiles'].shape == torch.Size([128, 1])
"""
if num_quantiles is None:
num_quantiles = self.num_quantiles
batch_size = x.shape[0]
q_quantiles = torch.FloatTensor(num_quantiles * batch_size, 1).uniform_(0, 1).to(x)
logit_quantiles = torch.FloatTensor(num_quantiles * batch_size, 1).uniform_(0, 1).to(x)
logit_quantiles = self.beta_function(logit_quantiles)
q_quantile_net = self.quantile_net(q_quantiles)
logit_quantile_net = self.quantile_net(logit_quantiles)
x = x.repeat(num_quantiles, 1)
q_x = x * q_quantile_net # 4*32,64
logit_x = x * logit_quantile_net
q = self.Q(q_x).reshape(num_quantiles, batch_size, -1)
logit = self.Q(logit_x).reshape(num_quantiles, batch_size, -1).mean(0)
return {'logit': logit, 'q': q, 'quantiles': q_quantiles}
class FQFHead(nn.Module):
    """
    Overview:
        The ``FQFHead`` used to output action quantiles. \
        Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
        output ``logit``, ``q``, ``quantiles``, ``quantiles_hats``, ``q_tau_i`` and ``entropies``.
    Interfaces:
        ``__init__``, ``forward``, ``quantile_net``.
    """

    def __init__(
            self,
            hidden_size: int,
            output_size: int,
            layer_num: int = 1,
            num_quantiles: int = 32,
            quantile_embedding_size: int = 128,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None,
            noise: Optional[bool] = False,
    ) -> None:
        """
        Overview:
            Init the ``FQFHead`` layers according to the provided arguments.
        Arguments:
            - hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``FQFHead``.
            - output_size (:obj:`int`): The number of outputs.
            - layer_num (:obj:`int`): The number of layers used in the network to compute Q value output.
            - num_quantiles (:obj:`int`): The number of quantiles.
            - quantile_embedding_size (:obj:`int`): The embedding size of a quantile.
            - activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
                If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
            - norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
                for more details. Default ``None``.
            - noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \
                Default ``False``.
        """
        super(FQFHead, self).__init__()
        layer = NoiseLinearLayer if noise else nn.Linear
        block = noise_block if noise else fc_block
        self.Q = nn.Sequential(
            MLP(
                hidden_size,
                hidden_size,
                hidden_size,
                layer_num,
                layer_fn=layer,
                activation=activation,
                norm_type=norm_type
            ), block(hidden_size, output_size)
        )
        self.num_quantiles = num_quantiles
        self.quantile_embedding_size = quantile_embedding_size
        self.output_size = output_size
        self.fqf_fc = nn.Sequential(nn.Linear(self.quantile_embedding_size, hidden_size), nn.ReLU())
        # Constant cosine frequencies i * pi, i = 1..quantile_embedding_size; registered as a
        # buffer so it moves together with the module across devices.
        self.register_buffer(
            'sigma_pi',
            torch.arange(1, self.quantile_embedding_size + 1, 1).view(1, 1, self.quantile_embedding_size) * math.pi
        )
        # initialize weights_xavier of quantiles_proposal network
        quantiles_proposal_fc = nn.Linear(hidden_size, num_quantiles)
        torch.nn.init.xavier_uniform_(quantiles_proposal_fc.weight, gain=0.01)
        torch.nn.init.constant_(quantiles_proposal_fc.bias, 0)
        self.quantiles_proposal = nn.Sequential(quantiles_proposal_fc, nn.LogSoftmax(dim=1))

    def quantile_net(self, quantiles: torch.Tensor) -> torch.Tensor:
        """
        Overview:
            Deterministic parametric function trained to reparameterize samples from the quantiles_proposal network. \
            By repeated Bellman update iterations of Q-learning, the optimal action-value function is estimated.
        Arguments:
            - quantiles (:obj:`torch.Tensor`): The quantile fractions to embed, shape ``(B, num_quantiles)``.
        Returns:
            - quantile_net (:obj:`torch.Tensor`): Quantile network output tensor after reparameterization, \
                shape ``(B, num_quantiles, hidden_size)``.
        Examples:
            >>> head = FQFHead(64, 64)
            >>> quantiles = torch.randn(4,32)
            >>> qn_output = head.quantile_net(quantiles)
            >>> assert isinstance(qn_output, torch.Tensor)
            >>> # default quantile_embedding_size: int = 128,
            >>> assert qn_output.shape == torch.Size([4, 32, 64])
        """
        batch_size, num_quantiles = quantiles.shape[:2]
        # Cosine embedding of each fraction, then a linear + ReLU projection to hidden_size.
        quantile_net = torch.cos(self.sigma_pi.to(quantiles) * quantiles.view(batch_size, num_quantiles, 1))
        quantile_net = self.fqf_fc(quantile_net)  # (batch_size, num_quantiles, hidden_size)
        return quantile_net

    def forward(self, x: torch.Tensor, num_quantiles: Optional[int] = None) -> Dict:
        """
        Overview:
            Use encoded embedding tensor to run MLP with ``FQFHead`` and return the prediction dictionary.
        Arguments:
            - x (:obj:`torch.Tensor`): Tensor containing input embedding.
            - num_quantiles (:obj:`int`): Accepted for API uniformity with the other heads; the proposal \
                network always emits ``self.num_quantiles`` fractions, so this argument is not used further.
        Returns:
            - outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`), \
                ``q`` (:obj:`torch.Tensor`), ``quantiles`` (:obj:`torch.Tensor`), \
                ``quantiles_hats`` (:obj:`torch.Tensor`), \
                ``q_tau_i`` (:obj:`torch.Tensor`), ``entropies`` (:obj:`torch.Tensor`).
        Shapes:
            - x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
            - logit: :math:`(B, M)`, where ``M = output_size``.
            - q: :math:`(B, num_quantiles, M)`.
            - quantiles: :math:`(B, num_quantiles + 1)`.
            - quantiles_hats: :math:`(B, num_quantiles)`.
            - q_tau_i: :math:`(B, num_quantiles - 1, M)`.
            - entropies: :math:`(B, 1)`.
        Examples:
            >>> head = FQFHead(64, 64)
            >>> inputs = torch.randn(4, 64)
            >>> outputs = head(inputs)
            >>> assert isinstance(outputs, dict)
            >>> assert outputs['logit'].shape == torch.Size([4, 64])
            >>> # default num_quantiles is 32
            >>> assert outputs['q'].shape == torch.Size([4, 32, 64])
            >>> assert outputs['quantiles'].shape == torch.Size([4, 33])
            >>> assert outputs['quantiles_hats'].shape == torch.Size([4, 32])
            >>> assert outputs['q_tau_i'].shape == torch.Size([4, 31, 64])
            >>> assert outputs['entropies'].shape == torch.Size([4, 1])
        """
        if num_quantiles is None:
            num_quantiles = self.num_quantiles
        batch_size = x.shape[0]

        log_q_quantiles = self.quantiles_proposal(
            x.detach()
        )  # (batch_size, num_quantiles), not to update encoder when learning w1_loss(fraction loss)
        q_quantiles = log_q_quantiles.exp()

        # Calculate entropies of value distributions.
        entropies = -(log_q_quantiles * q_quantiles).sum(dim=-1, keepdim=True)  # (batch_size, 1)
        assert entropies.shape == (batch_size, 1)

        # cumulative sum of the softmax probabilities -> monotone quantile fractions in (0, 1]
        q_quantiles = torch.cumsum(q_quantiles, dim=1)

        # quantile_hats: find the optimal condition for τ to minimize W1(Z, τ)
        tau_0 = torch.zeros((batch_size, 1)).to(x)
        q_quantiles = torch.cat((tau_0, q_quantiles), dim=1)  # [batch_size, num_quantiles+1]

        # midpoints of adjacent fractions; detached so gradients reach the proposal network only
        # through its dedicated fraction loss
        q_quantiles_hats = (q_quantiles[:, 1:] + q_quantiles[:, :-1]).detach() / 2.  # (batch_size, num_quantiles)

        q_quantile_net = self.quantile_net(q_quantiles_hats)  # [batch_size, num_quantiles, hidden_size(64)]
        # x.view[batch_size, 1, hidden_size(64)]
        q_x = (x.view(batch_size, 1, -1) * q_quantile_net)  # [batch_size, num_quantiles, hidden_size(64)]

        q = self.Q(q_x)  # [batch_size, num_quantiles, action_dim(64)]

        logit = q.mean(1)
        with torch.no_grad():
            q_tau_i_net = self.quantile_net(
                q_quantiles[:, 1:-1].detach()
            )  # [batch_size, num_quantiles-1, hidden_size(64)]
            q_tau_i_x = (x.view(batch_size, 1, -1) * q_tau_i_net)  # [batch_size, (num_quantiles-1), hidden_size(64)]

            q_tau_i = self.Q(q_tau_i_x)  # [batch_size, num_quantiles-1, action_dim(64)]

        return {
            'logit': logit,
            'q': q,
            'quantiles': q_quantiles,
            'quantiles_hats': q_quantiles_hats,
            'q_tau_i': q_tau_i,
            'entropies': entropies
        }
class DuelingHead(nn.Module):
    """
    Overview:
        The ``DuelingHead`` used to output discrete actions logit. \
        Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
        output ``logit``.
    Interfaces:
        ``__init__``, ``forward``.
    """

    def __init__(
            self,
            hidden_size: int,
            output_size: int,
            layer_num: int = 1,
            a_layer_num: Optional[int] = None,
            v_layer_num: Optional[int] = None,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None,
            noise: Optional[bool] = False,
    ) -> None:
        """
        Overview:
            Init the ``DuelingHead`` layers according to the provided arguments.
        Arguments:
            - hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``DuelingHead``.
            - output_size (:obj:`int`): The number of outputs.
            - layer_num (:obj:`int`): The default number of layers for both the advantage and the value \
                networks when ``a_layer_num`` / ``v_layer_num`` are not given.
            - a_layer_num (:obj:`int`): The number of layers used in the network to compute action output. \
                Default is ``layer_num``.
            - v_layer_num (:obj:`int`): The number of layers used in the network to compute value output. \
                Default is ``layer_num``.
            - activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
                If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
            - norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
                for more details. Default ``None``.
            - noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \
                Default ``False``.
        """
        super(DuelingHead, self).__init__()
        if a_layer_num is None:
            a_layer_num = layer_num
        if v_layer_num is None:
            v_layer_num = layer_num
        layer = NoiseLinearLayer if noise else nn.Linear
        block = noise_block if noise else fc_block
        # Advantage stream: one output per action.
        self.A = nn.Sequential(
            MLP(
                hidden_size,
                hidden_size,
                hidden_size,
                a_layer_num,
                layer_fn=layer,
                activation=activation,
                norm_type=norm_type
            ), block(hidden_size, output_size)
        )
        # Value stream: a single scalar per state.
        self.V = nn.Sequential(
            MLP(
                hidden_size,
                hidden_size,
                hidden_size,
                v_layer_num,
                layer_fn=layer,
                activation=activation,
                norm_type=norm_type
            ), block(hidden_size, 1)
        )

    def forward(self, x: torch.Tensor) -> Dict:
        """
        Overview:
            Use encoded embedding tensor to run MLP with ``DuelingHead`` and return the prediction dictionary.
        Arguments:
            - x (:obj:`torch.Tensor`): Tensor containing input embedding.
        Returns:
            - outputs (:obj:`Dict`): Dict containing keyword ``logit`` (:obj:`torch.Tensor`).
        Shapes:
            - x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
            - logit: :math:`(B, M)`, where ``M = output_size``.
        Examples:
            >>> head = DuelingHead(64, 64)
            >>> inputs = torch.randn(4, 64)
            >>> outputs = head(inputs)
            >>> assert isinstance(outputs, dict)
            >>> assert outputs['logit'].shape == torch.Size([4, 64])
        """
        a = self.A(x)
        v = self.V(x)
        # Dueling aggregation: Q = V + (A - mean(A)), i.e. mean-subtracted advantage plus state value.
        q_value = a - a.mean(dim=-1, keepdim=True) + v
        return {'logit': q_value}
class StochasticDuelingHead(nn.Module):
    """
    Overview:
        The ``Stochastic Dueling Network`` proposed in paper ACER (arxiv 1611.01224). \
        Dueling network architecture in continuous action space. \
        Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
        outputs ``q_value`` and ``v_value``.
    Interfaces:
        ``__init__``, ``forward``.
    """

    def __init__(
            self,
            hidden_size: int,
            action_shape: int,
            layer_num: int = 1,
            a_layer_num: Optional[int] = None,
            v_layer_num: Optional[int] = None,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None,
            noise: Optional[bool] = False,
            last_tanh: Optional[bool] = True,
    ) -> None:
        """
        Overview:
            Init the ``Stochastic DuelingHead`` layers according to the provided arguments.
        Arguments:
            - hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``StochasticDuelingHead``.
            - action_shape (:obj:`int`): The number of continuous action shape, usually integer value.
            - layer_num (:obj:`int`): The number of default layers used in the network to compute action and value \
                output.
            - a_layer_num (:obj:`int`): The number of layers used in the network to compute action output. Default is \
                ``layer_num``.
            - v_layer_num (:obj:`int`): The number of layers used in the network to compute value output. Default is \
                ``layer_num``.
            - activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
                If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
            - norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
                for more details. Default ``None``.
            - noise (:obj:`bool`): Whether use ``NoiseLinearLayer`` as ``layer_fn`` in Q networks' MLP. \
                Default ``False``.
            - last_tanh (:obj:`bool`): If ``True`` Apply ``tanh`` to actions. Default ``True``.
        """
        super(StochasticDuelingHead, self).__init__()
        if a_layer_num is None:
            a_layer_num = layer_num
        if v_layer_num is None:
            v_layer_num = layer_num
        layer = NoiseLinearLayer if noise else nn.Linear
        block = noise_block if noise else fc_block
        # Advantage network A(s, a): consumes the state embedding concatenated with an action.
        self.A = nn.Sequential(
            MLP(
                hidden_size + action_shape,
                hidden_size,
                hidden_size,
                a_layer_num,
                layer_fn=layer,
                activation=activation,
                norm_type=norm_type
            ), block(hidden_size, 1)
        )
        # Value network V(s): consumes the state embedding only.
        self.V = nn.Sequential(
            MLP(
                hidden_size,
                hidden_size,
                hidden_size,
                v_layer_num,
                layer_fn=layer,
                activation=activation,
                norm_type=norm_type
            ), block(hidden_size, 1)
        )
        if last_tanh:
            self.tanh = nn.Tanh()
        else:
            self.tanh = None

    def forward(
            self,
            s: torch.Tensor,
            a: torch.Tensor,
            mu: torch.Tensor,
            sigma: torch.Tensor,
            sample_size: int = 10,
    ) -> Dict[str, torch.Tensor]:
        """
        Overview:
            Use encoded embedding tensor to run MLP with ``StochasticDuelingHead`` and return the prediction dictionary.
        Arguments:
            - s (:obj:`torch.Tensor`): Tensor containing input embedding.
            - a (:obj:`torch.Tensor`): The original continuous behaviour action.
            - mu (:obj:`torch.Tensor`): The ``mu`` gaussian reparameterization output of actor head at current \
                timestep.
            - sigma (:obj:`torch.Tensor`): The ``sigma`` gaussian reparameterization output of actor head at \
                current timestep.
            - sample_size (:obj:`int`): The number of samples for continuous action when computing the Q value.
        Returns:
            - outputs (:obj:`Dict`): Dict containing keywords \
                ``q_value`` (:obj:`torch.Tensor`) and ``v_value`` (:obj:`torch.Tensor`).
        Shapes:
            - s: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
            - a: :math:`(B, A)`, where ``A = action_size``.
            - mu: :math:`(B, A)`.
            - sigma: :math:`(B, A)`.
            - q_value: :math:`(B, 1)`.
            - v_value: :math:`(B, 1)`.
        """
        batch_size = s.shape[0]  # batch_size or batch_size * T
        hidden_size = s.shape[1]
        state_cat_action = torch.cat((s, a), dim=1)  # size (B, action_size + state_size)
        a_value = self.A(state_cat_action)  # size (B, 1)
        v_value = self.V(s)  # size (B, 1)
        # size (B, sample_size, hidden_size)
        expand_s = (torch.unsqueeze(s, 1)).expand((batch_size, sample_size, hidden_size))

        # in case for gradient back propagation
        dist = Independent(Normal(mu, sigma), 1)
        action_sample = dist.rsample(sample_shape=(sample_size, ))
        if self.tanh:
            action_sample = self.tanh(action_sample)
        # (sample_size, B, action_size)->(B, sample_size, action_size)
        action_sample = action_sample.permute(1, 0, 2)

        # size (B, sample_size, action_size + hidden_size)
        state_cat_action_sample = torch.cat((expand_s, action_sample), dim=-1)
        a_val_sample = self.A(state_cat_action_sample)  # size (B, sample_size, 1)
        # Monte-Carlo baseline: average advantage over actions sampled from the current policy.
        q_value = v_value + a_value - a_val_sample.mean(dim=1)  # size (B, 1)

        return {'q_value': q_value, 'v_value': v_value}
class RegressionHead(nn.Module):
    """
    Overview:
        The ``RegressionHead`` used to output actions Q-value.
        Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
        output ``pred``.
    Interfaces:
        ``__init__``, ``forward``.
    """

    def __init__(
            self,
            hidden_size: int,
            output_size: int,
            layer_num: int = 2,
            final_tanh: Optional[bool] = False,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None
    ) -> None:
        """
        Overview:
            Build the regression head: an MLP trunk followed by a linear output layer and an \
            optional ``tanh`` squashing.
        Arguments:
            - hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``RegressionHead``.
            - output_size (:obj:`int`): The number of outputs.
            - layer_num (:obj:`int`): The number of layers used in the network to compute Q value output.
            - final_tanh (:obj:`bool`): Whether to apply ``tanh`` to the output. Default ``False``.
            - activation (:obj:`nn.Module`): The activation function used in the MLP. Default is ``nn.ReLU()``.
            - norm_type (:obj:`str`): The normalization passed through to ``MLP``. \
                See ``ding.torch_utils.network.fc_block`` for more details. Default ``None``.
        """
        super(RegressionHead, self).__init__()
        self.main = MLP(hidden_size, hidden_size, hidden_size, layer_num, activation=activation, norm_type=norm_type)
        # Kept as a separate layer so callers can apply special initialization to it.
        self.last = nn.Linear(hidden_size, output_size)
        self.final_tanh = final_tanh
        if self.final_tanh:
            self.tanh = nn.Tanh()

    def forward(self, x: torch.Tensor) -> Dict:
        """
        Overview:
            Run the regression head on an encoded embedding and return ``{'pred': ...}``.
        Arguments:
            - x (:obj:`torch.Tensor`): Tensor containing input embedding.
        Returns:
            - outputs (:obj:`Dict`): Dict containing keyword ``pred`` (:obj:`torch.Tensor`).
        Shapes:
            - x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
            - pred: :math:`(B, M)`, where ``M = output_size``.
        Examples:
            >>> head = RegressionHead(64, 64)
            >>> inputs = torch.randn(4, 64)
            >>> outputs = head(inputs)
            >>> assert isinstance(outputs, dict)
            >>> assert outputs['pred'].shape == torch.Size([4, 64])
        """
        pred = self.last(self.main(x))
        if self.final_tanh:
            pred = self.tanh(pred)
        # Collapse a trailing singleton dimension so scalar targets come back as (B, ).
        if pred.ndim > 1 and pred.shape[-1] == 1:
            pred = pred.squeeze(-1)
        return {'pred': pred}
class ReparameterizationHead(nn.Module):
    """
    Overview:
        The ``ReparameterizationHead`` used to output action ``mu`` and ``sigma``.
        Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
        outputs ``mu`` and ``sigma``.
    Interfaces:
        ``__init__``, ``forward``.
    """
    default_sigma_type = ['fixed', 'independent', 'conditioned']
    default_bound_type = ['tanh', None]

    def __init__(
            self,
            hidden_size: int,
            output_size: int,
            layer_num: int = 2,
            sigma_type: Optional[str] = None,
            fixed_sigma_value: Optional[float] = 1.0,
            activation: Optional[nn.Module] = nn.ReLU(),
            norm_type: Optional[str] = None,
            bound_type: Optional[str] = None,
    ) -> None:
        """
        Overview:
            Init the ``ReparameterizationHead`` layers according to the provided arguments.
        Arguments:
            - hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to ``ReparameterizationHead``.
            - output_size (:obj:`int`): The number of outputs.
            - layer_num (:obj:`int`): The number of layers used in the network to compute Q value output.
            - sigma_type (:obj:`str`): Sigma type used. Choose among \
                ``['fixed', 'independent', 'conditioned']``. Default is ``None``.
            - fixed_sigma_value (:obj:`float`): When choosing ``fixed`` type, the tensor ``output['sigma']`` \
                is filled with this input value. Default is ``1.0``.
            - activation (:obj:`nn.Module`): The type of activation function to use in MLP. \
                If ``None``, then default set activation to ``nn.ReLU()``. Default ``None``.
            - norm_type (:obj:`str`): The type of normalization to use. See ``ding.torch_utils.network.fc_block`` \
                for more details. Default ``None``.
            - bound_type (:obj:`str`): Bound type to apply to output ``mu``. Choose among ``['tanh', None]``. \
                Default is ``None``.
        """
        super(ReparameterizationHead, self).__init__()
        self.sigma_type = sigma_type
        assert sigma_type in self.default_sigma_type, "Please indicate sigma_type as one of {}".format(
            self.default_sigma_type
        )
        self.bound_type = bound_type
        assert bound_type in self.default_bound_type, "Please indicate bound_type as one of {}".format(
            self.default_bound_type
        )
        self.main = MLP(hidden_size, hidden_size, hidden_size, layer_num, activation=activation, norm_type=norm_type)
        self.mu = nn.Linear(hidden_size, output_size)
        if self.sigma_type == 'fixed':
            # Plain tensor attribute (not a registered buffer): excluded from state_dict;
            # forward moves it to the right device on each use.
            self.sigma = torch.full((1, output_size), fixed_sigma_value)
        elif self.sigma_type == 'independent':  # independent parameter
            self.log_sigma_param = nn.Parameter(torch.zeros(1, output_size))
        elif self.sigma_type == 'conditioned':
            self.log_sigma_layer = nn.Linear(hidden_size, output_size)

    def forward(self, x: torch.Tensor) -> Dict:
        """
        Overview:
            Use encoded embedding tensor to run MLP with ``ReparameterizationHead`` and return the prediction \
            dictionary.
        Arguments:
            - x (:obj:`torch.Tensor`): Tensor containing input embedding.
        Returns:
            - outputs (:obj:`Dict`): Dict containing keywords ``mu`` (:obj:`torch.Tensor`) and ``sigma`` \
                (:obj:`torch.Tensor`).
        Shapes:
            - x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
            - mu: :math:`(B, M)`, where ``M = output_size``.
            - sigma: :math:`(B, M)`.
        Examples:
            >>> head = ReparameterizationHead(64, 64, sigma_type='fixed')
            >>> inputs = torch.randn(4, 64)
            >>> outputs = head(inputs)
            >>> assert isinstance(outputs, dict)
            >>> assert outputs['mu'].shape == torch.Size([4, 64])
            >>> assert outputs['sigma'].shape == torch.Size([4, 64])
        """
        x = self.main(x)
        mu = self.mu(x)
        if self.bound_type == 'tanh':
            mu = torch.tanh(mu)
        if self.sigma_type == 'fixed':
            sigma = self.sigma.to(mu.device) + torch.zeros_like(mu)  # addition aims to broadcast shape
        elif self.sigma_type == 'independent':
            log_sigma = self.log_sigma_param + torch.zeros_like(mu)  # addition aims to broadcast shape
            sigma = torch.exp(log_sigma)
        elif self.sigma_type == 'conditioned':
            log_sigma = self.log_sigma_layer(x)
            # Clamp keeps sigma in a numerically safe range before exponentiation.
            sigma = torch.exp(torch.clamp(log_sigma, -20, 2))
        return {'mu': mu, 'sigma': sigma}
class MultiHead(nn.Module):
    """
    Overview:
        The ``MultiHead`` used to output actions logit. \
        Input is a (:obj:`torch.Tensor`) of shape ``(B, N)`` and returns a (:obj:`Dict`) containing \
        output ``logit``.
    Interfaces:
        ``__init__``, ``forward``.
    """

    def __init__(self, head_cls: type, hidden_size: int, output_size_list: SequenceType, **head_kwargs) -> None:
        """
        Overview:
            Init the ``MultiHead`` layers according to the provided arguments.
        Arguments:
            - head_cls (:obj:`type`): The class of head, choose among [``DuelingHead``, ``DistributionHead``, \
                ``QuantileHead``, ...].
            - hidden_size (:obj:`int`): The ``hidden_size`` of the MLP connected to the ``Head``.
            - output_size_list (:obj:`SequenceType`): Sequence of ``output_size`` for multi discrete action, \
                e.g. ``[2, 3, 5]``.
            - head_kwargs: (:obj:`dict`): Dict containing class-specific arguments, forwarded to every sub-head.
        """
        super(MultiHead, self).__init__()
        # One independent sub-head per action dimension, all sharing the same input embedding.
        self.pred = nn.ModuleList()
        for size in output_size_list:
            self.pred.append(head_cls(hidden_size, size, **head_kwargs))

    def forward(self, x: torch.Tensor) -> Dict:
        """
        Overview:
            Use encoded embedding tensor to run MLP with ``MultiHead`` and return the prediction dictionary.
        Arguments:
            - x (:obj:`torch.Tensor`): Tensor containing input embedding.
        Returns:
            - outputs (:obj:`Dict`): Dict containing keywords ``logit`` (:obj:`torch.Tensor`) \
                corresponding to the logit of each ``output`` each accessed at ``['logit'][i]``.
        Shapes:
            - x: :math:`(B, N)`, where ``B = batch_size`` and ``N = hidden_size``.
            - logit: :math:`(B, Mi)`, where ``Mi = output_size`` corresponding to output ``i``.
        Examples:
            >>> head = MultiHead(DuelingHead, 64, [2, 3, 5], v_layer_num=2)
            >>> inputs = torch.randn(4, 64)
            >>> outputs = head(inputs)
            >>> assert isinstance(outputs, dict)
            >>> # output_size_list is [2, 3, 5] as set
            >>> # Therefore each dim of logit is as follows
            >>> outputs['logit'][0].shape
            >>> torch.Size([4, 2])
            >>> outputs['logit'][1].shape
            >>> torch.Size([4, 3])
            >>> outputs['logit'][2].shape
            >>> torch.Size([4, 5])
        """
        # Run every sub-head on the shared embedding and merge the per-head dicts into one
        # dict of lists (e.g. outputs['logit'][i] is the logit of sub-head i).
        return lists_to_dicts([m(x) for m in self.pred])
# Registry mapping head type names to head classes; keys are the strings accepted by
# model builders that select a head implementation by name.
head_cls_map: Dict[str, type] = {
    # discrete
    'discrete': DiscreteHead,
    'dueling': DuelingHead,
    'sdn': StochasticDuelingHead,
    'distribution': DistributionHead,
    'rainbow': RainbowHead,
    'qrdqn': QRDQNHead,
    'quantile': QuantileHead,
    # continuous
    'regression': RegressionHead,
    'reparameterization': ReparameterizationHead,
    # multi
    'multi': MultiHead,
}
| [
6738,
19720,
1330,
32233,
11,
360,
713,
198,
198,
11748,
10688,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
17080,
2455,
507,
1330,
14435,
11,
... | 2.096265 | 23,695 |
# !/usr/bin/env python
# -*- coding:utf-8 -*-
import mmh3
from bitarray import bitarray
from random import randint
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
11748,
8085,
71,
18,
198,
198,
6738,
1643,
18747,
1330,
1643,
18747,
198,
6738,
4738,
1330,
43720,
600,
628,
198
] | 2.704545 | 44 |
from django.contrib import admin
from blog.models import Category, Article, Comment, Contact
admin.site.register(Category)
@admin.register(Article)
@admin.register(Comment)
@admin.register(Contact)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
4130,
13,
27530,
1330,
21743,
11,
10172,
11,
18957,
11,
14039,
198,
198,
28482,
13,
15654,
13,
30238,
7,
27313,
8,
628,
198,
31,
28482,
13,
30238,
7,
14906,
8,
628,
198... | 3.433333 | 60 |
from model import *
| [
6738,
2746,
1330,
1635,
628
] | 4.2 | 5 |
# -*- coding: utf-8 -*-
from openprocurement.api.utils import (
get_now,
context_unpack,
json_view,
set_ownership,
APIResource,
raise_operation_error,
)
from openprocurement.tender.core.utils import save_tender, optendersresource, apply_patch
from openprocurement.tender.core.validation import validate_complaint_data, validate_patch_complaint_data
from openprocurement.tender.belowthreshold.utils import check_tender_status
from openprocurement.tender.belowthreshold.validation import (
validate_update_complaint_not_in_allowed_status,
validate_add_complaint_not_in_allowed_tender_status,
validate_update_complaint_not_in_allowed_tender_status,
)
@optendersresource(
name="belowThreshold:Tender Complaints",
collection_path="/tenders/{tender_id}/complaints",
path="/tenders/{tender_id}/complaints/{complaint_id}",
procurementMethodType="belowThreshold",
description="Tender complaints",
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
1280,
36942,
495,
434,
13,
15042,
13,
26791,
1330,
357,
198,
220,
220,
220,
651,
62,
2197,
11,
198,
220,
220,
220,
4732,
62,
403,
8002,
11,
198,
220,
220,
220,... | 2.80826 | 339 |
import wanakana
import csv
| [
11748,
266,
272,
461,
2271,
198,
11748,
269,
21370,
628,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198
] | 1.782609 | 23 |
import pygame
| [
11748,
12972,
6057,
198
] | 3.5 | 4 |
# -*- coding: utf-8 -*-
from css import style

# Build the style manager and load the fixture stylesheet.
manager = style.CsStyleManager()
stylesheet = manager.link(open('test.css').read())

# Small document tree: two nested paragraphs, an anchor inside the inner one, and a heading.
root_p = P('paragraph1')
note_p = P('p2', root_p, 'note')
link_a = A('a1', note_p)
cap_h2 = H2('caption', root_p)

# Cascade / inheritance results.
assert manager.calc(root_p, stylesheet) == {'color': 'red', 'font-family': None, u'margin': u'0'}
assert manager.calc(note_p, stylesheet) == {'font-family': None, u'background': u'yellow', 'color': 'red', u'font-weight': u'bold', u'margin': u'0'}
assert manager.calc(link_a, stylesheet) == {'font-weight': u'normal', u'background': u'yellow', 'color': 'red', 'font-family': None, u'margin': u'0', 'text-decoration': 'blink'}
assert manager.calcPseudo(link_a, ["hover"], stylesheet, inheritance=False) == {'hover': {u'text-decoration': u'none'}}

# Selector matching: descendant, child (>) and pseudo-class combinators.
walker = style.SelectorWalker
assert walker.isapplicable("P#paragraph1 P.note A", link_a) == True
assert walker.isapplicable("P#paragraph1 A", link_a) == True
assert walker.isapplicable("P A", link_a) == True
assert walker.isapplicable("P P#paragraph1 P.note A", link_a) == False
assert walker.isapplicable("P#paragraph1 H2 .note A", link_a) == False
assert walker.isapplicable("P#paragraph1 .note H2", cap_h2) == False
assert walker.isapplicable("P#paragraph1 H2", cap_h2) == True
assert walker.isapplicable("A", link_a) == True
assert walker.isapplicable("#a1", link_a) == True
assert walker.isapplicable(".note", note_p) == True
assert walker.isapplicable("P .note", note_p) == True
assert walker.isapplicable("P > A", link_a) == True
assert walker.isapplicable("P#paragraph1 > A", link_a) == False
assert walker.isapplicable("P#paragraph1 > P A", link_a) == True
assert walker.isapplicable("P A", style.PseudoNode("hover", link_a)) == False
assert walker.isapplicable("P A:hover", style.PseudoNode("hover", link_a)) == True
assert walker.isapplicable("P A:hover:eee", style.PseudoNode("hover:eee", link_a)) == True
assert walker.isapplicable("P A:hover:eee", style.PseudoNode("hover:eee", link_a)) == True
assert walker.isapplicable("P#paragraph1 P A:hover:eee", style.PseudoNode("hover:eee", link_a)) == True

#print style.SelectorWalker.isapplicable("P#paragraph1 P", link_a, enable_debug=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
269,
824,
1330,
3918,
628,
628,
628,
198,
5796,
796,
3918,
13,
32274,
21466,
13511,
3419,
198,
38785,
796,
895,
13,
8726,
7,
9654,
10786,
9288,
13,
25471,
27691,
... | 2.679623 | 849 |
from collections import defaultdict
from typing import Any, Dict, List, Optional
from unittest import TestCase
from unittest.mock import MagicMock, patch
from openslides_backend.services.datastore.commands import GetManyRequest
from openslides_backend.services.datastore.extended_adapter import (
ExtendedDatastoreAdapter,
)
from openslides_backend.shared.patterns import Collection
from openslides_backend.shared.typing import DeletedModel
from tests.util import get_fqid
| [
6738,
17268,
1330,
4277,
11600,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
11,
8529,
198,
198,
6738,
9808... | 3.446043 | 139 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Setup section of an obstacle-feedback script: optionally connects to a BeagleBone Green
# (BBG) device over its wireless interface and opens a local UDP socket that receives
# obstacle distances (the comments below reference Unity as the sender).
import socket, struct
import select
import time
import json
import sys
import os
# Distances below this value (unit of the incoming data) count as "close to an obstacle".
DISTANCE_THRESHOLD = 0.5
# Upper bound for a motor command value.
MAXIMUM_MOTOR_INPUT = 99
# Set to False to run without the BBG hardware attached (the UDP part still works).
with_connection = True
if with_connection:
    print("Establishing the connection to the BBG device...")
else:
    print("Ignoring the connection...")
if with_connection:
    # Make the project's Interface package importable relative to this script's location.
    sys.path.insert(1, os.path.join(sys.path[0], '../Interface/src'))
    from connections.beagleboneGreenWirelessConnection import BeagleboneGreenWirelessConnection
    ######## Setup BBG connection #######
    c = BeagleboneGreenWirelessConnection()
    # PWM driver on I2C bus 1 -- presumably a PCA9685 motor/servo board; verify against wiring.
    I2C_interface = "PCA9685@I2C[1]"
    c.connect()
    print('Status: {}'.format(c.getState()))
    # Configure a 50 Hz duty frequency, wait for the device to apply it, then disable scanning.
    c.sendMessages([json.dumps({"type": "Settings", "name": I2C_interface, "dutyFrequency": '50 Hz'})])
    time.sleep(3)
    c.sendMessages([json.dumps({"type": "Settings", "name": I2C_interface, "scan": False})])
    #####################################
############# setup UDP communication #############
# function to get the data from Unity
# local IP. Do not change that
UDP_IP = "127.0.0.1"
# socket to which data is being received
UDP_PORT_DISTANCES = 8051
# open the receiving socket
distances_socket = socket.socket(socket.AF_INET, # Internet
                      socket.SOCK_DGRAM) # UDP
distances_socket.bind((UDP_IP, UDP_PORT_DISTANCES))
##################################################
# Latest distance reading per obstacle direction, updated by the main loop.
distances_dict = { "frontObstacle" : 0,
                   "backObstacle" : 0,
                   "upObstacle" : 0,
                   "downObstacle" : 0,
                   "leftObstacle" : 0,
                   "rightObstacle" : 0 }
# Opposite direction for each orientation (the main loop also checks the facing side).
opposites = { "frontObstacle" : "backObstacle",
              "backObstacle" : "frontObstacle",
              "upObstacle" : "downObstacle",
              "downObstacle" : "upObstacle",
              "leftObstacle" : "rightObstacle",
              "rightObstacle" : "leftObstacle"}
# Per-orientation channel index -- presumably the PCA9685 motor channel; confirm against hardware.
motorsIndexes = { "frontObstacle" : 4,
                  "backObstacle" : 3,
                  "upObstacle" : 9,
                  "downObstacle" : 1,
                  "leftObstacle" : 0,
                  "rightObstacle" : 5 }
# MAIN LOOP
while(True):
distances = get_data(distances_socket)
# had to sleep otherwise hardware overwhelmed
time.sleep(0.05)
if len(distances):
print("acquired distances, total number = ", len(distances))
# send only the last packet otherwise too many packets sent too fast
packet = distances[-1]
# 6 floats
strs = 'ffffff'
# unpack.
unpacked = struct.unpack(strs, packet)
# parse the data
fillDict(unpacked)
print(distances_dict)
for orientation in distances_dict.keys():
if with_connection:
# if close enough to a wall
if (distances_dict[orientation] < DISTANCE_THRESHOLD):
if(distances_dict[opposites[orientation]] < DISTANCE_THRESHOLD):
# take difference. Ignore if negative
value = (distances_dict[orientation] * (-MAXIMUM_MOTOR_INPUT/DISTANCE_THRESHOLD) + MAXIMUM_MOTOR_INPUT) \
- (distances_dict[opposites[orientation]] * (-MAXIMUM_MOTOR_INPUT/DISTANCE_THRESHOLD) + MAXIMUM_MOTOR_INPUT)
if (value < 0):
continue
else:
c.sendMessages([json.dumps({"dim": motorsIndexes[orientation], "value": value, "type": "Set", "name": I2C_interface})])
else:
# make the motors vibrate
value = distances_dict[orientation] * (-MAXIMUM_MOTOR_INPUT/DISTANCE_THRESHOLD) + MAXIMUM_MOTOR_INPUT # affine transformation
c.sendMessages([json.dumps({"dim": motorsIndexes[orientation], "value": value, "type": "Set", "name": I2C_interface})])
else:
# reset motors
c.sendMessages([json.dumps({"dim": motorsIndexes[orientation], "value": 0, "type": "Set", "name": I2C_interface})])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
17802,
11,
2878,
198,
11748,
2922,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
28686,
... | 2.088323 | 2,004 |
import pandas as pd
import sys
import os
import csv
DiagnosisTrackerfilename="DiagnosisTracker.csv"
if __name__ == "__main__":
images_path=sys.argv[1] #Path of all images folder
check_new_file_and_update_in_csv(images_path) #check new image file and update in CSV
print("CSV File checked and updated")
y=showCSVData(DiagnosisTrackerfilename) # show existing CSV data
startImageTracking() #start tracking images
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
198,
18683,
4660,
5958,
35694,
34345,
2625,
18683,
4660,
5958,
35694,
13,
40664,
1,
220,
628,
628,
220,
220,
220,
220,
628,
198,
198,
... | 2.544974 | 189 |
from __future__ import unicode_literals
from django.apps import AppConfig
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.75 | 20 |
"""Test the Eshop class."""
import pytest
from pytest_homeassistant_custom_component.async_mock import AsyncMock, Mock, patch
from custom_components.nintendo_wishlist.eshop import (
NO_BOX_ART_URL,
EShop,
get_percent_off,
)
@pytest.fixture
def client_mock():
"""Pytest fixture to mock the algoliasearch client."""
client = Mock()
games = [
{
"boxart": "image.png",
"msrp": 24.99,
"nsuid": 70010000531,
"salePrice": 9.99,
"title": "Picross",
},
{
"boxart": "image.png",
"msrp": 14.99,
"nsuid": 70010000532,
"salePrice": 8.24,
"title": "Aggelos",
},
]
client.multiple_queries_async = AsyncMock(
return_value={"results": [{"hits": games, "nbPages": 1}]}
)
return client
def test_get_percent_off():
"""Test get_percent_off returns correct value."""
original_price = 14.99
sale_price = 8.24
assert 45 == get_percent_off(original_price, sale_price)
def test_init_sets_correct_fetch_method():
"""Test fetch_method is set correctly based on country."""
wishlist = ["title1"]
eshop = EShop("US", Mock(), wishlist)
assert eshop.fetch_na == eshop.fetch_method
eshop = EShop("DE", Mock(), wishlist)
assert eshop.fetch_eu == eshop.fetch_method
def test_get_na_switch_game_no_box_art_value_error():
"""Test we use no box art url if the games doesn't have a box art url."""
wishlist = ["title1"]
eshop = EShop("US", Mock(), wishlist)
game = {
"msrp": 14.99,
"nsuid": 70010000532,
"salePrice": 8.24,
"title": "Aggelos",
}
expected = {
"box_art_url": NO_BOX_ART_URL,
"normal_price": "$14.99",
"nsuid": 70010000532,
"percent_off": 45,
"sale_price": "$8.24",
"title": "Aggelos",
}
assert expected == eshop.get_na_switch_game(game)
def test_get_na_switch_game_bad_prefix_value_error():
"""Test no box art url is used if the game has the wrong extension."""
wishlist = ["title1"]
eshop = EShop("US", Mock(), wishlist)
game = {
"boxart": "image.exe",
"msrp": 14.99,
"nsuid": 70010000532,
"salePrice": 8.24,
"title": "Aggelos",
}
expected = {
"box_art_url": NO_BOX_ART_URL,
"normal_price": "$14.99",
"nsuid": 70010000532,
"percent_off": 45,
"sale_price": "$8.24",
"title": "Aggelos",
}
assert expected == eshop.get_na_switch_game(game)
def test_get_na_switch_game_success():
"""Test we return the expected SwitchGame instance from the response."""
wishlist = ["title1"]
eshop = EShop("US", Mock(), wishlist)
game = {
"boxart": "image.png",
"msrp": 14.99,
"nsuid": 70010000532,
"salePrice": 8.24,
"title": "Aggelos",
}
expected = {
"box_art_url": "https://www.nintendo.comimage.png",
"normal_price": "$14.99",
"nsuid": 70010000532,
"percent_off": 45,
"sale_price": "$8.24",
"title": "Aggelos",
}
assert expected == eshop.get_na_switch_game(game)
async def test__get_page(client_mock):
"""Test _get_page returns the expected results."""
wishlist = ["Aggelos"]
eshop = EShop("US", Mock(), wishlist)
actual = await eshop._get_page(client_mock, [{"params": "f=1"}])
expected = {
"games": {
70010000532: {
"box_art_url": "https://www.nintendo.comimage.png",
"normal_price": "$14.99",
"nsuid": 70010000532,
"percent_off": 45,
"sale_price": "$8.24",
"title": "Aggelos",
}
},
"num_pages": 1,
}
assert expected == actual
async def test_fetch_na(client_mock):
"""Test the fetch_na method returns the expected results."""
wishlist = ["Aggelos"]
eshop = EShop("US", Mock(), wishlist)
with patch(
"custom_components.nintendo_wishlist.eshop.SearchClient.create"
) as create:
create.return_value.__aenter__.return_value = client_mock
actual = await eshop.fetch_na()
expected = {
70010000532: {
"box_art_url": "https://www.nintendo.comimage.png",
"normal_price": "$14.99",
"nsuid": 70010000532,
"percent_off": 45,
"sale_price": "$8.24",
"title": "Aggelos",
}
}
assert expected == actual
async def test__get_eu_page():
"""Test the _get_eu_page method returns the expected result."""
response = {
"response": {
"numFound": 2,
"docs": [
{
"title": "Aggelos",
"image_url": "//nintendo.com/image.png",
"nsuid_txt": ["70010000532"],
"price_discount_percentage_f": 10,
},
{
"title": "Resident Evil",
"image_url": "//nintendo.com/image.png",
"nsuid_txt": ["70010000531"],
"price_discount_percentage_f": 60,
},
],
}
}
wishlist = ["Aggelos"]
session_mock = AsyncMock()
resp_mock = AsyncMock()
resp_mock.json = AsyncMock(return_value=response)
session_mock.get.return_value.__aenter__.return_value = resp_mock
eshop = EShop("DE", session_mock, wishlist)
actual = await eshop._get_eu_page()
expected = {
"games": {
70010000532: {
"box_art_url": "https://nintendo.com/image.png",
"nsuid": 70010000532,
"percent_off": 10,
"title": "Aggelos",
}
},
"num_pages": 1,
}
assert expected == actual
def test_get_eu_switch_game():
"""Test the get_eu_switch_game method returns the expected result."""
wishlist = ["Aggelos"]
eshop = EShop("DE", Mock(), wishlist)
game = {
"title": "Aggelos",
"image_url": "//nintendo.com/image.png",
"nsuid_txt": ["70010000532"],
"price_discount_percentage_f": 10,
}
actual = eshop.get_eu_switch_game(game)
expected = {
"box_art_url": "https://nintendo.com/image.png",
"nsuid": 70010000532,
"percent_off": 10,
"title": "Aggelos",
}
assert expected == actual
def test_get_eu_switch_game_with_https_prefix_on_image_url():
"""Regression test for when the image_url actually has a protocol."""
wishlist = ["Aggelos"]
eshop = EShop("DE", Mock(), wishlist)
game = {
"title": "Aggelos",
"image_url": "https://nintendo.com/image.png",
"nsuid_txt": ["70010000532"],
"price_discount_percentage_f": 10,
}
actual = eshop.get_eu_switch_game(game)
expected = {
"box_art_url": "https://nintendo.com/image.png",
"nsuid": 70010000532,
"percent_off": 10,
"title": "Aggelos",
}
assert expected == actual
async def test_fetch_eu():
"""Test the fetch_eu method returns the expected result."""
page_response = {
"games": {
70010000532: {
"box_art_url": "https://nintendo.com/image.png",
"nsuid": 70010000532,
"percent_off": 10,
"title": "Aggelos",
}
},
"num_pages": 1,
}
pricing_response = {
"prices": [
{
"title_id": 70010000532,
"regular_price": {"amount": 24.99},
"discount_price": {"amount": 8.24},
}
]
}
mock_resp = AsyncMock()
mock_resp.json = AsyncMock(return_value=pricing_response)
wishlist = ["Aggelos"]
session_mock = AsyncMock()
session_mock.get.return_value.__aenter__.return_value = mock_resp
eshop = EShop("DE", session_mock, wishlist)
eshop._get_eu_page = AsyncMock(return_value=page_response)
actual = await eshop.fetch_eu()
expected = {
70010000532: {
"box_art_url": "https://nintendo.com/image.png",
"nsuid": 70010000532,
"percent_off": 10,
"title": "Aggelos",
"normal_price": 24.99,
"sale_price": 8.24,
}
}
assert expected == actual
| [
37811,
14402,
262,
412,
24643,
1398,
526,
15931,
198,
11748,
12972,
9288,
198,
6738,
12972,
9288,
62,
11195,
562,
10167,
62,
23144,
62,
42895,
13,
292,
13361,
62,
76,
735,
1330,
1081,
13361,
44,
735,
11,
44123,
11,
8529,
198,
198,
673... | 1.977596 | 4,285 |
expected_output = {
"domain_number":{
0:{
"message_event_ip_dscp":59,
"message_general_ip_dscp":47,
"profile_state":"disabled",
"profile_type":"DEFAULT",
"transport_method":"802.3"
}
}
}
| [
40319,
62,
22915,
796,
1391,
198,
220,
220,
366,
27830,
62,
17618,
1298,
90,
198,
220,
220,
220,
220,
220,
657,
29164,
198,
220,
220,
220,
220,
220,
220,
220,
220,
366,
20500,
62,
15596,
62,
541,
62,
67,
1416,
79,
1298,
3270,
11,
... | 1.893939 | 132 |
import numpy as np
import tools21cm as t2c
box_dims = 200
dims = [128,128,128]
gauss = np.random.normal(loc=0., scale=1., size=dims)
kbins = 10
mubins = 2
def test_cross_power_spectrum_1d():
'''
With this test, cross_power_spectrum_nd and radial_average are also test.
'''
pp, kk = t2c.cross_power_spectrum_1d(gauss, gauss, kbins=kbins, box_dims=box_dims)
slope = (np.log10(pp*kk**3/2/np.pi**2)[kbins-3]-np.log10(pp*kk**3/2/np.pi**2)[3])/(np.log10(kk)[kbins-3]-np.log10(kk)[3])
assert np.abs(slope-3)<=0.1
def test_cross_power_spectrum_mu():
'''
With this test, cross_power_spectrum_nd and mu_binning are also test.
'''
pp, mm, kk = t2c.cross_power_spectrum_mu(gauss, gauss, kbins=kbins, mubins=mubins, box_dims=box_dims)
slope = (np.log10(pp[0,:]*kk**3/2/np.pi**2)[kbins-3]-np.log10(pp[0,:]*kk**3/2/np.pi**2)[3])/(np.log10(kk)[kbins-3]-np.log10(kk)[3])
assert np.abs(slope-3)<=0.1
def test_power_spectrum_1d():
'''
With this test, power_spectrum_nd and radial_average are also test.
'''
pp, kk = t2c.power_spectrum_1d(gauss, kbins=kbins, box_dims=box_dims)
slope = (np.log10(pp*kk**3/2/np.pi**2)[kbins-3]-np.log10(pp*kk**3/2/np.pi**2)[3])/(np.log10(kk)[kbins-3]-np.log10(kk)[3])
assert np.abs(slope-3)<=0.1
def test_dimensionless_ps():
'''
With this test, power_spectrum_nd and radial_average are also test.
'''
dd, kk = t2c.dimensionless_ps(gauss, kbins=kbins, box_dims=box_dims)
slope = (np.log10(dd)[kbins-3]-np.log10(dd)[3])/(np.log10(kk)[kbins-3]-np.log10(kk)[3])
assert np.abs(slope-3)<=0.25
def test_power_spectrum_mu():
'''
With this test, power_spectrum_nd and mu_binning are also test.
'''
pp, mm, kk = t2c.power_spectrum_mu(gauss, kbins=kbins, mubins=mubins, box_dims=box_dims)
slope = (np.log10(pp[0,:]*kk**3/2/np.pi**2)[kbins-3]-np.log10(pp[0,:]*kk**3/2/np.pi**2)[3])/(np.log10(kk)[kbins-3]-np.log10(kk)[3])
assert np.abs(slope-3)<=0.1
| [
11748,
299,
32152,
355,
45941,
220,
198,
11748,
4899,
2481,
11215,
355,
256,
17,
66,
220,
198,
198,
3524,
62,
67,
12078,
796,
939,
198,
67,
12078,
220,
220,
796,
685,
12762,
11,
12762,
11,
12762,
60,
198,
4908,
1046,
220,
796,
45941... | 2.003158 | 950 |
# MIT License
#
# Copyright (c) 2019 Michael J Simms. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def is_point_in_polygon(point, poly):
"""Implements the ray casting/crossing number algorithm. Returns TRUE if the point is within the bounds of the points that specify the polygon (poly is a list of points)."""
# Sanity checks.
if not isinstance(poly, list):
return False
num_crossings = 0
num_vertices = len(poly)
if num_vertices < 3: # Need at least three points to make a polygon
return False
test_x = point['x']
test_y = point['y']
for i in range(0, num_vertices):
# Cache the y coordinate for the first point on the edge.
poly_pt = poly[i]
poly_pt1_y = poly_pt['y']
# Cache the second point on the edge, handling the wrap around that happens when we close the polygon.
if i == num_vertices - 1:
poly_pt = poly[0]
poly_pt2_x = poly_pt['x']
poly_pt2_y = poly_pt['y']
else:
poly_pt = poly[i + 1]
poly_pt2_x = poly_pt['x']
poly_pt2_y = poly_pt['y']
# Test if the point is within the y limits of the edge.
crosses_y = ((poly_pt1_y <= test_y) and (poly_pt2_y > test_y)) or ((poly_pt1_y > test_y) and (poly_pt2_y <= test_y))
if crosses_y:
# Test if the ray extending to the right of the point crosses the edge.
poly_pt1_x = (poly[i])['x']
if test_x < poly_pt1_x + ((test_y - poly_pt1_y) / (poly_pt2_y - poly_pt1_y)) * (poly_pt2_x - poly_pt1_x):
num_crossings = num_crossings + 1
return num_crossings & 1
def is_point_in_poly_array(test_x, test_y, poly):
"""Implements the ray casting/crossing number algorithm. Returns TRUE if the point is within the bounds of the points that specify the polygon (poly is a list of points)."""
# Sanity checks.
if not isinstance(poly, list):
return False
num_crossings = 0
num_vertices = len(poly)
if num_vertices < 3: # Need at least three points to make a polygon
return False
for i in range(0, num_vertices):
# Cache the y coordinate for the first point on the edge.
poly_pt = poly[i]
if len(poly_pt) != 2:
return False
poly_pt1_y = poly_pt[1]
# Cache the second point on the edge, handling the wrap around that happens when we close the polygon.
if i == num_vertices - 1:
poly_pt = poly[0]
poly_pt2_x = poly_pt[0]
poly_pt2_y = poly_pt[1]
else:
poly_pt = poly[i + 1]
poly_pt2_x = poly_pt[0]
poly_pt2_y = poly_pt[1]
# Test if the point is within the y limits of the edge.
crosses_y = ((poly_pt1_y <= test_y) and (poly_pt2_y > test_y)) or ((poly_pt1_y > test_y) and (poly_pt2_y <= test_y))
if crosses_y:
# Test if the ray extending to the right of the point crosses the edge.
poly_pt1_x = (poly[i])[0]
if test_x < poly_pt1_x + ((test_y - poly_pt1_y) / (poly_pt2_y - poly_pt1_y)) * (poly_pt2_x - poly_pt1_x):
num_crossings = num_crossings + 1
return num_crossings & 1
| [
2,
220,
17168,
13789,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
13130,
3899,
449,
3184,
907,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
... | 2.468894 | 1,736 |
import unittest
from datetime import datetime
import numpy as np
import netCDF4 as nc
from ocgis.api.operations import OcgOperations
from ocgis.util.shp_cabinet import ShpCabinet
from shapely.geometry.polygon import Polygon
from ocgis import env
from ocgis.api.interpreter import OcgInterpreter
from ocgis.util.inspect import Inspect
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test360.test_high_res']
unittest.main() | [
11748,
555,
715,
395,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2010,
34,
8068,
19,
355,
299,
66,
198,
6738,
267,
66,
70,
271,
13,
15042,
13,
3575,
602,
1330,
440,
66,
70,
18843,
602,
... | 2.672727 | 165 |
#!/usr/bin/env python
import csv
import datetime
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__all__ = ("CommaDialect", "CommaRow", "Comma", "make_backup")
#__slots__ = ("row", "header", "header_dict", "parsers", "serializers")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
269,
21370,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
28311,
25,
220,
198,
220,
220,
220,
422,
10903,
9399,
1330,
10903,
9399,
198,
16341,
17267,
12331,
25,
198,
... | 2.700935 | 107 |
import logging
import sys
import os
from logging.handlers import TimedRotatingFileHandler
LOG_FILE_NAME = os.path.dirname(__file__)+'/logs/log.log'
# set up formatting
formatter = logging.Formatter('[%(asctime)s] %(levelname)s (%(process)d) %(module)s:%(lineno)d %(message)s')
# set up logging to STDOUT for all levels WARNING and higher
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.WARNING)
sh.setFormatter(formatter)
# set up logging to a file for all levels DEBUG and higher
#fh = logging.FileHandler(LOG_FILE_NAME)
fh = TimedRotatingFileHandler(LOG_FILE_NAME, when="d", interval=1, backupCount=7)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
# create logger object
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logger.addHandler(sh)
logger.addHandler(fh)
# shortcuts
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
critical = logger.critical | [
11748,
18931,
201,
198,
11748,
25064,
201,
198,
11748,
28686,
201,
198,
6738,
18931,
13,
4993,
8116,
1330,
5045,
276,
24864,
803,
8979,
25060,
201,
198,
201,
198,
25294,
62,
25664,
62,
20608,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
... | 2.749288 | 351 |
import sys
import json
from time import time
from requests_oauthlib import OAuth2Session
from collections import OrderedDict
# Local Imports
import turbo_config as config
import turbo_util as util
from turbo_db import DBSession
this = sys.modules[__name__]
# TODO: change this into a TempSessionClerk that gets shared between greenlets
# Temp session store (uses expiration_interval instead of session_max_age)
this.sessions = {}
# Create session, store session token and return it
# Check session stored in cookie, returns True if session exists and is active
# Store initial oauth parameters, if not already stored
# Application wide API calls (require application access token)
# User specific API calls (require user access token)
# Simplified JSON:API helpers for what we need
# Builds a query string for JSON:API includes=list(), fields=dict(key,list())
# NOTE: Disregards and strips pagination/links
| [
11748,
25064,
198,
11748,
33918,
198,
6738,
640,
1330,
640,
198,
6738,
7007,
62,
12162,
1071,
8019,
1330,
440,
30515,
17,
36044,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
2,
10714,
1846,
3742,
198,
11748,
29292,
62,
11250,... | 3.866667 | 240 |
import unittest
from cert_core import to_certificate_model
from mock import Mock
from cert_verifier.checks import *
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
5051,
62,
7295,
1330,
284,
62,
22583,
22460,
62,
19849,
198,
6738,
15290,
1330,
44123,
198,
198,
6738,
5051,
62,
332,
7483,
13,
42116,
1330,
1635,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
... | 2.947368 | 57 |
from django.contrib import admin
from . models import City
admin.site.register(City)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
4981,
1330,
2254,
198,
198,
28482,
13,
15654,
13,
30238,
7,
14941,
8,
198
] | 3.346154 | 26 |
import json
from flask import Flask, request, abort
from dotenv import dotenv_values
from skola_online import SkolaOnline
from fablab import Fablab
from salina import Salina
from alojz import Alojz
from functools import wraps
import datetime
import time
from pprint import *
app = Flask(__name__)
# file .env
# USERNAME=username
# PASSWORD=password
# API_KEY=api_key
config = dotenv_values(".env")
def request_data(message: str):
"""! @brief Vytvoří odpověď na požadavek.
@param message: Zpráva, která se má vrátit.
@return JSON Odpověď na požadavek.
"""
mess = {"data": message, "status": "ok", "timeFull": str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))}
print(mess)
return json.dumps(mess)
# Source: https://coderwall.com/p/4qickw/require-an-api-key-for-a-route-in-flask-using-only-a-decorator
# The actual decorator function
# http://127.0.0.1:5000/marksLast/?api_key=
# @require_apikey
@app.route("/marksLast/")
def marksLastFlask():
"""! @brief URL pro získání posledních známek.
@return JSON s posledními známkami
"""
try:
sol = SkolaOnline(config["USERNAME"], config["PASSWORD"])
lastMarks = sol.getLastMarks()
json_arr = []
for mark in lastMarks:
print(mark)
json_arr.append(mark.__dict__)
return request_data(json_arr)
except Exception as e:
return request_error(str(e))
# http://127.0.0.1:5000/marksSubject/
# @require_apikey
@app.route("/marksSubject/")
def marksSubjectFlask():
"""! @brief URL pro získání známek podle předmětu.
@return JSON se známkami podle předmětu
"""
return request_error("Neni implementovano :-(")
try:
sol = SkolaOnline(config["USERNAME"], config["PASSWORD"])
subject = request.args.get("subject")
lastMarks = sol.getLastMarksBySubject(subject)
json_arr = []
for mark in lastMarks:
print(mark)
json_arr.append(mark.__dict__)
return request_data(json_arr)
except Exception as e:
print(e)
return request_error(str(e))
# http://127.0.0.1:5000/fablabNow/
# @require_apikey
@app.route("/fablabNow/")
def fablabNowFlask():
"""! @brief URL pro získání aktuálního stavu strojů ve Fsblabu.
@return JSON s aktuálním stavem strojů ve Fsblabu
"""
# return request_error("Neni implementovano :-(")
try:
fablab = Fablab()
machinesStat = fablab.getMachinesStatus()
json_machines = []
for machine in machinesStat.machines:
json_machines.append(machine.__dict__)
json_printers = []
for printer in machinesStat.printers:
json_printers.append(printer.__dict__)
json_arr = {"members": machinesStat.members, "machines": json_machines, "printers": json_printers}
return request_data(json_arr)
except Exception as e:
print(e)
return json.dumps({"error": str(e)})
# http://127.0.0.1:5000/departures?stopid=1272&postid=2
# @require_apikey
@app.route("/departures")
# http://127.0.0.1:5000/alojz?alojzId=brno&lat=49.195060&lon=16.606837&alt=237
# @require_apikey
@app.route("/alojz")
# http://127.0.0.1:5000/ping
# @require_apikey
@app.route("/ping")
if __name__ == "__main__":
"""! @brief Spouštění aplikace.
Hlavní funkce aplikace - spouští Flask server.
"""
app.run(host="0.0.0.0")
# # app.run(debug=True, host="0.0.0.0") | [
11748,
33918,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
15614,
198,
6738,
16605,
24330,
1330,
16605,
24330,
62,
27160,
198,
6738,
1341,
5708,
62,
25119,
1330,
3661,
5708,
14439,
198,
6738,
7843,
23912,
1330,
14236,
23912,
198,
6738,
36... | 2.215109 | 1,562 |
from typing import List, NamedTuple
| [
6738,
19720,
1330,
7343,
11,
34441,
51,
29291,
628
] | 4.111111 | 9 |
A_26_02_9 = {0: {'A': 0.109, 'C': -0.186, 'E': -0.694, 'D': -0.616, 'G': 0.238, 'F': -0.231, 'I': 0.156, 'H': -0.098, 'K': 0.739, 'M': -0.153, 'L': 0.27, 'N': -0.117, 'Q': 0.048, 'P': 0.109, 'S': -0.019, 'R': 0.694, 'T': -0.034, 'W': -0.351, 'V': 0.229, 'Y': -0.093}, 1: {'A': -0.866, 'C': 0.214, 'E': 0.5, 'D': 0.294, 'G': 0.032, 'F': 0.379, 'I': -0.659, 'H': 0.722, 'K': 0.19, 'M': 0.016, 'L': -0.184, 'N': 0.207, 'Q': 0.252, 'P': -0.031, 'S': -0.486, 'R': 0.421, 'T': -0.968, 'W': 0.467, 'V': -1.031, 'Y': 0.531}, 2: {'A': -0.52, 'C': -0.067, 'E': 0.226, 'D': 0.342, 'G': 0.184, 'F': 0.214, 'I': -0.602, 'H': 0.015, 'K': 0.098, 'M': 0.091, 'L': 0.188, 'N': 0.177, 'Q': 0.287, 'P': -0.006, 'S': -0.105, 'R': 0.101, 'T': -0.298, 'W': 0.114, 'V': -0.519, 'Y': 0.081}, 3: {'A': -0.186, 'C': -0.153, 'E': -0.149, 'D': -0.052, 'G': -0.096, 'F': 0.019, 'I': 0.428, 'H': 0.042, 'K': 0.0, 'M': 0.182, 'L': 0.392, 'N': 0.088, 'Q': -0.109, 'P': -0.005, 'S': -0.099, 'R': -0.263, 'T': -0.114, 'W': -0.085, 'V': 0.28, 'Y': -0.121}, 4: {'A': 0.158, 'C': -0.125, 'E': -0.123, 'D': -0.156, 'G': 0.006, 'F': 0.189, 'I': 0.112, 'H': -0.418, 'K': 0.233, 'M': 0.11, 'L': 0.276, 'N': 0.015, 'Q': -0.15, 'P': -0.048, 'S': 0.025, 'R': 0.058, 'T': 0.138, 'W': -0.322, 'V': 0.35, 'Y': -0.328}, 5: {'A': 0.276, 'C': 0.224, 'E': 0.281, 'D': 0.093, 'G': 0.12, 'F': -0.194, 'I': -0.43, 'H': 0.007, 'K': 0.493, 'M': -0.808, 'L': -0.589, 'N': -0.071, 'Q': 0.128, 'P': 0.42, 'S': 0.152, 'R': 0.548, 'T': -0.007, 'W': -0.209, 'V': -0.068, 'Y': -0.366}, 6: {'A': -0.282, 'C': -0.068, 'E': 0.098, 'D': 0.094, 'G': -0.302, 'F': 0.095, 'I': 0.158, 'H': 0.056, 'K': 0.207, 'M': 0.113, 'L': 0.17, 'N': 0.031, 'Q': -0.047, 'P': -0.092, 'S': -0.219, 'R': -0.151, 'T': -0.053, 'W': 0.04, 'V': 0.104, 'Y': 0.048}, 7: {'A': -0.31, 'C': 0.027, 'E': 0.108, 'D': 0.283, 'G': -0.217, 'F': -0.151, 'I': -0.022, 'H': 0.154, 'K': 0.122, 'M': -0.084, 'L': -0.183, 'N': 0.034, 'Q': 0.142, 'P': -0.024, 'S': -0.137, 'R': 0.188, 'T': 0.084, 'W': 
0.014, 'V': -0.074, 'Y': 0.047}, 8: {'A': 0.463, 'C': -0.214, 'E': 0.183, 'D': 0.103, 'G': 0.15, 'F': -0.903, 'I': -0.162, 'H': -0.172, 'K': 0.625, 'M': -1.084, 'L': -0.262, 'N': -0.16, 'Q': 0.5, 'P': 0.818, 'S': 0.393, 'R': 0.616, 'T': 0.494, 'W': -0.621, 'V': 0.166, 'Y': -0.932}, -1: {'con': 4.15094}} | [
32,
62,
2075,
62,
2999,
62,
24,
796,
1391,
15,
25,
1391,
6,
32,
10354,
657,
13,
14454,
11,
705,
34,
10354,
532,
15,
13,
25096,
11,
705,
36,
10354,
532,
15,
13,
45214,
11,
705,
35,
10354,
532,
15,
13,
44214,
11,
705,
38,
10354,... | 1.61831 | 1,420 |
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import numpy.random as npr
from ..utils.config import cfg
from bbox_transform import bbox_transform, bbox_overlaps, co_bbox_overlaps_batch2, bbox_transform_batch2, bbox_overlaps_batch2
import pdb
DEBUG = False
class _RelProposalTargetLayer(nn.Module):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def _sample_roi_pairs_pytorch(self, all_roi_pairs, gt_box_pairs, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = co_bbox_overlaps_batch2(all_roi_pairs[:,:,1:].contiguous(),
gt_box_pairs[:,:,:8].contiguous())
max_overlaps, gt_assignment = torch.max(overlaps, 2)
batch_size = overlaps.size(0)
num_proposal = overlaps.size(1)
num_boxes_per_img = overlaps.size(2)
offset = torch.arange(0, batch_size) * gt_box_pairs.size(1)
offset = offset.view(-1, 1).type_as(gt_assignment) + gt_assignment
labels = gt_box_pairs[:,:,8].contiguous().view(-1).index(offset.view(-1))\
.view(batch_size, -1)
fg_mask = max_overlaps >= cfg.TRAIN.RELPN_FG_THRESH
keep_inds_batch = labels.new(batch_size, rois_per_image).zero_()
labels_rel_batch = labels.new(batch_size, rois_per_image).zero_()
roi_pairs_batch = all_roi_pairs.new(batch_size, rois_per_image, 9).zero_()
# Guard against the case when an image has fewer than max_fg_rois_per_image
# foreground RoIs
for i in range(batch_size):
fg_inds = torch.nonzero(max_overlaps[i] >= cfg.TRAIN.RELPN_FG_THRESH).view(-1)
fg_num_rois = fg_inds.numel()
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = torch.nonzero((max_overlaps[i] < cfg.TRAIN.RELPN_BG_THRESH_HI) &
(max_overlaps[i] >= cfg.TRAIN.RELPN_BG_THRESH_LO)).view(-1)
bg_num_rois = bg_inds.numel()
# print(fg_num_rois, bg_num_rois)
# pdb.set_trace()
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
# rand_num = torch.randperm(fg_num_rois).long().cuda()
rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).long().cuda()
fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
# sampling bg
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
# Seems torch.rand has a bug, it will generate very large number and make an error.
# We use numpy rand instead.
#rand_num = (torch.rand(bg_rois_per_this_image) * bg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(bg_rois_per_this_image) * bg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
bg_inds = bg_inds[rand_num]
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
#rand_num = torch.floor(torch.rand(rois_per_image) * fg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(rois_per_image) * fg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
fg_inds = fg_inds[rand_num]
fg_rois_per_this_image = rois_per_image
bg_rois_per_this_image = 0
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
#rand_num = torch.floor(torch.rand(rois_per_image) * bg_num_rois).long().cuda()
rand_num = np.floor(np.random.rand(rois_per_image) * bg_num_rois)
rand_num = torch.from_numpy(rand_num).long().cuda()
bg_inds = bg_inds[rand_num]
bg_rois_per_this_image = rois_per_image
fg_rois_per_this_image = 0
else:
print("relpn: bg_num_rois = 0 and fg_num_rois = 0, this should not happen!")
# The indices that we're selecting (both fg and bg)
keep_inds = torch.cat([fg_inds, bg_inds], 0)
keep_inds_batch[i].copy_(keep_inds)
# Select sampled values from various arrays:
labels_rel_batch[i].copy_(labels[i][keep_inds])
# Clamp relation labels for the background RoIs to 0
labels_rel_batch[i][fg_rois_per_this_image:] = 0
roi_pairs_batch[i].copy_(all_roi_pairs[i][keep_inds])
roi_pairs_batch[i,:,0] = i
return labels_rel_batch, roi_pairs_batch, keep_inds_batch
| [
2,
20368,
22369,
198,
2,
38996,
371,
12,
18474,
198,
2,
15069,
357,
66,
8,
1853,
5413,
198,
2,
49962,
739,
383,
17168,
13789,
685,
3826,
38559,
24290,
329,
3307,
60,
198,
2,
22503,
416,
9847,
23837,
1477,
624,
290,
11465,
7459,
198,... | 2.037722 | 2,704 |
#=======================================================================
__version__ = '''0.0.22'''
__sub_version__ = '''20120704184132'''
__copyright__ = '''(c) Alex A. Naanou 2003-2008'''
#-----------------------------------------------------------------------
import types
import new
import sys
#-----------------------------------------------------------------------
#-----------------------------------------------------------termsuper---
class termsuper(super):
'''
this is a terminated super.
to be used when the object has no emidiate superclass but may participate
in an inheritance tree where relaying calls to the next member in the mro
chain is needed.
this is equivalent to:
class X(object):
def meth(self, arg):
try:
super(X, self).meth(arg)
except (AttributeError, NotImplemented, NotImplementedError):
pass
# and using the termsuper...
class Y(object):
def meth(self, arg):
termsuper(Y, self).meth(arg)
super will fail when X is used "stand-alone" but may be needed in complex
cases, as shown here:
class Z(X, Y):
def meth(self, arg):
super(Z, self).meth(arg)
z = Z()
z.meth(123) # needs to call both X.meth and Y.meth... (if no
# termination was used then only the first in the
# MRO chain would have been called).
in the instance of a Z class both the methods of X and Y need to be called.
NOTE: it is recommended to use termsuper only when it is explicitly
needed (hence, it is not used in Z).
NOTE: it is usually cusomary not to write any super calls in cases like X,
but, this may result in problems down the road.
thus, if writing library code intended for inheritance use, it is
reccomended to allways either terminate the super call or warn that
manual calling is in order.
'''
#-----------------------------------------------------------------------
#-------------------------------------------------classinstancemethod---
##!! revise !!##
class classinstancemethod(object):
'''
a universal class/instance/direct method constructor/dispatcher.
'''
def __init__(self, func, inst_func=None, direct_func=None):
'''
'''
self.func = func
self.inst_func = inst_func != None and inst_func or func
self.direct_func = direct_func != None and direct_func or func
def __call__(self, *p, **n):
'''
'''
# we are called directly...
return self.direct_func(*p, **n)
def __get__(self, obj, cls=None):
'''
'''
if obj == None:
# we are called from a class...
return new.instancemethod(self.func, cls, type(cls))
else:
# we are called from an instance...
return new.instancemethod(self.inst_func, obj, cls)
#-----------------------------------------------------------------------
#------------------------------------------------------------property---
_property = property
class property(_property):
'''
create a property in a nicer way.
Example 1 -- using this class:
@property
def attr(self):
...
@property.setter
def attr(self, value):
...
@property.deleter
def attr(self):
...
Example 2 -- using this class:
@property
def attr(self):
...
@attr.setter
def attr(self, value):
...
@attr.deleter
def attr(self):
...
NOTE: the stile exhibited in example #1 is prefered.
NOTE: in example #1 and #2, each decorator may be used many times, and each
consecutive time will overwrite the previous handler.
NOTE: in example #1 the name of the handler is used to identify the property
in the enclosing namespace.
NOTE: in example #2 both the name of the decorator and the name of the method
are significant. this is due to the way CPython handles the result of
the decorator.
NOTE: this was inspired by the Py3.0 property interface.
For illustration, here is how things used to be:
Example 3 -- Py25-style:
def getter(self):
...
def setter(self, value):
...
def deleter(self):
...
attr = property(fget=getter, fset=setter, fdel=deleter)
del getter, setter, deleter
'''
@classinstancemethod
@classinstancemethod
@classinstancemethod
#-----------------------------------------------------------------------
#------------------------------------------------------createonaccess---
# TODO make writing and removal optional...
def createonaccess(name, constructor, doc='', local_attr_tpl='_%s', depth=1):
'''
return a property object that will create an an object via the provided
constructor on first access.
if the constructor is a string, it will be used as a method name in the
containing object. this method will be used to construct the object.
the created object will be saved in the data attribute (named local_attr_tpl % name)
in the containing namespace.
this also both supports writing and removal. on write the value will be
written to the data attribute directly. ob removal the data attribute will
be removed form the containing namespace.
an oprional doc argument will get written to the property doc.
NOTE: after removal, first access to the property will recreate it using
the constructor.
'''
local_attr = local_attr_tpl % name
# set the attr...
sys._getframe(depth).f_locals[name] \
= property(
fget=getter,
fset=setter,
fdel=remover,
doc=doc)
#-----------------------------------------------------------------------
# TODO attribute analysis for conflicts and collisions...
# TODO doc formatting...
# TODO value patterns, pattern checking and extensibility...
DOC_GENERAL_TEMPLATE = '''\
%(func_doc)s
%(desciption)s:
%(args)s
'''
##!!! we need to pad the name with enough spaces to allign the descriptions...
##!!! we need to normalize and pad the lines of the descriptions...
DOC_ARG_TEMPLATE = '''\
%(name)s - %(description)s (%(source)s)
'''
DOC_ATTR_NAME = '_declared_args'
#----------------------------------------------------------getargspec---
def getargspec(meth):
'''
NOTE: this will detect only the normally declared arguments, thus,
most tricks and workarounds are not accounted for, as well as
arguments used but not declared.
'''
cls = meth.im_class
name = meth.__name__
mro = cls.__mro__
res = []
for c in mro:
if not hasattr(c, name):
continue
res += [(c, getattr(getattr(c, name), DOC_ATTR_NAME, None))]
return res
#-----------------------------------------------------------------doc---
##!!! we need to generate the docstring AFTER the class is created and not when the decorator is called...
##!!! ...this is because we need to traverse the MRO for similar methods...
def doc(*p, **n):
'''
Generate documentation for function arguments.
Usage:
@doc("argument documentation for function f"
arg_a="",
arg_b="",
arg_c="")
def f(**n):
a, b, c = n['arg_a'], n['arg_b'], n['arg_c']
...
'''
return _doc
#-----------------------------------------------------------------------
#-----------------------------------------------------ObjectWithAttrs---
# XXX might be good to rename this... and to add method interface
# checking support...
# TODO add update callbacks (__attr_update_callbacks__ = {<attr>: <meth>[, ...]})
# TODO add regex attribute naming.... (e.g. if <attr> is a regexp
# object use it to match the attr name...)
# NOTE: might be good to use a predicate...
class ObjectWithAttrs(object):
'''
a generic object class with attribute creation an update automation.
this class checks attribute format.
'''
# this defines an acl object to be used...
__acl__ = None
# this will restrict the attribute that can be created for this
# object to the ones mentioned in the list (DO NOT SET HERE!).
# value: tuple
__attr_format__ = None
# this defines a tuple of attributes that must be defined on object
# init.
# value: tuple
__essential_attrs__ = None
## # this defines the callbacks for attr update... (RPC only at the
## # moment...)
## # value: dict
## __attr_update_callbacks__ = {}
# if this is unset the checks will ignore all attrs that are not in
# format...
# TODO revise....
__strict_attr_format__ = True
# this will enable attribute type checking... (change for legacy
# support only... though might be good for a speedup)
__check_attr_types__ = True
# this defines the values that are to be treated as "no-vals" (e.g.
# ignored on type checks...)
__none_values__ = ('', None)
def __init__(self, name, attrs={}):
'''
create an object with attrs from a dict...
'''
super(ObjectWithAttrs, self).__init__(name)
# check essential attrs....
if hasattr(self, '__essential_attrs__') and self.__essential_attrs__ != None:
essential_attrs = [ (type(attr) not in (str, unicode) and attr[0] or attr) \
for attr in self.__essential_attrs__ ]
err = []
if False in [ attr in attrs and self._isattrwritable(attr, \
attrs[attr], \
strict=hasattr(self, '__strict_attr_format__') and self.__strict_attr_format__, \
format=self.__essential_attrs__) \
or (err.append(attr), False)[-1] \
for attr in essential_attrs ]:
raise TypeError, 'essential attribute format mismatch in %s.' % (err,)
self.update(attrs)
# the isconsisten protocol...
def __isconsistent__(self):
'''
check object consistency...
'''
return _checkarttrs(self.__dict__)
# NOTE: this is not very efficient if more than one attr is added
# in a loop (e.g. on object init)...
def _isattrwritable(self, name, value, format=None, strict=True, message=None, none_vals=False):
'''
this predicate will return true if the attribute is writable.
NOTE: this function impements error reporting a side-effect.
the error message[s] will be appended to the message argument (if present).
NOTE: the argument "message" must be of type list (if present).
NOTE: the "message" argument will be modified during this functions execution.
'''
if format == None:
if hasattr(self, '__attr_format__') and self.__attr_format__ != None and len(self.__attr_format__) != 0:
format = self.__attr_format__
##!!!
none_vals = True
else:
return True
# cache the complex format...
cformat = {}
[ cformat.update({e[0]: e[1:]}) for e in format if type(e) not in (str, unicode) ]
## # NOTE: both of the folowing are quite slow...
## # cache the regex format...
## rformat = []
## # cache the predicate format...
pformat = []
if hasattr(self, '__check_attr_types__') and self.__check_attr_types__:
if name not in format:
if name in cformat:
# get data...
e = cformat[name]
etype = len(e) > 0 and type(e[0]) not in (str, unicode) and e[0] or ''
# check none_vals
if none_vals and hasattr(self, '__none_values__') and \
self.__none_values__ and value in self.__none_values__:
return True
# check type...
try:
if type(etype) in (str, unicode) or issubclass(type(value), etype):
return True
except TypeError:
# we get here if issubclass failse when
# comparing types with a function/lambda
pass
# check predicate....
# XXX (temporary??) the predicate can only be a
# function or a lambda...
## if callable(etype):
if type(etype) in (types.LambdaType, types.FunctionType):
try:
if etype(value):
return True
except Exception, msg:
print '>>>', msg
# implement the side-effect...
if message != None:
if type(message) != list:
raise TypeError, 'message paramiter must be of type "list".'
message += [msg]
except:
pass
elif not strict:
return True
return False
# legacy only....
elif name not in format and name not in cformat:
return False
return True
# sloooow _checkarttrs version... (uses _isattrwritable in a
# loop...)
def _checkarttrs(self, attrs, errors=None, none_vals=False):
'''
check if attribute dict given is compatible with format (see self._isattrwritable for details...)
NOTE: this function impements error reporting a side-effect.
all the the arguments that generate errors will be appended to the errors argument (if present).
NOTE: the argument "errors" must be of type list (if present).
NOTE: the "errors" argument will be modified during this functions execution.
'''
# NOTE: this is very inefficient!!!
if errors == None:
for name, val in attrs.items():
if not self._isattrwritable(name, val, strict=(hasattr(self, '__strict_attr_format__') and self.__strict_attr_format__), none_vals=none_vals):
return False
elif type(errors) == list:
errors[:] = []
for name, val in attrs.items():
if not self._isattrwritable(name, val, strict=(hasattr(self, '__strict_attr_format__') and self.__strict_attr_format__), none_vals=none_vals):
errors += [name]
if len(errors) > 0:
return False
else:
raise TypeError, 'errors paramiter must be of type "list".'
return True
def update(self, attrs):
'''
update object attributes.
NOTE: in strict mode this will disallow an update of non-existant
attributes.
'''
# XXX comment the folowing two lines out if __setattr__ is used...
err = []
if not self._checkarttrs(attrs, errors=err):
raise AttributeError, 'can\'t update object %s, attribute format mismatch in %s.' % (self, err)
if self.__acl__ != None:
acl_setattr = self.__acl__.setattr
for n, v in attrs.items():
acl_setattr(self, n, v)
else:
for n, v in attrs.items():
setattr(self, n, v)
# the attribute interface....
##!! test !!##
def __setattr__(self, name, val):
'''
'''
if not self._isattrwritable(name, val, strict=(hasattr(self, '__strict_attr_format__') and self.__strict_attr_format__)):
raise TypeError, 'attribute "%s" does not comply with the format of %s object.' % (name, self)
## self.__dict__[name] = val
super(ObjectWithAttrs, self).__setattr__(name, val)
def __delattr__(self, name):
'''
'''
if hasattr(self, '__essential_attrs__') and self.__essential_attrs__ != None:
if name in self.__essential_attrs__ or \
name in [ attr[0] for attr in self.__essential_attrs__ if type(attr) not in (str, unicode) ]:
raise TypeError, 'can not remove essential attribute "%s" of %s object.' % (name, self)
del self.__dict__[name]
# introspection...
# TODO make this prittier....
__help__ = classmethod(__help__)
def getattributetextformat(cls):
'''
this will return a text definition of the attr format of obj.
'''
# essential attrs:
res = 'Essential Attributes:\n'
if hasattr(cls, '__essential_attrs__') and cls.__essential_attrs__ != None:
r = _format_str(cls.__essential_attrs__)
res += r == '' and ' None\n' or r
else:
res += ' None\n'
res += '\n'
# attr format:
res += 'Attribute Format:\n'
if hasattr(cls, '__attr_format__') and cls.__attr_format__ != None:
r = _format_str(cls.__attr_format__)
res += r == '' and ' None\n' or r
else:
res += ' None\n'
return res + '\n'
getattributetextformat = classmethod(getattributetextformat)
def getattributeformat(cls, name=None):
'''
'''
# requested name...
if name != None:
res = []
if hasattr(cls, '__essential_attrs__') and cls.__essential_attrs__ != None:
res = _format_dict(cls.__essential_attrs__, name)
if res == [] and hasattr(cls, '__attr_format__') and cls.__attr_format__ != None:
res = _format_dict(cls.__attr_format__, name)
return res
# empty res...
res = {}
# essential attrs:
if hasattr(cls, '__essential_attrs__') and cls.__essential_attrs__ != None:
res['essential'] = _format_dict(cls.__essential_attrs__)
# attr format:
if hasattr(cls, '__attr_format__') and cls.__attr_format__ != None:
res['format'] = _format_dict(cls.__attr_format__)
return res
getattributeformat = classmethod(getattributeformat)
#=======================================================================
# vim:set ts=4 sw=4 nowrap :
| [
2,
23926,
1421,
18604,
198,
198,
834,
9641,
834,
796,
705,
7061,
15,
13,
15,
13,
1828,
7061,
6,
198,
834,
7266,
62,
9641,
834,
796,
705,
7061,
1264,
1238,
32869,
22883,
19924,
7061,
6,
198,
834,
22163,
4766,
834,
796,
705,
7061,
7... | 2.872862 | 5,553 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 Authorization Flow
.. warning::
This module is experimental and is subject to change signficantly
within major version releases.
This module provides integration with `requests-oauthlib`_ for running the
`OAuth 2.0 Authorization Flow`_ and acquiring user credentials.
Here's an example of using the flow with the installed application
authorization flow::
import google.oauth2.flow
# Create the flow using the client secrets file from the Google API
# Console.
flow = google.oauth2.flow.Flow.from_client_secrets_file(
'path/to/client_secrets.json',
scopes=['profile', 'email'],
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
# Tell the user to go to the authorization URL.
auth_url, _ = flow.authorization_url(prompt='consent')
print('Please go to this URL: {}'.format(auth_url))
# The user will get an authorization code. This code is used to get the
# access token.
code = input('Enter the authorization code: ')
flow.fetch_token(code=code)
# You can use flow.credentials, or you can just get a requests session
# using flow.authorized_session.
session = flow.authorized_session()
print(session.get('https://www.googleapis.com/userinfo/v2/me').json())
.. _requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/stable/
.. _OAuth 2.0 Authorization Flow:
https://tools.ietf.org/html/rfc6749#section-1.2
"""
import json
import google.auth.transport.requests
import google.oauth2.credentials
import google.oauth2.oauthlib
class Flow(object):
"""OAuth 2.0 Authorization Flow
This class uses a :class:`requests_oauthlib.OAuth2Session` instance at
:attr:`oauth2session` to perform all of the OAuth 2.0 logic. This class
just provides convenience methods and sane defaults for doing Google's
particular flavors of OAuth 2.0.
Typically you'll construct an instance of this flow using
:meth:`from_client_secrets_file` and a `client secrets file`_ obtained
from the `Google API Console`_.
.. _client secrets file:
https://developers.google.com/identity/protocols/OAuth2WebServer
#creatingcred
.. _Google API Console:
https://console.developers.google.com/apis/credentials
"""
def __init__(self, oauth2session, client_type, client_config):
"""
Args:
oauth2session (requests_oauthlib.OAuth2Session):
The OAuth 2.0 session from ``requests-oauthlib``.
client_type (str): The client type, either ``web`` or
``installed``.
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
.. _client secrets:
https://developers.google.com/api-client-library/python/guide
/aaa_client_secrets
"""
self.client_type = client_type
"""str: The client type, either ``'web'`` or ``'installed'``"""
self.client_config = client_config[client_type]
"""Mapping[str, Any]: The OAuth 2.0 client configuration."""
self.oauth2session = oauth2session
"""requests_oauthlib.OAuth2Session: The OAuth 2.0 session."""
@classmethod
def from_client_config(cls, client_config, scopes, **kwargs):
"""Creates a :class:`requests_oauthlib.OAuth2Session` from client
configuration loaded from a Google-format client secrets file.
Args:
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
Raises:
ValueError: If the client configuration is not in the correct
format.
.. _client secrets:
https://developers.google.com/api-client-library/python/guide
/aaa_client_secrets
"""
if 'web' in client_config:
client_type = 'web'
elif 'installed' in client_config:
client_type = 'installed'
else:
raise ValueError(
'Client secrets must be for a web or installed app.')
session, client_config = (
google.oauth2.oauthlib.session_from_client_config(
client_config, scopes, **kwargs))
return cls(session, client_type, client_config)
@classmethod
def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
"""Creates a :class:`Flow` instance from a Google client secrets file.
Args:
client_secrets_file (str): The path to the client secrets .json
file.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
"""
with open(client_secrets_file, 'r') as json_file:
client_config = json.load(json_file)
return cls.from_client_config(client_config, scopes=scopes, **kwargs)
@property
def redirect_uri(self):
"""The OAuth 2.0 redirect URI. Pass-through to
``self.oauth2session.redirect_uri``."""
return self.oauth2session.redirect_uri
@redirect_uri.setter
def authorization_url(self, **kwargs):
"""Generates an authorization URL.
This is the first step in the OAuth 2.0 Authorization Flow. The user's
browser should be redirected to the returned URL.
This method calls
:meth:`requests_oauthlib.OAuth2Session.authorization_url`
and specifies the client configuration's authorization URI (usually
Google's authorization server) and specifies that "offline" access is
desired. This is required in order to obtain a refresh token.
Args:
kwargs: Additional arguments passed through to
:meth:`requests_oauthlib.OAuth2Session.authorization_url`
Returns:
Tuple[str, str]: The generated authorization URL and state. The
user must visit the URL to complete the flow. The state is used
when completing the flow to verify that the request originated
from your application. If your application is using a different
:class:`Flow` instance to obtain the token, you will need to
specify the ``state`` when constructing the :class:`Flow`.
"""
url, state = self.oauth2session.authorization_url(
self.client_config['auth_uri'],
access_type='offline', **kwargs)
return url, state
def fetch_token(self, **kwargs):
"""Completes the Authorization Flow and obtains an access token.
This is the final step in the OAuth 2.0 Authorization Flow. This is
called after the user consents.
This method calls
:meth:`requests_oauthlib.OAuth2Session.fetch_token`
and specifies the client configuration's token URI (usually Google's
token server).
Args:
kwargs: Arguments passed through to
:meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least
one of ``code`` or ``authorization_response`` must be
specified.
Returns:
Mapping[str, str]: The obtained tokens. Typically, you will not use
return value of this function and instead and use
:meth:`credentials` to obtain a
:class:`~google.auth.credentials.Credentials` instance.
"""
return self.oauth2session.fetch_token(
self.client_config['token_uri'],
client_secret=self.client_config['client_secret'],
**kwargs)
@property
def credentials(self):
"""Returns credentials from the OAuth 2.0 session.
:meth:`fetch_token` must be called before accessing this. This method
constructs a :class:`google.oauth2.credentials.Credentials` class using
the session's token and the client config.
Returns:
google.oauth2.credentials.Credentials: The constructed credentials.
Raises:
ValueError: If there is no access token in the session.
"""
return google.oauth2.oauthlib.credentials_from_session(
self.oauth2session, self.client_config)
def authorized_session(self):
"""Returns a :class:`requests.Session` authorized with credentials.
:meth:`fetch_token` must be called before this method. This method
constructs a :class:`google.auth.transport.requests.AuthorizedSession`
class using this flow's :attr:`credentials`.
Returns:
google.auth.transport.requests.AuthorizedSession: The constructed
session.
"""
return google.auth.transport.requests.AuthorizedSession(
self.credentials)
| [
2,
15069,
1584,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 2.533988 | 3,869 |
"""
This program plays the cookie clicker game in an aoutmated way using the Selenium Webdriver Browser
"""
# Import required libraries
from selenium import webdriver
from time import time
if __name__ == '__main__':
main()
| [
37811,
198,
1212,
1430,
5341,
262,
19751,
3904,
263,
983,
287,
281,
257,
448,
76,
515,
835,
1262,
262,
15300,
47477,
5313,
26230,
34270,
198,
37811,
198,
198,
2,
17267,
2672,
12782,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
... | 3.553846 | 65 |
from .ner import NER, TextOrSentences, text_to_sentences, sentences_to_text
from .result import NerResult, NerResultEntity
from .mapping import ONTONOTES_TO_WIKIPEDIA_LABEL_MAPPING
MODELS_MAPPING = {
'small_en': 'en_core_web_sm',
'small_multi': 'xx_ent_wiki_sm',
# 'large_en': 'en_core_web_lg'
}
| [
198,
6738,
764,
1008,
1330,
399,
1137,
11,
8255,
5574,
31837,
3007,
11,
2420,
62,
1462,
62,
34086,
3007,
11,
13439,
62,
1462,
62,
5239,
198,
6738,
764,
20274,
1330,
21783,
23004,
11,
21783,
23004,
32398,
198,
6738,
764,
76,
5912,
1330... | 2.459677 | 124 |
import numpy as np
import time
import imutils
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
avg = None
xvalues = list()
motion = list()
count1 = 0
count2 = 0
with PiCamera(resolution=(640,480), framerate=30) as camera:
with PiRGBArray(camera, size=(640,480)) as rawCapture:
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
frame = f.array
text = "Unoccupied"
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if avg is None:
print ("[INFO] starting background model...")
avg = gray.copy().astype("float")
continue
cv2.accumulateWeighted(gray, avg, 0.5)
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
(_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
if cv2.contourArea(c) < 5000:
continue
(x, y, w, h) = cv2.boundingRect(c)
xvalues.append(x)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
flag = False
no_x = len(xvalues)
if (no_x > 2):
difference = xvalues[no_x - 1] - xvalues[no_x - 2]
if(difference > 0):
motion.append(1)
else:
motion.append(0)
if flag is True:
if (no_x > 5):
val, times = find_majority(motion)
if val == 1 and times >= 15:
count1 += 1
else:
count2 += 1
xvalues = list()
motion = list()
cv2.line(frame, (260, 0), (260,480), (0,255,0), 2)
cv2.line(frame, (420, 0), (420,480), (0,255,0), 2)
cv2.putText(frame, "In: {}".format(count1), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, "Out: {}".format(count2), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.imshow("Frame",frame)
cv2.imshow("Gray",gray)
cv2.imshow("FrameDelta",frameDelta)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
rawCapture.truncate(0)
cv2.destroyAllWindows()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
545,
26791,
198,
11748,
269,
85,
17,
198,
6738,
8301,
18144,
13,
18747,
1330,
13993,
36982,
19182,
198,
6738,
8301,
18144,
1330,
13993,
35632,
198,
198,
615,
70,
796,
6045,
198... | 1.757377 | 1,525 |
# %%
from IPython import get_ipython
from IPython.core.display import display
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('run', 'setup')
# %% leeftijdsgroepen: download RIVM data
#leeftijdsgroepen = SimpleNamespace()
@run
# %% Download de bevolkings cijfers van CBS, uitgesplitst op de leeftijds categorien in de dataset van het RIVM
@run
# %% leeftijdsgroepen: prepareer tabel
# Bereken de stand van zaken van besmettingen / hospitalisaties / overlijden, per cohort in absolute aantallen en aantallen per 100k, met een kleur indicator voor de aantallen.
# vervang <50 en Unknown door Onbekend
@run
# %% publish
if knack:
await knack.publish(tabel.fillna(0).assign(Datum=tabel.Datum.dt.strftime('%Y-%m-%d')), 'Leeftijdsgroep', Cache)
| [
2,
43313,
198,
6738,
6101,
7535,
1330,
651,
62,
541,
7535,
198,
6738,
6101,
7535,
13,
7295,
13,
13812,
1330,
3359,
198,
1136,
62,
541,
7535,
22446,
5143,
62,
1370,
62,
32707,
10786,
2220,
62,
2302,
3256,
705,
2306,
382,
2220,
11537,
... | 2.682848 | 309 |
from code import app
from flask import request, make_response, redirect
from code.database import Database as database
@app.route("/login", methods=['GET', 'POST'])
| [
6738,
2438,
1330,
598,
198,
6738,
42903,
1330,
2581,
11,
787,
62,
26209,
11,
18941,
198,
6738,
2438,
13,
48806,
1330,
24047,
355,
6831,
198,
198,
31,
1324,
13,
38629,
7203,
14,
38235,
1600,
5050,
28,
17816,
18851,
3256,
705,
32782,
6,... | 3.772727 | 44 |
import sys
import etcd
import traceback
from log import get_logger
logger = get_logger(__name__, '/var/log/etcd_watcher.log')
if __name__ == "__main__":
try:
args = sys.argv
logger.info("etcd client args: {}".format(str(args)))
if len(args) < 2:
logger.error("Invalid parameter length!")
op = args[1]
etcd_client = EtcdClient()
if op == "update":
key, value = args[2:4]
logger.info("update key {} to {}".format(key, value))
etcd_client.set(key, value)
elif op == "get":
key = args[2]
value = etcd_client.get(key)
logger.info("get key {} value: {}".format(key, value))
print(value)
elif op == "delete":
key = args[2]
logger.info("delete key {}".format(key))
etcd_client.delete(key)
elif op == "get_watcher_master":
value = etcd_client.get(etcd_client.master_file)
logger.info("get watcher master: {}".format(value))
print(value)
elif op == "get_etcd_master":
value = etcd_client.client.leader
logger.info("get etcd master: {}".format(value))
print(value)
elif op == "get_member_list":
value = etcd_client.client.machines
logger.info("get member list: {}".format(value))
print(value)
elif op == "get_watcher_items":
value = etcd_client.get_watcher_items()
print(value)
elif op == "get_dir_items":
dir_name = args[2]
value = etcd_client.get_dir_items(dir_name)
print(value)
else:
logger.error("Invalid op parameter!")
except Exception:
logger.error("Etcd watcher error: {}".format(traceback.format_exc()))
| [
11748,
25064,
198,
11748,
3503,
67,
198,
11748,
12854,
1891,
198,
6738,
2604,
1330,
651,
62,
6404,
1362,
198,
198,
6404,
1362,
796,
651,
62,
6404,
1362,
7,
834,
3672,
834,
11,
31051,
7785,
14,
6404,
14,
316,
10210,
62,
86,
34734,
13... | 2.017448 | 917 |
import paramiko
import os
from ocha.libs import utils
CURR_DIR = os.getcwd()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
| [
11748,
5772,
12125,
198,
11748,
28686,
198,
6738,
267,
11693,
13,
8019,
82,
1330,
3384,
4487,
198,
198,
34,
31302,
62,
34720,
796,
28686,
13,
1136,
66,
16993,
3419,
198,
198,
45824,
796,
5772,
12125,
13,
5432,
39,
11792,
3419,
198,
45... | 2.704918 | 61 |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# ## clean_others
# This script remove any sequence in the “others” database that clusters at 60% with any sequence in the structural database.
# %%
import os
import sys
sys.path.append("..")
import phage_init
import subprocess
# %%
fasta_list=[
#'03_curated_fasta/others_indexed.fasta',
'03_curated_fasta/minor_capsid.fasta',
'03_curated_fasta/tail_fiber.fasta',
'03_curated_fasta/major_tail.fasta',
'03_curated_fasta/portal.fasta',
'03_curated_fasta/minor_tail.fasta',
'03_curated_fasta/baseplate.fasta',
'03_curated_fasta/collar.fasta',
'03_curated_fasta/shaft.fasta',
'03_curated_fasta/major_capsid.fasta',
'03_curated_fasta/HTJ.fasta'
]
# %%
command = '''cat 03_curated_fasta/others_tmp.fasta | perl -lpe 'BEGIN{$i=1} if (/^>/) { print STDERR ">$i\_pat_\\t$_"; s/^>.*$/>$i\_pat_/;$i++;} ' 2> 03_curated_fasta/others.index | perl -pe 'chomp unless (/^>/)' | perl -lpe 's/(?<=.)>/\\n>/' | paste - - | tr '\\t' '\\n' > 03_curated_fasta/others_indexed.fasta'''
print(command)
__=subprocess.run(command,shell=True,check=True, text=True)
# %%
command = 'cat ' + ' '.join(fasta_list) + ''' | sed '/^$/d' | perl -lpe 'BEGIN{$i=1} if (/^>/) {s/^>.*$/>$i\#vi#/;$i++;}' > 03_curated_fasta/structural_indexed.fasta'''
print(command)
__=subprocess.run(command,shell=True,check=True, text=True)
# %%
command = '''cat 03_curated_fasta/others_indexed.fasta 03_curated_fasta/structural_indexed.fasta | sed '/^$/d' > 03_curated_fasta/other_plus_structural.fasta'''
print(command)
__=subprocess.run(command,shell=True,check=True, text=True)
# %%
command='''cd-hit -i 03_curated_fasta/other_plus_structural.fasta -o 03_curated_fasta/hitmix.fasta -M 0 -T 0 -c 0.6 -n 3 > errlog'''
print(command)
__=subprocess.run(command,shell=True,check=True, text=True)
# %%
command='''perl get_others_id_argv.pl 03_curated_fasta/hitmix.fasta.clstr 03_curated_fasta/others.index 03_curated_fasta/others_indexed.fasta > 03_curated_fasta/others.fasta'''
print(command)
__=subprocess.run(command,shell=True,check=True, text=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
17519,
25,
20966,
2047,
65,
11,
9078,
25,
25067,
1... | 2.251198 | 1,043 |
import bpy
import sys
import os
dir = os.path.dirname(bpy.data.filepath)
if not dir in sys.path:
sys.path.append(dir)
from RT_ParticleExporter.ParticleExporter import ParticleExporter
classes = [
PEProperties,
PEXPORT_PT_main_panel,
PEXPORT_OT_Export,
PEXPORT_OT_GetParticles,
]
if __name__ == "__main__":
register() | [
11748,
275,
9078,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
65,
9078,
13,
7890,
13,
7753,
6978,
8,
198,
361,
407,
26672,
287,
25064,
13,
6978,
25,
198,
220,
220,
220,
25064,
13,... | 2.4375 | 144 |
import logging
import socket
from thrift.transport.TTransport import TTransportException
from jiffy.directory.directory_client import ReplicaChain
from jiffy.directory.ttypes import rpc_replica_chain
from jiffy.storage import block_request_service
from jiffy.storage.block_client import BlockClient
from jiffy.storage.compat import b
from jiffy.storage.command import CommandType
| [
11748,
18931,
198,
11748,
17802,
198,
198,
6738,
5636,
2135,
13,
7645,
634,
13,
51,
8291,
634,
1330,
309,
8291,
634,
16922,
198,
198,
6738,
474,
733,
88,
13,
34945,
13,
34945,
62,
16366,
1330,
18407,
3970,
35491,
198,
6738,
474,
733,
... | 3.522936 | 109 |
# Copyright 2015 Rafe Kaplan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from booze import util
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
1853,
371,
8635,
37105,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,... | 3.683333 | 180 |
import setuptools
with open("README.md", "r", encoding="utf-8") as file:
long_description = file.read()
setuptools.setup(
name="csv_db_package",
version="0.0.3",
author="Ankita Liya",
author_email="ankitaliya321@gmail.com",
description="This package is found useful for those who wants to modify their CSV file without using database."
"It creates a local server that having a functionality of uploading a csv file and "
"then perform crud operations through browser itself."
,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ankitaliya/csv_db_package",
project_urls={
"Bug Tracker": "https://github.com/ankitaliya/csv_db_package/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
package_data={'csv_db_package': ['Templates/*.html']},
install_requires=['mysql-connector-python', 'Jinja2', 'pandas'],
python_requires=">=3.7",
) | [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
2393,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
2393,
13,
961,
3419,
198,
198,
2617,
37623,
... | 2.633987 | 459 |
import PyQt5
from PyQt5 import QtWidgets, uic
from PyQt5.uic import loadUi
from PyQt5.QtWidgets import QApplication,QMainWindow,QDialog,QWidget
import arcpy
import os
import sys
app = QtWidgets.QApplication([])
win = MainPage()
win.show()
try:
sys.exit(app.exec())
except:
print("EXITING") | [
11748,
9485,
48,
83,
20,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
54,
312,
11407,
11,
334,
291,
198,
6738,
9485,
48,
83,
20,
13,
84,
291,
1330,
3440,
52,
72,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195... | 2.316176 | 136 |
"""
The MIT License (MIT)
Copyright (c) 2021-present Village
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import List, Optional, TypedDict
from .snowflake import Snowflake
from .user import User
__all__ = ("Emoji",)
| [
37811,
198,
464,
17168,
13789,
357,
36393,
8,
198,
198,
15269,
357,
66,
8,
33448,
12,
25579,
14812,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
30073,
286,
428,
3788,
290,
3917,
... | 3.820122 | 328 |
import discord
from discord.ext import commands
class yoda_say:
"""May the force be with you"""
@commands.command()
async def yoda_say(self, sentence):
""" Credit to Zenadix and muddyfish
source: https://codegolf.stackexchange.com/questions/68559/
a-yoda-speaking-challenge-this-is
"""
t = sentence.split()
u = ' '.join(t[2:]).rstrip('!.')
if t[0].lower() in 'i we you he she it they'.split():
sentence = "{}{}, {} {}.".format(
u[0].upper(),
u[1:],
['I', t[0].lower()][t[0] != 'I'],
t[1])
await self.bot.say(sentence)
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
198,
4871,
331,
11329,
62,
16706,
25,
198,
220,
220,
220,
37227,
6747,
262,
2700,
307,
351,
345,
37811,
628,
220,
220,
220,
2488,
9503,
1746,
13,
21812,
3419,
198,
220,
220,
... | 1.991124 | 338 |
import cv2
import numpy as np
import dlib
import sys
# 얼굴 검출기와 랜드마크 검출기 생성 --- ①
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
# 얼굴 및 랜드마크 검출해서 좌표 반환하는 함수 ---②
# 랜드마크 좌표로 들로네 삼각형 반환 ---③
# 삼각형 어핀 변환 함수 ---④
if __name__ == '__main__' :
# 이미지 읽기 ---⑤
img1 = cv2.imread('../img/boy_face.jpg')
img2 = cv2.imread('../img/girl_face.jpg')
cv2.imshow('img1', img1)
cv2.imshow('img2', img2)
img_draw = img2.copy()
# 각 이미지에서 얼굴 랜드마크 좌표 구하기--- ⑥
points1 = getPoints(img1)
points2 = getPoints(img2)
# 랜드마크 좌표로 볼록 선체 구하기 --- ⑦
hullIndex = cv2.convexHull(np.array(points2), returnPoints = False)
hull1 = [points1[int(idx)] for idx in hullIndex]
hull2 = [points2[int(idx)] for idx in hullIndex]
# 볼록 선체 안 들로네 삼각형 좌표 구하기 ---⑧
triangles = getTriangles(img2, hull2)
# 각 삼각형 좌표로 삼각형 어핀 변환 ---⑨
for i in range(0, len(triangles)):
t1 = [hull1[triangles[i][j]] for j in range(3)]
t2 = [hull2[triangles[i][j]] for j in range(3)]
warpTriangle(img1, img_draw, t1, t2)
# 볼록선체를 마스크로 써서 얼굴 합성 ---⑩
mask = np.zeros(img2.shape, dtype = img2.dtype)
cv2.fillConvexPoly(mask, np.int32(hull2), (255, 255, 255))
r = cv2.boundingRect(np.float32([hull2]))
center = ((r[0]+int(r[2]/2), r[1]+int(r[3]/2)))
output = cv2.seamlessClone(np.uint8(img_draw), img2, mask, center, \
cv2.NORMAL_CLONE)
cv2.imshow("Face Swapped", output)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
288,
8019,
198,
11748,
25064,
198,
198,
2,
23821,
244,
120,
166,
113,
112,
220,
166,
110,
222,
168,
114,
250,
166,
116,
108,
168,
247,
222,
31619,
252,
250,
167,
... | 1.386285 | 1,152 |
#input your own client info from
#reddit>user settings>privacy &security>manage third-party app authorization
CLIENT_ID = ""
CLIENT_SECRETS = ""
USER_AGENT = "" # any input | [
2,
15414,
534,
898,
5456,
7508,
422,
220,
198,
2,
10748,
29,
7220,
6460,
29,
13776,
1590,
1222,
12961,
29,
805,
496,
2368,
12,
10608,
598,
19601,
198,
5097,
28495,
62,
2389,
796,
13538,
198,
5097,
28495,
62,
23683,
2200,
4694,
796,
... | 3.264151 | 53 |
# coding: utf-8
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
t_4sdian = db.Table(
'4sdian',
db.Column('id', db.Integer),
db.Column('loyalty', db.Float(asdecimal=True)),
db.Column('frequency', db.Integer),
db.Column('money', db.Float(asdecimal=True)),
db.Column('L', db.Text),
db.Column('F', db.Text),
db.Column('M', db.Text),
db.Column('LFM', db.Text)
)
t_exasens = db.Table(
'exasens',
db.Column('Diagnosis', db.Text),
db.Column('ID', db.Text),
db.Column('Imaginary Part', db.Text),
db.Column('MyUnknownColumn', db.Text),
db.Column('Real Part', db.Text),
db.Column('MyUnknownColumn_[0]', db.Text),
db.Column('Gender', db.Text),
db.Column('Age', db.Text),
db.Column('Smoking', db.Text),
db.Column('MyUnknownColumn_[1]', db.Text),
db.Column('MyUnknownColumn_[2]', db.Text),
db.Column('MyUnknownColumn_[3]', db.Text),
db.Column('MyUnknownColumn_[4]', db.Text)
)
t_forestfires = db.Table(
'forestfires',
db.Column('X', db.Integer),
db.Column('Y', db.Integer),
db.Column('month', db.Text),
db.Column('day', db.Text),
db.Column('FFMC', db.Float(asdecimal=True)),
db.Column('DMC', db.Float(asdecimal=True)),
db.Column('DC', db.Float(asdecimal=True)),
db.Column('ISI', db.Float(asdecimal=True)),
db.Column('temp', db.Float(asdecimal=True)),
db.Column('RH', db.Integer),
db.Column('wind', db.Float(asdecimal=True)),
db.Column('rain', db.Float(asdecimal=True)),
db.Column('area', db.Integer)
)
t_orders_central = db.Table(
'orders_central',
db.Column('Row ID', db.Integer),
db.Column('Order ID', db.Text),
db.Column('Ship Mode', db.Text),
db.Column('Customer ID', db.Text),
db.Column('Customer Name', db.Text),
db.Column('Segment', db.Text),
db.Column('Country', db.Text),
db.Column('City', db.Text),
db.Column('State', db.Text),
db.Column('Postal Code', db.Integer),
db.Column('Product ID', db.Text),
db.Column('Category', db.Text),
db.Column('Sub-Category', db.Text),
db.Column('Product', db.Text),
db.Column('Sales', db.Float(asdecimal=True)),
db.Column('Quantity', db.Integer),
db.Column('Profit', db.Float(asdecimal=True)),
db.Column('Order Year', db.Integer),
db.Column('Order Month', db.Integer),
db.Column('Order Day', db.Integer),
db.Column('Ship Year', db.Integer),
db.Column('Ship Month', db.Integer),
db.Column('Ship Day', db.Integer),
db.Column('Discounts', db.Text)
)
t_orders_south_2015 = db.Table(
'orders_south_2015',
db.Column('Sales', db.Float(asdecimal=True)),
db.Column('Quantity', db.Integer),
db.Column('Profit', db.Float(asdecimal=True)),
db.Column('Discount', db.Float(asdecimal=True)),
db.Column('Region', db.Text),
db.Column('State', db.Text),
db.Column('Row ID', db.Integer),
db.Column('Order ID', db.Text),
db.Column('Order Date', db.Text),
db.Column('Ship Date', db.Text),
db.Column('Ship Mode', db.Text),
db.Column('Customer ID', db.Text),
db.Column('Customer Name', db.Text),
db.Column('Segment', db.Text),
db.Column('Country', db.Text),
db.Column('City', db.Text),
db.Column('Postal Code', db.Integer),
db.Column('Product ID', db.Text),
db.Column('Category', db.Text),
db.Column('Sub-Category', db.Text),
db.Column('Product Name', db.Text)
)
t_orders_south_2016 = db.Table(
'orders_south_2016',
db.Column('Sales', db.Float(asdecimal=True)),
db.Column('Quantity', db.Integer),
db.Column('Profit', db.Float(asdecimal=True)),
db.Column('Discount', db.Float(asdecimal=True)),
db.Column('Region', db.Text),
db.Column('State', db.Text),
db.Column('Row ID', db.Integer),
db.Column('Order ID', db.Text),
db.Column('Order Date', db.Text),
db.Column('Ship Date', db.Text),
db.Column('Ship Mode', db.Text),
db.Column('Customer ID', db.Text),
db.Column('Customer Name', db.Text),
db.Column('Segment', db.Text),
db.Column('Country', db.Text),
db.Column('City', db.Text),
db.Column('Postal Code', db.Integer),
db.Column('Product ID', db.Text),
db.Column('Category', db.Text),
db.Column('Sub-Category', db.Text),
db.Column('Product Name', db.Text)
)
t_orders_south_2017 = db.Table(
'orders_south_2017',
db.Column('Sales', db.Float(asdecimal=True)),
db.Column('Quantity', db.Integer),
db.Column('Profit', db.Float(asdecimal=True)),
db.Column('Discount', db.Integer),
db.Column('Region', db.Text),
db.Column('State', db.Text),
db.Column('Row ID', db.Integer),
db.Column('Order ID', db.Text),
db.Column('Order Date', db.Text),
db.Column('Ship Date', db.Text),
db.Column('Ship Mode', db.Text),
db.Column('Customer ID', db.Text),
db.Column('Customer Name', db.Text),
db.Column('Segment', db.Text),
db.Column('Country', db.Text),
db.Column('City', db.Text),
db.Column('Postal Code', db.Integer),
db.Column('Product ID', db.Text),
db.Column('Category', db.Text),
db.Column('Sub-Category', db.Text),
db.Column('Product Name', db.Text)
)
t_orders_south_2018 = db.Table(
'orders_south_2018',
db.Column('Sales', db.Float(asdecimal=True)),
db.Column('Quantity', db.Integer),
db.Column('Profit', db.Float(asdecimal=True)),
db.Column('Discount', db.Float(asdecimal=True)),
db.Column('Region', db.Text),
db.Column('State', db.Text),
db.Column('Row ID', db.Integer),
db.Column('Order ID', db.Text),
db.Column('Order Date', db.Text),
db.Column('Ship Date', db.Text),
db.Column('Ship Mode', db.Text),
db.Column('Customer ID', db.Text),
db.Column('Customer Name', db.Text),
db.Column('Segment', db.Text),
db.Column('Country', db.Text),
db.Column('City', db.Text),
db.Column('Postal Code', db.Integer),
db.Column('Product ID', db.Text),
db.Column('Category', db.Text),
db.Column('Sub-Category', db.Text),
db.Column('Product Name', db.Text)
)
t_orders_west = db.Table(
'orders_west',
db.Column('Row ID', db.Integer),
db.Column('Order ID', db.Text),
db.Column('Order Date', db.Text),
db.Column('Ship Date', db.Text),
db.Column('Ship Mode', db.Text),
db.Column('Customer ID', db.Text),
db.Column('Customer Name', db.Text),
db.Column('Segment', db.Text),
db.Column('Country', db.Text),
db.Column('City', db.Text),
db.Column('Postal Code', db.Integer),
db.Column('Region', db.Text),
db.Column('Product ID', db.Text),
db.Column('Category', db.Text),
db.Column('Sub-Category', db.Text),
db.Column('Product Name', db.Text),
db.Column('Sales', db.Float(asdecimal=True)),
db.Column('Quantity', db.Integer),
db.Column('Discount', db.Float(asdecimal=True)),
db.Column('Profit', db.Float(asdecimal=True)),
db.Column('Right_Row ID', db.Integer),
db.Column('Right_Order Date', db.Text),
db.Column('Right_Ship Date', db.Text),
db.Column('Right_Ship Mode', db.Text),
db.Column('Right_Customer ID', db.Text),
db.Column('Right_Customer Name', db.Text),
db.Column('Right_Segment', db.Text),
db.Column('Right_Country', db.Text),
db.Column('Right_City', db.Text),
db.Column('Right_State2', db.Text),
db.Column('Right_Postal Code', db.Integer),
db.Column('Right_Region', db.Text),
db.Column('Right_Product ID', db.Text),
db.Column('Right_Category', db.Text),
db.Column('Right_Sub-Category', db.Text),
db.Column('Right_Product Name', db.Text),
db.Column('Right_Sales', db.Float(asdecimal=True)),
db.Column('Right_Quantity', db.Integer),
db.Column('Right_Discount', db.Float(asdecimal=True)),
db.Column('Right_Profit', db.Float(asdecimal=True)),
db.Column('State', db.Text)
)
t_train = db.Table(
'train',
db.Column('id', db.Integer),
db.Column('club', db.Integer),
db.Column('league', db.Integer),
db.Column('birth_date', db.Text),
db.Column('height_cm', db.Integer),
db.Column('weight_kg', db.Integer),
db.Column('nationality', db.Integer),
db.Column('potential', db.Integer),
db.Column('pac', db.Integer),
db.Column('sho', db.Integer),
db.Column('pas', db.Integer),
db.Column('dri', db.Integer),
db.Column('def', db.Integer),
db.Column('phy', db.Integer),
db.Column('international_reputation', db.Integer),
db.Column('skill_moves', db.Integer),
db.Column('weak_foot', db.Integer),
db.Column('work_rate_att', db.Text),
db.Column('work_rate_def', db.Text),
db.Column('preferred_foot', db.Integer),
db.Column('crossing', db.Integer),
db.Column('finishing', db.Integer),
db.Column('heading_accuracy', db.Integer),
db.Column('short_passing', db.Integer),
db.Column('volleys', db.Integer),
db.Column('dribbling', db.Integer),
db.Column('curve', db.Integer),
db.Column('free_kick_accuracy', db.Integer),
db.Column('long_passing', db.Integer),
db.Column('ball_control', db.Integer),
db.Column('acceleration', db.Integer),
db.Column('sprint_speed', db.Integer),
db.Column('agility', db.Integer),
db.Column('reactions', db.Integer),
db.Column('balance', db.Integer),
db.Column('shot_power', db.Integer),
db.Column('jumping', db.Integer),
db.Column('stamina', db.Integer),
db.Column('strength', db.Integer),
db.Column('long_shots', db.Integer),
db.Column('aggression', db.Integer),
db.Column('interceptions', db.Integer),
db.Column('positioning', db.Integer),
db.Column('vision', db.Integer),
db.Column('penalties', db.Integer),
db.Column('marking', db.Integer),
db.Column('standing_tackle', db.Integer),
db.Column('sliding_tackle', db.Integer),
db.Column('gk_diving', db.Integer),
db.Column('gk_handling', db.Integer),
db.Column('gk_kicking', db.Integer),
db.Column('gk_positioning', db.Integer),
db.Column('gk_reflexes', db.Integer),
db.Column('rw', db.Float(asdecimal=True)),
db.Column('rb', db.Float(asdecimal=True)),
db.Column('st', db.Float(asdecimal=True)),
db.Column('lw', db.Float(asdecimal=True)),
db.Column('cf', db.Float(asdecimal=True)),
db.Column('cam', db.Float(asdecimal=True)),
db.Column('cm', db.Float(asdecimal=True)),
db.Column('cdm', db.Float(asdecimal=True)),
db.Column('cb', db.Float(asdecimal=True)),
db.Column('lb', db.Float(asdecimal=True)),
db.Column('gk', db.Text),
db.Column('y', db.Float(asdecimal=True))
)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
628,
198,
9945,
796,
16363,
2348,
26599,
3419,
628,
198,
198,
83,
62,
19,
21282,
666,
796,
20613,
13,
10962,
7,
198,
220,
220,
220,
... | 2.467584 | 4,288 |
import glob as gl
import pandas as pd
# Read original data
fname = 'diagnosis-of-covid-19-and-its-clinical-spectrum.csv'
path = gl.glob(f'../../**//**/{fname}')
df = pd.read_csv(path[0])
# Choose interesting columns and remove missing data
with open('data-columns.txt', 'r') as f:
colnames = f.read().splitlines()
df = df[colnames].dropna()
df = df.reset_index(drop=True)
print(df[df['sars_cov_2_exam_result'] == 'positive'].iloc[18])
# Total Patients
print(f'Total patients: {len(df)}')
neg = sum(df['sars_cov_2_exam_result'] == 'negative')
print(f'Negative patients: {neg}')
print(f'Positive patients: {len(df) - neg}') | [
11748,
15095,
355,
1278,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
2,
4149,
2656,
1366,
198,
69,
3672,
796,
705,
47356,
5958,
12,
1659,
12,
66,
709,
312,
12,
1129,
12,
392,
12,
896,
12,
47367,
12,
4443,
6582,
13,
40664,
6,
... | 2.538462 | 247 |
from Crypto.Cipher import AES
ZEROIV = "\x00"*16
def removePadding(blocksize, s):
'Remove rfc 1423 padding from string.'
n = ord(s[-1]) # last byte contains number of padding bytes
if n > blocksize or n > len(s):
raise Exception('invalid padding')
return s[:-n]
| [
6738,
36579,
13,
34,
10803,
1330,
34329,
201,
198,
201,
198,
57,
34812,
3824,
796,
37082,
87,
405,
1,
9,
1433,
201,
198,
4299,
4781,
47,
26872,
7,
27372,
1096,
11,
264,
2599,
201,
198,
220,
220,
220,
705,
27914,
374,
16072,
1478,
... | 2.525424 | 118 |
#!/usr/bin/env python
from distutils.core import setup
setup(name='pysett',
version='0.1',
description='python xml setting parser',
author='wizath',
author_email='wm.goldio@gmail.com',
url='https://github.com/wizath/pysett',
packages=['pysett'],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
79,
893,
3087,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
3256,
198,
220,
220,
220,
... | 2.299213 | 127 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
198
] | 3.166667 | 6 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#############################################################################
# Copyright 2018 Konrad Sakowski, Stanislaw Krukowski, Pawel Strak
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
#
import sys
import argparse
import math
import numpy
import numpy.linalg
import scipy.sparse
import scipy.sparse.linalg
import scipy.interpolate
import scipy.optimize
import scipy.ndimage
import csv
import collections
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='(Linear-)least-squares-based fitting algorithm.');
parser.add_argument('sourcefile', type=str, help='CSV data file')
parser.add_argument('N', type=int, nargs='?', default=0, help='Column number, indexed from 0')
parser.add_argument('--ct', type=int, default=0, help='Column number for time (default: 0)')
parser.add_argument('--delimiter', type=str, default='\t', help='CSV column delimiter')
parser.add_argument('--title', type=str, default="", help='Title of produced figures')
parser.add_argument('--savecsv', type=str, default=None, help='Save results to this file')
parser.add_argument('--n0', type=int, default=1, help='Initial averaging interval')
parser.add_argument('--n1', type=int, default=100, help='Final averaging interval')
parser.add_argument('--iters', type=int, default=1, help='Multiple iterations of the averaging')
parser.add_argument('--Jodj', type=float, default=None, help='Constant subtracted from the experimental J data')
parser.add_argument('--tmin', type=float, default=-numpy.inf, help='Minimal t')
parser.add_argument('--tmax', type=float, default=numpy.inf, help='Maximal t')
parser.add_argument('--tpoint', type=float, default=None, help='Auxiliary indicatory point to be put on the figures')
parser.add_argument('--st', type=int, default=1, help='Polynomial degree for least-squares fitting')
parser.add_argument('--nielinznk', action='store_true', default=False, help='Use nonlinear least squares (default is to use linear)')
parser.add_argument('--gaussian', action='store_true', default=False, help='Use Gaussian filtering (default: no)')
parser.add_argument('--averageoutfirst', action='store_true', default=False, help='First average out J, then subtract Jodj (default is to subtract Jodj from J and then to average out)')
parser.add_argument('--noplots', action='store_true', default=False, help='Suppress plots')
parser.add_argument('--ignorezeros', action='store_true', default=False, help='Ignore measurements which were equal to 0')
args = parser.parse_args()
plots = not args.noplots;
# Zwraca zakres indeksów zawarty w [0,N), który jest długości 2*n+1 i zwykle środkiem jest liczba "i", poza sytuacją, gdy "i" jest zbyt blisko brzegów i po prostu zwracany jest zakres przy brzegu o długości średnicy
main();
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
7804,
4242,
2,
198,
2,
220,
220,
15069,
2864,
17431,
6335,
13231,
12079,
11,
7299,
3044,
707,
33909,
... | 3.217597 | 1,057 |
from ImportMLModel import main
import demistomock as demisto
done = False
| [
6738,
17267,
5805,
17633,
1330,
1388,
198,
198,
11748,
1357,
396,
296,
735,
355,
1357,
396,
78,
198,
198,
28060,
796,
10352,
628
] | 3.347826 | 23 |
import json
from datetime import date, datetime, timedelta
from typing import Any, Generator, Generic, List, Optional, TypeVar
import pandas as pd
import plotly.express as px
import tinytuya
from pydantic import BaseModel
from pydantic.generics import GenericModel
from pytemperature import f2c
from pytz import timezone
from tuya_connector import TuyaOpenAPI
PERTH = timezone("Australia/Perth")
T = TypeVar("T")
d = tinytuya.Cloud()
if __name__ == "__main__":
main()
| [
11748,
33918,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
11,
28805,
12514,
198,
6738,
19720,
1330,
4377,
11,
35986,
11,
42044,
11,
7343,
11,
32233,
11,
5994,
19852,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
7110,
... | 3.154839 | 155 |
"""Automated smart testing strategies for web services.
This 'agilkia' package is for testing web services and managing set of traces.
Traces may come from user interactions, or from automated test suites, etc.
The main data structure for traces is the ``TraceSet``:
* class TraceSet supports loading/saving traces as JSON, converting to Pandas, etc.
* class Trace is used by TraceSet, and contains a list of Events.
* Each Event is a dict that contains at least the following keys:
- "action" gives the name of the action (a string);
- "inputs" is a dict of input parameter names to values;
- "outputs" is a dict of output parameter names to values.
Automated test generation facilities include:
* RandomTester generates random test sequences.
* SmartTester generates tests from an ML model
(Currently this is included in RandomTester.generate_trace_ml,
but this will be split into a separate class shortly).
"""
# This package follows a 'Convenience Store' model.
# That is, it directly exports all the features that will be useful to users.
# They do not need to import sub-modules.
#
# See the article: "What’s __init__ for me?" by Jacob Deppen on TowardsDataScience.com:
# https://towardsdatascience.com/whats-init-for-me-d70a312da583
__version__ = '0.8.0'
from . random_tester import (read_input_rules, uniq, build_interface, print_signatures,
TracePrefixExtractor, RandomTester, SmartSequenceGenerator,
DUMP_WSDL, DUMP_SIGNATURES, GOOD_PASSWORD, TRACE_END)
from . json_traces import (Event, Trace, TraceSet, TraceEncoder, TRACE_SET_VERSION,
MetaData, xml_decode, all_action_names, safe_name,
default_map_to_chars, trace_to_string, traces_to_pandas)
from . trace_set_optimizer import *
from . data_generator import *
| [
37811,
38062,
515,
4451,
4856,
10064,
329,
3992,
2594,
13,
198,
198,
1212,
705,
363,
43545,
544,
6,
5301,
318,
329,
4856,
3992,
2594,
290,
11149,
900,
286,
20675,
13,
198,
2898,
2114,
743,
1282,
422,
2836,
12213,
11,
393,
422,
16359,
... | 3.006494 | 616 |
from .correios_sigep import CorreiosSIGEPCEPProvider # noqa
from .republicavirtual import RepublicaVirtualCEPProvider # noqa
from .viacep import ViaCEPProvider # noqa
| [
6738,
764,
10215,
260,
4267,
62,
82,
328,
538,
1330,
2744,
260,
4267,
50,
3528,
8905,
5222,
47,
29495,
220,
1303,
645,
20402,
198,
6738,
764,
7856,
841,
615,
22341,
1330,
2066,
64,
37725,
5222,
47,
29495,
220,
1303,
645,
20402,
198,
... | 2.982456 | 57 |
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
WithdrawnRegistrationFactory,
RegistrationFactory,
InstitutionFactory,
AuthUserFactory,
NodeFactory,
)
from website.util import permissions
@pytest.mark.django_db
@pytest.mark.django_db
| [
11748,
12972,
9288,
198,
198,
6738,
40391,
13,
8692,
13,
33692,
13,
12286,
82,
1330,
7824,
62,
33,
11159,
198,
6738,
267,
28202,
62,
41989,
13,
22584,
1749,
1330,
357,
198,
220,
220,
220,
2080,
41549,
47133,
22810,
11,
198,
220,
220,
... | 3 | 103 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.stats, name='rq_stats'),
url(r'^queues/(?P<queue>.+)/$', views.queue, name='rq_queue'),
url(r'^workers/(?P<worker>.+)/$', views.worker, name='rq_worker'),
url(r'^jobs/(?P<job>.+)/$', views.job, name='rq_job'),
url(r'^scheduler/(?P<queue>.+)/$', views.scheduler, name='rq_scheduler'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
5009,
13,
34242,
11,
1438,
11639,
81,
80,
62,
34242,... | 2.206704 | 179 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Print experiment.xml
# https://github.com/IHEC/ihec-metadata/blob/master/specs/Ihec_metadata_specification.md
# Ihec_metadata_specification.md:
# Chromatin Accessibility, WGBS, MeDIP-Seq, MRE-Seq, ChIP-Seq, RNA-Seq
# I added ATAC-seq, RRBS following existing data format
# Missing: microRNA counts, transcription profiling by array assay
# IHEC also requires format forllowing
# https://www.ebi.ac.uk/ena/submit/read-xml-format-1-5, see SRA.experiment.xsd
import argparse
from datetime import date
import json
import logging
import sys
import requests
BASE_URL = 'https://encodeproject.org/{}'
PROJECT_PROPS = {
'ENCODE': {
"description": "ENCODE reference epigenome",
"description_url": "https://www.encodeproject.org/search/?type=ReferenceEpigenome&award.project=ENCODE", # noqa: E501
"email": "encode-help@lists.stanford.edu",
"name": "ENCODE reference epigenome",
"publishing_group": "ENCODE",
},
'Roadmap': {
"description": "NIH Roadmap reference epigenome",
"description_url": "https://www.encodeproject.org/search/?type=ReferenceEpigenome&award.project=Roadmap", # noqa: E501
"email": "encode-help@lists.stanford.edu",
"name": "NIH Roadmap reference epigenome",
"publishing_group": "NIH Roadmap",
},
}
ASSEMBLY_PROPS = {
'hg38': {"assembly": "hg38", "taxon_id": 9606},
'hg19': {"assembly": "hg19", "taxon_id": 9606},
'mm10': {"assembly": "mm10", "taxon_id": 10090},
}
# David from IHEC Data Hub asked us to submit just one hub JSON
# per project per assembly.
# Build one empty hub skeleton per (project, assembly) pair: the
# 'hub_description' merges the shared project/assembly properties with
# today's date; 'samples' and 'datasets' start empty.
merged_hubs = {
    (project, assembly): {
        'hub_description': {
            "date": date.today().strftime('%Y-%m-%d'),
            **PROJECT_PROPS[project],
            **ASSEMBLY_PROPS[assembly],
        },
        'samples': {},
        'datasets': {},
    }
    for project, assembly in [
        ('ENCODE', 'hg38'),
        ('ENCODE', 'hg19'),
        ('ENCODE', 'mm10'),
        ('Roadmap', 'hg38'),
        ('Roadmap', 'hg19'),
    ]
}
if __name__ == "__main__":
# execute only if run as a script
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
12578,
6306,
13,
19875,
198,
2,
3740,
1378,
12567,
13,
785,
14,
40,
39,
2943,
14,
72,
258,
66,
12,
38993,
... | 2.075052 | 1,439 |
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from .app_logic import calendar_functions
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
5436,
11395,
47139,
1352,
11,
1855,
11395,
47139,
1352,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12... | 3.516484 | 91 |
import zipfile
import os
# from tqdl import download
from tqdm import tqdm
import re
import gc
import numpy as np
import codecs
import gensim.downloader as api
from gensim.test.utils import get_tmpfile
class WE():
    """The Word embedding class.

    Container for a vocabulary and its vector matrix.  Embeddings can be
    loaded from disk (text, word2vec binary or numpy format) or, via
    gensim's downloader, by pre-trained embedding name.

    Attributes
    ----------
    n (int): Number of vocabulary words (set after loading).
    dim (int): Dimension of the embedding vectors (set after loading).
    words (list): Vocabulary, aligned with the rows of `vecs`.
    vecs (np.array): Vector matrix of shape (n, dim).
    index (dict): Maps a word to its row index in `vecs`.
    desc (str): Human-readable summary used by `__repr__`.
    normalized (bool): Whether the vectors have been L2-normalized.
    """

    def __init__(self):
        """Initialize an empty WE object; call `load` to populate it."""
        # self.downloader = Downloader()
        self.desc = "Word embedding loader for "

    def fname_to_format(self, fname):
        """Get embedding format from file name.

        The format is inferred from the filename extension.  We currently
        support binary (.bin), text (.txt) and numpy formats; anything
        unrecognized falls back to numpy (.npy).

        Args:
            fname (str): file name

        Return:
            format (str): format (txt, bin or npy)

        Raises:
            ValueError: if `fname` is None.
        """
        if fname is None:
            # BUG FIX: the original `raise "fname can't be None"` raised a
            # TypeError in Python 3 (only BaseException subclasses may be
            # raised) and was followed by an unreachable `return None`.
            raise ValueError("fname can't be None")
        if fname.endswith('.txt'):
            return 'txt'
        if fname.endswith('.bin'):
            return 'bin'
        return 'npy'

    def get_gensim_word_vecs(self, model):
        """Load words and vecs using gensim scripts.

        Args:
            model (gensim object): Model for accessing all the words in
                                   vocab, and their vectors.

        Return:
            words (list): vocabulary, sorted by the model's internal index.
            vecs (np.array): vectors aligned row-by-row with `words`.
        """
        words = sorted([w for w in model.vocab], key=lambda w: model.vocab[w].index)
        vecs = np.array([model[w] for w in words])
        return words, vecs

    def _load(self, fname, format, dim=300):
        """Internal load function.

        Loads the word embedding at location `fname` on disk.  Verify
        everything beforehand; there shall be no exceptions here.

        Args:
            fname (str): Path to the embedding file on disk.
            format (str): One of 'bin', 'txt' or 'npy'; inferred from the
                          filename when None.
            dim (int): The dimension of the embedding vectors.

        Return:
            words (list): List of vocabulary words.
            vecs (np.array): Word vectors of shape (n, dim).
        """
        vecs = []
        words = []
        if format is None:
            format = self.fname_to_format(fname)
        if format == 'bin':
            # gensim is only needed for the binary format: import lazily.
            import gensim.models
            model = gensim.models.KeyedVectors.load_word2vec_format(fname, binary=True)
            words, vecs = self.get_gensim_word_vecs(model)
        elif format == 'txt':
            with open(fname, "r") as f:
                lines = f.readlines()
            for line in lines:
                tokens = line.split()
                # The last `dim` tokens are the vector; everything before
                # is the word (multi-token words are joined with "_").
                v = np.array([float(x) for x in tokens[-dim:]])
                w = "_".join([str(x) for x in tokens[:-dim]])
                if len(v) != dim:
                    print(f"Weird line: {tokens} | {len(v)}")
                    continue
                words.append(w)
                vecs.append(v)
            # BUG FIX: `vecs` was left as a Python list, so the
            # `vecs.shape` unpacking below raised AttributeError for
            # text-format embeddings.
            vecs = np.array(vecs)
        else:
            # numpy format: `fname.vocab` holds one word per line and
            # `fname.wv.npy` holds the vector matrix.
            with codecs.open(fname + '.vocab', 'r',
                             'utf-8') as f_embed:
                words = [line.strip() for line in f_embed]
            vecs = np.load(fname + '.wv.npy')
        self.n, self.dim = vecs.shape
        self.desc = f"File: {fname}\tFormat: {format}\t" \
                    f"#Words: {self.n}\tDimension: {self.dim}"
        return words, vecs

    def load(self, fname=None, format=None, ename=None,
             normalize=False, dim=300):
        """Load word embedding from filename or embedding name.

        Loads word embeddings from either filename `fname` or the
        embedding name `ename`.  Supported file formats:

        - bin: Binary format, load through gensim.
        - txt: Text w2v or GloVe format.
        - npy: Numpy format; `fname.wv.npy` contains the numpy vectors
          while `fname.vocab` contains the vocabulary list.

        All gensim pre-trained embeddings are accessible via `ename`
        (same names as the gensim conventions), e.g.::

            E = WE().load('glove6B.txt', dim=300)
            E = WE().load(ename='glove-wiki-gigaword-50')

        Args:
            fname (str): Path to the embedding file on disk.
            format (str): 'bin', 'txt' or 'npy'; inferred from `fname`
                          when None.
            ename (str): Name of embedding, downloaded through gensim.
                         Takes priority when both `ename` and `fname` are
                         provided.
            normalize (bool): Normalize word vectors or not.
            dim (int): The dimension of embedding vectors (default 300).

        Return:
            self (WE object): Return self, the word embedding object.
        """
        if ename is not None:
            model = api.load(ename)
            words, vecs = self.get_gensim_word_vecs(model)
        else:
            words, vecs = self._load(fname, format, dim)
        self.words = words
        self.vecs = vecs
        self.reindex()
        self.normalized = normalize
        if normalize:
            self.normalize()
        return self

    def reindex(self):
        """Rebuild the word -> row index and refresh `n` and `dim`."""
        self.index = {w: i for i, w in enumerate(self.words)}
        self.n, self.dim = self.vecs.shape
        assert self.n == len(self.words) == len(self.index)

    def v(self, word):
        """Return the `self.dim`-dimensional vector for `word`.

        Args:
            word (str): Word to access the vector of.

        Return:
            vec (np.array): `self.dim` dimension vector for `word`.

        Raises:
            KeyError: if `word` is not in the vocabulary.
        """
        return self.vecs[self.index[word]]

    def normalize(self):
        """Normalize word embeddings in place.

        Each vector is divided by its L2 norm:
            \\vec{v}_{norm} := \\vec{v}/|\\vec{v}|
        """
        self.vecs /= np.linalg.norm(self.vecs, axis=1)[:, np.newaxis]
        self.reindex()
        self.desc += "\tNormalized: True"
        self.normalized = True

    def __repr__(self):
        """Pretty informational summary of the loaded embedding."""
        return self.desc
# if __name__ == "__main__":
# E = WE().load(ename = "glove-wiki-gigaword-100", normalize=True)
# print(E.v('dog'))
# print(E) | [
11748,
19974,
7753,
198,
11748,
28686,
198,
2,
422,
256,
80,
25404,
1330,
4321,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
302,
198,
11748,
308,
66,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
40481,
82,
198,
117... | 1.951849 | 3,759 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# alvadescpy/wrapper.py
# v.0.1.2
# Developed in 2019 by Travis Kessler <travis.j.kessler@gmail.com>
#
# contains `alvadesc` function, a wrapper for alvaDesc software
#
# stdlib. imports
from subprocess import check_output, PIPE, Popen, call
from csv import writer, QUOTE_ALL
from typing import TypeVar
import platform
from os.path import realpath
# path to alvaDesc command line interface executable
# Default install location of the alvaDescCLI executable, per platform.
CONFIG = {
    'alvadesc_path': None
}
# NOTE: `plt` is the platform name here (not matplotlib.pyplot).
plt = platform.system()
if plt == 'Windows':
    CONFIG['alvadesc_path'] = 'C:\\Program Files\\Alvascience\\alvaDesc\\alvaDescCLI.exe'
elif plt == 'Darwin':  # macOS
    CONFIG['alvadesc_path'] = '/Applications/alvaDesc.app/Contents/MacOS/alvaDescCLI'
elif plt == 'Linux':
    CONFIG['alvadesc_path'] = '/usr/bin/alvaDescCLI'
else:
    # Fail fast at import time on unsupported platforms.
    raise RuntimeError('Unknown/unsupported operating system: {}'.format(plt))
# custom argument variable (either str or list)
_DESC = TypeVar('_DESC', str, list)
def _sub_call(command: str) -> list:
''' _sub_call: calls alvaDesc via subprocess.Popen
Args:
command (str): command to execute
Returns:
list: list of lists, where each sublist is a molecule's descriptors
'''
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
except FileNotFoundError as exception:
raise FileNotFoundError('{}\n alvaDescCLI not found at {}'.format(
exception, CONFIG['alvadesc_path']
))
except Exception as exception:
raise Exception('{}'.format(exception))
return p.communicate()[0].decode('utf-8')
def alvadesc(script: str=None, ismiles: str=None, input_file: str=None,
             inputtype: str=None, descriptors: _DESC=None, labels: bool=False,
             ecfp: bool=False, pfp: bool=False, fpsize: int=1024, fpmin: int=0,
             fpmax: int=2, count: bool=True, bits: int=2, fpoptions: str=None,
             maccsfp: bool=False, output: str=None, threads: int=None) -> list:
    ''' alvadesc: calls alvaDesc's command line interface; supports all arguments

    Args:
        script (str): path to script file containing all available options; if
            supplied, nothing else should be supplied
        ismiles (str): use a single SMILES string as input
        input_file (str): uses a set of molecules in this file as inputs
        inputtype (str): if `input_file` is supplied, this is mandatory (e.g.
            `SMILES`, `MDL`, `SYBYL`, `HYPERCHEM`)
        descriptors (str, list): `ALL` for all descriptors, or a list for
            specific descriptors
        labels (bool): if `True`, adds descriptor and molecule labels
        ecfp (bool): if `True`, calculates extended connectivity fingerprint
        pfp (bool): if `True`, calculates path fingerprint
        fpsize (int): size of hashed fingerprint (default 1024)
        fpmin (int): minimum fragment length for hashed fingerprint (default 0)
        fpmax (int): maximum fragments for hashed fingerprint (default 2)
        count (bool): if `True`, counts fragments for hashed fingerprint
            (default True)
        bits (int): bits per pattern for hashed fingerprint (default 2)
        fpoptions (str): atom types for hashed fingerprint (default Atom type,
            Aromaticity, Charge, Connectivity (total), Bond order)
        maccsfp (bool): if `True`, calculates MACCS116 fingerprint
        output (str): if not `None`, saves descriptors to this file
        threads (int): number of threads used in the calculation (default:
            equal to the maximum number of CPUs)

    Returns:
        list: if `labels` is True, a list of dicts (one per molecule);
            otherwise a list of per-molecule descriptor-value lists; if any
            fingerprint is calculated no labels are included

    Raises:
        ValueError: on mutually-exclusive or incomplete argument combinations
    '''
    if script is not None:
        # BUG FIX: the executable path was missing, so Popen tried to run
        # the literal string ' --script=...' as a program name and always
        # failed with FileNotFoundError.
        _ = _sub_call([CONFIG['alvadesc_path'], '--script={}'.format(script)])
        return
    if ismiles is not None and input_file is not None:
        raise ValueError('`ismiles` and `input_file` cannot both be supplied')
    if input_file is not None and inputtype is None:
        raise ValueError('Must supply `inputtype` if supplying `input_file`')
    # Build the CLI argument list incrementally.
    command = [CONFIG['alvadesc_path']]
    if ismiles is not None:
        command.append('--iSMILES={}'.format(ismiles))
    if input_file is not None:
        command.append('--input={}'.format(input_file))
        command.append('--inputtype={}'.format(inputtype))
    if output is not None:
        command.append('--output={}'.format(output))
    if threads is not None:
        command.append('--threads={}'.format(threads))
    if ecfp is True or pfp is True or maccsfp is True:
        if sum([ecfp, pfp, maccsfp]) > 1:
            raise ValueError('Only one type of fingerprint can be calculated')
        if ecfp is True:
            command.append('--ecfp')
        if pfp is True:
            command.append('--pfp')
        if maccsfp is True:
            command.append('--maccsfp')
        command.append('--size={}'.format(fpsize))
        command.append('--min={}'.format(fpmin))
        command.append('--max={}'.format(fpmax))
        command.append('--bits={}'.format(bits))
        if count is not True:
            command.append('--count=FALSE')
        if fpoptions is not None:
            command.append('--fpoptions={}'.format(fpoptions))
    if labels is True:
        command.append('--labels')
    if descriptors is not None:
        if descriptors == 'ALL':
            command.append('--descriptors=ALL')
        elif isinstance(descriptors, list):
            # Join the requested descriptor names into a comma-separated
            # list (replaces the manual index-tracking loop).
            command.append('--descriptors=' + ','.join(
                '{}'.format(d) for d in descriptors))
        else:
            raise ValueError('Unknown `descriptors` argument: {}'.format(
                descriptors
            ))
    # alvaDesc emits one tab-separated line per molecule; drop the empty
    # element produced by the trailing newline.
    descriptors_raw = _sub_call(command).split('\n')[:-1]
    val_start_idx = 0
    if labels is True:
        # With --labels, the first line holds the descriptor names.
        desc_names = descriptors_raw[0].split('\t')
        val_start_idx = 1
    desc_vals = []
    for d in descriptors_raw[val_start_idx:]:
        _vals = d.split('\t')
        for vidx, v in enumerate(_vals):
            # Convert numeric fields in place; keep non-numeric text as-is.
            try:
                _vals[vidx] = float(v)
            except ValueError:
                continue
        desc_vals.append(_vals)
    if labels is False:
        return desc_vals
    # Pair each molecule's values with the descriptor names.
    desc_dicts = []
    for mol in desc_vals:
        moldict = {}
        for nidx, name in enumerate(desc_names):
            moldict[name] = mol[nidx]
        desc_dicts.append(moldict)
    return desc_dicts
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
435,
85,
2367,
66,
9078,
14,
48553,
13,
9078,
198,
2,
410,
13,
15,
13,
16,
13,
17,
198,
2,
6013,
276... | 2.408336 | 2,831 |
"""Python side of the Python to Arduino bridge."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from contextlib import contextmanager
from multiprocessing import Process, Queue
import serial
from serial.serialutil import SerialException
from communicate.bridgehead_util import message_to_rpms, pwms_to_message
@contextmanager
| [
37811,
37906,
1735,
286,
262,
11361,
284,
27634,
7696,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 3.235772 | 123 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
from clawpack.clawutil.data import ClawData
from numpy import linspace
# Read the problem parameters (advection speed u and Gaussian width beta,
# as used by qtrue below) that were written to setprob.data by the setup.
probdata = ClawData()
probdata.read('setprob.data', force=True)
# NOTE: Python 2 print statement -- this script targets Python 2.
print "Parameters: u = %g, beta = %g" % (probdata.u, probdata.beta)
def qtrue(x,t):
    """
    The true solution, for comparison.
    Should be consistent with the initial data specified in qinit.f90.
    """
    import numpy as np
    # Advect the initial profile: trace back along the characteristic and
    # wrap into [0, 1) for the periodic boundary conditions.
    x0 = np.mod(x - probdata.u * t, 1.)
    # Gaussian hump centred at x0 = 0.75 ...
    profile = np.exp(-probdata.beta * (x0 - 0.75)**2)
    # ... plus a unit step on the interval (0.1, 0.4).
    return np.where((x0 > 0.1) & (x0 < 0.4), profile + 1, profile)
#--------------------------
def setplot(plotdata):
#--------------------------
    """
    Specify what is to be plotted at each frame.
    Input:  plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
    Output: a modified version of plotdata.
    """
    plotdata.clearfigures() # clear any old figures,axes,items data
    # Figure for q[0]
    plotfigure = plotdata.new_plotfigure(name='Solution', figno=1)
    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.xlimits = [0,1]
    plotaxes.ylimits = [-.6,1.2]
    plotaxes.title = 'q'
    # Set up for item on these axes:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 0
    # One color / marker style / marker size per AMR refinement level.
    plotitem.amr_color = ['g','b','r']
    plotitem.amr_plotstyle = ['^-','s-','o-']
    plotitem.amr_data_show = [1,1,1]
    plotitem.amr_kwargs = [{'markersize':8},{'markersize':6},{'markersize':5}]
    # Plot true solution for comparison:
    # NOTE(review): plot_qtrue_with_legend / plot_qtrue are assumed to be
    # helper callbacks defined elsewhere in this module (not visible here).
    plotaxes.afteraxes = plot_qtrue_with_legend
    # ------------------------------------------
    # Figure with each level plotted separately:
    plotfigure = plotdata.new_plotfigure(name='By AMR Level', figno=2)
    plotfigure.kwargs = {'figsize':(8,10)}
    for level in range(1,4):
        # Set up plot for this level:
        plotaxes = plotfigure.new_plotaxes()
        plotaxes.axescmd = 'subplot(3,1,%i)' % level
        plotaxes.xlimits = [0,1]
        plotaxes.ylimits = [-.5,1.3]
        plotaxes.title = 'Level %s' % level
        plotaxes.afteraxes = plot_qtrue
        plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
        plotitem.plot_var = 0
        plotitem.amr_color = ['g','b','r']
        plotitem.amr_plotstyle = ['^-','s-','o-']
        plotitem.amr_data_show = [0,0,0]
        plotitem.amr_data_show[level-1] = 1 # show only one level
    #-----------------------------------------
    # Figures for gauges
    #-----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='q', figno=300, \
                    type='each_gauge')
    plotfigure.clf_each_gauge = True
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.xlimits = 'auto'
    plotaxes.ylimits = 'auto'
    plotaxes.title = 'Solution'
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 0
    plotitem.plotstyle = 'b-'
    # Parameters used only when creating html and/or latex hardcopy
    # e.g., via clawpack.visclaw.frametools.printframes:
    plotdata.printfigs = True # print figures
    plotdata.print_format = 'png' # file format
    plotdata.print_framenos = 'all' # list of frames to print
    plotdata.print_fignos = 'all' # list of figures to print
    plotdata.html = True # create html files of plots?
    plotdata.html_homelink = '../README.html'
    plotdata.latex = True # create latex file of plots?
    plotdata.latex_figsperline = 2 # layout of plots
    plotdata.latex_framesperline = 1 # layout of plots
    plotdata.latex_makepdf = False # also run pdflatex?
    return plotdata
| [
198,
37811,
220,
198,
7248,
510,
262,
7110,
5538,
11,
34197,
11,
290,
3709,
284,
307,
1760,
329,
1123,
5739,
13,
198,
198,
1212,
8265,
318,
17392,
416,
262,
29353,
31878,
290,
788,
262,
198,
8818,
900,
29487,
318,
1444,
284,
900,
26... | 2.372176 | 1,682 |
from util import LabelEncoder
import editdistance
from torch import Tensor
from torchmetrics import Metric
class CharacterErrorRate(Metric):
    """
    Calculates Character Error Rate, calculated as Levenshtein edit distance divided
    by length of the target. Roughly speaking, this indicates the percentage or
    characters that were incorrectly predicted.
    """
    def update(self, preds: Tensor, target: Tensor):
        """
        Update the number of edits and ground truth characters.
        Args:
            preds (Tensor): tensor of shape (B, P), containing character predictions
            target (Tensor): tensor of shape (B, T), containing the ground truth
                character sequence
        """
        assert preds.ndim == target.ndim
        # NOTE(review): self.label_encoder, self.edits and self.total_chars are
        # presumably registered in __init__ (not visible here) -- confirm.
        eos_tkn_idx, sos_tkn_idx = list(
            self.label_encoder.transform(["<EOS>", "<SOS>"])
        )
        # Drop the leading <SOS> column when every prediction starts with it.
        if (preds[:, 0] == sos_tkn_idx).all(): # this should normally be the case
            preds = preds[:, 1:]
        # First <EOS> position per row; argmax(1) yields 0 when a row
        # contains no <EOS> at all.
        eos_idxs_prd = (preds == eos_tkn_idx).float().argmax(1).tolist()
        eos_idxs_tgt = (target == eos_tkn_idx).float().argmax(1).tolist()
        for i, (p, t) in enumerate(zip(preds, target)):
            eos_idx_p, eos_idx_t = eos_idxs_prd[i], eos_idxs_tgt[i]
            # A falsy (0) index means "no <EOS> found": keep the full row.
            p = p[:eos_idx_p] if eos_idx_p else p
            t = t[:eos_idx_t] if eos_idx_t else t
            # NOTE(review): tensor_to_str is assumed to be a helper defined
            # elsewhere in this module -- it is not imported above.
            p_str, t_str = map(tensor_to_str, (p, t))
            editd = editdistance.eval(p_str, t_str)
            self.edits += editd
            self.total_chars += t.numel()
    def compute(self) -> Tensor:
        """Compute Character Error Rate."""
        return self.edits.float() / self.total_chars
class WordErrorRate(Metric):
    """
    Calculates Word Error Rate, calculated as Levenshtein edit distance divided by
    the number of words in the target. This works the same way as Character Error
    Rate, except that we analyse at the word level, rather than the character level.
    """
    def update(self, preds: Tensor, target: Tensor):
        """
        Update the number of edits and ground truth words.
        Args:
            preds (Tensor): tensor of shape (B, P), containing character predictions
            target (Tensor): tensor of shape (B, T), containing the ground truth
                character sequence
        """
        assert preds.ndim == target.ndim
        # NOTE(review): self.label_encoder, self.edits and self.total_words are
        # presumably registered in __init__ (not visible here) -- confirm.
        eos_tkn_idx, sos_tkn_idx = self.label_encoder.transform(["<EOS>", "<SOS>"])
        # Drop the leading <SOS> column when every prediction starts with it.
        if (preds[:, 0] == sos_tkn_idx).all(): # this should normally be the case
            preds = preds[:, 1:]
        # First <EOS> position per row; argmax(1) yields 0 when a row
        # contains no <EOS> at all.
        eos_idxs_prd = (preds == eos_tkn_idx).float().argmax(1).tolist()
        eos_idxs_tgt = (target == eos_tkn_idx).float().argmax(1).tolist()
        for i, (p, t) in enumerate(zip(preds, target)):
            eos_idx_p, eos_idx_t = eos_idxs_prd[i], eos_idxs_tgt[i]
            # A falsy (0) index means "no <EOS> found": keep the full row.
            p = (p[:eos_idx_p] if eos_idx_p else p).flatten().tolist()
            t = (t[:eos_idx_t] if eos_idx_t else t).flatten().tolist()
            # Decode token ids back into text, then split on whitespace.
            p_words = "".join(self.label_encoder.inverse_transform(p)).split()
            t_words = "".join(self.label_encoder.inverse_transform(t)).split()
            editd = editdistance.eval(p_words, t_words)
            self.edits += editd
            self.total_words += len(t_words)
    def compute(self) -> Tensor:
        """Compute Word Error Rate."""
        return self.edits.float() / self.total_words
| [
6738,
7736,
1330,
36052,
27195,
12342,
198,
198,
11748,
4370,
30246,
198,
6738,
28034,
1330,
309,
22854,
198,
6738,
28034,
4164,
10466,
1330,
3395,
1173,
628,
198,
4871,
15684,
12331,
32184,
7,
9171,
1173,
2599,
198,
220,
220,
220,
37227,... | 2.190263 | 1,561 |
# Copyright 2021 Inspur
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.views import generic
import json
import logging
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from venus_dashboard.api import venus
LOG = logging.getLogger(__name__)
@urls.register
@urls.register
| [
2,
15069,
33448,
25300,
333,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330... | 3.584746 | 236 |
import os
import unittest
import numpy
import moments
import time
suite = unittest.TestLoader().loadTestsFromTestCase(ResultsTestCase)
if __name__ == '__main__':
unittest.main()
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
198,
11748,
7188,
198,
11748,
640,
628,
198,
2385,
578,
796,
555,
715,
395,
13,
14402,
17401,
22446,
2220,
51,
3558,
4863,
14402,
20448,
7,
25468,
14402,
20448,
8,
198,
... | 2.936508 | 63 |
from enum import Enum
| [
6738,
33829,
1330,
2039,
388,
628,
628,
198
] | 3.25 | 8 |
import yaml
import copy
import os.path
from PIL import Image as PILImage,ImageTk
from tkinter import Canvas, Tk, NW, mainloop
#Opening the Yaml File to Print out the Contents
#Name all Nodes
#Generates all Numbers Attached to Waypoints, Finds the Next Unused Number
#Editing Existing Node Data in the Yaml File
#Deleting Nodes
#Adding Path Data in the Yaml File
#Deleting Existing Node
#Adding a New Node to The Yaml File
cli_input()
#Initial function
#Uncomment the function you wish to call
#All methods take file name to make file switching easy
#readYaml("riseholme.tmap")
#addNode("riseholme.tmap")
#addPath("riseholme.tmap")
#nodeNames("riseholme.tmap")
#editYaml("riseholme.tmap")
#deletePath("riseholme.tmap")
#print(nodeNumber("riseholme.tmap"))
#deleteNode("riseholme.tmap")
| [
11748,
331,
43695,
198,
11748,
4866,
198,
11748,
28686,
13,
6978,
198,
6738,
350,
4146,
1330,
7412,
355,
350,
4146,
5159,
11,
5159,
51,
74,
198,
6738,
256,
74,
3849,
1330,
1680,
11017,
11,
309,
74,
11,
21966,
11,
1388,
26268,
198,
2... | 2.955556 | 270 |
from erlei.underscore import underscore as _
from erlei.underscore import uunderscore as __
from erlei.pipe import pipe
# Names exported by `from erlei import *`: the underscore helpers and the
# pipe combinator re-imported above.
__all__ = [
    '_',
    '__',
    'pipe'
]
__version__ = "1.0.0"  # package version string
name = "erlei"  # NOTE(review): presumably kept for legacy setup.py usage
6738,
1931,
293,
72,
13,
41116,
7295,
1330,
44810,
355,
4808,
201,
198,
6738,
1931,
293,
72,
13,
41116,
7295,
1330,
334,
41116,
7295,
355,
11593,
201,
198,
6738,
1931,
293,
72,
13,
34360,
1330,
12656,
201,
198,
201,
198,
834,
439,
8... | 2.21875 | 96 |
import glob
import os
import re
import subprocess
import time
import uuid
import pytest
import parse
import logging
from ccmlib import common
from dtest import Tester, create_ks, create_cf
from tools.assertions import assert_length_equal, assert_stderr_clean
since = pytest.mark.since
logger = logging.getLogger(__name__)
KEYSPACE = 'ks'
@since('2.2')  # secondary-index scrubbing is only exercised on 2.2+
class TestScrubIndexes(TestHelper):
    """
    Test that we scrub indexes as well as their parent tables
    """
    # NOTE(review): no test methods are visible at this point in the file;
    # the concrete index-scrub tests are presumably defined further down
    # or inherited from TestHelper.
class TestScrub(TestHelper):
    """
    Generic tests for scrubbing
    """
    def test_scrub_with_UDT(self):
        """
        Regression test: scrubbing a node whose schema contains a
        user-defined type must not log MarshalException errors.
        @jira_ticket CASSANDRA-7665
        """
        cluster = self.cluster
        # Single-node cluster is enough to reproduce the issue.
        cluster.populate(1).start()
        node1 = cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)
        session.execute("CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1 };")
        session.execute("use test;")
        session.execute("CREATE TYPE point_t (x double, y double);")
        node1.nodetool("scrub")
        # Give the node a moment to flush its log before grepping it.
        time.sleep(2)
        # The scrub must not have tripped over the UDT definition.
        match = node1.grep_log("org.apache.cassandra.serializers.MarshalException: Not enough bytes to read a set")
        assert len(match) == 0
| [
11748,
15095,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
850,
14681,
198,
11748,
640,
198,
11748,
334,
27112,
198,
11748,
12972,
9288,
198,
11748,
21136,
198,
11748,
18931,
198,
198,
6738,
36624,
4029,
571,
1330,
2219,
198,
198,
6738... | 2.577083 | 480 |
# base code was taken from @DeletedUser420's Userge-Plugins repo
# originally authored by Phyco-Ninja (https://github.com/Phyco-Ninja) (@PhycoNinja13b)
# I've just tweaked his file a bit (maybe a lot)
# But i sticked to the format he used which looked cool
""" Search for Anime related Info using Anilist API """
import asyncio
import requests
import time
import random
import re
from pyrogram import filters, Client
from pyrogram.types import CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, InputMediaPhoto, Message
from pyrogram.errors import UserNotParticipant
from .. import ANILIST_CLIENT, ANILIST_REDIRECT_URL, ANILIST_SECRET, OWNER, TRIGGERS as trg, BOT_NAME
from ..utils.data_parser import (
get_all_genres, get_all_tags, get_top_animes, get_user_activity, get_user_favourites, toggle_favourites,
get_anime, get_airing, get_anilist, get_character, get_additional_info, get_manga, browse_,
get_featured_in_lists, update_anilist, get_user, ANIME_DB, MANGA_DB, CHAR_DB
)
from ..utils.helper import check_user, get_btns, AUTH_USERS, rand_key, clog, control_user
from ..utils.db import get_collection
GROUPS = get_collection("GROUPS")
SFW_GRPS = get_collection("SFW_GROUPS")
DC = get_collection('DISABLED_CMDS')
AG = get_collection('AIRING_GROUPS')
CG = get_collection('CRUNCHY_GROUPS')
SG = get_collection('SUBSPLEASE_GROUPS')
no_pic = [
'https://telegra.ph/file/0d2097f442e816ba3f946.jpg',
'https://telegra.ph/file/5a152016056308ef63226.jpg',
'https://telegra.ph/file/d2bf913b18688c59828e9.jpg',
'https://telegra.ph/file/d53083ea69e84e3b54735.jpg',
'https://telegra.ph/file/b5eb1e3606b7d2f1b491f.jpg'
]
@Client.on_message(filters.command(["anime", f"anime{BOT_NAME}"], prefixes=trg))
@control_user
async def anime_cmd(client: Client, message: Message):
    """Search Anime Info.
    Handles `/anime <query>`: looks the query up via get_anime (by id when
    the query is numeric) and replies with the title image, caption and
    inline buttons.  18+ titles are suppressed in SFW-flagged groups.
    """
    text = message.text.split(" ", 1)
    gid = message.chat.id
    gidtitle = message.chat.username or message.chat.title
    # BUG FIX: this was `message.chat` (a Chat object), so the membership
    # test against the strings below was always False and new groups were
    # never registered.  character_cmd uses `message.chat.type` correctly.
    gidtype = message.chat.type
    user = message.from_user.id
    # Register and log previously unseen groups.
    if gidtype in ["supergroup", "group"] and not await (GROUPS.find_one({"id": gid})):
        await GROUPS.insert_one({"id": gid, "grp": gidtitle})
        await clog("ANIBOT", f"Bot added to a new group\n\n{gidtitle}\nID: `{gid}`", "NEW_GROUP")
    # Respect the per-chat disabled-commands list.
    find_gc = await DC.find_one({'_id': gid})
    if find_gc is not None and 'anime' in find_gc['cmd_list'].split():
        return
    if len(text)==1:
        # No query given: show a usage hint briefly, then clean it up.
        k = await message.reply_text("Please give a query to search about\nexample: /anime Ao Haru Ride")
        await asyncio.sleep(5)
        return await k.delete()
    query = text[1]
    auth = False
    vars_ = {"search": query}
    if query.isdigit():
        # Numeric queries are treated as media ids rather than titles.
        vars_ = {"id": int(query)}
    if (await AUTH_USERS.find_one({"id": user})):
        auth = True
    result = await get_anime(vars_, user=user, auth=auth)
    if len(result) != 1:
        title_img, finals_ = result[0], result[1]
    else:
        # A single-element result carries an error/"not found" message.
        k = await message.reply_text(result[0])
        await asyncio.sleep(5)
        return await k.delete()
    buttons = get_btns("ANIME", result=result, user=user, auth=auth)
    if await (SFW_GRPS.find_one({"id": gid})) and result[2].pop()=="True":
        await client.send_photo(gid, no_pic[random.randint(0, 4)], caption="This anime is marked 18+ and not allowed in this group")
        return
    await client.send_photo(gid, title_img, caption=finals_, reply_markup=buttons)
@Client.on_message(filters.command(["manga", f"manga{BOT_NAME}"], prefixes=trg))
@control_user
async def manga_cmd(client: Client, message: Message):
    """Search Manga Info.
    Handles `/manga <query>`: stores the query in MANGA_DB under a random
    key (used for pagination callbacks) and replies with the first result
    page.  18+ titles are suppressed in SFW-flagged groups.
    """
    text = message.text.split(" ", 1)
    gid = message.chat.id
    gidtitle = message.chat.username or message.chat.title
    # BUG FIX: this was `message.chat` (a Chat object), so the membership
    # test against the strings below was always False and new groups were
    # never registered.  character_cmd uses `message.chat.type` correctly.
    gidtype = message.chat.type
    user = message.from_user.id
    # Register and log previously unseen groups.
    if gidtype in ["supergroup", "group"] and not await (GROUPS.find_one({"id": gid})):
        await GROUPS.insert_one({"id": gid, "grp": gidtitle})
        await clog("ANIBOT", f"Bot added to a new group\n\n{gidtitle}\nID: `{gid}`", "NEW_GROUP")
    # Respect the per-chat disabled-commands list.
    find_gc = await DC.find_one({'_id': gid})
    if find_gc is not None and 'manga' in find_gc['cmd_list'].split():
        return
    if len(text)==1:
        # No query given: show a usage hint briefly, then clean it up.
        k = await message.reply_text("Please give a query to search about\nexample: /manga The teasing master Takagi san")
        await asyncio.sleep(5)
        return await k.delete()
    query = text[1]
    qdb = rand_key()
    MANGA_DB[qdb] = query
    auth = False
    if (await AUTH_USERS.find_one({"id": user})):
        auth = True
    result = await get_manga(qdb, 1, auth=auth, user=user)
    if len(result) == 1:
        # A single-element result carries an error/"not found" message.
        k = await message.reply_text(result[0])
        await asyncio.sleep(5)
        return await k.delete()
    pic, finals_ = result[0], result[1][0]
    buttons = get_btns("MANGA", lsqry=qdb, lspage=1, user=user, result=result, auth=auth)
    if await (SFW_GRPS.find_one({"id": gid})) and result[2].pop()=="True":
        buttons = get_btns("MANGA", lsqry=qdb, lspage=1, user=user, result=result, auth=auth, sfw="True")
        await client.send_photo(gid, no_pic[random.randint(0, 4)], caption="This manga is marked 18+ and not allowed in this group", reply_markup=buttons)
        return
    await client.send_photo(gid, pic, caption=finals_, reply_markup=buttons)
@Client.on_message(filters.command(["character", f"character{BOT_NAME}"], prefixes=trg))
@control_user
async def character_cmd(client: Client, message: Message):
    """Get Info about a Character"""
    text = message.text.split(" ", 1)
    gid = message.chat.id
    gidtype = message.chat.type
    gidtitle = message.chat.username or message.chat.title
    user = message.from_user.id
    # Register and log previously unseen groups.
    if gidtype in ["supergroup", "group"] and not await (GROUPS.find_one({"id": gid})):
        await GROUPS.insert_one({"id": gid, "grp": gidtitle})
        await clog("ANIBOT", f"Bot added to a new group\n\n{gidtitle}\nID: `{gid}`", "NEW_GROUP")
    # Respect the per-chat disabled-commands list.
    find_gc = await DC.find_one({'_id': gid})
    if find_gc is not None and 'character' in find_gc['cmd_list'].split():
        return
    if len(text)==1:
        # No query given: show a usage hint briefly, then clean it up.
        k = await message.reply_text("Please give a query to search about\nexample: /character Nezuko")
        await asyncio.sleep(5)
        return await k.delete()
    query = text[1]
    # Store the query under a random key for pagination callbacks.
    qdb = rand_key()
    CHAR_DB[qdb]=query
    auth = False
    if (await AUTH_USERS.find_one({"id": user})):
        auth = True
    result = await get_character(qdb, 1, auth=auth, user=user)
    if len(result) == 1:
        # A single-element result carries an error/"not found" message.
        k = await message.reply_text(result[0])
        await asyncio.sleep(5)
        return await k.delete()
    img = result[0]
    cap_text = result[1][0]
    buttons = get_btns("CHARACTER", user=user, lsqry=qdb, lspage=1, result=result, auth=auth)
    await client.send_photo(gid, img, caption=cap_text, reply_markup=buttons)
@Client.on_message(filters.command(["anilist", f"anilist{BOT_NAME}"], prefixes=trg))
@control_user
@Client.on_message(filters.command(["flex", f"flex{BOT_NAME}", "user", f"user{BOT_NAME}"], prefixes=trg))
@control_user
@Client.on_message(filters.command(["top", f"top{BOT_NAME}"], prefixes=trg))
@control_user
@Client.on_message(filters.command(["airing", f"airing{BOT_NAME}"], prefixes=trg))
@control_user
async def airing_cmd(client: Client, message: Message):
"""Get Airing Detail of Anime"""
text = message.text.split(" ", 1)
gid = message.chat.id
find_gc = await DC.find_one({'_id': gid})
if find_gc is not None and 'airing' in find_gc['cmd_list'].split():
return
if len(text)==1:
k = await message.reply_text("Please give a query to search about\nexample: /airing Fumetsu no Anata e")
await asyncio.sleep(5)
return await k.delete()
query = text[1]
vars_ = {"search": query}
if query.isdigit():
vars_ = {"id": int(query), "asHtml": True}
auth = False
user = message.from_user.id
if (await AUTH_USERS.find_one({"id": user})):
auth = True
result = await get_airing(vars_, auth=auth, user=user)
if len(result) == 1:
k = await message.reply_text(result[0])
await asyncio.sleep(5)
return await k.delete()
coverImg, out = result[0]
btn = get_btns("AIRING", user=user, result=result, auth=auth)
if await (SFW_GRPS.find_one({"id": gid})) and result[2].pop()=="True":
await client.send_photo(gid, no_pic[random.randint(0, 4)], caption="This anime is marked 18+ and not allowed in this group")
return
await client.send_photo(gid, coverImg, caption=out, reply_markup=btn)
@Client.on_message(filters.command("auth", prefixes=trg))
@control_user
@Client.on_message(~filters.private & filters.command(["settings", f"settings{BOT_NAME}"], prefixes=trg))
@control_user
@Client.on_message(filters.command(["me", f"me{BOT_NAME}", "activity", f"activity{BOT_NAME}"], prefixes=trg))
@control_user
@Client.on_message(filters.command(["favourites", f"favourites{BOT_NAME}"], prefixes=trg))
@control_user
@Client.on_message(filters.private & filters.command("logout", prefixes=trg))
@control_user
@Client.on_message(filters.command(["browse", f"browse{BOT_NAME}"], prefixes=trg))
@control_user
@Client.on_message(filters.command(["gettags", f"gettags{BOT_NAME}", "getgenres", f"getgenres{BOT_NAME}"], prefixes=trg))
@control_user
@Client.on_callback_query(filters.regex(pattern=r"page_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"btn_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"topanimu_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"settogl_(.*)"))
@Client.on_callback_query(filters.regex(pattern=r"myacc_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"myfavs_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"myfavqry_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"getusrbc_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"fav_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"(lsadd|lsupdt)_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"browse_(upcoming|trending|popular)_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"(lsas|lsus|dlt)_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"(desc|ls|char)_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"lsc_(.*)"))
@check_user
@Client.on_callback_query(filters.regex(pattern=r"lsc(a|m)_(.*)"))
@check_user
| [
2,
2779,
2438,
373,
2077,
422,
2488,
5005,
33342,
12982,
27211,
338,
11787,
469,
12,
23257,
1040,
29924,
201,
198,
2,
6198,
33941,
416,
1380,
88,
1073,
12,
36091,
6592,
357,
5450,
1378,
12567,
13,
785,
14,
2725,
88,
1073,
12,
36091,
... | 2.323087 | 4,652 |
# Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
import re
from .. import If
from . import exactly_one, json_checker
def validate_json_checker(x):
"""
Property: BackupVault.AccessPolicy
"""
return json_checker(x)
def backup_vault_name(name):
"""
Property: BackupVault.BackupVaultName
"""
vault_name_re = re.compile(r"^[a-zA-Z0-9\-\_\.]{1,50}$") # noqa
if vault_name_re.match(name):
return name
else:
raise ValueError("%s is not a valid backup vault name" % name)
def validate_backup_selection(self):
"""
Class: BackupSelectionResourceType
"""
conds = [
"ListOfTags",
"Resources",
]
if check_if(conds, self.properties):
return
exactly_one(self.__class__.__name__, self.properties, conds)
| [
2,
15069,
357,
66,
8,
2321,
12,
1238,
1828,
11,
2940,
2631,
988,
1279,
4102,
31,
431,
988,
13,
2398,
29,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
4091,
38559,
24290,
2393,
329,
1336,
5964,
13,
628,
198,
11748,
302,
198,
19... | 2.391781 | 365 |
# re-write of the dosleg parser to have
# the same output as senapy
import sys
import re
from urllib.parse import urljoin
import dateparser
from bs4 import BeautifulSoup
from lawfactory_utils.urls import clean_url, download, parse_national_assembly_url, AN_OLD_URL_TEMPLATE
from anpy.dossier_from_opendata import parse as opendata_parse
def find_promulgation_date(line):
"""
>>> find_promulgation_date("Loi nº 2010-383 du 16 avril 2010 autorisant l'approbation de l'accord entre...")
'2010-04-16'
"""
line = line.split(' du ')[1]
return format_date(re.search(r"(\d\d? \w\w\w+ \d\d\d\d)", line).group(1))
def historic_doslegs_parse(html, url_an=None, logfile=sys.stderr, nth_dos_in_page=0, parse_previous_works=True, parse_next_works=True):
"""
Parse an AN dosleg like http://www.assemblee-nationale.fr/13/dossiers/accord_Montenegro_mobilite_jeunes.asp
nth_dos_in_page, parse_previous_works and parse_next_works are for internal logic
"""
data = {
'url_dossier_assemblee': clean_url(url_an),
'urgence': False,
}
soup = BeautifulSoup(html, 'lxml')
legislature, slug = parse_national_assembly_url(data['url_dossier_assemblee'])
data['assemblee_slug'] = slug
if legislature:
data['assemblee_legislature'] = legislature
else: # strange link (old dosleg)
log_error('NO LEGISLATURE IN AN LINK: ' + data['url_dossier_assemblee'])
data['assemblee_id'] = '%s-%s' % (data.get('assemblee_legislature', ''), data['assemblee_slug'])
data['steps'] = []
curr_institution = 'assemblee'
curr_stage = '1ère lecture'
last_section = None # Travaux des commissions/Discussion en séance publique
last_step_index = 0
travaux_prep_already = False
another_dosleg_inside = None
predicted_next_step = None # For unfinished projects, we try to catch the next step
previous_works = None
url_jo = None
html_lines = html.split('\n')
for i, line in enumerate(html_lines):
if '<COMMENTAIRE>' in line or '<table border="1"' in line:
continue
if '<font face="ARIAL" size="3" color="#000080">' in line:
data['long_title'] = line_text()
if '<br><b><font color="#000099">Travaux des commissions</font></b><br>' in line:
last_section = line_text()
if '<p align="center"><b><font color="#000080">Travaux préparatoires</font></b><br>' in line:
if travaux_prep_already:
if parse_next_works and not nth_dos_in_page:
log_warning('FOUND ANOTHER DOSLEG INSIDE THE DOSLEG')
another_dosleg_inside = '\n'.join(html.split('\n')[last_step_index + 1:])
if not nth_dos_in_page:
break
travaux_prep_already = False
else:
travaux_prep_already = True
if not parse_next_works and travaux_prep_already and nth_dos_in_page:
continue
# Senat 1ère lecture, CMP, ...
if '<font color="#000099" size="2" face="Arial">' in line:
text = line_text()
last_section = None
if 'Dossier en ligne sur le site du Sénat' in text:
data['url_dossier_senat'] = clean_url(parse_line().select(
'a')[-1].attrs['href'])
text = text.replace(
'(Dossier en ligne sur le site du Sénat)', '')
if 'Sénat' in text:
curr_institution = 'senat'
elif 'Assemblée nationale' in text:
curr_institution = 'assemblee'
elif 'Commission Mixte Paritaire' in text or 'Lecture texte CMP' in text:
curr_institution = 'CMP'
curr_stage = 'CMP'
elif 'Conseil Constitutionnel' in text:
curr_institution = 'conseil constitutionnel'
curr_stage = 'constitutionnalité'
elif 'Congrès du Parlement' in text:
curr_institution = 'congrès'
curr_stage = 'congrès'
if '1ère lecture' in text:
curr_stage = '1ère lecture'
elif '2e lecture' in text:
curr_stage = '2ème lecture'
elif 'Nouvelle lecture' in text:
curr_stage = 'nouv. lect.'
elif 'Lecture définitive' in text:
curr_stage = 'l. définitive'
if not curr_stage:
curr_stage = text.split('-')[-1].strip().lower()
if curr_stage == "création de la commission d'enquête":
log_warning('COMMISSION D\'ENQUETE')
return None
if '>Proposition de résolution européenne<' in line:
log_warning('PROPOSITION DE RESOLUTION EUROPEENE')
return None
if '>Accès aux Travaux préparatoires' in line and not previous_works:
previous_works = clean_url(urljoin(url_an, parse_line().find('a').attrs['href']))
curr_step = None
# conseil. consti. has no step but we should get the link
no_step_but_good_link = False
if 'Rapport portant également sur les propositions' in line:
continue
elif re.search(r'<a[^>]* href=[^>]*>(projet de loi|proposition de loi|proposition de résolution)', line, re.I):
curr_step = 'depot'
if curr_stage == 'CMP':
continue
elif ">Texte de la commission" in line or '/ta-commission/' in line:
curr_step = 'commission'
elif '/ta/' in line or '/leg/tas' in line:
if get_last_step().get('stage') != curr_stage:
curr_step = 'depot'
if curr_stage == 'CMP':
curr_step = 'commission'
else:
curr_step = 'hemicycle'
elif ('/rapports/' in line or '/rap/' in line) and last_section and 'commissions' in last_section:
if get_last_step().get('step') == 'commission':
# log_warning('DOUBLE COMMISSION LINE: %s' % line)
continue
curr_step = 'commission'
elif 'www.conseil-constitutionnel.fr/decision/' in line:
no_step_but_good_link = True
# no commissions for l. définitive
if curr_stage == 'l. définitive' and curr_step == 'commission':
continue
if curr_step or no_step_but_good_link:
# if same step previously, replace or not the url
if get_last_step().get('step') == curr_step:
# log_warning('DOUBLE STEP: %s' % line)
# remove last step since we prefer text links instead of reports links
# TODO: add report link as bonus_url
last_url = get_last_step().get('source_url')
if not last_url or ('/rapports/' in last_url or '/rap/' in last_url):
data['steps'] = data['steps'][:-1]
# looks like the last url was already a text, let's assume it's a multi-depot
else:
# multi-depot if not CMP
# TODO: re-order multi depot
if curr_institution == 'senat' and curr_stage != 'CMP':
curr_step = 'depot'
links = [a.attrs.get('href') for a in parse_line().select('a')]
links = [
href for href in links if href and 'fiches_id' not in href and '/senateur/' not in href and 'javascript:' not in href]
if not links:
log_error('NO LINK IN LINE: %s' % (line,))
continue
urls_raps = []
urls_others = []
for href in links:
if '/rap/' in href or '/rapports/' in href:
urls_raps.append(href)
else:
urls_others.append(href)
cmp_commission_other_url = None
if len(urls_others) > 0:
url = urls_others[0]
# CMP commission should produce two texts, one for each institution
if curr_step == 'commission' and curr_stage == 'CMP' and len(urls_others) > 1:
cmp_commission_other_url = clean_url(urljoin(url_an, urls_others[1]))
else:
url = urls_raps[0]
url = clean_url(urljoin(url_an, url))
real_institution = curr_institution
if curr_stage == 'CMP' and curr_step == 'hemicycle':
if 'assemblee-nationale.fr' in url:
real_institution = 'assemblee'
elif 'senat.fr' in url:
real_institution = 'senat'
step = {
'institution': real_institution,
'stage': curr_stage,
'source_url': url,
}
if curr_step:
step['step'] = curr_step
if cmp_commission_other_url:
step['cmp_commission_other_url'] = cmp_commission_other_url
# try to detect a date
for test_line in (line, html_lines[i-1]):
test_line = test_line.replace('1<sup>er</sup>', '1')
date_match = re.search(r'(déposée? le|adoptée? .*? le|modifiée? .*?|rejetée? .*?)\s*(\d\d? \w\w\w+ \d\d\d\d)', test_line, re.I)
if date_match:
step['date'] = format_date(date_match.group(2))
else:
date_match = re.search(r'(mis en ligne le)\s*(\d\d? \w\w\w+ \d\d\d\d)', test_line, re.I)
if date_match:
step['date'] = format_date(date_match.group(2))
if 'date' in step and 'beginning' not in data:
data['beginning'] = step['date']
data['steps'].append(step)
predicted_next_step = None
last_step_index = i
if 'publiée au Journal Officiel' in line and not url_jo:
links = [clean_url(a.attrs['href']) for a in parse_line().select('a') if 'legifrance' in a.attrs.get('href', '')]
if not links:
log_error('NO GOOD LINK IN LINE: %s' % (line,))
continue
url_jo = links[-1]
if 'Le Gouvernement a engagé la procédure accélérée' in line or 'engagement de la procédure accélérée' in line:
data['urgence'] = True
# Next step prediction via small clues
# TODO: this could be done via last_section (we parse two times the same thing)
# TODO: this fails for CMP hemicycle senat
if curr_stage != 'CMP':
if '>Discussion en séance publique<' in line:
predicted_next_step = {
'institution': curr_institution,
'stage': curr_stage,
'step': 'hemicycle',
}
elif '>Travaux des commissions<' in line:
predicted_next_step = {
'institution': curr_institution,
'stage': curr_stage,
'step': 'commission',
}
metas = {}
for meta in soup.select('meta'):
if 'name' in meta.attrs:
metas[meta.attrs['name']] = meta.attrs['content']
if not url_jo:
url_jo = metas.get('LIEN_LOI_PROMULGUEE')
if url_jo:
data['url_jo'] = clean_url(url_jo)
promulgation_step = {
'institution': 'gouvernement',
'stage': 'promulgation',
'source_url': data['url_jo'],
}
if metas.get('LOI_PROMULGUEE'):
data['end'] = find_promulgation_date(metas.get('LOI_PROMULGUEE'))
promulgation_step['date'] = data['end']
data['steps'].append(promulgation_step)
# add predicted next step for unfinished projects
elif predicted_next_step:
data['steps'].append(predicted_next_step)
if 'url_dossier_senat' not in data or 'dossier-legislatif' not in data['url_dossier_senat']:
senat_url = find_senat_url(data)
if senat_url:
data['url_dossier_senat'] = senat_url
# append previous works if there are some
if previous_works and parse_previous_works:
log_warning('MERGING %s WITH PREVIOUS WORKS %s' % (url_an, previous_works))
resp = download_historic_dosleg(previous_works)
prev_data = historic_doslegs_parse(
resp.text, previous_works,
logfile=logfile,
nth_dos_in_page=nth_dos_in_page, parse_next_works=False)
if prev_data:
prev_data = prev_data[nth_dos_in_page] if len(prev_data) > 1 else prev_data[0]
data = merge_previous_works_an(prev_data, data)
else:
log_warning('INVALID PREVIOUS WORKS', previous_works)
# is this part of a dosleg previous works ?
next_legislature = data['assemblee_legislature'] + 1 if 'assemblee_legislature' in data else 9999
if parse_next_works and next_legislature < 15:
# TODO: parse 15th legislature from open data if it exists
resp = download_historic_dosleg(url_an.replace('/%d/' % data['assemblee_legislature'], '/%d/' % (data['assemblee_legislature'] + 1)))
if resp.status_code == 200:
recent_data = historic_doslegs_parse(
resp.text, resp.url,
logfile=logfile,
nth_dos_in_page=nth_dos_in_page, parse_previous_works=False)
if recent_data:
log_warning('FOUND MORE RECENT WORKS', resp.url)
recent_data = recent_data[nth_dos_in_page] if len(recent_data) > 1 else recent_data[0]
data = merge_previous_works_an(data, recent_data)
if another_dosleg_inside:
others = historic_doslegs_parse(another_dosleg_inside, url_an, logfile=logfile, nth_dos_in_page=nth_dos_in_page+1)
if others:
return [data] + others
return [data]
"""
Cas non-gérés (anciens dossiers):
- renvois en commision: http://www.assemblee-nationale.fr/14/dossiers/interdiction_prescription_acquisitive_voies_rurales.asp
- senat ppl manquant: http://www.assemblee-nationale.fr/13/dossiers/comite_poids_mesures.asp
"""
| [
2,
302,
12,
13564,
286,
262,
23430,
1455,
30751,
284,
423,
198,
2,
262,
976,
5072,
355,
3308,
12826,
198,
198,
11748,
25064,
198,
11748,
302,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
198,
11748,
3128,
48610,
198,... | 2.007951 | 7,043 |
import os
import re
import numpy
import itertools
from . import config, dataset_generation, preprocessing
class FormatSentence:
"""
Take a sentence, annotated or not, and generate the vectors associated
"""
sentence = ''
words = []
__size_vector = 26+11
__dictionary = None
__vectorized_words = []
__window_size = 4
__annotated_sentence = ('', '', '')
__is_annotated = False
__pos_tag_active = False
#Compute all the vector corresponding to the words of the sentence.
#return the vector in a list format corresponding to a word with a fixed window size
@staticmethod
@staticmethod
@staticmethod
class BuildDataSet:
"""
Build a data set from annotated questions and a dictionary of vectors
"""
__dictionary = None
__window_size = 4
__file = None
__number_sentences = 0
__number_test_entries = 0
__number_train_entries = 0
data_set_input = []
data_set_output = []
__sentences = {}
@staticmethod
def generateSentence(self,subject,predicate):
"""
Add all possible triples with a missing object, and the given subject and predicate.
Subject must be a string.
Predicate must be a list of string.
self.generateSentence('foo',['bar1','bar2','bar3']) will generate all permutations of
{'foo','bar1','bar2','bar3'}, associated to the triple ('foo', 'bar1 bar2 bar3', ?)
"""
triple = (subject," ".join(predicate),"")
for sentence in itertools.permutations(predicate+[subject]):
s = " ".join(sentence)
#print(s)
#print('(%s, %s, %s)' % triple)
f_s = FormatSentence(s, self.__dictionary, triple, self.__window_size, pos_tag_active=self.__pos_tag_active)
self.addSentence(s,f_s)
def create_dataset(training_set_distribution=0.98):
"""Function called when bootstraping to train the parser."""
w_size = config.get_windows_size()
en_dict = preprocessing.Dictionary(config.get_data('embeddings-scaled.EMBEDDING_SIZE=25.txt'))
filename = os.path.join(os.path.dirname(__file__),
'data/AnnotatedKeywordsQuestions.txt')
data_set = BuildDataSet(en_dict, filename, window_size=w_size, pos_tag_active=True)
data_set.build()
data_set.generate_all()
data_set.save(config.get_data('questions'), config.get_data('answers'),
training_set_distribution=training_set_distribution)
print('Generated files saved in: \n' + config.get_data(''))
print('Database generated.')
print('Number of entries in the train set: ' + str(data_set.number_train_entries()))
print('Number of entries in the test set: ' + str(data_set.number_test_entries())) | [
11748,
28686,
198,
11748,
302,
198,
11748,
299,
32152,
198,
11748,
340,
861,
10141,
198,
198,
6738,
764,
1330,
4566,
11,
27039,
62,
20158,
11,
662,
36948,
628,
198,
4871,
18980,
31837,
594,
25,
198,
220,
220,
220,
37227,
198,
220,
220... | 2.552823 | 1,098 |
# -*- coding: utf-8 -*-
"""
builders/layers/transformer.py
Created on 01/05/19
@author: Tu Bui tb00083@surrey.ac.uk
"""
import tensorflow as tf
from tensorflow.keras import backend as K
from ..utils import scaled_dot_product_attention, positional_encoding
class SelfAttnV1(tf.keras.layers.Layer):
"""
Keras attention layer for a sequence
learn weight for each time step
This implementation uses the attention formula proposed by Sukhbaatar etal. 2015
https://papers.nips.cc/paper/5846-end-to-end-memory-networks.pdf
Example:
from tensorflow.keras.layers import Input, LSTM
from attn_rnn import AttnRNN
input_data = Input(shape=(32,128)) # [?, 32, 128]
x = LSTM(10, return_sequences=True)(input_data) # [?, 32, 10]
x, w = SelfAttn()(x) # x: [?, 10], w: [?, 32]
where w is the attention weight for each time step (useful for visualisation/evaluation)
"""
def __init__(self, units=None, **kwargs):
"""
Layer initialisation
:param units: define the embedding dimension. If not specified (default),
it will be set to feat dimension.
:param kwargs:
"""
self.units = units
super(SelfAttnV1, self).__init__(**kwargs)
def call(self, x):
"""
ui = tanh(xW+b)
a = softmax(uV)
o = sum(a*x)
:param x: input tensor [batch_size, time_step, feat_len]
:return: output tensor [batch_size, feat_len]
"""
# ui = tanh(xW+b)
ui = K.tanh(K.bias_add(K.dot(x, self.W), self.b)) # [B, T, L]
# a = softmax(uV)
ai = K.softmax(K.dot(ui, self.V), axis=1) # [B, T, 1]
o = K.sum(x * ai, axis=1, keepdims=False)
return o, ai
class SelfAttnV2(tf.keras.layers.Layer):
"""
Version2 of selfattn
if units is not None: add a dense layer after the attention to change output dimension
"""
def __init__(self, units=None, **kwargs):
"""
Layer initialisation
:param units: define the embedding dimension. If not specified (default),
it will be set to feat dimension.
:param kwargs:
"""
self.units = units
super(SelfAttnV2, self).__init__(**kwargs)
def call(self, x, mask=None):
"""
ui = tanh(xW+b)
a = softmax(uV)
o = sum(a*x)
:param x: input tensor [batch_size, time_step, feat_len]
:return: output tensor [batch_size, new_feat_len]
"""
# ui = tanh(xW+b)
ui = K.tanh(K.bias_add(K.dot(x, self.W), self.b)) # [B, T, L]
# a = softmax(uV)
ai = K.softmax(K.dot(ui, self.V), axis=1) # [B, T, 1]
o = K.sum(x * ai, axis=1, keepdims=False) # [B, T, L]
if self.units is not None:
o = self.embeding_layer(o)
return o, ai
class SampleKL(tf.keras.layers.Layer):
"""
Sample z from mean and variance tensors
z = mean + N(0,I) * tf.exp(log_var/2)
"""
class KLDivergence(tf.keras.layers.Layer):
"""
perform VAE
"""
class MultiHeadAttention(tf.keras.layers.Layer):
"""
multi head attention for transformer
"""
def split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
class DenseExpander(tf.keras.layers.Layer):
"""
Expand tensor using Dense conv
input: (batch_size, feat_dim_in)
output: (batch_size, seq_len, feat_dim_out)
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
50034,
14,
75,
6962,
14,
7645,
16354,
13,
9078,
198,
41972,
319,
5534,
14,
2713,
14,
1129,
198,
31,
9800,
25,
16749,
347,
9019,
256,
65,
830,
5999,
31,
1... | 2.145737 | 1,736 |
names = ['appu','chakrapani','upadhyaya',23,'TRUE']
for name in names:
print(name)
print(names[0])
print(names[2:4])
names.insert(2,"Shraddha")
print(names)
names.append("Shreshta")
print(names)
names.append("Shreshta")
print(names)
names[2]="Updated"
print(names)
names.remove("Updated")
print(names)
rec = names.pop(2)
print(rec)
print(names) | [
14933,
796,
37250,
1324,
84,
41707,
354,
461,
2416,
3216,
41707,
929,
324,
12114,
11729,
3256,
1954,
4032,
5446,
8924,
20520,
198,
198,
1640,
1438,
287,
3891,
25,
198,
220,
220,
220,
3601,
7,
3672,
8,
198,
198,
4798,
7,
14933,
58,
1... | 2.395973 | 149 |
import selectors
import socket
import types
import queue
import threading
from message_handler import MessageHandler
from datetime import datetime | [
11748,
2922,
669,
198,
11748,
17802,
198,
11748,
3858,
198,
11748,
16834,
198,
11748,
4704,
278,
198,
6738,
3275,
62,
30281,
1330,
16000,
25060,
198,
6738,
4818,
8079,
1330,
4818,
8079
] | 4.709677 | 31 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Yue-Wen FANG'
__maintainer__ = "Yue-Wen FANG"
__email__ = 'fyuewen@gmail.com'
__license__ = 'Apache License 2.0'
__creation_date__= 'Dec. 26, 2018'
"""
single inheritance
"""
class Person:
"""
define a CLASS Person with three methods
"""
class Girl(Person):
"""
class Girl inherits the attributes and methods
in the class Person
"""
def listheight(self):
"""
overwrite the methods listheight
"""
print('HEIGHT is 165 cm')
if __name__ == '__main__':
cang = Girl()
cang.listheight()
cang.speak()
cang.listweight(80)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
56,
518,
12,
54,
268,
376,
15567,
6,
198,
834,
76,
2913,
10613,
834,
796,
366,
56,
... | 2.402174 | 276 |
'''
assumptions:
- credit one, the leading provider of identity management is a division of capital one (e.g. https://capitalone.com/creditone)
- user already has an account with capital one (they've been verified id, passport, credit, ...)
- participating banks, institutions, ... realize credit one the leading
provider of identity management
- all third parties below, are authorized (they have client id/secret) partners with credit one
scenarios:
- student wants to open account at bank a
- bank a doesn't want to collect information manually from user
- they are partner with credit one
- bank a will use signup feature of credit one
- user is directed to capital one site
- user logs in and approves
- user is redirected back to partner site w/ personal info
- bank a uses personal info to create account
- student wants to open account at bank b
- bank b will collect person information from each user, but does not
have the capacity to handle due diligence to verify user
- bank b will use verify feature of credit one
- user is directed to capital one site
- user logs in and approves (allows the verification)
- bank b gets salt to hash information on it's end before sending to capital one
- bank b then sends user info to be verified
- verification is passed, bank b creates account for user
questions:
- for scope of this hackathon, do we say we are the devex api or are we the proxy between
a partner site and the devex api
'''
import os
import requests
import json
from flask import Flask, flash, jsonify, render_template, redirect, request, session, url_for
from requests_oauthlib import OAuth2Session
app = Flask(__name__, static_folder='./templates/', template_folder="./templates", instance_relative_config=True)
# load default config and secrets
app.config.from_object('config')
app.config.from_pyfile('config.py') # instance/config.py
os.environ['OAUTHLIB_INSECURE_TRANSPORT']='1' # for testing, we use non-HTTPS
# state for csrf
SESSKEY_DEXEX_STATE='devex'
# ----------------------
# web routes
# ----------------------
@app.route('/')
@app.route('/vault')
@app.route('/api/signout')
@app.route('/signin/complete')
# ----------------------
# api routes
# ----------------------
@app.route('/api/userinfo')
| [
7061,
6,
198,
562,
388,
8544,
25,
198,
220,
532,
3884,
530,
11,
262,
3756,
10131,
286,
5369,
4542,
318,
257,
7297,
286,
3139,
530,
357,
68,
13,
70,
13,
3740,
1378,
27544,
505,
13,
785,
14,
43082,
505,
8,
198,
220,
532,
2836,
154... | 3.472593 | 675 |
import numpy as np
import logging
import time
class TrainingEnvironment:
""" Class to handle the processing of the game loop."""
def run_epoch(self, num_steps, training=True):
""" Run a training epoch for a giving number of steps.
Return the average reward, and number of episodes."""
total_steps = 0
total_reward = 0
num_episodes = 0
while total_steps < num_steps:
reward, steps = self.run_episode(training=training)
total_reward += reward
total_steps += steps
num_episodes += 1
return total_reward / num_episodes, num_episodes
def step(self, action):
""" This relies on the fact that the underlying environment creates new images for each frame.
By default, opengym uses atari_py's getScreenRGB2 which creates a new array for each frame."""
total_reward = 0
obs = None
for k in range(self.frame_skip):
last_obs = obs
obs, reward, is_terminal, info = self._env.step(action)
total_reward += reward
if is_terminal:
# End episode if is terminal
if k == 0 and last_obs is None:
last_obs = obs
break
if self.consecutive_max and self.frame_skip > 1:
obs = np.maximum(last_obs, obs)
# Store observation
self.store_observation(obs)
return self._get_current_state(), total_reward, is_terminal
| [
11748,
299,
32152,
355,
45941,
198,
11748,
18931,
198,
11748,
640,
198,
198,
4871,
13614,
31441,
25,
198,
220,
220,
220,
37227,
5016,
284,
5412,
262,
7587,
286,
262,
983,
9052,
526,
15931,
628,
198,
220,
220,
220,
825,
1057,
62,
538,
... | 2.322137 | 655 |
import os,sys
import heapq
from chardet.universaldetector import UniversalDetector
#train_pos, test_pos = create_sample(’pos’, 0.75)
#train_neg, test_neg = create_sample(’neg’, 0.75)
#train = train_pos + train_neg
#test = test_pos + test_neg
| [
198,
11748,
28686,
11,
17597,
198,
11748,
24575,
80,
198,
6738,
442,
446,
316,
13,
403,
1191,
1940,
316,
9250,
1330,
14499,
11242,
9250,
628,
628,
198,
2,
27432,
62,
1930,
11,
1332,
62,
1930,
796,
2251,
62,
39873,
7,
447,
247,
1930,... | 2.217742 | 124 |