content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
import fatiando as ft

# Build a synthetic sediment-basin model on a regular lon/lat grid, write it
# to 'layers.txt', then plot basement depth, topography and thickness maps.
# NOTE(review): written against a fatiando API exposing ft.grd.regular,
# ft.utils.gaussian2d and ft.vis.* -- confirm these names for the pinned
# fatiando version.

# 41 x 41 grid over lon in [-10, 10] and lat in [30, 50].
shape = (41, 41)
x, y = ft.grd.regular((-10, 10, 30, 50), shape)
# Topography: 800 m baseline perturbed by a Gaussian centered at (0, 37).
height = 800 - 1000*ft.utils.gaussian2d(x, y, 3, 1, x0=0, y0=37)
# Basement relief: Gaussian basin reaching -7000 m, centered at (0, 40).
rel = -7000*ft.utils.gaussian2d(x, y, 3, 5, x0=0, y0=40)
# Sediment thickness = distance from topographic surface down to basement.
thick = height - rel
# Uniform sediment density of 1900 (presumably kg/m^3 -- confirm units).
dens = 1900*np.ones_like(thick)

# One row per grid node: lon lat height thickness density (see header below).
data = np.transpose([x, y, height, thick, dens])
with open('layers.txt', 'w') as f:
    f.write("# Synthetic layer model of sediments and topography\n")
    f.write("# Columns are:\n")
    f.write("# lon lat height thickness density\n")
    np.savetxt(f, data, fmt='%g')

# Pseudocolor maps of the three fields, each saved to its own PNG.
ft.vis.figure(figsize=(4, 3))
ft.vis.title('Depth of sediments [m]')
ft.vis.axis('scaled')
ft.vis.pcolor(x, y, rel, shape)
ft.vis.colorbar()
ft.vis.savefig('depth.png')
ft.vis.figure(figsize=(4, 3))
ft.vis.title('Topography [m]')
ft.vis.axis('scaled')
ft.vis.pcolor(x, y, height, shape)
ft.vis.colorbar()
ft.vis.savefig('topography.png')
ft.vis.figure(figsize=(4, 3))
ft.vis.title('Thickness of sediment layer [m]')
ft.vis.axis('scaled')
ft.vis.pcolor(x, y, thick, shape)
ft.vis.colorbar()
ft.vis.savefig('thickness.png')
ft.vis.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
3735,
72,
25440,
355,
10117,
198,
198,
43358,
796,
357,
3901,
11,
6073,
8,
198,
87,
11,
331,
796,
10117,
13,
2164,
67,
13,
16338,
19510,
12,
940,
11,
838,
11,
1542,
11,
2026,
828,
5485,
8... | 2.261317 | 486 |
from .page_all_requests import * # noqa
from .page_request_details import * # noqa | [
6738,
764,
7700,
62,
439,
62,
8897,
3558,
1330,
1635,
220,
1303,
645,
20402,
201,
198,
6738,
764,
7700,
62,
25927,
62,
36604,
1330,
1635,
220,
1303,
645,
20402
] | 2.931034 | 29 |
# -*- coding: utf-8 -*-
"""
wegene.Models.AncestryArea
This file was automatically generated by APIMATIC BETA v2.0 on 02/22/2016
"""
from wegene.APIHelper import APIHelper
class AncestryArea(object):

    """Implementation of the 'ancestry area' model.

    User ancestry composition areas information. Every component listed in
    ``_COMPONENTS`` is exposed as a string attribute on instances of this
    class; components not supplied to the constructor default to ``None``.
    """

    # The 42 ancestry composition components this model carries. For this
    # model the API property names are identical to the attribute names, so
    # one tuple drives both the constructor and resolve_names().
    _COMPONENTS = (
        "uygur", "ny", "finnish_russian", "iranian", "dai", "spanish",
        "han_southern", "han_northern", "somali", "tungus", "sardinian",
        "mayan", "mongolian", "egyptian", "pima", "gaoshan", "lahu",
        "cambodian", "korean", "french", "english", "balkan", "sindhi",
        "papuan", "hungarian", "kinh", "japanese", "eskimo", "saudi",
        "mbuti", "she", "tibetan", "yoruba", "bantusa", "ashkenazi",
        "mala", "yakut", "bengali", "thai", "kyrgyz", "uzbek", "miao_yao",
    )

    def __init__(self,
                 **kwargs):
        """Constructor for the AncestryArea class.

        Args:
            **kwargs: Keyword arguments used to initialise the object. Any
                name listed in ``_COMPONENTS`` may be supplied as a string;
                unknown keyword names are silently ignored.
        """
        # Default every composition component to None first.
        for component in self._COMPONENTS:
            setattr(self, component, None)

        # API property names equal model property names for this model.
        replace_names = {component: component
                         for component in self._COMPONENTS}

        # Only keep keyword arguments that are actually part of this object.
        if kwargs is not None:
            for key in kwargs:
                if key in replace_names:
                    setattr(self, replace_names[key], kwargs[key])

            # Nested objects need their own model constructor.
            # NOTE(review): GenotypesModel is not imported in this module --
            # reaching this branch raises NameError; confirm upstream.
            if "result" in kwargs:
                self.result = GenotypesModel(**kwargs["result"])

    def resolve_names(self):
        """Creates a dictionary representation of this object.

        Converts the object into the dictionary shape expected by an API
        request; for this model the API property names match the model
        property names exactly.

        Returns:
            dict: The dictionary representing the object.
        """
        # Model property name -> API property name (identity mapping here).
        replace_names = {component: component
                        for component in self._COMPONENTS}
        retval = dict()
        return APIHelper.resolve_names(self, replace_names, retval)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
356,
70,
1734,
13,
5841,
1424,
13,
2025,
9165,
563,
30547,
628,
220,
220,
770,
2393,
373,
6338,
7560,
416,
3486,
3955,
1404,
2149,
347,
2089... | 2.105065 | 5,035 |
import argparse
import os
from typing import Type
from loguru import logger
import numpy as np
import torch
from probnmn.config import Config
from probnmn.evaluators import (
ProgramPriorEvaluator,
JointTrainingEvaluator,
ModuleTrainingEvaluator,
QuestionCodingEvaluator,
)
from probnmn.trainers import (
ProgramPriorTrainer,
JointTrainingTrainer,
ModuleTrainingTrainer,
QuestionCodingTrainer,
)
# For static type hints.
from probnmn.evaluators._evaluator import _Evaluator
from probnmn.trainers._trainer import _Trainer
parser = argparse.ArgumentParser("Run training for a particular phase.")
parser.add_argument(
    "--phase",
    required=True,
    choices=["program_prior", "question_coding", "module_training", "joint_training"],
    # typo fix: "evalaute" -> "evaluate"
    help="Which phase to evaluate, this must match 'PHASE' parameter in provided config.",
)
parser.add_argument(
    "--config-yml", required=True, help="Path to a config file for specified phase."
)
parser.add_argument(
    # typo fix: "and and" -> "and"
    "--checkpoint-path", default="", help="Path to load checkpoint and evaluate."
)

parser.add_argument_group("Compute resource management arguments.")
parser.add_argument(
    "--gpu-ids", required=True, nargs="+", type=int, help="List of GPU IDs to use (-1 for CPU)."
)
parser.add_argument(
    "--cpu-workers", type=int, default=0, help="Number of CPU workers to use for data loading."
)


if __name__ == "__main__":
    _A = parser.parse_args()

    # Create a config with default values, then override from config file, and _A.
    # This config object is immutable, nothing can be changed in this anymore.
    _C = Config(_A.config_yml)

    # The phase from the arguments must agree with the one in the config.
    if _A.phase != _C.PHASE:
        raise ValueError(
            f"Provided `--phase` as {_A.phase}, does not match config PHASE ({_C.PHASE})."
        )

    # Print configs and args.
    logger.info(_C)
    for arg in vars(_A):
        logger.info("{:<20}: {}".format(arg, getattr(_A, arg)))

    # For reproducibility - refer https://pytorch.org/docs/stable/notes/randomness.html
    # These five lines control all the major sources of randomness.
    np.random.seed(_C.RANDOM_SEED)
    torch.manual_seed(_C.RANDOM_SEED)
    torch.cuda.manual_seed_all(_C.RANDOM_SEED)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    serialization_dir = os.path.dirname(_A.checkpoint_path)

    # Select trainer/evaluator classes for this phase from lookup tables
    # (phase is already validated against argparse choices above).
    trainer_factories = {
        "program_prior": ProgramPriorTrainer,
        "question_coding": QuestionCodingTrainer,
        "module_training": ModuleTrainingTrainer,
        "joint_training": JointTrainingTrainer,
    }
    evaluator_factories = {
        "program_prior": ProgramPriorEvaluator,
        "question_coding": QuestionCodingEvaluator,
        "module_training": ModuleTrainingEvaluator,
        "joint_training": JointTrainingEvaluator,
    }
    TrainerClass: Type[_Trainer] = trainer_factories[_C.PHASE]
    EvaluatorClass: Type[_Evaluator] = evaluator_factories[_C.PHASE]

    trainer = TrainerClass(_C, serialization_dir, _A.gpu_ids, _A.cpu_workers)
    evaluator = EvaluatorClass(_C, trainer.models, _A.gpu_ids, _A.cpu_workers)

    # Load from a checkpoint to trainer for evaluation (evaluator can evaluate
    # this checkpoint because it was passed by assignment in constructor).
    trainer.load_checkpoint(_A.checkpoint_path)

    # Evaluate on full CLEVR v1.0 validation set.
    val_metrics = evaluator.evaluate()
    for model_name in val_metrics:
        for metric_name in val_metrics[model_name]:
            logger.info(
                f"val/metrics/{model_name}/{metric_name}: {val_metrics[model_name][metric_name]}"
            )
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
19720,
1330,
5994,
198,
198,
6738,
2604,
14717,
1330,
49706,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
6738,
1861,
77,
10295,
13,
11250,
1330,
17056,
198,
6738,
1861... | 2.725291 | 1,376 |
# This file is used to configure the behavior of pytest when using the Astropy
# test infrastructure.
import os
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
# from astropy.tests.helper import enable_deprecations_as_exceptions
## Uncomment the following line to treat all DeprecationWarnings as
## exceptions. For Astropy v2.0 or later, there are 2 additional keywords,
## as follow (although default should work for most cases).
## To ignore some packages that produce deprecation warnings on import
## (in addition to 'compiler', 'scipy', 'pygments', 'ipykernel', and
## 'setuptools'), add:
## modules_to_ignore_on_import=['module_1', 'module_2']
## To ignore some specific deprecation warning messages for Python version
## MAJOR.MINOR or later, add:
## warnings_to_ignore_by_pyver={(MAJOR, MINOR): ['Message to ignore']}
# enable_deprecations_as_exceptions()
| [
2,
770,
2393,
318,
973,
284,
17425,
262,
4069,
286,
12972,
9288,
618,
1262,
262,
8304,
28338,
198,
2,
1332,
6884,
13,
198,
11748,
28686,
198,
198,
6738,
12972,
9288,
62,
459,
28338,
62,
25677,
13,
13812,
1330,
350,
56,
51,
6465,
62,... | 3.312727 | 275 |
import os
from conans import ConanFile
| [
11748,
28686,
198,
198,
6738,
369,
504,
1330,
31634,
8979,
198
] | 3.636364 | 11 |
#!/usr/bin/env python
import asyncio
from hexbytes import HexBytes
from decimal import Decimal
import logging
from typing import (
List,
Dict,
Iterable,
Set,
Optional
)
from web3 import Web3
from web3.datastructures import AttributeDict
from hummingbot.wallet.ethereum.erc20_token import ERC20Token
from hummingbot.logger import HummingbotLogger
from hummingbot.core.event.events import (
NewBlocksWatcherEvent,
WalletWrappedEthEvent,
WalletUnwrappedEthEvent,
WalletEvent
)
from hummingbot.core.event.event_forwarder import EventForwarder
from hummingbot.core.utils.async_utils import safe_ensure_future
from .base_watcher import BaseWatcher
from .new_blocks_watcher import NewBlocksWatcher
from .contract_event_logs import ContractEventLogger
# Names of the contract log events the watcher subscribes to.
# NOTE(review): assumed to match the wrapped-ETH contract ABI event names
# (the module imports WalletWrappedEthEvent/WalletUnwrappedEthEvent) --
# confirm against the contract definition.
DEPOSIT_EVENT_NAME = "Deposit"
WITHDRAWAL_EVENT_NAME = "Withdrawal"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
30351,
952,
198,
6738,
17910,
33661,
1330,
22212,
45992,
198,
6738,
32465,
1330,
4280,
4402,
198,
11748,
18931,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
7343,
11,
198... | 3.021277 | 282 |
"""Authorization fixtures module"""
import pytest
from src.middleware import generate_token
from src.utilities.constants import MIMETYPE, MIMETYPE_TEXT
from ..mocks.token import EXPIRED_TOKEN as expired_token
@pytest.fixture(scope='module')
def auth_header(new_user, generate_token=generate_token):
    """Build a Bearer authorization header for a freshly saved user."""
    user = new_user.save()

    # Claims embedded into the token for this user.
    payload = {"id": user.id}
    for field in ('first_name', 'last_name', 'username', 'email'):
        payload[field] = getattr(user, field)

    token, _ = generate_token(payload)
    return {
        'Authorization': 'Bearer ' + token,
        'Content-Type': MIMETYPE,
        'Accept': MIMETYPE,
    }
@pytest.fixture(scope='module')
def auth_header_text(new_user_two, generate_token=generate_token):
    """Build a Bearer authorization header using the text MIME type."""
    user = new_user_two.save()

    # Claims embedded into the token for this user.
    payload = {"id": user.id}
    for field in ('first_name', 'last_name', 'username', 'email'):
        payload[field] = getattr(user, field)

    token, _ = generate_token(payload)
    return {
        'Authorization': 'Bearer ' + token,
        'Content-Type': MIMETYPE_TEXT,
        'Accept': MIMETYPE_TEXT,
    }
@pytest.fixture(scope='module')
def auth_header_without_bearer_in_token(
        new_user_three, generate_token=generate_token):
    """Authorization header whose token lacks the 'Bearer ' prefix."""
    user = new_user_three.save()

    # Claims embedded into the token for this user.
    payload = {"id": user.id}
    for field in ('first_name', 'last_name', 'username', 'email'):
        payload[field] = getattr(user, field)

    token, _ = generate_token(payload)
    return {
        'Authorization': token,
        'Content-Type': MIMETYPE,
        'Accept': MIMETYPE,
    }
@pytest.fixture(scope='module')
def auth_header_with_expired_token():
    """Authorization header carrying an already-expired token."""
    return {
        'Authorization': 'Bearer {}'.format(expired_token),
        'Accept': MIMETYPE,
        'Content-Type': MIMETYPE,
    }
| [
37811,
13838,
1634,
34609,
8265,
37811,
198,
11748,
12972,
9288,
198,
6738,
12351,
13,
27171,
1574,
1330,
7716,
62,
30001,
198,
6738,
12351,
13,
315,
2410,
13,
9979,
1187,
1330,
337,
3955,
2767,
56,
11401,
11,
337,
3955,
2767,
56,
11401... | 2.3596 | 901 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Classes which implement tasks which builder has to be capable of doing.
Logic above these classes has to set the workflow itself.
"""
import json
import logging
from atomic_reactor.core import DockerTasker, LastLogger
from atomic_reactor.util import ImageName, print_version_of_tools, df_parser
from atomic_reactor.constants import DOCKERFILE_FILENAME
logger = logging.getLogger(__name__)
class ImageAlreadyBuilt(Exception):
    """Raised when an operation expects the image not to be built yet, but it already is."""
class ImageNotBuilt(Exception):
    """Raised when an operation expects the image to be already built, but it is not."""
class InsideBuilder(LastLogger, BuilderStateMachine):
    """
    Build-state holder; this is expected to run within the build container.

    Wraps a DockerTasker and tracks the build source, the target image name
    and the IDs/metadata of the base and built images.
    """

    def __init__(self, source, image, **kwargs):
        """
        :param source: source object providing the build input
            (must expose ``get_build_file_path()``)
        :param image: str, name the resulting image should be tagged with
        :param kwargs: accepted for forward compatibility; currently unused
        """
        LastLogger.__init__(self)
        BuilderStateMachine.__init__(self)

        print_version_of_tools()

        self.tasker = DockerTasker()

        # Log the docker daemon info/version early for debuggability.
        info, version = self.tasker.get_info(), self.tasker.get_version()
        logger.debug(json.dumps(info, indent=2))
        logger.info(json.dumps(version, indent=2))

        # arguments for build
        self.source = source
        self.base_image = None
        self.image_id = None
        self.built_image_info = None
        self.image = ImageName.parse(image)

        # get info about base image from dockerfile
        build_file_path, build_file_dir = self.source.get_build_file_path()
        self.df_dir = build_file_dir
        self._df_path = None

        # If the Dockerfile will be entirely generated from the container.yaml
        # (in the Flatpak case, say), then a plugin needs to create the Dockerfile
        # and set the base image
        if build_file_path.endswith(DOCKERFILE_FILENAME):
            self.set_df_path(build_file_path)

    @property
    def inspect_base_image(self):
        """
        inspect base image

        :return: dict
        """
        logger.info("inspecting base image '%s'", self.base_image)
        inspect_data = self.tasker.inspect_image(self.base_image)
        return inspect_data

    def inspect_built_image(self):
        """
        inspect built image

        :return: dict
        """
        logger.info("inspecting built image '%s'", self.image_id)
        self.ensure_is_built()
        # dict with lots of data, see man docker-inspect
        inspect_data = self.tasker.inspect_image(self.image_id)
        return inspect_data

    def _get_unique_image_info(self, image):
        """Return docker info for *image*, requiring exactly one match.

        :param image: image name to look up
        :return: dict for the single matching image
        :raises RuntimeError: when zero or multiple images match
        """
        image_info = self.tasker.get_image_info_by_image_name(image)
        items_count = len(image_info)
        if items_count == 1:
            return image_info[0]
        if items_count <= 0:
            logger.error("image '%s' not found", image)
            # bugfix: the message is now %-formatted instead of passing a
            # tuple of (format, arg) to RuntimeError
            raise RuntimeError("image '%s' not found" % image)
        logger.error("multiple (%d) images found for image '%s'", items_count, image)
        raise RuntimeError("multiple (%d) images found for image '%s'" % (items_count,
                                                                          image))

    def get_base_image_info(self):
        """
        query docker about base image

        :return dict
        """
        logger.info("getting information about base image '%s'", self.base_image)
        return self._get_unique_image_info(self.base_image)

    def get_built_image_info(self):
        """
        query docker about built image

        :return dict
        """
        logger.info("getting information about built image '%s'", self.image)
        return self._get_unique_image_info(self.image)
| [
37811,
198,
15269,
357,
66,
8,
1853,
2297,
10983,
11,
3457,
198,
3237,
2489,
10395,
13,
198,
198,
1212,
3788,
743,
307,
9518,
290,
9387,
739,
262,
2846,
198,
1659,
262,
347,
10305,
5964,
13,
4091,
262,
38559,
24290,
2393,
329,
3307,
... | 2.348485 | 1,848 |
import cowsay
import itertools
import threading
import time
import sys
import os
import argparse
import pandas as pd
if __name__=="__main__":
    # Command-line interface: a positional file of names, an optional count,
    # and three boolean flags controlling repetition and display.
    parser = argparse.ArgumentParser(
        description='Name Roulette v2.1.0. A random picker for names. A digital terminal script based on the game spin the bottle.')
    parser.add_argument(
        'file',
        metavar='filename',
        type=str,
        help='Files accepted are csv and txt files. Add the filename of the text or csv file containing the names as an argument, e.g., fileName.csv. Each name should be entered in a new line. You may refer to the README.md to see more examples.'
    )
    parser.add_argument(
        'amount',
        metavar='amount',
        nargs='?',
        default=1,
        type=int,
        help='The number of people to be chosen randomly.'
    )

    # The three store_true flags share the same shape; register them in a loop.
    for flag, flag_help in (
        ('--repeat',
         'Loop through the names of the players forever. Not including the --repeat flag will remove the player from the list once they are chosen.'),
        ('--display',
         'Show the list of names.'),
        ('--cowsay',
         'Show chosen name/s with cowsay illustration.'),
    ):
        parser.add_argument(flag, action="store_true", required=False, help=flag_help)

    args = parser.parse_args()
    draw_name(get_names(args.file), args.amount, args.repeat, args.display, args.cowsay)
| [
11748,
22575,
323,
198,
11748,
340,
861,
10141,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
361,
11593,
3672,
834,
855,
1,
834,
... | 2.595238 | 588 |
import gensim
from gensim.models import Word2Vec, word2vec
from gensim.models.callbacks import CallbackAny2Vec
import os

os.system('clear')

# Train a skip-gram Word2Vec model on a text corpus and save it to disk.
# NOTE(review): the loss-reporting callback class referenced as `callback()`
# below is NOT defined in this file -- calling it raises NameError unless it
# is provided elsewhere; confirm.
w2v = Word2Vec(vector_size=100, # embedding dimensionality (100)
                window = 5, # context window of 5 words
                min_count =1, # words must come atleast 1 time in the entire corpus
                workers = 4, # maximum 4 threads are required to run in parallel
                sg =1, # we are using Skip gram model
                negative = 5, # atmost 5 negative words are acceptable
                sample = 1e-5) # Sampling rate = 10^-5

# Build up the vocabulary from the corpus file.
# NOTE(review): readline() returns only the FIRST line as a plain string,
# while gensim expects an iterable of token lists -- as written, build_vocab
# and train see per-character data; likely a bug to confirm. The file handle
# is also never closed.
data = open('/home/tanmay/Desktop/NLP Workflow/Working with a big data/collected_data.txt', mode = 'r')
data = data.readline()
w2v.build_vocab(data)
print(data)

# Train the model on the same data and persist it.
w2v.train(data,
    total_examples=w2v.corpus_count,
    epochs= 1001,
    report_delay=1,
    compute_loss=True,
    callbacks= [callback()])

w2v.save('/home/tanmay/Desktop/NLP Workflow/Word2vector modeling/w2v_model.h5')
| [
11748,
308,
641,
320,
198,
6738,
308,
641,
320,
13,
27530,
1330,
9678,
17,
53,
721,
11,
1573,
17,
35138,
198,
6738,
308,
641,
320,
13,
27530,
13,
13345,
10146,
1330,
4889,
1891,
7149,
17,
53,
721,
198,
11748,
28686,
198,
198,
418,
... | 2.263158 | 532 |
import connexion
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.text_person_name_annotation import TextPersonNameAnnotation
from openapi_server.models.text_person_name_annotation_request import TextPersonNameAnnotationRequest # noqa: E501
from openapi_server.models.text_person_name_annotation_response import TextPersonNameAnnotationResponse # noqa: E501
from openapi_server.nlp_config import bert
def create_text_person_name_annotations():  # noqa: E501
    """Annotate person names in a clinical note

    Return the person name annotations found in a clinical note  # noqa: E501

    :rtype: TextPersonNameAnnotationResponse
    """
    body = None
    http_status = None
    if connexion.request.is_json:
        try:
            request = TextPersonNameAnnotationRequest.from_dict(connexion.request.get_json())  # noqa: E501
            clinical_note = request._note  # noqa: E501
            annotations = []
            # Collect PER (person) entities from the note text.
            add_name_annotation(annotations, bert.get_entities(clinical_note.text, "PER"))
            body = TextPersonNameAnnotationResponse(annotations)
            http_status = 200
        except Exception as error:
            http_status = 500
            print(error)
            body = Error("Internal error", http_status, str(error))
    return body, http_status
| [
11748,
369,
12413,
295,
198,
6738,
1280,
15042,
62,
15388,
13,
27530,
13,
18224,
1330,
13047,
220,
1303,
645,
20402,
25,
412,
33548,
198,
6738,
1280,
15042,
62,
15388,
13,
27530,
13,
5239,
62,
6259,
62,
3672,
62,
1236,
14221,
1330,
82... | 2.712 | 500 |
# Python program to print left view of Binary Tree
# A binary tree node
# Constructor to create a new node
# Recursive function pritn left view of a binary tree
# A wrapper over leftViewUtil()
# Driver program to test above function
root = Node(12)
root.left = Node(10)
root.right = Node(20)
root.right.left = Node(25)
root.right.right = Node(40)
leftView(root)
| [
2,
11361,
1430,
284,
3601,
1364,
1570,
286,
45755,
12200,
198,
198,
2,
317,
13934,
5509,
10139,
628,
220,
220,
220,
1303,
28407,
273,
284,
2251,
257,
649,
10139,
628,
198,
2,
3311,
30753,
2163,
778,
270,
77,
1364,
1570,
286,
257,
13... | 3.151261 | 119 |
def main(readings=None):
    """Read six numbers, count the positives and print their average.

    Prints two lines: "<count> valores positivos" and the average of the
    positive values with one decimal place (0.0 when there are none).

    Parameters
    ----------
    readings : iterable of float, optional
        The six values to examine; when omitted they are read from stdin,
        one per line, preserving the original script behavior.
    """
    # bugfix: the original body lost its indentation, leaving the counting
    # statements outside the `if n > 0` block / syntactically broken.
    if readings is None:
        readings = (float(input()) for _ in range(6))

    total = 0.0
    positives = 0
    for value in readings:
        if value > 0:
            positives += 1
            total += value

    # Guard against division by zero when no positive value was entered.
    average = total / positives if positives else 0

    print(positives, 'valores positivos')
    print('{:.1f}'.format(average))


if __name__ == '__main__':
    main()
| [
82,
796,
657,
198,
80,
796,
657,
198,
1640,
1312,
287,
2837,
7,
21,
2599,
198,
220,
220,
220,
299,
796,
12178,
7,
15414,
28955,
198,
220,
220,
220,
611,
299,
1875,
657,
25,
198,
220,
220,
220,
220,
220,
220,
220,
10662,
15853,
3... | 1.87 | 100 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2019-02-06 15:58
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
22,
319,
13130,
12,
2999,
12,
3312,
1315,
25,
3365,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.690909 | 55 |
"""
``StatsNbaSummaryLoader`` loads summary data for a game and
creates :obj:`~pbpstats.resources.games.stats_nba_game_item.StatsNbaGameItem`
objects for game
The following code will load summary data for game id "0021900001" from
a file located in a subdirectory of the /data directory
.. code-block:: python
from pbpstats.data_loader.stats_nba.summary.file import StatsNbaSummaryFileLoader
from pbpstats.data_loader.stats_nba.summary.loader import StatsNbaSummaryLoader
source_loader = StatsNbaSummaryFileLoader("/data")
summary_loader = StatsNbaSummaryLoader("0021900001", source_loader)
print(summary_loader.items[0].data) # prints game summary dict for game
"""
from pbpstats.data_loader.stats_nba.base import StatsNbaLoaderBase
from pbpstats.resources.games.stats_nba_game_item import StatsNbaGameItem
class StatsNbaSummaryLoader(StatsNbaLoaderBase):
    """
    Loads stats.nba.com source summary data for game.

    Summary data is stored in items attribute
    as :obj:`~pbpstats.resources.games.stats_nba_game_item.StatsNbaGameItem` objects

    :param str game_id: NBA Stats Game Id
    :param source_loader: :obj:`~pbpstats.data_loader.stats_nba.summary.file.StatsNbaSummaryFileLoader` or :obj:`~pbpstats.data_loader.stats_nba.summary.web.StatsNbaSummaryWebLoader` object
    """

    # Identifies the pbpstats data source this loader consumes.
    data_provider = "stats_nba"
    # Resource/parent keys the pbpstats framework uses to route this loader.
    resource = "Games"
    parent_object = "Game"
| [
37811,
198,
15506,
29668,
45,
7012,
22093,
17401,
15506,
15989,
10638,
1366,
329,
257,
983,
290,
198,
20123,
274,
1058,
26801,
25,
63,
93,
40842,
79,
34242,
13,
37540,
13,
19966,
13,
34242,
62,
77,
7012,
62,
6057,
62,
9186,
13,
29668,... | 2.997859 | 467 |
from discord.ext import commands
import discord
import platform
| [
6738,
36446,
13,
2302,
1330,
9729,
198,
11748,
36446,
198,
11748,
3859,
628,
198
] | 4.714286 | 14 |
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.conf import settings
from mako.models import FileConfig
from api.models import File
import logging
from typing import List, Tuple
import uuid
from uuid import UUID
import os
import pathlib
import zipfile
import tarfile
import hashlib
from shutil import copyfile, rmtree
logger = logging.getLogger('django')
SAVED_FILES = settings.SAVED_FILES
######
######
#to-do: get rid of list stuff
def save_files(uploaded_file: InMemoryUploadedFile, software, relative_dir, file_type) -> Tuple[List[File], int]:
    """ process all requested files

    Extracts the upload into a request-scoped temporary directory, keeps only
    files whose extension is configured as valid, hashes and stores each one,
    then removes the temporary directory.

    Parameters
    ----------
    uploaded_file : InMemoryUploadedFile
        file uploaded by the user
    software : Software object
        software that the file is related to
    relative_dir : str
        relative directory within software
    file_type : str
        file type within software

    Returns
    -------
    tuple (list of File, int)
        first item is the list of File objects that were created for this
        upload and the second item is the number of files saved
    """
    # get the configured set of valid file extensions
    valid_file_types = FileConfig.objects.load().valid_file_formats
    valid_file_types_str = ','.join(valid_file_types)
    logger.debug(f'valid file types: {valid_file_types_str}')

    # create a unique temporary directory and save the upload into it
    unique_dir = uuid.uuid4()  # scope all operations to a directory for this request
    temp_file_path = save_to_temp(unique_dir, uploaded_file)
    logger.info(f'Begining file processing for: {temp_file_path}')
    logger.info(f'Checking for file extensions using: {valid_file_types_str}')

    # process files in temp directory and save to permanent storage
    saved_files = []
    new_files = 0
    # only files with a valid extension come back from process_files
    for file_name, file_path in process_files(valid_file_types, temp_file_path):
        # hash the file contents (used for duplicate/version detection)
        hash_string = hash_file(file_path)
        # create the permanent copy and its database record
        saved_file = save_file(file_name, file_path, hash_string, software, relative_dir, file_type)
        new_files += 1
        saved_files.append(saved_file)

    # remove the request-scoped temp directory
    clean_temp_directory(temp_file_path)
    logger.info(f'Finished processing files. Total: {len(saved_files)} New: {new_files}')
    return saved_files, new_files
def save_to_temp(unique_dir: UUID, file: InMemoryUploadedFile) -> str:
    """Persist an uploaded file under a request-scoped temporary directory.

    Parameters
    ----------
    unique_dir : UUID
        unique directory name that scopes this request's files
    file : django.core.files.uploadedfile.InMemoryUploadedFile
        file uploaded by the user

    Returns
    -------
    str
        path of the file written under files/temp/<unique_dir>/
    """
    target_dir = f'files/temp/{unique_dir}'
    target_path = f'{target_dir}/{file.name}'
    os.makedirs(target_dir, exist_ok=True)

    # Stream the upload to disk chunk by chunk to avoid loading it whole.
    with open(target_path, 'wb+') as destination:
        for chunk in file.chunks():
            destination.write(chunk)
    return target_path
#add check to file type
def process_files(valid_file_types: List[str], temp_file_path: str) -> List[Tuple[str, str]]:
    """Collect the files under the temp directory that have a valid extension.

    Returns
    -------
    list of tuple(str, str)
        (file name, full path) pairs for every accepted file
    """
    base_dir = os.path.dirname(temp_file_path)
    logger.info(f'Processing files in {base_dir}')

    matches = []
    for root, _dirs, file_names in os.walk(base_dir):
        for name in file_names:
            candidate = os.path.join(root, name)
            if is_software_file(valid_file_types, candidate):
                matches.append((name, candidate))

    logger.info(f'Processed files: {matches}')
    return matches
def is_software_file(valid_file_types: List[str], file_path: str) -> bool:
    """Tell whether ``file_path`` carries an accepted software extension.

    Parameters
    ----------
    valid_file_types : list of str
        Accepted (compound) extensions, e.g. ``['.tar.gz']``.
    file_path : str
        Path to the file we are checking.

    Returns
    -------
    bool
        ``True`` when the file's full suffix chain is accepted.
    """
    # Join every suffix so multi-part extensions such as '.tar.gz' are
    # matched as a whole rather than only the final '.gz'.
    file_type = ''.join(pathlib.Path(file_path).suffixes)
    if file_type in valid_file_types:
        return True
    logger.warning(f'Ignoring for incorrect file type: {file_type} - {file_path} among {valid_file_types}')
    return False
def hash_file(file_path: str) -> str:
    """Compute the MD5 digest of the file located at ``file_path``.

    Parameters
    ----------
    file_path : str
        Path of the file to hash.

    Returns
    -------
    str
        Hex-encoded MD5 digest of the file contents.
    """
    digest = hashlib.md5()
    # Stream the file in 4 KiB blocks so large uploads never have to be
    # held in memory at once.
    with open(file_path, 'rb') as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    hash_string = digest.hexdigest()
    logger.debug(f'Created hash {hash_string} for file: {file_path}')
    return hash_string
def save_file(file_name: str, file_path: str, hash_string: str, software_key, relative_dir, file_type) -> File:
    """Persist an uploaded file on disk and record it in the database.

    Parameters
    ----------
    file_name : str
        Original name of the file.
    file_path : str
        Full path of the file in temporary storage.
    hash_string : str
        MD5 hash of the file contents, used for duplicate detection.
    software_key
        Software this file belongs to.
    relative_dir
        Directory of the file relative to the software root.
    file_type
        Type classification stored on the record.

    Returns
    -------
    File
        Newly created ``File`` row; the version is bumped when a file with
        the same hash already exists for this software.
    """
    # Store under a fresh UUID (original extension kept) so uploads with
    # identical names never collide on disk.
    _, file_extension = os.path.splitext(file_name)
    uuid_name = f'{uuid.uuid4()}{file_extension}'
    save_path = os.path.join(SAVED_FILES, uuid_name)
    copyfile(file_path, save_path)
    existing = File.objects.filter(hash=hash_string, software=software_key)
    if not existing.exists():
        logger.debug(f'New File is being created: {file_name} at {file_path}')
        new_version = 0
    else:
        logger.debug(f'File already exists: {file_name}')
        new_version = existing.latest('version').version + 1
    return File.objects.create(
        name=file_name,
        uuid_name=uuid_name,
        file_path=save_path,
        hash=hash_string,
        version=new_version,
        software=software_key,
        relative_dir=relative_dir,
        file_type=file_type
    )
def clean_temp_directory(temp_file_path: str) -> None:
    """Delete the temporary directory that held the uploaded file.

    Parameters
    ----------
    temp_file_path : str
        File path of the original file uploaded by the user; its parent
        directory (and everything inside it) is removed.
    """
    directory = os.path.dirname(temp_file_path)
    logger.debug(f'Clearing temporary file path: {directory}')
    rmtree(directory)
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
25850,
276,
7753,
1330,
554,
30871,
41592,
276,
8979,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
285,
25496,
13,
27530,
1330,
9220,
16934,
198,
6738,
40391,
13,
27530,
1330,
9... | 2.617433 | 2,478 |
import numpy as np
import pytest
from panqec.bpauli import bsf_to_pauli, bsf_wt
from panqec.error_models import PauliErrorModel, DeformedXZZXErrorModel
from panqec.codes import Toric3DCode
from panqec.decoders import DeformedToric3DMatchingDecoder
from panqec.bsparse import to_array
from panqec.utils import get_direction_from_bias_ratio
@pytest.mark.parametrize('pauli,bias,expected', [
('X', 0.5, (1/3, 1/3, 1/3)),
('Y', 0.5, (1/3, 1/3, 1/3)),
('Z', 0.5, (1/3, 1/3, 1/3)),
('X', np.inf, (1, 0, 0)),
('Y', np.inf, (0, 1, 0)),
('Z', np.inf, (0, 0, 1)),
('X', 1, (0.5, 0.25, 0.25)),
('Y', 1, (0.25, 0.5, 0.25)),
('Z', 1, (0.25, 0.25, 0.5)),
])
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
3425,
80,
721,
13,
46583,
2518,
72,
1330,
275,
28202,
62,
1462,
62,
79,
2518,
72,
11,
275,
28202,
62,
46569,
198,
6738,
3425,
80,
721,
13,
18224,
62,
27530,
1330,
33... | 2.032641 | 337 |
from ..factory import Type
| [
6738,
11485,
69,
9548,
1330,
5994,
628
] | 4 | 7 |
import numpy as np
import vtk
if __name__ == '__main__':
main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
410,
30488,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.448276 | 29 |
import time
| [
11748,
640,
198
] | 4 | 3 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields, validate
from polyaxon_schemas.ops.run import BaseRunConfig, BaseRunSchema
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
6738,
22397,
42725,
1330,
7032,
11,
26571,
198,
198,
6738,
7514,
897,
261,
62,
... | 3.225806 | 62 |
"""
:Author: Bilal Shaikh <bilal.shaikh@columbia.edu>
Ashwin Srinivasan <ashwins@mit.edu>
::Date: 2018-07-19
:Copyright: 2018, Karr Lab
:License: MIT
"""
import wc_kb
import wc_kb_gen
import numpy
from wc_onto import onto as wcOntology
from wc_utils.util.ontology import are_terms_equivalent
class ObservablesGenerator(wc_kb_gen.KbComponentGenerator):
    """
    Creates observable objects for proteins and tRNAs and complexes that are assigned to specific functions. Adds these observables to the knowledge base.
    Options:
    * assigned_trnas (:obj:`list`): A list of the names of trnas to be created
    * assigned_proteins (:obj:`list`): A list of the names of proteins to be created
    * assigned_complexes (:obj:`list`): A list of the names of complexes to be created
    """

    def clean_and_validate_options(self):
        """ Apply default options and validate options """
        options = self.options
        # Only the 'normal' (64-codon) and 'reduced' (4-codon) genetic codes
        # are supported.
        genetic_code = options.get('genetic_code', 'normal')
        assert(genetic_code in ['normal', 'reduced'])
        options['genetic_code'] = genetic_code
        genetic_code = options['genetic_code']
        if genetic_code=='normal':
            # Enumerate all 4**3 = 64 codons and derive one default tRNA name
            # per codon.
            bases = "TCAG"
            codons = [a + b + c for a in bases for b in bases for c in bases]
            default_trnas = []
            for codon in codons:
                default_trnas.append('tRNA_'+codon)
        elif genetic_code=='reduced':
            default_trnas = ['tRNA_ATC','tRNA_CTG','tRNA_ATG','tRNA_ACG']
        assigned_trnas = options.get('assigned_trnas', default_trnas)
        options['assigned_trnas'] = assigned_trnas
        assigned_proteins = options.get('assigned_proteins', [
            'translation_init_factors',
            'translation_elongation_factors',
            'translation_release_factors',
            'degrade_rnase',
            'degrade_protease',
            'rna_polymerase',
            'aminoacyl_synthetase'])
        # There must be at least one generated protein available per
        # assigned functional role.
        prots = self.knowledge_base.cell.species_types.get(__type=wc_kb.prokaryote.ProteinSpeciesType)
        assert(len(assigned_proteins) <= len(prots))
        options['assigned_proteins'] = assigned_proteins
        assigned_complexes = options.get('assigned_complexes', ['ribosome'])
        options['assigned_complexes'] = assigned_complexes

    def gen_components(self):
        """ Takes random samples of the generated rnas and proteins and assigns them functions based on the included list of proteins and rnas"""
        cell = self.knowledge_base.cell
        cytosol = cell.compartments.get_one(id='c')
        genetic_code = self.options['genetic_code']
        # NOTE(review): `assigned_trnas` is read here but never used below
        # (tRNA observable names are rebuilt from the `codons` table) —
        # confirm whether it was meant to drive the naming.
        assigned_trnas = self.options['assigned_trnas']
        assigned_proteins = self.options['assigned_proteins']
        assigned_complexes = self.options['assigned_complexes']
        prots = self.knowledge_base.cell.species_types.get(__type=wc_kb.prokaryote.ProteinSpeciesType)
        rnas = self.knowledge_base.cell.species_types.get(__type=wc_kb.prokaryote.RnaSpeciesType)
        # Collect the RNA species annotated as tRNAs in the ontology.
        trnas = []
        for rna in rnas:
            if are_terms_equivalent(rna.type, wcOntology['WC:tRNA']):
                trnas.append(rna)
        # Codon table: maps each amino acid (one-letter code) to its codons.
        if genetic_code=='normal':
            codons = {
                'I': ['ATT', 'ATC', 'ATA'],
                'L': ['CTT', 'CTC', 'CTA', 'CTG', 'TTA', 'TTG'],
                'V': ['GTT', 'GTC', 'GTA', 'GTG'],
                'F': ['TTT', 'TTC'],
                'M': ['ATG'],
                'C': ['TGT', 'TGC'],
                'A': ['GCT', 'GCC', 'GCA', 'GCG'],
                'G': ['GGT', 'GGC', 'GGA', 'GGG'],
                'P': ['CCT', 'CCC', 'CCA', 'CCG'],
                'T': ['ACT', 'ACC', 'ACA', 'ACG'],
                'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
                'Y': ['TAT', 'TAC'],
                'W': ['TGG'],
                'Q': ['CAA', 'CAG'],
                'N': ['AAT', 'AAC'],
                'H': ['CAT', 'CAC'],
                'E': ['GAA', 'GAG'],
                'D': ['GAT', 'GAC'],
                'K': ['AAA', 'AAG'],
                'R': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG']}
        elif genetic_code=='reduced':
            codons = {
                'I': ['ATC'],
                'L': ['CTG'],
                'M': ['ATG'],
                'T': ['ACG']}
        for aa in codons:
            # Draw one random tRNA species per amino acid, without
            # replacement, to serve every codon of that amino acid.
            rna = numpy.random.choice(trnas)
            trnas.remove(rna)
            species = rna.species.get_or_create(compartment=cytosol)
            expression = wc_kb.core.ObservableExpression(
                expression=species.id(), species=[species])
            for i in range(len(codons[aa])):
                codon = codons[aa][i]
                rna_name = 'tRNA_'+codon
                # NOTE(review): every codon observable of this amino acid
                # shares the same ObservableExpression instance — confirm
                # the schema allows that sharing.
                observable = cell.observables.get_or_create(id=rna_name+'_obs')
                observable.name = rna_name
                observable.expression = expression
        # Randomly pick distinct proteins and bind each one to one of the
        # configured functional roles.
        sampled_proteins = numpy.random.choice(
            prots, len(assigned_proteins), replace=False)
        assigned_proteins = iter(assigned_proteins)
        for protein in sampled_proteins:
            protein_name = next(assigned_proteins)
            observable = cell.observables.get_or_create(id=protein_name+'_obs')
            observable.name = protein_name
            species = protein.species.get_or_create(compartment=cytosol)
            observable.expression = wc_kb.core.ObservableExpression(
                expression=species.id(), species=[species])
        # Create (or fetch) each assigned complex and expose it as an
        # observable located in the cytosol.
        for comp in assigned_complexes:
            comp_species = cell.species_types.get_or_create(
                id=comp, __type=wc_kb.core.ComplexSpeciesType)
            observable = cell.observables.get_or_create(id=comp+'_obs')
            observable.name = comp
            species = comp_species.species.get_one(compartment=cytosol)
            observable.expression = wc_kb.core.ObservableExpression(
                expression=species.id(), species=[species])
| [
37811,
198,
25,
13838,
25,
24207,
282,
19413,
13848,
1279,
33473,
282,
13,
26270,
13848,
31,
4033,
2178,
544,
13,
15532,
29,
198,
220,
220,
220,
220,
220,
220,
220,
220,
7844,
5404,
311,
12769,
38630,
272,
1279,
1077,
86,
1040,
31,
... | 2.033846 | 2,925 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 23:15:14 2021
@author: Nikhil
"""
import numpy as np
import modern_robotics as mr
import yaml
from trajectory import TrajectoryGenerator
from simulate import Odometry
from feedback_controller import FeedbackController
from math import cos, sin
import csv
import matplotlib.pyplot as plt
import speech_recognition as sr
import pyttsx3
import datetime
import os
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
if __name__ == '__main__':
    # Entry point: a voice-controlled trajectory pipeline for a mobile
    # manipulator. The user dictates a goal coordinate; the script plans a
    # trajectory, simulates it with feedback control, and writes the
    # resulting configurations to CSV.
    # NOTE(review): `Robot`, `wishMe`, `takeCommand` and `speak` are neither
    # defined nor imported in the visible part of this file — confirm where
    # they come from.
    clear = lambda: os.system('cls')
    folder_name = 'newTask'
    #present_state = np.array([[0,0,1,0],[0,1,0,0],[-1,0,0,0.5],[0,0,0,1]])
    # Input/output file locations.
    bot_params_filename = 'config/bot_params.yaml'
    traj_params_filename = 'config/trajectory_params.yaml'
    conf_csv_filename = 'config.csv'
    traj_csv_filename = 'trajectory.csv'
    plot_fileneame='plot.png'
    error_csv_filename='error.csv'
    # Reading params from the params file
    # NOTE(review): yaml.load() without an explicit Loader is deprecated and
    # unsafe on untrusted input — consider yaml.safe_load().
    with open(bot_params_filename) as file:
        bot_params = yaml.load(file)
    # Reading params from the params file
    with open(traj_params_filename) as file:
        traj_params = yaml.load(file)
    full_configs = list()
    error_list = [[],[]]
    kuka_bot = Robot(bot_params, traj_params)
    config = bot_params["initial_config"]
    # Adding initial config
    full_configs.append([round(conf,4) for conf in config["chasis"]]+ [round(conf,4) for conf in config["arm"]] + [round(conf,4) for conf in config["wheel"]] + [config["gripp_state"]])
    time = 0.0
    timestep = traj_params["timestep"]
    # This Function will clean any
    # command before execution of this python file
    wishMe()
    #t = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10}
    while True:
        query = takeCommand().lower()
        # All the commands said by user will be
        # stored here in 'query' and will be
        # converted to lower case for easily
        # recognition of command
        if 'activate voice control' in query:
            speak("activating voice control")
            speak("mention the goal X coordinate")
            query = takeCommand().lower()
            # a = 1, for testing
            # NOTE(review): int(query) raises ValueError when the recognizer
            # returns words ("one") instead of digits — confirm the expected
            # recognizer output format.
            a = int(query)
            speak("mention the goal Y coordinate")
            query = takeCommand().lower()
            # b = 1, for testing
            b = int(query)
            speak("mention the goal Z coordinate")
            query = takeCommand().lower()
            # c = 3, for testing
            c = int(query)/100 #c is the z coordinate of goal config wrt to space frame, it should be less than 0.5 orelse the robot might get broken down, thats why we have scaled it by dividing it wih 100
            #c = 0
            # Desired end-effector pose: identity rotation, translated by the
            # dictated (a, b, c) offset from the base pose.
            desired = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0.025],[0,0,0,1]])
            desired[0,3] = desired[0,3] + a
            desired[1,3] = desired[1,3] + b
            desired[2,3] = desired[2,3] + c
            trajs = kuka_bot.ComputeTrajectory(desired)
            #present_state = desired_state
            for traj,gripp_state in trajs:
                # Traverse through each configuration in the trajectory
                for conf_no in range(len(traj)-1):
                    Tse_des = traj[conf_no] # Desired configuration
                    Tse_des_next = traj[conf_no + 1] # Desired configuration after timestep
                    # `config` starts as the YAML dict; later iterations
                    # rebind it to NextState()'s return value, whose type is
                    # not visible here — this branch converts a flat
                    # 13-element list layout if one appears.
                    if type(config) == type(dict()):
                        confy = config
                    else:
                        confy = {}
                        confy["arm"] = config[3:8]
                        confy["chasis"] = config[0:3]
                        confy["wheel"] = config[8:12]
                        # NOTE(review): key is "gripper state" here but
                        # "gripp_state" in the initial config above — confirm
                        # which key downstream consumers read.
                        confy["gripper state"] = config[12]
                    # Computing required wheel and joint velocity to achieve the desired configuration
                    end_eff_twist, wheel_vel, joint_vel = kuka_bot.feedback_controller.FeedbackControl(confy, Tse_des, Tse_des_next)
                    error_list[0].append(time)
                    error_list[1].append(end_eff_twist)
                    controls = {'arm' : list(joint_vel), 'wheel' : list(wheel_vel)}
                    new_state = kuka_bot.odometry.NextState(confy, controls, timestep) # Compute next configuration after timestep
                    full_configs.append([round(conf,4) for conf in new_state["chasis"]]+ [round(conf,4) for conf in new_state["arm"]] + [round(conf,4) for conf in new_state["wheel"]] + [gripp_state])
                    #new_state = kuka_bot.odometry.NextState(config, controls, timestep)
                    config = new_state# Updating configuration
                    time = time + timestep
            # Generate config file
            print ("Generating config csv file")
            with open(conf_csv_filename, "w") as csv_file:
                writer = csv.writer(csv_file, delimiter=',')
                for config in full_configs:
                    writer.writerow(config)
        elif "sleep" in query:
            speak("okay im sleeping")
            speak("have a nice day sir")
            speak("it is an honour to be built by you")
            break
        else:
            speak("unable to recognize your voice sir")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
2365,
838,
2242,
25,
1314,
25,
1415,
33448,
201,
198,
201,
198,
31,
9800,
25,
11271,
71,
346,
201,
198,
37811,
201,
198,
201,
... | 2.044989 | 2,734 |
import numpy as np
from robolib.datamanager.siamese_data_loader import load_one_image
from apps.erianet.erianet_util import load_erianet_model, get_3bhif_names
# --- Script configuration ----------------------------------------------------
MODEL_FILENAME = "TestModel.model"
CLASS = 6   # NOTE(review): unused in this script — confirm before removing
IMAGE = 3   # NOTE(review): unused in this script — confirm before removing
# Load the trained Erianet model, then ask the user which stored image to
# compare against the known 3BHIF face set.
model = load_erianet_model(MODEL_FILENAME)
name = input("Enter name: ")
img = int(input("Which image: "))
image = load_one_image("3BHIF", name, img)
names = get_3bhif_names()
# NOTE(review): `predict_face_info` is neither defined nor imported in this
# module, and the loaded `model` is never passed to it — presumably it lives
# in erianet_util and should receive `model`; confirm against that module.
probs = predict_face_info(image, names)
# pair[0] indexes into `names`; pair[1] and pair[2] are printed as-is (their
# exact semantics are not visible from this file).
for pair in probs:
    print(names[pair[0]], str(pair[1]), str(pair[2]))
| [
11748,
299,
32152,
355,
45941,
198,
6738,
3857,
349,
571,
13,
19608,
10546,
3536,
13,
13396,
1047,
68,
62,
7890,
62,
29356,
1330,
3440,
62,
505,
62,
9060,
198,
6738,
6725,
13,
263,
666,
316,
13,
263,
666,
316,
62,
22602,
1330,
3440,... | 2.514706 | 204 |
import os
from rest_framework import exceptions, status
from rest_framework.permissions import AllowAny
from rest_framework.renderers import CoreJSONRenderer, JSONRenderer
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator
from rest_framework.views import APIView
from rest_framework_swagger import renderers
from rest_framework_swagger.renderers import OpenAPICodec
from rest_framework_swagger.renderers import \
OpenAPIRenderer as BaseOpenAPIRenderer
def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):
    """
    Returns schema view which renders Swagger/OpenAPI.
    """
    # NOTE(review): none of the parameters (title, url, patterns, urlconf)
    # are used here — presumably ``SwaggerSchemaView`` (defined elsewhere in
    # this module) captures them, or they were meant to configure the
    # SchemaGenerator; confirm against the class definition.
    return SwaggerSchemaView.as_view()
| [
11748,
28686,
198,
198,
6738,
1334,
62,
30604,
1330,
13269,
11,
3722,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
22507,
7149,
198,
6738,
1334,
62,
30604,
13,
10920,
19288,
1330,
7231,
40386,
49,
437,
11882,
11,
19449,
49,
437,
1... | 3.415842 | 202 |
from sql_tools import wifiTest
import random
# Seed the database with 100 synthetic access points for testing.
ap = 'SSID'
encr = True
# Fixed test coordinates shared by every generated access point.
lat = 10.7546
lon = -52.1235
for c in range(100):
    ssid = ap + str(c)
    mac = randNumb()
    sign = random.randrange(0, 100, 1)
    chan = random.randrange(1, 12, 1)
    wifiTest(c, ssid, mac, sign, chan, str(encr), lat, lon)
| [
6738,
44161,
62,
31391,
1330,
43121,
14402,
198,
11748,
4738,
628,
198,
66,
796,
657,
198,
499,
796,
705,
5432,
2389,
6,
198,
268,
6098,
796,
6407,
198,
198,
4514,
7,
66,
1279,
1802,
2599,
198,
220,
220,
220,
264,
30255,
796,
2471,
... | 2.131579 | 152 |
#!/usr/bin/python
import random
import sys
count = 0
# The player must answer 1000 arithmetic questions in a row: the first 500
# use decimal operands, the remaining 500 use hexadecimal operands.
while count < 1000:
    x = random.randint(1, 100)
    j = random.randint(1, 200)
    op_num = random.randint(1, 3)
    # Resolve the chosen operator once: its result and its display symbol.
    if op_num == 1:
        solve = x + j
        symbol = "+"
    elif op_num == 2:
        solve = x - j
        symbol = "-"
    else:
        solve = x * j
        symbol = "*"
    # Same question layout for both phases; only the operand rendering
    # (decimal vs. hex) differs.
    if count < 500:
        data = ("what is %d %s %d\n" % (x, symbol, j))
    else:
        data = ("what is %s %s %s\n" % (hex(x), symbol, hex(j)))
    try:
        r = input(data)
    except Exception:
        # Keep the game alive on any input failure (e.g. decode errors).
        print("Not sure what you are doing, but I cannot take that...")
        continue
    try:
        val = int(r)
    except ValueError:
        print("That's not an int!")
        continue
    # Compare the already-parsed value instead of re-parsing the raw input.
    if val == solve:
        count = count + 1
    else:
        print("Wrong... try again\r\n")
if count == 1000:
    print('flag{p9ovNL0HeWqbmDQk_JAaNgbp_QCYMhu0}\r\n')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
4738,
198,
11748,
25064,
198,
198,
9127,
796,
657,
198,
4514,
954,
1279,
8576,
25,
198,
220,
220,
220,
2124,
28,
25120,
13,
25192,
600,
7,
16,
11,
3064,
8,
198,
220,
220,
220,... | 1.824684 | 713 |
# -*- coding: utf-8 -*-
"""Top-level package for pineapple nodes."""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
9126,
12,
5715,
5301,
329,
45540,
13760,
526,
15931,
198
] | 2.692308 | 26 |
"""
@package dochemistry
@mainpage Wrapper for various NoSQL, document-oriented databases
@section Introduction Introduction
doChemistry is intended to be an abstraction layer for various NoSQL and
document-oriented databases.
"""
__version__ = '0.0.1'
| [
37811,
198,
31,
26495,
2205,
4411,
4592,
198,
198,
31,
12417,
7700,
27323,
2848,
329,
2972,
1400,
17861,
11,
3188,
12,
17107,
20083,
198,
198,
31,
5458,
22395,
22395,
198,
220,
466,
41829,
4592,
318,
5292,
284,
307,
281,
34651,
7679,
... | 3.753623 | 69 |
# coding: utf-8
# The MIT License (MIT)
#
# Copyright (c) <2011-2014> <Shibzukhov Zaur, szport at gmail dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from axon._objects import defname, factory, type_factory
from axon._objects import convert, reset_factory, reset_type_factory
from axon._objects import undef, as_unicode, as_list, as_dict, as_tuple, as_name
from axon._dumper import reduce, dump_as_str
import axon
import xml.etree.ElementTree as etree
import json
@reduce(etree.Element)
del reduce_ElementTree
def xml2axon(from_, to_=None, pretty=1, braces=0):
    '''
    Convert from `XML` to `AXON`.

    :from:
        The path of input file with `XML` or `XML` string.
    :to:
        The path of output file with `XML`` (default is `None`).
        If `to` is valid path then result of convertion to `AXON` will write to the file.
    :result:
        If `to` is `None` then return string with `AXON`, else return `None`.
    '''
    _text = from_.lstrip()
    if _text.startswith('<'):
        # Raw XML text: fromstring() already returns the root Element.
        # (Previously the code read `tree._root` on this branch, which
        # raised AttributeError because Element has no `_root`.)
        root = etree.fromstring(from_)
    else:
        # File path: parse() returns an ElementTree; use the public
        # getroot() accessor instead of the private `_root` attribute.
        root = etree.parse(from_).getroot()
    if to_ is None:
        return axon.dumps([root], pretty=pretty, braces=braces)
    else:
        axon.dump(to_, [root], pretty=pretty, braces=braces)
def json2axon(from_, to_=None, pretty=1, braces=1):
    '''
    Convert from `JSON` to `AXON`.

    :from:
        The path of input file with `JSON` or `JSON` string.
    :to:
        The path of output file with `JSON` (default is `None`).
        If `to` is valid path then result of convertion to `AXON` will write to the file.
    :result:
        If `to` is `None` then return string with `AXON`, else return `None`.
    '''
    text = from_.lstrip()
    # A leading '[' or '{' means we were handed JSON text, not a path.
    if text.startswith('[') or text.startswith('{'):
        val = json.loads(from_)
    else:
        # `from_` is a file path; json.load() requires a file object, so
        # open it here (previously the path string itself was passed,
        # which raised AttributeError).
        with open(from_) as stream:
            val = json.load(stream)
    if to_ is None:
        return axon.dumps([val], pretty=pretty, braces=braces)
    else:
        axon.dump(to_, [val], pretty=pretty, braces=braces)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
220,
198,
2,
15069,
357,
66,
8,
1279,
9804,
12,
4967,
29,
1279,
2484,
571,
89,
2724,
28026,
1168,
2899,
11,
264,
89,
634,
379,
308,
4529,
1... | 2.656331 | 1,161 |
import pytest
from aws_cdk.assertions import Template
| [
11748,
12972,
9288,
198,
198,
6738,
3253,
82,
62,
10210,
74,
13,
30493,
507,
1330,
37350,
628
] | 3.294118 | 17 |
from tkinter import*
from tkinter import messagebox
from centralizando import Centralizar
from componentes import Objetos
from tkinter.scrolledtext import ScrolledText
App09() | [
6738,
256,
74,
3849,
1330,
9,
198,
6738,
256,
74,
3849,
1330,
3275,
3524,
198,
6738,
4318,
528,
25440,
1330,
5694,
528,
283,
198,
6738,
7515,
274,
1330,
1835,
31173,
418,
198,
6738,
256,
74,
3849,
13,
1416,
8375,
5239,
1330,
1446,
8... | 3.56 | 50 |
__all__ = ('UserMenuFactory', 'UserMenuRunner', 'UserPagination',)
from scarletio import CancelledError, copy_docs, CallableAnalyzer
from ...discord.core import BUILTIN_EMOJIS
from ...discord.emoji import Emoji
from ...discord.interaction import InteractionEvent
from ...discord.message import Message
from ...discord import ChannelTextBase
from ...discord.exceptions import DiscordException, ERROR_CODES
from ...discord.preconverters import preconvert_bool
from .bases import GUI_STATE_READY, GUI_STATE_SWITCHING_PAGE, GUI_STATE_CANCELLING, GUI_STATE_CANCELLED, \
GUI_STATE_SWITCHING_CTX, GUI_STATE_VALUE_TO_NAME, PaginationBase
from .utils import Timeouter
def validate_check(check):
    """
    Validates the given check.

    Parameters
    ----------
    check : `None` of `callable`
        The check to validate.

    Raises
    ------
    TypeError
        If `check` is not `None` neither a non-async function accepting 1 parameter.
    """
    # `check` is optional; nothing to validate when it is not given.
    if check is None:
        return

    analyzer = CallableAnalyzer(check, as_method=True)
    if analyzer.is_async():
        # (Error message de-duplicated: the original read 'be be'.)
        raise TypeError('`check` should have NOT be `async` function.')

    min_, max_ = analyzer.get_non_reserved_positional_parameter_range()
    if min_ > 1:
        raise TypeError(
            f'`check` should accept `1` parameters, meanwhile the given callable expects at '
            f'least `{min_!r}`, got `{check!r}`.'
        )

    # `min_` below 1 is acceptable only when the callable can still receive
    # one positional parameter through `*args`.
    if min_ != 1 and max_ < 1 and not analyzer.accepts_args():
        raise TypeError(
            f'`check` should accept `1` parameters, meanwhile the given callable expects '
            f'up to `{max_!r}`, got `{check!r}`.'
        )
def validate_invoke(invoke):
    """
    Validates the given invoker.

    Parameters
    ----------
    invoke : `callable`
        The invoker to validate.

    Raises
    ------
    TypeError
        If `invoke` is not async callable or accepts not 1 parameter.
    """
    if invoke is None:
        raise TypeError(f'`invoke` function cannot be `None`.')

    analyzer = CallableAnalyzer(invoke, as_method=True)
    if not analyzer.is_async():
        raise TypeError('`invoke` should have be `async` function.')

    min_, max_ = analyzer.get_non_reserved_positional_parameter_range()
    if min_ > 1:
        raise TypeError(
            f'`invoke` should accept `1` parameters, meanwhile the given callable expects at '
            f'least `{min_!r}`, got `{invoke!r}`.'
        )

    # Fewer than 1 required positional parameter is fine only when the
    # callable can still take the event through `*args`.
    if min_ != 1 and max_ < 1 and not analyzer.accepts_args():
        raise TypeError(
            f'`invoke` should accept `1` parameters, meanwhile the given callable expects '
            f'up to `{max_!r}`, got `{invoke!r}`.'
        )
def validate_initial_invoke(initial_invoke):
    """
    Validates the given default content getter.

    Parameters
    ----------
    initial_invoke : `callable`
        The default content getter to validate.

    Raises
    ------
    TypeError
        If `initial_invoke` is not async callable or accepts any parameters.
    """
    if initial_invoke is None:
        raise TypeError(f'`initial_invoke` function cannot be `None`.')

    analyzer = CallableAnalyzer(initial_invoke, as_method=True)
    if not analyzer.is_async():
        raise TypeError('`initial_invoke` should have be `async` function.')

    min_, max_ = analyzer.get_non_reserved_positional_parameter_range()
    if min_ > 0:
        raise TypeError(
            f'`initial_invoke` should accept `0` parameters, meanwhile the given callable expects at '
            f'least `{min_!r}`, got `{initial_invoke!r}`.'
        )

    # Mirrors the other validators' structure; with `min_ > 0` already
    # rejected this clause can never raise, kept for symmetry.
    if min_ != 0 and max_ < 0 and not analyzer.accepts_args():
        raise TypeError(
            f'`initial_invoke` should accept `0` parameters, meanwhile the given callable '
            f'expects up to `{max_!r}`, got `{initial_invoke!r}`.'
        )
def validate_close(close):
    """
    Validates the given closer.

    Parameters
    ----------
    close : `None` or `callable`
        The closer to validate.

    Raises
    ------
    TypeError
        If `close` is not async callable or accepts not 1 parameter.
    """
    # `close` is documented on the factory as `None` or `async-function`
    # and is looked up with `getattr(klass, 'close', None)`, so `None`
    # must pass validation (the original raised here, making the closer
    # effectively mandatory).
    if close is None:
        return

    analyzer = CallableAnalyzer(close, as_method=True)
    if not analyzer.is_async():
        raise TypeError('`close` should have be `async` function.')

    min_, max_ = analyzer.get_non_reserved_positional_parameter_range()
    if min_ > 1:
        raise TypeError(
            f'`close` should accept `1` parameters, meanwhile the given callable expects at '
            f'least `{min_!r}`, got `{close!r}`.'
        )

    # Fewer than 1 required positional parameter is fine only when the
    # callable can still take the exception through `*args`.
    if min_ != 1 and max_ < 1 and not analyzer.accepts_args():
        raise TypeError(
            f'`close` should accept `1` parameters, meanwhile the given callable expects '
            f'up to `{max_!r}`, got `{close!r}`.'
        )
class UserMenuFactory:
    """
    Attributes
    ----------
    allow_third_party_emojis : `bool`
        Whether the runner should pick up 3rd party emojis, listed outside of `emojis`.
    check : `None` or `function`
        The function to call when checking whether an event should be called.
        Should accept the following parameters:
        +-----------+---------------------------------------------------+
        | Name      | Type                                              |
        +===========+===================================================+
        | event     | ``ReactionAddEvent`` or ``ReactionDeleteEvent``   |
        +-----------+---------------------------------------------------+
        > ``ReactionDeleteEvent`` is only given, when the client has no `manage_messages` permission.
        Should return the following values:
        +-------------------+-----------+
        | Name              | Type      |
        +===================+===========+
        | should_process    | `bool`    |
        +-------------------+-----------+
    close : `None` or `async-function`
        Function to call when the pagination is closed.
        Should accept the following parameters:
        +-----------+---------------------------+
        | Name      | Type                      |
        +===========+===========================+
        | exception | `None` or `BaseException` |
        +-----------+---------------------------+
    close_emoji : `None` or ``Emoji``
        The emoji which triggers closing.
    emojis : `None` or `tuple` of ``Emoji``
        The emojis to add on the message.
    initial_invoke : `async-function`
        Function to generate the default page of the menu.
        Should accept no parameters and return the following:
        +-----------+-----------------------------------+
        | Name      | Type                              |
        +===========+===================================+
        | response  | `None`, `str`, ``EmbedBase``      |
        +-----------+-----------------------------------+
    invoke : `async-function`
        The function call for result when invoking the menu.
        Should accept the following parameters:
        +-----------+---------------------------------------------------+
        | Name      | Type                                              |
        +===========+===================================================+
        | event     | ``ReactionAddEvent`` or ``ReactionDeleteEvent``   |
        +-----------+---------------------------------------------------+
        > ``ReactionDeleteEvent`` is only given, when the client has no `manage_messages` permission.
        Should return the following values:
        +-----------+-----------------------------------+
        | Name      | Type                              |
        +===========+===================================+
        | response  | `None`, `str`, ``EmbedBase``      |
        +-----------+-----------------------------------+
    klass : `type`
        The factory class.
    timeout : `float`
        The time after the menu should be closed.
        > Define it as non-positive to never timeout. Not recommended.
    """
    __slots__ = ('allow_third_party_emojis', 'check', 'close', 'close_emoji', 'emojis', 'initial_invoke', 'invoke',
        'klass', 'timeout')

    def __new__(cls, klass):
        """
        Parameters
        ----------
        klass : `type`
            The type to create factory from.

        Raises
        ------
        TypeError
            - If `klass` was not given as `type` instance.
            - If `klass.check` is not `None` neither a non-async function accepting 1 parameter.
            - If `invoke` is not async callable or accepts not 1 parameter.
            - If `close_emoji` is neither `None` or ``Emoji`` instance.
            - If `emojis` is neither `None` nor `tuple` or `list`.
            - If `emojis` contains a non ``Emoji`` element.
            - If `initial_invoke` is not async callable or accepts any parameters.
            - If `timeout` is not convertable to float.
            - If `closed` is neither `None` nor `async-callable`.
            - If `allow_third_party_emojis` was not given as `bool` instance.
        ValueError
            - If `emojis` length is over 20.
        """
        if not isinstance(klass, type):
            raise TypeError(f'`klass` can be given as `type` instance, got {klass.__class__.__name__}.')

        check = getattr(klass, 'check', None)
        validate_check(check)

        invoke = getattr(klass, 'invoke', None)
        validate_invoke(invoke)

        close_emoji = getattr(klass, 'close_emoji', None)
        if (close_emoji is not None) and (not isinstance(close_emoji, Emoji)):
            # (Added the missing closing backtick around `close_emoji`.)
            raise TypeError(f'`close_emoji` can be either `None` or `{Emoji.__name__}` instance, got '
                f'{close_emoji.__class__.__name__}')

        emojis = getattr(klass, 'emojis', None)
        if (emojis is not None):
            if not isinstance(emojis, (tuple, list)):
                raise TypeError(f'`emojis` can be either `None`, `list` or `tuple` instance, got '
                    f'{emojis.__class__.__name__}.')

            # Normalize to a tuple so later mutation of the source list
            # cannot affect the factory.
            emojis = tuple(emojis)
            for emoji in emojis:
                if not isinstance(emoji, Emoji):
                    raise TypeError(f'`emojis` contains non `{Emoji.__name__}` element, got '
                        f'{emoji.__class__.__name__}.')

            emojis_length = len(emojis)
            if emojis_length == 0:
                emojis = None
            elif emojis_length > 20:
                raise ValueError(f'`emojis` can contain up to `20` emojis, got {emojis_length!r}.')

        initial_invoke = getattr(klass, 'initial_invoke', None)
        validate_initial_invoke(initial_invoke)

        timeout = getattr(klass, 'timeout', None)
        if timeout is None:
            # Non-positive timeout means "never time out".
            timeout = -1.0
        else:
            try:
                timeout = float(timeout)
            except (TypeError, ValueError) as err:
                # Fixed: the original accessed `__class__.__mame__` (typo),
                # which raised AttributeError instead of this TypeError.
                raise TypeError(f'`timeout` cannot be converted to `float`, got {timeout.__class__.__name__}; {timeout!r}') \
                    from err

        close = getattr(klass, 'close', None)
        validate_close(close)

        allow_third_party_emojis = getattr(klass, 'allow_third_party_emojis', None)
        if (allow_third_party_emojis is None):
            allow_third_party_emojis = False
        else:
            allow_third_party_emojis = preconvert_bool(allow_third_party_emojis, 'allow_third_party_emojis')

        self = object.__new__(cls)
        self.klass = klass
        self.check = check
        self.close_emoji = close_emoji
        self.emojis = emojis
        self.initial_invoke = initial_invoke
        self.invoke = invoke
        self.timeout = timeout
        self.close = close
        self.allow_third_party_emojis = allow_third_party_emojis
        return self

    def __repr__(self):
        """Returns the user menu factory's representation."""
        repr_parts = [
            '<',
            self.__class__.__name__,
            ' klass=',
            self.klass.__name__,
        ]

        emojis = self.emojis
        if (emojis is not None):
            repr_parts.append(', emojis=(')
            index = 0
            limit = len(emojis)
            # Join the emoji names with ', ' without a trailing separator.
            while True:
                emoji = emojis[index]
                repr_parts.append(emoji.name)
                index += 1
                if index == limit:
                    break
                repr_parts.append(', ')
            repr_parts.append(')')

        close_emoji = self.close_emoji
        if (close_emoji is not None):
            repr_parts.append(', close_emoji=')
            repr_parts.append(close_emoji.name)

        allow_third_party_emojis = self.allow_third_party_emojis
        if allow_third_party_emojis:
            repr_parts.append(', allow_third_party_emojis=')
            repr_parts.append(repr(allow_third_party_emojis))

        timeout = self.timeout
        if timeout > 0.0:
            repr_parts.append(', timeout=')
            repr_parts.append(repr(timeout))

        repr_parts.append('>')
        return ''.join(repr_parts)

    async def __call__(self, client, channel, *args, **kwargs):
        """
        Instances the factory creating an ``UserMenuRunner`` instance.

        This method is a coroutine.

        Returns
        -------
        user_menu_runner : ``UserMenuRunner``
        """
        return await UserMenuRunner(self, client, channel, *args, **kwargs)
class UserMenuRunner(PaginationBase):
"""
Menu factory runner.
Parameters
----------
_canceller : `None` or `function`
The function called when the ``UserMenuRunner`` is cancelled or when it expires. This is a onetime use and
after it was used, is set as `None`.
_task_flag : `int`
A flag to store the state of the ``UserMenuRunner``.
Possible values:
+---------------------------+-------+-----------------------------------------------------------------------+
| Respective name | Value | Description |
+===========================+=======+=======================================================================+
| GUI_STATE_READY | 0 | The Pagination does nothing, is ready to be used. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_SWITCHING_PAGE | 1 | The Pagination is currently changing it's page. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_CANCELLING | 2 | The pagination is currently changing it's page, but it was cancelled |
| | | meanwhile. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_CANCELLED | 3 | The pagination is, or is being cancelled right now. |
+---------------------------+-------+-----------------------------------------------------------------------+
| GUI_STATE_SWITCHING_CTX | 4 | The Pagination is switching context. Not used by the default class, |
| | | but expected. |
+---------------------------+-------+-----------------------------------------------------------------------+
_timeouter : `None` or ``Timeouter``
Executes the timing out feature on the ``UserMenuRunner``.
channel : ``ChannelTextBase`` instance
The channel where the ``UserMenuRunner`` is executed.
client : ``Client``
The client who executes the ``UserMenuRunner``.
message : `None` or ``Message``
The message on what the ``UserMenuRunner`` is executed.
_factory : ``UserMenuFactory``
The factory of the menu containing it's details.
_instance : `None` or `Any`
The respective ``UserMenuFactory``'s class instanced.
"""
__slots__ = ('_factory', '_instance',)
async def __new__(cls, factory, client, channel, *args, message=None, **kwargs):
"""
Creates a new user menu runner instance with the given parameters.
This method is a coroutine.
Parameters
----------
factory : ``UserMenuFactory``
The respective user menu factory to execute.
client : ``Client``
The client who executes the ``UserMenuRunner``.
channel : ``ChannelTextBase`` instance
The channel where the ``UserMenuRunner`` is executed.
*args : Parameters
Additional parameters to pass to the factory's class's constructor.
message : `None` or ``Message``, Optional (Keyword Only)
The message to use instead of creating a new one.
**kwargs : Keyword parameters
Additional keyword parameters to pass to the factory's class's constructor.
Raises
------
TypeError
`channel`'s type is incorrect.
"""
if isinstance(channel, ChannelTextBase):
target_channel = channel
received_interaction = False
elif isinstance(channel, Message):
target_channel = channel.channel
received_interaction = False
elif isinstance(channel, InteractionEvent):
target_channel = channel.channel
received_interaction = True
else:
raise TypeError(f'`channel` can be given only as `{ChannelTextBase.__name__}`, `{Message.__name__}` '
f'of as {InteractionEvent.__name__} instance, got {channel.__class__.__name__}.')
self = object.__new__(cls)
self.client = client
self.channel = target_channel
self._canceller = cls._canceller_function
self._task_flag = GUI_STATE_READY
self.message = message
self._factory = factory
self._instance = None
self._timeouter = None
instance = factory.klass(self, *args, **kwargs)
self._instance = instance
default_content = await factory.initial_invoke(instance)
# Leave if nothing to do.
if default_content is None:
self._task_flag = GUI_STATE_CANCELLED
self._canceller = None
return self
try:
if message is None:
if received_interaction:
if not channel.is_acknowledged():
await client.interaction_response_message_create(channel)
message = await client.interaction_followup_message_create(channel, default_content)
else:
message = await client.message_create(channel, default_content)
self.message = message
else:
await client.message_edit(message, default_content)
except BaseException as err:
self.cancel(err)
if isinstance(err, ConnectionError):
return self
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # message's channel deleted
ERROR_CODES.missing_access, # client removed
ERROR_CODES.missing_permissions, # permissions changed meanwhile
ERROR_CODES.cannot_message_user, # user has dm-s disallowed
):
return self
raise
if self._task_flag in (GUI_STATE_CANCELLED, GUI_STATE_SWITCHING_CTX):
self.cancel(None)
return self
emojis = factory.emojis
if (emojis is not None):
if not target_channel.cached_permissions_for(client).can_add_reactions:
await self.cancel(PermissionError())
return self
try:
for emoji in emojis:
await client.reaction_add(message, emoji)
except BaseException as err:
self.cancel(err)
if isinstance(err, ConnectionError):
return self
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # message's channel deleted
ERROR_CODES.max_reactions, # reached reaction 20, some1 is trolling us.
ERROR_CODES.missing_access, # client removed
ERROR_CODES.missing_permissions, # permissions changed meanwhile
):
return self
raise
timeout = factory.timeout
if timeout >= 0.0:
timeouter = Timeouter(self, timeout)
else:
timeouter = None
self._timeouter = timeouter
client.events.reaction_add.append(message, self)
client.events.reaction_delete.append(message, self)
return self
@copy_docs(PaginationBase.__call__)
@copy_docs(PaginationBase._handle_close_exception)
@copy_docs(PaginationBase.__repr__)
class UserPagination:
"""
Base factorizable instance to execute pagination.
Attributes
----------
menu : ``UserMenuRunner``
The menu runner running the pagination.
page_index : `int`
The current page's index.
pages : `indexable`
An indexable container, what stores the displayable contents.
Class Attributes
----------------
left2 : ``Emoji`` = `BUILTIN_EMOJIS['track_previous']`
The emoji used to move to the first page.
left : ``Emoji`` = `BUILTIN_EMOJIS['arrow_backward']`
The emoji used to move to the previous page.
right : ``Emoji`` = `BUILTIN_EMOJIS['arrow_forward']`
The emoji used to move on the next page.
right2 : ``Emoji`` = `BUILTIN_EMOJIS['track_next']`
The emoji used to move on the last page.
close_emoji : ``Emoji`` = `BUILTIN_EMOJIS['x']`
The emoji used to cancel the ``Pagination``.
emojis : `tuple` (`Emoji`, `Emoji`, `Emoji`, `Emoji`, `Emoji`) = `(left2, left, right, right2, close_emoji,)`
The emojis to add on the respective message in order.
timeout : `float`
The pagination's timeout.
"""
left2 = BUILTIN_EMOJIS['track_previous']
left = BUILTIN_EMOJIS['arrow_backward']
right = BUILTIN_EMOJIS['arrow_forward']
right2 = BUILTIN_EMOJIS['track_next']
close_emoji = BUILTIN_EMOJIS['x']
emojis = (left2, left, right, right2, close_emoji,)
timeout = 240.0
__slots__ = ('menu', 'page_index', 'pages')
def __init__(self, menu, pages):
"""
Creates a new ``UserMenuRunner`` instance with the given parameters.
Parameters
----------
menu : ``UserMenuRunner``
The respective runner which executes the pagination.
pages : `indexable-container`
An indexable container, what stores the displayable pages.
"""
self.menu = menu
self.pages = pages
self.page_index = 0
async def initial_invoke(self):
"""
Called initially
This method is a coroutine.
Returns
-------
page : `None` or `Any`
The page to kick-off the pagination with.
"""
pages = self.pages
pages_length = len(pages)
if pages_length == 0:
return None
page = pages[0]
if pages_length == 1:
self.menu.cancel()
return page
async def invoke(self, event):
"""
An emoji addition or deletion invoked the pagination.
Parameters
----------
event : ``ReactionAddEvent``, ``ReactionDeleteEvent``
The received event.
"""
emoji = event.emoji
if emoji is self.left2:
page_index = 0
elif emoji is self.left:
page_index = self.page_index-1
if page_index < 0:
page_index = 0
elif emoji is self.right:
page_index = self.page_index+1
if page_index >= len(self.pages):
page_index = len(self.pages)-1
elif emoji is self.right2:
page_index = len(self.pages)-1
else:
return
if page_index == self.page_index:
return
self.page_index = page_index
return self.pages[page_index]
async def close(self, exception):
"""
Closes the pagination.
This method is a coroutine.
Parameters
----------
exception : `None` or ``BaseException``
- `CancelledError` if closed with the close emoji.
- `TimeoutError` if closed by timeout.
- `PermissionError` if closed because cant add reactions.
- Any other value is other exception received runtime.
"""
client = self.menu.client
if exception is None:
return
if isinstance(exception, CancelledError):
try:
await client.message_delete(self.menu.message)
except BaseException as err:
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_channel, # channel deleted
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.missing_access, # client removed
):
return
await client.events.error(client, f'{self!r}.close', err)
return
if isinstance(exception, TimeoutError):
if self.menu.channel.cached_permissions_for(client).can_manage_messages:
try:
await client.reaction_clear(self.menu.message)
except BaseException as err:
if isinstance(err, ConnectionError):
# no internet
return
if isinstance(err, DiscordException):
if err.code in (
ERROR_CODES.unknown_message, # message deleted
ERROR_CODES.unknown_channel, # channel deleted
ERROR_CODES.missing_access, # client removed
ERROR_CODES.missing_permissions, # permissions changed meanwhile
):
return
await client.events.error(client, f'{self!r}.close', exception)
return
if isinstance(exception, PermissionError):
return
await client.events.error(client, f'{self!r}.close', exception)
return
| [
834,
439,
834,
796,
19203,
12982,
23381,
22810,
3256,
705,
12982,
23381,
49493,
3256,
705,
12982,
47,
363,
1883,
3256,
8,
198,
198,
6738,
10153,
1616,
952,
1330,
43780,
3353,
12331,
11,
4866,
62,
31628,
11,
4889,
540,
37702,
9107,
198,
... | 2.135363 | 13,305 |
import yaml
############################ Genrate page parts #########################################
############################ Genrate full pages #########################################
| [
11748,
331,
43695,
198,
14468,
7804,
4242,
5215,
4873,
2443,
3354,
1303,
29113,
7804,
198,
198,
14468,
7804,
4242,
5215,
4873,
1336,
5468,
1303,
29113,
7804,
628,
628,
628,
628
] | 6.666667 | 30 |
################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
################################################################################
import json
from account_operations import account_operations_handler
from assignments_operations import assignments_operations_handler
from permissionset_operations import permission_operations_handler
from config import load_config
controller = None
## Event payload from Service Event handler:
# {
# "Source": "enterprise-aws-sso",
# "DetailType": "AccountOperations",
# "Detail":
# {
# "Action": "tagged|created|moved",
# "TagKey": "",
# "TagValue": "",
# "AccountId": "",
# "AccountOuName": "",
# "AccountOldOuName": "If present if not have to look for a solutoin",
# }
# }
# {
# "PermissionSetOperations":
# {
# "Action": "created|delete",
# "PermissionSetName": "",
# }
# }
# This will be the control lambda!
# @logger.inject_lambda_context
| [
29113,
29113,
14468,
198,
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
12,
15,
198,
29113,
29113,
14468,
198,
198,
11748,
33918,
198,
19... | 3.008197 | 366 |
#Stop words
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
example_sentence = "This is an example sentence to show you, how it is working."
stop_words = set(stopwords.words("English"))
filtered_sentence = []
example_tokenize = word_tokenize(example_sentence)
print(example_tokenize)
#for w in example_tokenize:
# if w not in stop_words:
# filtered_sentence.append(w)
#print(filtered_sentence)
#Or
filtered_sentence = [w for w in example_tokenize if not w in stop_words]
print(filtered_sentence)
| [
2,
19485,
2456,
201,
198,
201,
198,
6738,
299,
2528,
74,
13,
30001,
1096,
1330,
1573,
62,
30001,
1096,
201,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
201,
198,
201,
198,
20688,
62,
34086,
594,
796,
366,
1212,
... | 2.652582 | 213 |
"""Helper functions for model tests."""
import unittest.mock as mock
import pytest # type: ignore
from app.models import (Game, Player, Round)
PROVISIONAL_NAME = "provisional name"
CONFIRMED_NAME = "Abcdef"
def mock_game():
"""Return a mock of a Game."""
return mock.create_autospec(Game)
def mock_player():
"""Return a mock of a Player."""
return mock.create_autospec(Player)
def mock_round():
"""Return a mock of a Round."""
return mock.create_autospec(Round)
@pytest.fixture
def new_player():
"""Fixture: Return a new Player."""
return Player(PROVISIONAL_NAME, "secret id")
@pytest.fixture
def confirmed_player(new_player):
"""Fixture: Return a new Player who confirmed her name."""
new_player.confirm(CONFIRMED_NAME)
return new_player
@pytest.fixture
def new_game_waiting_room():
"""Fixture: Return a new Game with 3 unconfirmed Players."""
players = [Player(PROVISIONAL_NAME, "secret id")] + \
[Player(f"P{i}", f"secret {i}") for i in range(2)]
return Game(players)
@pytest.fixture
def new_game_with_confirmed_players(new_game_waiting_room):
"""Fixture: Return a new Game with 3 confirmed Players."""
for (i, p) in enumerate(new_game_waiting_room._players):
p.confirm(CONFIRMED_NAME if i == 0 else "")
return new_game_waiting_room
| [
37811,
47429,
5499,
329,
2746,
5254,
526,
15931,
198,
11748,
555,
715,
395,
13,
76,
735,
355,
15290,
198,
198,
11748,
12972,
9288,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
2099,... | 2.667323 | 508 |
from output.models.ms_data.additional.member_type002_xsd.member_type002 import (
Ct,
Root,
)
__all__ = [
"Ct",
"Root",
]
| [
6738,
5072,
13,
27530,
13,
907,
62,
7890,
13,
2860,
1859,
13,
19522,
62,
4906,
21601,
62,
87,
21282,
13,
19522,
62,
4906,
21601,
1330,
357,
198,
220,
220,
220,
43166,
11,
198,
220,
220,
220,
20410,
11,
198,
8,
198,
198,
834,
439,
... | 2.15625 | 64 |
from enum import Enum
import pytest
from pytest import approx
from cdpy import cdp
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
6738,
33829,
1330,
2039,
388,
198,
198,
11748,
12972,
9288,
198,
6738,
12972,
9288,
1330,
5561,
198,
198,
6738,
22927,
9078,
1330,
22927,
79,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
... | 2.661765 | 68 |
# Copyright (C) 2018-2020 The python-bitcoin-utils developers
#
# This file is part of python-bitcoin-utils
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoin-utils, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
import math
import hashlib
import struct
from binascii import unhexlify, hexlify
from bitcoinutils.constants import DEFAULT_TX_SEQUENCE, DEFAULT_TX_LOCKTIME, \
DEFAULT_TX_VERSION, NEGATIVE_SATOSHI, \
EMPTY_TX_SEQUENCE, SIGHASH_ALL, SIGHASH_NONE, \
SIGHASH_SINGLE, SIGHASH_ANYONECANPAY, \
ABSOLUTE_TIMELOCK_SEQUENCE, REPLACE_BY_FEE_SEQUENCE, \
TYPE_ABSOLUTE_TIMELOCK, TYPE_RELATIVE_TIMELOCK, \
TYPE_REPLACE_BY_FEE, SATOSHIS_PER_BITCOIN
from bitcoinutils.script import Script
class TxInput:
"""Represents a transaction input.
A transaction input requires a transaction id of a UTXO and the index of
that UTXO.
Attributes
----------
txid : str
the transaction id as a hex string (little-endian as displayed by
tools)
txout_index : int
the index of the UTXO that we want to spend
script_sig : list (strings)
the op code and data of the script as string
sequence : bytes
the input sequence (for timelocks, RBF, etc.)
Methods
-------
stream()
converts TxInput to bytes
copy()
creates a copy of the object (classmethod)
"""
def __init__(self, txid, txout_index, script_sig=Script([]), sequence=DEFAULT_TX_SEQUENCE):
"""See TxInput description"""
# expected in the format used for displaying Bitcoin hashes
self.txid = txid
self.txout_index = txout_index
self.script_sig = script_sig
# if user provided a sequence it would be as string (for now...)
if type(sequence) is str:
self.sequence = unhexlify(sequence)
else:
self.sequence = sequence
def stream(self):
"""Converts to bytes"""
# Internally Bitcoin uses little-endian byte order as it improves
# speed. Hashes are defined and implemented as big-endian thus
# those are transmitted in big-endian order. However, when hashes are
# displayed Bitcoin uses little-endian order because it is sometimes
# convenient to consider hashes as little-endian integers (and not
# strings)
# - note that we reverse the byte order for the tx hash since the string
# was displayed in little-endian!
# - note that python's struct uses little-endian by default
txid_bytes = unhexlify(self.txid)[::-1]
txout_bytes = struct.pack('<L', self.txout_index)
script_sig_bytes = self.script_sig.to_bytes()
data = txid_bytes + txout_bytes + \
struct.pack('B', len(script_sig_bytes)) + \
script_sig_bytes + self.sequence
return data
@classmethod
def copy(cls, txin):
"""Deep copy of TxInput"""
return cls(txin.txid, txin.txout_index, txin.script_sig,
txin.sequence)
class TxOutput:
"""Represents a transaction output
Attributes
----------
amount : int/float/Decimal
the value we want to send to this output in satoshis
script_pubkey : list (string)
the script that will lock this amount
Methods
-------
stream()
converts TxInput to bytes
copy()
creates a copy of the object (classmethod)
"""
def __init__(self, amount, script_pubkey):
"""See TxOutput description"""
if not isinstance(amount, int):
raise TypeError("Amount needs to be in satoshis as an integer")
self.amount = amount
self.script_pubkey = script_pubkey
def stream(self):
"""Converts to bytes"""
# internally all little-endian except hashes
# note struct uses little-endian by default
amount_bytes = struct.pack('<q', self.amount)
script_bytes = self.script_pubkey.to_bytes()
data = amount_bytes + struct.pack('B', len(script_bytes)) + script_bytes
return data
@classmethod
def copy(cls, txout):
"""Deep copy of TxOutput"""
return cls(txout.amount, txout.script_pubkey)
class Sequence:
"""Helps setting up appropriate sequence. Used to provide the sequence to
transaction inputs and to scripts.
Attributes
----------
value : int
The value of the block height or the 512 seconds increments
seq_type : int
Specifies the type of sequence (TYPE_RELATIVE_TIMELOCK |
TYPE_ABSOLUTE_TIMELOCK | TYPE_REPLACE_BY_FEE
is_type_block : bool
If type is TYPE_RELATIVE_TIMELOCK then this specifies its type
(block height or 512 secs increments)
Methods
-------
for_input_sequence()
Serializes the relative sequence as required in a transaction
for_script()
Returns the appropriate integer for a script; e.g. for relative timelocks
Raises
------
ValueError
if the value is not within range of 2 bytes.
"""
def for_input_sequence(self):
"""Creates a relative timelock sequence value as expected from
TxInput sequence attribute"""
if self.seq_type == TYPE_ABSOLUTE_TIMELOCK:
return ABSOLUTE_TIMELOCK_SEQUENCE
if self.seq_type == TYPE_REPLACE_BY_FEE:
return REPLACE_BY_FEE_SEQUENCE
if self.seq_type == TYPE_RELATIVE_TIMELOCK:
# most significant bit is already 0 so relative timelocks are enabled
seq = 0
# if not block height type set 23 bit
if not self.is_type_block:
seq |= 1 << 22
# set the value
seq |= self.value
seq_bytes = seq.to_bytes(4, byteorder='little')
return seq_bytes
def for_script(self):
"""Creates a relative/absolute timelock sequence value as expected in scripts"""
if self.seq_type == TYPE_REPLACE_BY_FEE:
raise ValueError('RBF is not to be included in a script.')
script_integer = self.value
# if not block-height type then set 23 bit
if self.seq_type == TYPE_RELATIVE_TIMELOCK and not self.is_type_block:
script_integer |= 1 << 22
return script_integer
class Locktime:
"""Helps setting up appropriate locktime.
Attributes
----------
value : int
The value of the block height or the Unix epoch (seconds from 1 Jan
1970 UTC)
Methods
-------
for_transaction()
Serializes the locktime as required in a transaction
Raises
------
ValueError
if the value is not within range of 2 bytes.
"""
def for_transaction(self):
"""Creates a timelock as expected from Transaction"""
locktime_bytes = self.value.to_bytes(4, byteorder='little')
return locktime_bytes
class Transaction:
"""Represents a Bitcoin transaction
Attributes
----------
inputs : list (TxInput)
A list of all the transaction inputs
outputs : list (TxOutput)
A list of all the transaction outputs
locktime : bytes
The transaction's locktime parameter
version : bytes
The transaction version
has_segwit : bool
Specifies a tx that includes segwit inputs
witnesses : list (Script)
The witness scripts that correspond to the inputs
Methods
-------
stream()
Converts Transaction to bytes
serialize()
Converts Transaction to hex string
get_txid()
Calculates txid and returns it
get_hash()
Calculates tx hash (wtxid) and returns it
get_wtxid()
Calculates tx hash (wtxid) and returns it
get_size()
Calculates the tx size
get_vsize()
Calculates the tx segwit size
copy()
creates a copy of the object (classmethod)
get_transaction_digest(txin_index, script, sighash)
returns the transaction input's digest that is to be signed according
get_transaction_segwit_digest(txin_index, script, amount, sighash)
returns the transaction input's segwit digest that is to be signed
according to sighash
"""
def __init__(self, inputs=None, outputs=None, locktime=DEFAULT_TX_LOCKTIME,
version=DEFAULT_TX_VERSION, has_segwit=False, witnesses=None):
"""See Transaction description"""
# make sure default argument for inputs, outputs and witnesses is an empty list
if inputs is None:
inputs = []
if outputs is None:
outputs = []
if witnesses is None:
witnesses = []
self.inputs = inputs
self.outputs = outputs
self.has_segwit = has_segwit
self.witnesses = witnesses
# if user provided a locktime it would be as string (for now...)
if type(locktime) is str:
self.locktime = unhexlify(locktime)
else:
self.locktime = locktime
self.version = version
@classmethod
def copy(cls, tx):
"""Deep copy of Transaction"""
ins = [TxInput.copy(txin) for txin in tx.inputs]
outs = [TxOutput.copy(txout) for txout in tx.outputs]
wits = [Script.copy(witness) for witness in tx.witnesses]
return cls(ins, outs, tx.locktime, tx.version, tx.has_segwit, wits)
def get_transaction_digest(self, txin_index, script, sighash=SIGHASH_ALL):
"""Returns the transaction's digest for signing.
| SIGHASH types (see constants.py):
| SIGHASH_ALL - signs all inputs and outputs (default)
| SIGHASH_NONE - signs all of the inputs
| SIGHASH_SINGLE - signs all inputs but only txin_index output
| SIGHASH_ANYONECANPAY (only combined with one of the above)
| - with ALL - signs all outputs but only txin_index input
| - with NONE - signs only the txin_index input
| - with SINGLE - signs txin_index input and output
Attributes
----------
txin_index : int
The index of the input that we wish to sign
script : list (string)
The scriptPubKey of the UTXO that we want to spend
sighash : int
The type of the signature hash to be created
"""
# clone transaction to modify without messing up the real transaction
tmp_tx = Transaction.copy(self)
# make sure all input scriptSigs are empty
for txin in tmp_tx.inputs:
txin.script_sig = Script([])
#
# TODO Deal with (delete?) script's OP_CODESEPARATORs, if any
# Very early versions of Bitcoin were using a different design for
# scripts that were flawed. OP_CODESEPARATOR has no purpose currently
# but we could not delete it for compatibility purposes. If it exists
# in a script it needs to be removed.
#
# the temporary transaction's scriptSig needs to be set to the
# scriptPubKey of the UTXO we are trying to spend - this is required to
# get the correct transaction digest (which is then signed)
tmp_tx.inputs[txin_index].script_sig = script
#
# by default we sign all inputs/outputs (SIGHASH_ALL is used)
#
# whether 0x0n or 0x8n, bitwise AND'ing will result to n
if (sighash & 0x1f) == SIGHASH_NONE:
# do not include outputs in digest (i.e. do not sign outputs)
tmp_tx.outputs = []
# do not include sequence of other inputs (zero them for digest)
# which means that they can be replaced
for i in range(len(tmp_tx.inputs)):
if i != txin_index:
tmp_tx.inputs[i].sequence = EMPTY_TX_SEQUENCE
elif (sighash & 0x1f) == SIGHASH_SINGLE:
# only sign the output that corresponds to txin_index
if txin_index >= len(tmp_tx.outputs):
raise ValueError('Transaction index is greater than the \
available outputs')
# keep only output that corresponds to txin_index -- delete all outputs
# after txin_index and zero out all outputs upto txin_index
txout = tmp_tx.outputs[txin_index]
tmp_tx.outputs = []
for i in range(txin_index):
tmp_tx.outputs.append( TxOutput(NEGATIVE_SATOSHI, Script([])) )
tmp_tx.outputs.append(txout)
# do not include sequence of other inputs (zero them for digest)
# which means that they can be replaced
for i in range(len(tmp_tx.inputs)):
if i != txin_index:
tmp_tx.inputs[i].sequence = EMPTY_TX_SEQUENCE
# bitwise AND'ing 0x8n to 0x80 will result to true
if sighash & SIGHASH_ANYONECANPAY:
# ignore all other inputs from the signature which means that
# anyone can add new inputs
tmp_tx.inputs = [tmp_tx.inputs[txin_index]]
# get the byte stream of the temporary transaction
tx_for_signing = tmp_tx.stream(False)
# add sighash bytes to be hashed
# Note that although sighash is one byte it is hashed as a 4 byte value.
# There is no real reason for this other than that the original implementation
# of Bitcoin stored sighash as an integer (which serializes as a 4
# bytes), i.e. it should be converted to one byte before serialization.
# It is converted to 1 byte before serializing to send to the network
tx_for_signing += struct.pack('<i', sighash)
# create transaction digest -- note double hashing
tx_digest = hashlib.sha256( hashlib.sha256(tx_for_signing).digest()).digest()
return tx_digest
def get_transaction_segwit_digest(self, txin_index, script, amount, sighash=SIGHASH_ALL):
"""Returns the segwit transaction's digest for signing.
| SIGHASH types (see constants.py):
| SIGHASH_ALL - signs all inputs and outputs (default)
| SIGHASH_NONE - signs all of the inputs
| SIGHASH_SINGLE - signs all inputs but only txin_index output
| SIGHASH_ANYONECANPAY (only combined with one of the above)
| - with ALL - signs all outputs but only txin_index input
| - with NONE - signs only the txin_index input
| - with SINGLE - signs txin_index input and output
Attributes
----------
txin_index : int
The index of the input that we wish to sign
script : list (string)
The scriptPubKey of the UTXO that we want to spend
amount : int/float/Decimal
The amount of the UTXO to spend is included in the
signature for segwit (in satoshis)
sighash : int
The type of the signature hash to be created
"""
# clone transaction to modify without messing up the real transaction
tmp_tx = Transaction.copy(self)
# TODO consult ref. impl. in BIP-143 and update if needed
# requires cleanup and further explanations
hash_prevouts = b'\x00' * 32
hash_sequence = b'\x00' * 32
hash_outputs = b'\x00' * 32
# Judging the signature type
basic_sig_hash_type = (sighash & 0x1f)
anyone_can_pay = sighash& 0xf0 == SIGHASH_ANYONECANPAY
sign_all = (basic_sig_hash_type != SIGHASH_SINGLE) and (basic_sig_hash_type != SIGHASH_NONE)
# Hash all input
if not anyone_can_pay:
hash_prevouts = b''
for txin in tmp_tx.inputs:
hash_prevouts += unhexlify(txin.txid)[::-1] + \
struct.pack('<L', txin.txout_index)
hash_prevouts = hashlib.sha256(hashlib.sha256(hash_prevouts).digest()).digest()
# Hash all input sequence
if not anyone_can_pay and sign_all:
hash_sequence = b''
for txin in tmp_tx.inputs:
hash_sequence += txin.sequence
hash_sequence = hashlib.sha256(hashlib.sha256(hash_sequence).digest()).digest()
if sign_all:
# Hash all output
hash_outputs = b''
for txout in tmp_tx.outputs:
amount_bytes = struct.pack('<q', txout.amount)
script_bytes = txout.script_pubkey.to_bytes()
hash_outputs += amount_bytes + struct.pack('B', len(script_bytes)) + script_bytes
hash_outputs = hashlib.sha256(hashlib.sha256(hash_outputs).digest()).digest()
elif basic_sig_hash_type == SIGHASH_SINGLE and txin_index < len(tmp_tx.outputs):
# Hash one output
txout = tmp_tx.outputs[txin_index]
amount_bytes = struct.pack('<q', txout.amount)
script_bytes = txout.script_pubkey.to_bytes()
hash_outputs = amount_bytes + struct.pack('B', len(script_bytes)) + script_bytes
hash_outputs = hashlib.sha256(hashlib.sha256(hash_outputs).digest()).digest()
# add sighash bytes to be hashed
tx_for_signing = self.version
# add sighash bytes to be hashed
tx_for_signing += hash_prevouts + hash_sequence
# add tx txin
txin = self.inputs[txin_index]
tx_for_signing += unhexlify(txin.txid)[::-1] + \
struct.pack('<L', txin.txout_index)
# add tx sign
tx_for_signing += struct.pack('B', len(script.to_bytes()))
tx_for_signing += script.to_bytes()
# add txin amount
tx_for_signing += struct.pack('<q', amount)
# add tx sequence
tx_for_signing += txin.sequence
# add txouts hash
tx_for_signing += hash_outputs
# add locktime
tx_for_signing += self.locktime
# add sighash type
tx_for_signing += struct.pack('<i', sighash)
return hashlib.sha256(hashlib.sha256(tx_for_signing).digest()).digest()
def stream(self, has_segwit):
"""Converts to bytes"""
data = self.version
if has_segwit and self.witnesses:
# marker
data += b'\x00'
# flag
data += b'\x01'
txin_count_bytes = chr(len(self.inputs)).encode()
txout_count_bytes = chr(len(self.outputs)).encode()
data += txin_count_bytes
for txin in self.inputs:
data += txin.stream()
data += txout_count_bytes
for txout in self.outputs:
data += txout.stream()
if has_segwit:
for witness in self.witnesses:
# add witnesses script Count
witnesses_count_bytes = chr(len(witness.script)).encode()
data += witnesses_count_bytes
data += witness.to_bytes(True)
data += self.locktime
return data
def get_txid(self):
"""Hashes the serialized (bytes) tx to get a unique id"""
data = self.stream(False)
hash = hashlib.sha256( hashlib.sha256(data).digest() ).digest()
# note that we reverse the hash for display purposes
return hexlify(hash[::-1]).decode('utf-8')
def get_wtxid(self):
"""Hashes the serialized (bytes) tx including segwit marker and witnesses"""
return get_hash()
def get_hash(self):
"""Hashes the serialized (bytes) tx including segwit marker and witnesses"""
data = self.stream(self.has_segwit)
hash = hashlib.sha256( hashlib.sha256(data).digest() ).digest()
# note that we reverse the hash for display purposes
return hexlify(hash[::-1]).decode('utf-8')
def get_size(self):
"""Gets the size of the transaction"""
return len(self.stream(self.has_segwit))
def get_vsize(self):
"""Gets the virtual size of the transaction.
For non-segwit txs this is identical to get_size(). For segwit txs the
marker and witnesses length needs to be reduced to 1/4 of its original
length. Thus it is substructed from size and then it is divided by 4
before added back to size to produce vsize (always rounded up).
https://en.bitcoin.it/wiki/Weight_units
"""
# return size if non segwit
if not self.has_segwit:
return self.get_size()
marker_size = 2
wit_size = 0
data = b''
# count witnesses data
for witness in self.witnesses:
# add witnesses script Count
witnesses_count_bytes = chr(len(witness.script)).encode()
data = witnesses_count_bytes
data += witness.to_bytes(True)
wit_size = len(data)
# TODO when TxInputWitness is created it will contain it's own len or
# size method
size = self.get_size() - (marker_size + wit_size)
vsize = size + (marker_size + wit_size) / 4
return int( math.ceil(vsize) )
def serialize(self):
"""Converts to hex string"""
return hexlify(self.stream(self.has_segwit)).decode('utf-8')
if __name__ == "__main__":
    # Script entry point; main() is expected to be defined elsewhere in
    # this module (not visible in this excerpt).
    main()
| [
2,
15069,
357,
34,
8,
2864,
12,
42334,
383,
21015,
12,
35395,
12,
26791,
6505,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
21015,
12,
35395,
12,
26791,
198,
2,
198,
2,
632,
318,
2426,
284,
262,
5964,
2846,
287,
262,
38559,
24290,
... | 2.328098 | 9,296 |
import argparse
import sys
import os
import data_utils
import numpy as np
from torch import Tensor
from torch.utils.data import DataLoader
from torchvision import transforms
import yaml
import torch
from torch import nn
from model import RawGAT_ST # In main model script we used our best RawGAT-ST-mul model. To use other models you need to call revelant model scripts from RawGAT_models folder
from tensorboardX import SummaryWriter
from core_scripts.startup_config import set_random_seed
if __name__ == '__main__':
    # ---- Command-line interface -----------------------------------------
    parser = argparse.ArgumentParser('ASVSpoof2019 RawGAT-ST model')
    # Dataset
    parser.add_argument('--database_path', type=str, default='/your/path/to/data/ASVspoof_database/', help='Change this to user\'s full directory address of LA database (ASVspoof2019- for training, development and evaluation scores). We assume that all three ASVspoof 2019 LA train, LA dev and LA eval data folders are in the same database_path directory.')
    '''
    % database_path (full LA directory address)/
    %   |- ASVspoof2019_LA_eval/flac
    %   |- ASVspoof2019_LA_train/flac
    %   |- ASVspoof2019_LA_dev/flac
    '''
    parser.add_argument('--protocols_path', type=str, default='/your/path/to/protocols/ASVspoof_database/', help='Change with path to user\'s LA database protocols directory address')
    '''
    % protocols_path/
    %   |- ASVspoof2019.LA.cm.eval.trl.txt
    %   |- ASVspoof2019.LA.cm.dev.trl.txt
    %   |- ASVspoof2019.LA.cm.train.trn.txt
    '''
    # Hyperparameters
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--num_epochs', type=int, default=300)
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--weight_decay', type=float, default=0.0001)
    parser.add_argument('--loss', type=str, default='WCE',help='Weighted Cross Entropy Loss ')
    # model
    parser.add_argument('--seed', type=int, default=1234,
                        help='random seed (default: 1234)')
    parser.add_argument('--model_path', type=str,
                        default=None, help='Model checkpoint')
    parser.add_argument('--comment', type=str, default=None,
                        help='Comment to describe the saved model')
    # Auxiliary arguments
    parser.add_argument('--track', type=str, default='logical',choices=['logical', 'physical'], help='logical/physical')
    parser.add_argument('--eval_output', type=str, default=None,
                        help='Path to save the evaluation result')
    parser.add_argument('--eval', action='store_true', default=False,
                        help='eval mode')
    parser.add_argument('--is_eval', action='store_true', default=False,help='eval database')
    parser.add_argument('--eval_part', type=int, default=0)
    parser.add_argument('--features', type=str, default='Raw_GAT')
    # backend options
    parser.add_argument('--cudnn-deterministic-toggle', action='store_false', \
                        default=True,
                        help='use cudnn-deterministic? (default true)')
    parser.add_argument('--cudnn-benchmark-toggle', action='store_true', \
                        default=False,
                        help='use cudnn-benchmark? (default false)')

    # ---- Model configuration (YAML) -------------------------------------
    dir_yaml = os.path.splitext('model_config_RawGAT_ST')[0] + '.yaml'
    with open(dir_yaml, 'r') as f_yaml:
        # Bug fix: yaml.load() without an explicit Loader is deprecated and
        # removed in PyYAML >= 6; FullLoader preserves the previous default
        # behaviour for this trusted config file.
        parser1 = yaml.load(f_yaml, Loader=yaml.FullLoader)

    if not os.path.exists('models'):
        os.mkdir('models')
    args = parser.parse_args()

    # make experiment reproducible
    set_random_seed(args.seed, args)

    track = args.track
    assert track in ['logical', 'physical'], 'Invalid track given'
    is_logical = (track == 'logical')

    # define model saving path
    model_tag = 'model_{}_{}_{}_{}_{}'.format(
        track, args.loss, args.num_epochs, args.batch_size, args.lr)
    if args.comment:
        model_tag = model_tag + '_{}'.format(args.comment)
    model_save_path = os.path.join('models', model_tag)

    # set model save directory
    if not os.path.exists(model_save_path):
        os.mkdir(model_save_path)

    # NOTE(review): pad() is not defined in this file as shown -- it is
    # presumably provided alongside data_utils; confirm before running.
    transforms = transforms.Compose([
        lambda x: pad(x),
        lambda x: Tensor(x)
        ])

    # GPU device
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('Device: {}'.format(device))

    # validation Dataloader
    dev_set = data_utils.ASVDataset(database_path=args.database_path,protocols_path=args.protocols_path,is_train=False, is_logical=is_logical,
                                    transform=transforms,
                                    feature_name=args.features, is_eval=args.is_eval, eval_part=args.eval_part)
    dev_loader = DataLoader(dev_set, batch_size=args.batch_size, shuffle=True)

    # model
    model = RawGAT_ST(parser1['model'], device)
    nb_params = sum([param.view(-1).size()[0] for param in model.parameters()])
    model =(model).to(device)

    # Adam optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,weight_decay=args.weight_decay)

    if args.model_path:
        model.load_state_dict(torch.load(args.model_path,map_location=device))
        print('Model loaded : {}'.format(args.model_path))

    # Inference
    if args.eval:
        assert args.eval_output is not None, 'You must provide an output path'
        assert args.model_path is not None, 'You must provide model checkpoint'
        produce_evaluation_file(dev_set, model, device, args.eval_output)
        sys.exit(0)

    # Training Dataloader
    train_set = data_utils.ASVDataset(database_path=args.database_path,protocols_path=args.protocols_path,is_train=True, is_logical=is_logical, transform=transforms,
                                      feature_name=args.features)
    train_loader = DataLoader(
        train_set, batch_size=args.batch_size, shuffle=True)

    # Training and validation
    num_epochs = args.num_epochs
    writer = SummaryWriter('logs/{}'.format(model_tag))
    for epoch in range(num_epochs):
        # One optimization pass over the training set, then a validation
        # pass; both metrics are logged to TensorBoard per epoch.
        running_loss = train_epoch(train_loader,model, args.lr,optimizer, device)
        val_loss = evaluate_accuracy(dev_loader, model, device)
        writer.add_scalar('val_loss', val_loss, epoch)
        writer.add_scalar('loss', running_loss, epoch)
        print('\n{} - {} - {} '.format(epoch,
                                       running_loss,val_loss))
        # Checkpoint the model weights after every epoch.
        torch.save(model.state_dict(), os.path.join(
            model_save_path, 'epoch_{}.pth'.format(epoch)))
| [
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
1366,
62,
26791,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
28034,
1330,
309,
22854,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
28034,
101... | 2.356556 | 2,799 |
# %%
# Import needed libraries and modules

import napari
import numpy as np
from dask_image.imread import imread
from skimage import filters, morphology

# %%
# Use dask to read the image files, which permits for lazy loading.
all_samples = imread("./fiber_data/*.jpg")
# set the scale in micron per pixel of the images
scale = [1 / 35.5, 1 / 35.5]

# %%
# Ensure images are grayscale
# NOTE(review): grayscale() is not defined or imported in this file as
# shown -- it must be provided before this cell can run.
all_samples = grayscale(all_samples)

# %%
# Initialize napari viewer and show the Image stack
viewer = napari.Viewer()
viewer.add_image(all_samples, scale=scale, name="Images")

# %%
# Define segmentation function. The input is an image stack
# NOTE(review): the definition of segment_img() is missing here -- this
# file appears to be a template with the function bodies stripped.

# %%
# Define skeleton function, use result of the segmentation
# NOTE(review): the definition of skel_img() is missing here as well.

# %%
# Using dask `map_block` the segmentation
# The function `segment_img` is applied blockwise to `all_samples`
# Now it is lazy: not computed until called
seg = all_samples.map_blocks(segment_img, dtype=np.uint8)

# View lazy segmentation napari
viewer.add_labels(seg, scale=scale, name="Segmentation")

# %%
# Using dask `map_block` the skeletonization
# The function `skel_img` is applied blockwise to `seg`
# Now it is lazy: applied block-wise, but not computed until called
skel = seg.map_blocks(skel_img, dtype=np.uint8)

# View lazy skeleton napari
# Use `Shuffle colors` crossed-arrows icon to swap the color for improved contrast
viewer.add_labels(skel, scale=scale, name="Skeleton")

# %%
| [
2,
43313,
198,
2,
17267,
2622,
12782,
290,
13103,
198,
198,
11748,
25422,
2743,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
288,
2093,
62,
9060,
13,
320,
961,
1330,
545,
961,
198,
6738,
1341,
9060,
1330,
16628,
11,
46320,
198,
198,... | 3.09292 | 452 |
import argparse
import numpy as np
import pandas as pd
from utils import bulid_dataset
from keras.models import Model,Input
from keras.layers import LSTM,Embedding,Dense,TimeDistributed,Dropout,Bidirectional
from keras_contrib.layers import CRF
from keras_contrib.utils import save_load_utils
import matplotlib.pyplot as plt
plt.style.use("ggplot")
# 1. Load the data: paths to the raw NER csv and the cached/pickled dataset.
ner_dataset_dir='../data/ner_dataset.csv'
dataset_dir='../data/dataset.pkl'
# 2. Build the dataset: vocabulary/tag counts, max sequence length, the
#    word/tag lookup lists and the train/test split, padded to max_len=50.
#    (NB: `bulid_dataset` is the spelling exported by utils -- do not "fix"
#    it here without renaming it there too.)
n_words, n_tags, max_len, words,tags,\
X_train, X_test, y_train, y_test=bulid_dataset(ner_dataset_dir,dataset_dir,max_len=50)
def sample():
    """
    Rebuild the BiLSTM-CRF network, restore the trained weights and print
    the predicted tag sequence for one held-out test sentence.
    """
    # Re-create the exact architecture used during training so the saved
    # weights can be loaded back in.
    sentence_in = Input(shape=(max_len,))
    net = Embedding(input_dim=n_words + 1, output_dim=20,
                    input_length=max_len, mask_zero=True)(sentence_in)  # 20-dim embedding
    net = Bidirectional(LSTM(units=50, return_sequences=True,
                             recurrent_dropout=0.1))(net)  # variational biLSTM
    net = TimeDistributed(Dense(50, activation="relu"))(net)  # dense layer as suggested by neuralNer
    crf_layer = CRF(n_tags)  # CRF layer
    crf_out = crf_layer(net)  # output
    model = Model(sentence_in, crf_out)
    # Restore the trained weights (keras_contrib helper handles the CRF).
    save_load_utils.load_all_weights(model, filepath="../result/bilstm-crf.h5")
    # Predict tags for a single test sentence and compare with gold labels.
    sample_idx = 300
    predicted = np.argmax(model.predict(np.array([X_test[sample_idx]])), axis=-1)
    gold = np.argmax(y_test[sample_idx], -1)
    print("{:15}||{:5}||{}".format("Word", "True", "Pred"))
    print(30 * "=")
    for word_id, gold_id, pred_id in zip(X_test[sample_idx], gold, predicted[0]):
        print("{:15}: {:5} {}".format(words[word_id], tags[gold_id], tags[pred_id]))
if __name__ == '__main__':
    # CLI entry point: choose between training and sampling/prediction.
    parser = argparse.ArgumentParser(description="命名执行训练或者预测")
    parser.add_argument('--action', required=True, help="input train or test")
    args = parser.parse_args()
    if args.action == 'train':
        # NOTE(review): train() is not defined in this file as shown --
        # confirm it exists before running with --action train.
        train()
    if args.action == 'test':
        sample()
| [
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
3384,
4487,
1330,
4807,
312,
62,
19608,
292,
316,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
11,
20560,
198,
6738,
41927,
292,
... | 2.003125 | 960 |
import time
import json

from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient

# Connect to the AWS IoT device-shadow service over MQTT/TLS and poll the
# shadow state for this device once a minute.

#awshost you got from `aws iot describe-endpoint`
awshost = "a134g88szk3vbi.iot.us-east-1.amazonaws.com"
# Edit this to be your device name in the AWS IoT console
thing = "raspberry_pi2"
# MQTT-over-TLS port used by AWS IoT.
awsport = 8883
# TLS credentials: root CA plus the device certificate / private key pair.
caPath = "/home/levon/iot_keys/root-CA.crt"
certPath = "/home/levon/iot_keys/raspberry_pi.cert.pem"
keyPath = "/home/levon/iot_keys/raspberry_pi.private.key"

# Set up the shadow client
myShadowClient = AWSIoTMQTTShadowClient(thing)
myShadowClient.configureEndpoint(awshost, awsport)
myShadowClient.configureCredentials(caPath, keyPath, certPath)
# Reconnect backoff: 1s base, 32s max, 20s stable-connection window.
myShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
myShadowClient.configureConnectDisconnectTimeout(10)
myShadowClient.configureMQTTOperationTimeout(5)
myShadowClient.connect()

myDeviceShadow = myShadowClient.createShadowHandlerWithName("raspberry_pi", True)

# You can implement a custom callback function if you like, but once working I didn't require one. We still need to define it though.
customCallback = ""

# Poll the device shadow once per minute.
# NOTE(review): parse_payload is not defined in this file as shown -- it
# must be a shadowGet callback defined elsewhere; confirm before running.
while True:
    myDeviceShadow.shadowGet(parse_payload,5)
    time.sleep(60)
| [
11748,
640,
198,
11748,
33918,
198,
198,
6738,
14356,
11584,
78,
7250,
7535,
10305,
42,
13,
49215,
51,
14990,
571,
1330,
14356,
11584,
78,
15972,
48,
15751,
27447,
11792,
198,
198,
2,
707,
1477,
455,
345,
1392,
422,
4600,
8356,
1312,
... | 2.918367 | 392 |
# Generated by Django 2.2.8 on 2021-01-22 08:02
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
23,
319,
33448,
12,
486,
12,
1828,
8487,
25,
2999,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
'''
Within this script, focus to convert given set of frames to a video.
Requirements
----
You require OpenCV 3.2 to be installed.
Run
----
If need to run this script seperately, then can edit the relevant input file path and output file path.
If need to use this script within another code then can import the script and call the functions with relevant arguments.
'''
import cv2
import os
from os.path import isfile, join
if __name__ == "__main__":
    # Entry point; run() is expected to be defined elsewhere in this file
    # (not visible in this excerpt) to drive the frames-to-video conversion.
    run()
| [
7061,
6,
198,
22005,
428,
4226,
11,
2962,
284,
10385,
1813,
900,
286,
13431,
284,
257,
2008,
13,
198,
198,
42249,
198,
650,
198,
1639,
2421,
4946,
33538,
513,
13,
17,
284,
307,
6589,
13,
198,
198,
10987,
198,
650,
198,
1532,
761,
... | 3.607692 | 130 |
from .base import Grouper
from .pointwise_grouper import PointwiseGrouper
__all__ = ["pointwise_grouper"]
| [
6738,
764,
8692,
1330,
402,
472,
525,
198,
6738,
764,
4122,
3083,
62,
70,
472,
525,
1330,
6252,
3083,
38,
472,
525,
198,
198,
834,
439,
834,
796,
14631,
4122,
3083,
62,
70,
472,
525,
8973,
198
] | 2.891892 | 37 |
import subprocess
import matplotlib
matplotlib.use('AGG')
import svgutils.transform as sg
from svgutils.templates import VerticalLayout
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path
try:
    # Create two throw-away matplotlib figures with random line plots.
    figs = []
    for i in range(2):
        figs.append(plt.figure())
        plt.plot(np.random.random(100))
    layout = VerticalLayout()
    # Bug fix: in Python 3 map() returns a lazy iterator, which is not
    # subscriptable, so `sz[1] *= 3` raised TypeError. Materialise the
    # sizes as a list before indexing.
    sz = list(map(int, sg.from_mpl(figs[0]).get_size()))
    # Triple the height so both figures plus the label fit vertically.
    sz[1] *= 3
    # A concrete list is also safe to iterate more than once, unlike a
    # map iterator.
    sz = list(map(str, sz))
    layout.set_size(sz)
    layout.add_figure(sg.from_mpl(figs[0]))
    layout.add_figure(sg.from_mpl(figs[1]))
    txt1 = sg.TextElement(50, 50, "HELLO", size=12)
    layout.append([txt1])
    layout.save(os.path.join('/', 'tmp', 'stack_plots.svg'))
    try:
        print('converting to pdf')
        # NOTE(review): shell=True with a fixed command string is tolerable
        # here (no untrusted input), but an argv list with shell=False
        # would be safer.
        subprocess.call('/Applications/Inkscape.app/Contents/Resources/bin/inkscape --export-pdf=/tmp/stack_plots.pdf /tmp/stack_plots.svg', shell=True)
    except Exception:
        # Inkscape may be missing; the SVG written above is still valid.
        print('unable to run inkscape')
finally:
    # Always release the matplotlib figures, even if layout/save failed.
    plt.close('all')
| [
11748,
850,
14681,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
4760,
38,
11537,
628,
198,
11748,
38487,
70,
26791,
13,
35636,
355,
264,
70,
198,
6738,
38487,
70,
26791,
13,
11498,
17041,
1330,
38937,
32517,
1... | 2.255172 | 435 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# cdb_python_create.py
#
# Mar/26/2013
# -------------------------------------------------------------------------
import sys
# Python 2 only: reload()/setdefaultencoding do not exist in Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
import json
sys.path.append ('/var/www/data_base/common/python_common')
#
from text_manipulate import dict_append_proc
from cdb_manipulate import cdb_write_proc
#
# -------------------------------------------------------------------------
# CGI script: build a dict of city data and write it into a cdb file.
# -------------------------------------------------------------------------
file_cdb = "/var/tmp/cdb/cities.cdb"
#
# NOTE(review): data_prepare_proc is neither defined nor imported in this
# file as shown -- confirm it is provided by one of the common modules.
dict_aa = data_prepare_proc ()
#
cdb_write_proc (file_cdb,dict_aa);
#
# Emit a minimal CGI response so the web server reports success.
print "Content-type: text/html\n\n"
#
print "*** OK ***<p />"
# -------------------------------------------------------------------------
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
45537,
198,
2,
197,
66,
9945,
62,
29412,
62,
17953,
13,
9078,
198,
2,
198,
2,
197,
197,
197,
197,
197,
197,
... | 3.666667 | 234 |
import numpy as np
from deepscratch.models.initializers.initialization import Initialization
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2769,
1416,
36722,
13,
27530,
13,
36733,
11341,
13,
36733,
1634,
1330,
20768,
1634,
198
] | 4.227273 | 22 |
__author__ = 'CwT'
import struct
| [
834,
9800,
834,
796,
705,
34,
86,
51,
6,
198,
11748,
2878,
198
] | 2.538462 | 13 |
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
from .base_single_doc_model import SingleDocSummModel
| [
6738,
6121,
364,
1330,
48188,
1890,
25559,
1859,
8645,
341,
11,
48188,
30642,
7509,
198,
6738,
764,
8692,
62,
29762,
62,
15390,
62,
19849,
1330,
14206,
23579,
13065,
76,
17633,
628
] | 4.193548 | 31 |
# Bug fix: unresolved git merge-conflict markers were left in the file,
# which is a SyntaxError. The incoming branch side was empty, so the HEAD
# side is kept (markers removed) and the malformed 4-quote docstring fixed.
"""Test Code"""
take = solution()[0]
halves = solution()[1]
print(take(5, halves()))
| [
198,
198,
16791,
16791,
16791,
27,
39837,
198,
15931,
15931,
14402,
6127,
37811,
198,
20657,
796,
4610,
3419,
58,
15,
60,
198,
14201,
1158,
796,
4610,
3419,
58,
16,
60,
198,
4798,
7,
20657,
7,
20,
11,
37192,
3419,
4008,
198,
1421,
1... | 2.106667 | 75 |
# Copyright (C) 2019 Wenhua Wang
#
# This file is part of QuantLibExt, which is an extension to the
# free-software/open-source quantitative library QuantLib - http://quantlib.org/
#
# QuantLibExt is free software: you can redistribute it and/or modify it
# under the terms of the BSD license.
#
# QuantLib's license is at <http://quantlib.org/license.shtml>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the license for more details.
import os
from . import Config as config
from . import Utils as utils
from .DataLoader import DataLoader
from .CalendarIndex import CalendarIndex
| [
2,
15069,
357,
34,
8,
13130,
370,
16550,
6413,
15233,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
16972,
25835,
11627,
11,
543,
318,
281,
7552,
284,
262,
198,
2,
1479,
12,
43776,
14,
9654,
12,
10459,
26610,
5888,
16972,
25835,
532,
... | 3.646766 | 201 |
from common.scripts.cleaning import map_priority
| [
6738,
2219,
13,
46521,
13,
2375,
7574,
1330,
3975,
62,
49336,
628
] | 4.166667 | 12 |
# Generated by Django 2.0.4 on 2018-06-12 07:40
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
19,
319,
2864,
12,
3312,
12,
1065,
8753,
25,
1821,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
from django.db import models
# Create your models here.
from datetime import date
from django.urls import reverse
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
from django_resized import ResizedImageField
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
6738,
4818,
8079,
1330,
3128,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330... | 3.555556 | 72 |
import generation as gn
import tree as tr
import generator as gtr
import math
import sys
#xs = [-5,-3,-2,-1,2,6,7]
#ys = [64,26,13,4,1,53,76] # 2x^2 - 3x - 1
# Sample points for the symbolic-regression run; each zs[i] equals
# xs[i]**2 + ys[i] + 1, i.e. the target surface is z = x^2 + y + 1.
xs = [-1, 1, 0, 3, -2, 0, -1, 3, 2, -2]
ys = [1, 1, 0, 2, -2, 5, 3, -1, 5, -4]
zs = [3, 3, 1, 12, 3, 6, 5, 9, 10, 1] # x^2 + y + 1
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this file as shown -- confirm
    # it is provided elsewhere before running as a script.
    main()
11748,
5270,
355,
19967,
198,
11748,
5509,
355,
491,
198,
11748,
17301,
355,
308,
2213,
198,
11748,
10688,
198,
11748,
25064,
198,
198,
2,
34223,
796,
25915,
20,
12095,
18,
12095,
17,
12095,
16,
11,
17,
11,
21,
11,
22,
60,
198,
2,
... | 1.869318 | 176 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
2100,
796,
2124,
1... | 2.16 | 75 |
import logging
from typing import Dict, List, Optional
from great_expectations.core.usage_statistics.anonymizers.anonymizer import Anonymizer
from great_expectations.core.usage_statistics.util import (
aggregate_all_core_expectation_types,
)
from great_expectations.rule_based_profiler.config.base import RuleBasedProfilerConfig
from great_expectations.util import deep_filter_properties_iterable
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
198,
198,
6738,
1049,
62,
1069,
806,
602,
13,
7295,
13,
26060,
62,
14269,
3969,
13,
272,
5177,
11341,
13,
272,
5177,
7509,
1330,
1052,
5177,
7509,
198,
6738,
1049,
... | 3.315789 | 133 |
"""
These code are inspired by:
- https://cs231n.github.io/neural-networks-case-study/
"""
import numpy as np
if __name__ == "__main__":
    # Build a 2-class spiral toy dataset (100 points per class).
    # NOTE(review): generate_spiral_data is not defined in this excerpt --
    # confirm it is provided elsewhere in the file.
    X, y = generate_spiral_data(n_points_per_class=100, n_classes=2, visualization=False)
37811,
198,
4711,
2438,
389,
7867,
416,
25,
198,
220,
220,
220,
532,
3740,
1378,
6359,
25667,
77,
13,
12567,
13,
952,
14,
710,
1523,
12,
3262,
5225,
12,
7442,
12,
44517,
14,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
628,
628,
... | 2.629213 | 89 |
# -*- coding: utf-8 -*-
import os
import importlib
from cnab240.registro import Registros
cwd = os.path.abspath(os.path.dirname(__file__))
# Discover bank sub-packages: every sub-directory of this package that is
# not a dunder directory (e.g. __pycache__) is treated as a bank module.
nome_bancos = (fname for fname in os.listdir(cwd) if
               os.path.isdir(os.path.join(cwd, fname)) and not fname.startswith('__'))
for nome_banco in nome_bancos:
    # Import the bank package and attach its record definitions, loaded
    # from the 'specs' directory that ships inside each bank package.
    banco_module = importlib.import_module('.'.join((__package__, nome_banco)))
    module_path = os.path.abspath(os.path.dirname(banco_module.__file__))
    module_specs_path = os.path.join(module_path, 'specs')
    banco_module.registros = Registros(module_specs_path)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
1330,
8019,
198,
198,
6738,
269,
77,
397,
16102,
13,
2301,
396,
305,
1330,
13811,
4951,
198,
198,
66,
16993,
796,
28686,
13,
6978,
13,
3... | 2.285156 | 256 |
#!/usr/bin/env python3
"""
Find Code
Usage:
fc.py [options] <search-regex>
Options:
--debug Print debugging info while program is running
-m --modules Search for the names of scripts or modules (minus
filename extension) instead of for items in files.
-n --names Search names
-c --comments Search comments
-s --strings Search string literals
-t --text Search text (the default, turns off -n, -c, and -s)
-l --list-files Only print names of files that have at least one match
--skip-tests Do not search in test files or directories
--src-ext-list=SRC_EXT_LIST
Comma-separated list of filename extensions that
identify source code. [default: .c,.py,.pyx,.js]
--include-venv Search Python virtualenv dirs also
Searches source code in the current directory and recursively in all sub-
directories except those that:
- have names beginning with ``.``
- are named ``node_modules``
- look like Python virtual environments (unless --include-venv)
"""
from __future__ import print_function
import os
import re
import sys
import tokenize
import docopt
TOKENIZABLE_SRC_EXTS = ('.py', 'pyx')
def _walk_source_files(src_ext_set, skip_tests=False, debug=False,
include_venv=False):
"""
For each source file under the current directory, yield (filepath, name, ext) where:
filepath is the full source filename
name is the base filename of filepath
ext is the filename extension
"""
for dirpath, dirnames, filenames in os.walk(os.curdir):
if debug:
print("searching directory:", dirpath, file=sys.stderr)
# Sort sub-directories, skip names starting with '.'
dirnames[:] = sorted(name for name in dirnames
if not name.startswith('.'))
subdirs_to_skip = ["node_modules"]
if skip_tests:
subdirs_to_skip.extend(('test', 'tests'))
dirnames[:] = [name for name in dirnames if not name in subdirs_to_skip]
if not include_venv:
dirnames[:] = [name for name in dirnames
if not _is_venv_subdir(dirpath, name)]
for name in sorted(filenames):
base, ext = os.path.splitext(name)
if not ext in src_ext_set:
continue
if skip_tests:
if base in ('test', 'tests'):
continue
if base.startswith('test_'):
continue
filepath = os.path.join(dirpath, name)
yield filepath, name, ext
def search_for_src_files(src_ext_set, p, skip_tests=False, debug=False,
                         include_venv=False):
    """
    Search for source code files whose name matches the given pattern and
    print each matching filename, one per line.

    src_ext_set: Set of filename extensions that identify source files
    p: Regular expression compiled with re.compile()

    Return: number of matching files found
    """
    matches = 0
    # When the pattern itself contains '/', match against the full path
    # instead of the bare filename.
    match_full_path = '/' in p.pattern
    for filepath, name, _ in _walk_source_files(src_ext_set,
                                                skip_tests=skip_tests,
                                                debug=debug,
                                                include_venv=include_venv):
        candidate = filepath if match_full_path else name
        if p.search(candidate):
            print(filepath)
            matches += 1
    return matches
def search_in_src_files(src_ext_set, search_types, p, skip_tests=False,
                        debug=False, include_venv=False, list_files=False):
    """
    Search for matching lines in source code files and print each match
    found, one per line.

    src_ext_set: Set of filename extensions that identify source files
    search_types: token categories to search; when empty the files are
        scanned as plain text
    p: Regular expression compiled with re.compile()

    Return: number of matches found
    """
    total = 0
    for filepath, _name, ext in _walk_source_files(src_ext_set,
                                                   skip_tests=skip_tests,
                                                   debug=debug,
                                                   include_venv=include_venv):
        if debug:
            print("searching file:", filepath, file=sys.stderr)
        # Token-aware search only works for Python-like sources; everything
        # else falls back to a plain text scan.
        if search_types and ext in TOKENIZABLE_SRC_EXTS:
            total += search_source_file(filepath, search_types, p,
                                        list_files=list_files)
        else:
            total += search_text_file(filepath, p, list_files=list_files)
    return total
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    # NOTE(review): main() is not defined in this excerpt -- confirm it is
    # provided elsewhere in the file (it presumably parses the docopt usage).
    sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
16742,
6127,
198,
198,
28350,
25,
198,
220,
220,
220,
277,
66,
13,
9078,
685,
25811,
60,
1279,
12947,
12,
260,
25636,
29,
198,
198,
29046,
25,
198,
220,
220,
220,
1... | 2.125166 | 2,261 |
# Copyright 2019 Brian T. Park
#
# MIT License
"""
Implements the TestDataGenerator to generate the validation test data using
acetz, which uses Python ZoneProcessor class. Pulling in ZoneProcessor also
means that it pulls in the data structures defined by zonedb.
"""
from typing import Dict
from typing import List
from typing import Optional
from typing import cast
import logging
from datetime import tzinfo, datetime, timezone, timedelta
import acetime.version
from acetime.acetz import ZoneManager
from acetime.zone_processor import ZoneProcessor
from acetime.zone_processor import DateTuple
from acetime.zonedb_types import ZoneInfoMap
from acetime.zonedb.zone_registry import ZONE_REGISTRY
from acetimetools.data_types.at_types import SECONDS_SINCE_UNIX_EPOCH
from acetimetools.data_types.validation_types import (
TestItem, TestData, ValidationData
)
class TestDataGenerator:
"""Generate the validation test data for all zones specified by the
'zone_infos'. The Transitions are extracted from the ZoneProcessor and the
UTC offsets determined by acetz.
"""
    def _create_test_data_for_zone(
        self,
        zone_name: str,
    ) -> Optional[List[TestItem]]:
        """Create the TestItems for a specific zone.

        Returns None (after logging an error) when the zone is not present
        in the zonedb registry used by acetz.
        """
        logging.info(f"_create_test_items(): {zone_name}")
        zone_info = self.zone_infos.get(zone_name)
        if not zone_info:
            logging.error(f"Zone '{zone_name}' not found in acetz package")
            return None

        # tz computes the expected UTC offsets; the separate ZoneProcessor
        # is used only to enumerate the DST transitions.
        tz = self.zone_manager.gettz(zone_name)
        zone_processor = ZoneProcessor(zone_info)
        return self._create_transition_test_items(
            zone_name, tz, zone_processor)
    def _create_transition_test_items(
        self,
        zone_name: str,
        tz: tzinfo,
        zone_processor: ZoneProcessor
    ) -> List[TestItem]:
        """Create a TestItem for the tz for each zone, for each year from
        start_year to until_year, exclusive. The 'zone_processor' object is used
        as a shortcut to generate the list of transitions, so it needs to be a
        different object than the zone processor embedded inside the 'tz'
        object.

        The following test samples are created:

        * One test point for each month, on the first of the month.
        * One test point for Dec 31, 23:00 for each year.
        * A test point at the transition from DST to Standard, or vise versa.
        * A test point one second before the transition.

        Each TestData is annotated as:

        * 'A', 'a': pre-transition
        * 'B', 'b': post-transition
        * 'S': a monthly test sample
        * 'Y': end of year test sample

        For [2000, 2038], this generates about 100,000 data points.
        """
        # Keyed by epoch seconds; _add_test_item (defined later in this
        # class) is presumed to dedupe on that key -- confirm.
        items_map: Dict[int, TestItem] = {}
        for year in range(self.start_year, self.until_year):

            # Add samples just before and just after the DST transition.
            zone_processor.init_for_year(year)
            for transition in zone_processor.transitions:
                # Skip if the start year of the Transition does not match the
                # year of interest. This may happen since we use generate
                # transitions over a 14-month interval.
                start = transition.start_date_time
                transition_year = start.y
                if transition_year != year:
                    continue

                # Skip if the UTC year bleed under or over the boundaries.
                if transition.transition_time_u.y < self.start_year:
                    continue
                if transition.transition_time_u.y >= self.until_year:
                    continue

                epoch_seconds = transition.start_epoch_second

                # Add a test data just before the transition
                test_item = self._create_test_item_from_epoch_seconds(
                    tz, epoch_seconds - 1, 'A')
                self._add_test_item(items_map, test_item)

                # Add a test data at the transition itself (which will
                # normally be shifted forward or backwards).
                test_item = self._create_test_item_from_epoch_seconds(
                    tz, epoch_seconds, 'B')
                self._add_test_item(items_map, test_item)

            # Add a sample test point on the *second* of each month instead of
            # the first of the month. This prevents Jan 1, 2000 from being
            # converted to a negative epoch seconds for certain timezones, which
            # gets converted into a UTC date in 1999 when ExtendedZoneProcessor
            # is used to convert the epoch seconds back to a ZonedDateTime. The
            # UTC date in 1999 causes the actual max buffer size of
            # ExtendedZoneProcessor to become different than the one predicted
            # by BufSizeEstimator (which samples whole years from 2000 until
            # 2050), and can cause the AceTimeValidation/ExtendedAcetzTest to
            # fail on the buffer size check.
            for month in range(1, 13):
                tt = DateTuple(y=year, M=month, d=2, ss=0, f='w')
                test_item = self._create_test_item_from_datetime(
                    tz, tt, type='S')
                self._add_test_item(items_map, test_item)

            # Add a sample test point at the end of the year.
            tt = DateTuple(y=year, M=12, d=31, ss=23 * 3600, f='w')
            test_item = self._create_test_item_from_datetime(
                tz, tt, type='Y')
            self._add_test_item(items_map, test_item)

        # Return the TestItems ordered by epoch
        return [items_map[x] for x in sorted(items_map)]
    def _create_test_item_from_datetime(
        self,
        tz: tzinfo,
        tt: DateTuple,
        type: str,
    ) -> TestItem:
        """Create a TestItem for the given DateTuple in the local time zone.

        Note: only whole hours of tt.ss are used (ss // 3600), so any
        minute/second component of the DateTuple is dropped.
        """
        # TODO(bpark): It is not clear that this produces the desired
        # datetime for the given tzinfo if tz is an acetz. But I hope it
        # gives a datetime that's roughtly around that time, which is good
        # enough for unit testing.
        dt = datetime(tt.y, tt.M, tt.d, tt.ss // 3600, tzinfo=tz)
        # Convert Unix timestamp to the AceTime epoch (2000-01-01 UTC).
        unix_seconds = int(dt.timestamp())
        epoch_seconds = unix_seconds - SECONDS_SINCE_UNIX_EPOCH
        return self._create_test_item_from_epoch_seconds(
            tz, epoch_seconds, type)
def _create_test_item_from_epoch_seconds(
self,
tz: tzinfo,
epoch_seconds: int,
type: str,
) -> TestItem:
"""Determine the expected date and time components for the given
'epoch_seconds' for the given 'tz'. The 'epoch_seconds' is the
transition time calculated by the ZoneProcessor class.
Return the TestItem with the following fields:
epoch: epoch seconds from AceTime epoch (2000-01-01T00:00:00Z)
total_offset: the expected total UTC offset at epoch_seconds
dst_offset: the expected DST offset at epoch_seconds
y, M, d, h, m, s: expected date&time components at epoch_seconds
type: 'a', 'b', 'A', 'B', 'S', 'Y'
"""
# Convert AceTime epoch_seconds to Unix epoch_seconds.
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
# Get the transition time, then feed that into acetz to get the
# total offset and DST shift
utc_dt = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
dt = utc_dt.astimezone(tz)
total_offset = int(dt.utcoffset().total_seconds()) # type: ignore
dst_offset = int(dt.dst().total_seconds()) # type: ignore
assert dt.tzinfo
abbrev = dt.tzinfo.tzname(dt)
return {
'epoch': epoch_seconds,
'total_offset': total_offset,
'dst_offset': dst_offset,
'y': dt.year,
'M': dt.month,
'd': dt.day,
'h': dt.hour,
'm': dt.minute,
's': dt.second,
'abbrev': abbrev,
'type': type,
}
@staticmethod
| [
2,
15069,
13130,
8403,
309,
13,
3250,
198,
2,
198,
2,
17168,
13789,
198,
198,
37811,
198,
3546,
1154,
902,
262,
6208,
6601,
8645,
1352,
284,
7716,
262,
21201,
1332,
1366,
1262,
198,
23253,
89,
11,
543,
3544,
11361,
13035,
18709,
273,
... | 2.325229 | 3,496 |
# Integration domain identifier (presumably a Home Assistant custom
# component — confirm against the integration manifest).
DOMAIN = "centrometal_boiler"
# Keys under which the web-boiler client and system objects are stored.
WEB_BOILER_CLIENT = "web_boiler_client"
WEB_BOILER_SYSTEM = "web_boiler_system"
# Interval before retrying a failed login (units presumably seconds — verify
# against the code that consumes these constants).
WEB_BOILER_LOGIN_RETRY_INTERVAL = 60
# Interval between periodic data refreshes (same presumed units).
WEB_BOILER_REFRESH_INTERVAL = 600
| [
39170,
29833,
796,
366,
1087,
398,
316,
282,
62,
2127,
5329,
1,
198,
8845,
33,
62,
8202,
4146,
1137,
62,
5097,
28495,
796,
366,
12384,
62,
2127,
5329,
62,
16366,
1,
198,
8845,
33,
62,
8202,
4146,
1137,
62,
23060,
25361,
796,
366,
... | 2.116279 | 86 |
# pylint: disable=unused-argument,invalid-name,line-too-long
import logging
from itertools import zip_longest
from pathlib import Path
from typing import List, Optional, Set, Type, TypeVar
from alembic.autogenerate import comparators
from alembic.autogenerate.api import AutogenContext
from flupy import flu
from sqlalchemy.orm import Session
from sqlalchemy.sql.elements import TextClause
import alembic_utils
from alembic_utils.depends import solve_resolution_order
from alembic_utils.exceptions import (
DuplicateRegistration,
UnreachableException,
)
from alembic_utils.experimental import collect_subclasses
from alembic_utils.reversible_op import (
CreateOp,
DropOp,
ReplaceOp,
ReversibleOp,
)
from alembic_utils.simulate import simulate_entity
from alembic_utils.statement import (
coerce_to_quoted,
coerce_to_unquoted,
escape_colon_for_sql,
normalize_whitespace,
strip_terminating_semicolon,
)
logger = logging.getLogger(__name__)
T = TypeVar("T", bound="ReplaceableEntity")
class ReplaceableEntity:
    """A SQL entity (function, view, etc.) that can be dropped and re-created.

    Concrete subclasses implement the SQL-emitting methods; this base class
    provides identity, rendering, and diffing logic used by the Alembic
    autogenerate comparator.
    """
    @property
    def type_(self) -> str:
        """In order to support calls to `run_name_filters` and
        `run_object_filters` on the AutogenContext object, each
        entity needs to have a named type.
        https://alembic.sqlalchemy.org/en/latest/api/autogenerate.html#alembic.autogenerate.api.AutogenContext.run_name_filters
        """
        raise NotImplementedError()
    @classmethod
    def from_sql(cls: Type[T], sql: str) -> T:
        """Create an instance from a SQL string"""
        raise NotImplementedError()
    @property
    def literal_schema(self) -> str:
        """Wrap a schema name in literal quotes.
        Useful for emitting SQL statements
        """
        return coerce_to_quoted(self.schema)
    @classmethod
    def from_path(cls: Type[T], path: Path) -> T:
        """Create an instance from a SQL file path"""
        with path.open() as sql_file:
            sql = sql_file.read()
        return cls.from_sql(sql)
    @classmethod
    def from_database(cls, sess: Session, schema="%") -> List[T]:
        """Collect existing entities from the database for given schema"""
        raise NotImplementedError()
    def to_sql_statement_create(self) -> TextClause:
        """ Generates a SQL "create" statement for the entity """
        raise NotImplementedError()
    def to_sql_statement_drop(self, cascade=False) -> TextClause:
        """ Generates a SQL "drop" statement for the entity """
        raise NotImplementedError()
    def to_sql_statement_create_or_replace(self) -> TextClause:
        """ Generates a SQL "create or replace" statement for the entity """
        raise NotImplementedError()
    def get_database_definition(
        self: T, sess: Session, dependencies: Optional[List["ReplaceableEntity"]] = None
    ) -> T:  # returns the database-rendered version of self (never None)
        """Creates the entity in the database, retrieves its 'rendered' then rolls it back"""
        with simulate_entity(sess, self, dependencies) as sess:
            # Drop self
            sess.execute(self.to_sql_statement_drop())
            # collect all remaining entities (i.e. the database without self)
            db_entities: List[T] = sorted(
                self.from_database(sess, schema=self.schema), key=lambda x: x.identity
            )
        with simulate_entity(sess, self, dependencies) as sess:
            # collect all entities including self
            all_w_self: List[T] = sorted(
                self.from_database(sess, schema=self.schema), key=lambda x: x.identity
            )
        # Find "self" by diffing the before and after: both lists are sorted
        # by identity, so the first position where they disagree (or where the
        # shorter list is exhausted) is the database-rendered self.
        for without_self, with_self in zip_longest(db_entities, all_w_self):
            if without_self is None or without_self.identity != with_self.identity:
                return with_self
        raise UnreachableException()
    def render_self_for_migration(self, omit_definition=False) -> str:
        """Render a string that is valid python code to reconstruct self in a migration"""
        var_name = self.to_variable_name()
        class_name = self.__class__.__name__
        escaped_definition = self.definition if not omit_definition else "# not required for op"
        return f"""{var_name} = {class_name}(
            schema="{self.schema}",
            signature="{self.signature}",
            definition={repr(escaped_definition)}
        )\n"""
    @classmethod
    def render_import_statement(cls) -> str:
        """Render a string that is valid python code to import current class"""
        module_path = cls.__module__
        class_name = cls.__name__
        return f"from {module_path} import {class_name}\nfrom sqlalchemy import text as sql_text"
    @property
    def identity(self) -> str:
        """A string that consistently and globally identifies this entity"""
        return f"{self.__class__.__name__}: {self.schema}.{self.signature}"
    def to_variable_name(self) -> str:
        """A deterministic variable name based on the entity's schema and signature"""
        schema_name = self.schema.lower()
        # Use only the name part of the signature (before any parameter list).
        object_name = self.signature.split("(")[0].strip().lower().replace("-", "_")
        return f"{schema_name}_{object_name}"
    def get_required_migration_op(
        self: T, sess: Session, dependencies: Optional[List["ReplaceableEntity"]] = None
    ) -> Optional[ReversibleOp]:
        """Get the migration operation required for autogenerate.

        Returns None when the database already matches self exactly, a
        ReplaceOp when an entity with the same identity exists but differs,
        and a CreateOp when no entity with this identity exists yet.
        """
        # All entities in the database for self's schema
        entities_in_database: List[T] = self.from_database(sess, schema=self.schema)
        db_def = self.get_database_definition(sess, dependencies=dependencies)
        for x in entities_in_database:
            # Compare definitions modulo whitespace to avoid false positives.
            if (db_def.identity, normalize_whitespace(db_def.definition)) == (
                x.identity,
                normalize_whitespace(x.definition),
            ):
                return None
            if db_def.identity == x.identity:
                return ReplaceOp(self)
        return CreateOp(self)
##################
# Event Listener #
##################
def register_entities(
    entities: List[T],
    schemas: Optional[List[str]] = None,
    exclude_schemas: Optional[List[str]] = None,
    entity_types: Optional[List[Type[ReplaceableEntity]]] = None,
) -> None:
    """Create an event listener to watch for changes in registered entities when migrations are created using
    `alembic revision --autogenerate`
    **Parameters:**
    * **entities** - *List[ReplaceableEntity]*: A list of entities (PGFunction, PGView, etc) to monitor for revisions
    **Deprecated Parameters:**
    .. deprecated:: 0.5.1 for removal in 0.6.0
    *Configure schema and object inclusion/exclusion with `include_name` and `include_object` in `env.py`. For more information see https://alembic.sqlalchemy.org/en/latest/autogenerate.html#controlling-what-to-be-autogenerated*
    * **schemas** - *Optional[List[str]]*: A list of SQL schema names to monitor. Note, schemas referenced in registered entities are automatically monitored.
    * **exclude_schemas** - *Optional[List[str]]*: A list of SQL schemas to ignore. Note, explicitly registered entities will still be monitored.
    * **entity_types** - *Optional[List[str]]*: A list of ReplaceableEntity classes to consider during migrations. Other entity types are ignored
    """
    # Default to every ReplaceableEntity subclass shipped with alembic_utils.
    allowed_entity_types: List[Type[ReplaceableEntity]] = entity_types or collect_subclasses(
        alembic_utils, ReplaceableEntity
    )
    # Registered with Alembic's comparator dispatcher so it participates in
    # `alembic revision --autogenerate` schema comparison.
    @comparators.dispatch_for("schema")
    def include_entity(entity: T, autogen_context: AutogenContext, reflected: bool) -> bool:
        """The functions on the AutogenContext object
        are described here:
        https://alembic.sqlalchemy.org/en/latest/api/autogenerate.html#alembic.autogenerate.api.AutogenContext.run_name_filters
        The meaning of the function parameters are explained in the corresponding
        definitions in the EnvironmentContext object:
        https://alembic.sqlalchemy.org/en/latest/api/runtime.html#alembic.runtime.environment.EnvironmentContext.configure.params.include_name
        This will only have an impact for projects which set include_object and/or include_name in the configuration
        of their Alembic env.
        """
        name = f"{entity.schema}.{entity.signature}"
        parent_names = {
            "schema_name": entity.schema,
            # At the time of writing, the implementation of `run_name_filters` in Alembic assumes that every type of object
            # will either be a table or have a table_name in its `parent_names` dict. This is true for columns and indexes,
            # but not true for the type of objects supported in this library such as views as functions. Nevertheless, to avoid
            # a KeyError when calling `run_name_filters`, we have to set some value.
            "table_name": f"Not applicable for type {entity.type_}",
        }
        # According to the Alembic docs, the name filter is only relevant for reflected objects
        if reflected:
            name_result = autogen_context.run_name_filters(name, entity.type_, parent_names)
        else:
            name_result = True
        # Object filters should be applied to object from local metadata and to reflected objects
        object_result = autogen_context.run_object_filters(
            entity, name, entity.type_, reflected=reflected, compare_to=None
        )
        return name_result and object_result
| [
2,
279,
2645,
600,
25,
15560,
28,
403,
1484,
12,
49140,
11,
259,
12102,
12,
3672,
11,
1370,
12,
18820,
12,
6511,
198,
11748,
18931,
198,
6738,
340,
861,
10141,
1330,
19974,
62,
6511,
395,
198,
6738,
3108,
8019,
1330,
10644,
198,
673... | 2.750586 | 3,412 |
import argparse
import os
import matplotlib
import torch
from torch.backends import cudnn
import evaluate
from supervised_solver import SupervisedSolver
from cam_solver import CAMSolver
# Entry point: parse CLI arguments and hand off to `main`.
# NOTE(review): `str2bool` and `main` are not visible in this chunk —
# presumably defined earlier in the file; confirm before refactoring.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Train configuration.
    parser.add_argument('--image_size', type=int, default=128, help='image resolution')
    parser.add_argument('--dataset', type=str, default='L8Biome', choices=['L8Biome'])
    parser.add_argument('--batch_size', type=int, default=256, help='mini-batch size')
    parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
    parser.add_argument('--n_epochs', type=int, default=10)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--cam_threshold', type=float, default=0.10)
    parser.add_argument('--cam_method', type=str, choices=['cam', 'gradcam', 'gradcampp', 'ucam'], default='cam')
    parser.add_argument('--num_channels', type=int, default=10)
    parser.add_argument('--pretrained', type=str2bool, default=True, help='whether to load imagenet weights')
    # Test configuration.
    parser.add_argument('--test_checkpoint', type=str, default='best', help='test model from this checkpoint')
    # Miscellaneous.
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
    parser.add_argument('--use_tensorboard', type=str2bool, default=True)
    parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu',
                        help='specify device, e.g. cuda:0 to use GPU 0')
    parser.add_argument('--experiment_name', type=str, default=None)
    parser.add_argument('--debug', action='store_true', help='disable matplotlib')
    # Directories.
    parser.add_argument('--l8biome_image_dir', type=str, default='data/L8Biome')
    parser.add_argument('--l8sparcs_image_dir', type=str, default='/media/data/SPARCS')
    parser.add_argument('--orig_image_dir', type=str, default='/media/data/landsat8-biome', help='path to complete scenes')
    parser.add_argument('--model_save_dir', type=str, default='outputs/models')
    parser.add_argument('--sample_dir', type=str, default='outputs/samples')
    parser.add_argument('--result_dir', type=str, default='outputs/results')
    parser.add_argument('--log_step', type=int, default=10)
    config = parser.parse_args()
    # When an experiment name is given, namespace all output dirs under it so
    # concurrent experiments do not overwrite each other.
    if config.experiment_name is not None:
        config.model_save_dir = f'outputs/{config.experiment_name}/models'
        config.sample_dir = f'outputs/{config.experiment_name}/samples'
        config.result_dir = f'outputs/{config.experiment_name}/results'
    print(config)
    main(config)
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
11748,
2603,
29487,
8019,
198,
11748,
28034,
198,
6738,
28034,
13,
1891,
2412,
1330,
269,
463,
20471,
198,
198,
11748,
13446,
198,
6738,
28679,
62,
82,
14375,
1330,
3115,
16149,
50,
14375,
... | 2.764 | 1,000 |
"""Workflow client exception classes"""
from typing import Any, Dict, Set
from .enums import ExecutionStatus
class WorkflowClientError(Exception):
    """Base exception for all workflow client errors."""
class WorkflowDoesNotExist(WorkflowClientError):
    """Raised when the named workflow cannot be found in AWS Step Functions.

    Args:
        workflow_name: Name of the missing workflow.
    """

    def __init__(self, workflow_name: str) -> None:
        super().__init__(f"Workflow {workflow_name} does not exist")
        self.workflow_name = workflow_name
class ExecutionDoesNotExist(WorkflowClientError):
    """Raised when an execution cannot be found in AWS Step Functions.

    Args:
        workflow_name: Workflow name.
        execution_id: Execution ID.
    """

    def __init__(self, workflow_name: str, execution_id: str) -> None:
        super().__init__(
            f"Execution {execution_id} does not exist for workflow {workflow_name}"
        )
        self.workflow_name = workflow_name
        self.execution_id = execution_id
class InvalidExecutionInputData(WorkflowClientError):
    """Raised when input data for a new execution is not JSON-serializable.

    Args:
        input_data: Input data passed to the new execution.
    """

    def __init__(self, input_data: Any) -> None:
        super().__init__(f"Input data must be JSON-serializable: {input_data}")
        self.input_data = input_data
class PollForExecutionStatusTimedOut(WorkflowClientError):
    """Raised when max_time elapses while polling for an execution status.

    Args:
        details: Details about the timeout.
            See https://github.com/litl/backoff#event-handlers
        statuses: Statuses that were being checked during polling.
    """

    def __init__(self, details: Dict, statuses: Set[ExecutionStatus]) -> None:
        self.details = details
        (execution,) = details["args"]
        message = (
            f"{execution} failed to converge to {statuses}"
            f" after {details['elapsed']} seconds"
        )
        super().__init__(message)
class PollForExecutionStatusFailed(WorkflowClientError):
    """Raised when an execution completes with an unexpected final status.

    Args:
        execution: Execution instance.
        statuses: Statuses that were being checked during polling.
    """

    def __init__(
        self,
        execution: "execution.Execution",  # noqa: F821
        statuses: Set[ExecutionStatus],
    ) -> None:
        self.execution = execution
        message = f"{execution} failed to converge to {statuses}"
        super().__init__(message)
| [
37811,
12468,
11125,
5456,
6631,
6097,
37811,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
5345,
198,
198,
6738,
764,
268,
5700,
1330,
37497,
19580,
628,
198,
4871,
5521,
11125,
11792,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227... | 2.587786 | 1,048 |
# -*- coding:utf-8 -*-
# Module compiled by Mako from the 'Message.mako' template (see
# _template_filename below) — auto-generated; do not edit by hand.
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1468813642.546569
_enable_loop = True
_template_filename = '/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/admin/Message.mako'
_template_uri = '/admin/Message.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
# Renderable defs exported by the template.
_exports = ['headtags', 'col2left', 'col2main']
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"64": 11, "33": 1, "34": 5, "35": 9, "69": 11, "70": 52, "71": 52, "72": 53, "41": 3, "74": 54, "75": 54, "45": 3, "81": 75, "51": 7, "73": 53, "56": 7, "57": 8, "58": 8, "28": 0}, "uri": "/admin/Message.mako", "filename": "/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/admin/Message.mako"}
__M_END_METADATA
"""
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
6738,
285,
25496,
1330,
19124,
11,
16628,
11,
12940,
198,
4944,
7206,
20032,
1961,
796,
19124,
13,
4944,
7206,
20032,
1961,
198,
2257,
3185,
62,
49,
10619,
1137,
2751,
796,
... | 2.410995 | 382 |
import unittest
from streak_test_case import StreakTestCase
| [
11748,
555,
715,
395,
198,
198,
6738,
15113,
62,
9288,
62,
7442,
1330,
9737,
461,
14402,
20448,
198
] | 3.388889 | 18 |
import requests
import urllib
def call_server():
    """Hit the server's test-connection endpoint and return the raw response body."""
    # NOTE(review): relies on a module-level `server_url` defined elsewhere
    # in this file — confirm it is set before this is called.
    response = requests.get(server_url + '/tests/test_connection')
    return response.text
| [
11748,
7007,
198,
11748,
2956,
297,
571,
198,
198,
4299,
869,
62,
15388,
33529,
198,
220,
220,
220,
37227,
13921,
257,
2829,
4382,
869,
11,
290,
5860,
4232,
262,
4382,
5860,
526,
15931,
198,
220,
220,
220,
869,
62,
6371,
796,
4382,
... | 3.08 | 75 |
import psycopg2
import os
from os import environ
import json
from datetime import datetime
# Database connection settings, read once at import time from the
# environment; each value is None when its variable is unset.
username = os.environ.get('DATABASE_USERNAME', None)
access_key = os.environ.get('DATABASE_PASS', None)
database_endpoint= os.environ.get('DATABASE_ENDPOINT', None)
| [
11748,
17331,
22163,
70,
17,
198,
11748,
28686,
198,
6738,
28686,
1330,
551,
2268,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
2,
8060,
410,
945,
2174,
198,
198,
29460,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
... | 2.82 | 100 |
import numpy as np
def hamming_loss(x, y, thread=0.5):
    """Compute the Hamming loss of multi-label predictions.

    :param x: predicted outputs; x[i, j] is the classifier output of
        instance i for class j
    :param y: ground-truth labels; y[i, j] is 1 if instance i belongs to
        class j and 0 otherwise
    :param thread: threshold used to binarize the predicted outputs
    :return: the Hamming loss, i.e. the fraction of mispredicted labels
    """
    n, d = x.shape
    if x.shape[0] != y.shape[0]:
        print("num of instances for output and ground truth is different!!")
    if x.shape[1] != y.shape[1]:
        print("dim of output and ground truth is different!!")
    # Sum per-instance label mismatches, then normalize by the label count.
    miss_label = sum(
        cal_single_instance(x[i], y[i], thread) for i in range(n)
    )
    return miss_label * 1.0 / (n * d)
| [
11748,
299,
32152,
355,
45941,
628,
198,
198,
4299,
8891,
2229,
62,
22462,
7,
87,
11,
331,
11,
4704,
28,
15,
13,
20,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1058,
17143,
2124,
25,
262,
11001,
23862,
286,
262,
1398,
74... | 2.498294 | 293 |
# Adapted from
# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
TubeDETR model and criterion classes.
"""
from typing import Dict, Optional
import torch
import torch.distributed
import torch.nn.functional as F
from torch import nn
import math
import util.dist as dist
from util import box_ops
from util.misc import NestedTensor
from .backbone import build_backbone
from .transformer import build_transformer
class MLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN)"""
    # NOTE(review): only the docstring is present in this chunk, yet TubeDETR
    # instantiates MLP(hidden_dim, hidden_dim, 4, 3) and with dropout=0.5 —
    # the __init__/forward implementation presumably lives elsewhere; confirm.
class TubeDETR(nn.Module):
    """This is the TubeDETR module that performs spatio-temporal video grounding"""
    def __init__(
        self,
        backbone,
        transformer,
        num_queries,
        aux_loss=False,
        video_max_len=200,
        stride=5,
        guided_attn=False,
        fast=False,
        fast_mode="",
        sted=True,
    ):
        """
        :param backbone: visual backbone model
        :param transformer: transformer model
        :param num_queries: number of object queries per frame
        :param aux_loss: whether to use auxiliary losses at every decoder layer
        :param video_max_len: maximum number of frames in the model
        :param stride: temporal stride k
        :param guided_attn: whether to use guided attention loss
        :param fast: whether to use the fast branch
        :param fast_mode: which variant of fast branch to use
        :param sted: whether to predict start and end proba
        """
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        hidden_dim = transformer.d_model
        # Box regression head: 3-layer MLP emitting 4 box coordinates.
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        # Learned object queries fed to the decoder.
        self.query_embed = nn.Embedding(num_queries, hidden_dim)
        # 1x1 conv projecting backbone channels to the transformer width.
        self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
        self.backbone = backbone
        self.aux_loss = aux_loss
        self.video_max_len = video_max_len
        self.stride = stride
        self.guided_attn = guided_attn
        self.fast = fast
        self.fast_mode = fast_mode
        self.sted = sted
        if sted:
            # Head predicting per-frame start/end logits (2 outputs).
            self.sted_embed = MLP(hidden_dim, hidden_dim, 2, 2, dropout=0.5)
    def forward(
        self,
        samples: NestedTensor,
        durations,
        captions,
        encode_and_save=True,
        memory_cache=None,
        samples_fast=None,
    ):
        """The forward expects a NestedTensor, which consists of:
        - samples.tensor: batched frames, of shape [n_frames x 3 x H x W]
        - samples.mask: a binary mask of shape [n_frames x H x W], containing 1 on padded pixels
        It returns a dict with the following elements:
        - "pred_boxes": The normalized boxes coordinates for all queries, represented as
        (center_x, center_y, height, width). These values are normalized in [0, 1],
        relative to the size of each individual image (disregarding possible padding).
        See PostProcess for information on how to retrieve the unnormalized bounding box.
        - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
        dictionnaries containing the two above keys for each decoder layer.

        The forward runs in two phases: with encode_and_save=True it runs the
        backbone + video-text encoder and returns a memory cache; with
        encode_and_save=False it runs the space-time decoder on that cache.
        """
        if not isinstance(samples, NestedTensor):
            samples = NestedTensor.from_tensor_list(samples)
        if encode_and_save:
            assert memory_cache is None
            b = len(durations)
            t = max(durations)
            features, pos = self.backbone(
                samples
            )  # each frame from each video is forwarded through the backbone
            src, mask = features[
                -1
            ].decompose()  # src (n_frames)xFx(math.ceil(H/32))x(math.ceil(W/32)); mask (n_frames)x(math.ceil(H/32))x(math.ceil(W/32))
            if self.fast:
                with torch.no_grad():  # fast branch does not backpropagate to the visual backbone
                    features_fast, pos_fast = self.backbone(samples_fast)
                src_fast, mask_fast = features_fast[-1].decompose()
                src_fast = self.input_proj(src_fast)
            # temporal padding pre-encoder
            src = self.input_proj(src)
            _, f, h, w = src.shape
            f2 = pos[-1].size(1)
            device = src.device
            tpad_mask_t = None
            fast_src = None
            if not self.stride:
                # No temporal sampling: pad every video to t frames.
                tpad_src = torch.zeros(b, t, f, h, w).to(device)
                tpad_mask = torch.ones(b, t, h, w).bool().to(device)
                pos_embed = torch.zeros(b, t, f2, h, w).to(device)
                cur_dur = 0
                for i_dur, dur in enumerate(durations):
                    tpad_src[i_dur, :dur] = src[cur_dur : cur_dur + dur]
                    tpad_mask[i_dur, :dur] = mask[cur_dur : cur_dur + dur]
                    pos_embed[i_dur, :dur] = pos[-1][cur_dur : cur_dur + dur]
                    cur_dur += dur
                tpad_src = tpad_src.view(b * t, f, h, w)
                tpad_mask = tpad_mask.view(b * t, h, w)
                tpad_mask[:, 0, 0] = False  # avoid empty masks
                pos_embed = pos_embed.view(b * t, f2, h, w)
            else:  # temporal sampling
                # One clip feature per stride-sized window of frames.
                n_clips = math.ceil(t / self.stride)
                tpad_src = src
                tpad_mask = mask
                pos_embed = pos[-1]
                if self.fast:
                    fast_src = torch.zeros(b, t, f, h, w).to(device)
                tpad_mask_t = (
                    torch.ones(b, t, h, w).bool().to(device)
                )  # temporally padded mask for all frames, will be used for the decoding
                cum_dur = 0  # updated for every video
                cur_dur = 0
                cur_clip = 0
                for i_dur, dur in enumerate(durations):
                    if self.fast:
                        fast_src[i_dur, :dur] = src_fast[cum_dur : cum_dur + dur]
                        tpad_mask_t[i_dur, :dur] = mask_fast[cum_dur : cum_dur + dur]
                    else:
                        # Broadcast each sampled clip's mask to all frames it covers.
                        for i_clip in range(math.ceil(dur / self.stride)):
                            clip_dur = min(self.stride, dur - i_clip * self.stride)
                            tpad_mask_t[
                                i_dur, cur_dur - cum_dur : cur_dur - cum_dur + clip_dur
                            ] = mask[cur_clip : cur_clip + 1].repeat(clip_dur, 1, 1)
                            cur_dur += clip_dur
                            cur_clip += 1
                    cum_dur += dur
                tpad_src = tpad_src.view(b * n_clips, f, h, w)
                tpad_mask = tpad_mask.view(b * n_clips, h, w)
                pos_embed = pos_embed.view(b * n_clips, f, h, w)
                tpad_mask_t = tpad_mask_t.view(b * t, h, w)
                if self.fast:
                    fast_src = fast_src.view(b * t, f, h, w)
                tpad_mask[:, 0, 0] = False  # avoid empty masks
                tpad_mask_t[:, 0, 0] = False  # avoid empty masks
            query_embed = self.query_embed.weight
            # video-text encoder
            memory_cache = self.transformer(
                tpad_src,  # (n_clips)xFx(math.ceil(H/32))x(math.ceil(W/32))
                tpad_mask,  # (n_clips)x(math.ceil(H/32))x(math.ceil(W/32))
                query_embed,  # num_queriesxF
                pos_embed,  # (n_clips)xFx(math.ceil(H/32))x(math.ceil(W/32))
                captions,  # list of length batch_size
                encode_and_save=True,
                durations=durations,  # list of length batch_size
                tpad_mask_t=tpad_mask_t,  # (n_frames)x(math.ceil(H/32))x(math.ceil(W/32))
                fast_src=fast_src,  # (n_frames)xFx(math.ceil(H/32))x(math.ceil(W/32))
            )
            return memory_cache
        else:
            assert memory_cache is not None
            # space-time decoder
            hs = self.transformer(
                img_memory=memory_cache[
                    "img_memory"
                ],  # (math.ceil(H/32)*math.ceil(W/32) + n_tokens)x(BT)xF
                mask=memory_cache[
                    "mask"
                ],  # (BT)x(math.ceil(H/32)*math.ceil(W/32) + n_tokens)
                pos_embed=memory_cache["pos_embed"],  # n_tokensx(BT)xF
                query_embed=memory_cache["query_embed"],  # (num_queries)x(BT)xF
                query_mask=memory_cache["query_mask"],  # Bx(Txnum_queries)
                encode_and_save=False,
                text_memory=memory_cache["text_memory"],
                text_mask=memory_cache["text_attention_mask"],
            )
            if self.guided_attn:
                hs, weights, cross_weights = hs
            out = {}
            # outputs heads
            if self.sted:
                outputs_sted = self.sted_embed(hs)
            hs = hs.flatten(1, 2)  # n_layersxbxtxf -> n_layersx(b*t)xf
            outputs_coord = self.bbox_embed(hs).sigmoid()
            out.update({"pred_boxes": outputs_coord[-1]})
            if self.sted:
                out.update({"pred_sted": outputs_sted[-1]})
            if self.guided_attn:
                out["weights"] = weights[-1]
                out["ca_weights"] = cross_weights[-1]
            # auxiliary outputs
            if self.aux_loss:
                out["aux_outputs"] = [
                    {
                        "pred_boxes": b,
                    }
                    for b in outputs_coord[:-1]
                ]
                for i_aux in range(len(out["aux_outputs"])):
                    if self.sted:
                        out["aux_outputs"][i_aux]["pred_sted"] = outputs_sted[i_aux]
                    if self.guided_attn:
                        out["aux_outputs"][i_aux]["weights"] = weights[i_aux]
                        out["aux_outputs"][i_aux]["ca_weights"] = cross_weights[i_aux]
            return out
class SetCriterion(nn.Module):
    """This class computes the loss for TubeDETR."""
    def __init__(self, losses, sigma=1):
        """Create the criterion.
        Parameters:
            losses: list of all the losses to be applied. See get_loss for list of available losses.
            sigma: standard deviation for the Gaussian targets in the start and end Kullback Leibler divergence loss
        """
        super().__init__()
        self.losses = losses
        self.sigma = sigma
    def loss_boxes(self, outputs, targets, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
        targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
        The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.
        """
        assert "pred_boxes" in outputs
        src_boxes = outputs["pred_boxes"]
        target_boxes = torch.cat([t["boxes"] for t in targets], dim=0)
        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
        losses = {}
        # max(..., 1) guards against division by zero when there are no boxes.
        losses["loss_bbox"] = loss_bbox.sum() / max(num_boxes, 1)
        # GIoU is computed pairwise; diag picks the matched (src, target) pairs.
        loss_giou = 1 - torch.diag(
            box_ops.generalized_box_iou(
                box_ops.box_cxcywh_to_xyxy(src_boxes),
                box_ops.box_cxcywh_to_xyxy(target_boxes),
            )
        )
        losses["loss_giou"] = loss_giou.sum() / max(num_boxes, 1)
        return losses
    def loss_sted(self, outputs, num_boxes, inter_idx, positive_map, time_mask=None):
        """Compute the losses related to the start & end prediction, a KL divergence loss
        targets dicts must contain the key "pred_sted" containing a tensor of logits of dim [T, 2]
        """
        assert "pred_sted" in outputs
        sted = outputs["pred_sted"]
        losses = {}
        target_start = torch.tensor([x[0] for x in inter_idx], dtype=torch.long).to(
            sted.device
        )
        target_end = torch.tensor([x[1] for x in inter_idx], dtype=torch.long).to(
            sted.device
        )
        sted = sted.masked_fill(
            ~time_mask[:, :, None], -1e32
        )  # put very low probability on the padded positions before softmax
        eps = 1e-6  # avoid log(0) and division by 0
        sigma = self.sigma
        # Gaussian distribution centered on the target start frame.
        start_distrib = (
            -(
                (
                    torch.arange(sted.shape[1])[None, :].to(sted.device)
                    - target_start[:, None]
                )
                ** 2
            )
            / (2 * sigma ** 2)
        ).exp()  # gaussian target
        start_distrib = F.normalize(start_distrib + eps, p=1, dim=1)
        pred_start_prob = (sted[:, :, 0]).softmax(1)
        loss_start = (
            pred_start_prob * ((pred_start_prob + eps) / start_distrib).log()
        )  # KL div loss
        loss_start = loss_start * time_mask  # not count padded values in the loss
        # Gaussian distribution centered on the target end frame.
        end_distrib = (
            -(
                (
                    torch.arange(sted.shape[1])[None, :].to(sted.device)
                    - target_end[:, None]
                )
                ** 2
            )
            / (2 * sigma ** 2)
        ).exp()  # gaussian target
        end_distrib = F.normalize(end_distrib + eps, p=1, dim=1)
        pred_end_prob = (sted[:, :, 1]).softmax(1)
        loss_end = (
            pred_end_prob * ((pred_end_prob + eps) / end_distrib).log()
        )  # KL div loss
        loss_end = loss_end * time_mask  # do not count padded values in the loss
        loss_sted = loss_start + loss_end
        losses["loss_sted"] = loss_sted.mean()
        return losses
    def loss_guided_attn(
        self, outputs, num_boxes, inter_idx, positive_map, time_mask=None
    ):
        """Compute guided attention loss
        targets dicts must contain the key "weights" containing a tensor of attention matrices of dim [B, T, T]
        """
        weights = outputs["weights"]  # BxTxT
        positive_map = positive_map + (
            ~time_mask
        )  # the padded positions also have to be taken out
        eps = 1e-6  # avoid log(0) and division by 0
        # Penalize attention mass placed on frames outside the annotated moment.
        loss = -(1 - weights + eps).log()
        loss = loss.masked_fill(positive_map[:, :, None], 0)
        nb_neg = (~positive_map).sum(1) + eps
        loss = loss.sum(2) / nb_neg[:, None]  # sum on the column
        loss = loss.sum(1)  # mean on the line normalized by the number of negatives
        loss = loss.mean()  # mean on the batch
        losses = {"loss_guided_attn": loss}
        return losses
    def forward(self, outputs, targets, inter_idx=None, time_mask=None):
        """This performs the loss computation.
        Parameters:
            outputs: dict of tensors, see the output specification of the model for the format
            targets: list of dicts, such that len(targets) == n_annotated_frames.
                    The expected keys in each dict depends on the losses applied, see each loss' doc
            inter_idx: list of [start index of the annotated moment, end index of the annotated moment] for each video
            time_mask: [B, T] tensor with False on the padded positions, used to take out padded frames from the loss computation
        """
        # NOTE(review): this calls self.get_loss, which is not visible in this
        # chunk — presumably a dispatcher to the loss_* methods; confirm.
        # Compute the average number of target boxes accross all nodes, for normalization purposes
        num_boxes = sum(len(t["boxes"]) for t in targets)
        num_boxes = torch.as_tensor(
            [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device
        )
        if dist.is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_boxes)
        num_boxes = torch.clamp(num_boxes / dist.get_world_size(), min=1).item()
        if inter_idx is not None and time_mask is not None:
            # construct a map such that positive_map[k, i] = True iff num_frame i lies inside the annotated moment k
            positive_map = torch.zeros(time_mask.shape, dtype=torch.bool)
            for k, idx in enumerate(inter_idx):
                if idx[0] < 0:  # empty intersection
                    continue
                positive_map[k][idx[0] : idx[1] + 1].fill_(True)
            positive_map = positive_map.to(time_mask.device)
        elif time_mask is None:
            positive_map = None
        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(
                self.get_loss(
                    loss,
                    outputs,
                    targets,
                    num_boxes,
                    inter_idx,
                    positive_map,
                    time_mask,
                )
            )
        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if "aux_outputs" in outputs:
            for i, aux_outputs in enumerate(outputs["aux_outputs"]):
                for loss in self.losses:
                    kwargs = {}
                    l_dict = self.get_loss(
                        loss,
                        aux_outputs,
                        targets,
                        num_boxes,
                        inter_idx,
                        positive_map,
                        time_mask,
                        **kwargs,
                    )
                    # Suffix keys with the decoder-layer index to keep them distinct.
                    l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
                    losses.update(l_dict)
        return losses
| [
2,
30019,
276,
422,
198,
2,
15069,
357,
66,
8,
317,
680,
86,
43898,
12670,
776,
1222,
29737,
1879,
295,
13,
49962,
739,
262,
24843,
13789,
362,
13,
15,
13,
1439,
6923,
33876,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
6... | 1.952524 | 9,015 |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cityscapes Datasets."""
import os
import re
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import api_utils
import tensorflow_datasets.public_api as tfds
_CITATION = '''\
@inproceedings{Cordts2016Cityscapes,
title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year={2016}
}
'''
_DESCRIPTION = '''\
Cityscapes is a dataset consisting of diverse urban street scenes across 50 different cities
at varying times of the year as well as ground truths for several vision tasks including
semantic segmentation, instance level segmentation (TODO), and stereo pair disparity inference.
For segmentation tasks (default split, accessible via 'cityscapes/semantic_segmentation'), Cityscapes provides
dense pixel level annotations for 5000 images at 1024 * 2048 resolution pre-split into training (2975),
validation (500) and test (1525) sets. Label annotations for segmentation tasks span across 30+ classes
commonly encountered during driving scene perception. Detailed label information may be found here:
https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py#L52-L99
Cityscapes also provides coarse grain segmentation annotations (accessible via 'cityscapes/semantic_segmentation_extra')
for 19998 images in a 'train_extra' split which may prove useful for pretraining / data-heavy models.
Besides segmentation, cityscapes also provides stereo image pairs and ground truths for disparity inference
tasks on both the normal and extra splits (accessible via 'cityscapes/stereo_disparity' and
'cityscapes/stereo_disparity_extra' respectively).
Ingored examples:
- For 'cityscapes/stereo_disparity_extra':
- troisdorf_000000_000073_{*} images (no disparity map present)
WARNING: this dataset requires users to setup a login and password in order to get the files.
'''
class CityscapesConfig(tfds.core.BuilderConfig):
  """BuilderConfig for Cityscapes.

  Args:
    right_images (bool): Enables right images for stereo image tasks.
    segmentation_labels (bool): Enables image segmentation labels.
    disparity_maps (bool): Enables disparity maps.
    train_extra_split (bool): Enables train_extra split. This automatically
      enables coarse grain segmentations, if segmentation labels are used.
  """
  @api_utils.disallow_positional_args
  # NOTE(review): the method this decorator applied to (presumably
  # __init__) is missing from this fragment; as written, a bare decorator
  # followed by the next top-level class is a SyntaxError. TODO: restore
  # the decorated constructor from the original file.
class Cityscapes(tfds.core.GeneratorBasedBuilder):
  """Base class for Cityscapes datasets."""

  # Shown to users because the dataset requires a manual, authenticated
  # download; tfds surfaces this text when the files are absent.
  MANUAL_DOWNLOAD_INSTRUCTIONS = """\
  You have to download files from https://www.cityscapes-dataset.com/login/
  (This dataset requires registration).
  For basic config (semantic_segmentation) you must download
  'leftImg8bit_trainvaltest.zip' and 'gtFine_trainvaltest.zip'.
  Other configs do require additional files - please see code for more details.
  """

  # One config per task variant; the flags select which archives are
  # required and which features are emitted (see CityscapesConfig above).
  BUILDER_CONFIGS = [
      CityscapesConfig(
          name='semantic_segmentation',
          description='Cityscapes semantic segmentation dataset.',
          right_images=False,
          segmentation_labels=True,
          disparity_maps=False,
          train_extra_split=False,
      ),
      CityscapesConfig(
          name='semantic_segmentation_extra',
          description='Cityscapes semantic segmentation dataset with train_extra split and coarse labels.',  # pylint: disable=line-too-long
          right_images=False,
          segmentation_labels=True,
          disparity_maps=False,
          train_extra_split=True,
      ),
      CityscapesConfig(
          name='stereo_disparity',
          description='Cityscapes stereo image and disparity maps dataset.',
          right_images=True,
          segmentation_labels=False,
          disparity_maps=True,
          train_extra_split=False,
      ),
      CityscapesConfig(
          name='stereo_disparity_extra',
          description='Cityscapes stereo image and disparity maps dataset with train_extra split.',  # pylint: disable=line-too-long
          right_images=True,
          segmentation_labels=False,
          disparity_maps=True,
          train_extra_split=True,
      ),
  ]
# Helper functions
LEFT_IMAGE_FILE_RE = re.compile(r'([a-z\-]+)_(\d+)_(\d+)_leftImg8bit\.png')
def _get_left_image_id(left_image):
"""Returns the id of an image file.
Used to associate an image file with its corresponding label.
Example:
'bonn_000001_000019_leftImg8bit' -> 'bonn_000001_000019'
Args:
left_image: name of the image file.
Returns:
Id of the image (see example above).
"""
match = LEFT_IMAGE_FILE_RE.match(left_image)
return '{}_{}_{}'.format(*match.groups())
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
12131,
383,
309,
22854,
37535,
16092,
292,
1039,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
77... | 3.036424 | 1,812 |
import subprocess
import parmed
from .template import default_force_field, leap_template
from .utils import easy_call
def run_tleap(parm, ns_names, gaplist, sslist, leap_input=None):
    # adapted from amber_adaptbx code in phenix
    '''
    Build Amber topology/coordinate files by driving the ``tleap`` program.

    Parameters
    ----------
    parm : parmed.Structure
        Structure written out as the input PDB for tleap.
    ns_names : List[str]
        Non-standard residue names (used when building the leap template).
    gaplist : List[int]
    sslist : List[Tuple[int, int]]
        If given, use this for force fields assignment
    leap_input : str, optional
        A pre-built leap script template; when None, one is generated via
        ``_make_leap_template``.

    Returns
    -------
    parmed.Structure
        The structure loaded from the tleap-produced prmtop/rst7 pair.

    Raises
    ------
    parmed.exceptions.FormatNotFound
        If tleap failed to produce valid output; the tleap log is printed
        first to aid debugging.
    '''
    input_pdb = 'x.pdb'
    prmtop = 'x.prmtop'
    rst7 = 'x.rst7'

    # input PDB file
    parm.write_pdb(input_pdb, altlocs='first')

    # Build the leap script *before* opening the output file, and write it
    # with a context manager: the original opened the file first and only
    # closed it at the end, leaking the handle if template construction or
    # the write raised.
    if leap_input is None:
        # NOTE(review): _make_leap_template is not imported in this
        # fragment; it is presumably defined elsewhere in this module.
        leap_string = _make_leap_template(
            parm,
            ns_names,
            gaplist,
            sslist,
            input_pdb=input_pdb,
            prmtop=prmtop,
            rst7=rst7)
    else:
        leap_string = leap_input.format(
            input_pdb=input_pdb, prmtop=prmtop, rst7=rst7)

    tleap_input_file = "leap.in"
    with open(tleap_input_file, "w") as f:
        f.write(leap_string)

    # strangely tleap appends to the logfile so must delete first
    cmd = ['tleap', '-f', tleap_input_file]
    output = easy_call(cmd)
    try:
        return parmed.load_file(prmtop, rst7)
    except parmed.exceptions.FormatNotFound:
        print(output)
        # Bare raise preserves the original traceback.
        raise
| [
11748,
850,
14681,
198,
11748,
1582,
1150,
198,
6738,
764,
28243,
1330,
4277,
62,
3174,
62,
3245,
11,
16470,
62,
28243,
198,
6738,
764,
26791,
1330,
2562,
62,
13345,
628,
198,
198,
4299,
1057,
62,
7100,
499,
7,
79,
1670,
11,
36545,
... | 2.060842 | 641 |
# -*-coding:utf8 -*-
import os, requests, json, threading, datetime, webbrowser
import tkinter.messagebox
from io import BytesIO
from tkinter import * # 使用Tkinter前需要先导入
from tkinter import ttk
from PIL import ImageTk, Image
from tkinter.ttk import Separator, Combobox
# 获取今日时间戳
today_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# 打开配置文件
# 将获取的数据写入到配置文件种
# 题目的获取
# 动态生成
# 动态改变按钮的值
# 打开网页跳转
# 创建菜单
# 上一页,下一页
# 更新提示
# 退出
# # 主题自定义
# def theme_custom(self,i):
# colors = {
# 0: "#EFEBE7",
# 1: "#F9CDDC",
# 2: "#C578A4",
# 3: "#9B7EB6",
# 4: "#A8B680",
# 5: "#F9DDD3",
# 6: "#848786",
# }
# self.theme = colors[i]
# self.change_ele_bg(colors[self.theme % len(colors)])
# def change_ele_bg(self, themecolor):
# gui_style = Style()
# gui_style.configure('My.TRadiobutton', background=themecolor)
# gui_style.configure('My.TFrame', background=themecolor)
# self.init_window_name['bg'] = themecolor # 主窗口的背景色
# 获取题目到title.json文件中
# 天数获取
# 缓解软件卡顿
# 设置打开的位置
# 创建交流群窗口
# 创建搜索模块
# 创建关于窗口
# 窗口对象 图片的地址 图片的宽度和高度 距离左侧的距离 距离顶部的距离
# 图片的创建
if __name__ == "__main__":
    # Question-data file is fetched below if it does not exist yet.
    window = Tk()
    # Set the window title.
    window.title('前端小熊 v1.0.4 @Harry')
    # Set the window width and height.
    window.geometry('720x500')
    # Prevent the user from resizing the window.
    window.resizable(False, False)
    # NOTE(review): center_window, thread_it, getData and TkinterWeb are
    # not defined in this fragment -- presumably they belong to the
    # stripped helper functions above; confirm before running.
    center_window(window, 720, 500)
    # Official-account (QR-code) image shown in the bottom-right corner.
    path = "https://gitee.com/rbozo/picgo_image/raw/master/image/0/gzh.png"
    res_img = requests.get(path)
    im = Image.open(BytesIO(res_img.content))
    im = im.resize((100, 100))
    img = ImageTk.PhotoImage(im)
    # Draw the window contents.
    folder = os.path.exists('config/title.json')
    if not folder:
        thread_it(getData)  # fetch the question data file
    thread_it(TkinterWeb)
    window.mainloop()
| [
2,
220,
532,
9,
12,
66,
7656,
25,
40477,
23,
532,
9,
12,
198,
11748,
28686,
11,
7007,
11,
33918,
11,
4704,
278,
11,
4818,
8079,
11,
3992,
40259,
198,
11748,
256,
74,
3849,
13,
20500,
3524,
198,
6738,
33245,
1330,
2750,
4879,
9399,... | 1.410688 | 1,366 |
#or gate class
from myhdl import *
| [
2,
273,
8946,
1398,
198,
6738,
616,
71,
25404,
1330,
1635,
198
] | 2.916667 | 12 |
# Generated by Django 2.1.7 on 2019-09-10 17:44
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
2931,
12,
940,
1596,
25,
2598,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
from .store import config, open_group, clone_store
from .dataloader import select, help
| [
6738,
764,
8095,
1330,
4566,
11,
1280,
62,
8094,
11,
17271,
62,
8095,
198,
6738,
764,
67,
10254,
1170,
263,
1330,
2922,
11,
1037,
198
] | 3.52 | 25 |
import logging
import os
from os.path import isfile, join
import numpy as np
from data_io import file_reading
from data_io import x_y_spliting
#import matplotlib.pyplot as plt
if __name__ == '__main__':
    # NOTE(review): ``data_checking`` (and the commented-out ``data_plot``)
    # is neither defined nor imported in this module as shown -- running
    # this block as-is raises NameError; confirm where those helpers live.
    #data_file = '../../data/gesture_data/processed_data/data.txt_trainTest10/train_0.txt'
    #data_file = '../../data/arc_activity_recognition/s1_ijcal/train.txt'
    #class_column = 0
    #delimiter = ' '
    #ret_str = data_checking(data_file, class_column, delimiter)
    #print ret_str
    #data_file = '../../data/arc_activity_recognition/s1_ijcal/test.txt'
    #class_column = 0
    #delimiter = ' '
    #ret_str = data_checking(data_file, class_column, delimiter)
    #print ret_str
    # The assignments below repeatedly overwrite data_file/data_key; only
    # the last values take effect for the final data_checking call.
    data_file = '../../data/evn/ds/DS_all_ready_to_model.csv_trainTest2_weekly_3attr/test_0.txt'
    #data_file = '../../data/human/subject10_ideal.log'
    #class_column = 119
    #delimiter = '\t'
    ##null_class=1
    ##null_max=1000
    ##x_matrix, y_vector = readFile(data_file, null_class, null_max, class_column);
    ##print x_matrix.shape
    ##print y_vector.shape
    #
    #data_file = '../../data/human/processed/ready/data.txt'#_trainTest10/train_0.txt'
    #class_column = 0
    #delimiter = ' '
    #ret_str = data_checking(data_file, class_column, delimiter)
    #print ret_str
    data_file = '../../data/dsa/train_test_10_fold/test_0.txt'
    #data_file = '../../data/dsa/output.txt'
    #data_file = '../../data/rar/train_test_10_fold_class_based/train_0.txt_class_0.txt'
    #data_file = "../../data/arabic/train_test_1_fold/train_0.txt"
    #data_file = "../../data/arabic/train_test_1_fold/test_0.txt"
    #data_file = "../../data/asl/train_test_3_fold/train_0.txt"
    #data_file = '../../data/rar/train_test_10_fold/test_0.txt'
    #data_file = '../../data/arc/train_test_10_fold/test_0.txt'
    #data_file = '../../data/fixed_arc/train_test_1_fold/test_0.txt'
    data_key = "phs"
    data_key = "eeg"
    #data_key = "fad"
    data_file = "../../data/" + data_key + "/train.txt"
    class_column = 0
    delimiter = ' '
    #data_plot(data_file, class_column, delimiter)
    ret_str = data_checking(data_file, class_column, delimiter)
    print(ret_str)
| [
11748,
18931,
198,
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1366,
62,
952,
1330,
2393,
62,
25782,
198,
6738,
1366,
62,
952,
1330,
2124,
62,
88,
62,
22018,
1780... | 2.29171 | 953 |
from pm4py.objects.log.importer.xes import importer as xes_importer
| [
6738,
9114,
19,
9078,
13,
48205,
13,
6404,
13,
320,
26634,
13,
48169,
1330,
848,
4337,
355,
2124,
274,
62,
320,
26634,
628
] | 3 | 23 |
#!/usr/bin/env python -i
import os
import string
import sys
import random
import threading
import getopt
# Collects the worker threads created below so they can be started and
# joined as a batch.
thread_list = []

print('This program will process 5 files at a time Max')

for x in range(1, 6):
    file__IO = input('\tEnter file name here to analize with path:: ')
    # NOTE(review): file__IO is collected but never handed to the worker;
    # presumably it should be passed via args=(file__IO,) -- and ``cat``
    # is not defined in this fragment. Confirm against the full script.
    t = threading.Thread(target=cat)
    thread_list.append(t)

# Starting threading
for thread in thread_list:
    # Bug fix: the original set ``t.daemon`` here, which repeatedly
    # flagged only the *last created* thread; each thread must be marked
    # daemonic before it is started.
    thread.daemon = True
    thread.start()

for thread in thread_list:
    thread.join()

print('\nEnd of program')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
532,
72,
198,
11748,
28686,
198,
11748,
4731,
198,
11748,
25064,
198,
11748,
4738,
198,
11748,
4704,
278,
198,
11748,
651,
8738,
198,
198,
16663,
62,
4868,
796,
17635,
198,
198,
4798,
10786,
... | 2.87931 | 174 |
__version__ = "0.14.dev0"
| [
834,
9641,
834,
796,
366,
15,
13,
1415,
13,
7959,
15,
1,
198
] | 2 | 13 |
# Generated by Django 3.1.10 on 2021-06-21 08:40
from django.db import migrations
from django.db import models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
940,
319,
33448,
12,
3312,
12,
2481,
8487,
25,
1821,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 2.973684 | 38 |
"""
A collection of back end subroutines (mostly SQL queries).
This module contains the routines to deal with the monitoring
sources, provided by the user via the command line.
"""
import logging, sys
from tkp.db import execute as execute
from tkp.db.associations import _empty_temprunningcatalog as _del_tempruncat
from tkp.db.associations import (
_update_1_to_1_runcat,
ONE_TO_ONE_ASSOC_QUERY,
_insert_1_to_1_runcat_flux,
_update_1_to_1_runcat_flux)
logger = logging.getLogger(__name__)
def get_monitor_entries(dataset_id):
    """
    Fetch the ``monitor`` table rows that belong to the given dataset.

    Args:
        dataset_id (int): Parent dataset.

    Returns:
        list of tuples [(monitor_id, ra, decl)]
    """
    query = """\
    SELECT id
          ,ra
          ,decl
      FROM monitor
     WHERE dataset = %(dataset_id)s
    """
    return execute(query, {'dataset_id': dataset_id}).fetchall()
def associate_ms(image_id):
    """
    Associate the monitoring sources, i.e., their forced fits,
    of the current image with the ones in the running catalog.

    These associations are treated separately from the normal
    associations and there will only be 1-to-1 associations.
    The runcat-monitoring source pairs will be inserted in a
    temporary table.
    Of these, the runcat and runcat_flux tables are updated with
    the new datapoints if the (monitoring) source already existed,
    otherwise they are inserted as a new source.
    The source pair is appended to the light-curve table
    (assocxtrsource), with a type = 8 (for the first occurence)
    or type = 9 (for existing runcat sources).
    After all this, the temporary table is emptied again.

    :param image_id: id of the image whose forced fits are processed.
    """
    # Start from a clean slate in case a previous run left rows behind.
    _del_tempruncat()
    # Pair each forced fit with its known runningcatalog counterpart.
    _insert_tempruncat(image_id)
    # Light-curve datapoints (type = 9) for the existing sources.
    _insert_1_to_1_assoc()
    # Fold the new position datapoint into the runningcatalog averages.
    _update_1_to_1_runcat()
    n_updated = _update_1_to_1_runcat_flux()
    if n_updated:
        logger.debug("Updated flux for %s monitor sources" % n_updated)
    # First flux measurement in a new frequency band for known sources.
    n_inserted = _insert_1_to_1_runcat_flux()
    if n_inserted:
        logger.debug("Inserted new-band flux measurement for %s monitor sources"
                     % n_inserted)
    # Monitoring sources seen for the first time: create runningcatalog
    # entries, their fluxes and a type = 8 light-curve datapoint, then
    # link the monitor rows back to the new runcat ids.
    _insert_new_runcat(image_id)
    _insert_new_runcat_flux(image_id)
    _insert_new_1_to_1_assoc(image_id)
    _update_monitor_runcats(image_id)
    # Leave the temporary table empty for the next image.
    _del_tempruncat()
def _insert_tempruncat(image_id):
    """
    Here the associations of forced fits of the monitoring sources
    and their runningcatalog counterparts are inserted into the
    temporary table.

    We follow the implementation of the normal association procedure,
    except that we don't need to match with a De Ruiter radius, since
    the counterpart pairs are from the same runningcatalog source.

    :param image_id: image whose forced fits (extract_type = 2) are paired.
    """
    # The query is as follows:
    # t0 searches for matches between the monitoring sources
    # (extract_type = 2) in the current image that have
    # a counterpart among the runningcatalog sources. This
    # matching is done by zone, decl, ra (corrected for alpha
    # infaltion towards the poles) and the dot product by
    # using the Cartesian coordinates. Note that the conical
    # distance is not determined by the De Ruiter radius,
    # since all these sources have identical positions.
    # t0 has a left outer join with the runningcatalog_flux table,
    # since the image might be of a new frequency band. In that case
    # all the rf.values are NULL.
    # The select then determines all the new (statistical) properties
    # for the runcat-monitoring pairs, which are inserted in the
    # tempruncat table.
    # Note that a first image does not have any matches,
    # but that is taken into account by the second part of
    # the associate_ms() function.
    # The CASE expressions below update the running (weighted) flux
    # averages incrementally: when rf.f_datapoints IS NULL (first
    # datapoint in this band) the new value is used as-is, otherwise the
    # stored average over N points is combined with the new measurement
    # to give the average over N + 1 points.
    query = """\
INSERT INTO temprunningcatalog
  (runcat
  ,xtrsrc
  ,distance_arcsec
  ,r
  ,dataset
  ,band
  ,stokes
  ,datapoints
  ,zone
  ,wm_ra
  ,wm_decl
  ,wm_uncertainty_ew
  ,wm_uncertainty_ns
  ,avg_ra_err
  ,avg_decl_err
  ,avg_wra
  ,avg_wdecl
  ,avg_weight_ra
  ,avg_weight_decl
  ,x
  ,y
  ,z
  ,f_datapoints
  ,avg_f_peak
  ,avg_f_peak_sq
  ,avg_f_peak_weight
  ,avg_weighted_f_peak
  ,avg_weighted_f_peak_sq
  ,avg_f_int
  ,avg_f_int_sq
  ,avg_f_int_weight
  ,avg_weighted_f_int
  ,avg_weighted_f_int_sq
  )
  SELECT t0.runcat_id
        ,t0.xtrsrc
        ,0 as distance_arcsec
        ,0 as r
        ,t0.dataset
        ,t0.band
        ,t0.stokes
        ,t0.datapoints
        ,t0.zone
        ,t0.wm_ra
        ,t0.wm_decl
        ,t0.wm_uncertainty_ew
        ,t0.wm_uncertainty_ns
        ,t0.avg_ra_err
        ,t0.avg_decl_err
        ,t0.avg_wra
        ,t0.avg_wdecl
        ,t0.avg_weight_ra
        ,t0.avg_weight_decl
        ,t0.x
        ,t0.y
        ,t0.z
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1
              ELSE rf.f_datapoints + 1
         END AS f_datapoints
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak
                    + t0.f_peak)
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak * t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak_sq
                    + t0.f_peak * t0.f_peak)
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_f_peak_weight
                    + 1 / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak
                    + t0.f_peak / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak * t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak_sq
                    + (t0.f_peak * t0.f_peak) / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int
                    + t0.f_int)
                   / (rf.f_datapoints + 1)
         END AS avg_f_int
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int * t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int_sq
                    + t0.f_int * t0.f_int)
                   / (rf.f_datapoints + 1)
         END AS avg_f_int_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_f_int_weight
                    + 1 / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_int_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int
                    + t0.f_int / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int * t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int_sq
                    + (t0.f_int * t0.f_int) / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int_sq
    FROM (SELECT mon.runcat AS runcat_id
                ,x.id AS xtrsrc
                ,x.f_peak
                ,x.f_peak_err
                ,x.f_int
                ,x.f_int_err
                ,i.dataset
                ,i.band
                ,i.stokes
                ,r.datapoints + 1 AS datapoints
                ,r.zone
                ,r.wm_ra
                ,r.wm_decl
                ,r.wm_uncertainty_ew
                ,r.wm_uncertainty_ns
                ,r.avg_ra_err
                ,r.avg_decl_err
                ,r.avg_wra
                ,r.avg_wdecl
                ,r.avg_weight_ra
                ,r.avg_weight_decl
                ,r.x
                ,r.y
                ,r.z
            FROM monitor mon
                 JOIN extractedsource x
                 ON mon.id = x.ff_monitor
                 JOIN runningcatalog r
                 ON mon.runcat = r.id
                 JOIN image i
                 ON x.image = i.id
           WHERE mon.runcat IS NOT NULL
             AND x.image = %(image_id)s
             AND x.extract_type = 2
         ) t0
         LEFT OUTER JOIN runningcatalog_flux rf
         ON t0.runcat_id = rf.runcat
         AND t0.band = rf.band
         AND t0.stokes = rf.stokes
"""
    qry_params = {'image_id': image_id}
    cursor = execute(query, qry_params, commit=True)
    cnt = cursor.rowcount
    logger.debug("Inserted %s monitoring-runcat pairs in tempruncat" % cnt)
def _insert_runcat_flux():
    """Monitoring sources that were not yet fitted in this frequency band before,
    will be appended to it. Those have their first f_datapoint.

    NOTE(review): this helper is not called from associate_ms(), which
    uses the imported ``_insert_1_to_1_runcat_flux`` instead; it may be
    retained for external callers -- confirm before removing.
    """
    # Rows with f_datapoints = 1 in tempruncat are exactly the pairs for
    # which the LEFT OUTER JOIN in _insert_tempruncat found no existing
    # flux in this band, i.e. first-ever measurement in the band.
    query = """\
INSERT INTO runningcatalog_flux
  (runcat
  ,band
  ,stokes
  ,f_datapoints
  ,avg_f_peak
  ,avg_f_peak_sq
  ,avg_f_peak_weight
  ,avg_weighted_f_peak
  ,avg_weighted_f_peak_sq
  ,avg_f_int
  ,avg_f_int_sq
  ,avg_f_int_weight
  ,avg_weighted_f_int
  ,avg_weighted_f_int_sq
  )
  SELECT runcat
        ,band
        ,stokes
        ,f_datapoints
        ,avg_f_peak
        ,avg_f_peak_sq
        ,avg_f_peak_weight
        ,avg_weighted_f_peak
        ,avg_weighted_f_peak_sq
        ,avg_f_int
        ,avg_f_int_sq
        ,avg_f_int_weight
        ,avg_weighted_f_int
        ,avg_weighted_f_int_sq
    FROM temprunningcatalog
   WHERE f_datapoints = 1
"""
    cursor = execute(query, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.debug("Inserted new-band fluxes for %s monitoring sources in runcat_flux" % cnt)
def _insert_new_runcat(image_id):
    """Insert the fits of the monitoring sources as new sources
    into the runningcatalog

    Only monitor rows without a runningcatalog counterpart yet
    (mon.runcat IS NULL) are considered; ``mon_src = TRUE`` marks the new
    runningcatalog entries as user-requested monitoring positions.

    :param image_id: image whose forced fits (extract_type = 2) are used.
    """
    query = """\
INSERT INTO runningcatalog
  (xtrsrc
  ,dataset
  ,datapoints
  ,zone
  ,wm_ra
  ,wm_decl
  ,avg_ra_err
  ,avg_decl_err
  ,wm_uncertainty_ew
  ,wm_uncertainty_ns
  ,avg_wra
  ,avg_wdecl
  ,avg_weight_ra
  ,avg_weight_decl
  ,x
  ,y
  ,z
  ,mon_src
  )
  SELECT x.id AS xtrsrc
        ,i.dataset
        ,1 AS datapoints
        ,x.zone
        ,x.ra AS wm_ra
        ,x.decl AS wm_decl
        ,x.ra_err AS avg_ra_err
        ,x.decl_err AS avg_decl_err
        ,x.uncertainty_ew AS wm_uncertainty_ew
        ,x.uncertainty_ns AS wm_uncertainty_ns
        ,x.ra / (x.uncertainty_ew * x.uncertainty_ew) AS avg_wra
        ,x.decl / (x.uncertainty_ns * x.uncertainty_ns) AS avg_wdecl
        ,1 / (x.uncertainty_ew * x.uncertainty_ew) AS avg_weight_ra
        ,1 / (x.uncertainty_ns * x.uncertainty_ns) AS avg_weight_decl
        ,x.x
        ,x.y
        ,x.z
        ,TRUE
    FROM image i
         JOIN extractedsource x
         ON i.id = x.image
         JOIN monitor mon
         ON x.ff_monitor = mon.id
   WHERE i.id = %(image_id)s
     AND x.extract_type = 2
     AND mon.runcat IS NULL
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    ins = cursor.rowcount
    if ins > 0:
        logger.debug("Added %s new monitoring sources to runningcatalog" % ins)
def _update_monitor_runcats(image_id):
    """
    Update ``runcat`` col of ``monitor`` table for newly extracted positions.

    Links monitor rows that had no runningcatalog counterpart (runcat IS
    NULL) to the entries just created by _insert_new_runcat, found via the
    forced fit (ff_monitor) in the current image.
    """
    # The subquery is duplicated in the WHERE EXISTS clause so that only
    # monitor rows with a matching new runcat entry are touched.
    query ="""\
UPDATE monitor
   SET runcat = (SELECT rc.id
                   FROM runningcatalog rc
                        JOIN extractedsource ex
                        ON rc.xtrsrc = ex.id
                  WHERE monitor.runcat is NULL
                    AND ex.image = %(image_id)s
                    AND ex.ff_monitor = monitor.id
                )
 WHERE EXISTS (SELECT rc.id
                 FROM runningcatalog rc
                      JOIN extractedsource ex
                      ON rc.xtrsrc = ex.id
                WHERE monitor.runcat is NULL
                  AND ex.image = %(image_id)s
                  AND ex.ff_monitor = monitor.id
              )
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    up = cursor.rowcount
    logger.debug("Updated runcat cols for %s newly monitored sources" % up)
def _insert_new_runcat_flux(image_id):
    """Insert the fitted fluxes of the monitoring sources as new datapoints
    into the runningcatalog_flux.

    Extractedsources for which not a counterpart was found in the
    runningcatalog, i.e., those that do not have an entry in the
    tempruncat table (t0) will be added as a new source in the
    runningcatalog_flux table.

    :param image_id: image whose forced fits (extract_type = 2) are used.
    """
    # f_datapoints = 1 and the single-measurement statistics follow from
    # this being the first flux ever recorded for the new source.
    query = """\
INSERT INTO runningcatalog_flux
  (runcat
  ,band
  ,stokes
  ,f_datapoints
  ,avg_f_peak
  ,avg_f_peak_sq
  ,avg_f_peak_weight
  ,avg_weighted_f_peak
  ,avg_weighted_f_peak_sq
  ,avg_f_int
  ,avg_f_int_sq
  ,avg_f_int_weight
  ,avg_weighted_f_int
  ,avg_weighted_f_int_sq
  )
  SELECT rc.id
        ,i.band
        ,i.stokes
        ,1 AS f_datapoints
        ,x.f_peak
        ,x.f_peak * x.f_peak
        ,1 / (x.f_peak_err * x.f_peak_err)
        ,x.f_peak / (x.f_peak_err * x.f_peak_err)
        ,x.f_peak * x.f_peak / (x.f_peak_err * x.f_peak_err)
        ,x.f_int
        ,x.f_int * x.f_int
        ,1 / (x.f_int_err * x.f_int_err)
        ,x.f_int / (x.f_int_err * x.f_int_err)
        ,x.f_int * x.f_int / (x.f_int_err * x.f_int_err)
    FROM image i
         JOIN extractedsource x
         ON i.id = x.image
         JOIN monitor mon
         ON x.ff_monitor = mon.id
         JOIN runningcatalog rc
         ON rc.xtrsrc = x.id
   WHERE i.id = %(image_id)s
     AND x.extract_type = 2
     AND mon.runcat IS NULL
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    ins = cursor.rowcount
    if ins > 0:
        logger.debug("Added %s new monitoring fluxes to runningcatalog_flux" % ins)
def _insert_new_1_to_1_assoc(image_id):
    """
    The forced fits of the monitoring sources which are new
    are appended to the assocxtrsource (light-curve) table
    as a type = 8 datapoint.

    :param image_id: image whose forced fits (extract_type = 2) are used.
    """
    # Variability indices (v_int, eta_int) start at 0 because there is
    # only one datapoint for a brand-new source.
    query = """\
INSERT INTO assocxtrsource
  (runcat
  ,xtrsrc
  ,type
  ,distance_arcsec
  ,r
  ,v_int
  ,eta_int
  ,f_datapoints
  )
  SELECT rc.id
        ,rc.xtrsrc
        ,8 AS type
        ,0
        ,0
        ,0 AS v_int
        ,0 AS eta_int
        ,1 as f_datapoints
    FROM runningcatalog rc
         JOIN extractedsource x
         ON rc.xtrsrc = x.id
         JOIN image i
         on x.image = i.id
         JOIN monitor mon
         ON x.ff_monitor = mon.id
   WHERE i.id = %(image_id)s
     AND mon.runcat IS NULL
     AND x.extract_type = 2
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.debug("Inserted %s new runcat-monitoring source pairs in assocxtrsource" % cnt)
def _insert_1_to_1_assoc():
    """
    The runcat-monitoring pairs are appended to the assocxtrsource
    (light-curve) table as a type = 9 datapoint.
    """
    # type = 9 marks a forced-fit datapoint for an already known
    # runningcatalog source (cf. type = 8 for first occurrences); the
    # shared ONE_TO_ONE_ASSOC_QUERY reads the pairs from tempruncat.
    cursor = execute(ONE_TO_ONE_ASSOC_QUERY, {'type': 9}, commit=True)
    cnt = cursor.rowcount
    logger.debug("Inserted %s runcat-monitoring source pairs in assocxtrsource" % cnt)
| [
37811,
198,
32,
4947,
286,
736,
886,
850,
81,
448,
1127,
357,
29471,
16363,
20743,
737,
198,
198,
1212,
8265,
4909,
262,
31878,
284,
1730,
351,
262,
9904,
198,
82,
2203,
11,
2810,
416,
262,
2836,
2884,
262,
3141,
1627,
13,
198,
3781... | 1.948373 | 7,961 |
"""
LC 2155
You are given a 0-indexed binary array nums of length n. nums can be divided at index i (where 0 <= i <= n) into two arrays (possibly empty) numsleft and numsright:
numsleft has all the elements of nums between index 0 and i - 1 (inclusive), while numsright has all the elements of nums between index i and n - 1 (inclusive).
If i == 0, numsleft is empty, while numsright has all the elements of nums.
If i == n, numsleft has all the elements of nums, while numsright is empty.
The division score of an index i is the sum of the number of 0's in numsleft and the number of 1's in numsright.
Return all distinct indices that have the highest possible division score. You may return the answer in any order.
Example 1:
Input: nums = [0,0,1,0]
Output: [2,4]
Explanation: Division at index
- 0: numsleft is []. numsright is [0,0,1,0]. The score is 0 + 1 = 1.
- 1: numsleft is [0]. numsright is [0,1,0]. The score is 1 + 1 = 2.
- 2: numsleft is [0,0]. numsright is [1,0]. The score is 2 + 1 = 3.
- 3: numsleft is [0,0,1]. numsright is [0]. The score is 2 + 0 = 2.
- 4: numsleft is [0,0,1,0]. numsright is []. The score is 3 + 0 = 3.
Indices 2 and 4 both have the highest possible division score 3.
Note the answer [4,2] would also be accepted.
Example 2:
Input: nums = [0,0,0]
Output: [3]
Explanation: Division at index
- 0: numsleft is []. numsright is [0,0,0]. The score is 0 + 0 = 0.
- 1: numsleft is [0]. numsright is [0,0]. The score is 1 + 0 = 1.
- 2: numsleft is [0,0]. numsright is [0]. The score is 2 + 0 = 2.
- 3: numsleft is [0,0,0]. numsright is []. The score is 3 + 0 = 3.
Only index 3 has the highest possible division score 3.
Example 3:
Input: nums = [1,1]
Output: [0]
Explanation: Division at index
- 0: numsleft is []. numsright is [1,1]. The score is 0 + 2 = 2.
- 1: numsleft is [1]. numsright is [1]. The score is 0 + 1 = 1.
- 2: numsleft is [1,1]. numsright is []. The score is 0 + 0 = 0.
Only index 0 has the highest possible division score 2.
"""
"""
Time O(N)
Space O(1)
"""
| [
37811,
198,
5639,
362,
18742,
198,
1639,
389,
1813,
257,
657,
12,
9630,
276,
13934,
7177,
997,
82,
286,
4129,
299,
13,
997,
82,
460,
307,
9086,
379,
6376,
1312,
357,
3003,
657,
19841,
1312,
19841,
299,
8,
656,
734,
26515,
357,
39363... | 2.692612 | 758 |
#!/usr/bin/env python3
# fileencoding=utf-8
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
2393,
12685,
7656,
28,
40477,
12,
23,
198
] | 2.315789 | 19 |
# from allauth.account.views import confirm_email
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
from django.conf.urls.static import static
# swagger
from health_care.views import complete_view
from users.views import custom_confirm_email
# Swagger / OpenAPI metadata shown in the generated API documentation.
api_info = openapi.Info(
    title="Health Care API",
    default_version="v1",
    description="API documentation for Health Care App",
)

# Schema view serving the interactive docs; public, no authentication.
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.AllowAny,),
)

urlpatterns = []

# Interactive swagger UI; cache_timeout=0 so schema changes show
# immediately during development.
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]

# django-debug-toolbar routes are only mounted when DEBUG is on.
if settings.DEBUG:
    import debug_toolbar

    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]

urlpatterns += [
    path("accounts/", include("allauth.urls")),
    path('admin/', admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", custom_confirm_email),
    path('registration/complete/', complete_view, name='account_confirm_complete'),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path('ckeditor/', include('ckeditor_uploader.urls')),
    path("api/v1/", include([
        path("", include("users.api.v1.urls")),
        path("", include("subscription.urls")),
        path("", include('backapi.api.v1.urls')),
    ]))
# static() only serves media when DEBUG is True; it is a no-op otherwise.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
2,
422,
477,
18439,
13,
23317,
13,
33571,
1330,
6216,
62,
12888,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
67... | 2.772436 | 624 |
from gym.envs.zxstock import AtariEnv
| [
6738,
11550,
13,
268,
14259,
13,
42592,
13578,
1330,
35884,
4834,
85,
198
] | 2.923077 | 13 |
#!/usr/bin/env python
import csv
import rospy
import rospkg
import roslaunch
from interaccion.msg import *
from std_msgs.msg import String, Bool, Empty
if __name__ == "__main__":
    try:
        rosp = rospkg.RosPack()
        path = rosp.get_path('interaccion')  # Directory path used for
                                             # storing the data files.
        # NOTE(review): SecurityCheck is not defined in this fragment; it
        # presumably comes from the wildcard import or a stripped class
        # definition above -- confirm.
        sec_check = SecurityCheck(path)
        rate = rospy.Rate(10.0)
        while not rospy.is_shutdown():
            # update_node acts as a toggle request: launch the robot node
            # when stopped, stop it when running.
            if sec_check.update_node:
                if not sec_check.running:
                    process = sec_check.launch.launch(sec_check.robot_node)
                    sec_check.running = True
                else:
                    # NOTE(review): if the first toggle arrives while
                    # ``running`` is already True, ``process`` is unbound
                    # here and a NameError is raised -- verify the
                    # intended initial state of SecurityCheck.running.
                    process.stop()
                    sec_check.running = False
                sec_check.update_node = False
            rate.sleep()
    except rospy.ROSInterruptException:
        pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
269,
21370,
198,
11748,
686,
2777,
88,
198,
11748,
686,
2777,
10025,
198,
11748,
686,
6649,
11429,
198,
6738,
987,
4134,
295,
13,
19662,
1330,
1635,
198,
6738,
14367,
62,
9... | 1.993088 | 434 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyperApp - it all starts here.
PyperCard inspired applications in your browser via Brython. This module
provides commands for users to set up everything and carry our common tasks.
As `manage.py` is to Django, so `pypr` is to PyperApp.
Copyright © 2020 Nicholas H.Tollervey
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import appdirs
import click
import datetime
import gettext
# ``importlib.metadata`` is a submodule that is NOT bound by a plain
# ``import importlib``; without the explicit import the
# ``importlib.metadata.version`` call below raises AttributeError.
import importlib.metadata
import locale
import logging
import os
import sys

from . import utils


__version__ = importlib.metadata.version("pyperapp")


#: Flag to indicate if the command is being run in verbose mode.
VERBOSE = False

#: The directory containing the utility's log file.
LOG_DIR = appdirs.user_log_dir(appname="pyperapp", appauthor="ntoll")

#: The location of the log file for the utility.
LOGFILE = os.path.join(LOG_DIR, "pyperapp.log")

# Ensure LOG_DIR related directories and files exist (exist_ok avoids a
# race between the check and the creation).
os.makedirs(LOG_DIR, exist_ok=True)  # pragma: no cover

# Setup logging.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logfile_handler = logging.FileHandler(LOGFILE)
log_formatter = logging.Formatter("%(levelname)s: %(message)s")
logfile_handler.setFormatter(log_formatter)
logger.addHandler(logfile_handler)

# Configure language/locale setup. ``locale.getlocale()`` returns
# ``(None, None)`` when no locale is configured (e.g. minimal CI
# environments); fall back to English rather than handing ``None`` to
# gettext, which cannot expand it. ``install()`` puts ``_`` in builtins.
language_code = locale.getlocale()[0] or "en"
localedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "locale"))
gettext.translation(
    "pyperapp", localedir=localedir, languages=[language_code], fallback=True
).install()
@click.group(help=_("Manage PyperApp projects."))
@click.option(
    "--verbose", is_flag=True, help=_("Comprehensive logging sent to stdout.")
)
@click.version_option(
    version=__version__,
    prog_name="pypr",
    message=_("%(prog)s, a PyperApp management tool. Version %(version)s."),
)
def pypr(verbose):
    """
    Root command group. Configures logging before any sub-command runs.
    """
    if verbose:
        # Mirror the file log to stdout as well.
        global VERBOSE
        VERBOSE = True
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setLevel(logging.INFO)
        stdout_handler.setFormatter(log_formatter)
        logger.addHandler(stdout_handler)
        click.echo(_("Logging to {}\n").format(LOGFILE))
    started_at = datetime.datetime.now()
    logger.info(_("### Started ") + str(started_at))
@pypr.command(help=_("Create a new PyperApp with the given name."))
@click.argument("name", nargs=1)
def create(name):
    """
    Gather project metadata from the user, then hand off skeleton
    creation to ``utils.create``.
    """
    click.echo(_("Creating new PyperApp {}").format(name))
    author_name = click.prompt(_("Author's name"))
    summary = click.prompt(_("A brief project description"))
    utils.create(name, author_name, summary, __version__)
    location_msg = _("Your new PyperApp is in the '{}' subdirectory.").format(
        name
    )
    click.echo(location_msg)
    click.echo(
        _("Change into the directory and type, 'pypr run' to check it works.")
    )
@pypr.command(help=_("Run the application in the current directory."))
def run():
    """
    Serve the PyperApp found in the current working directory.

    Delegates to ``utils.run``, which looks for a valid manifest.toml
    file, copies the app settings somewhere temporary, serves the app,
    and opens Chromium in app mode pointing at it.
    """
    utils.run()
if __name__ == "__main__":
    # Allow the module to be executed directly as a script.
    pypr()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
20519,
525,
4677,
532,
340,
477,
4940,
994,
13,
198,
198,
20519,
525,
16962,
7867,
5479,
287,
534,
6... | 3.033934 | 1,444 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import sentencepiece as sp
from text2sql.framework.register import RegisterSet
from text2sql.framework.reader.tokenizer.tokenizer import Tokenizer
from text2sql.framework.utils.util_helper import convert_to_unicode
class SentencepieceTokenizer(object):
    """Tokenizer backed by a SentencePiece model.

    NOTE(review): no ``__init__`` is defined here; the attributes used
    below (``tokenizer``, ``vocabulary``, ``do_lower_case``,
    ``sp_unk_token``, ``unk_token``) are presumably supplied by the
    surrounding framework -- confirm against the full source.
    """

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Returns:
            A list of wordpiece tokens.
        """
        if self.do_lower_case:
            text = text.lower()
        text = convert_to_unicode(text.replace("\1", " "))
        vocab = self.vocabulary.vocab_dict
        pieces = self.tokenizer.EncodeAsPieces(text)
        result = []
        for piece in pieces:
            # Map SentencePiece's own unknown marker onto ours first, then
            # fall back to the unknown token for out-of-vocabulary pieces.
            candidate = self.unk_token if piece == self.sp_unk_token else piece
            result.append(candidate if candidate in vocab else self.unk_token)
        return result

    def convert_tokens_to_ids(self, tokens):
        """convert tokens to ids"""
        return self.vocabulary.convert_tokens_to_ids(tokens)

    def convert_ids_to_tokens(self, ids):
        """convert ids to tokens"""
        return self.vocabulary.convert_ids_to_tokens(ids)
class WordsegTokenizer(SentencepieceTokenizer):
    """Runs Wordseg tokenziation.

    Splits the input on ``split_token`` first; any segment that is not a
    known vocabulary word is further broken into SentencePiece pieces.
    """

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Returns:
            A list of wordpiece tokens.
        """
        text = text.lower() if self.do_lower_case else text
        text = convert_to_unicode(text)
        output_tokens = []
        for token in text.split(self.split_token):
            if token in self.vocabulary.vocab_dict:
                output_tokens.append(token)
            else:
                sp_tokens = self.tokenizer.EncodeAsPieces(token)
                for sp_token in sp_tokens:
                    # NOTE(review): this checks ``self.vocab`` while the
                    # branch above checks ``self.vocabulary.vocab_dict`` --
                    # confirm both attributes exist and agree. Pieces not in
                    # the vocab are silently dropped (no unk fallback),
                    # unlike the base class.
                    if sp_token in self.vocab:
                        output_tokens.append(sp_token)
        return output_tokens

    # convert_tokens_to_ids / convert_ids_to_tokens are inherited from
    # SentencepieceTokenizer; the verbatim re-definitions that used to live
    # here were redundant and have been removed.
def tokenize_chinese_chars(text):
    """Segment *text* so every CJK character becomes its own element.

    Despite the historical name, no whitespace is inserted: the function
    returns a list of strings in which each CJK character is a separate
    element and maximal runs of non-CJK characters are kept together.

    Args:
        text: the unicode string to segment.

    Returns:
        list[str]: the segmented pieces, in original order.
    """

    def _is_chinese_char(cp):
        """Checks whether codepoint ``cp`` belongs to a CJK block."""
        # This defines a "chinese character" as anything in the CJK Unicode
        # block: https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean
        # characters, despite its name. The modern Korean Hangul alphabet is
        # a different block, as is Japanese Hiragana and Katakana. Those
        # alphabets are used to write space-separated words, so they are not
        # treated specially and are handled like all the other languages.
        return (
            (0x4E00 <= cp <= 0x9FFF)
            or (0x3400 <= cp <= 0x4DBF)
            or (0x20000 <= cp <= 0x2A6DF)
            or (0x2A700 <= cp <= 0x2B73F)
            or (0x2B740 <= cp <= 0x2B81F)
            or (0x2B820 <= cp <= 0x2CEAF)
            or (0xF900 <= cp <= 0xFAFF)
            or (0x2F800 <= cp <= 0x2FA1F)
        )

    output = []
    buff = ""
    for char in text:
        if _is_chinese_char(ord(char)):
            # Flush the pending non-CJK run before emitting the CJK char.
            if buff:
                output.append(buff)
                buff = ""
            output.append(char)
        else:
            buff += char
    if buff:
        output.append(buff)
    return output
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
383,
3012,
9552,
15417,
4816,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,... | 2.305952 | 2,033 |
from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration
import os
| [
6738,
369,
504,
1330,
31634,
8979,
11,
4899,
11,
327,
12050,
198,
6738,
369,
504,
13,
48277,
1330,
31634,
44651,
38149,
198,
11748,
28686,
628
] | 4.24 | 25 |
import numpy as np
import pytest
from npe2 import DynamicPlugin
from qtpy.QtWidgets import QLabel, QRadioButton
from napari._qt.dialogs.qt_reader_dialog import (
QtReaderDialog,
open_with_dialog_choices,
prepare_remaining_readers,
)
from napari.errors.reader_errors import ReaderPluginError
from napari.settings import get_settings
@pytest.fixture
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
299,
431,
17,
1330,
26977,
37233,
198,
6738,
10662,
83,
9078,
13,
48,
83,
54,
312,
11407,
1330,
1195,
33986,
11,
1195,
26093,
21864,
198,
198,
6738,
25422,
2743,
13557,
... | 2.906977 | 129 |
import torch
import numpy as np
from torch import nn
class ActorConv(nn.Module):
    """
    Actor half of an actor-critic pair.

    NOTE(review): the original docstring was truncated mid-sentence; it
    states that the actor and critic share a convolutional network which
    encodes the local map into a vector. No layers or forward pass are
    defined in this stub -- confirm against the full source.
    """
class CriticConv(nn.Module):
    """
    Critic half of an actor-critic pair.

    NOTE(review): the original docstring was truncated mid-sentence; it
    states that the actor and critic share a convolutional network which
    encodes the local map into a vector. No layers or forward pass are
    defined in this stub -- confirm against the full source.
    """
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
28034,
1330,
299,
77,
198,
198,
4871,
27274,
3103,
85,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
383,
8674,
290,
4014,
2648,
257,
3063,
2122,... | 3.255639 | 133 |
import time
import serial
def send_serial_cmd(print_prefix, command):
    """
    Send a command string to the OPS module over the global ``ser`` port
    and block until a response containing the message-start marker is
    read back.

    Args:
        print_prefix: label echoed to stdout ahead of the command.
        command: raw command string understood by the OPS module.
    """
    data_for_send_str = command
    data_for_send_bytes = str.encode(data_for_send_str)
    print(print_prefix, command)
    ser.write(data_for_send_bytes)
    # initialize message verify checking
    ser_message_start = '{'
    ser_write_verify = False
    # print out module response to command string
    while not ser_write_verify:
        data_rx_bytes = ser.readline()
        data_rx_length = len(data_rx_bytes)
        if data_rx_length != 0:
            data_rx_str = str(data_rx_bytes)
            # str.find returns -1 when the marker is absent (truthy!) and 0
            # when it is at the start (falsy), so the result must be compared
            # against -1 rather than used directly as a boolean.
            if data_rx_str.find(ser_message_start) != -1:
                ser_write_verify = True
# Open the serial link to the OPS module. Framing is 9600 8N1; reads time
# out after 1 second so the read loops above/below never block forever.
ser = serial.Serial(
    port='/dev/ttyACM0',
    baudrate=9600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1,
    writeTimeout=2
)
# Discard any stale bytes buffered before this program started.
ser.flushInput()
ser.flushOutput()
# constants for the OPS module
# Speed unit command strings and matching human-readable labels
# (parallel lists): US=mph, UK=km/h, UM=m/s, UC=cm/s.
Ops_Speed_Output_Units = ['US', 'UK', 'UM', 'UC']
Ops_Speed_Output_Units_lbl = ['mph', 'km/h', 'm/s', 'cm/s']
# NOTE(review): the two-letter strings below look like OPS-241/242 command
# codes ('BZ' = report zero for blanks, '??' = module information query);
# the exact semantics of 'SX', 'PX', 'MX', 'OZ' should be verified against
# the module's API documentation.
Ops_Blanks_Pref_Zero = 'BZ'
Ops_Sampling_Frequency = 'SX'
Ops_Transmit_Power = 'PX'
Ops_Threshold_Control = 'MX'
Ops_Module_Information = '??'
Ops_Overlook_Buffer = 'OZ'
# initialize the OPS module by pushing the configuration commands above
send_serial_cmd("\nOverlook buffer", Ops_Overlook_Buffer)
send_serial_cmd("\nSet Speed Output Units: ", Ops_Speed_Output_Units[0])
send_serial_cmd("\nSet Sampling Frequency: ", Ops_Sampling_Frequency)
send_serial_cmd("\nSet Transmit Power: ", Ops_Transmit_Power)
send_serial_cmd("\nSet Threshold Control: ", Ops_Threshold_Control)
send_serial_cmd("\nSet Blanks Preference: ", Ops_Blanks_Pref_Zero)
# Optional: query module information (left disabled).
# send_serial_cmd("\nModule Information: ", Ops_Module_Information)
def ops_get_speed():
    """
    Block until the OPS module reports a speed, then return it.

    Lines containing '{' are module status/config echoes and are skipped;
    the first line that parses as a number is rounded and returned.

    Returns:
        float: the rounded speed reading.
    """
    while True:
        Ops_rx_bytes = ser.readline()
        # check for speed information from OPS module
        if len(Ops_rx_bytes) == 0:
            # Read timed out with no data; poll again.
            continue
        Ops_rx_str = str(Ops_rx_bytes)
        if Ops_rx_str.find('{') != -1:
            # JSON-ish status line, not a speed reading.
            continue
        try:
            Ops_rx_float = float(Ops_rx_bytes)
        except ValueError:
            print("Unable to convert to a number the string: " + Ops_rx_str)
            continue
        return float(round(Ops_rx_float))
| [
11748,
640,
198,
11748,
11389,
628,
198,
4299,
3758,
62,
46911,
62,
28758,
7,
4798,
62,
40290,
11,
3141,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2163,
329,
7216,
11389,
9729,
284,
262,
40490,
8265,
198,
220,
220,
220,
3... | 2.284247 | 1,168 |
# Copyright (c) Aaron Gallagher <_@habnab.it>
# See COPYING for details.
import six
from parsley import makeProtocol, stack
from twisted.internet.error import ConnectionLost, ConnectionRefusedError
from twisted.internet import defer, protocol
from twisted.python import failure, log
from twisted.trial import unittest
from twisted.test import proto_helpers
from txsocksx.test.util import FakeEndpoint
from txsocksx import client, errors, grammar
import txsocksx.constants as c
# Pre-built Failure instances, presumably shared by tests in this module.
connectionLostFailure = failure.Failure(ConnectionLost())
connectionRefusedFailure = failure.Failure(ConnectionRefusedError())
# Parsley grammar fragment for a custom auth method: forwards the parsed
# payload byte to receiver.authedAddition.
authAdditionGrammar = """
authAddition = 'addition' anything:x -> receiver.authedAddition(x)
"""
# Protocol factory combining the stock SOCKS5 grammar with the
# addition-auth extension above. NOTE(review): AuthAdditionWrapper is
# defined elsewhere in this module (not visible in this excerpt).
AdditionAuthSOCKS5Client = makeProtocol(
    grammar.grammarSource + authAdditionGrammar,
    client.SOCKS5Sender,
    stack(client.SOCKS5AuthDispatcher, AuthAdditionWrapper, client.SOCKS5Receiver),
    grammar.bindings)
| [
2,
15069,
357,
66,
8,
12139,
38580,
1279,
62,
31,
5976,
77,
397,
13,
270,
29,
198,
2,
4091,
27975,
45761,
329,
3307,
13,
198,
198,
11748,
2237,
198,
198,
6738,
13544,
1636,
1330,
787,
19703,
4668,
11,
8931,
198,
6738,
19074,
13,
3... | 3.244898 | 294 |