content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np | [
11748,
299,
32152,
355,
45941
] | 3.6 | 5 |
import os
import io
from google.cloud import vision
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'serviceAccountKey.json'
client = vision.ImageAnnotatorClient()
FILE_NAME = 'images/002.jpeg'
with io.open(FILE_NAME, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
for text in texts:
print('\n"{}"'.format(text.description))
vertices = (['({},{})'.format(vertex.x, vertex.y)
for vertex in text.bounding_poly.vertices])
print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
| [
11748,
28686,
198,
11748,
33245,
198,
6738,
23645,
13,
17721,
1330,
5761,
198,
198,
418,
13,
268,
2268,
17816,
38,
6684,
38,
2538,
62,
2969,
31484,
6234,
62,
9419,
1961,
3525,
12576,
50,
20520,
796,
705,
15271,
30116,
9218,
13,
17752,
... | 2.611987 | 317 |
from django.urls import include, path
urlpatterns = [
path('podcasts/', include('podcast.urls')),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
2291,
11,
3108,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
46032,
82,
14,
3256,
2291,
10786,
46032,
13,
6371,
82,
11537,
828,
198,
60
] | 2.837838 | 37 |
canUpdate = lambda n, o, c, u: l(n) == l(o) or not(c or re.search('\W', n) or l(n) in map(l,u))
l = unicode.lower | [
5171,
10260,
796,
37456,
299,
11,
267,
11,
269,
11,
334,
25,
300,
7,
77,
8,
6624,
300,
7,
78,
8,
393,
407,
7,
66,
393,
302,
13,
12947,
10786,
59,
54,
3256,
299,
8,
393,
300,
7,
77,
8,
287,
3975,
7,
75,
11,
84,
4008,
198,
... | 2.092593 | 54 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Imu
from tf.transformations import quaternion_from_euler
import math
pub = None
if __name__ == '__main__':
while not rospy.is_shutdown():
configure() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
686,
2777,
88,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
10903,
198,
6738,
12694,
62,
907,
14542,
13,
19662,
1330,
1846,
84,
198,
6738,
48700,
13,
35636,
602,
1330,
627... | 2.757895 | 95 |
import pyeccodes.accessors as _
| [
11748,
279,
5948,
535,
4147,
13,
15526,
669,
355,
4808,
628
] | 3 | 11 |
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager
from typing import Optional
import numpy as np
@contextmanager
def local_numpy_seed(seed: Optional[int] = None) -> None:
"""Run numpy codes with a local random seed.
If seed is None, the default random state will be used.
"""
state = np.random.get_state()
if seed is not None:
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
| [
2,
15069,
357,
66,
8,
4946,
44,
5805,
397,
13,
1439,
2489,
10395,
13,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
31,
22866,
37153,
198,
4299,
1957,
62,
... | 2.767045 | 176 |
import numpy as np
import cv2 as cv
import glob
# termination criteria
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Calibrates according to images of a chessboard
if __name__ == "__main__":
get_calibration_matrix(0.017, 6, 8) | [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
15095,
198,
2,
19883,
9987,
198,
22213,
5142,
796,
357,
33967,
13,
5781,
44,
62,
9419,
2043,
1137,
3539,
62,
36,
3705,
1343,
269,
85,
13,
5781,
44,
... | 2.47619 | 105 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
import io
from urllib.parse import urlparse
from knack.log import get_logger
from azure.cli.core.azclierror import (
InvalidArgumentValueError,
RequiredArgumentMissingError,
MutuallyExclusiveArgumentError,
)
from paramiko.hostkeys import HostKeyEntry
from paramiko.ed25519key import Ed25519Key
from paramiko.ssh_exception import SSHException
from Crypto.PublicKey import RSA, ECC, DSA
from .utils import from_base64
from ._client_factory import resource_providers_client
from . import consts
logger = get_logger(__name__)
# Helper
# pylint: disable=broad-except
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
198,... | 3.991632 | 239 |
# -*- coding: utf-8 -*-
import logging
import requests
import hashlib
from django.http.response import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.conf import settings
from robokassa.models import SuccessNotification
from robokassa.forms import ResultURLForm, SuccessRedirectForm, FailRedirectForm
from robokassa.signals import fail_page_visited
from checkout.helpers import send_order_mails
from checkout.managers import Checkout, Storage
from checkout.models import Order
logger = logging.getLogger(__name__)
@csrf_exempt
@csrf_exempt
def receive_result(request):
""" обработчик для ResultURL. """
form = ResultURLForm(request.POST)
if form.is_valid():
id, sum = form.cleaned_data['InvId'], form.cleaned_data['OutSum']
# сохраняем данные об успешном уведомлении в базе, чтобы
# можно было выполнить дополнительную проверку на странице успешного
# заказа
notification = SuccessNotification.objects.create(InvId=id, OutSum=sum)
order = Order.objects.get(id=id)
order.status = Order.STATUS_PAYED
order.net = float(sum)
logger.info('Order #%d had payed. Sum: %s' % (order.id, order.net))
if not order.is_emails_sended:
send_order_mails(order, order.user)
order.is_emails_sended = True
order.save()
return HttpResponse('OK%s' % id)
return HttpResponse('error: bad signature')
@csrf_exempt
def success(request):
""" обработчик для SuccessURL """
form = SuccessRedirectForm(request.POST or request.GET)
if form.is_valid():
id, sum = form.cleaned_data['InvId'], form.cleaned_data['OutSum']
url = reverse('checkout.step', args=('receipt', ))
return HttpResponseRedirect('%s?order=%s' % (url, id))
return render(request, 'robokassa/error.html', {'form': form})
@csrf_exempt
def fail(request):
""" обработчик для FailURL """
form = FailRedirectForm(request.POST or request.GET)
try:
del request.session[Storage.SESSION_KEY]
except KeyError:
pass
if form.is_valid():
id, sum = form.cleaned_data['InvId'], form.cleaned_data['OutSum']
fail_page_visited.send(sender=form, InvId=id, OutSum=sum, extra=form.extra_params())
context = {'InvId': id, 'OutSum': sum, 'form': form}
context.update(form.extra_params())
return render(request, 'robokassa/fail.html', context)
return render(request, 'robokassa/error.html', {'form': form})
@csrf_exempt
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
18931,
198,
11748,
7007,
198,
11748,
12234,
8019,
198,
198,
6738,
42625,
14208,
13,
4023,
13,
26209,
1330,
367,
29281,
26429,
11,
367,
29281,
31077,
11,
367,
... | 2.280102 | 1,171 |
import os
import shutil
from os import path
main()
| [
11748,
28686,
201,
198,
11748,
4423,
346,
201,
198,
6738,
28686,
1330,
3108,
201,
198,
220,
220,
220,
220,
201,
198,
12417,
3419,
201,
198
] | 2.44 | 25 |
import time
# anchore modules
import anchore_engine.services.common
import anchore_engine.subsys.simplequeue
import anchore_engine.subsys.servicestatus
from anchore_engine.subsys import logger
import anchore_engine.subsys.metrics
from anchore_engine.service import ApiService, LifeCycleStages
# A regular queue configuration with no extra features enabled
default_queue_config = {
'max_outstanding_messages': -1,
'visibility_timeout': 0
}
# From services.common, is only used for service init
#queue_names = ['images_to_analyze', 'error_events', 'watcher_tasks', 'feed_sync_tasks']
# Replaces the above with configuration options for each queue
queues_to_bootstrap = {
'images_to_analyze': default_queue_config,
# 'error_events': default_queue_config,
'event_log': default_queue_config,
'watcher_tasks': default_queue_config,
'feed_sync_tasks': {
'max_outstanding_messages': 1,
'visibility_timeout': 3600 # Default 1 hour timeout for messages outstanding
}
}
queues = {}
# monitors
def _init_queues(queue_configs):
"""
Initialize the queues
:param queue_configs: dict mapping a queue name to a configuration dict
:return:
"""
for st in anchore_engine.services.common.subscription_types:
if st not in queues_to_bootstrap:
queues_to_bootstrap[st] = default_queue_config
for qname, config in queue_configs.items():
retries = 5
for i in range(0, retries):
try:
logger.info('Initializing queue: {}'.format(qname))
anchore_engine.subsys.simplequeue.create_queue(name=qname, max_outstanding_msgs=config.get(
'max_outstanding_messages', -1), visibility_timeout=config.get('visibility_timeout', 0))
break
except Exception as err:
time.sleep(1)
else:
raise Exception('Could not bootstrap queues: {}'.format(qname))
return True
| [
11748,
640,
198,
198,
2,
12619,
382,
13103,
198,
11748,
12619,
382,
62,
18392,
13,
30416,
13,
11321,
198,
11748,
12619,
382,
62,
18392,
13,
7266,
17597,
13,
36439,
36560,
198,
11748,
12619,
382,
62,
18392,
13,
7266,
17597,
13,
3168,
2... | 2.586701 | 767 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 16:12:04 2019
@author: 0rion
"""
if __name__ == '__main__' :
rm_upper= round(OneRepMax.upper(int(input("Upper Body 4-6 Rep Max in kg: "))))
rm_lower= round(OneRepMax.lower(int(input("Lower Body 4-6 Rep Max in kg: "))))
print("""Recommended upper 1RM: {}\n\nRecommended lower 1RM: {}\n""".format(rm_upper,rm_lower)) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
19480,
4280,
220,
718,
1467,
25,
1065,
25,
3023,
13130,
201,
198,
201,
198,
31,
9800,
25,
657,
81,
295,
201,
198,
37811,
201,
198,
... | 2.30814 | 172 |
import utils
import em
if __name__ == '__main__':
data_seed = 1
X = utils.gen_sample(mean=[-5, 1, 8], std=[1.1, 1.1, 1.2], seed=data_seed, N=600, K=3, dims=1)
model_seed = 2
k = 3 # Number of mixture components (clusters)
gaussian_mixture, responsibilities = utils.init(X, K=k, seed=model_seed)
gaussian_mixture, responsibilities, LL = em.run(X, gaussian_mixture, plot_results=True) | [
11748,
3384,
4487,
198,
11748,
795,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1366,
62,
28826,
796,
352,
198,
220,
220,
220,
1395,
796,
3384,
4487,
13,
5235,
62,
39873,
7,
32604,
41888,
... | 2.426036 | 169 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The evolved operator ansatz."""
from typing import Optional, Union, List
import numpy as np
from qiskit.circuit import Parameter, QuantumRegister, QuantumCircuit
from .n_local.n_local import NLocal
class EvolvedOperatorAnsatz(NLocal):
"""The evolved operator ansatz."""
def __init__(
self,
operators=None,
reps: int = 1,
evolution=None,
insert_barriers: bool = False,
name: str = "EvolvedOps",
parameter_prefix: Union[str, List[str]] = "t",
initial_state: Optional[QuantumCircuit] = None,
):
"""
Args:
operators (Optional[Union[OperatorBase, QuantumCircuit, list]): The operators to evolve.
If a circuit is passed, we assume it implements an already evolved operator and thus
the circuit is not evolved again. Can be a single operator (circuit) or a list of
operators (and circuits).
reps: The number of times to repeat the evolved operators.
evolution (Optional[EvolutionBase]): An opflow converter object to construct the evolution.
Defaults to Trotterization.
insert_barriers: Whether to insert barriers in between each evolution.
name: The name of the circuit.
parameter_prefix: Set the names of the circuit parameters. If a string, the same prefix
will be used for each parameters. Can also be a list to specify a prefix per
operator.
initial_state: A `QuantumCircuit` object to prepend to the circuit.
"""
if evolution is None:
# pylint: disable=cyclic-import
from qiskit.opflow import PauliTrotterEvolution
evolution = PauliTrotterEvolution()
super().__init__(
initial_state=initial_state,
parameter_prefix=parameter_prefix,
reps=reps,
insert_barriers=insert_barriers,
name=name,
)
self._operators = None
if operators is not None:
self.operators = operators
self._evolution = evolution
# a list of which operators are parameterized, used for internal settings
self._ops_are_parameterized = None
def _check_configuration(self, raise_on_failure: bool = True) -> bool:
"""Check if the current configuration is valid."""
if not super()._check_configuration(raise_on_failure):
return False
if self.operators is None:
if raise_on_failure:
raise ValueError("The operators are not set.")
return False
return True
@property
@property
def evolution(self):
"""The evolution converter used to compute the evolution.
Returns:
EvolutionBase: The evolution converter used to compute the evolution.
"""
return self._evolution
@evolution.setter
def evolution(self, evol) -> None:
"""Sets the evolution converter used to compute the evolution.
Args:
evol (EvolutionBase): An opflow converter object to construct the evolution.
"""
self._invalidate()
self._evolution = evol
@property
def operators(self):
"""The operators that are evolved in this circuit.
Returns:
list: The operators to be evolved (and circuits) contained in this ansatz.
"""
return self._operators
@operators.setter
def operators(self, operators=None) -> None:
"""Set the operators to be evolved.
operators (Optional[Union[OperatorBase, QuantumCircuit, list]): The operators to evolve.
If a circuit is passed, we assume it implements an already evolved operator and thus
the circuit is not evolved again. Can be a single operator (circuit) or a list of
operators (and circuits).
"""
operators = _validate_operators(operators)
self._invalidate()
self._operators = operators
self.qregs = [QuantumRegister(self.num_qubits, name="q")]
# TODO: the `preferred_init_points`-implementation can (and should!) be improved!
@property
def preferred_init_points(self):
"""Getter of preferred initial points based on the given initial state."""
if self._initial_state is None:
return None
else:
# If an initial state was set by the user, then we want to make sure that the VQE does
# not start from a random point. Thus, we return an all-zero initial point for the
# optimizer which is used (unless it gets overwritten by a higher-priority setting at
# runtime of the VQE).
# However, in order to determine the correct length, we must build the QuantumCircuit
# first, because otherwise the operators may not be set yet.
self._build()
return np.zeros(self.reps * len(self.operators), dtype=float)
| [
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
33448,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
921,
743,
198,
2,
7330,
257,
486... | 2.611586 | 2,106 |
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
import os
import argparse
"""
The script creates 10 directories with data for subject classification.
The script creates a Monte Carlo Cross Validation split (the same test set for all train/val splits).
Example use:
for preparing MPD data:
python3 k_folds_prepare_data.py --k_folds=10 --dataset=MPD
for preparing TREC6 data:
python3 k_folds_prepare_data.py --k_folds=10 --dataset=TREC6
"""
parser = argparse.ArgumentParser(description='Classify data')
parser.add_argument('--train_val_split', required=False, type=float, default=0.8)
parser.add_argument('--k_folds', required=False, type=int, default=10)
parser.add_argument('--dataset', required=True, type=str, default="MPD")
args = parser.parse_args()
# tran val split proportion
train_val_split = args.train_val_split
# number of folds to create
folds = args.k_folds
# dataset
dataset = args.dataset
path = f"./data/{dataset}"
try:
os.mkdir(f'./data/{dataset}/test')
except FileExistsError:
None
# make the random division the same
np.random.seed(13)
data = pd.read_excel("{}{}.xlsx".format(path, dataset)).sample(frac=1)
data = data[['row', 'subject_category', 'sentence']]
data['subject_category'] = '__label__' + data['subject_category'].astype(str)
# create k_folds from original dataset
data_folds = np.array_split(data, 10)
df_test = data_folds[0]
df_test = df_test.drop("row", axis=1)
df_test['subject_category'] = df_test["subject_category"].str.replace("__label__", "")
df_test.to_excel(os.path.join("./data/test", "test.xlsx"))
# list of folder paths with folds
folds_path2 = []
for i in range(folds):
folds_path2.append('./data/{}/model_subject_category_{}/'.format(dataset, str(i)))
try:
os.mkdir('./data/{}/model_subject_category_{}'.format(dataset, str(i)))
except FileExistsError:
None # continue
df = data
# make test part the same for all models
df_test = data_folds[0]
# prepare the rest of data for train val split
df_trainval = pd.concat([df, df_test])
df_trainval = df_trainval.drop_duplicates(keep=False)
# test split
df_test = df_test.drop("row", axis=1)
df_test_sent = df_test
df_test_sent = df_test_sent.drop('subject_category', axis=1)
df_test_subj = df_test
df_test_subj.to_csv(os.path.join(folds_path2[i], "test_.tsv"), index=False, header=False, encoding='utf-8', sep='\t')
# train split
df_train = df_trainval.sample(frac=train_val_split)
# val split
df_val = pd.concat([df_trainval, df_train])
df_val = df_val.drop_duplicates(keep=False)
df_val = df_val.drop("row", axis=1)
df_val_sent = df_val
df_val_sent = df_val_sent.drop('subject_category', axis=1)
df_val_subj = df_val
df_val_subj.to_csv(os.path.join(folds_path2[i], "dev.tsv"), index=False, header=False, encoding='utf-8', sep='\t')
df_train = df_train.drop("row", axis=1)
df_train_sent = df_train
df_train_sent = df_train_sent.drop('subject_category', axis=1)
df_train_subj = df_train
df_train_subj.to_csv(os.path.join(folds_path2[i], "train.tsv"), index=False, header=False, encoding='utf-8', sep='\t') | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
509,
37,
727,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
198,
37811,
198,
464,
4226,
8075,
838,
29196,
351,
... | 2.532385 | 1,266 |
import numpy as np
import tensorflow as tf
from layers import *
from conv import *
from block import *
from model import *
################
################
################
################
| [
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
11685,
1330,
1635,
198,
6738,
3063,
1330,
1635,
198,
6738,
2512,
1330,
1635,
198,
6738,
2746,
1330,
1635,
198,
198,
14468,
198,
198,
14468,
1... | 3.793103 | 58 |
import sys
import time
import struct
import serial
from hexdump import hexdump
from tqdm import tqdm
SERIALPORT = '/dev/ttyUSB0'
BAUDRATE = 9600
DEBUG = False
if __name__ == "__main__":
# example usage
with serial.Serial(SERIALPORT, BAUDRATE, timeout=0.2) as ser:
handshake(ser)
devices = device_inquiry(ser)
#print("devices: {}".format(devices))
device_select(ser, devices[0])
clocks = clock_inquiry(ser)
#print("clocks: {}".format(clocks))
clock_select(ser, clocks[0])
multi_ratios = multiplication_ratio_inquiry(ser)
#print("multiplication ratios: {}".format(multi_ratios))
operating_freqs = operating_freq_inquiry(ser)
#print("operating frequencies: {}".format(operating_freqs))
ratio1 = multi_ratios[0][0]
ratio2 = multi_ratios[1][0]
base1 = operating_freqs[0]['max_mhz'] / ratio1
base2 = operating_freqs[1]['max_mhz'] / ratio2
assert base1 == base2, "failed to find base clock for both multipliers"
bitrate_select(ser, BAUDRATE, base1, 2, ratio1, ratio2)
user_boot_mat = user_boot_mat_inquiry(ser)
#print("user boot memory area: {}".format(user_boot_mat))
user_mat = user_mat_inquiry(ser)
#print("user memory area: {}".format(user_mat))
# any key code is accepted if the key code has not been set
keycode = b'\x00' * 16
keycode_check(ser, keycode)
user_boot_mat_checksum = user_boot_mat_checksum_inquiry(ser)
#print("user boot memory checksum: {}".format(user_boot_checksum))
user_mat_checksum = user_mat_checksum_inquiry(ser)
#print("user memory checksum: {}".format(user_mat_checksum))
mem_area = 0 # user boot memory area
start_addr = user_boot_mat[0]['start_addr']
end_addr = user_boot_mat[0]['end_addr']
data = read_memory(ser, mem_area, start_addr, end_addr+1, 0x40)
with open('user_boot.bin', 'wb') as f:
f.write(data)
checksum = sum(data) & 0xFFFFFFFF
assert user_boot_mat_checksum == checksum, f"failed boot checksum validation: {user_boot_mat_checksum} != {checksum}"
mem_area = 1 # user memory area
start_addr = user_mat[0]['start_addr']
end_addr = user_mat[0]['end_addr']
data = read_memory(ser, mem_area, start_addr, end_addr+1, 0x40)
with open('user.bin', 'wb') as f:
f.write(data)
checksum = sum(data + keycode) & 0xFFFFFFFF
assert user_mat_checksum == checksum, f"failed user checksum validation (not sure why this fails for some ecus): {user_mat_checksum} != {checksum}"
| [
11748,
25064,
198,
11748,
640,
198,
11748,
2878,
198,
11748,
11389,
198,
6738,
17910,
39455,
1330,
17910,
39455,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
35009,
12576,
15490,
796,
31051,
7959,
14,
42852,
27155,
15,
6,
... | 2.323478 | 1,150 |
from typing import List
| [
6738,
19720,
1330,
7343,
628
] | 5 | 5 |
"""The jobs package is a :py:mod:`nameko` microservice which handles user file management.
This service manages all file interactions. This includes both explicit user interaction with his/her files and managing
files generated by jobs.
To run this microservice it needs to connect to a message broker - `RabbitMQ`_ is used here. Next to this server a
persistent file storage needs to be mounted to the service to manage user files. In case the whole
EODC-OpenEO-Driver setup is used this file storage also needs to be available to the gateway service. Besides this the
service does not directly connect to another microservice and also does not need a database.
Also a set of configuration settings needs to be provided. Settings are excepted to be made available as
environment variables. All environment variables need to be prefixed with ``OEO_`` (short hand for OpenEO). The full list
of required environment variables can be found in :py:class:`~files.dependencies.settings.SettingKeys`. It should
be mentioned that NO defaults are defined.
Besides this similar considerations apply as for the :mod:`~capabilities`.
.. _RabbitMQ: https://www.rabbitmq.com/
"""
| [
37811,
464,
3946,
5301,
318,
257,
1058,
9078,
25,
4666,
25,
63,
3672,
7204,
63,
4580,
15271,
543,
17105,
2836,
2393,
4542,
13,
198,
198,
1212,
2139,
15314,
477,
2393,
12213,
13,
770,
3407,
1111,
7952,
2836,
10375,
351,
465,
14,
372,
... | 4.242754 | 276 |
"""
Quiz Problem 5
Write a Python function that returns the sublist of strings in aList
that contain fewer than 4 characters. For example, if
aList = ["apple", "cat", "dog", "banana"]
- your function should return: ["cat", "dog"]
This function takes in a list of strings and returns a list of
strings. Your function should not modify aList.
"""
| [
37811,
198,
4507,
528,
20647,
642,
198,
198,
16594,
257,
11361,
2163,
326,
5860,
262,
850,
4868,
286,
13042,
287,
257,
8053,
220,
198,
5562,
3994,
7380,
621,
604,
3435,
13,
1114,
1672,
11,
611,
220,
628,
220,
257,
8053,
796,
14631,
... | 3.612245 | 98 |
from enum import Enum
| [
6738,
33829,
1330,
2039,
388,
628
] | 3.833333 | 6 |
valores = list()
valores.append(5)
for cont in range(0, 5):
valores.append(int(input("Digite um valor:")))
for v, c in enumerate(valores):
print(f'Na posição {v} encontrei o valor {c}')
print("Chegeui no final da lista")
a = [1, 4, 6, 8]
b = a[:]
b[2] = 8
print(f'Lista A: {a}')
print(f'Lista B: {b}') | [
2100,
2850,
796,
1351,
3419,
198,
2100,
2850,
13,
33295,
7,
20,
8,
198,
198,
1640,
542,
287,
2837,
7,
15,
11,
642,
2599,
198,
220,
220,
220,
1188,
2850,
13,
33295,
7,
600,
7,
15414,
7203,
19511,
578,
23781,
1188,
273,
11097,
22305... | 2.115646 | 147 |
from web import app
app.run(
debug=True,
host='127.0.0.1',
port=5000,
)
| [
6738,
3992,
1330,
598,
198,
198,
1324,
13,
5143,
7,
198,
220,
220,
220,
14257,
28,
17821,
11,
198,
220,
220,
220,
2583,
11639,
16799,
13,
15,
13,
15,
13,
16,
3256,
198,
220,
220,
220,
2493,
28,
27641,
11,
198,
8,
198
] | 1.976744 | 43 |
#!/usr/bin/env python
#
# utilities.py - Module providing utility functions for OVF/OVA handling
#
# February 2017, Glenn F. Matthews
# Copyright (c) 2013-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Module providing utility functions for OVF and OVA handling.
**Functions**
.. autosummary::
:nosignatures:
int_bytes_to_programmatic_units
parse_manifest
programmatic_bytes_to_int
"""
import logging
import re
logger = logging.getLogger(__name__)
def parse_manifest(manifest_text):
r"""Parse the given manifest file contents into a dictionary.
Args:
manifest_text (str): Contents of an OVF manifest file
Returns:
dict: Mapping of filename to (algorithm, checksum_string)
Examples:
::
>>> result = parse_manifest(
... "SHA1(package.ovf)= 237de026fb285b85528901da058475e56034da95\n"
... "SHA1(vmdisk1.vmdk)= 393a66df214e192ffbfedb78528b5be75cc9e1c3\n"
... )
>>> sorted(result.keys())
['package.ovf', 'vmdisk1.vmdk']
>>> result["package.ovf"]
('SHA1', '237de026fb285b85528901da058475e56034da95')
>>> result["vmdisk1.vmdk"]
('SHA1', '393a66df214e192ffbfedb78528b5be75cc9e1c3')
"""
result = {}
for line in manifest_text.split("\n"):
if not line:
continue
# Per the OVF spec, the correct format for a manifest line is:
# <algo>(<filename>)= <checksum>
# but we've seen examples in the wild that aren't quite right, like:
# <algo> (<filename>)=<checksum>
# Be forgiving of such errors:
match = re.match(r"^\s*([A-Z0-9]+)\s*\((.+)\)\s*=\s*([0-9a-f]+)\s*$",
line)
if match:
result[match.group(2)] = (match.group(1), match.group(3))
else:
logger.error('Unexpected or invalid manifest line: "%s"', line)
return result
def programmatic_bytes_to_int(base_value, programmatic_units):
"""Convert a byte value expressed in programmatic units to the raw number.
Inverse operation of :func:`int_bytes_to_programmatic_units`.
.. seealso::
`DMTF DSP0004, Common Information Model (CIM) Infrastructure
Specification 2.5
<http://www.dmtf.org/standards/published_documents/DSP0004_2.5.pdf>`_
Args:
base_value (str): Base value string (value of ``ovf:capacity``, etc.)
programmatic_units (str): Programmatic units string (value of
``ovf:capacityAllocationUnits``, etc.)
Returns:
int: Number of bytes
Examples:
::
>>> programmatic_bytes_to_int("128", "byte")
128
>>> programmatic_bytes_to_int("1", "byte * 2^10")
1024
>>> programmatic_bytes_to_int("128", "byte * 2^20")
134217728
>>> programmatic_bytes_to_int("512", "MegaBytes")
536870912
"""
if not programmatic_units:
return int(base_value)
# programmatic units like 'byte * 2^30'
match = re.search(r"2\^(\d+)", programmatic_units)
if match:
return int(base_value) << int(match.group(1))
# programmatic units like 'MegaBytes'
si_prefixes = ["", "kilo", "mega", "giga", "tera"]
match = re.search("^(.*)bytes$", programmatic_units, re.IGNORECASE)
if match:
shift = si_prefixes.index(match.group(1).lower())
# Technically the correct answer would be:
# return int(base_value) * (1000 ** shift)
# but instead we'll reflect common usage:
return int(base_value) << (10 * shift)
if programmatic_units and programmatic_units != 'byte':
logger.warning("Unknown programmatic units string '%s'",
programmatic_units)
return int(base_value)
def int_bytes_to_programmatic_units(byte_value):
"""Convert a byte count into OVF-style bytes + multiplier.
Inverse operation of :func:`programmatic_bytes_to_int`
Args:
byte_value (int): Number of bytes
Returns:
tuple: ``(base_value, programmatic_units)``
Examples:
::
>>> int_bytes_to_programmatic_units(2147483648)
('2', 'byte * 2^30')
>>> int_bytes_to_programmatic_units(2147483647)
('2147483647', 'byte')
>>> int_bytes_to_programmatic_units(134217728)
('128', 'byte * 2^20')
>>> int_bytes_to_programmatic_units(134217729)
('134217729', 'byte')
"""
shift = 0
byte_value = int(byte_value)
while byte_value % 1024 == 0:
shift += 10
byte_value /= 1024
byte_str = str(int(byte_value))
if shift == 0:
return (byte_str, "byte")
return (byte_str, "byte * 2^{0}".format(shift))
if __name__ == "__main__": # pragma: no cover
import doctest
doctest.testmod()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
20081,
13,
9078,
532,
19937,
4955,
10361,
5499,
329,
440,
53,
37,
14,
41576,
9041,
198,
2,
198,
2,
3945,
2177,
11,
17551,
376,
13,
22233,
198,
2,
15069,
357,
66,
8,
22... | 2.39819 | 2,210 |
"""
Unit tests for the class NNModifier in nn_modifiers.py
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
from copy import deepcopy
import numpy as np
import os
import six
from shutil import rmtree
# Local imports
from nn import nn_constraint_checkers
from nn import nn_modifiers
from nn.neural_network import NeuralNetwork
from nn.nn_visualise import visualise_nn
from unittest_neural_network import generate_cnn_architectures, generate_mlp_architectures
from utils.base_test_class import BaseTestClass, execute_tests
def test_if_two_networks_are_equal(net1, net2, false_if_net1_is_net2=True):
""" Returns true if both net1 and net2 are equal.
If any part of net1 is copied onto net2, then the output will be false
if false_if_net1_is_net2 is True (default).
"""
is_true = True
for key in net1.__dict__.keys():
val1 = net1.__dict__[key]
val2 = net2.__dict__[key]
is_true = True
if isinstance(val1, dict):
if false_if_net1_is_net2:
is_true = is_true and (val1 is not val2)
for val_key in val1.keys():
is_true = is_true and np.all(val1[val_key] == val2[val_key])
elif hasattr(val1, '__iter__'):
if false_if_net1_is_net2:
is_true = is_true and (val1 is not val2)
is_true = is_true and np.all(val1 == val2)
else:
is_true = is_true and val1 == val2
if not is_true: # break here if necessary
return is_true
return is_true
def test_for_orig_vs_modifications(save_dir, save_prefix, old_nn,
get_modifications, constraint_checker, write_result):
""" Tests for the original network and the modifications. Also, visualises the networks.
"""
visualise_nn(old_nn, os.path.join(save_dir, '%s_orig'%(save_prefix)))
old_nn_copy = deepcopy(old_nn)
# Get the modified networks.
new_nns = get_modifications(old_nn)
# Go through each new network.
for new_idx, new_nn in enumerate(new_nns):
assert isinstance(new_nn, NeuralNetwork)
assert constraint_checker(new_nn)
visualise_nn(new_nn, os.path.join(save_dir, '%s_%d'%(save_prefix, new_idx)))
# Finally test if the networks have not changed.
assert test_if_two_networks_are_equal(old_nn, old_nn_copy)
write_result('%s (%s):: #new-networks: %d.'%(
save_prefix, old_nn.nn_class, len(new_nns)), 'test_result')
class NNModifierTestCase(BaseTestClass):
  """ Unit tests for the NNModifier class. """

  def __init__(self, *args, **kwargs):
    """ Constructor. Builds the test networks, the constraint checkers and the
        modifiers shared by the individual tests. """
    super(NNModifierTestCase, self).__init__(*args, **kwargs)
    self.cnns = generate_cnn_architectures()
    self.mlps = generate_mlp_architectures()
    self.save_dir = '../scratch/unittest_modifier_class/'
    self.cnn_constraint_checker = nn_constraint_checkers.CNNConstraintChecker(
      50, 4, np.inf, 4.0, 5, 5, 100, 8000, 8)
    self.mlp_constraint_checker = nn_constraint_checkers.MLPConstraintChecker(
      50, 4, np.inf, 4.0, 5, 5, 100, 8000, 8)
    self.cnn_modifier = nn_modifiers.NNModifier(self.cnn_constraint_checker)
    self.mlp_modifier = nn_modifiers.NNModifier(self.mlp_constraint_checker)
    # Modifier without a constraint checker, used by the list-input test.
    self.modifier_wo_cc = nn_modifiers.NNModifier(None)

  def _get_modifier_and_cc(self, nn):
    """ Returns the (modifier, constraint_checker) pair for the network nn. """
    if nn.nn_class == 'cnn':
      modifier = self.cnn_modifier
      constraint_checker = self.cnn_constraint_checker
    else:
      modifier = self.mlp_modifier
      constraint_checker = self.mlp_constraint_checker
    return modifier, constraint_checker

  def test_get_primitives(self):
    """ Test for the get_primitives_grouped_by_type method. """
    self.report('Testing get_primitives_grouped_by_type')
    test_nns = self.cnns + self.mlps
    primitives, _ = self.cnn_modifier.get_primitives_grouped_by_type(self.cnns[0])
    self.report('Types of primitives: %s'%(primitives.keys()), 'test_result')
    for idx, nn in enumerate(test_nns):
      nn_copy = deepcopy(nn)
      modifier, _ = self._get_modifier_and_cc(nn)
      primitives, _ = modifier.get_primitives_grouped_by_type(nn)
      report_str = '%d (%s n=%d,m=%d):: '%(idx, nn.nn_class, nn.num_layers,
                                           nn.get_total_num_edges())
      total_num_primitives = 0
      for _, list_or_prims in six.iteritems(primitives):
        report_str += '%d, '%(len(list_or_prims))
        total_num_primitives += len(list_or_prims)
      report_str += 'tot=%d'%(total_num_primitives)
      self.report(report_str, 'test_result')
      # Fetching the primitives must not mutate the network itself.
      assert test_if_two_networks_are_equal(nn_copy, nn)

  def test_get_single_step_modifications(self):
    """ Tests single step modifications. """
    self.report('Testing single step modifications.')
    save_dir = os.path.join(self.save_dir, 'single_step')
    if os.path.exists(save_dir):
      rmtree(save_dir)
    # Now iterate through the test networks
    test_nns = self.cnns + self.mlps
    for idx, old_nn in enumerate(test_nns):
      save_prefix = str(idx)
      modifier, constraint_checker = self._get_modifier_and_cc(old_nn)
      # Both branches of the original idx-dependent conditional assigned the
      # same value, so the dead branch has been removed.
      num_modifications = 'all'
      get_modifications = lambda arg_nn: modifier.get_single_step_modifications(
                            arg_nn, num_modifications)
      test_for_orig_vs_modifications(save_dir, save_prefix, old_nn,
        get_modifications, constraint_checker, self.report)

  def test_multi_step_modifications(self):
    """ Tests multi step modifications. """
    num_steps = 4
    self.report('Testing %d-step modifications.'%(num_steps))
    num_modifications = 20
    save_dir = os.path.join(self.save_dir, 'multi_step_%d'%(num_steps))
    if os.path.exists(save_dir):
      rmtree(save_dir)
    # Now iterate through the test networks
    test_nns = self.cnns + self.mlps
    for idx, old_nn in enumerate(test_nns):
      save_prefix = str(idx)
      modifier, constraint_checker = self._get_modifier_and_cc(old_nn)
      get_modifications = lambda arg_nn: modifier.get_multi_step_modifications(
                            arg_nn, num_steps, num_modifications)
      test_for_orig_vs_modifications(save_dir, save_prefix, old_nn,
        get_modifications, constraint_checker, self.report)

  def test_call(self):
    """ Tests the __call__ function with a single input of the modifier. """
    self.report('Testing the __call__ function with single input of the modifier.')
    num_modifications = 20
    num_steps_probs = [0.5, 0.25, 0.125, 0.075, 0.05]
    save_dir = os.path.join(self.save_dir, 'modifier_call_single')
    if os.path.exists(save_dir):
      rmtree(save_dir)
    test_nns = self.cnns + self.mlps
    for idx, old_nn in enumerate(test_nns):
      save_prefix = str(idx)
      modifier, constraint_checker = self._get_modifier_and_cc(old_nn)
      get_modifications = lambda arg_nn: modifier(arg_nn, num_modifications,
                                                  num_steps_probs)
      test_for_orig_vs_modifications(save_dir, save_prefix, old_nn,
        get_modifications, constraint_checker, self.report)

  def test_call_with_list(self):
    """ Tests the __call__ function with a list of inputs to the modifier. """
    self.report('Testing the __call__ function with a list of inputs.')
    num_modifications = 40
    num_steps_probs = [0.5, 0.25, 0.125, 0.075, 0.05]
    save_dir = os.path.join(self.save_dir, 'modifier_call_list')
    if os.path.exists(save_dir):
      rmtree(save_dir)
    test_probs = [self.cnns, self.mlps, generate_mlp_architectures('class')]
    for idx, prob in enumerate(test_probs):
      save_prefix = str(idx)
      # No constraint checker here: only type and count of the results are checked.
      modifier = self.modifier_wo_cc
      modifications = modifier(prob, num_modifications, num_steps_probs)
      for new_idx, new_nn in enumerate(modifications):
        assert isinstance(new_nn, NeuralNetwork)
        visualise_nn(new_nn, os.path.join(save_dir, '%s_%d'%(save_prefix, new_idx)))
      self.report('With list of %d nns(%s):: #new-networks: %d.'%(
        len(prob), prob[0].nn_class, len(modifications)), 'test_result')
# Run all test cases defined in this module when executed as a script.
if __name__ == '__main__':
  execute_tests()
| [
37811,
198,
220,
11801,
5254,
329,
262,
1398,
399,
45,
5841,
7483,
287,
299,
77,
62,
4666,
13350,
13,
9078,
198,
220,
1377,
479,
392,
292,
14814,
31,
6359,
13,
11215,
84,
13,
15532,
198,
37811,
198,
198,
2,
279,
2645,
600,
25,
155... | 2.347776 | 3,462 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-11-02 14:59
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1558,
319,
12131,
12,
1157,
12,
2999,
1478,
25,
3270,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198... | 2.709091 | 55 |
# URI Online Judge 1871

# Read lines of integers until the sentinel pair "0 0"; for every other
# line, print the sum of its numbers with all '0' digits removed.
while True:
    numbers = list(map(int, input().split()))
    if numbers[0] == 0 and numbers[1] == 0:
        break
    print(str(sum(numbers)).replace('0', ''))
2,
43975,
7467,
8974,
1248,
4869,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
24481,
4763,
796,
685,
600,
7,
72,
8,
329,
1312,
287,
5128,
22446,
35312,
3419,
60,
198,
220,
220,
220,
611,
24481,
4763,
58,
15,
60,
6624,
657,
290,
... | 2.166667 | 96 |
from hippy.builtin_klass import wrap_method
from hippy.builtin import ThisUnwrapper, Optional
from hippy.klass import def_class
from hippy.objects.base import W_Root
from hippy.objects.instanceobject import W_InstanceObject
from hippy import consts
@wrap_method(['interp', ThisUnwrapper(W_ApplevelArrayIterator),
Optional(W_Root)],
name='ArrayIterator::__construct')
@wrap_method([], name='ArrayIterator::current')
@wrap_method([], name='ArrayIterator::next')
@wrap_method([], name='ArrayIterator::key')
@wrap_method([], name='ArrayIterator::rewind')
@wrap_method([], name='ArrayIterator::valid')
k_ArrayIterator = def_class('ArrayIterator', [
ArrayIterator_construct,
ArrayIterator_current,
ArrayIterator_next,
ArrayIterator_key,
ArrayIterator_rewind,
ArrayIterator_valid],
[('storage', consts.ACC_PRIVATE)],
instance_class=W_ApplevelArrayIterator,
implements=["Iterator"]
)
| [
6738,
18568,
88,
13,
18780,
259,
62,
74,
31172,
1330,
14441,
62,
24396,
198,
6738,
18568,
88,
13,
18780,
259,
1330,
770,
3118,
48553,
11,
32233,
198,
6738,
18568,
88,
13,
74,
31172,
1330,
825,
62,
4871,
198,
6738,
18568,
88,
13,
482... | 2.794286 | 350 |
# -*- coding: utf-8 -*-
import news_processor
import results_processor
import settings
import time
from helper_methods import *
# MAIN (Baker method)
print_presentation()
settings.init()

# Input sections
option = options_input_section()

if option == "generar_epu":
    # Build the EPU index from the individual newspaper results.
    start_time = time.time()
    results_processor.generate_epu_index()
else:
    start_time = time.time()
    paper = option
    print_processing_message(paper)
    # Process the news for this paper (produces the raw results).
    news_processor.process_news(paper)
    # Scale the raw results by each month's article count.
    results_processor.scale_to_relative_count(paper)
    # Standardise the results to unit standard deviation.
    results_processor.scale_to_unit_standard_deviation(paper)

print_finish()
print("\n--- Tiempo de ejecución: %s segundos --- \n" % (round(time.time() - start_time, 1)))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
1705,
62,
41341,
198,
11748,
2482,
62,
41341,
198,
11748,
6460,
198,
11748,
640,
198,
6738,
31904,
62,
24396,
82,
1330,
1635,
198,
198,
2,
8779,
1268,
357,
33,
... | 2.993548 | 310 |
"""Auto-generated file, do not edit by hand. HN metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HN = PhoneMetadata(id='HN', country_code=504, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[237-9]\\d{7}', possible_number_pattern='\\d{8}'),
fixed_line=PhoneNumberDesc(national_number_pattern='2(?:2(?:0[019]|1[1-36]|[23]\\d|4[056]|5[57]|7[01389]|8[0146-9]|9[012])|4(?:2[3-59]|3[13-689]|4[0-68]|5[1-35])|5(?:4[3-5]|5\\d|6[56]|74)|6(?:4[0-378]|[56]\\d|[78][0-8]|9[01])|7(?:6[46-9]|7[02-9]|8[34])|8(?:79|8[0-35789]|9[1-57-9]))\\d{4}', possible_number_pattern='\\d{8}', example_number='22123456'),
mobile=PhoneNumberDesc(national_number_pattern='[37-9]\\d{7}', possible_number_pattern='\\d{8}', example_number='91234567'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='199', possible_number_pattern='\\d{3}', example_number='199'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{4})(\\d{4})', format=u'\\1-\\2')])
| [
37811,
27722,
12,
27568,
2393,
11,
466,
407,
4370,
416,
1021,
13,
367,
45,
20150,
37811,
198,
6738,
11485,
746,
261,
19261,
14706,
1330,
7913,
26227,
11,
14484,
15057,
24564,
11,
14484,
9171,
14706,
198,
198,
11909,
11651,
62,
47123,
28... | 2.62358 | 704 |
import unittest
from report import Instruction
import datetime
# Discover and run this module's unittest test cases when run as a script.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
989,
1330,
46486,
198,
11748,
4818,
8079,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 3 | 38 |
import os
import requests
from re import sub
from PIL import Image
from io import BytesIO
from colorama import Fore
| [
11748,
28686,
198,
11748,
7007,
198,
6738,
302,
1330,
850,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
3124,
1689,
1330,
4558,
198
] | 3.866667 | 30 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import sys
import os
import platform
import datetime
import subprocess
from subprocess import check_output
import os.path
import shutil
from argparse import ArgumentParser
from os.path import join, normpath, basename
import re
import importlib
sys.path.append("tests")
from clr import *
# Test suites to run, in order.
testList = ['Syntax', 'Semantic', 'Advanced', 'Samples']

os.system('') # activate VT100 mode for windows console

# Paths to the built compiler binaries; filled in per-platform below.
azslcRelease = ""
azslcDebug = ""
# Optional path to Atom's bundled shader externals, derived from --dev.
az3rdParty = ""

parser = ArgumentParser()
parser.add_argument(
    '--j', dest='jenkin',
    action='store_true', default=False,
    help="Use jenkin configuration to build and test",
)
parser.add_argument(
    '--dev', dest='atomDev',
    type=str,
    help="The path of atom dev(to locate DXC-az version), If not specified, default windows 10 Dxc.exe will be used",
)
args = parser.parse_args()

# String flag ("ON"/"OFF") forwarded to the platform build scripts.
isJenkin = "OFF"
if args.jenkin is True:
    isJenkin = "ON"
if args.atomDev is not None:
    az3rdParty = args.atomDev + "/Gems/Atom/Asset/Shader/External"
# Part 0 - Prerequisites
# Verify the build/test toolchain before doing anything expensive.
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
print ( fg.CYAN + style.BRIGHT + "   Building AZSLc ..." + style.RESET_ALL )
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
missingCritical = 0
missingOptional = 0
# Windows
if os.name == 'nt':
    from shutil import which
    if which("cmake") is None:
        # BUGFIX: the original message formatted an undefined name
        # (criticalCMake), which raised a NameError on this error path
        # instead of reporting the missing CMake.
        print ( fg.RED + style.BRIGHT + "[ X ] Expected CMake 3.15+ (in PATH or as command)" + style.RESET_ALL )
        missingCritical = missingCritical + 1
try:
    import yaml
except ImportError as err:
    # pyyaml is needed by the test validators; try to install it on the fly.
    print ( fg.YELLOW + style.BRIGHT + "[ ! ] Trying to run pip install pyyaml ..." + style.RESET_ALL )
    subprocess.check_call([sys.executable, "-m", "pip", "install", "pyyaml"])
    try:
        import yaml
    except ImportError:
        print ( fg.RED + style.BRIGHT + "[ ! ] Please run pip install pyyaml, otherwise some tests will fail to validate" + style.RESET_ALL )
        missingCritical = missingCritical + 1
try:
    # Query the JDK version (java prints it on stderr, hence STDOUT redirect).
    jversion = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
    pattern = r'\"(\d+\.\d+).*\"'  # raw string: avoids invalid \d escape warnings
    jversionTrim = re.search(pattern, jversion.decode("utf-8") ).groups()[0]
except Exception:
    jversionTrim = ""
# We could compare using 'Version' instead but it requires installing the 'packaging' package
if jversionTrim < "1.6":
    print ( fg.YELLOW + style.BRIGHT + "[ ! ] You need java (JDK 1.6 or newer) if you want to rebuild the ANTLR4 generated grammar files" + style.RESET_ALL )
    missingOptional = missingOptional + 1
if missingOptional > 0:
    print ( fg.YELLOW + style.BRIGHT + "{} optional component(s) could be better".format(missingOptional) + style.RESET_ALL )
if missingCritical > 0:
    print ( fg.RED + style.BRIGHT + "{} critical component(s) are still missing!".format(missingCritical) + style.RESET_ALL )
    sys.exit(1)
# Part 1 - Build, ...
# Build Release and Debug on the current platform and record the binary paths.
if os.name == 'nt':
    print (fg.CYAN + style.BRIGHT + "Prepare solution..." + style.RESET_ALL)
    subprocess.call(["prepare_solution_win.bat", "nopause", isJenkin])
    print (fg.CYAN + style.BRIGHT + "... done" + style.RESET_ALL)
    print (fg.CYAN + style.BRIGHT + "Build Release ..." + style.RESET_ALL )
    ret = subprocess.call(["build_win.bat", "Release", isJenkin])
    if ret != 0:
        if ret < 0:
            # A negative return code means the child was killed by a signal.
            # BUGFIX: the original concatenated str + int ("..." + -ret),
            # which raised a TypeError instead of printing the signal number.
            print ( "Killed by signal " + str(-ret) )
            sys.exit(1)
        else:
            print ( fg.RED + style.BRIGHT + "Could not complete test execution!" + style.RESET_ALL )
            sys.exit(1)
    else:
        print ( fg.CYAN + style.BRIGHT + "... done" + style.RESET_ALL )
    print ( fg.CYAN + style.BRIGHT + "Build Debug ..." + style.RESET_ALL )
    ret = subprocess.call(["build_win.bat", "Debug", isJenkin])
    if ret != 0:
        if ret < 0:
            # Same signal-number fix as for the Release build above.
            print ( "Killed by signal " + str(-ret) )
            sys.exit(1)
        else:
            print ( fg.RED + style.BRIGHT + "Could not complete test execution!" + style.RESET_ALL )
            sys.exit(1)
    else:
        print ( fg.CYAN + style.BRIGHT + "... done" + style.RESET_ALL )
    azslcRelease = "bin/win_x64/Release/azslc.exe"
    if not os.path.isfile(azslcRelease):
        print ( "Release build failed (expected at {})".format(azslcRelease) )
        sys.exit(1)
    azslcDebug = "bin/win_x64/Debug/azslc.exe"
    if not os.path.isfile(azslcDebug):
        print ( "Debug build failed (expected at {})".format(azslcDebug) )
        sys.exit(1)
if platform.system() == 'Darwin':
    print ( fg.CYAN + style.BRIGHT + "Prepare solution..." + style.RESET_ALL )
    subprocess.call(["./prepare_solution_darwin.sh"])
    print ( fg.CYAN + style.BRIGHT + "... done" + style.RESET_ALL )
    azslcRelease = "bin/darwin/release/azslc"
    if not os.path.isfile(azslcRelease):
        print ( "Release build failed (expected at {})".format(azslcRelease) )
        sys.exit(1)
    azslcDebug = "bin/darwin/debug/azslc"
    if not os.path.isfile(azslcDebug):
        print ( "Debug build failed (expected at {})".format(azslcDebug) )
        sys.exit(1)
if platform.system() == 'Linux':
    print ( fg.CYAN + style.BRIGHT + "Prepare solution..." + style.RESET_ALL )
    subprocess.call(["./prepare_solution_linux.sh"])
    print ( fg.CYAN + style.BRIGHT + "... done" + style.RESET_ALL )
    azslcRelease = "bin/linux/release/azslc"
    if not os.path.isfile(azslcRelease):
        print ( "Release build failed (expected at {})".format(azslcRelease) )
        sys.exit(1)
    azslcDebug = "bin/linux/debug/azslc"
    if not os.path.isfile(azslcDebug):
        print ( "Debug build failed (expected at {})".format(azslcDebug) )
        sys.exit(1)
# Part 2 - ... test and ...
# Run the test suites against both builds and gate on the pass/fail counts.
expectCorrect = 230
allowIncorrect = 0
testScriptModule = importlib.import_module("testapp")
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
print ( fg.CYAN + style.BRIGHT + "   Testing Release ..." + style.RESET_ALL )
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
numAllTests = testScriptModule.runAll("./tests", testList, azslcRelease, 0, az3rdParty)
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
print ( fg.GREEN + style.BRIGHT + "OK: {}".format(numAllTests.numPass) + style.RESET_ALL )
print ( fg.YELLOW + style.BRIGHT + "TODO: {}".format(numAllTests.numTodo) + style.RESET_ALL )
print ( fg.RED + style.BRIGHT + "FAILED: {}".format(numAllTests.numFail) + style.RESET_ALL )
if numAllTests.numPass < expectCorrect:
    # NOTE(review): exits with status 0 even though the pass count is too low;
    # presumably intentional (soft failure) -- confirm before changing.
    print ( fg.RED + style.BRIGHT + "Fix your code! (expected {} passing tests)".format(expectCorrect) + style.RESET_ALL )
    sys.exit(0)
if numAllTests.numFail > allowIncorrect:
    print ( fg.RED + style.BRIGHT + "Wow, we shouldn't ship with that many broken tests! (allows {} tests to be failing)".format(allowIncorrect) + style.RESET_ALL )
    sys.exit(1)
print ( fg.CYAN + style.BRIGHT + "   GOOD ENOUGH" + style.RESET_ALL )
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
print ( fg.CYAN + style.BRIGHT + "   Testing Debug ..." + style.RESET_ALL )
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
numAllTests = testScriptModule.runAll("./tests", testList, azslcDebug, 0, az3rdParty)
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL )
print ( fg.GREEN + style.BRIGHT + "OK: {}".format(numAllTests.numPass) + style.RESET_ALL )
print ( fg.YELLOW + style.BRIGHT + "TODO: {}".format(numAllTests.numTodo) + style.RESET_ALL )
print ( fg.RED + style.BRIGHT + "FAILED: {}".format(numAllTests.numFail) + style.RESET_ALL )
if numAllTests.numPass < expectCorrect:
    # CONSISTENCY FIX: the original passed fg.RED as a separate print argument
    # (comma instead of '+'), unlike the identical Release-section line above.
    print ( fg.RED + style.BRIGHT + "Fix your code! (expected {} passing tests)".format(expectCorrect) + style.RESET_ALL)
    sys.exit(0)
if numAllTests.numFail > allowIncorrect:
    print ( fg.RED + style.BRIGHT + "Wow, we shouldn't ship with that many broken tests! (allows {} tests to be failing)".format(allowIncorrect) + style.RESET_ALL)
    sys.exit(1)
print ( fg.CYAN + style.BRIGHT + "   GOOD ENOUGH" + style.RESET_ALL)
print ( fg.CYAN + style.BRIGHT + "*****************************************************" + style.RESET_ALL)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
15269,
357,
66,
8,
25767,
669,
284,
262,
4946,
513,
35,
7117,
4935,
13,
198,
1890,
1844,
6634,
290,
5964,... | 2.518549 | 3,639 |
# -*- coding: utf-8 -*-
"""Models for API requests & responses."""
import dataclasses
import logging
from typing import List, Optional, Union
from ..exceptions import ApiError, InvalidCredentials, JsonInvalid, ResponseNotOk
from ..http import Http
from ..logs import get_obj_log
from ..tools import combo_dicts, json_reload
from . import json_api
@dataclasses.dataclass
class ApiEndpoint:
    """Declarative definition of a single REST API endpoint.

    Couples an HTTP method and path with the schema/model classes used to
    build requests and parse responses, plus per-endpoint HTTP arguments.
    """

    method: str  # HTTP verb, e.g. "get" or "post"
    path: str  # URL path template; formatted from kwargs or the request object
    request_schema_cls: Optional[type]  # schema used when dumping the request
    request_model_cls: Optional[type]  # model used to build/validate requests
    response_schema_cls: Optional[type]  # preferred loader for response data
    response_model_cls: Optional[type]  # fallback loader when no schema is set
    http_args: dict = dataclasses.field(default_factory=dict)  # static args for the HTTP client
    http_args_required: List[str] = dataclasses.field(default_factory=list)  # args that must be non-empty
    request_as_none: bool = False  # if True, send no params/body from the request object
    response_as_text: bool = False  # if True, return response.text instead of parsed JSON
    log_level: str = "debug"  # level used by this endpoint's logger

    def __str__(self):
        """Return a multi-line human-readable summary of this endpoint."""
        lines = [
            f"ApiEndpoint Method={self.method!r}, path={self.path!r}",
            f"Request Schema={self.request_schema_cls}",
            f"Request Model={self.request_model_cls}",
            f"Response Schema={self.response_schema_cls}",
            f"Response Model={self.response_model_cls}",
        ]
        return "\n ".join(lines)

    @property
    def log(self) -> logging.Logger:
        """Logger for this endpoint, configured at :attr:`log_level`."""
        return get_obj_log(obj=self, level=self.log_level)

    def perform_request(
        self,
        http: Http,
        request_obj: Optional[json_api.base.BaseModel] = None,
        **kwargs,
    ) -> Union[dict, json_api.base.BaseModel]:
        """Send the request and return the handled (parsed) response.

        Args:
            http: HTTP client used to send the request
            request_obj: optional request model used to build path/params/body
            **kwargs: passed through to request building and response handling
        """
        kwargs["response"] = self.perform_request_raw(http=http, request_obj=request_obj, **kwargs)
        return self.handle_response(http=http, **kwargs)

    def perform_request_raw(
        self,
        http: Http,
        request_obj: Optional[json_api.base.BaseModel] = None,
        **kwargs,
    ):
        """Send the request and return the raw, unhandled response object."""
        http_args = self.get_http_args(request_obj=request_obj, **kwargs)
        response = http(**http_args)
        return response

    def load_request(self, **kwargs) -> Optional[Union[json_api.base.BaseModel, dict]]:
        """Build a request object from kwargs.

        Returns an instance loaded via :attr:`request_model_cls` when one is
        defined, otherwise the kwargs themselves (or None when empty).
        """
        if self.request_model_cls:
            this_kwargs = {"api_endpoint": self}
            this_kwargs = combo_dicts(kwargs, this_kwargs)
            return self.request_model_cls.load_request(**this_kwargs)
        return kwargs or None

    def load_response(
        self, data: dict, http: Http, **kwargs
    ) -> Union[json_api.base.BaseModel, dict]:
        """Parse response data through the response schema or model.

        Returns ``data`` unchanged when ``unloaded`` is truthy in kwargs or
        when neither :attr:`response_schema_cls` nor :attr:`response_model_cls`
        is defined.
        """
        if kwargs.get("unloaded"):
            return data
        # Prefer the schema; fall back to the model; otherwise pass through.
        if self.response_schema_cls:
            use = self.response_schema_cls
        elif self.response_model_cls:
            use = self.response_model_cls
        else:
            use = None
        if use:
            this_kwargs = {"api_endpoint": self}
            this_kwargs = combo_dicts(kwargs, this_kwargs)
            data = use.load_response(data=data, http=http, **this_kwargs)
        return data

    def handle_response(
        self, http: Http, response, **kwargs
    ) -> Union[str, json_api.base.BaseModel, dict]:
        """Validate a raw response and return its parsed content.

        When :attr:`response_as_text` is set, only the status is checked and
        the body is returned as text; otherwise the JSON body is extracted,
        the status checked, and the data loaded via :meth:`load_response`.
        """
        if self.response_as_text:
            self.handle_response_status(http=http, response=response, **kwargs)
            return response.text
        data = self.handle_response_json(http=http, response=response, **kwargs)
        this_kwargs = {"data": data}
        this_kwargs = combo_dicts(kwargs, this_kwargs)
        self.handle_response_status(http=http, response=response, **this_kwargs)
        return self.load_response(http=http, response=response, **this_kwargs)

    def handle_response_json(self, http: Http, response, **kwargs) -> dict:
        """Get the JSON from a response.

        Args:
            response: :obj:`requests.Response` object to check

        Raises:
            :exc:`JsonInvalid`: if response has invalid json
        """
        try:
            data = response.json()
        except Exception as exc:
            raise JsonInvalid(msg="Response has invalid JSON", response=response, exc=exc)
        return data

    def handle_response_status(self, http: Http, response, **kwargs):
        """Check the status code of a response.

        Calls the optional ``response_status_hook`` from kwargs first, then
        translates HTTP errors into package exceptions.

        Args:
            response: :obj:`requests.Response` object to check

        Raises:
            :exc:`.InvalidCredentials`: if response has a 401 status code
            :exc:`.ResponseNotOk`: if response has a bad status code
        """
        hook = kwargs.get("response_status_hook")
        if callable(hook):
            hook(http=http, response=response, **kwargs)
        if response.status_code == 401:
            raise InvalidCredentials(msg="Invalid credentials", response=response)
        try:
            response.raise_for_status()
        except Exception as exc:
            raise ResponseNotOk(
                msg=f"Response has a bad HTTP status code {response.status_code}",
                response=response,
                exc=exc,
            )

    def get_http_args(
        self,
        request_obj: Optional[json_api.base.BaseModel] = None,
        http_args: Optional[dict] = None,
        **kwargs,
    ) -> dict:
        """Assemble the keyword arguments for the HTTP client call.

        Merges the method and path (formatted from ``request_obj`` or kwargs),
        the request params/body, the endpoint's static :attr:`http_args`, and
        caller overrides; then verifies :attr:`http_args_required`.

        Raises:
            :exc:`ApiError`: if a required HTTP argument is missing or empty
        """
        args = {}
        args["method"] = self.method
        if request_obj is not None:
            self.check_request_obj(request_obj=request_obj)
            this_kwargs = {"path": self.path}
            this_kwargs = combo_dicts(kwargs, this_kwargs)
            args["path"] = request_obj.dump_request_path(**this_kwargs)
            if not self.request_as_none:
                this_kwargs = {"api_endpoint": self, "schema_cls": self.request_schema_cls}
                this_kwargs = combo_dicts(kwargs, this_kwargs)
                if self.method == "get":
                    # GET requests carry the payload as query parameters.
                    args["params"] = request_obj.dump_request_params(**this_kwargs)
                else:
                    args["json"] = request_obj.dump_request(**this_kwargs)
        else:
            args["path"] = self.path.format(**kwargs)
        # Static per-endpoint args first, then caller overrides win.
        args.update(self.http_args or {})
        args.update(http_args or {})
        for arg in self.http_args_required:
            if not args.get(arg):
                msgs = [
                    f"Missing required HTTP argument {arg!r}",
                    f"While in {self}",
                    f"HTTP arguments:\n{json_reload(args)}",
                    f"Missing required HTTP argument {arg!r}",
                ]
                raise ApiError("\n\n".join(msgs))
        return args

    def check_request_obj(self, request_obj: json_api.base.BaseModel):
        """Ensure ``request_obj`` is an instance of :attr:`request_model_cls`.

        Raises:
            :exc:`ApiError`: if the type does not match
        """
        model_check = self.request_model_cls
        if model_check and not isinstance(request_obj, model_check):
            otype = type(request_obj)
            msg = f"Request object must be of type {model_check!r}, not of type {otype!r}"
            raise ApiError(msg)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
5841,
1424,
329,
7824,
7007,
1222,
9109,
526,
15931,
198,
11748,
4818,
330,
28958,
198,
11748,
18931,
198,
6738,
19720,
1330,
7343,
11,
32233,
11,
4479,
198,
198,
... | 2.165621 | 3,188 |
# -*- coding: utf-8 -*-
def cell_check(section):
    '''
    Applies the game-of-life rules to a 3x3 slice and returns the next
    state (0 or 1) of the central cell.
    '''
    center = section[1][1]
    # Live-neighbor count: sum the whole 3x3 slice, then remove the center.
    neighbors = sum(sum(row) for row in section) - center
    # Rule 2 (a live cell with two neighbors survives) needs no action,
    # because it leaves the cell's state unchanged.
    if neighbors <= 1:
        # Fewer than two neighbors: the central cell dies of underpopulation.
        center = 0
    elif neighbors == 3:
        # Exactly three neighbors: a cell is born by reproduction.
        center = 1
    elif neighbors >= 4:
        # More than three neighbors: the cell dies of overpopulation.
        center = 0
    return center
def get_section(matrix, row, col):
    '''
    Extracts a 3x3 neighborhood slice from the plane, given the
    coordinates of the central cell. Edges wrap around (toroidal
    topology).

    GENERALIZATION: the wrap-around modulus is now derived from the
    matrix dimensions instead of being hard-coded to 50, so any
    rectangular plane works; behavior on 50x50 planes is unchanged.
    '''
    num_rows = len(matrix)
    num_cols = len(matrix[0])
    # Start from an all-dead 3x3 slice so we copy into a fresh buffer.
    section = [[0 for _ in range(3)] for _ in range(3)]
    # Walk the neighborhood of the cell at (row, col), copying each
    # (possibly wrapped) value into the slice.
    for sec_r, r in enumerate(range(row - 1, row + 2)):
        for sec_c, c in enumerate(range(col - 1, col + 2)):
            section[sec_r][sec_c] = matrix[r % num_rows][c % num_cols]
    # Return the 3x3 slice of the plane.
    return section
def game_of_life(seed):
    '''
    Receives a seed for a 50x50 plane, runs one step of the game of life
    and returns the next generation.
    '''
    # Build a fresh plane for the next generation: we must not write into
    # the current generation, or earlier updates would leak into later
    # cell evaluations (side effects).
    next_gen = [[0] * 50 for _ in range(50)]
    # Evaluate the 3x3 neighborhood slice around every cell to find its
    # state in the following generation.
    for r in range(50):
        for c in range(50):
            next_gen[r][c] = cell_check(get_section(seed, r, c))
    return next_gen
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
4299,
2685,
62,
9122,
7,
5458,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
8393,
29822,
355,
842,
8847,
466,
983,
286,
1204,
795,
23781,
664,
419,
... | 2.293605 | 1,032 |
#!/usr/bin/python
"""Python main file."""
# -*- coding: utf-8 -*-
# -----------------------------------------
# author : Ahmet Ozlu
# mail : ahmetozlu93@gmail.com
# date : 05.05.2019
# -----------------------------------------
import color_correlation
import cv2
import dewapper
import signature_extractor
import unsharpen
source_image = cv2.imread("test.jpg")
img = 0

# Step 1: crop with a margin and dewarp the book page.
try:
    img = dewapper.dewarp_book(source_image)
    cv2.imwrite("step 1 - page_dewarped.jpg", img)
    print("- step1 (cropping with the argins + book dewarpping): OK")
except Exception as err:
    print("type error: " + str(err))
    print("ERROR IN CROPPING & BOOK DEWARPING! PLEASE CHECK LIGTHNING,"
          " SHADOW, ZOOM LEVEL AND ETC. OF YOUR INPUT BOOK IMAGE!")

# Step 2: extract the signature from the grayscale page.
try:
    img = signature_extractor.extract_signature(cv2.cvtColor(img,
                                                             cv2.COLOR_BGR2GRAY))
    cv2.imwrite("step 2 - signature_extracted.jpg", img)
    print("- step2 (signature extractor): OK")
except Exception as err:
    print("type error: " + str(err))
    print("ERROR IN SIGNATURE EXTRACTION! PLEASE CHECK LIGTHNING, SHADOW,"
          " ZOOM LEVEL AND ETC. OF YOUR INPUT BOOK IMAGE!")

# Step 3: apply the unsharpening mask.
try:
    unsharpen.unsharpen_mask(img)
    cv2.imwrite("step 3 - unsharpen_mask.jpg", img)
    print("- step3 (unsharpening mask): OK")
except Exception as err:
    print("type error: " + str(err))
    print("ERROR IN BOOK UNSHARPING MASK! PLEASE CHECK LIGTHNING, SHADOW,"
          " ZOOM LEVEL AND ETC. OF YOUR INPUT BOOK IMAGE!")

# Step 4: correct brightness/contrast (color correlation).
try:
    img = color_correlation.funcBrightContrast(img)
    cv2.imwrite("step 4 - color_correlated.jpg", img)
    print("- step4 (color correlation): OK")
except Exception as err:
    print("type error: " + str(err))
    print("ERROR IN BOOK COLOR CORRELATION! PLEASE CHECK LIGTHNING, SHADOW,"
          " ZOOM LEVEL AND ETC. OF YOUR INPUT BOOK IMAGE!")

cv2.imwrite("output.jpg", img)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
37906,
1388,
2393,
526,
15931,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
20368,
45537,
198,
2,
1772,
220,
220,
220,
220,
220,
1058,
7900,
4164,
18024,
... | 2.569954 | 872 |
import random
import re
from enum import Enum
import utils
from test_shifts.base_shift import BaseShift
| [
11748,
4738,
198,
11748,
302,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
11748,
3384,
4487,
198,
6738,
1332,
62,
1477,
19265,
13,
8692,
62,
30846,
1330,
7308,
33377,
628,
198
] | 3.451613 | 31 |
import saopy.model
from saopy.model import owlss___Service as Service
| [
11748,
473,
11081,
13,
19849,
198,
198,
6738,
473,
11081,
13,
19849,
1330,
39610,
824,
17569,
16177,
355,
4809,
198
] | 3.55 | 20 |
"""
Copyright: MAXON Computer GmbH
Author: Yannick Puech
Description:
- Configures the active LOD object to use "Manual Groups".
- The selected objects referenced in the objects list are moved under the LOD object and are referenced in each group.
Class/method highlighted:
- LodObject.GetManualModeObjectListDescID()
Compatible:
- Win / Mac
- R19, R20, R21
"""
import c4d
if __name__ == '__main__':
main() | [
37811,
198,
15269,
25,
25882,
1340,
13851,
402,
2022,
39,
198,
13838,
25,
575,
1236,
624,
350,
518,
354,
198,
198,
11828,
25,
198,
220,
220,
220,
532,
17056,
942,
262,
4075,
406,
3727,
2134,
284,
779,
366,
5124,
723,
27441,
1911,
19... | 3.006897 | 145 |
from django.urls import path, include
from blog import urls
from . import views
urlpatterns = [
path('', views.Homepage.as_view(), name='homepage'),
path('aboutus', views.AboutUs.as_view(), name="aboutus"),
path('post/<int:pk>', views.PostDetail, name='postdetail'),
path('newpost', views.newpost, name='newpost'),
path('usrinfo', views.user_image_func, name='usrinfo'),
path('postedit/<int:pk>', views.PostEdit.as_view(), name='postedit'),
path('postdelete/<int:pk>', views.BlogDelete.as_view(), name='postdelete'),
path('mypost/<author>', views.MyPost.as_view(), name='myposts'),
path('commentreplydelete/<int:pk>', views.CommentReplyDelete.as_view(), name='commentreplydelete'),
path('commentdelete/<int:pk>', views.CommentDelete.as_view(), name='commentdelete'),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('accounts.urls')),
path('post/postnew', views.BlogPostNew.as_view(), name='postnew'),
path('profile/<str:nam>', views.profile, name='profile'),
path('editprofileinfo/<int:pk>', views.userInfoFormView, name='editprofileinfo'),
path('search', views.search, name='search'),
path('category/<str:hashtags>', views.categoryview, name='category'),
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
4130,
1330,
2956,
7278,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
16060,
7700,
13,
292,
62,
1... | 2.783664 | 453 |
"""
Global configuration for pytest
"""
collect_ignore = [
"test_aio_pythonized.py",
"test_aio_raw.py",
]
| [
37811,
198,
22289,
8398,
329,
12972,
9288,
198,
37811,
198,
198,
33327,
62,
46430,
796,
685,
198,
220,
220,
220,
366,
9288,
62,
64,
952,
62,
29412,
1143,
13,
9078,
1600,
198,
220,
220,
220,
366,
9288,
62,
64,
952,
62,
1831,
13,
90... | 2.395833 | 48 |
from valid_ip import Validate_IP
import getpass
from colorama import init, deinit, Fore, Style
init()
# Getting user Credentials
| [
6738,
4938,
62,
541,
1330,
3254,
20540,
62,
4061,
198,
11748,
651,
6603,
198,
6738,
3124,
1689,
1330,
2315,
11,
390,
15003,
11,
4558,
11,
17738,
198,
15003,
3419,
628,
198,
2,
18067,
2836,
327,
445,
14817,
628
] | 3.473684 | 38 |
# DASHBOARD SCRIPT - do not use print()
RESULT = []
# - - - - - - - - - - - - - - - - - - - - - - - - - -
# put your code below:
import subprocess
RESULT= get_win_date()
| [
2,
360,
11211,
8202,
9795,
6374,
46023,
532,
466,
407,
779,
3601,
3419,
201,
198,
19535,
16724,
796,
17635,
201,
198,
2,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,
532,... | 2.291139 | 79 |
# 350111
# a3_p10.py
# Dazhi Zhan
# d.zhan@jacobs-university.de
x = input('please enter the length ')
x = int(x)
y = input('please enter the width ')
y = int(y)
z = input('please enter a character ')
z = str(z)
print_frame(x, y, z)
| [
2,
3439,
486,
1157,
201,
198,
2,
257,
18,
62,
79,
940,
13,
9078,
201,
198,
2,
360,
1031,
5303,
1168,
7637,
201,
198,
2,
288,
13,
89,
7637,
31,
30482,
8158,
12,
403,
1608,
13,
2934,
201,
198,
87,
796,
5128,
10786,
29688,
3802,
... | 2.073171 | 123 |
import os
from _raisim_gym import *
__BLACKPANTHER_V5_RESOURCE_DIRECTORY__ = os.path.dirname(os.path.abspath(__file__)) + '/urdf'
# For compilation, this is not necessary.
# When writing your own training program, you need the default configuration to use this
| [
11748,
28686,
198,
6738,
4808,
430,
271,
320,
62,
1360,
76,
1330,
1635,
198,
198,
834,
9148,
8120,
47,
1565,
21250,
62,
53,
20,
62,
19535,
31033,
62,
17931,
23988,
15513,
834,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,... | 3.082353 | 85 |
"""
Transforming Geolocation (altitude, longitude, altitude) to Carla Location (x, y, z)
Based on the carla function _location_to_gps
https://github.com/carla-simulator/scenario_runner/blob/master/srunner/tools/route_manipulation.py
Adapted by DevGlitch
"""
import math
def from_gps_to_xyz(latitude: float, longitude: float, altitude: float): # lat_ref, lon_ref
"""Get carla location x y z coordinates from GPS (latitude, longitude, altitude)."""
# Equatorial mean radius of Earth in meters
# https://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html
earth_radius = 6378137.0
# Hardcoded can only work for town 01 to 07
lat_ref = 0.0
lon_ref = 0.0
scale = math.cos(lat_ref * math.pi / 180.0)
base_x = scale * lon_ref * math.pi * earth_radius / 180.0
base_y = scale * earth_radius * math.log(math.tan((90.0 + lat_ref) * math.pi / 360.0))
x = scale * (longitude - base_x) * math.pi * earth_radius / 180.0
y = (scale * earth_radius * math.log(math.tan((90.0 + latitude) * math.pi / 360.0)) - base_y) * -1
z = altitude
# like carla.Location
location = (x, y, z)
return location
# For debug
# lat = -0.002647366503438775
# lon = -6.620580971056203e-05
# alt = 0.0
#
# result = from_gps_to_xyz(lat, lon, alt)
# print(result)
#
# lat_dif = result[0] + 7.369997024536133
# lon_dif = result[1] - 294.7034912109375
#
# print("x dif=", lat_dif)
# print("y dif=", lon_dif)
| [
37811,
198,
8291,
15464,
2269,
349,
5040,
357,
2501,
3984,
11,
890,
3984,
11,
20334,
8,
284,
1879,
5031,
13397,
357,
87,
11,
331,
11,
1976,
8,
198,
198,
15001,
319,
262,
1097,
5031,
2163,
4808,
24886,
62,
1462,
62,
70,
862,
198,
5... | 2.444068 | 590 |
import logging
from django.conf import settings
from django.utils.datastructures import SortedDict
from statsd import statsd
from tower import ugettext_lazy as _lazy
from zendesk import Zendesk, ZendeskError
log = logging.getLogger('k.questions.marketplace')
MARKETPLACE_CATEGORIES = SortedDict([
('payments', _lazy('Payments')),
('applications', _lazy('Applications')),
('account', _lazy('Account')),
])
class ZendeskSettingsError(ZendeskError):
"""Exception for missing settings."""
def get_zendesk():
"""Instantiate and return a Zendesk client"""
# Verify required Zendesk settings
zendesk_url = settings.ZENDESK_URL
zendesk_email = settings.ZENDESK_USER_EMAIL
zendesk_password = settings.ZENDESK_USER_PASSWORD
if not zendesk_url or not zendesk_email or not zendesk_password:
log.error('Zendesk settings error: please set ZENDESK_URL, '
'ZENDESK_USER_EMAIL and ZENDESK_USER_PASSWORD.')
statsd.incr('questions.zendesk.settingserror')
raise ZendeskSettingsError('Missing Zendesk settings.')
return Zendesk(zendesk_url, zendesk_email, zendesk_password)
def submit_ticket(email, category, subject, body):
"""Submit a marketplace ticket to Zendesk.
:arg email: user's email address
:arg category: issue's category
:arg subject: issue's subject
:arg body: issue's description
"""
# Create the Zendesk connection client.
zendesk = get_zendesk()
# Create the ticket
new_ticket = {
'ticket': {
'requester_email': email,
'subject': settings.ZENDESK_SUBJECT_PREFIX + subject,
'description': body,
'set_tags': category,
}
}
try:
ticket_url = zendesk.create_ticket(data=new_ticket)
statsd.incr('questions.zendesk.success')
except ZendeskError as e:
log.error('Zendesk error: %s' % e.msg)
statsd.incr('questions.zendesk.error')
raise
return ticket_url
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
26791,
13,
19608,
459,
1356,
942,
1330,
311,
9741,
35,
713,
198,
198,
6738,
9756,
67,
1330,
9756,
67,
198,
6738,
10580,
1330,
334,
1136,
5... | 2.390215 | 838 |
from __clrclasses__.System.Windows.Markup import ValueSerializerAttribute
| [
6738,
11593,
565,
81,
37724,
834,
13,
11964,
13,
11209,
13,
9704,
929,
1330,
11052,
32634,
7509,
33682,
198
] | 3.894737 | 19 |
import numpy as np
import scipy.io as sio
import os
npy_root = '../../skating/c3d_feat/'
f = open("annotations.txt").readlines()
max_feats = []
avr_feats = []
scores = []
for line in f:
items = line.strip().split(' ')
scores.append(float(items[1]))
feat_file = items[0] + '.npy'
feat = np.load(npy_root + feat_file)
max_feats.append(np.max(feat, axis=0))
avr_feats.append(np.mean(feat, axis=0))
max_feats = np.array(max_feats)
avr_feats = np.array(avr_feats)
scores = np.array(scores)
sio.savemat("c3d_max_carl.mat", {"x":max_feats, "y":scores})
sio.savemat("c3d_avr_carl.mat", {"x":avr_feats, "y": scores})
| [
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
952,
355,
264,
952,
198,
11748,
28686,
198,
77,
9078,
62,
15763,
796,
705,
40720,
40720,
8135,
803,
14,
66,
18,
67,
62,
27594,
14,
6,
198,
69,
796,
1280,
7203,
34574,
60... | 2.144295 | 298 |
import h5py, argparse, numpy as np
psr = argparse.ArgumentParser()
psr.add_argument('-i', dest='ipt', nargs='+', help='input h5 file')
psr.add_argument('-o', dest='opt', help='output h5 file')
args = psr.parse_args()
length = 0
dtype = []
length = []
index = []
info = []
with h5py.File(args.ipt[0], 'r') as ipt:
keys = list(ipt.keys())
attrs = list(ipt.attrs)
attrsValues = [ipt.attrs[i] for i in attrs]
for i in range(len(keys)):
dtype.append(ipt[keys[i]].dtype)
length.append(0)
index.append(0)
for h5f in args.ipt:
with h5py.File(h5f, 'r') as ipt:
for i in range(len(keys)):
length[i] += len(ipt[keys[i]])
for i in range(len(keys)):
info.append(np.zeros((length[i],),dtype=dtype[i]))
for h5f in args.ipt:
with h5py.File(h5f, 'r') as ipt:
for i in range(len(keys)):
info[i][index[i]:(index[i]+len(ipt[keys[i]]))] = ipt[keys[i]][:]
index[i] += len(ipt[keys[i]])
print('length: {}'.format(length))
with h5py.File(args.opt, 'w') as opt:
for i in range(len(attrs)):
opt.attrs[attrs[i]] = attrsValues[i]
for i in range(len(keys)):
opt.create_dataset(keys[i], data=info[i], compression='gzip')
| [
11748,
289,
20,
9078,
11,
1822,
29572,
11,
299,
32152,
355,
45941,
198,
862,
81,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
862,
81,
13,
2860,
62,
49140,
10786,
12,
72,
3256,
2244,
11639,
10257,
3256,
299,
22046,
11639,
10,
... | 2.07483 | 588 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import spacy
IS_DEBUG = True
IGNORE_CACHE = True
ONTOLOGY_NAME = "biotech"
if __name__ == "__main__":
import plac
plac.call(main)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
11748,
599,
1590,
198,
198,
1797,
62,
30531,
796,
6407,
198,
16284,
6965,
62,
34,
2246,
13909,
796,
6407,
198,
35830,... | 2.333333 | 81 |
import pandas
import pytest
from muller.clustering.metrics import DistanceCache
@pytest.fixture
@pytest.fixture
| [
11748,
19798,
292,
198,
11748,
12972,
9288,
198,
198,
6738,
35024,
263,
13,
565,
436,
1586,
13,
4164,
10466,
1330,
34600,
30562,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
628,
628,
198
] | 2.928571 | 42 |
# -*- coding: utf-8 -*-
# Author: TDC Team
# License: MIT
import warnings
warnings.filterwarnings("ignore")
import sys
from ..utils import print_sys
from . import bi_pred_dataset, multi_pred_dataset
from ..metadata import dataset_names
class DrugSyn(multi_pred_dataset.DataLoader):
"""Data loader class to load datasets in Drug Synergy Prediction task.
More info: https://tdcommons.ai/multi_pred_tasks/drugsyn/
Task Description: Regression.
Given the gene expression of cell lines and two SMILES strings of the drug combos,
predict the drug synergy level.
Args:
name (str): the dataset name.
path (str, optional):
The path to save the data file, defaults to './data'
print_stats (bool, optional):
Whether to print basic statistics of the dataset, defaults to False
"""
def __init__(self, name, path='./data', print_stats=False):
"""Create Drug Synergy Prediction dataloader object
"""
super().__init__(name, path, print_stats,
dataset_names=dataset_names["DrugSyn"])
if print_stats:
self.print_stats()
print('Done!', flush=True, file=sys.stderr) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
6434,
25,
309,
9697,
4816,
198,
2,
13789,
25,
17168,
198,
198,
11748,
14601,
198,
40539,
654,
13,
24455,
40539,
654,
7203,
46430,
4943,
198,
11748,
25064,
198,
198,
... | 2.491089 | 505 |
import logging
from contextlib import ExitStack
from share.search.elastic_manager import ElasticManager
from share.search.messages import MessageType
__all__ = ('SearchIndexer', 'MessageType')
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
4732,
8019,
1330,
29739,
25896,
198,
198,
6738,
2648,
13,
12947,
13,
417,
3477,
62,
37153,
1330,
48567,
13511,
198,
6738,
2648,
13,
12947,
13,
37348,
1095,
1330,
16000,
6030,
628,
198,
834,
439,
834,
796,
19203,... | 3.630769 | 65 |
# Given an array of ‘K’ sorted LinkedLists, merge them into one sorted list.
# Example 1:
# Input: L1=[2, 6, 8], L2=[3, 6, 7], L3=[1, 3, 4]
# Output: [1, 2, 3, 3, 4, 6, 6, 7, 8]
# Example 2:
# Input: L1=[5, 8, 9], L2=[1, 7]
# Output: [1, 5, 7, 8, 9]
from heapq import *
if __name__ == '__main__':
l1 = ListNode(2)
l1.next = ListNode(6)
l1.next.next = ListNode(8)
l2 = ListNode(3)
l2.next = ListNode(6)
l2.next.next = ListNode(7)
l3 = ListNode(1)
l3.next = ListNode(3)
l3.next.next = ListNode(4)
result = merge_lists([l1, l2, l3])
print("Here are the elements from the merged lists: ",end= '')
while result != None:
print(str(result.val) + " ", end='')
result = result.next
| [
2,
11259,
281,
7177,
286,
564,
246,
42,
447,
247,
23243,
7502,
276,
43,
1023,
11,
20121,
606,
656,
530,
23243,
1351,
13,
198,
198,
2,
17934,
352,
25,
198,
198,
2,
23412,
25,
406,
16,
41888,
17,
11,
718,
11,
807,
4357,
406,
17,
... | 2.079452 | 365 |
for number in range(50):
print("ADD_TINY_TEST(test_" + str(number) + "){")
print(" int i = rand()%50;")
print(" ASSERT_TEST_RESULT(i != " + str(number + 1) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 2) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 3) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 4) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 5) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 6) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 7) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 8) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 9) + ");")
# print(" ASSERT_TEST_RESULT(i != " + str(number + 10) + ");")
print("}\r\n")
# for number in range(50):
# print(" RUN_TINY_TEST(test_" + str(number) + ");") | [
1640,
1271,
287,
2837,
7,
1120,
2599,
198,
197,
4798,
7203,
29266,
62,
51,
1268,
56,
62,
51,
6465,
7,
9288,
62,
1,
1343,
965,
7,
17618,
8,
1343,
366,
8,
4895,
8,
198,
197,
4798,
7203,
197,
600,
1312,
796,
43720,
3419,
4,
1120,
... | 2.166667 | 384 |
"""Conftest for the KNX integration."""
from __future__ import annotations
import asyncio
from unittest.mock import DEFAULT, AsyncMock, Mock, patch
import pytest
from xknx import XKNX
from xknx.core import XknxConnectionState
from xknx.dpt import DPTArray, DPTBinary
from xknx.io import DEFAULT_MCAST_GRP, DEFAULT_MCAST_PORT
from xknx.telegram import Telegram, TelegramDirection
from xknx.telegram.address import GroupAddress, IndividualAddress
from xknx.telegram.apci import APCI, GroupValueRead, GroupValueResponse, GroupValueWrite
from homeassistant.components.knx import ConnectionSchema
from homeassistant.components.knx.const import (
CONF_KNX_AUTOMATIC,
CONF_KNX_CONNECTION_TYPE,
CONF_KNX_INDIVIDUAL_ADDRESS,
DOMAIN as KNX_DOMAIN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
class KNXTestKit:
"""Test helper for the KNX integration."""
INDIVIDUAL_ADDRESS = "1.2.3"
def __init__(self, hass: HomeAssistant, mock_config_entry: MockConfigEntry):
"""Init KNX test helper class."""
self.hass: HomeAssistant = hass
self.mock_config_entry: MockConfigEntry = mock_config_entry
self.xknx: XKNX
# outgoing telegrams will be put in the Queue instead of sent to the interface
# telegrams to an InternalGroupAddress won't be queued here
self._outgoing_telegrams: asyncio.Queue = asyncio.Queue()
def assert_state(self, entity_id: str, state: str, **attributes) -> None:
"""Assert the state of an entity."""
test_state = self.hass.states.get(entity_id)
assert test_state.state == state
for attribute, value in attributes.items():
assert test_state.attributes.get(attribute) == value
async def setup_integration(self, config):
"""Create the KNX integration."""
def knx_ip_interface_mock():
"""Create a xknx knx ip interface mock."""
mock = Mock()
mock.start = AsyncMock()
mock.stop = AsyncMock()
mock.send_telegram = AsyncMock(side_effect=self._outgoing_telegrams.put)
return mock
def fish_xknx(*args, **kwargs):
"""Get the XKNX object from the constructor call."""
self.xknx = kwargs["xknx"]
# disable rate limiter for tests (before StateUpdater starts)
self.xknx.rate_limit = 0
return DEFAULT
with patch(
"xknx.xknx.knx_interface_factory",
return_value=knx_ip_interface_mock(),
side_effect=fish_xknx,
):
self.mock_config_entry.add_to_hass(self.hass)
await async_setup_component(self.hass, KNX_DOMAIN, {KNX_DOMAIN: config})
await self.xknx.connection_manager.connection_state_changed(
XknxConnectionState.CONNECTED
)
await self.hass.async_block_till_done()
########################
# Telegram counter tests
########################
def _list_remaining_telegrams(self) -> str:
"""Return a string containing remaining outgoing telegrams in test Queue. One per line."""
remaining_telegrams = []
while not self._outgoing_telegrams.empty():
remaining_telegrams.append(self._outgoing_telegrams.get_nowait())
return "\n".join(map(str, remaining_telegrams))
async def assert_no_telegram(self) -> None:
"""Assert if every telegram in test Queue was checked."""
await self.hass.async_block_till_done()
assert self._outgoing_telegrams.empty(), (
f"Found remaining unasserted Telegrams: {self._outgoing_telegrams.qsize()}\n"
f"{self._list_remaining_telegrams()}"
)
async def assert_telegram_count(self, count: int) -> None:
"""Assert outgoing telegram count in test Queue."""
await self.hass.async_block_till_done()
actual_count = self._outgoing_telegrams.qsize()
assert actual_count == count, (
f"Outgoing telegrams: {actual_count} - Expected: {count}\n"
f"{self._list_remaining_telegrams()}"
)
####################
# APCI Service tests
####################
async def assert_telegram(
self,
group_address: str,
payload: int | tuple[int, ...] | None,
apci_type: type[APCI],
) -> None:
"""Assert outgoing telegram. One by one in timely order."""
await self.xknx.telegrams.join()
await self.hass.async_block_till_done()
try:
telegram = self._outgoing_telegrams.get_nowait()
except asyncio.QueueEmpty:
raise AssertionError(
f"No Telegram found. Expected: {apci_type.__name__} -"
f" {group_address} - {payload}"
)
assert isinstance(
telegram.payload, apci_type
), f"APCI type mismatch in {telegram} - Expected: {apci_type.__name__}"
assert (
str(telegram.destination_address) == group_address
), f"Group address mismatch in {telegram} - Expected: {group_address}"
if payload is not None:
assert (
telegram.payload.value.value == payload # type: ignore
), f"Payload mismatch in {telegram} - Expected: {payload}"
async def assert_read(self, group_address: str) -> None:
"""Assert outgoing GroupValueRead telegram. One by one in timely order."""
await self.assert_telegram(group_address, None, GroupValueRead)
async def assert_response(
self, group_address: str, payload: int | tuple[int, ...]
) -> None:
"""Assert outgoing GroupValueResponse telegram. One by one in timely order."""
await self.assert_telegram(group_address, payload, GroupValueResponse)
async def assert_write(
self, group_address: str, payload: int | tuple[int, ...]
) -> None:
"""Assert outgoing GroupValueWrite telegram. One by one in timely order."""
await self.assert_telegram(group_address, payload, GroupValueWrite)
####################
# Incoming telegrams
####################
@staticmethod
def _payload_value(payload: int | tuple[int, ...]) -> DPTArray | DPTBinary:
"""Prepare payload value for GroupValueWrite or GroupValueResponse."""
if isinstance(payload, int):
return DPTBinary(payload)
return DPTArray(payload)
async def _receive_telegram(self, group_address: str, payload: APCI) -> None:
"""Inject incoming KNX telegram."""
self.xknx.telegrams.put_nowait(
Telegram(
destination_address=GroupAddress(group_address),
direction=TelegramDirection.INCOMING,
payload=payload,
source_address=IndividualAddress(self.INDIVIDUAL_ADDRESS),
)
)
await self.xknx.telegrams.join()
await self.hass.async_block_till_done()
async def receive_read(
self,
group_address: str,
) -> None:
"""Inject incoming GroupValueRead telegram."""
await self._receive_telegram(group_address, GroupValueRead())
async def receive_response(
self, group_address: str, payload: int | tuple[int, ...]
) -> None:
"""Inject incoming GroupValueResponse telegram."""
payload_value = self._payload_value(payload)
await self._receive_telegram(group_address, GroupValueResponse(payload_value))
async def receive_write(
self, group_address: str, payload: int | tuple[int, ...]
) -> None:
"""Inject incoming GroupValueWrite telegram."""
payload_value = self._payload_value(payload)
await self._receive_telegram(group_address, GroupValueWrite(payload_value))
@pytest.fixture
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="KNX",
domain=KNX_DOMAIN,
data={
CONF_KNX_INDIVIDUAL_ADDRESS: XKNX.DEFAULT_ADDRESS,
ConnectionSchema.CONF_KNX_MCAST_GRP: DEFAULT_MCAST_GRP,
ConnectionSchema.CONF_KNX_MCAST_PORT: DEFAULT_MCAST_PORT,
CONF_KNX_CONNECTION_TYPE: CONF_KNX_AUTOMATIC,
},
)
@pytest.fixture
async def knx(request, hass, mock_config_entry: MockConfigEntry):
"""Create a KNX TestKit instance."""
knx_test_kit = KNXTestKit(hass, mock_config_entry)
yield knx_test_kit
await knx_test_kit.assert_no_telegram()
| [
37811,
3103,
701,
395,
329,
262,
22466,
55,
11812,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
30351,
952,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
5550,
38865,
11,
1081,
13361,
44,
735,
11,
44123,
11,
... | 2.379712 | 3,608 |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 15:17:56 2019
@author: giles
"""
'''
Question 1
Create a class to represent a bank account. It will need to have a balance,
a method of withdrawing money, depositing money and displaying the balance to
the screen. Create an instance of the bank account and check that the methods
work as expected.
'''
'''
Question 2
Create a circle class that will take the value of a radius and
return the area of the circle
'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3300,
2556,
1542,
1315,
25,
1558,
25,
3980,
13130,
201,
198,
201,
198,
31,
9800,
25,
308,
2915,
201,
198,
37811,
201,
198,
201,
198,
... | 3.132911 | 158 |
from panshell.core import Shell
from panshell.baidu import baiduFS
from panshell.local import localFS
| [
198,
6738,
36209,
12758,
13,
7295,
1330,
17537,
198,
198,
6738,
36209,
12758,
13,
65,
1698,
84,
1330,
275,
1698,
84,
10652,
198,
6738,
36209,
12758,
13,
12001,
1330,
1957,
10652,
628
] | 3.28125 | 32 |
import models.networks as net
# image channels
input_nc_a, input_nc_b = 3, 3
output_nc_a, output_nc_b = 3, 3
# number of gen filters in first conv layer
ngf_a, ngf_b = 64, 64
# number of discrim filters in first conv layer
ndf_a, ndf_b = 64, 64
# generator: 'resnet_9blocks', 'resnet_6blocks', 'unet_128', 'unet_256'
netG_a, netG_b = 'resnet_9blocks', 'resnet_9blocks'
# discriminator: 'basic', 'n_layers', 'pixel'
netD_a, netD_b = 'basic', 'basic'
netG_A = net.define_G(input_nc_a, output_nc_a, ngf_a, netG_a)
netG_B = net.define_G(input_nc_b, output_nc_b, ngf_b, netG_b)
netD_A = net.define_D(output_nc_a, ndf_a, netD_a)
netD_B = net.define_D(output_nc_b, ndf_b, netD_b)
with open('net_structure.txt', 'w') as outfile:
print('############ Generator A ############', file=outfile)
print(netG_A, file = outfile, end = '\n\n\n')
print('############ Generator B ############', file=outfile)
print(netG_B, file = outfile, end = '\n\n\n')
print('############ Discriminator A ############', file=outfile)
print(netD_A, file = outfile, end = '\n\n\n')
print('############ Discriminator B ############', file=outfile)
print(netD_B, file = outfile, end = '\n\n\n') | [
11748,
4981,
13,
3262,
5225,
355,
2010,
201,
198,
201,
198,
2,
2939,
9619,
201,
198,
15414,
62,
10782,
62,
64,
11,
5128,
62,
10782,
62,
65,
796,
513,
11,
513,
201,
198,
22915,
62,
10782,
62,
64,
11,
5072,
62,
10782,
62,
65,
796,... | 2.253676 | 544 |
"""
atmospheric.py, Sam Murphy (2016-10-26)
Atmospheric water vapour, ozone and AOT from GEE
Usage
H2O = Atmospheric.water(geom,date)
O3 = Atmospheric.ozone(geom,date)
AOT = Atmospheric.aerosol(geom,date)
"""
import ee
import geemap
from Py6S import *
import os, sys, time, math, datetime
class S2_L1C():
"""
Batch function to correct all images inside a GEE collection
Reference idea from Cristian Iranzo https://github.com/samsammurphy/gee-atmcorr-S2/issues/7
conversion():
- Geom: Area of interest which 6S corresction is applied
- imgCol: Sentinel 2 L1C image collection
- gpath: Asset folder and image sufix to save images in GEE (e.g. users/samsammurphy/shared/sentinel2/6S/ESRIN_).
The direction is completed by image dateString.
"""
| [
37811,
198,
265,
6384,
15011,
13,
9078,
11,
3409,
14424,
357,
5304,
12,
940,
12,
2075,
8,
198,
198,
2953,
6384,
15011,
1660,
38187,
454,
11,
37170,
290,
317,
2394,
422,
402,
6500,
198,
198,
28350,
198,
39,
17,
46,
796,
41516,
13,
... | 2.727586 | 290 |
from urllib.parse import parse_qs
import pytest
from pyparsing import ParseException
from great_expectations.core.urn import ge_urn
| [
6738,
2956,
297,
571,
13,
29572,
1330,
21136,
62,
48382,
198,
198,
11748,
12972,
9288,
198,
6738,
279,
4464,
945,
278,
1330,
2547,
325,
16922,
198,
198,
6738,
1049,
62,
1069,
806,
602,
13,
7295,
13,
700,
1330,
4903,
62,
700,
628,
62... | 3.136364 | 44 |
"""
*Gi*
An atomic key.
From the japanese *kagi*,
"""
from dataclasses import dataclass
from typing import Optional
from .base import BaseKey
from .modifier import ModifierKey
__all__ = ["Gi"]
@dataclass
| [
37811,
628,
220,
220,
220,
1635,
33704,
9,
628,
220,
1052,
17226,
1994,
13,
628,
220,
3574,
262,
474,
2674,
2771,
1635,
74,
18013,
25666,
220,
198,
198,
37811,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
... | 2.871795 | 78 |
from typing import List
import orjson # type: ignore
from pydantic import BaseModel
from app.models.availability import Availability
from app.models.ps5_version import PS5Version
from app.models.stock_status import StockStatus
from app.utils.json_utils import orjson_dumps
| [
6738,
19720,
1330,
7343,
198,
198,
11748,
393,
17752,
220,
1303,
2099,
25,
8856,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
198,
6738,
598,
13,
27530,
13,
47274,
1330,
43138,
198,
6738,
598,
13,
27530,
13,
862,
20,
62,
9641,
... | 3.743243 | 74 |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from graph.types import NNEdge, SSDDetectorParameters
from importer.common.provisional_dim import ProvisionalDim
from importer.tflite2.common.tflite_node import TFLiteNode
from importer.tflite2.common.tflite_tensor import TensorBase
from quantization.multiplicative.mult_quantization import \
MultSSDDetectorQuantizationRecord
from quantization.multiplicative.symmetric.symmetric_mult_qtype import \
SymmetricMultQType
from utils.node_id import NodeId
from ..backend_handler import BackendHandler
from ..handler import tflite_op, tflite_custom_op
@tflite_op("CUSTOM")
@tflite_custom_op("TFLite_Detection_PostProcess")
| [
2,
15069,
357,
34,
8,
12131,
220,
3469,
54,
3080,
21852,
11,
35516,
198,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
262,
2846,
286,
262,
22961,
6708,
3529,
3611,
5094... | 3.447837 | 393 |
"""Tests for jabbar."""
| [
37811,
51,
3558,
329,
33896,
5657,
526,
15931,
198
] | 2.666667 | 9 |
from PyPDF2 import PdfFileMerger
files_array = read_files()
merger = PdfFileMerger()
for pdf in files_array:
merger.append(pdf)
merger.write("result.pdf")
merger.close()
| [
6738,
9485,
20456,
17,
1330,
350,
7568,
8979,
13102,
1362,
628,
198,
198,
16624,
62,
18747,
796,
1100,
62,
16624,
3419,
198,
198,
647,
1362,
796,
350,
7568,
8979,
13102,
1362,
3419,
198,
198,
1640,
37124,
287,
3696,
62,
18747,
25,
198... | 2.608696 | 69 |
import fcntl
import os
import asyncio
import numpy as np
import cv2
def bytes2np(bytesarray):
"""
bytes two numpy->array
:param bytesarray:
:return:
"""
nparr = np.frombuffer(bytesarray, np.uint8)
img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR) # cv2.IMREAD_COLOR in OpenCV 3.1
return img_np
class asyncfile:
"""
"""
BLOCK_SIZE = 512
if __name__ == '__main__':
tasks = [aimread('25-1028.jpg'), aimread('6-9060_2082_742.jpg')]
task_results, tt2 = asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks))
for tk_re in task_results:
print(tk_re.result().shape)
| [
11748,
277,
66,
429,
75,
198,
11748,
28686,
198,
11748,
30351,
952,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
628,
198,
4299,
9881,
17,
37659,
7,
33661,
18747,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
... | 2.208904 | 292 |
peso = float(input('Qual o seu peso? (Kg) '))
altura = float(input('Qual a sua altura? (m) '))
imc = peso / altura ** 2
print(imc)
if imc < | [
12272,
78,
796,
12178,
7,
15414,
10786,
46181,
267,
384,
84,
32317,
78,
30,
357,
42,
70,
8,
705,
4008,
198,
2501,
5330,
796,
12178,
7,
15414,
10786,
46181,
257,
424,
64,
5988,
5330,
30,
357,
76,
8,
705,
4008,
198,
320,
66,
796,
... | 2.241935 | 62 |
"""
module for functions adding layers of circuits
"""
import sys
import itertools
import numpy as np
import cirq
import networkx as nx
import tensorflow as tf
from typing import Sequence, Union, Callable, Any, Optional, Tuple, List
from ..circuit import Circuit
from ..densitymatrix import DMCircuit
from ..gates import num_to_tensor, array_to_tensor, _swap_matrix
from ..channels import depolarizingchannel
thismodule = sys.modules[__name__]
Tensor = Any # tf.Tensor
Graph = Any # nx.Graph
Symbol = Any # sympy.Symbol
def _resolve(symbol: Union[Symbol, Tensor], i: int = 0) -> Tensor:
"""
make sure the layer is compatible with both multiparam and single param requirements
what could be the input: list/tuple of sympy.symbol, tf.tensor with 1D or 0D shape
"""
if isinstance(symbol, list) or isinstance(symbol, tuple):
return symbol[i]
elif tf.is_tensor(symbol): # tf.tensor of 1D or 2D
if len(symbol.shape) == 1:
return symbol[i]
else: # len(shape) == 0
return symbol
else: # sympy.symbol
return symbol
def generate_gate_layer(gate: str) -> None:
"""
$$e^{-i\theta \sigma}$$
:param gate:
:return:
"""
f.__doc__ = """%slayer""" % gate
f.__repr__ = """%slayer""" % gate # type: ignore
f.__trainable__ = False if gate in Circuit.sgates else True # type: ignore
setattr(thismodule, gate + "layer", f)
def generate_any_gate_layer(gate: str) -> None:
"""
$$e^{-i\theta_i \sigma}$$
:param gate:
:return:
"""
f.__doc__ = """any%slayer""" % gate
f.__repr__ = """any%slayer""" % gate # type: ignore
f.__trainable__ = False if gate in Circuit.sgates else True # type: ignore
setattr(thismodule, "any" + gate + "layer", f)
for gate in ["rx", "ry", "rz", "H", "I"]:
generate_gate_layer(gate)
generate_any_gate_layer(gate)
for gates in itertools.product(*[["x", "y", "z"] for _ in range(2)]):
gates = gates[0] + gates[1]
generate_double_gate(gates) # type: ignore
generate_double_gate_layer(gates) # type: ignore
generate_any_double_gate_layer(gates) # type: ignore
generate_double_gate_layer_bitflip(gates) # type: ignore
generate_double_gate_layer_bitflip_mc(gates) # type: ignore
generate_any_double_gate_layer_bitflip_mc(gates) # type: ignore
for gates in itertools.product(
*[["rx", "ry", "rz", "xx", "yy", "zz"] for _ in range(2)]
):
generate_double_layer_block(gates) # type: ignore
## below is similar layer but in cirq API instead of tensrocircuit native API
## special notes to the API, the arguments order are different due to historical reason
basis_rotation = {
"x": (cirq.H, cirq.H),
"y": (cirq.rx(-np.pi / 2), cirq.rx(np.pi / 2)),
"z": None,
}
def generate_cirq_gate_layer(gate: str) -> None:
"""
$$e^{-i\theta \sigma}$$
:param gate:
:return:
"""
f.__doc__ = """%slayer""" % gate
f.__repr__ = """%slayer""" % gate # type: ignore
f.__trainable__ = False if isinstance(getattr(cirq, gate), cirq.Gate) else True # type: ignore
setattr(thismodule, "cirq" + gate + "layer", f)
def generate_cirq_any_gate_layer(gate: str) -> None:
"""
$$e^{-i\theta \sigma}$$
:param gate:
:return:
"""
f.__doc__ = """any%slayer""" % gate
f.__repr__ = """any%slayer""" % gate # type: ignore
f.__trainable__ = True # type: ignore
setattr(thismodule, "cirqany" + gate + "layer", f)
def generate_cirq_any_double_gate_layer(gates: str) -> None:
"""
The following function generated layer should be used with special case,
as its soundness depends on the nature of task or problem, it doesn't always make sense
:param gates:
:return:
"""
f.__doc__ = """any%slayer""" % gates
f.__repr__ = """any%slayer""" % gates # type: ignore
f.__trainable__ = True # type: ignore
setattr(thismodule, "cirqany" + gates + "layer", f)
for gate in ["rx", "ry", "rz", "H"]:
generate_cirq_gate_layer(gate)
if gate != "H":
generate_cirq_any_gate_layer(gate)
for gates in itertools.product(*[["x", "y", "z"] for _ in range(2)]):
gates = gates[0] + gates[1]
generate_cirq_double_gate(gates) # type: ignore
generate_cirq_double_gate_layer(gates) # type: ignore
generate_cirq_any_double_gate_layer(gates) # type: ignore
generate_cirq_double_gate_layer("swap")
generate_cirq_any_double_gate_layer("swap")
generate_cirq_double_gate_layer("cnot")
| [
37811,
198,
21412,
329,
5499,
4375,
11685,
286,
24907,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
340,
861,
10141,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10774,
80,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
11192,
27... | 2.461162 | 1,841 |
# -*- coding: utf-8 -*-
'''
Created on 04 11, 2016
@author: tolerious
'''
from django_weixin.models import *
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render, redirect
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# from django_weixin.ierror import *
from django_weixin.api_errors import Http200, Http400
import logging, json
@csrf_exempt
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
41972,
319,
8702,
1367,
11,
1584,
198,
198,
31,
9800,
25,
8214,
699,
198,
198,
7061,
6,
198,
198,
6738,
42625,
14208,
62,
732,
844,
259,
13,
27530,
133... | 2.934132 | 167 |
"""Audio file(wav) feature extractor."""
import concurrent
import glob
import os
from absl import app
from absl import flags
from absl import logging
import pandas
import tqdm
import feature
flags.DEFINE_string('wav_dir', '', 'Directory to audio files.')
flags.DEFINE_string('csv_output', '', 'Path to csv output.')
FLAGS = flags.FLAGS
if __name__ == '__main__':
app.run(main)
| [
37811,
21206,
2393,
7,
45137,
8,
3895,
7925,
273,
526,
15931,
198,
198,
11748,
24580,
198,
11748,
15095,
198,
11748,
28686,
198,
198,
6738,
2352,
75,
1330,
598,
198,
6738,
2352,
75,
1330,
9701,
198,
6738,
2352,
75,
1330,
18931,
198,
1... | 3 | 130 |
import argparse
from sudachi import analyze_single
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('knowntext', help='known text, in whatever format')
parser.add_argument('sortedtoks', help='tsv of (text, toks) sorted by descending goodness')
parser.add_argument('word', help='target word, in normal form')
args = parser.parse_args()
known_normals_set = set(normal for (orig, fields_str, normal) in analyze_single(open(args.knowntext).read()))
print('Known count:', len(known_normals_set))
print('Known set bytes size:', len('|'.join(known_normals_set).encode('utf-8')))
if args.word in known_normals_set:
print('Target word seems known already, but proceeding')
known_normals_set.remove(args.word)
line_count = 0
for line in open(args.sortedtoks):
line_count += 1
sline = line.rstrip('\n')
(text, toks_str) = sline.split('\t')
toks_set = set(toks_str.split('|')) if toks_str else set()
unknown_toks = toks_set.difference(known_normals_set)
if args.word in unknown_toks:
other_toks = unknown_toks.difference(set([args.word]))
print('GOOD' if (len(other_toks) == 0) else 'NEED', text, other_toks or '', line_count)
| [
11748,
1822,
29572,
198,
198,
6738,
424,
67,
14299,
1330,
16602,
62,
29762,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
220,
220,
... | 2.507782 | 514 |
from .memcached import Memcached | [
6738,
764,
11883,
66,
2317,
1330,
4942,
66,
2317
] | 3.555556 | 9 |
"""
A mish-mash of functions for recalibration. Should probably be renamed in the near future.
"""
import pysam
import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegression as LR
from sklearn.isotonic import IsotonicRegression as IR
import os
import os.path
import sys
import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import khmer
import scipy.stats
import kbbq.recaltable as recaltable
import kbbq.plot
import pandas as pd
import datetime
import kbbq.benchmark
def tstamp():
"""
Return the current time up to the second in ISO format as a string.
:return: current time with brackets
:rtype: str
"""
return '[ ' + datetime.datetime.today().isoformat(' ', 'seconds') + ' ]'
def load_positions(posfile):
"""
Get positions that are covered by a non-zipped BED file.
Pass the file name; it will be opened with :func:`python.open`.
This is slightly different than :func:`benchmark.get_bed_dict`
because it doesn't require a refdict and only provides a list
of 0-based positions, not a boolean array of all positions in
the reference.
"""
d = dict()
with open(posfile, 'r') as infh:
for line in infh:
# bed format: pos is 0 based and end is 1 based
chrom, pos, end = line.rstrip().split()
for i in range(int(pos), int(end)):
d.setdefault(chrom, list()).append(i)
return d
def get_var_sites(vcf):
"""
Get positions covered by any record in a VCF file.
Pass the file name. Each value in the returned dict will
be a list of 0-based positions.
"""
vcf = pysam.VariantFile(vcf)
d = dict()
for record in vcf:
for i in range(record.start, record.stop, 1):
#record.start is 0-based inclusive
#record.stop is 0-based exclusive
d.setdefault(record.chrom, list()).append(i)
return d
def find_read_errors(read, ref, variable):
"""
Use the CIGAR to find errors in the read or sites to skip.
Softclipped bases will be added to the skip array.
Returns a tuple with the error array and the skip array.
We don't consider indel errors.
"""
# here's how gatk does it: https://github.com/broadinstitute/gatk/blob/78df6b2f6573b3cd2807a71ec8950d7dfbc9a65d/src/main/java/org/broadinstitute/hellbender/utils/recalibration/BaseRecalibrationEngine.java#L370
seq = np.array(list(read.query_sequence), dtype = np.unicode)
skips = np.zeros(seq.shape, dtype = np.bool)
cigartuples = read.cigartuples #list of tuples [(operation, length)]
cigarops, cigarlen = zip(*cigartuples)
#reference length from CIGAR: https://github.com/samtools/htsjdk/blob/942e3d6b4c28a8e97c457dfc89625bb403bdf83c/src/main/java/htsjdk/samtools/Cigar.java#L76
#sum lengths of MDN=X
#reflen = np.sum(cigarlen[np.any([cigarops == 0, cigarops == 2, cigarops == 3, cigarops == 7, cigarops == 8], axis = 0)])
subset_variable = variable[read.reference_name][read.reference_start : read.reference_end]
refseq = np.array(list(ref[read.reference_name][read.reference_start : read.reference_end]), dtype = np.unicode)
readerrors = np.zeros(seq.shape, dtype = np.bool)
readidx = 0
refidx = 0
for op, l in cigartuples:
if op == 0 or op == 7 or op == 8:
#match
readerrors[readidx : readidx + l] = (refseq[refidx : refidx + l] != seq[readidx : readidx + l])
skips[readidx : readidx + l] = subset_variable[refidx : refidx + l]
readidx = readidx + l
refidx = refidx + l
elif op == 1:
#insertion in read
#gatk counts all insertions as aligning to the ref base to the right (maybe?)
#i think for now we skip if it's skipped on both sides
skips[readidx:readidx+l] = np.logical_and(subset_variable[refidx-1], subset_variable[refidx])
readidx = readidx + l
elif op == 2 or op == 3:
#deletion in read or N op
# N is for introns in mRNA
skips[readidx - 1] = np.logical_or(skips[readidx-1],np.any(subset_variable[refidx: refidx + l]))
refidx = refidx + l
elif op == 4:
#soft clip, consumes query not ref
skips[readidx : readidx + l] = True
readidx = readidx + l
elif op == 5 or op == 6:
#hard clip or pad, do nothing
# it is covered, it's just optimized out
# see https://github.com/nedbat/coveragepy/issues/198
continue # pragma: no cover
else:
#unrecognized
# also covered
raise ValueError("Unrecognized Cigar Operation " + \
str(op) + " In Read\n" + str(read)) #pragma: no cover
return readerrors, skips
class RescaledNormal:
"""
A class to cache the rescaled normal prior used in the bayesian recalibration
model.
Attributes
* :attr:`maxscore` - max score supported
* :attr:`prior_dist` - numpy array of prior probability
Methods
* :meth:`prior` - get the prior probability for a given quality score difference
Most of these attributes are nonsense; the "proper" way to interact
with the class is via the :attr:`prior_dist` array. If that makes you
uncomfortable, use the provided accessor function :meth:`prior`.
Under no circumstances should you attempt to replace any of these attributes,
it will most likely not have the desired effect. The caching mechanism here
only works because the class attributes are immediately instatiated when the class
is created, so by the time you replace them it won't matter.
Reimplement it yourself if you want a different prior.
"""
oldset = np.seterr(all = 'raise')
maxscore = 42
"""
The maximum quality score supported by this class.
"""
possible_diffs = np.arange(maxscore+1, dtype = np.int_)
prior_dist = np.zeros(possible_diffs.shape[0], dtype = np.longdouble)
for i in range(possible_diffs.shape[0]):
try:
prior_dist[i] = np.log(.9 * np.exp(-((possible_diffs[i]/.5)**2)/2))
except FloatingPointError:
prior_dist[i] = np.NINF
np.seterr(**oldset)
@classmethod
def prior(cls, difference):
"""
Return the prior probability for a given difference in quality score.
:param int difference: The difference in quality score
:returns: the prior probability
:rtype: np.longdouble
"""
return cls.prior_dist[difference]
class Dinucleotide:
"""
A class to cache dinucleotides and maintain a consistent dinuc -> int
map throughout the module.
"""
nucleotides = ['A','T','G','C']
"""
List of valid nucleotides
"""
complement = {'A' : 'T', 'T' : 'A', 'G' : 'C', 'C' : 'G'}
"""
Dictionary for complementing nucleotides
"""
#dinucs = [i + j for i in nucleotides for j in nucleotides]
#the above won't work because list comprehensions ignore
#class scope except the outermost variable. we can use
# a temporary function, even though it looks bad :(
dinucs = lambda d:[i + j for i in d for j in d]
dinucs = dinucs(nucleotides)
"""
List of valid dinucleotides
"""
dinuc_to_int = dict(zip(dinucs, range(len(dinucs))))
"""
Dictionary mapping dinuc -> int
"""
vectorized_get = np.vectorize(dinuc_to_int.get, otypes = [np.int])
vectorized_complement = np.vectorize(complement.get, otypes = [np.unicode])
@classmethod
@classmethod
def gatk_delta_q(prior_q, numerrs, numtotal, maxscore = 42):
"""
Calculate the shift in quality scores from the prior given
data.
This is achieved by finding the difference between the maximum
a posteriori and the prior point estimate of Q.
"""
assert prior_q.shape == numerrs.shape == numtotal.shape
possible_q = np.arange(maxscore+1, dtype = np.int)
diff = np.absolute(np.subtract.outer(possible_q, prior_q).astype(np.int))
#1st dim is possible qs
prior = RescaledNormal.prior_dist[diff]
broadcast_errs = np.broadcast_to(numerrs, possible_q.shape + numerrs.shape).copy()
broadcast_tot = np.broadcast_to(numtotal, possible_q.shape + numtotal.shape).copy()
p = q_to_p(possible_q).astype(np.float)
while len(p.shape) < len(broadcast_tot.shape):
p = np.expand_dims(p, -1)
broadcast_p = np.broadcast_to( p, broadcast_tot.shape ).copy()
loglike = scipy.stats.binom.logpmf(broadcast_errs+1, broadcast_tot+2, broadcast_p)
#loglike should now have same dims as prior
assert loglike.shape == prior.shape
posterior = prior + loglike
posterior_q = np.argmax(posterior, axis = 0)
assert posterior_q.shape == prior_q.shape
return posterior_q - prior_q
## Generic covariate functions
## Recalibrating FASTQ reads
def fastq_infer_rg(read):
"""
Infer the read group from appended read information, such as produced by the
samtools fastq command.
Requires the read to be formatted such the rg tag is added to the end of the
read name delimited by a ``_``. Returns the read group tag.
"""
rgstr = read.name.split(sep='_')[1]
assert rgstr[0:2] == 'RG'
return rgstr.split(':')[-1]
## Recalibrate reads from a BAM
| [
37811,
198,
32,
29406,
12,
76,
1077,
286,
5499,
329,
42653,
571,
1358,
13,
10358,
2192,
307,
25121,
287,
262,
1474,
2003,
13,
198,
37811,
198,
198,
11748,
279,
893,
321,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1341,
35720,
198... | 2.509882 | 3,744 |
# Relaxation system: relaxes to Burgers as tau->0
#
import numpy
from models import relaxation_burgers
from bcs import outflow
from simulation import simulation
from methods import weno3_upwind
from rk import imex222
from grid import grid
from matplotlib import pyplot
Ngz = 4
Npoints = 200
tau = 1.0
tau2 = 0.01
L = 1
interval = grid([-L, L], Npoints, Ngz)
qL = numpy.array([1.0, 0.5])
qR = numpy.array([0.0, 0.0])
model = relaxation_burgers.relaxation_burgers(initial_data = relaxation_burgers.initial_riemann(qL, qR))
source = relaxation_burgers.relaxation_source(tau)
source2 = relaxation_burgers.relaxation_source(tau2)
sim = simulation(model, interval, weno3_upwind, imex222(source), outflow)
sim.evolve(0.8)
sim.plot_system()
pyplot.show()
sim2 = simulation(model, interval, weno3_upwind, imex222(source2), outflow)
sim2.evolve(0.8)
sim2.plot_system()
pyplot.show()
| [
2,
46883,
341,
1080,
25,
8960,
274,
284,
21435,
364,
355,
256,
559,
3784,
15,
198,
2,
198,
198,
11748,
299,
32152,
198,
6738,
4981,
1330,
34205,
62,
7423,
364,
198,
6738,
275,
6359,
1330,
503,
11125,
198,
6738,
18640,
1330,
18640,
1... | 2.636637 | 333 |
import archinfo
ordered_argument_regs = {
'ARMEL': [
archinfo.ArchARMEL.registers['r0'][0],
archinfo.ArchARMEL.registers['r1'][0],
archinfo.ArchARMEL.registers['r2'][0],
archinfo.ArchARMEL.registers['r3'][0],
archinfo.ArchARMEL.registers['r4'][0],
archinfo.ArchARMEL.registers['r5'][0],
archinfo.ArchARMEL.registers['r6'][0],
archinfo.ArchARMEL.registers['r7'][0],
archinfo.ArchARMEL.registers['r8'][0],
archinfo.ArchARMEL.registers['r9'][0],
archinfo.ArchARMEL.registers['r10'][0],
archinfo.ArchARMEL.registers['r11'][0],
archinfo.ArchARMEL.registers['r12'][0]
],
'AARCH64': [
archinfo.ArchAArch64.registers['x0'][0],
archinfo.ArchAArch64.registers['x1'][0],
archinfo.ArchAArch64.registers['x2'][0],
archinfo.ArchAArch64.registers['x3'][0],
archinfo.ArchAArch64.registers['x4'][0],
archinfo.ArchAArch64.registers['x5'][0],
archinfo.ArchAArch64.registers['x6'][0],
archinfo.ArchAArch64.registers['x7'][0],
],
'MIPS32': [
archinfo.ArchMIPS32.registers['a0'][0],
archinfo.ArchMIPS32.registers['a1'][0],
archinfo.ArchMIPS32.registers['a2'][0],
archinfo.ArchMIPS32.registers['a3'][0],
],
}
return_regs = {
'ARMEL': archinfo.ArchARMEL.registers['r0'][0],
'AARCH64': archinfo.ArchAArch64.registers['x0'][0],
'MIPS32': archinfo.ArchMIPS32.registers['v0'][0]
}
link_regs = {
'ARMEL': archinfo.ArchARMEL.registers['lr'][0],
'AARCH64': archinfo.ArchAArch64.registers['x30'][0],
'MIPS32': archinfo.ArchMIPS32.registers['ra'][0]
}
def arg_reg_name(p, n):
"""
Return the name of a register
:param p: angr project
:param n: register offset
:return: register name
"""
return p.arch.register_names[ordered_argument_regs[p.arch.name][n]]
# FIXME: so far we only consider arguments passed through registers
def get_ord_arguments_call(p, b_addr):
"""
Retrieves the list of instructions setting arguments for a function call. It checks the arguments in order
so to infer the arity of the function:
Example: if the first argument (e.g., r0 in ARM) is not set, it assumes the arity's function is 0.
:param p: angr project
:param b_addr: basic block address
:return: the arguments of a function call
"""
set_params = []
b = p.factory.block(b_addr)
for reg_off in ordered_argument_regs[p.arch.name]:
put_stmts = [s for s in b.vex.statements if s.tag == 'Ist_Put' and s.offset == reg_off]
if not put_stmts:
break
# if more than a write, only consider the last one
# eg r0 = 5
# ....
# r0 = 10
# BL foo
put_stmt = put_stmts[-1]
set_params.append(put_stmt)
return set_params
def get_any_arguments_call(p, b_addr):
"""
Retrieves the list of instructions setting arguments for a function call.
:param p: angr project
:param b_addr: basic block address
:return: instructions setting arguments
"""
set_params = []
b = p.factory.block(b_addr)
put_stmts = [s for s in b.vex.statements if s.tag == 'Ist_Put']
for stmt in put_stmts:
if stmt.offset in ordered_argument_regs[p.arch.name]:
set_params.append(stmt)
return set_params
def get_arity(p, b_addr):
"""
Retrieves the arity by inspecting a funciton call
:param p: angr project
:param b_addr: basic block address
:return: arity of the function
"""
return len(get_ord_arguments_call(p, b_addr))
| [
11748,
3934,
10951,
198,
198,
24071,
62,
49140,
62,
2301,
82,
796,
1391,
198,
220,
220,
220,
705,
33456,
3698,
10354,
685,
198,
220,
220,
220,
220,
220,
220,
220,
3934,
10951,
13,
19895,
33456,
3698,
13,
2301,
6223,
17816,
81,
15,
6... | 2.230675 | 1,630 |
from . import encoder
import os
from easyvvuq.encoders import BaseEncoder
from boutdata.data import BoutOptionsFile
| [
6738,
764,
1330,
2207,
12342,
198,
198,
11748,
28686,
198,
6738,
2562,
25093,
84,
80,
13,
12685,
375,
364,
1330,
7308,
27195,
12342,
198,
6738,
18222,
7890,
13,
7890,
1330,
40808,
29046,
8979,
628,
628
] | 3.428571 | 35 |
# -*- coding: utf-8 -*-
""" tabview.py -- View a tab-delimited file in a spreadsheet-like display.
Scott Hansen <firecat four one five three at gmail dot com>
Based on code contributed by A.M. Kuchling <amk at amk dot ca>
"""
from __future__ import print_function, division, unicode_literals
import csv
import _curses
import curses
import curses.ascii
import datetime
import locale
import io
import os
import re
import string
import sys
from collections import Counter
from curses.textpad import Textbox
from operator import itemgetter
from subprocess import Popen, PIPE
from textwrap import wrap
import unicodedata
# Compatibility shims: give Python 2 and Python 3 a single set of string/file
# names so the rest of the module does not need version checks everywhere.
if sys.version_info.major < 3:
    # Python 2.7 shim
    str = unicode  # make bare `str` mean text (unicode) on Python 2
else:
    basestring = str  # `basestring` does not exist on Python 3; alias to str
    file = io.FileIO  # the `file` builtin was removed in Python 3
# Python 3 wrappers
class Viewer:
    """The actual CSV viewer class.
    Args:
        args: other positional arguments. See view() for descriptions.
            stdscr, data
        kwargs: dict of other keyword arguments.
            start_pos, column_width, column_gap, trunc_char, column_widths,
            search_str, double_width
    """
    def _init_double_width(self, dw):
        """Initialize self._cell_len to determine if double width characters
        are taken into account when calculating cell widths.
        """
        self.double_width = dw
        # Enable double width character processing for small files
        if self.double_width is False:
            # heuristic: only auto-enable the (slower) double-width measuring
            # when the total cell count is small enough not to hurt startup
            self.double_width = len(self.data) * self.num_data_columns < 65000
        if self.double_width is True:
            self._cell_len = self.__cell_len_dw
        else:
            # plain len() is sufficient when no double-width chars are counted
            self._cell_len = len
    def _init_column_widths(self, cw, cws):
        """Initialize column widths.
        Args: cw - column_width mode ('max', 'mode', or an int)
              cws - explicit list of column widths, or None
        """
        # Fall back to computed widths when no explicit list was given or the
        # list does not match the number of columns in the data.
        if cws is None or len(self.data[0]) != len(cws):
            self._get_column_widths(cw)
        else:
            self.column_width = cws
    def column_xw(self, x):
        """Return the position and width of the requested column"""
        # x is a screen-relative column index; offset by win_x for data coords
        xp = sum(self.column_width[self.win_x:self.win_x + x]) \
            + x * self.column_gap
        # clamp the width so the column never draws past the screen edge
        w = max(0, min(self.max_x - xp, self.column_width[self.win_x + x]))
        return xp, w
    def show_cell(self):
        "Display current cell in a pop-up window"
        yp = self.y + self.win_y
        xp = self.x + self.win_x
        s = "\n" + self.data[yp][xp]
        # NOTE(review): s always starts with "\n" so it is never falsy; this
        # early-return for empty cells appears to be dead code — confirm.
        if not s:
            # Only display pop-up if cells have contents
            return
        TextBox(self.scr, data=s, title=self.location_string(yp, xp))()
        self.resize()
    def show_info(self):
        """Display data information in a pop-up window
        """
        fn = self.info
        yp = self.y + self.win_y
        xp = self.x + self.win_x
        location = self.location_string(yp, xp)
        # size of the top-level data object only (sys.getsizeof is shallow)
        size = sizeof_fmt(sys.getsizeof(self.data))
        rows_cols = str((len(self.data), self.num_data_columns))
        info = [("Filename/Data Info:", fn),
                ("Current Location:", location),
                ("Total Rows/Columns:", rows_cols),
                ("Data Size:", size)]
        display = "\n\n".join(["{:<20}{:<}".format(i, j)
                               for i, j in info])
        TextBox(self.scr, data=display)()
        self.resize()
    def _search_validator(self, ch):
        """Fix Enter and backspace for textbox.
        Used as an aux function for the textpad.edit method
        """
        if ch == curses.ascii.NL:  # Enter
            # BEL terminates Textbox.edit(), ending input
            return curses.ascii.BEL
        elif ch == 127:  # Backspace
            self.search_str = self.textpad.gather().strip().lower()[:-1]
            return 8
        else:
            if 0 < ch < 256:
                c = chr(ch)
                if c in string.printable:
                    # incremental search: update the query and re-search on
                    # every printable keystroke
                    res = self.textpad.gather().strip().lower()
                    self.search_str = res + chr(ch)
                    self.search_results(look_in_cur=True)
                    self.display()
            return ch
    def search(self):
        """Open search window, get input and set the search string."""
        # 3-line input window along the bottom of the screen
        scr2 = curses.newwin(3, self.max_x, self.max_y - 3, 0)
        scr3 = scr2.derwin(1, self.max_x - 12, 1, 9)
        scr2.box()
        scr2.move(1, 1)
        addstr(scr2, "Search: ")
        scr2.refresh()
        curses.curs_set(1)
        # remember how many rows the search window covers so display() can
        # avoid drawing table rows underneath it
        self._search_win_open = 3
        self.textpad = Textbox(scr3, insert_mode=True)
        self.search_str = self.textpad.edit(self._search_validator)
        self.search_str = self.search_str.lower().strip()
        try:
            curses.curs_set(0)
        except _curses.error:
            # some terminals don't support cursor visibility changes
            pass
        if self.search_str:
            self.init_search = None
        self._search_win_open = 0
    def search_results(self, rev=False, look_in_cur=False):
        """Given self.search_str or self.init_search, find next result after
        current position and reposition the cursor there.
        Args: rev - True/False search backward if true
              look_in_cur - True/False start search in current cell
        """
        if not self.search_str and not self.init_search:
            return
        self.search_str = self.search_str or self.init_search
        yp, xp = self.y + self.win_y, self.x + self.win_x
        if rev is True:
            # backward search is implemented as a forward search over
            # reversed data; coordinates are mirrored both ways below
            data, yp, xp = self._reverse_data(self.data, yp, xp)
        else:
            data = self.data
        if look_in_cur is False:
            # Skip ahead/back one cell
            if xp < len(data[0]) - 1:
                xp += 1
            elif xp >= len(data[0]) - 1 and yp < len(data) - 1:
                # Skip ahead a line if at the end of the current line
                yp += 1
                xp = 0
            else:
                # Skip back to the top if at the end of the data
                yp = xp = 0
        # try the search regions in wrap-around order: rest of current line,
        # lines below, lines above, then start of current line
        search_order = [self._search_cur_line_r,
                        self._search_next_line_to_end,
                        self._search_next_line_from_beg,
                        self._search_cur_line_l]
        for search in search_order:
            y, x, res = search(data, yp, xp)
            if res is True:
                yp, xp = y, x
                break
        if rev is True:
            # undo the reversal so self.data and the hit coordinates are
            # back in normal orientation
            self.data, yp, xp = self._reverse_data(data, yp, xp)
        if res is True:
            # goto_yx expects 1-based coordinates
            self.goto_yx(yp + 1, xp + 1)
    def search_results_prev(self, rev=False, look_in_cur=False):
        """Search backwards"""
        self.search_results(rev=True, look_in_cur=look_in_cur)
    def _search_cur_line_r(self, data, yp, xp):
        """ Current line first, from yp,xp to the right """
        res = False
        for x, item in enumerate(data[yp][xp:]):
            if self.search_str in item.lower():
                # x is relative to the slice start, so offset xp
                xp += x
                res = True
                break
        return yp, xp, res
    def _search_cur_line_l(self, data, yp, xp):
        """Last, search from beginning of current line to current position """
        res = x = False
        for x, item in enumerate(data[yp][:xp]):
            if self.search_str in item.lower():
                res = True
                break
        return yp, x, res
    def _search_next_line_to_end(self, data, yp, xp):
        """ Search from next line to the end """
        res = done = False
        for y, line in enumerate(data[yp + 1:]):
            for x, item in enumerate(line):
                if self.search_str in item.lower():
                    done = True
                    break
            if done is True:
                res = True
                # y is relative to the slice, so re-anchor at yp + 1
                yp, xp = yp + 1 + y, x
                break
        return yp, xp, res
    def _search_next_line_from_beg(self, data, yp, xp):
        """Search from beginning to line before current."""
        res = done = y = x = False
        for y, line in enumerate(data[:yp]):
            for x, item in enumerate(line):
                if self.search_str in item.lower():
                    done = True
                    break
            if done is True:
                res = True
                yp, xp = y, x
                break
        return yp, xp, res
    def sorted_nicely(self, ls, key, rev=False):
        """ Sort the given iterable in the way that humans expect.
        From StackOverflow: http://goo.gl/nGBUrQ
        """
        # NOTE(review): the `key` parameter is ignored; the sort uses the
        # module-level alphanum_key (defined outside this chunk) — confirm.
        return sorted(ls, key=alphanum_key, reverse=rev)
    def toggle_column_width(self):
        """Toggle column width mode between 'mode' and 'max' or set fixed
        column width mode if self.modifier is set.
        """
        try:
            # a numeric key-modifier sets a fixed width (capped to the screen)
            self.column_width_mode = min(int(self.modifier), self.max_x)
            self.modifier = str()
        except ValueError:
            # no numeric modifier: flip between the two automatic modes
            if self.column_width_mode == 'mode':
                self.column_width_mode = 'max'
            else:
                self.column_width_mode = 'mode'
        self._get_column_widths(self.column_width_mode)
        self.recalculate_layout()
    def handle_keys(self):
        """Determine what method to call for each keypress.
        """
        # Non-blocking to allow refresh to happen
        self.scr.timeout(1000)
        c = self.scr.getch()  # Get a keystroke
        self.scr.timeout(-1)
        if c == -1:
            # timeout expired with no key pressed
            return False
        if c == curses.KEY_RESIZE:
            self.resize()
            return True
        if 0 < c < 256:
            c = chr(c)
        # Digits are commands without a modifier
        try:
            found_digit = c.isdigit()
        except AttributeError:
            # Since .isdigit() doesn't exist if c > 256, we need to catch the
            # error for those keys.
            found_digit = False
        if found_digit and (len(self.modifier) > 0 or c not in self.keys):
            self.handle_modifier(c)
        elif c in self.keys:
            # dispatch through the key -> method table
            self.keys[c]()
        else:
            # unrecognized key cancels any pending modifier
            self.modifier = str()
        return True
    def handle_modifier(self, mod):
        """Append digits as a key modifier, clear the modifier if not
        a digit.
        Args:
            mod: potential modifier string
        """
        self.modifier += mod
        if not self.modifier.isdigit():
            self.modifier = str()
    def resize(self):
        """Handle terminal resizing"""
        # Check if screen was re-sized (True or False)
        resize = self.max_x == 0 or \
            curses.is_term_resized(self.max_y, self.max_x)
        if resize is True:
            self.recalculate_layout()
            curses.resizeterm(self.max_y, self.max_x)
    def num_columns_fwd(self, x):
        """Count number of fully visible columns starting at x,
        going forward.
        """
        width = cols = 0
        while (x + cols) < self.num_data_columns \
                and width + self.column_width[x + cols] <= self.max_x:
            width += self.column_width[x + cols] + self.column_gap
            cols += 1
        # always report at least one column, even if it doesn't fully fit
        return max(1, cols)
    def num_columns_rev(self, x):
        """Count number of fully visible columns starting at x,
        going reverse.
        """
        width = cols = 0
        while x - cols >= 0 \
                and width + self.column_width[x - cols] <= self.max_x:
            width += self.column_width[x - cols] + self.column_gap
            cols += 1
        return max(1, cols)
    def recalculate_layout(self):
        """Recalculate the screen layout and cursor position"""
        self.max_y, self.max_x = self.scr.getmaxyx()
        self.vis_columns = self.num_columns = self.num_columns_fwd(self.win_x)
        if self.win_x + self.num_columns < self.num_data_columns:
            # show a truncated extra column on the right edge when there is
            # room for more than the truncation marker
            xc, wc = self.column_xw(self.num_columns)
            if wc > len(self.trunc_char):
                self.vis_columns += 1
        # pull the cursor back on-screen if the resize pushed it off
        if self.x >= self.num_columns:
            self.goto_x(self.win_x + self.x + 1)
        if self.y >= self.max_y - self.header_offset:
            self.goto_y(self.win_y + self.y + 1)
    def location_string(self, yp, xp):
        """Create (y,x) col_label string. Max 30% of screen width. (y,x) is
        padded to the max possible length it could be. Label string gets
        trunc_char appended if it's longer than the allowed width.
        """
        yx_str = "({},{}) "
        label_str = "{},{}"
        max_y = str(len(self.data))
        max_x = str(len(self.data[0]))
        max_yx = yx_str.format(max_y, max_x)
        max_label = label_str.format('-', max(self.header, key=len))
        if self.header_offset != self.header_offset_orig:
            # Hide column labels if header row disabled
            label = ""
            max_width = min(int(self.max_x * .3), len(max_yx))
        else:
            label = label_str.format('-', self.header[xp])
            max_width = min(int(self.max_x * .3), len(max_yx + max_label))
        yx = yx_str.format(yp + 1, xp + 1)
        pad = " " * (max_width - len(yx) - len(label))
        # NOTE(review): `all` shadows the builtin within this method
        all = "{}{}{}".format(yx, label, pad)
        if len(all) > max_width:
            all = all[:max_width - 1] + self.trunc_char
        return all
    def display(self):
        """Refresh the current display"""
        yp = self.y + self.win_y
        xp = self.x + self.win_x
        # Print the current cursor cell in the top left corner
        self.scr.move(0, 0)
        self.scr.clrtoeol()
        info = " {}".format(self.location_string(yp, xp))
        addstr(self.scr, info, curses.A_REVERSE)
        # Adds the current cell content after the 'current cell' display
        wc = self.max_x - len(info) - 2
        s = self.cellstr(yp, xp, wc)
        addstr(self.scr, " " + s, curses.A_NORMAL)
        # Print a divider line
        self.scr.hline(1, 0, curses.ACS_HLINE, self.max_x)
        # Print the header if the correct offset is set
        if self.header_offset == self.header_offset_orig:
            self.scr.move(self.header_offset - 1, 0)
            self.scr.clrtoeol()
            for x in range(0, self.vis_columns):
                xc, wc = self.column_xw(x)
                s = self.hdrstr(x + self.win_x, wc)
                addstr(self.scr, self.header_offset - 1, xc, s, curses.A_BOLD)
        # Print the table data
        for y in range(0, self.max_y - self.header_offset -
                       self._search_win_open):
            yc = y + self.header_offset
            self.scr.move(yc, 0)
            self.scr.clrtoeol()
            for x in range(0, self.vis_columns):
                # highlight the cell under the cursor
                if x == self.x and y == self.y:
                    attr = curses.A_REVERSE
                else:
                    attr = curses.A_NORMAL
                xc, wc = self.column_xw(x)
                s = self.cellstr(y + self.win_y, x + self.win_x, wc)
                if yc == self.max_y - 1 and x == self.vis_columns - 1:
                    # Prevents a curses error when filling in the bottom right
                    # character
                    insstr(self.scr, yc, xc, s, attr)
                else:
                    addstr(self.scr, yc, xc, s, attr)
        self.scr.refresh()
    def hdrstr(self, x, width):
        "Format the content of the requested header for display"
        if len(self.header) <= x:
            s = ""
        else:
            s = self.header[x]
        return self.strpad(s, width)
    def cellstr(self, y, x, width):
        "Format the content of the requested cell for display"
        if len(self.data) <= y or len(self.data[y]) <= x:
            # out-of-range cells render as empty
            s = ""
        else:
            s = self.data[y][x]
        return self.strpad(s, width)
    def _get_column_widths(self, width):
        """Compute column width array
        Args: width - 'max', 'mode', or an integer value
        Returns: [len of col 1, len of col 2, ....]
        """
        if width == 'max':
            self.column_width = self._get_column_widths_max(self.data)
        elif width == 'mode':
            self.column_width = self._get_column_widths_mode(self.data)
        else:
            try:
                width = int(width)
            except (TypeError, ValueError):
                # unparseable width value: fall back to a fixed default
                width = 25
            self.column_width = [width for i in
                                 range(0, self.num_data_columns)]
    @staticmethod
    def __cell_len_dw(s):
        """Return the number of character cells a string will take
        (double-width aware). Defined as self._cell_len in __init__
        """
        # NOTE(review): `len` shadows the builtin inside this method
        len = 0
        for c in s:
            # East-Asian wide characters occupy two terminal cells
            w = 2 if unicodedata.east_asian_width(c) == 'W' else 1
            len += w
        return len
    def _mode_len(self, x):
        """Compute arithmetic mode (most common value) of the length of each item
        in an iterator.
        Args: x - iterator (list, tuple, etc)
        Returns: mode - int.
        """
        lens = [self._cell_len(i) for i in x]
        m = Counter(lens).most_common()
        # If there are a lot of empty columns, use the 2nd most common length
        # besides 0
        try:
            mode = m[0][0] or m[1][0]
        except IndexError:
            mode = 0
        max_len = max(lens) or 1
        diff = abs(mode - max_len)
        # prefer the max length when it differs substantially from the mode
        if diff > (self.column_gap * 2) and diff / max_len > 0.1:
            return max(max(1, self.column_gap), mode)
        else:
            return max(max(1, self.column_gap), max_len)
    def _get_column_widths_mode(self, d):
        """Given a list of lists, return a list of the variable column width
        for each column using the arithmetic mode.
        Args: d - list of lists with x columns
        Returns: list of ints [len_1, len_2...len_x]
        """
        # zip(*d) transposes rows into columns
        d = zip(*d)
        return [self._mode_len(i) for i in d]
    def _get_column_widths_max(self, d):
        """Given a list of lists, return a list of the variable column width
        for each column using the max length.
        Args: d - list of lists with x columns
        Returns: list of ints [len_1, len_2...len_x]
        """
        # zip(*d) transposes rows into columns; widths clamped to [1, 250]
        d = zip(*d)
        return [max(1, min(250, max(set(self._cell_len(j) for j in i))))
                for i in d]
class TextBox:
    """Display a scrollable text box in the bottom half of the screen.
    """
    def _calculate_layout(self):
        """Setup popup window and format data. """
        self.scr.touchwin()
        self.term_rows, self.term_cols = self.scr.getmaxyx()
        # the box occupies the bottom half of the terminal
        self.box_height = self.term_rows - int(self.term_rows / 2)
        self.win = curses.newwin(int(self.term_rows / 2),
                                 self.term_cols, self.box_height, 0)
        try:
            curses.curs_set(False)
        except _curses.error:
            # some terminals don't support hiding the cursor
            pass
        # transform raw data into list of lines ready to be printed
        s = self.data.splitlines()
        # wrap each logical line to the box width; keep empty lines as [""]
        s = [wrap(i, self.term_cols - 3, subsequent_indent=" ")
             or [""] for i in s]
        self.tdata = [i for j in s for i in j]
        # -3 -- 2 for the box lines and 1 for the title row
        self.nlines = min(len(self.tdata), self.box_height - 3)
        self.scr.refresh()
def csv_sniff(data, enc):
    """Sniff the CSV dialect of an encoded sample and return its delimiter.

    Args:
        data - encoded sample, e.g. the first raw line of the file
        enc - python encoding value ('utf_8','latin-1','cp870', etc)

    Returns:
        csv.dialect.delimiter
    """
    return csv.Sniffer().sniff(data.decode(enc)).delimiter
def fix_newlines(data):
    """Split a single-element list on bare carriage-return characters.

    Works around universal-newline handling: a file read as one long
    string whose lines are separated by Windows-style carriage returns is
    split into its individual lines. Input with more than one element is
    returned unchanged.
    """
    cr = '\r' if sys.version_info.major < 3 else b'\r'
    if len(data) == 1 and cr in data[0]:
        return data[0].split(cr)
    return data
def process_data(data, enc=None, delim=None, quoting=None):
    """Given a list of lists, check for the encoding, quoting and delimiter and
    return a list of CSV rows (normalized to a single length)

    Args:
        data - either raw lines read from a file, or an in-memory
               list of lists / tuple of tuples
        enc - encoding name; autodetected when None
        delim - CSV delimiter; sniffed from the first line when None
        quoting - name of a csv.QUOTE_* constant (string); QUOTE_MINIMAL
                  when None

    Returns:
        list of lists, every row padded to the longest row's length
    """
    data = fix_newlines(data)
    if data_list_or_file(data) == 'list':
        # If data is from an object (list of lists) instead of a file
        if sys.version_info.major < 3:
            data = py2_list_to_unicode(data)
        return pad_data(data)
    if enc is None:
        enc = detect_encoding(data)
    if delim is None:
        delim = csv_sniff(data[0], enc)
    if quoting is not None:
        # resolve e.g. 'QUOTE_NONE' to the csv module constant
        quoting = getattr(csv, quoting)
    else:
        quoting = csv.QUOTE_MINIMAL
    csv_data = []
    if sys.version_info.major < 3:
        # py2: csv.reader wants encoded byte strings, so encode the delimiter
        csv_obj = csv.reader(data, delimiter=delim.encode(enc),
                             quoting=quoting)
        for row in csv_obj:
            # NOTE(review): relies on str accepting (bytes, encoding) here --
            # presumably rebound to unicode for py2 elsewhere in this file; verify.
            row = [str(x, enc) for x in row]
            csv_data.append(row)
    else:
        # py3: csv.reader wants text, so decode the raw lines first
        data = [i.decode(enc) for i in data]
        csv_obj = csv.reader(data, delimiter=delim, quoting=quoting)
        for row in csv_obj:
            csv_data.append(row)
    return pad_data(csv_data)
def py2_list_to_unicode(data):
    """Coerce every cell of a list of rows to unicode (Python 2 only).

    Cells that are not byte strings (e.g. ints, which make the two-argument
    str() call fail) are stringified without an encoding.
    """
    enc = detect_encoding()
    converted = []
    for row in data:
        cells = []
        for cell in row:
            try:
                cells.append(str(cell, enc))
            except TypeError:
                # The 'enc' parameter fails with int values
                cells.append(str(cell))
        converted.append(cells)
    return converted
def data_list_or_file(data):
    """Determine if 'data' is a list of lists or list of strings/bytes
    Python 3 - reading a file returns a list of byte strings
    Python 2 - reading a file returns a list of strings
    Both - list of lists is just a list
    Returns: 'file' if data was from a file, 'list' if from a python list/tuple
    """
    first_row = data[0]
    if isinstance(first_row, (basestring, bytes)):
        return 'file'
    return 'list'
def pad_data(d):
    """Pad every row of *d* to the length of the longest row.

    Args: d - list of lists
    Returns: d unchanged when all rows already share one length, otherwise
    a new list with short rows extended by empty strings.
    """
    lengths = {len(row) for row in d}
    if len(lengths) == 1:
        return d
    target = max(lengths)
    return [row + [""] * (target - len(row)) for row in d]
def detect_encoding(data=None):
    """Return the default system encoding, or -- when sample data is given --
    the first candidate encoding that cleanly decodes every line.

    Args:
        data - list of byte strings to probe, or None

    Returns:
        enc - encoding name (the system default when data is None). When no
        candidate fits, a warning is printed and None falls through.
    """
    candidates = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2',
                  'utf-16', 'cp720']
    preferred = locale.getpreferredencoding(False)
    if data is None:
        return preferred
    # Try the system encoding first if it is not already a candidate.
    if preferred.lower() not in candidates:
        candidates.insert(0, preferred.lower())
    for candidate in candidates:
        try:
            for line in data:
                line.decode(candidate)
        except (UnicodeDecodeError, UnicodeError):
            continue
        return candidate
    print("Encoding not detected. Please pass encoding value manually")
def view(data, enc=None, start_pos=(0, 0), column_width=20, column_gap=2,
         trunc_char='…', column_widths=None, search_str=None,
         double_width=False, delimiter=None, quoting=None, info=None):
    """The curses.wrapper passes stdscr as the first argument to main +
    passes to main any other arguments passed to wrapper. Initializes
    and then puts screen back in a normal state after closing or
    exceptions.
    Args:
        data: data (filename, file, list of lists or tuple of tuples).
              Should be normalized to equal row lengths
        enc: encoding for file/data
        start_pos: initial file position. Either a single integer for just y
                   (row) position, or tuple/list (y,x)
        column_width: 'max' (max width for the column),
                      'mode' (uses arithmetic mode to compute width), or
                      int x (x characters wide). Default is 'mode'
        column_gap: gap between columns
        column_widths: list of widths for each column [len1, len2, lenxxx...]
        trunc_char: character to indicate continuation of too-long columns
        search_str: string to search for
        double_width: boolean indicating whether double-width characters
                      should be handled (defaults to False for large files)
        delimiter: CSV delimiter. Typically needed only if the automatic
                   delimiter detection doesn't work. None => automatic
        quoting: CSV quoting. None => automatic. This can be useful when
                 csv.QUOTE_NONE isn't automatically detected, for example when
                 using as a MySQL pager. Allowed values are per the Python
                 documentation:
                     QUOTE_MINIMAL
                     QUOTE_NONNUMERIC
                     QUOTE_ALL
                     QUOTE_NONE
        info: Data information to be displayed on ^g. For example a variable
              name or description of the current data. Defaults to the filename
              or "" if input was not from a file
    """
    if sys.version_info.major < 3:
        # py2: remember the current locale so it can be restored on exit;
        # setlocale('') enables the user's default (needed for wide output)
        lc_all = locale.getlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, '')
    else:
        lc_all = None
    if info is None:
        info = ""
    try:
        buf = None
        # Loop so that a ReloadException can re-read the data and re-enter
        # the viewer with updated settings.
        while True:
            try:
                if isinstance(data, basestring):
                    # data is a filename; default the ^g info line to it
                    with open(data, 'rb') as fd:
                        new_data = fd.readlines()
                        if info == "":
                            info = data
                elif isinstance(data, (io.IOBase, file)):
                    # data is an already-open file object
                    new_data = data.readlines()
                else:
                    # presumably a callable that returns fresh rows on each
                    # reload -- TODO confirm (plain lists would raise here)
                    new_data = data()
                if new_data:
                    buf = process_data(new_data, enc, delimiter, quoting)
                elif buf:
                    # cannot reload the file
                    pass
                else:
                    # cannot read the file
                    return 1
                curses.wrapper(main, buf,
                               start_pos=start_pos,
                               column_width=column_width,
                               column_gap=column_gap,
                               trunc_char=trunc_char,
                               column_widths=column_widths,
                               search_str=search_str,
                               double_width=double_width,
                               info=info)
            except (QuitException, KeyboardInterrupt):
                # normal exit path
                return 0
            except ReloadException as e:
                # carry the current view settings into the next iteration
                start_pos = e.start_pos
                column_width = e.column_width_mode
                column_gap = e.column_gap
                column_widths = e.column_widths
                search_str = e.search_str
                continue
    finally:
        # py2 only: restore whatever locale was active on entry
        if lc_all is not None:
            locale.setlocale(locale.LC_ALL, lc_all)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
7400,
1177,
13,
9078,
1377,
3582,
257,
7400,
12,
12381,
320,
863,
2393,
287,
257,
30117,
12,
2339,
3359,
13,
628,
220,
4746,
27667,
1279,
6495,
9246,
1440,
530,
... | 2.047059 | 13,175 |
"""
Extract forecasts from weatherbit.io and darksky.net forecast data.
* Single function
Data sources
============
weatherbit.io
-------------
* curl 'http://api.weatherbit.io/v2.0/current?key=d3fc630d26bb4a84b0392029a4c90ba0&lat=-43.49391&lon=172.57900' > test/weatherbit-currently.json
* curl 'http://api.weatherbit.io/v2.0/forecast/hourly?key=d3fc630d26bb4a84b0392029a4c90ba0&lat=-43.49391&lon=172.57900&hours=6' > test/weatherbit-hourly-6h.json
* curl 'http://api.weatherbit.io/v2.0/forecast/daily?key=d3fc630d26bb4a84b0392029a4c90ba0&lat=-43.49391&lon=172.57900&days=5' > test/weatherbit-daily-5d.json
darksky.net
-----------
* curl 'https://api.darksky.net/forecast/c795da7ea1fadbf5dccbf95d39ce7baa/-43.49391,172.57900?units=si&exclude=minutely,alerts,flags,daily,hourly' > test/darksky.net-currently.json
* curl 'https://api.darksky.net/forecast/c795da7ea1fadbf5dccbf95d39ce7baa/-43.49391,172.57900?units=si&exclude=minutely,alerts,flags,currently,daily' > test/darksky.net-hourly.json
* curl 'https://api.darksky.net/forecast/c795da7ea1fadbf5dccbf95d39ce7baa/-43.49391,172.57900?units=si&exclude=minutely,alerts,flags,currently,hourly' > test/darksky.net-daily.json
openweathermap.org
------------------
* curl 'http://api.openweathermap.org/data/2.5/weather?lat=-43.49391&lon=172.57900&APPID=bc805df06b9a715a84c3aa78ddbf4160' > test/openweathermap.org-currently.json
* curl 'http://api.openweathermap.org/data/2.5/forecast?lat=-43.49391&lon=172.57900&APPID=bc805df06b9a715a84c3aa78ddbf4160&cnt=24' > test/openweathermap.org-currently.json
TODO
----
* Conversion from bearing to cardinal only copes with 0--360 degrees.
* Does not extract darksky.io daily summary.
* Convert darksky.net and weatherbit.io icon codes into a common standard.
"""
# micropython provides utime; fall back to the standard time module elsewhere
try:
    import utime as utime
except ImportError:
    import time as utime

import weatherbit_io as weatherbit_io

weatherbit_io_files = [
    '../test/data/weatherbit.io-currently.json',
    '../test/data/weatherbit.io-hourly-6h.json',
    '../test/data/weatherbit.io-daily-5d.json',
]

# Smoke-test: parse every sample file. process_json() presumably consumes the
# open file handle, so it must run while the 'with' block still holds the
# file open (the original called it after the handle was already closed).
for filename in weatherbit_io_files:
    with open(filename) as f:
        obss = weatherbit_io.process_json(f)
    # for obs in obss:
    #     print(obs)
    # print()

# Get the data -- parse inside the 'with' so the handle is still open.
with open('../test/data/weatherbit.io-currently.json') as f:
    current = weatherbit_io.process_json(f)[0]

with open('../test/data/weatherbit.io-hourly-6h.json') as f:
    hourly = weatherbit_io.process_json(f)

with open('../test/data/weatherbit.io-daily-5d.json') as f:
    daily = weatherbit_io.process_json(f)

c_fmt = '{hour:02d}:{minute:02d} {temperature:2.0f} {rain:2.0f} {summary}'
d_fmt = '{month:02d}-{day:02d} {temperature_min:2.0f}/{temperature_max:2.0f} {rain:2.0f} {summary}'

# Update screen
print('Christchurch - weatherbit.io')
o = current
tt = utime.localtime(o.time_ts)
print(tt)
print(c_fmt.format(hour=tt[3], minute=tt[4], temperature=o.temperature_C, rain=o.precipitation_mm, summary=o.summary))
print()
#for o in hourly[:6]:
#    tt = utime.localtime(o.time_ts)
#    print(c_fmt.format(hour=tt[3], minute=tt[4], temperature=o.temperature_C, rain=o.precipitation_mm, summary=o.summary))
#print()
for o in daily[:5]:
    tt = utime.localtime(o.time_ts)
    print(d_fmt.format(month=tt[1], day=tt[2], temperature_min=o.temperature_min_C, temperature_max=o.temperature_max_C, rain=o.precipitation_mm, summary=o.summary))
    print()
print()

import darksky_net as darksky_net

darksky_net_files = [
    '../test/data/darksky.net-currently.json',
#    '../test/data/darksky.net-hourly.json',
    '../test/data/darksky.net-daily.json',
]
for filename in darksky_net_files:
    print(filename)
    with open(filename) as f:
        obss = darksky_net.process_json(f)
    for obs in obss:
        print(obs)
    print()

# # Get the data
# with open('../test/data/darksky.net-currently.json') as f:
#     current = darksky_net.process_json(f)[0]
#
# with open('../test/data/darksky.net-hourly.json') as f:
#     hourly = darksky_net.process_json(f)
#
# with open('../test/data/darksky.net-daily.json') as f:
#     daily = darksky_net.process_json(f)

# Update screen
# NOTE(review): 'current' and 'daily' below still hold the weatherbit.io
# results because the darksky.net loads above are commented out -- confirm
# this is intentional.
print('Christchurch - darksky.net')
c_fmt = '{hour:02d}:{minute:02d} {temperature:2.0f} {rain:2.0f} {summary}'
o = current
tt = utime.localtime(o.time_ts)
print(c_fmt.format(hour=tt[3], minute=tt[4], temperature=o.temperature_C, rain=o.precipitation_mm, summary=o.summary))
print()
#for o in hourly[:6]:
#    tt = utime.localtime(o.time_ts)
#    print(c_fmt.format(hour=tt[3], minute=tt[4], temperature=o.temperature_C, rain=o.precipitation_mm, summary=o.summary))
#print()
d_fmt = '{month:02d}-{day:02d} {temperature_min:2.0f}/{temperature_max:2.0f} {rain:2.0f} {summary}'
for o in daily[:5]:
    tt = utime.localtime(o.time_ts)
    print(d_fmt.format(month=tt[1], day=tt[2], temperature_min=o.temperature_min_C, temperature_max=o.temperature_max_C, rain=o.precipitation_mm, summary=o.summary))
    print()
print()
| [
37811,
198,
11627,
974,
26119,
422,
6193,
2545,
13,
952,
290,
288,
5558,
2584,
13,
3262,
11092,
1366,
13,
198,
198,
9,
14206,
2163,
220,
198,
198,
6601,
4237,
198,
25609,
198,
198,
23563,
2545,
13,
952,
198,
32501,
198,
9,
29249,
70... | 2.300575 | 2,259 |
"""
This module allows the library to be run as an interactive command line
application, primarily to assist with testing the library.
"""
import argparse
import asyncio
import logging
import sys
import traceback
from typing import Optional, Union
from lifesospy.baseunit import BaseUnit
from lifesospy.const import (
PROJECT_VERSION, PROJECT_DESCRIPTION)
from lifesospy.devicecategory import DeviceCategory, DC_ALL_LOOKUP
from lifesospy.enums import (
OperationMode, ESFlags, SSFlags, SwitchFlags, SwitchNumber)
_LOGGER = logging.getLogger(__name__)
def main(argv):
    """
    Basic command line script for testing library.

    Args:
        argv: full argument vector (argv[0] is the program name and is
              skipped when parsing).
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="LifeSOSpy v{} - {}".format(
            PROJECT_VERSION, PROJECT_DESCRIPTION))
    parser.add_argument(
        '-H', '--host',
        help="Hostname/IP Address for the LifeSOS server, if we are to run as a client.",
        default=None)
    parser.add_argument(
        '-P', '--port',
        help="TCP port for the LifeSOS ethernet interface.",
        default=str(BaseUnit.TCP_PORT))
    parser.add_argument(
        '-p', '--password',
        help="Password for the Master user, if remote access requires it.",
        default='')
    parser.add_argument(
        '-v', '--verbose',
        help="Display all logging output.",
        action='store_true')
    # Parse the provided argv (minus the program name) instead of implicitly
    # reading sys.argv, so the 'argv' parameter is actually honoured.
    # Behaviour is unchanged for the main(sys.argv) call below.
    args = parser.parse_args(argv[1:])

    # Configure logger
    logging.basicConfig(
        format="%(asctime)s %(levelname)-5s (%(threadName)s) [%(name)s] %(message)s",
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.DEBUG if args.verbose else logging.INFO)

    # Create base unit instance and start up interface
    print("LifeSOSpy v{} - {}\n".format(PROJECT_VERSION, PROJECT_DESCRIPTION))
    loop = asyncio.get_event_loop()
    baseunit = BaseUnit(args.host, args.port)
    if args.password:
        baseunit.password = args.password
    baseunit.start()

    # Provide interactive prompt for running test commands on another thread
    loop.run_until_complete(
        loop.run_in_executor(
            None, _handle_interactive_baseunit_tests, baseunit, loop))

    # Shut down interface and event loop
    baseunit.stop()
    loop.close()
if __name__ == "__main__":
    # Delegate to main() with the raw argument vector.
    main(sys.argv)
| [
37811,
198,
1212,
8265,
3578,
262,
5888,
284,
307,
1057,
355,
281,
14333,
3141,
1627,
198,
31438,
11,
7525,
284,
3342,
351,
4856,
262,
5888,
13,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
30351,
952,
198,
11748,
18931,
198,
... | 2.659379 | 869 |
import logging
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
# not used
| [
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
8692,
1330,
7308,
22362,
320,
1352,
11,
3602,
16354,
35608,
259,
628,
628,
198,
198,
2,
407,
973,
628,
198
] | 3.225 | 40 |
import time
import pickle # to load data
# import torch
# from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
from principal_dbn_alpha import DBN
######################################################################
######################### HELPER FUNCTIONS #########################
######################################################################
# one_hot encodes numerical labels
# used in make_mnist file
# DNN helper functions
#####################################################################
############################### DNN ###############################
#####################################################################
############################################################################
############################### MAIN PROGRAM ###############################
############################################################################
if __name__ == '__main__':
    # main() is presumably defined elsewhere in this file (not visible in
    # this excerpt) -- verify these defaults before relying on them.
    main(pretrain=False, load=False, train=True)
11748,
640,
198,
11748,
2298,
293,
1303,
284,
3440,
1366,
198,
198,
2,
1330,
28034,
198,
2,
422,
28034,
10178,
1330,
40522,
11,
31408,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
... | 5.165829 | 199 |
from bokeh.io import output_file, show, curdoc
from bokeh.layouts import widgetbox
from bokeh.models.widgets import Select
main()
| [
6738,
1489,
365,
71,
13,
952,
1330,
5072,
62,
7753,
11,
905,
11,
1090,
15390,
198,
6738,
1489,
365,
71,
13,
10724,
5269,
1330,
26295,
3524,
198,
6738,
1489,
365,
71,
13,
27530,
13,
28029,
11407,
1330,
9683,
628,
198,
198,
12417,
341... | 3.022727 | 44 |
import pygame
import numpy as np
from . import Network
from . import Player
from . import Canvas
from . import Button
from _thread import *
import sys
sys.path.append("../")
from speech.speech_synthesis import say
from gans.image_synthesis import gen_image
| [
11748,
12972,
6057,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
1330,
7311,
198,
6738,
764,
1330,
7853,
198,
6738,
764,
1330,
1680,
11017,
198,
6738,
764,
1330,
20969,
198,
6738,
4808,
16663,
1330,
1635,
198,
11748,
25064,
198,
... | 3.486486 | 74 |
if __name__ == '__main__':
    """
    Writes the API key to api_key.txt file. It will create the file if it doesn't exist.
    This function is intended to be called from the Python command line using: python hyperia YOUR_API_KEY
    If you don't have an API key yet, register for one at: https://hyperia.net/api/register
    INPUT:
        argv[1] -> Your API key from Hyperia. Should be 73 hex characters
    OUTPUT:
        none
    """
    import sys

    # Single source for the (identical) rejection message previously
    # duplicated across both failure branches.
    _invalid_key_msg = (
        'The key appears to invalid. Please make sure to use the 73 character key assigned by Hyperia')

    # Both checks collapsed into one condition: exactly one argument, non-empty,
    # and of the expected 73-character length.
    if len(sys.argv) == 2 and sys.argv[1] and len(sys.argv[1]) == 73:
        # write the key to the file; 'with' closes the handle even if the
        # write fails (the original leaked the handle on error)
        with open('api_key.txt', 'w') as f:
            f.write(sys.argv[1])
        print('Key: ' + sys.argv[1] + ' was written to api_key.txt')
        print(
            'You are now ready to start using Hyperia. For an example, run: python example.py')
    else:
        print(_invalid_key_msg)
| [
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
12257,
274,
262,
7824,
1994,
284,
40391,
62,
2539,
13,
14116,
2393,
13,
632,
481,
2251,
262,
2393,
611,
340,
1595,
470,
21... | 2.385744 | 477 |
import collections
from nose.tools import istest, assert_equal
from precisely import has_attr, equal_to
from precisely.results import matched, unmatched
User = collections.namedtuple("User", ["username"])
@istest
@istest
@istest
@istest
@istest
| [
11748,
17268,
198,
198,
6738,
9686,
13,
31391,
1330,
318,
9288,
11,
6818,
62,
40496,
198,
198,
6738,
10582,
1330,
468,
62,
35226,
11,
4961,
62,
1462,
198,
6738,
10582,
13,
43420,
1330,
14451,
11,
48621,
628,
198,
12982,
796,
17268,
13... | 3.333333 | 78 |
import os, sys
import numpy as np
import pandas as pd
import warnings; warnings.filterwarnings("ignore")
# Make the package importable from a source checkout as well as an install:
# if the normal import fails, execute the package __init__ in-place first.
try:
    import genomics_gans
except ImportError:
    # Narrowed from a bare 'except:' so unrelated errors (e.g. a SyntaxError
    # inside the package, KeyboardInterrupt) are no longer silently swallowed.
    exec(open('__init__.py').read())
    import genomics_gans
from genomics_gans.prepare_data import data_modules
# type imports
from torch.utils.data import Dataset, DataLoader
| [
198,
11748,
28686,
11,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
14601,
26,
14601,
13,
24455,
40539,
654,
7203,
46430,
4943,
198,
28311,
25,
220,
198,
220,
220,
220,
1330,
2429,
31994,
... | 2.929204 | 113 |
from rest_framework.views import APIView
from config.settings.types import HawkScope
from datahub.core.auth import PaaSIPAuthentication
from datahub.core.hawk_receiver import (
HawkAuthentication,
HawkResponseSigningMixin,
HawkScopePermission,
)
from datahub.dataset.core.pagination import DatasetCursorPagination
class BaseDatasetView(HawkResponseSigningMixin, APIView):
    """
    Base API view to be used for creating endpoints for consumption
    by Data Flow and insertion into Data Workspace.
    """
    authentication_classes = (PaaSIPAuthentication, HawkAuthentication)
    permission_classes = (HawkScopePermission, )
    required_hawk_scope = HawkScope.data_flow_api
    pagination_class = DatasetCursorPagination

    def get(self, request):
        """Serve every record of the dataset as one paginated response."""
        records = self.get_dataset()
        pager = self.pagination_class()
        current_page = pager.paginate_queryset(records, request, view=self)
        return pager.get_paginated_response(current_page)

    def get_dataset(self):
        """Subclasses must return the iterable of records to expose."""
        raise NotImplementedError
| [
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
198,
6738,
4566,
13,
33692,
13,
19199,
1330,
26698,
43642,
198,
6738,
4818,
993,
549,
13,
7295,
13,
18439,
1330,
350,
7252,
50,
4061,
47649,
3299,
198,
6738,
4818,
993,
549... | 2.853598 | 403 |
import multiprocessing
import threading
import traceback
from daphne.testing import _reinstall_reactor
from pytest_django.plugin import _blocking_manager
| [
11748,
18540,
305,
919,
278,
198,
11748,
4704,
278,
198,
11748,
12854,
1891,
198,
198,
6738,
288,
6570,
710,
13,
33407,
1330,
4808,
260,
17350,
62,
260,
11218,
198,
6738,
12972,
9288,
62,
28241,
14208,
13,
33803,
1330,
4808,
41938,
62,
... | 3.627907 | 43 |
products = []
| [
29498,
796,
17635,
628
] | 3.75 | 4 |
# -*- coding: utf-8 -*-
import os
import struct
import copy
from tkinter import *
from tkinter import filedialog as fd
from tkinter import ttk
from tkinter import messagebox as mb
from tkinter import simpledialog as sd
# Module-level state shared with the handler functions (defined elsewhere in
# this file, not visible in this excerpt).
imgList = []          # presumably image-resource entries -- TODO confirm
seList = []           # presumably sound-effect entries -- TODO confirm
bgmList = []          # presumably background-music entries -- TODO confirm
comicDataList = []    # parsed ComicScript rows, presumably -- verify
copyComicData = []    # clipboard buffer for the copy/paste row commands, presumably
indexList = []        # row-index bookkeeping, presumably -- TODO confirm
max_param = 0         # largest parameter count encountered, presumably
byteArr = []          # raw bytes of the loaded file, presumably
file_path = ""        # path of the currently opened file
frame = None          # content frame inside the script LabelFrame, presumably
cmd = [
"Tx",
"TxSize",
"Alpha",
"End",
"Pos",
"ColorALL",
"Move",
"STAGE_BGM",
"SetFlat3D",
"ChangeFlat3D",
"SetCamDir",
"DisCamDir",
"Set3DObj",
"SetWAngleX",
"SetWAngleY",
"SetWAngleZ",
"SetLAngleX",
"SetLAngleY",
"SetLAngleZ",
"SetBoneWAngleX",
"SetBoneWAngleY",
"SetBoneWAngleZ",
"SetBoneLAngleX",
"SetBoneLAngleY",
"SetBoneLAngleZ",
"ShowMesh",
"HideMesh",
"PlayAnime",
"Length_End",
"SetScall",
"RACE_START",
"RACE_END",
"FADE_STAGE_BGM",
"CHANGE_SCENE",
"LPos",
"LMove",
"LLoopX",
"LLoopY",
"LLoopZ",
"Angle",
"AngleLoop",
"Move2",
"PosX",
"PosY",
"PosZ",
"PlaySE",
"SET_MT_NONE",
"SetCamPos",
"SetCamTarget",
"CamMoveWait",
"SetComic",
"ComicPos",
"ComicAlpha",
"ComicWait",
"Scene_to_Comic",
"SKY_DOME",
"Fill_BG",
"ComicEnd",
"CamComtroll",
"ComicSceneStop",
"BtnWait",
"EyeMove",
"SetZoom",
"BG_Alpha",
"BG_Wait",
"StartCount",
"WaitMoveEye",
"WaitFrame",
"FTV_Play",
"FTV_Wait",
"HideMsgWnd",
"FTV_End",
"SkipEventPoint",
"SkipEventFlg",
"PlayComicSE",
"StopComicSE",
"PlayComicBGM",
"StopComicBGM",
"VolComicBGM",
"HideALLComic",
"Stage_BGM_Vol",
"SET_CPU_FLG",
"SET_CPU_MODE",
"CHK_LENGTH",
"END_CHK_LENGTH",
"CHK_POSTION",
"END_CHK_POSTION",
"WAIT_MOTION",
"END_WAIT_MOTION",
"CHANGE_SPEED",
"CHANGE_CAM_TYPE",
"Set2P",
"CharChk_and_Tx",
"ChangeR",
"ChangeG",
"ChangeB",
"ChangeColor",
"SetGray",
"MoveX",
"MoveY",
"MoveZ",
"SetUV_X",
"RePlay",
"IsStart",
"ShowGoal",
"CHK_WIN_TRAIN",
"END_CHK_WINTRAIN",
"N_ADD_OBJ",
"N_POS",
"START_TIME_LINE",
"N_MOVE",
"WAIT_TIME_LINE",
"N_DEL_OBJ",
"SCREEN_FADE",
"N_CHANGE_ANIME",
"TRAIN_SPEED",
"TRAIN_FLG",
"SCENE_LIGHT",
"CHANGE_CAM_LENGTH",
"CHANGE_CAM_DIRX",
"CHANGE_CAM_DIRY",
"CHANGE_CAM_DIRZ",
"R_Drift",
"L_Drift",
"IS_TRAIN_HIT",
"TO_RAIL",
"SLEEP_TRAIN",
"RandWAngle",
"RandMove",
"ADD_OBJ",
"START_COMIC",
"SetRand3DObj",
"Offset3DObj",
"RandPos",
"RandPlaySE",
"RandAngleX",
"RandAngleY",
"RandAngleZ",
"CHK_TRAIN_STATE",
"END_CHK_TRAIN_STATE",
"CHK_TRAIN_SPEED_U",
"CHK_TRAIN_SPEED_D",
"END_CHK_TRAIN_SPEED_U",
"END_CHK_TRAIN_SPEED_D",
"ChkStory_and_Tx",
"ClearStory_and_Tx",
"N_L_ANGLE_X",
"N_L_ANGLE_Y",
"N_L_ANGLE_Z",
"Comic_Glay",
"N_MoveMesh_X",
"N_MoveMesh_Y",
"N_MoveMesh_Z",
"SetComic_Blur",
"SetComic_Blur_Speed",
"TRACK_BOMB",
"Hide_Sky_Doom",
"ADD_POINT",
"CHK_POINT",
"ELSE_CHK_POINT",
"ELSE_IF_CHK_POINT",
"END_CHK_POINT",
"GOTO_SCRIPT",
"SHEAK_COMIC",
"STORY_OPEN",
"STORY_CLEAR",
"CHAR_OPEN",
"SAVE_GAME",
"KEISUKE_COUNT",
"RandPlayComicSE",
"TITLE_MODE",
"GOING",
"RAND_IF",
"ELSE_RAND_IF",
"END_RAND_IF",
"CHK_SP_BREAK",
"END_CHK_SP_BREAK",
"CHK_DRIFT",
"END_CHK_DRIFT",
"ENDING_MODE",
"ChkCause_and_Tx",
"SET_DRAW_TYPE",
"To_TxSize",
"OPEN_CAUSE",
"DIS_TRAIN_SPEED",
"CHK_RACE_TIME",
"END_CHK_RACE_TIME",
"End_Comic",
"WAIT_RAIL",
"END_WAIT_RAIL",
"COMIC_SCALE",
"USO_COUNT",
"WaitRandPlaySE",
"FROM",
"GOTO",
"CHK_TRAIN_TYPE",
"RAND_IF_AVG",
"CHK_NOTCH",
"WAIT_RAIL_ONLY",
"ONE_TRACK_DRIFT",
"LAST_STATION",
"OSSAN",
"SET_TAIL_SCALE",
"OPEN_HUTA",
"SET_GN",
"MDL_GETINDEX",
"INDEX_BONE_ROT_X",
"INDEX_BONE_ROT_Y",
"INDEX_BONE_ROT_Z",
"INDEX_BONE_L_ROT_X",
"INDEX_BONE_L_ROT_Y",
"INDEX_BONE_L_ROT_Z",
"CREATE_INDEX",
"IB_LI_CREATE_ROT_X",
"IB_LI_CREATE_ROT_Y",
"IB_LI_CREATE_ROT_Z",
"IB_LI_SET_ROT_X",
"IB_LI_SET_ROT_Y",
"IB_LI_SET_ROT_Z",
"IB_LI_SET_LOOP_X",
"IB_LI_SET_LOOP_Y",
"IB_LI_SET_LOOP_Z",
"ADD_MY_OBJ",
"INDEX_BONE_L_POS_X",
"INDEX_BONE_L_POS_Y",
"INDEX_BONE_L_POS_Z",
"IB_LI_CREATE_L_POS_X",
"IB_LI_CREATE_L_POS_Y",
"IB_LI_CREATE_L_POS_Z",
"IB_LI_SET_L_POS_X",
"IB_LI_SET_L_POS_Y",
"IB_LI_SET_L_POS_Z",
"FROM_ADDMT",
"MOVE_UV_X",
"MOVE_UV_Y",
"CREATE_UV_MOVE_X",
"IB_LI_SET_LOOP_LPOSX",
"IB_LI_SET_LOOP_LPOSY",
"IB_LI_SET_LOOP_LPOSZ",
"RELEASE_ALL_IB_LIST",
"ADD_MY_OBJ_INDEX",
"TO_TAGET_POS",
"ATK_HIT",
"ATK_END",
"SET_RELEASE_PARAM",
"CREATE_LENSFLEAR",
"SET_LENSFLEAR_PARAM",
"SET_LENSFLEAR_MT",
"RAIL_POS_TO_BUFF",
"BUFF_TO_CAM_POS",
"BUFF_TO_TARGET_POS",
"FTV_BASE_PROC",
"FTV_NEXT_PROC",
"MDL_INDEX_TO_VIEW",
"SET_FOG_LENGTH",
"SET_UV_MOVE_X",
"SET_UV_LOOP_X",
"CREATE_MESH_INDEX",
"SET_MESH_INDEX",
"INDEX_BONE_L_ADD_ROT_X",
"INDEX_BONE_L_ADD_ROT_Y",
"INDEX_BONE_L_ADD_ROT_Z",
"CHANGE_SCALL",
"CHK_CLEAR_STORY",
"CHK_OPEN_STORY",
"SET_LENSFLEAR_ALL_FLG",
"CHK_USE_CHAR",
"SET_OBJ_FOG_NO",
"SET_OBJ_RENDER_ID",
"PLAY_STAGE_BGM",
"CHANGE_TRAIN_FOG",
"FIRST_OBJ_SET_ANIME",
"SET_CAMPOINT_2P2C",
"SET_CAMPOINT_1P2C",
"CAM_POINT_PER",
"CAM_TARGET_PER",
"SET_CAM_POINT_LENGTH",
"SET_CAM_OFFSET",
"START_WIPER",
"CREATE_TRAIN_ORG",
"ORG_SET_RAIL",
"ORG_ADD",
"SET_CAMPOINT_K",
"ORG_SET_POS",
"ORG_SET_FOG",
"ORG_RELEASE",
"PLAY_FTV_END",
"CNG_TRAIN_MAT_COL",
"CNG_ORG_MAT_COL",
"IS_CAUTION",
"ENDWAIT_COMIC",
"SET_COMIC_BG_COLOR",
"TX_2_TRAIN",
"CHANGE_MT_COL_TRAIN",
"CNG_MT_COL",
"RETURN",
"ReLoadSE",
"BASE_POINT_CAM",
"STOP_3D",
"STOP_STAGE_BGM",
"TRAIN_UD",
"SET_CAM_TARGET_OFFSET",
"SET_CAM_POINT_1T_ROT",
"SET_CAM_T_LENGHT",
"SET_CAM_T_ROT_X",
"SET_CAM_T_ROT_Y",
"SET_CAM_T_OFFSET",
"NO_OUTRUN",
"SET_WHEEL_FIRE",
"RELOAD_OP_TRAIN",
"BackR_Drift",
"BackL_Drift",
"CHK_MOTION",
"ORG_SET_STYLE_POS",
"RECREATE_TRAIN",
"SET_CAMPOINT_1P2T",
"BUFF_TO_SC_CAM_POS",
"SC_ORG_MODE_CHANGE",
"SC_ORG_INIT_POS",
"SC_ORG_SET_POS",
"SC_ORG_SET_ROT",
"SC_ORG_SET_X_ROT",
"SC_ORG_SET_Y_ROT",
"SC_ORG_SET_Z_ROT",
"SET_SC_KOTEI_CAM_POS",
"SET_SC_KOTEI_CAM_T_POS",
"START_SC_WIPER",
"SUPER_DRIFT",
"CNG_TRAIN_NO_MAT_COL",
"ERR_CMD",
"K_HN",
"TO_TRACK_RAIL",
"IS_NO_DRAMA",
"CNG_TRAIN_NO_MAT_RGBA",
"SHOW_RECORD",
"WAIT_RECORD_END",
"IB_LI_SET_UPDATE_FLG",
"PTCL_SCALL",
"PTCL_COLOR",
"PTCL_ALPHA",
"PTCL_DRAWTYPE",
"PTCL_ANGLE",
"PTCL_RAND_ANGLE",
"PTCL_RAND_COLOR",
"PTCL_RAND_ALPHA",
"PTCL_RAND_SCALL",
"IB_ADD_PTCL",
"PTCL_RAND_TONE_COLOR",
"IS_ALPHA_END",
"PTCL_L_POS",
"PTCL_RAND_L_POS",
"CREATE_MAT_COLOR_R_INTERLIST",
"CREATE_MAT_EMISSIVE_R_INTERLIST",
"SET_MAT_COLOR_R",
"SET_MAT_COLOR_G",
"SET_MAT_COLOR_B",
"SET_MAT_COLOR_LOOP",
"SET_MAT_EMISSIVE_R",
"SET_MAT_EMISSIVE_G",
"SET_MAT_EMISSIVE_B",
"SET_MAT_EMISSIVE_LOOP",
"CREATE_MAT_COLOR_G_INTERLIST",
"CREATE_MAT_EMISSIVE_G_INTERLIST",
"CREATE_MAT_COLOR_B_INTERLIST",
"CREATE_MAT_EMISSIVE_B_INTERLIST",
"CREATE_UV_MOVE_Y",
"SET_UV_MOVE_Y",
"SET_UV_LOOP_Y",
"INDEX_RAND_ROT_X",
"INDEX_RAND_ROT_Y",
"INDEX_RAND_ROT_Z",
"INDEX_RAND_POS_X",
"INDEX_RAND_POS_Y",
"INDEX_RAND_POS_Z",
"RAND_SHOW_MESH",
"INDEX_RAND_SCALL",
"ADD_CHILD_OBJ",
"ADD_OBJ_INDEX",
"GAS_TARBIN",
"ENGINE_START",
"CHANGE_CHILDOBJ_ANIME",
"IB_SET_W_MT",
"CHK_OBJ_PARAM",
"SET_OBJ_PARAM",
"INDEX_DIR_CAM",
"CNG_MT_LIGHT",
"ADD_OBJ_INDEX2",
"CNG_MT_ALPHA",
"CREATE_MAT_ALPHA_INTERLIST",
"SET_MAT_ALPHA",
"RESTART_MESH_LIST",
"RAIL_ANIME_CHANGE",
"STOP_COMIC_SE_ALL",
"HURIKO",
"FTV_PLAY_AND_PREV",
"FTV_END_INHERIT",
"STATION_NAME_PRIORITY",
"ALL_FIT",
"SWAP_TX",
"CNG_TX",
"CHK_CAUSE",
"CNG_ANIME",
"CHK_OUHUKU",
"SET_TRAIN_PTCL_AREA",
"WAIT_DOSAN_LENGTH",
"END_DOSAN_LENGTH",
"DOSANSEN",
"MESH_INDEX_SE_UV_ANIME_FLG",
"WEATHER",
"TRAIN_DIR",
"IS_USE_CHAR",
"QUICK_SAVE_EVENT",
"NONE_GOAL",
"ENGINE_STOP",
"IS_BTL_MODE",
"IS_FREE_MODE",
"FIRST_OBJ_SET_ANIME_SCENE",
"G_HIDE_MESH",
"G_SHOW_MESH",
"STOP_WIPER",
"TRAIN_ANIME_CHANGE",
"MESH_INDEX_UV_RESTRT",
"SET_COMIC_COLOR",
"CHK_OUTRUN_CNT",
"CHK_D_AND_NOTCH",
"ADD_CPU_LEN_OUTRUN",
"ADD_CPU_SPEED_D_AND_NOTCH",
"CHK_HIT_CNT",
"TOP_SPEED_HOSYO",
"SET_ROOT_BLOCK",
"RIFT",
"COLLISION",
"DIR_VIEW_CHANGE",
"CHK_RAIL_NO",
"TRACK_CHANGE",
"CHK_LENGTH_DIR",
"CHK_POS_DIR",
"TRUE_CLASH",
"KATARIN_RUN",
"DRAW_UI",
"STOP_SCRIPT_BGM",
"SET_STATION_NO",
"SET_CPU_BREAKE",
"AMB_ANIME",
"ONE_DRIFT_FALSE",
"L_One_Drift",
"R_One_Drift",
"Ret_One_Drift",
"FRONT_JUMP",
"REAR_JUMP",
"FRONT_MOVE_X",
"TRACK_MOVE",
"TRAIN_JUMP",
"SET_LIGHT",
"SET_COL_KASENCHU",
"SET_KAISO",
"SET_FOR",
"CHK_TRAIN_COL",
"VOL_SCRIPT_BGM",
"IF_NOTCH",
"SET_BRIND_SW",
"SET_MIKOSHI",
"ADD_FIRE",
"BREAKE_OR_HIT",
"OUTRUN",
"SOFT_ATK",
"RAIL_STOP",
"CHANGE_OUHUKU_LINE",
"BRIND_ATK",
"OPEN_POS_DLG",
"PLAY_STAGEBGM_BLOCK",
"SET_BTL_POINT",
"CAM_TRAIN",
"PLAY_SCRIPT_BGM",
"CNG_FOR",
"SET_RAILBLOCK_CHECKER",
"RAIN_SE",
"TRAIN_STOP",
"KOTEICAM_BLEND",
"SCRIPT_RAIN",
"LINE_CHANGE",
"WAIT_RAIL_MORE_ONLY",
"SET_SE_VOL",
"CAM_TARGET_TRACK",
"DECAL_D37",
"DECAL_D39",
"DECAL_SMOKE",
"RAIL_PRIORITY",
"GET_KEY",
"SHOW_LIGHT",
"SHOW_IN_LIGHT",
"FOG_POW",
"STORY_WIN",
"RAIN_PARTICLE",
"D39_FIRE",
"SET_CPU_SPEED",
"BODY_AUDIO_PLAY",
"BODY_AUDIO_STOP",
"CNG_FADE_SPRITE",
"RAIL_DRIFT_CHK",
"INQ_WAIT",
"CNG_SCCAM_TRAIN",
"STOP_TRAIN_SE",
"PLAY_SCRIPT_BGM_TIME",
"CNG_BODY_COLOR",
"LOAD_TRAIN",
"SHOW_BLOCK",
"UPDATE_LIGHT_FRARE",
"WAIT_RAIL_MORE_GOTO",
"CREATE_AURA",
"AURA_ALPHA",
"SET_LV_JUMP",
"CREATE_EFFECT_CAM",
"TO_EFFECT_CAM",
"EFFECT_CAM_POW",
"EFFECT_CAM_COLOR",
"EFFECT_CAM_ALPHA",
"HIDE_LIGHT",
"USE_EFFECT_CAM",
"USE_EFFECT_CAM_RGB",
"EFFECT_CAM_RGB",
"COPY_TRAIN_POS",
"COL_SET",
"CNG_CPU_TRAIN",
"BTN_GOTO",
"NO_TIMESCALE_KOMA",
"EFFCAM_NOIZE",
"EFFCAM_GRI",
"EFFCAM_BLOCKNOISE",
"CREATE_TQ5000_FLAGMENT",
"USE_TQ5000_FLAGMENT",
"TQ5000_FLAGPOS",
"HUMIKIRI_VOL",
"TO_EFFECT_CAM_BODY",
"TO_NORM_CAM",
"TO_920",
"NO_TIMESCALE_FVT",
"CNG_TARGET_BODY",
"SC_ADD_POINT",
"CHK_SC_POINT",
"KAISO_TO_DUEL",
"SHOW_ST",
"ORG_UPDATE",
"SET_RAILBLOCK_POS",
"SET_LIGHT_OVER",
"CREATE_STAFFROLL",
"STAFFROLL_START",
"WAIT_STAFFROLL",
"SC_OUTRUN",
"CREATE_TAKMIS",
"SET_TAKMIS_POS",
"SET_TAKMIS_ALPHA",
"FRONT_DOOR",
"SET_KOMA_DEPTH",
"D37_FIRE",
"AMB_HIT_WAIT",
"ShowRecord",
"FIT_PER",
"CREATE_COMIC_PC",
"SET_COMIC_PC",
"PAUSE_STAGE_BGM",
"SET_KAKAPO",
"KOMA_KAKAPO",
"START_TARBINE",
"END_TARBINE",
"TARBINE_FTV_START",
"TARBINE_FTV_END",
"STORY_ENGINE",
"RAND_GOTO",
"KQ_SOUND",
"STORY_GOTO",
"PLAY223HONE",
"RB26",
"PLAYORGSE",
"H2300_GOAL",
"SCRIPT_CMD_MAX"
]
# --- Main window -----------------------------------------------------------
root = Tk()
root.title("電車でD ComicScript 改造 1.2.0")
root.geometry("900x600")
# Menu bar with a single "open file" entry; openFile is presumably defined
# elsewhere in this file (not visible in this excerpt).
menubar = Menu(root)
menubar.add_cascade(label='ファイルを開く', command= lambda: openFile())
root.config(menu=menubar)
# Read-only entry showing the loaded file name.
v_fileName = StringVar()
fileNameEt = ttk.Entry(root, textvariable=v_fileName, font=("",14), width=20, state="readonly", justify="center")
fileNameEt.place(relx=0.053, rely=0.03)
# Label + read-only entry showing the selected row number.
selectLb = ttk.Label(text="選択した行番号:", font=("",14))
selectLb.place(relx=0.05, rely=0.11)
v_select = StringVar()
selectEt = ttk.Entry(root, textvariable=v_select, font=("",14), width=5, state="readonly", justify="center")
selectEt.place(relx=0.22, rely=0.11)
# Row-manipulation buttons; created disabled, presumably enabled once a file
# is loaded -- TODO confirm. Callbacks are defined elsewhere in this file.
editLineBtn = ttk.Button(root, text="選択した行を修正する", width=25, state="disabled", command=editLine)
editLineBtn.place(relx=0.32, rely=0.03)
insertLineBtn = ttk.Button(root, text="選択した行に挿入する", width=25, state="disabled", command=insertLine)
insertLineBtn.place(relx=0.54, rely=0.03)
deleteLineBtn = ttk.Button(root, text="選択した行を削除する", width=25, state="disabled", command=deleteLine)
deleteLineBtn.place(relx=0.76, rely=0.03)
copyLineBtn = ttk.Button(root, text="選択した行をコピーする", width=25, state="disabled", command=copyLine)
copyLineBtn.place(relx=0.32, rely=0.11)
pasteLineBtn = ttk.Button(root, text="選択した行に貼り付けする", width=25, state="disabled", command=pasteLine)
pasteLineBtn.place(relx=0.54, rely=0.11)
# NOTE(review): command=csvExtractBtn / command=csvLoadAndSaveBtn reference
# the very names being assigned on these lines; presumably callback functions
# of the same names exist earlier in the file and are shadowed by these
# widget variables -- verify, otherwise this raises NameError at startup.
csvExtractBtn = ttk.Button(root, text="CSVで取り出す", width=25, state="disabled", command=csvExtractBtn)
csvExtractBtn.place(relx=0.32, rely=0.19)
csvLoadAndSaveBtn = ttk.Button(root, text="CSVで上書きする", width=25, state="disabled", command=csvLoadAndSaveBtn)
csvLoadAndSaveBtn.place(relx=0.54, rely=0.19)
# Labelled frame hosting the script-content view.
scriptLf = ttk.LabelFrame(root, text="スクリプト内容")
scriptLf.place(relx=0.05, rely=0.25, relwidth=0.9, relheight=0.70)
root.mainloop()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
28686,
201,
198,
11748,
2878,
201,
198,
11748,
4866,
201,
198,
6738,
256,
74,
3849,
1330,
1635,
201,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
5... | 1.65234 | 8,911 |
    # Scrapped: superseded by the LCA implemented via HL (heavy-light) decomposition.
| [
220,
220,
220,
1303,
38312,
10263,
230,
228,
164,
100,
96,
30640,
5641,
406,
8141,
17433,
240,
162,
249,
116,
18566,
25224,
5641,
30640,
1209,
250,
41115,
628
] | 1 | 28 |
from __future__ import unicode_literals
import gzip
from io import BytesIO
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import CachedStaticFilesStorage, StaticFilesStorage
from django.contrib.staticfiles.utils import matches_patterns
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.core.files.storage import get_storage_class
from django.utils.functional import LazyObject
from pipeline.conf import settings
# Module-level default storage backend instance.
# NOTE(review): `DefaultStorage` is not imported in the visible import block —
# presumably it comes from django.core.files.storage (or is defined elsewhere
# in this file); confirm, otherwise this line raises NameError at import time.
default_storage = DefaultStorage()
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
308,
13344,
198,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
12708,
16624,
1330,
1064,
364,
198,
6738,
42625,
14... | 3.625806 | 155 |
from django.conf.urls import url, include
from django.contrib import admin
# URL routing table: the admin UI plus the JSON API, where each API
# resource delegates to its own app-level URLconf.
_admin_patterns = [
    url(r'^admin/', admin.site.urls),
]
_api_patterns = [
    url(r'^api/customers/', include('customers.urls')),
    url(r'^api/products/', include('products.urls')),
    url(r'^api/discounts/', include('discounts.urls')),
]

urlpatterns = _admin_patterns + _api_patterns
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
11,
2291,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
28482,
14,
3256,
13169,
13,
15654... | 2.434426 | 122 |
import os
import logging
| [
11748,
28686,
198,
11748,
18931,
628
] | 4.333333 | 6 |
import tbs.helper.filedescriptor as fd
import tbs.logger.log as logger
import os
def downloadRepo(subpath="toslive"):
    """Clone the tos-live image repository and chdir into *subpath* inside it.

    A failed clone is only logged, not fatal: an earlier checkout may already
    be present on disk, so the directory change is attempted unconditionally
    (and will itself raise if the repository is truly missing).
    """
    clone_result = fd.CMD(
        ["git", "clone", "https://github.com/ODEX-TOS/tos-live.git"]
    ).execute(True)
    if clone_result.exitcode != 0:
        # Keep going — fall back to whatever build files already exist.
        logger.log("Something went wrong when trying to download build files, checking for existing build files", logger.LOG_ERROR)
    os.chdir("tos-live/"+subpath)
11748,
256,
1443,
13,
2978,
525,
13,
69,
3902,
3798,
1968,
273,
355,
277,
67,
198,
11748,
256,
1443,
13,
6404,
1362,
13,
6404,
355,
49706,
198,
11748,
28686,
198,
4299,
4321,
6207,
78,
7,
7266,
6978,
2625,
83,
418,
12583,
1,
2599,
... | 2.843575 | 179 |
"""
34. Find First and Last Position of Element in Sorted Array
https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/
Time complexity: O()
Space complexity: O()
"""
from typing import List
# Test cases; each dict maps directly onto searchRange's keyword arguments.
ans = [
    {'nums': [5, 7, 7, 8, 8, 10], 'target': 8},  # expected output: [3, 4]
    {'nums': [5, 7, 7, 8, 8, 10], 'target': 6},  # expected output: [-1, -1]
]

for case in ans:
    print(Solution().searchRange(**case))
| [
37811,
220,
201,
198,
2682,
13,
9938,
3274,
290,
4586,
23158,
286,
11703,
287,
311,
9741,
15690,
201,
198,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
19796,
12,
11085,
12,
392,
12,
12957,
12,
9150,
12,
1659,
12,
30854... | 2.28866 | 194 |
import pygad
import numpy
from fis import fuzzification
# Fitness value carried over between GA generations; starts at 0.
# NOTE(review): presumably updated inside a pygad on_generation callback to
# report per-generation improvement — confirm against the rest of the file.
last_fitness = 0
| [
201,
198,
11748,
12972,
70,
324,
201,
198,
11748,
299,
32152,
201,
198,
6738,
277,
271,
1330,
26080,
2649,
201,
198,
12957,
62,
69,
3659,
796,
657,
201,
198,
201,
198,
201,
198
] | 2.515152 | 33 |