content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/evn python3
# -*- config: utf-8 -*-
# Решите следующую задачу: напишите функцию, которая считывает с клавиатуры числа и
# перемножает их до тех пор, пока не будет введен 0. Функция должна возвращать
# полученное произведение. Вызовите функцию и выведите на экран результат ее работы.
if __name__ == '__main__':
prod = composition()
print(prod)
| [
2,
48443,
14629,
14,
8800,
14,
1990,
77,
21015,
18,
198,
2,
532,
9,
12,
4566,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
12466,
254,
16843,
141,
230,
18849,
20375,
16843,
220,
21727,
30143,
16843,
43666,
35072,
141,
236,
141,
... | 1.115502 | 329 |
# Generated by Django 3.0.2 on 2020-05-21 09:40
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
17,
319,
12131,
12,
2713,
12,
2481,
7769,
25,
1821,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import base64
import enum
import json
import requests
class ViberMessageType(enum.IntEnum):
"""Represents Viber message type"""
TextOnly = 106
TextImageButton = 108
TextOnly2Way = 206
TextImageButton2Way = 208
class ViberMessageSourceType(enum.IntEnum):
"""Represents Viber message source type"""
Promotional = 1
Transactional = 2
class ViberMessageStatus(enum.IntEnum):
"""Represents Viber message status"""
Sent = 0
Delivered = 1
Error = 2
Rejected = 3
Undelivered = 4
Pending = 5
Unknown = 20
class ViberError(Exception):
"""Represents Viber error"""
def __init__(self, name, message, code, status) -> None:
"""Initializes ViberError object
Args:
name (string): Error name
message (string): Error message
code (int): Error code
status (int): Error status
"""
super().__init__()
self.name = name
self.message = message
self.code = code
self.status = status
class ViberMessage:
"""Represents Viber message"""
def __init__(self, sender, receiver, message_type, text, source_type, image_url=None, button_caption=None, button_action=None,
callback_url=None, validity_period=None):
"""Initializes ViberMessage object
Args:
sender (string): Message sender (from whom message is sent)
receiver (string): Message receiver (to whom message is sent)
message_type (ViberMessageType): Message type
text (string): Message body
source_type (ViberMessageSourceType): Message sending procedure
image_url (string, optional): Image URL for promotional message with button caption and button action. Defaults to None.
button_caption (string, optional): Button caption. Defaults to None.
button_action (string, optional): URL for transition when the button is pressed. Defaults to None.
callback_url (string, optional): URL for message status callback. Defaults to None.
validity_period (int, optional): Life time of a message (in seconds). Defaults to None.
"""
self.sender = sender
self.receiver = receiver
self.message_type = message_type
self.text = text
self.image_url = image_url
self.button_caption = button_caption
self.button_action = button_action
self.source_type = source_type
self.callback_url = callback_url
self.validity_period = validity_period
class ViberMessageReceipt:
"""Represents Viber message receipt (Id and status of the particular Viber message)"""
def __init__(self, message_id, status) -> None:
"""Initializes ViberMessageReceipt object
Args:
message_id (int): Viber message Id
status (ViberMessageStatus): Viber message status
"""
self.message_id = message_id
self.status = ViberMessageStatus(status)
class ViberClient:
"""Client to work with Viber messages"""
def __init__(self, api_key) -> None:
"""Initializes ViberClient object
Args:
api_key (string): User access key
"""
self.api_key = api_key
def send_message(self, message) -> int:
"""Sends Viber message
Args:
message (ViberMessage): Viber message to send
Returns:
int: Id of the sent Viber message
Raises:
ViberError: If specific Viber error occurred
"""
request = message.toJSON()
return self.__make_http_request("send-viber", request, ok_response_func)
def get_message_status(self, message_id) -> ViberMessageReceipt:
"""Returns Viber message status
Args:
message_id (int): Id of the Viber message (sent in the last 5 days)
Returns:
ViberMessageReceipt: Viber message receipt object
Raises:
ViberError: If specific Viber error occurred
"""
request = json.dumps({"message_id": message_id})
return self.__make_http_request("receive-viber", request, ok_response_func)
| [
11748,
2779,
2414,
198,
11748,
33829,
198,
11748,
33918,
198,
11748,
7007,
628,
198,
4871,
569,
1856,
12837,
6030,
7,
44709,
13,
5317,
4834,
388,
2599,
198,
220,
220,
220,
37227,
6207,
6629,
569,
1856,
3275,
2099,
37811,
628,
220,
220,
... | 2.531494 | 1,667 |
#!/usr/bin/env python2
# Python libs
import math
# Ros libsSIMULATION:
import rospy
# Ros messages
from std_msgs.msg import Float64
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Twist
#Gravity
G = 9.81
FILTER_SIZE = 20
# IMU offset in real world
if rospy.has_param('/use_simulation'):
SIMULATION = rospy.get_param('/use_simulation')
if SIMULATION:
OFFSET_Y = 0.0
else:
OFFSET_Y = 0.134
else:
SIMULATION = False
OFFSET_Y = 0.134
# get v_max
if rospy.has_param('/v_max'):
V_MAX = rospy.get_param('/v_max')
else:
V_MAX = 0.05
# get loop rate in hz
if rospy.has_param('/loop_rate_in_hz'):
LOOP_RATE_IN_HZ = rospy.get_param('/loop_rate_in_hz')
else:
LOOP_RATE_IN_HZ = 100
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException: pass | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
11361,
9195,
82,
198,
11748,
10688,
198,
198,
2,
10018,
9195,
82,
48913,
6239,
6234,
25,
198,
11748,
686,
2777,
88,
198,
198,
2,
10018,
6218,
198,
6738,
14367,
62,
907,
14542,... | 2.254011 | 374 |
import os
from django.core.files import File
from nose.tools import eq_
from kitsune.groups.models import GroupProfile
from kitsune.groups.tests import group_profile
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import user, group, add_permission
| [
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
1330,
9220,
198,
198,
6738,
9686,
13,
31391,
1330,
37430,
62,
198,
198,
6738,
19183,
1726,
13,
24432,
13,
27530,
1330,
4912,
37046,
198,
6738,
19183,
1726,
13,
24432,
13,
... | 3.457143 | 105 |
#! /usr/bin/python
"""
Entry point for scrapper module to be used in 2017 and 2013
in this module it will be defined all the logic behind the data scrapping from the website(s)
"""
import requests
import json
from .filter import Filter
from .data_transform import transform
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
37811,
198,
30150,
966,
329,
19320,
2848,
8265,
284,
307,
973,
287,
2177,
290,
2211,
198,
259,
428,
8265,
340,
481,
307,
5447,
477,
262,
9156,
2157,
262,
1366,
19320,
2105,
422,
262,
3052... | 3.481928 | 83 |
import numpy as np
from numba.decorators import jit, autojit
import hickle
import os, gzip
binary_search_numba = autojit(binary_search, nopython=True)
ex_numba = autojit(extract, nopython=True)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
13,
12501,
273,
2024,
1330,
474,
270,
11,
8295,
45051,
198,
11748,
289,
39423,
198,
11748,
28686,
11,
308,
13344,
198,
198,
39491,
62,
12947,
62,
77,
2178,
64,
796,
8295,
45051,
7,
... | 2.68 | 75 |
from setuptools import setup
setup(
name='fix-author',
version='1.1',
packages=['fix_author'],
install_requires=['rbnf', 'wisepy'],
license='MIT',
author='thautwarm',
keywords='git commit, fix author',
description='fix author info in git commits',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
python_requires='>=3.6.0',
url='https://github.com/thautwarm/fix-author',
author_email='twshere@outlook.com',
platforms='any',
entry_points={'console_scripts': ['fix-author=fix_author.cli:main']},
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython'
],
zip_safe=False)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
13049,
12,
9800,
3256,
198,
220,
220,
220,
2196,
11639,
16,
13,
16,
3256,
198,
220,
220,
220,
10392,
28,
17816,
13049,
62,
9800,
6,
4357,
198,... | 2.626623 | 308 |
import csv
import base64
import pandas as pd
import datetime as dt
from realtime_details import (extract_places_regions, radius_multiplier)
logo_image = 'cartoon-globe.png'
en_logo = base64.b64encode(open(logo_image, 'rb').read())
entire_month = pd.read_csv('https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_month.csv')
def extract_month_values():
'''
Takes the entire data in a list -> [ [], [], [] ]
Parameters : `None`
Return : `list`
'''
all_month = entire_month.copy()
time = pd.to_datetime(all_month['time'])
all_month['time'] = time
fields = [field for field in all_month]
month_values = all_month.values
return fields, month_values
def csv_feature_extraction(year, month, day):
'''
Considers the data which only meet the criteria, year, month, value
Parameters : `year`, `month`, `day`
Return : `list`
'''
fields, month_values = extract_month_values()
extraction = [fields]
for vals in month_values:
if vals[0].year == year and vals[0].month == month and vals[0].day == day:
if vals[4] >= 4.5: # magnitude > 1
extraction.append(vals)
return extraction
def day_wise_extraction(year, month, day):
'''
Writes the data which is selected as per the input into a CSV file.
Parameters : `year`, `month`, `day`
Return : `pandas DataFrame`
'''
extraction = csv_feature_extraction(year, month, day)
with open('month_day.csv', 'w') as extract:
writer = csv.writer(extract)
writer.writerows(extraction)
def get_dates_sorted():
'''
Sort the dates
Parameters : `None`
Return : `list`
'''
_, month_values = extract_month_values()
all_dates = []
for each_date in month_values:
all_dates.append(str(each_date[0].date()))
timestamps = sorted(list(set(all_dates)))
return timestamps
timestamps = get_dates_sorted()
date_start = dt.datetime.strptime(timestamps[0], '%Y-%m-%d')
date_end = dt.datetime.strptime(timestamps[len(timestamps)-1], '%Y-%m-%d')
def place_wise_extraction(place_name):
'''
This function is useful for plotting as per the place name chosen.
Parameters : `place_name` --> Alaska, Japan ...
Return : `pandas DataFrame`
'''
all_month = entire_month.copy()
all_places = all_month['place'].tolist()
u_regions, _, _ = extract_places_regions(all_places) # specific last name
if place_name in u_regions:
entire_place = all_month[all_month['place'].str.contains(place_name)]
return entire_place
else:
entire_world = all_month[all_month['mag'] > 1]
return entire_world
def history_eq(eq_some, zoom_value):
'''
This function basically reduces redundancy.
Parameters : `eq_some`, `zoom_value`
Return : `tuple`
'''
lats = eq_some['latitude'].tolist()
lons = eq_some['longitude'].tolist()
places = eq_some['place'].tolist()
mags = ['Magnitude : ' + str(i) for i in eq_some['mag']]
mag_size = [float(i) * radius_multiplier['outer'] for i in eq_some['mag']]
depths = ['Depth : ' + str(i) for i in eq_some['depth']]
info = [places[i] + '<br>' + mags[i] + '<br>' + depths[i] for i in range(len(places))]
zooming = zoom_value
return lats, lons, places, mags, mag_size, depths, info, zooming
| [
11748,
269,
21370,
198,
11748,
2779,
2414,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
4818,
8079,
355,
288,
83,
198,
198,
6738,
1103,
2435,
62,
36604,
1330,
357,
2302,
974,
62,
23625,
62,
2301,
507,
11,
16874,
62,
47945,
959,
... | 2.622147 | 1,183 |
# Utility functions
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from functools import reduce
import operator
def product(seq):
"""The product of a sequence of numbers"""
return reduce(operator.__mul__, seq, 1)
def scan_reverse(f, arr):
"""Scan over a list in reverse, using a function"""
r=list(arr)
for i in reversed(range(len(r))[1:]):
r[i-1] = f(r[i-1],r[i])
return r
def extend(arr,length):
"""Extend a list APL-style"""
if len(arr) >= length: return arr[:length]
else:
r=arr[:]
while length-len(r) >= len(arr):
r.extend(arr)
else:
r.extend(arr[:length-len(r)])
return r
| [
2,
34030,
5499,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
1257,
310,
10141,
1330,
4646,
628,
198,... | 2.361635 | 318 |
from django.shortcuts import redirect
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
628
] | 4.333333 | 9 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The configs list command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.runtime_config import util
from googlecloudsdk.calliope import base
class List(base.ListCommand):
"""List runtime-config resources within the current project.
This command lists runtime-config resources for the current project.
"""
DEFAULT_PAGE_SIZE = 100
detailed_help = {
'EXAMPLES': """\
To list all runtime-config resources for the current project, run:
$ {command}
The --filter parameter can be used to filter results based on content.
For example, to list all runtime-config resources with names that
begin with 'foo', run:
$ {command} --filter 'name=foo*'
""",
}
@staticmethod
def Run(self, args):
"""Run 'runtime-configs list'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Yields:
The list of runtime-config resources.
Raises:
HttpException: An http error response was received while executing api
request.
"""
config_client = util.ConfigClient()
messages = util.Messages()
project = util.Project()
request = messages.RuntimeconfigProjectsConfigsListRequest(
parent=util.ProjectPath(project),
)
page_size = args.page_size or self.DEFAULT_PAGE_SIZE
results = list_pager.YieldFromList(
config_client, request, field='configs',
batch_size_attribute='pageSize', limit=args.limit,
batch_size=page_size,
)
for result in results:
yield util.FormatConfig(result)
| [
2,
15069,
1584,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.986684 | 751 |
import warnings
warnings.warn("sympy_compat module is deprecated. Use `import symengine` instead", DeprecationWarning,
stacklevel=2)
from symengine import *
| [
11748,
14601,
198,
40539,
654,
13,
40539,
7203,
1837,
3149,
88,
62,
5589,
265,
8265,
318,
39224,
13,
5765,
4600,
11748,
5659,
18392,
63,
2427,
1600,
2129,
8344,
341,
20361,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 3 | 57 |
import os
# list all include directories
include_directories = [os.path.sep.join(x.split('/')) for x in ['extension/httpfs/include', 'third_party/picohash', 'third_party/httplib']]
# source files
source_files = [os.path.sep.join(x.split('/')) for x in ['extension/httpfs/crypto.cpp', 'extension/httpfs/httpfs.cpp', 'extension/httpfs/httpfs-extension.cpp', 'extension/httpfs/s3fs.cpp']]
| [
11748,
28686,
198,
2,
1351,
477,
2291,
29196,
198,
17256,
62,
12942,
1749,
796,
685,
418,
13,
6978,
13,
325,
79,
13,
22179,
7,
87,
13,
35312,
10786,
14,
6,
4008,
329,
2124,
287,
37250,
2302,
3004,
14,
4023,
9501,
14,
17256,
3256,
... | 2.662069 | 145 |
#!/usr/bin/env python3
'''
To start, read the docblock of `provides.py`. The code in this file verifies
that a set of Items can be correctly installed (all requirements are
satisfied, etc). It then computes an installation order such that every
Item is installed only after all of the Items that match its Requires have
already been installed. This is known as dependency order or topological
sort.
'''
from collections import namedtuple
# To build the item-to-item dependency graph, we need to first build up a
# complete mapping of {path, {items, requiring, it}}. To validate that
# every requirement is satisfied, it is similarly useful to have access to a
# mapping of {path, {what, it, provides}}. Lastly, we have to
# simultaneously examine a single item's requires() and provides() for the
# purposes of sanity checks.
#
# To avoid re-evaluating ImageItem.{provides,requires}(), we'll just store
# everything in these data structures:
ItemProv = namedtuple('ItemProv', ['provides', 'item'])
# NB: since the item is part of the tuple, we'll store identical
# requirements that come from multiple items multiple times. This is OK.
ItemReq = namedtuple('ItemReq', ['requires', 'item'])
ItemReqsProvs = namedtuple('ItemReqsProvs', ['item_provs', 'item_reqs'])
class ValidatedReqsProvs:
'''
Given a set of Items (see the docblocks of `item.py` and `provides.py`),
computes {'path': {ItemReqProv{}, ...}} so that we can build the
DependencyGraph for these Items. In the process validates that:
- No one item provides or requires the same path twice,
- Each path is provided by at most one item (could be relaxed later),
- Every Requires is matched by a Provides at that path.
'''
@staticmethod
@staticmethod
class DependencyGraph:
'''
Given an iterable of ImageItems, validates their requires / provides
structures, and populates indexes describing dependencies between items.
The indexes make it easy to topologically sort the items.
'''
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
2514,
923,
11,
1100,
262,
2205,
9967,
286,
4600,
15234,
1460,
13,
9078,
44646,
383,
2438,
287,
428,
2393,
3326,
6945,
198,
5562,
257,
900,
286,
17230,
460,
307,
938... | 3.499133 | 577 |
# coding=utf-8
from comply.rules.rule import *
class SymbolUsed(Rule):
""" Always list used symbols as needed/required.<br/><br/>**_Not implemented._**
If your code is using a symbol, but not explicitly telling where it got it from, you might have
a hard time figuring out just how far your code reaches out.
<br/><br/>
See <tt>require-symbols</tt>.
"""
@property
@property
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
11997,
13,
38785,
13,
25135,
1330,
1635,
628,
198,
4871,
38357,
38052,
7,
31929,
2599,
198,
220,
220,
220,
37227,
16622,
1351,
973,
14354,
355,
2622,
14,
35827,
29847,
1671,
14,
6927,
1671,... | 3.051852 | 135 |
SEQUENCE = [
'change_descriptions',
'last_review_timestamp',
'shipit_count',
'default_reviewer_repositories',
]
| [
5188,
10917,
18310,
796,
685,
198,
220,
220,
220,
705,
3803,
62,
20147,
1968,
507,
3256,
198,
220,
220,
220,
705,
12957,
62,
19023,
62,
16514,
27823,
3256,
198,
220,
220,
220,
705,
6720,
270,
62,
9127,
3256,
198,
220,
220,
220,
705,... | 2.285714 | 56 |
from django.apps import AppConfig
from django.db.migrations import state
from django.db.models import options
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('db_constraints',)
state.DEFAULT_NAMES = options.DEFAULT_NAMES
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
42625,
14208,
13,
9945,
13,
76,
3692,
602,
1330,
1181,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
3689,
198,
198,
25811,
13,
7206,
38865,
62,
45,
29559,
796,
3689,
... | 3.068493 | 73 |
import pickle
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from ImageGenerator import *
from sklearn.model_selection import KFold
from keras.applications import VGG16
from keras.applications.resnet50 import ResNet50
from keras.layers import Input, Dropout, Dense, concatenate, CuDNNGRU, Embedding, Flatten, Activation, BatchNormalization, PReLU
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
import keras.backend as K
from tqdm import tqdm
from nltk import ngrams
from keras.backend.tensorflow_backend import set_session
from sklearn.metrics import mean_squared_error
import os
import tensorflow as tf
from keras import models
from keras import layers
from keras import optimizers
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
train_dir = '../input/train_jpg/data/competition_files/train_jpg_ds/'
test_dir = '../input/test_jpg/data/competition_files/test_jpg_ds/'
# restrict gpu usage
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
set_session(sess)
import pickle
with open('../input/train_ridge.p', 'rb') as f:
train = pickle.load(f)
with open('../input/test_ridge.p', 'rb') as f:
test = pickle.load(f)
# train = train.iloc[:10000]
nfolds=10
fname='vgg_base'
epochs= 30
model = get_model()
val_predict = train_bagging(train, train.deal_probability.values, nfolds)
# print(f"model list length: {len(model_list)}")
# fname = 'des_word_svd_200_char_svd_1000_title_200_resnet50_500_lgb_1fold'
print('storing test prediction', flush=True)
for index in tqdm(range(nfold)):
model_path = f'../weights/{fname}_fold{index}.hdf5'
model.load_weights(model_path)
if index == 0:
y_pred = model.predict(x_test)
else:
y_pred *= model.predict(x_test)
# y_pred += model.predict(x_test)
y_pred = np.clip(y_pred, 0, 1)
y_pred = y_pred **( 1.0/ (nfold))
print('storing test prediction', flush=True)
sub = pd.read_csv('../input/sample_submission.csv')
sub['deal_probability'] = y_pred
sub['deal_probability'].clip(0.0, 1.0, inplace=True)
sub.to_csv(f'../output/{fname}_test.csv', index=False)
print('storing oof prediction', flush=True)
train_data = pd.read_csv('../input/train.csv.zip')
label = ['deal_probability']
train_user_ids = train_data.user_id.values
train_item_ids = train_data.item_id.values
train_item_ids = train_item_ids.reshape(len(train_item_ids), 1)
train_user_ids = train_user_ids.reshape(len(train_user_ids), 1)
val_predicts = pd.DataFrame(data=val_predict, columns= label)
val_predicts['user_id'] = train_user_ids
val_predicts['item_id'] = train_item_ids
val_predicts.to_csv(f'../output/{fname}_train.csv', index=False)
| [
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
13,
3866,
36948,
13,
9060,
1330,
7412,
6601,
8645,
1352,
198,
6738,
41927,
292,
1330,
6436,
11341,
198,
6738,
7412,
8645,
1352,
1330,
1635,
198,
6738,
1341,
35... | 2.538462 | 1,118 |
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c
from esphome.const import CONF_ID
CODEOWNERS = ["@berfenger"]
DEPENDENCIES = ["i2c"]
MULTI_CONF = True
CONF_STORE_IN_EEPROM = "store_in_eeprom"
mcp4728_ns = cg.esphome_ns.namespace("mcp4728")
MCP4728Component = mcp4728_ns.class_("MCP4728Component", cg.Component, i2c.I2CDevice)
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(MCP4728Component),
cv.Optional(CONF_STORE_IN_EEPROM, default=False): cv.boolean,
}
)
.extend(cv.COMPONENT_SCHEMA)
.extend(i2c.i2c_device_schema(0x60))
)
| [
11748,
1658,
746,
462,
13,
8189,
5235,
355,
269,
70,
198,
11748,
1658,
746,
462,
13,
11250,
62,
12102,
341,
355,
269,
85,
198,
6738,
1658,
746,
462,
13,
5589,
3906,
1330,
1312,
17,
66,
198,
6738,
1658,
746,
462,
13,
9979,
1330,
71... | 2 | 328 |
import os
import time
import signal
import pika
INTERVAL = int(os.getenv('PYP_INTERVAL', 5))
RABBITMQ_HOST = os.getenv('PYP_RABBITMQ_HOST', 'rabbitmq')
RABBITMQ_VHOST = os.getenv('PYP_RABBITMQ_VHOST')
RABBITMQ_USER = os.getenv('PYP_RABBITMQ_USER')
RABBITMQ_PASS = os.getenv('PYP_RABBITMQ_PASS')
if __name__ == '__main__':
credentials = pika.PlainCredentials(
RABBITMQ_USER,
RABBITMQ_PASS,
)
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=RABBITMQ_HOST,
credentials=credentials,
virtual_host=RABBITMQ_VHOST,
)
)
signal.signal(
signal.SIGTERM,
lambda s, f: connection.close(),
)
channel = connection.channel()
channel.queue_declare(queue='hello')
while True:
time.sleep(INTERVAL)
print(' [x] Sending message.')
channel.basic_publish(
exchange='',
routing_key='hello',
body='Hello World!',
)
| [
11748,
28686,
198,
11748,
640,
198,
11748,
6737,
198,
11748,
279,
9232,
198,
198,
41358,
23428,
796,
493,
7,
418,
13,
1136,
24330,
10786,
47,
48232,
62,
41358,
23428,
3256,
642,
4008,
198,
3861,
15199,
2043,
49215,
62,
39,
10892,
796,
... | 2.04898 | 490 |
# Code made for Sergio Andrés Díaz Ariza
# 29 July 2021
# License MIT
# Transport Phenomena: Pipe find Diameter
from scipy.optimize import minimize
import seaborn as sns
import numpy as np
import time
start_time = time.time()
sns.set()
# Optimice the function for T, and assign constraints to resolve for Rmin,E_cons,C1,C2
Opt = Optimice()
constraint_equal = {'type': 'eq', 'fun': Opt.objective_Colebrook}
constraint_equal1 = {'type': 'eq', 'fun': Opt.constraint_D_eq_f}
constraint_equal2 = {'type': 'eq', 'fun': Opt.constraint_Vavg_eq_D}
constraint = [constraint_equal, constraint_equal1, constraint_equal2]
x0 = [0.5, 1, 1.5]
sol = minimize(Opt.objective_Colebrook, x0, method='SLSQP', constraints=constraint, options={'maxiter': 1000})
print(sol)
print("\nDarcy factor :\t", sol.x[0])
print("\nDiameter:\t", sol.x[1], "[m]")
print("\nVelocity Average:\t", sol.x[2], "[m/s]")
print("\n--- %s seconds ---" % (time.time() - start_time))
| [
2,
6127,
925,
329,
36759,
843,
81,
20954,
360,
8836,
1031,
6069,
4496,
198,
2,
2808,
2901,
33448,
198,
2,
13789,
17168,
198,
2,
19940,
34828,
3674,
64,
25,
36039,
1064,
360,
13173,
198,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330... | 2.587432 | 366 |
import os
import random
from PIL import Image, ImageDraw, ImageFont
import click
import textwrap
@click.command()
@click.option('--filename', type=click.Path(dir_okay=False), required=True)
@click.option('--text', type=str, required=True)
@click.option('--font', type=str, default='LiberationSans-Bold')
@click.option('--font-size', type=int, default=42)
@click.option('--color', is_flag=True)
@click.option('--no-rect', is_flag=True)
@click.option('--wrap-width', type=int, default=42)
@click.option('--bottom', is_flag=True)
@click.option('--right', is_flag=True)
@click.option('--invert', is_flag=True)
| [
11748,
28686,
198,
11748,
4738,
198,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
11,
7412,
23252,
198,
198,
11748,
3904,
198,
198,
11748,
2420,
37150,
628,
198,
198,
31,
12976,
13,
21812,
3419,
198,
31,
12976,
13,
18076,
10786,
... | 2.807339 | 218 |
import pytest
import ckan.tests.factories as factories
import ckan.plugins.toolkit as tk
import ckan.authz as authz
import ckan.model as model
import ckanext.hdx_theme.tests.hdx_test_base as hdx_test_base
from ckanext.hdx_org_group.helpers.static_lists import ORGANIZATION_TYPE_LIST
from ckanext.hdx_users.helpers.notifications_dao import QuarantinedDatasetsDao
from ckanext.hdx_users.helpers.notification_service import QuarantinedDatasetsService, \
SysadminQuarantinedDatasetsService
config = tk.config
NotAuthorized = tk.NotAuthorized
_get_action = tk.get_action
| [
11748,
12972,
9288,
198,
198,
11748,
269,
27541,
13,
41989,
13,
22584,
1749,
355,
17590,
198,
11748,
269,
27541,
13,
37390,
13,
25981,
15813,
355,
256,
74,
198,
11748,
269,
27541,
13,
18439,
89,
355,
6284,
89,
198,
11748,
269,
27541,
... | 2.786408 | 206 |
import pytest
import os
@pytest.fixture(scope="session", autouse=True)
| [
11748,
12972,
9288,
201,
198,
11748,
28686,
201,
198,
201,
198,
201,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
2625,
29891,
1600,
1960,
1076,
28,
17821,
8,
201,
198
] | 2.516129 | 31 |
#! /usr/bin/env python3
#
# Simple UDP server companion for udp_client.c
# This expects to receive a particular message from the CC3000. Upon each
# receipt it will respond with its own message.
import socket
UDP_IP = "10.0.0.1"
UDP_PORT = 44444
MSG_EXP = "Hello World from CC3000"
MSG_EXP_BYTES = MSG_EXP.encode()
MSG_TX = "Hello CC3000"
MSG_TX_BYTES = MSG_TX.encode()
print("Creating socket...")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("Created!")
print("Binding to:", UDP_IP, ":", UDP_PORT)
sock.bind((UDP_IP, UDP_PORT))
print("Bound!")
while True:
data_bytes, (src_ip, src_port) = sock.recvfrom(256)
data = data_bytes.decode()
print("Message Received:")
print("data is: ", data)
print("src_ip is: ", src_ip)
print("src_port is: ", src_port)
if data != MSG_EXP:
print("Message text was not as expected.")
continue
else:
print("Sending Reply...")
sock.sendto(MSG_TX_BYTES, (src_ip, src_port))
print("Sent!")
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
17427,
36428,
4382,
15185,
329,
334,
26059,
62,
16366,
13,
66,
198,
2,
770,
13423,
284,
3328,
257,
1948,
3275,
422,
262,
12624,
23924,
13,
14438,
1123,
220,
198,
2... | 2.391101 | 427 |
import pytest
from constructure.utilities import MissingOptionalDependency, requires_package
from constructure.utilities.utilities import _CONDA_INSTALLATION_COMMANDS
| [
11748,
12972,
9288,
198,
198,
6738,
1500,
5620,
13,
315,
2410,
1330,
25639,
30719,
35,
2690,
1387,
11,
4433,
62,
26495,
198,
6738,
1500,
5620,
13,
315,
2410,
13,
315,
2410,
1330,
4808,
10943,
5631,
62,
38604,
7036,
6234,
62,
9858,
107... | 3.8 | 45 |
"""Setup."""
from setuptools import setup, find_packages
inst_reqs = ["rio-cogeo~=2.0a4", "rasterio[s3]~=1.1", "requests"]
extra_reqs = {"test": ["pytest", "pytest-cov"]}
setup(
name="app",
version="0.0.2",
description=u"cogeo watchbot",
python_requires=">=3",
keywords="AWS-Lambda Python",
packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=inst_reqs,
extras_require=extra_reqs,
)
| [
37811,
40786,
526,
15931,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
8625,
62,
42180,
82,
796,
14631,
27250,
12,
1073,
469,
78,
93,
28,
17,
13,
15,
64,
19,
1600,
366,
81,
1603,
952,
58,
82,
18,
... | 2.308756 | 217 |
import uvicorn
from fastapi import FastAPI
from app.config import get_config
from app.db import db
from app.rest import posts
app = FastAPI(title="Async FastAPI")
app.include_router(posts.router, prefix='/api/posts')
@app.on_event("startup")
@app.on_event("shutdown")
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
11748,
334,
25531,
1211,
198,
6738,
3049,
15042,
1330,
12549,
17614,
198,
198,
6738,
598,
13,
11250,
1330,
651,
62,
11250,
198,
6738,
598,
13,
9945,
1330,
20613,
198,
6738,
598,
13,
2118,
1330,
6851,
198,
198,
1324,
796,
12549,
17614,
... | 2.666667 | 132 |
# Copyright (c) 2013 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fixtures
from oslo_config import cfg
from oslo_log import log
from osprofiler import opts
import testtools
from zaqar.common import configs
from zaqar.tests import helpers
class TestBase(testtools.TestCase):
"""Child class of testtools.TestCase for testing Zaqar.
Inherit from this and write your test methods. If the child class defines
a prepare(self) method, this method will be called before executing each
test method.
"""
config_file = None
@classmethod
def conf_path(cls, filename):
"""Returns the full path to the specified Zaqar conf file.
:param filename: Name of the conf file to find (e.g.,
'wsgi_memory.conf')
"""
if os.path.exists(filename):
return filename
return os.path.join(os.environ["ZAQAR_TESTS_CONFIGS_DIR"], filename)
@classmethod
def load_conf(cls, filename):
"""Loads `filename` configuration file.
:param filename: Name of the conf file to find (e.g.,
'wsgi_memory.conf')
:returns: Project's config object.
"""
conf = cfg.ConfigOpts()
log.register_options(conf)
conf(args=[], default_config_files=[cls.conf_path(filename)])
return conf
def config(self, group=None, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the tearDown() method.
"""
for k, v in kw.items():
self.conf.set_override(k, v, group)
| [
2,
15069,
357,
66,
8,
2211,
37927,
13200,
14504,
278,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,... | 2.806977 | 860 |
#!/usr/bin/env python
"""
Generate client and server CURVE certificate files then move them into the
appropriate store directory, private_keys or public_keys. The certificates
generated by this script are used by the stonehouse and ironhouse examples.
In practice this would be done by hand or some out-of-band process.
Author: Chris Laws
"""
import zmq.auth
from __init__ import KEYS_DIR
def generate_certificates():
''' Generate client and server CURVE certificate files'''
# create new keys in certificates dir
zmq.auth.create_certificates(KEYS_DIR, "server")
zmq.auth.create_certificates(KEYS_DIR, "client")
if __name__ == '__main__':
generate_certificates()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
8645,
378,
5456,
290,
4382,
327,
4261,
6089,
10703,
3696,
788,
1445,
606,
656,
262,
198,
13335,
3650,
8619,
11,
2839,
62,
13083,
393,
1171,
62,
13083,
13,
383,
20835,
... | 3.248826 | 213 |
import sys
import zlib
import base64
import requests
import auth_server.jwt as jwt
from json import dumps as json_dumps
from json import loads as json_loads
| [
11748,
25064,
198,
11748,
1976,
8019,
198,
11748,
2779,
2414,
198,
11748,
7007,
198,
11748,
6284,
62,
15388,
13,
73,
46569,
355,
474,
46569,
198,
6738,
33918,
1330,
45514,
355,
33918,
62,
67,
8142,
198,
6738,
33918,
1330,
15989,
355,
33... | 3.468085 | 47 |
# Aula 13 - Desafio 52: Numeros primos
# Ler um numero inteiro e dizer se ele eh ou nao primo
num = int(input('Digite um numero: '))
primo = 0
for n in range(1, num+1):
if num % n == 0:
primo += 1
print('\033[1;32m', end=' ')
else:
print('\033[m', end=' ')
print(f'{n}\033[m ', end='')
print()
if primo == 2:
print(f'\nLogo \033[1m{num}\033[m \033[4mEH NUMERO PRIMO\033[m pois soh eh divisivel por {primo} numeros')
else:
print(f'Logo \033[1m{num}\033[m \033[4mNAO EH NUMERO PRIMO\033[m pois eh divisiel por {primo} numeros')
| [
2,
317,
4712,
1511,
532,
2935,
1878,
952,
6740,
25,
399,
6975,
418,
2684,
418,
198,
2,
31831,
23781,
997,
3529,
493,
68,
7058,
304,
288,
7509,
384,
9766,
32622,
267,
84,
299,
5488,
2684,
78,
198,
198,
22510,
796,
493,
7,
15414,
10... | 2.050179 | 279 |
import cv2
import math
import numpy as np
from filterpy.kalman import KalmanFilter
from scipy.spatial import distance
from scipy.optimize import linear_sum_assignment
# local imported codes
from automatic_brightness import average_brightness, average_brightness_hsv
import parameters as parm
# Dilates the image multiple times to get of noise in order to get a single large contour for each background object
# Identify background objects by their shape (non-circular)
# Creates a copy of the input image which has the background contour filled in
# Returns the filled image which has the background elements filled in
# Take in the original frame, and return two masked images: One contains the sky while the other contains non-sky components
# This is for situations where there is bright sunlight reflecting off the drone, causing it to blend into sky
# Increasing contrast of the whole image will detect drone but cause false positives in the background
# Hence the sky must be extracted before a localised contrast increase can be applied to it
# The sky is extracted by converting the image from RGB to HSV and applying thresholding + morphological operations
# Create VideoCapture object to extract frames from,
# background subtractor object and blob detector objects for object detection
# and VideoWriters for output videos
# Apply image masks to prepare frame for blob detection
# Masks: 1) Increased contrast and brightness to fade out the sky and make objects stand out
# 2) Background subtractor to remove the stationary background (Converts frame to a binary image)
# 3) Further background subtraction by means of contouring around non-circular objects
# 4) Dilation to fill holes in detected drones
# 5) Inversion to make the foreground black for the blob detector to identify foreground objects
# Perform the blob detection on the masked image
# Return detected blob centroids as well as size
# Adjust contrast and brightness of image to make foreground stand out more
# alpha used to adjust contrast, where alpha < 1 reduces contrast and alpha > 1 increases it
# beta used to increase brightness, scale of (-255 to 255) ? Needs confirmation
# formula is im_out = alpha * im_in + beta
# Therefore to change brightness before contrast, we need to do alpha = 1 first
# Assigns detections to tracks using Munkre's Algorithm with cost based on euclidean distance,
# with detections being located too far from existing tracks being designated as unassigned detections
# and tracks without any nearby detections being designated as unassigned tracks
# Using the coordinates of valid assignments which correspond to the detection and track indices,
# update the track with the matched detection
# Existing tracks without a matching detection are aged and considered invisible for the frame
# If any track has been invisible for too long, or generated by a flash, it will be removed from the list of tracks
# Detections not assigned an existing track are given their own track, initialized with the location of the detection
# for single camera detection
# for multi camera detection | [
11748,
269,
85,
17,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
8106,
9078,
13,
74,
282,
805,
1330,
12612,
805,
22417,
198,
6738,
629,
541,
88,
13,
2777,
34961,
1330,
5253,
198,
6738,
629,
541,
88,
13,
40085,
1... | 4.419355 | 713 |
import numpy as np
if __name__ == '__main__':
print('Numpy Version', np.__version__)
# broadcast_operate()
broadcast_operate_example()
| [
11748,
299,
32152,
355,
45941,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
10786,
45,
32152,
10628,
3256,
45941,
13,
834,
9641,
834,
8,
198,
220,
220,
220,
1303,
7025,
62,
3575,
... | 2.696429 | 56 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from enum import Enum
from mobly import base_test
from mobly import records
from mobly import signals
from mobly import utils
class _InstrumentationStructurePrefixes:
  """Class containing prefixes that structure instrumentation output.

  Android instrumentation generally follows the following format:

  .. code-block:: none

    INSTRUMENTATION_STATUS: ...
    ...
    INSTRUMENTATION_STATUS: ...
    INSTRUMENTATION_STATUS_CODE: ...
    INSTRUMENTATION_STATUS: ...
    ...
    INSTRUMENTATION_STATUS: ...
    INSTRUMENTATION_STATUS_CODE: ...
    ...
    INSTRUMENTATION_RESULT: ...
    ...
    INSTRUMENTATION_RESULT: ...
    ...
    INSTRUMENTATION_CODE: ...

  This means that these prefixes can be used to guide parsing
  the output of the instrumentation command into the different
  instrumentation test methods.

  Refer to the following Android Framework package for more details:

  .. code-block:: none

    com.android.commands.am.AM

  """

  # Note: a STATUS_CODE line never matches the STATUS prefix because the
  # character following 'INSTRUMENTATION_STATUS' is '_' rather than ':'.
  STATUS = 'INSTRUMENTATION_STATUS:'
  STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE:'
  RESULT = 'INSTRUMENTATION_RESULT:'
  CODE = 'INSTRUMENTATION_CODE:'
  FAILED = 'INSTRUMENTATION_FAILED:'
class _InstrumentationKnownStatusKeys:
  """Commonly used keys used in instrumentation output for listing
  instrumentation test method result properties.

  An instrumentation status line usually contains a key-value pair such as
  the following:

  .. code-block:: none

    INSTRUMENTATION_STATUS: <key>=<value>

  Some of these key-value pairs are very common and represent test case
  properties. This mapping is used to handle each of the corresponding
  key-value pairs differently than less important key-value pairs.

  Refer to the following Android Framework packages for more details:

  .. code-block:: none

    android.app.Instrumentation
    android.support.test.internal.runner.listener.InstrumentationResultPrinter

  TODO: Convert android.support.* to androidx.*,
  (https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
  """

  CLASS = 'class'
  ERROR = 'Error'
  STACK = 'stack'
  TEST = 'test'
  STREAM = 'stream'
class _InstrumentationStatusCodes:
  """A mapping of instrumentation status codes to test method results.

  When instrumentation runs, at various points output is created in a series
  of blocks that terminate as follows:

  .. code-block:: none

    INSTRUMENTATION_STATUS_CODE: 1

  These blocks typically have several status keys in them, and they indicate
  the progression of a particular instrumentation test method. When the
  corresponding instrumentation test method finishes, there is generally a
  line which includes a status code that gives the test result.

  The UNKNOWN status code is not an actual status code and is only used to
  represent that a status code has not yet been read for an instrumentation
  block.

  Refer to the following Android Framework package for more details:

  .. code-block:: none

    android.support.test.internal.runner.listener.InstrumentationResultPrinter

  TODO: Convert android.support.* to androidx.*,
  (https://android-developers.googleblog.com/2018/05/hello-world-androidx.html).
  """

  UNKNOWN = None
  OK = '0'
  START = '1'
  IN_PROGRESS = '2'
  ERROR = '-1'
  FAILURE = '-2'
  IGNORED = '-3'
  ASSUMPTION_FAILURE = '-4'
class _InstrumentationStatusCodeCategories:
  """A mapping of instrumentation test method results to categories.

  Aside from the TIMING category, these categories roughly map to Mobly
  signals and are used for determining how a particular instrumentation test
  method gets recorded.
  """

  # Codes for blocks that have started but not yet reported a final result.
  TIMING = [
      _InstrumentationStatusCodes.START,
      _InstrumentationStatusCodes.IN_PROGRESS,
  ]
  PASS = [
      _InstrumentationStatusCodes.OK,
  ]
  FAIL = [
      _InstrumentationStatusCodes.ERROR,
      _InstrumentationStatusCodes.FAILURE,
  ]
  SKIPPED = [
      _InstrumentationStatusCodes.IGNORED,
      _InstrumentationStatusCodes.ASSUMPTION_FAILURE,
  ]
class _InstrumentationKnownResultKeys:
  """Commonly used keys for outputting instrumentation errors.

  When instrumentation finishes running all of the instrumentation test
  methods, a result line will appear as follows:

  .. code-block:: none

    INSTRUMENTATION_RESULT:

  If something went wrong during the instrumentation run, such as a crash of
  the application under test, the line will instead appear as follows:

  .. code-block:: none

    INSTRUMENTATION_RESULT: shortMsg=Process crashed.

  Since these keys indicate that something wrong has happened to the
  instrumentation run, they should be checked for explicitly.

  Refer to the following documentation page for more information:

  .. code-block:: none

    https://developer.android.com/reference/android/app/ActivityManager.ProcessErrorStateInfo.html

  """

  LONGMSG = 'longMsg'
  SHORTMSG = 'shortMsg'
class _InstrumentationResultSignals:
  """Instrumentation result block strings for signaling run completion.

  The final section of the instrumentation output generally follows this
  format:

  .. code-block:: none

    INSTRUMENTATION_RESULT: stream=
    ...
    INSTRUMENTATION_CODE -1

  Inside of the elided section, one of these signaling strings should be
  present. If they are not present, this usually means that the
  instrumentation run has failed in some way, such as a crash. Because the
  final instrumentation block simply summarizes information, roughly
  checking for a particular string should be sufficient to confirm a proper
  run completion, as the contents of the instrumentation result block don't
  really matter.

  Refer to the following JUnit package for more details:

  .. code-block:: none

    junit.textui.ResultPrinter

  """

  FAIL = 'FAILURES!!!'
  PASS = 'OK ('
class _InstrumentationBlockStates(Enum):
  """States used for determining what the parser is currently parsing.

  The parser always starts and ends a block in the UNKNOWN state, which is
  used to indicate that either a method or a result block (matching the
  METHOD and RESULT states respectively) are valid follow-ups, which means
  that the parser should be checking for a structure prefix that indicates
  which of those two states it should transition to. If the parser is in
  the METHOD state, then the parser will be parsing input into test
  methods. Otherwise, the parser can simply concatenate all the input to
  check for some final run completion signals.
  """

  UNKNOWN = 0
  METHOD = 1
  RESULT = 2
class _InstrumentationBlock:
  """Container class for parsed instrumentation output for instrumentation
  test methods.

  Instrumentation test methods typically follow the following format:

  .. code-block:: none

    INSTRUMENTATION_STATUS: <key>=<value>
    ...
    INSTRUMENTATION_STATUS: <key>=<value>
    INSTRUMENTATION_STATUS_CODE: <status code #>

  The main issue with parsing this however is that the key-value pairs can
  span multiple lines such as this:

  .. code-block:: none

    INSTRUMENTATION_STATUS: stream=
    Error in ...
    ...

  Or, such as this:

  .. code-block:: none

    INSTRUMENTATION_STATUS: stack=...
    ...

  Because these values are potentially very long, constant string
  concatenation is potentially inefficient. Instead, this class builds up a
  buffer to store the raw output until it is processed into an actual test
  result by the _InstrumentationBlockFormatter class.

  Additionally, this class also serves to store the parser state, which
  means that the BaseInstrumentationTestClass does not need to keep any
  potentially volatile instrumentation related state, so multiple
  instrumentation runs should have completely separate parsing states.

  This class is also used for storing result blocks although very little
  needs to be done for those.

  Attributes:
    begin_time: string, optional timestamp for when the test corresponding
      to the instrumentation block began.
    current_key: string, the current key that is being parsed, default to
      _InstrumentationKnownStatusKeys.STREAM.
    error_message: string, an error message indicating that something
      unexpected happened during an instrumentation test method.
    known_keys: dict, well known keys that are handled uniquely.
    prefix: string, a prefix to add to the class name of the
      instrumentation test methods.
    previous_instrumentation_block: _InstrumentationBlock, the last parsed
      instrumentation block.
    state: _InstrumentationBlockStates, the current state of the parser.
    status_code: string, the state code for an instrumentation method
      block.
    unknown_keys: dict, arbitrary keys that are handled generically.
  """

  @property
  def is_empty(self):
    """Determines whether or not anything has been parsed with this
    instrumentation block.

    Returns:
      A boolean indicating whether or not this instrumentation block has
      parsed and contains any output.
    """
    return self._empty

  def set_error_message(self, error_message):
    """Sets an error message on an instrumentation block.

    This method is used exclusively to indicate that a test method failed
    to complete, which is usually caused by a crash of some sort such that
    the test method is marked as error instead of ignored.

    Args:
      error_message: string, an error message to be added to the
        TestResultRecord to explain that something wrong happened.
    """
    self._empty = False
    self.error_message = error_message

  def _remove_structure_prefix(self, prefix, line):
    """Helper function for removing the structure prefix for parsing.

    Args:
      prefix: string, a _InstrumentationStructurePrefixes to remove from
        the raw output.
      line: string, the raw line from the instrumentation output.

    Returns:
      A string containing a key-value pair describing some property
      of the current instrumentation test method.
    """
    return line[len(prefix):].strip()

  def set_status_code(self, status_code_line):
    """Sets the status code for the instrumentation test method, used in
    determining the test result.

    Args:
      status_code_line: string, the raw instrumentation output line that
        contains the status code of the instrumentation block.
    """
    self._empty = False
    self.status_code = self._remove_structure_prefix(
        _InstrumentationStructurePrefixes.STATUS_CODE,
        status_code_line,
    )
    # A START code marks the beginning of a test method, so capture the
    # current time for the eventual TestResultRecord's begin_time.
    if self.status_code == _InstrumentationStatusCodes.START:
      self.begin_time = utils.get_current_epoch_time()

  def set_key(self, structure_prefix, key_line):
    """Sets the current key for the instrumentation block.

    For unknown keys, the key is added to the value list in order to
    better contextualize the value in the output.

    Args:
      structure_prefix: string, the structure prefix that was matched
        and that needs to be removed.
      key_line: string, the raw instrumentation output line that contains
        the key-value pair.
    """
    self._empty = False
    key_value = self._remove_structure_prefix(
        structure_prefix,
        key_line,
    )
    if '=' in key_value:
      (key, value) = key_value.split('=', 1)
      self.current_key = key
      if key in self.known_keys:
        self.known_keys[key].append(value)
      else:
        # Unknown keys keep the full 'key=value' text for context.
        self.unknown_keys[key].append(key_value)

  def add_value(self, line):
    """Adds unstructured or multi-line value output to the current parsed
    instrumentation block for outputting later.

    Usually, this will add extra lines to the value list for the current
    key-value pair. However, sometimes, such as when instrumentation
    failed to start, output does not follow the structured prefix format.
    In this case, adding all of the output is still useful so that a user
    can debug the issue.

    Args:
      line: string, the raw instrumentation line to append to the value
        list.
    """
    # Don't count whitespace only lines.
    if line.strip():
      self._empty = False

    if self.current_key in self.known_keys:
      self.known_keys[self.current_key].append(line)
    else:
      self.unknown_keys[self.current_key].append(line)

  def transition_state(self, new_state):
    """Transitions or sets the current instrumentation block to the new
    parser state.

    Args:
      new_state: _InstrumentationBlockStates, the state that the parser
        should transition to.

    Returns:
      A new instrumentation block set to the new state, representing
      the start of parsing a new instrumentation test method.
      Alternatively, if the current instrumentation block represents the
      start of parsing a new instrumentation block (state UNKNOWN), then
      this returns the current instrumentation block set to the now
      known parsing state.
    """
    if self.state == _InstrumentationBlockStates.UNKNOWN:
      self.state = new_state
      return self
    else:
      next_block = _InstrumentationBlock(
          state=new_state,
          prefix=self.prefix,
          previous_instrumentation_block=self,
      )
      # A timing block (started/in progress) has not finished yet, so its
      # start time carries over to the continuation block.
      if self.status_code in _InstrumentationStatusCodeCategories.TIMING:
        next_block.begin_time = self.begin_time
      return next_block
class _InstrumentationBlockFormatter:
  """Takes an instrumentation block and converts it into a Mobly test
  result.
  """

  DEFAULT_INSTRUMENTATION_METHOD_NAME = 'instrumentation_method'

  def _get_name(self):
    """Gets the method name of the test method for the instrumentation
    method block.

    Returns:
      A string containing the name of the instrumentation test method's
      test or a default name if no name was parsed.
    """
    if self._known_keys[_InstrumentationKnownStatusKeys.TEST]:
      return self._known_keys[_InstrumentationKnownStatusKeys.TEST]
    else:
      return self.DEFAULT_INSTRUMENTATION_METHOD_NAME

  def _get_class(self):
    """Gets the class name of the test method for the instrumentation
    method block.

    Returns:
      A string containing the class name of the instrumentation test
      method's test or empty string if no name was parsed. If a prefix
      was specified, then the prefix will be prepended to the class
      name.
    """
    class_parts = [
        self._prefix, self._known_keys[_InstrumentationKnownStatusKeys.CLASS]
    ]
    return '.'.join(filter(None, class_parts))

  def _get_full_name(self):
    """Gets the qualified name of the test method corresponding to the
    instrumentation block.

    Returns:
      A string containing the fully qualified name of the
      instrumentation test method. If parts are missing, then degrades
      steadily.
    """
    full_name_parts = [self._get_class(), self._get_name()]
    return '#'.join(filter(None, full_name_parts))

  def _get_details(self):
    """Gets the output for the detail section of the TestResultRecord.

    Returns:
      A string to set for a TestResultRecord's details.
    """
    detail_parts = [self._get_full_name(), self._error_message]
    return '\n'.join(filter(None, detail_parts))

  def _get_extras(self):
    """Gets the output for the extras section of the TestResultRecord.

    Returns:
      A string to set for a TestResultRecord's extras.
    """
    # NOTE(review): the values appended below must already be plain strings
    # here or the '\n'.join would fail -- presumably flattened by the
    # constructor, which is defined outside this section; confirm.
    # Add empty line to start key-value pairs on a new line.
    extra_parts = ['']

    for value in self._unknown_keys.values():
      extra_parts.append(value)
    extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.STREAM])
    extra_parts.append(
        self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG])
    extra_parts.append(
        self._known_keys[_InstrumentationKnownResultKeys.LONGMSG])
    extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.ERROR])

    # Only append the stack separately when the stream does not already
    # contain it, to avoid duplicating the stack trace in the output.
    if self._known_keys[
        _InstrumentationKnownStatusKeys.STACK] not in self._known_keys[
            _InstrumentationKnownStatusKeys.STREAM]:
      extra_parts.append(
          self._known_keys[_InstrumentationKnownStatusKeys.STACK])

    return '\n'.join(filter(None, extra_parts))

  def _is_failed(self):
    """Determines if the test corresponding to the instrumentation block
    failed.

    This method cannot be used to tell if a test method passed and
    should not be used for such a purpose.

    Returns:
      A boolean indicating if the test method failed.
    """
    if self._status_code in _InstrumentationStatusCodeCategories.FAIL:
      return True
    elif (self._known_keys[_InstrumentationKnownStatusKeys.STACK] and
          self._status_code != _InstrumentationStatusCodes.ASSUMPTION_FAILURE):
      return True
    elif self._known_keys[_InstrumentationKnownStatusKeys.ERROR]:
      return True
    elif self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG]:
      return True
    elif self._known_keys[_InstrumentationKnownResultKeys.LONGMSG]:
      return True
    else:
      return False

  def create_test_record(self, mobly_test_class):
    """Creates a TestResultRecord for the instrumentation block.

    Args:
      mobly_test_class: string, the name of the Mobly test case
        executing the instrumentation run.

    Returns:
      A TestResultRecord with an appropriate signals exception
      representing the instrumentation test method's result status, or
      None for a timing block that carries no error.
    """
    details = self._get_details()
    extras = self._get_extras()
    tr_record = records.TestResultRecord(
        t_name=self._get_full_name(),
        t_class=mobly_test_class,
    )
    if self._begin_time:
      tr_record.begin_time = self._begin_time

    if self._is_failed():
      tr_record.test_fail(e=signals.TestFailure(details=details, extras=extras))
    elif self._status_code in _InstrumentationStatusCodeCategories.SKIPPED:
      tr_record.test_skip(e=signals.TestSkip(details=details, extras=extras))
    elif self._status_code in _InstrumentationStatusCodeCategories.PASS:
      tr_record.test_pass(e=signals.TestPass(details=details, extras=extras))
    elif self._status_code in _InstrumentationStatusCodeCategories.TIMING:
      if self._error_message:
        tr_record.test_error(
            e=signals.TestError(details=details, extras=extras))
      else:
        tr_record = None
    else:
      tr_record.test_error(e=signals.TestError(details=details, extras=extras))
    # This check cannot dereference a None tr_record: tr_record is only None
    # when _is_failed() returned False for a TIMING status code, and for
    # TIMING codes (which differ from ASSUMPTION_FAILURE) a non-empty STACK
    # would have made _is_failed() return True above, so STACK is falsy here.
    if self._known_keys[_InstrumentationKnownStatusKeys.STACK]:
      tr_record.termination_signal.stacktrace = self._known_keys[
          _InstrumentationKnownStatusKeys.STACK]
    return tr_record

  def has_completed_result_block_format(self, error_message):
    """Checks the instrumentation result block for a signal indicating
    normal completion.

    Args:
      error_message: string, the error message to give if the
        instrumentation run did not complete successfully.

    Returns:
      A boolean indicating whether or not the instrumentation run passed
      or failed overall.

    Raises:
      signals.TestError: Error raised if the instrumentation run did not
        complete because of a crash or some other issue.
    """
    extras = self._get_extras()
    if _InstrumentationResultSignals.PASS in extras:
      return True
    elif _InstrumentationResultSignals.FAIL in extras:
      return False
    else:
      raise signals.TestError(details=error_message, extras=extras)
class InstrumentationTestMixin:
  """A mixin for Mobly test classes to inherit from for instrumentation tests.

  This class should be used in a subclass of both BaseTestClass and this class
  in order to provide instrumentation test capabilities. This mixin is
  explicitly for the case where the underlying BaseTestClass cannot be
  replaced with BaseInstrumentationTestClass. In general, prefer using
  BaseInstrumentationTestClass instead.

  The mixin relies on attributes provided by the Mobly test class it is
  mixed into (e.g. TAG, results, summary_writer).

  Attributes:
    DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
      instrumentation params contained within user params.
    DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
      message to set if something has prevented something in the
      instrumentation test run from completing properly.
  """

  DEFAULT_INSTRUMENTATION_OPTION_PREFIX = 'instrumentation_option_'
  DEFAULT_INSTRUMENTATION_ERROR_MESSAGE = ('instrumentation run exited '
                                           'unexpectedly')
def _previous_block_never_completed(self, current_block, previous_block,
new_state):
"""Checks if the previous instrumentation method block completed.
Args:
current_block: _InstrumentationBlock, the current instrumentation
block to check for being a different instrumentation test
method.
previous_block: _InstrumentationBlock, rhe previous
instrumentation block to check for an incomplete status.
new_state: _InstrumentationBlockStates, the next state for the
parser, used to check for the instrumentation run ending
with an incomplete test.
Returns:
A boolean indicating whether the previous instrumentation block
completed executing.
"""
if previous_block:
previously_timing_block = (previous_block.status_code
in _InstrumentationStatusCodeCategories.TIMING)
currently_new_block = (current_block.status_code
== _InstrumentationStatusCodes.START or
new_state == _InstrumentationBlockStates.RESULT)
return all([previously_timing_block, currently_new_block])
else:
return False
def _create_formatters(self, instrumentation_block, new_state):
"""Creates the _InstrumentationBlockFormatters for outputting the
instrumentation method block that have finished parsing.
Args:
instrumentation_block: _InstrumentationBlock, the current
instrumentation method block to create formatters based upon.
new_state: _InstrumentationBlockState, the next state that the
parser will transition to.
Returns:
A list of the formatters tha need to create and add
TestResultRecords to the test results.
"""
formatters = []
if self._previous_block_never_completed(
current_block=instrumentation_block,
previous_block=instrumentation_block.previous_instrumentation_block,
new_state=new_state):
instrumentation_block.previous_instrumentation_block.set_error_message(
self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
formatters.append(
_InstrumentationBlockFormatter(
instrumentation_block.previous_instrumentation_block))
if not instrumentation_block.is_empty:
formatters.append(_InstrumentationBlockFormatter(instrumentation_block))
return formatters
def _transition_instrumentation_block(
self,
instrumentation_block,
new_state=_InstrumentationBlockStates.UNKNOWN):
"""Transitions and finishes the current instrumentation block.
Args:
instrumentation_block: _InstrumentationBlock, the current
instrumentation block to finish.
new_state: _InstrumentationBlockState, the next state for the
parser to transition to.
Returns:
The new instrumentation block to use for storing parsed
instrumentation output.
"""
formatters = self._create_formatters(instrumentation_block, new_state)
for formatter in formatters:
test_record = formatter.create_test_record(self.TAG)
if test_record:
self.results.add_record(test_record)
self.summary_writer.dump(test_record.to_dict(),
records.TestSummaryEntryType.RECORD)
return instrumentation_block.transition_state(new_state=new_state)
def _parse_method_block_line(self, instrumentation_block, line):
"""Parses the instrumnetation method block's line.
Args:
instrumentation_block: _InstrumentationBlock, the current
instrumentation method block.
line: string, the raw instrumentation output line to parse.
Returns:
The next instrumentation block, which should be used to continue
parsing instrumentation output.
"""
if line.startswith(_InstrumentationStructurePrefixes.STATUS):
instrumentation_block.set_key(_InstrumentationStructurePrefixes.STATUS,
line)
return instrumentation_block
elif line.startswith(_InstrumentationStructurePrefixes.STATUS_CODE):
instrumentation_block.set_status_code(line)
return self._transition_instrumentation_block(instrumentation_block)
elif line.startswith(_InstrumentationStructurePrefixes.RESULT):
# Unexpected transition from method block -> result block
instrumentation_block.set_key(_InstrumentationStructurePrefixes.RESULT,
line)
return self._parse_result_line(
self._transition_instrumentation_block(
instrumentation_block,
new_state=_InstrumentationBlockStates.RESULT,
),
line,
)
else:
instrumentation_block.add_value(line)
return instrumentation_block
def _parse_result_block_line(self, instrumentation_block, line):
"""Parses the instrumentation result block's line.
Args:
instrumentation_block: _InstrumentationBlock, the instrumentation
result block for the instrumentation run.
line: string, the raw instrumentation output to add to the
instrumenation result block's _InstrumentationResultBlocki
object.
Returns:
The instrumentation result block for the instrumentation run.
"""
instrumentation_block.add_value(line)
return instrumentation_block
def _parse_unknown_block_line(self, instrumentation_block, line):
"""Parses a line from the instrumentation output from the UNKNOWN
parser state.
Args:
instrumentation_block: _InstrumentationBlock, the current
instrumenation block, where the correct categorization it noti
yet known.
line: string, the raw instrumenation output line to be used to
deteremine the correct categorization.
Returns:
The next instrumentation block to continue parsing with. Usually,
this is the same instrumentation block but with the state
transitioned appropriately.
"""
if line.startswith(_InstrumentationStructurePrefixes.STATUS):
return self._parse_method_block_line(
self._transition_instrumentation_block(
instrumentation_block,
new_state=_InstrumentationBlockStates.METHOD,
),
line,
)
elif (line.startswith(_InstrumentationStructurePrefixes.RESULT) or
_InstrumentationStructurePrefixes.FAILED in line):
return self._parse_result_block_line(
self._transition_instrumentation_block(
instrumentation_block,
new_state=_InstrumentationBlockStates.RESULT,
),
line,
)
else:
# This would only really execute if instrumentation failed to start.
instrumentation_block.add_value(line)
return instrumentation_block
def _parse_line(self, instrumentation_block, line):
"""Parses an arbitrary line from the instrumentation output based upon
the current parser state.
Args:
instrumentation_block: _InstrumentationBlock, an instrumentation
block with any of the possible parser states.
line: string, the raw instrumentation output line to parse
appropriately.
Returns:
The next instrumenation block to continue parsing with.
"""
if instrumentation_block.state == _InstrumentationBlockStates.METHOD:
return self._parse_method_block_line(instrumentation_block, line)
elif instrumentation_block.state == _InstrumentationBlockStates.RESULT:
return self._parse_result_block_line(instrumentation_block, line)
else:
return self._parse_unknown_block_line(instrumentation_block, line)
def _finish_parsing(self, instrumentation_block):
    """Finishes parsing the instrumentation result block for the final
    instrumentation run status.

    Args:
        instrumentation_block: _InstrumentationBlock, the instrumentation
            result block for the instrumentation run. Potentially, this
            could actually be a method block if the instrumentation output
            is malformed.

    Returns:
        A boolean indicating whether the instrumentation run completed
        with all the tests passing.

    Raises:
        signals.TestError: Error raised if the instrumentation failed to
            complete with either a pass or fail status.
    """
    formatter = _InstrumentationBlockFormatter(instrumentation_block)
    return formatter.has_completed_result_block_format(
        self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
def parse_instrumentation_options(self, parameters=None):
    """Returns the options for the instrumentation test from user_params.

    By default, this method assumes that the correct instrumentation
    options all start with DEFAULT_INSTRUMENTATION_OPTION_PREFIX.

    Args:
        parameters: dict, the key value pairs representing an assortment
            of parameters including instrumentation options. Usually,
            this argument will be from self.user_params.

    Returns:
        A dictionary of options/parameters for the instrumentation test.
    """
    if not parameters:
        return {}
    prefix = self.DEFAULT_INSTRUMENTATION_OPTION_PREFIX
    # Keep only the prefixed keys, with the prefix stripped off.
    return {
        key[len(prefix):]: value
        for key, value in parameters.items()
        if key.startswith(prefix)
    }
def run_instrumentation_test(self,
                             device,
                             package,
                             options=None,
                             prefix=None,
                             runner=None):
    """Runs instrumentation tests on a device and creates test records.

    Args:
        device: AndroidDevice, the device to run instrumentation tests on.
        package: string, the package name of the instrumentation tests.
        options: dict, instrumentation options for the instrumentation
            tests.
        prefix: string, an optional prefix for parser output for
            distinguishing between instrumentation test runs.
        runner: string, the runner to use for the instrumentation package,
            default to DEFAULT_INSTRUMENTATION_RUNNER.

    Returns:
        A boolean indicating whether or not all the instrumentation test
        methods passed.

    Raises:
        TestError if the instrumentation run crashed or if parsing the
        output failed.
    """
    # Dictionary hack to allow overwriting the instrumentation_block in the
    # parse_instrumentation closure.
    instrumentation_block = [_InstrumentationBlock(prefix=prefix)]

    # BUGFIX: this closure was referenced as the adb handler but was missing
    # from the body, which raised NameError at runtime. It advances the
    # parser state for every raw output line; the single-element list lets
    # the closure rebind the current block.
    def parse_instrumentation(raw_line):
        instrumentation_block[0] = self._parse_line(
            instrumentation_block[0], raw_line)

    device.adb.instrument(package=package,
                          options=options,
                          runner=runner,
                          handler=parse_instrumentation)
    return self._finish_parsing(instrumentation_block[0])
class BaseInstrumentationTestClass(InstrumentationTestMixin,
base_test.BaseTestClass):
"""Base class for all instrumentation test classes to inherit from.
This class extends the BaseTestClass to add functionality to run and parse
the output of instrumentation runs.
Attributes:
DEFAULT_INSTRUMENTATION_OPTION_PREFIX: string, the default prefix for
instrumentation params contained within user params.
DEFAULT_INSTRUMENTATION_ERROR_MESSAGE: string, the default error
message to set if something has prevented something in the
instrumentation test run from completing properly.
"""
| [
2,
15069,
2177,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 2.999631 | 10,842 |
from functools import singledispatch
import streamlit as st
from sagas.ofbiz.entities import MetaEntity
from sagas.ofbiz.services import OfService
from sagas.ofbiz.entities import OfEntity as e, format
from sagas.ofbiz.services import OfService as s, oc
from datetime import datetime
# product("GZ-2002", 'price')
# product(dt('2013-07-04 00:00:00'), "Test_product_A")
@singledispatch
@product.register(str)
@product.register(datetime)
exports={product}
| [
6738,
1257,
310,
10141,
1330,
31958,
8802,
963,
198,
11748,
4269,
18250,
355,
336,
198,
198,
6738,
45229,
292,
13,
1659,
42189,
13,
298,
871,
1330,
30277,
32398,
198,
6738,
45229,
292,
13,
1659,
42189,
13,
30416,
1330,
3226,
16177,
198,... | 3.026144 | 153 |
""" Functional tests for the Obey simple list app """
from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
    """Tests of the layout and styling of the lists app."""

    def _assert_input_box_centered(self):
        """Assert the item input box is horizontally centered (+/- 5px)
        in the 1024px-wide window set by the test."""
        inputbox = self.get_item_input_box()
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta=5
        )

    def test_layout_and_styling(self):
        """The home page looks roughly like what we expect it to."""
        self.browser.get(self.server_url)
        self.browser.set_window_size(1024, 768)

        # The input box is centered on the empty page...
        self._assert_input_box_centered()

        # ...and stays centered after an item has been added.
        self._type_and_submit_item('Learn python')
        self._wait_for_row_in_list_table('1: Learn python')
        self._assert_input_box_centered()
| [
37811,
44224,
5254,
329,
262,
440,
23454,
2829,
1351,
598,
37227,
198,
6738,
764,
8692,
1330,
44224,
14402,
628,
198,
4871,
47639,
1870,
18716,
1359,
14402,
7,
22203,
282,
14402,
2599,
198,
220,
220,
220,
37227,
30307,
286,
262,
12461,
... | 2.299479 | 384 |
from adfs_aws_login import conf
import pytest
import argparse
try:
# For Python 3.5 and later
import configparser
except ImportError:
# Fall back to Python 2
import ConfigParser as configparser # noqa: F401
args = {
"profile": "test-profile",
"user": "test@example.com",
"no_prompt": False,
"duration": None,
"role": None,
}
params = {
"adfs_role_arn": "arn:aws:iam::123456789012:role/test_role",
"adfs_login_url": "https://testauthority",
"adfs_default_username": "test@example.com",
}
sections = {"profile test-profile": params}
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
6738,
512,
9501,
62,
8356,
62,
38235,
1330,
1013,
198,
11748,
12972,
9288,
198,
11748,
1822,
29572,
198,
198,
28311,
25,
198,
220,
220,
220,
1303,
1114,
11361,
513,
13,
20,
290,
1568,
198,
220,
220,
220,
1330,
4566,
48610,
198,
16341,... | 2.568 | 250 |
#!/usr/bin/env python3
"""Script for testing the TriFingerPro model."""
import time
import pybullet
from trifinger_simulation import (
sim_finger,
visual_objects,
)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
7391,
329,
4856,
262,
7563,
37,
3889,
2964,
2746,
526,
15931,
198,
11748,
640,
198,
11748,
12972,
15065,
1616,
198,
6738,
491,
361,
3889,
62,
14323,
1741,
1330,
357,
198,
22... | 2.75641 | 78 |
from website.project.model import User
from website.util.permissions import reduce_permissions
from admin.users.serializers import serialize_simple_node
| [
6738,
3052,
13,
16302,
13,
19849,
1330,
11787,
198,
6738,
3052,
13,
22602,
13,
525,
8481,
1330,
4646,
62,
525,
8481,
198,
198,
6738,
13169,
13,
18417,
13,
46911,
11341,
1330,
11389,
1096,
62,
36439,
62,
17440,
628,
198
] | 4 | 39 |
# Generated by Django 3.2.8 on 2021-11-02 17:04
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
23,
319,
33448,
12,
1157,
12,
2999,
1596,
25,
3023,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/python
from BoostBuild import Tester
# Run the build system under the Boost.Build test harness.
# pass_toolset=0 means no toolset is appended to the command line — TODO confirm
# against BoostBuild.Tester docs.
t = Tester(pass_toolset=0)
# Exercise the --debug / --build-system=test code path.
t.run_build_system(extra_args="--debug --build-system=test")
# Remove the temporary working directory created by the Tester.
t.cleanup()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
19835,
15580,
1330,
309,
7834,
198,
198,
83,
796,
309,
7834,
7,
6603,
62,
25981,
2617,
28,
15,
8,
198,
83,
13,
5143,
62,
11249,
62,
10057,
7,
26086,
62,
22046,
2625,
438,
24442... | 2.559322 | 59 |
import logging
import sys
from pathlib import Path
from typing import Any, Optional, Union
from ruamel.yaml import YAML
from cookietemple.lint.domains.cli import CliJavaLint, CliPythonLint
from cookietemple.lint.domains.gui import GuiJavaLint
from cookietemple.lint.domains.lib import LibCppLint
from cookietemple.lint.domains.pub import PubLatexLint
from cookietemple.lint.domains.web import WebWebsitePythonLint
from cookietemple.lint.template_linter import TemplateLinter
from cookietemple.util.rich import console
log = logging.getLogger(__name__)
def lint_project(project_dir: str, skip_external: bool, is_create: bool = False) -> Optional[TemplateLinter]:
    """
    Verifies the integrity of a project against cookietemple's coding and project standards.

    Runs a set of general linting functions, which all templates share, and afterwards runs
    template specific linting functions. All results are collected and presented to the user.

    :param project_dir: Path to the project directory containing the .cookietemple.yml file
    :param skip_external: Whether to skip external linters such as autopep8
    :param is_create: Whether linting is called during project creation
    :return: The linter object holding the collected results; exits the process on hard failure
    """
    # Detect which template the project is based on
    template_handle = get_template_handle(project_dir)
    log.debug(f"Detected handle {template_handle}")
    switcher = {
        "cli-python": CliPythonLint,
        "cli-java": CliJavaLint,
        "web-website-python": WebWebsitePythonLint,
        "gui-java": GuiJavaLint,
        "lib-cpp": LibCppLint,
        "pub-thesis-latex": PubLatexLint,
    }

    # Look the linter class up explicitly instead of catching TypeError around the
    # constructor call: the old pattern also swallowed TypeErrors raised *inside*
    # a linter's __init__, misreporting them as an unknown handle.
    linter_class = switcher.get(template_handle)
    if linter_class is None:
        console.print(f"[bold red]Unable to find linter for handle {template_handle}! Aborting...")
        sys.exit(1)
    lint_obj: Union[TemplateLinter, Any] = linter_class(project_dir)

    # Run the linting tests
    try:
        # Templates for which the shared check-files linting must be disabled.
        disable_check_files = template_handle in ("pub-thesis-latex",)

        # Run non project specific linting
        log.debug("Running general linting.")
        console.print("[bold blue]Running general linting")
        lint_obj.lint_project(
            super(lint_obj.__class__, lint_obj), custom_check_files=disable_check_files, is_subclass_calling=False
        )

        # Run the project specific linting
        log.debug(f"Running linting of {template_handle}")
        console.print(f"[bold blue]Running {template_handle} linting")

        # For every newly created python project autopep8 runs exactly once.
        # When linting an existing python cookietemple project, autopep8 is optional,
        # since (for example) it messes up Jinja syntax (if included in project).
        if "python" in template_handle:
            lint_obj.lint(is_create, skip_external)  # type: ignore
        else:
            lint_obj.lint(skip_external)  # type: ignore
    except AssertionError as e:
        console.print(f"[bold red]Critical error: {e}")
        console.print("[bold red] Stopping tests...")
        return lint_obj

    # Print the results
    lint_obj.print_results()

    # Exit code
    if len(lint_obj.failed) > 0:
        console.print(f"[bold red] {len(lint_obj.failed)} tests failed! Exiting with non-zero error code.")
        sys.exit(1)
    return None
def get_template_handle(dot_cookietemple_path: str = ".cookietemple.yml") -> str:
    """
    Reads the .cookietemple.yml file and extracts the template handle.

    :param dot_cookietemple_path: path to the directory containing the
        .cookietemple.yml file. NOTE: despite the default value, this argument
        is treated as a directory — ".cookietemple.yml" is always appended.
    :return: found template handle
    """
    # Build the path with pathlib instead of f-string concatenation.
    path = Path(dot_cookietemple_path) / ".cookietemple.yml"
    if not path.exists():
        console.print("[bold red].cookietemple.yml not found. Is this a cookietemple project?")
        sys.exit(1)
    yaml = YAML(typ="safe")
    dot_cookietemple_content = yaml.load(path)
    return dot_cookietemple_content["template_handle"]
| [
11748,
18931,
198,
11748,
25064,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4377,
11,
32233,
11,
4479,
198,
198,
6738,
7422,
17983,
13,
88,
43695,
1330,
575,
2390,
43,
198,
198,
6738,
4255,
1155,
368,
1154,
13,
75,
60... | 2.613342 | 1,544 |
from celery import Celery
app = Celery('tasks', broker='redis://localhost:6379/0', backend='redis://localhost:6379/1')
@app.task
| [
6738,
18725,
1924,
1330,
15248,
1924,
628,
198,
1324,
796,
15248,
1924,
10786,
83,
6791,
3256,
20426,
11639,
445,
271,
1378,
36750,
25,
21,
29088,
14,
15,
3256,
30203,
11639,
445,
271,
1378,
36750,
25,
21,
29088,
14,
16,
11537,
198,
1... | 2.791667 | 48 |
## interaction3 / arrays / foldable_constant_spiral.py
import numpy as np
from interaction3.abstract import *
from interaction3 import util
# default parameters
defaults = {}
# membrane properties
defaults['length'] = [35e-6, 35e-6]
defaults['electrode'] = [35e-6, 35e-6]
defaults['nnodes'] = [9, 9]
defaults['thickness'] = [2.2e-6,]
defaults['density'] = [2040,]
defaults['y_modulus'] = [110e9,]
defaults['p_ratio'] = [0.22,]
defaults['isolation'] = 200e-9
defaults['permittivity'] = 6.3
defaults['gap'] = 50e-9
defaults['att_mech'] = 3000
defaults['ndiv'] = [2, 2]
# array properties
defaults['mempitch'] = [45e-6, 45e-6]
defaults['nmem'] = [2, 2]
defaults['nelem'] = 489
defaults['edge_buffer'] = 60e-6 # accounts for 20um dicing tolerance
defaults['taper_radius'] = 3.63e-3 # controls size of spiral
defaults['assert_radius'] = 3.75e-3 - 40e-6
# array pane vertices, hard-coded
_vertices0 = [[-3.75e-3, -3.75e-3, 0],
[-3.75e-3, 3.75e-3, 0],
[-1.25e-3, 3.75e-3, 0],
[-1.25e-3, -3.75e-3, 0]]
_vertices1 = [[-1.25e-3, -3.75e-3, 0],
[-1.25e-3, 3.75e-3, 0],
[1.25e-3, 3.75e-3, 0],
[1.25e-3, -3.75e-3, 0]]
_vertices2 = [[1.25e-3, -3.75e-3, 0],
[1.25e-3, 3.75e-3, 0],
[3.75e-3, 3.75e-3, 0],
[3.75e-3, -3.75e-3, 0]]
## COMMAND LINE INTERFACE ##
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--nmem', nargs=2, type=int)
    parser.add_argument('--mempitch', nargs=2, type=float)
    parser.add_argument('--length', nargs=2, type=float)
    parser.add_argument('--electrode', nargs=2, type=float)
    parser.add_argument('--nelem', type=int)
    parser.add_argument('-d', '--dump', nargs='?', default=None)
    # Fill in any unspecified arguments from the module-level defaults dict.
    parser.set_defaults(**defaults)

    args = vars(parser.parse_args())
    filename = args.pop('dump')

    # create() comes from interaction3.abstract (star import) and builds the
    # array specification from the collected parameters.
    spec = create(**args)

    print(spec)
    print('Total number of channels ->', sum(get_channel_count(spec)))
    print('Number of transmit channels ->', sum(get_channel_count(spec, kind='tx')))
    print('Number of receive channels ->', sum(get_channel_count(spec, kind='rx')))
    print('Number of transmit/receive channels ->', sum(get_channel_count(spec, kind='both')))

    # Optionally serialize the spec when -d/--dump was given.
    if filename is not None:
        dump(spec, filename, mode='w')

    # Visual sanity check: plot membrane positions with the pane edges and the
    # assertion radius overlaid.
    from matplotlib import pyplot as plt

    pos = np.concatenate(get_membrane_positions_from_array(spec), axis=0)
    plt.plot(pos[:, 0], pos[:, 1], '.')
    plt.gca().set_aspect('equal')
    plt.gca().axvline(-1.25e-3)
    plt.gca().axvline(1.25e-3)
    plt.gca().axvline(-3.75e-3)
    plt.gca().axvline(3.75e-3)
    plt.gca().axhline(-3.75e-3)
    plt.gca().axhline(3.75e-3)
    plt.gca().add_patch(plt.Circle(radius=defaults['assert_radius'], xy=(0,0), fill=None))
    plt.show()
2235,
10375,
18,
1220,
26515,
1220,
5591,
540,
62,
9979,
415,
62,
2777,
21093,
13,
9078,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
10375,
18,
13,
397,
8709,
1330,
1635,
198,
6738,
10375,
18,
1330,
7736,
628,
198,
2,
4... | 2.123696 | 1,342 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Tobias Weber <tobi-weber@gmx.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from . import Middleware
log = logging.getLogger("levitas.middleware.dynSiteMiddleware")
class DynSiteMiddleware(Middleware):
"""
class MySite(object):
def index(self):
return "Hello World"
Example settings entry:
urls = [(r"^/(.*)$", DynSiteMiddleware, MySite)]
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
3050,
12,
4967,
46675,
28137,
1279,
83,
13411,
12,
732,
527,
31,
70,
36802,
13,
2934,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
... | 2.697917 | 384 |
import numpy as np
from .cpd_nonlin import cpd_nonlin
def cpd_auto(K, ncp, vmax, desc_rate=1, **kwargs):
    """Main interface

    Detect change points automatically selecting their number
        K       - kernel between each pair of frames in video
        ncp     - maximum ncp
        vmax    - special parameter

    Optional arguments:
        lmin     - minimum segment length
        lmax     - maximum segment length
        desc_rate - rate of descriptor sampling (vmax always corresponds to 1x)

    Note:
        - cps are always calculated in subsampled coordinates irrespective to
            desc_rate
        - lmin and m should be in agreement
    ---
    Returns: (cps, costs)
        cps - best selected change-points
        costs - costs for 0,1,2,...,m change-points

    Memory requirement: ~ (3*N*N + N*ncp)*4 bytes ~= 16 * N^2 bytes
    That is 1,6 Gb for the N=10000.
    """
    m = ncp
    # First pass: only the optimal scores for 0..m change-points are needed,
    # so backtracking is skipped to save time and memory.
    (_, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs)
    #(cps, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs)

    N = K.shape[0]
    N2 = N*desc_rate  # length of the video before subsampling

    penalties = np.zeros(m+1)
    # Prevent division by zero (in case of 0 changes)
    ncp = np.arange(1, m+1)
    # Model-selection penalty: grows with the number of change-points, scaled
    # by vmax and the (pre-subsampling) video length.
    penalties[1:] = (vmax*ncp/(2.0*N2))*(np.log(float(N2)/ncp)+1)

    costs = scores/float(N) + penalties
    # Pick the number of change-points minimizing the penalized cost.
    m_best = np.argmin(costs)
    # Second pass with backtracking enabled to recover the actual change-points.
    (cps, scores2) = cpd_nonlin(K, m_best, **kwargs)

    return (cps, costs)
# ------------------------------------------------------------------------------
# Extra functions (currently not used)
def estimate_vmax(K_stable):
    """Estimate the vmax parameter from a kernel computed over a stable
    (change-free) segment of video.

    K_stable - kernel between all frames of a stable segment
    """
    num_frames = K_stable.shape[0]
    return np.trace(centering(K_stable) / num_frames)
def centering(K):
    """Double-center the kernel (Gram) matrix K:
    subtract row and column means and add back the grand mean."""
    row_means = K.mean(axis=1, keepdims=True)
    return K - row_means - row_means.T + row_means.mean()
def eval_score(K, cps):
    """ Evaluate unnormalized empirical score
        (sum of kernelized scatters) for the given change-points """
    num_frames = K.shape[0]
    # Segment boundaries: implicit start (0) and end (N) wrap the change-points.
    boundaries = [0] + list(cps) + [num_frames]
    diag_total = 0
    scatter_total = 0
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        segment = K[start:end, :][:, start:end]
        diag_total += np.sum(np.diag(segment))
        scatter_total += np.sum(segment) / float(end - start)
    return (diag_total - scatter_total)
def eval_cost(K, cps, score, vmax):
    """ Evaluate cost function for automatic number of change points selection
       K      - kernel between all frames
       cps    - selected change-points
       score  - unnormalized empirical score (sum of kernelized scatters)
       vmax   - vmax parameter"""
    num_frames = K.shape[0]
    num_cps = len(cps)
    # Same penalty form as in cpd_auto, evaluated for this particular selection.
    penalty = (vmax * num_cps / (2.0 * num_frames)) * (np.log(float(num_frames) / num_cps) + 1)
    return score / float(num_frames) + penalty
| [
11748,
299,
32152,
355,
45941,
198,
6738,
764,
13155,
67,
62,
13159,
2815,
1330,
269,
30094,
62,
13159,
2815,
198,
198,
4299,
269,
30094,
62,
23736,
7,
42,
11,
299,
13155,
11,
410,
9806,
11,
1715,
62,
4873,
28,
16,
11,
12429,
46265,... | 2.330275 | 1,199 |
import numpy as np
from scipy import ndimage
import imageio
from PIL import Image, ImageFilter
import argparse
import constants
if __name__ == "__main__":
main() | [
11748,
299,
32152,
355,
45941,
201,
198,
6738,
629,
541,
88,
1330,
299,
67,
9060,
201,
198,
11748,
2939,
952,
201,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
22417,
201,
198,
11748,
1822,
29572,
201,
198,
11748,
38491,
201,
198,
201,... | 2.765625 | 64 |
import asyncio
import logging
import uvloop
from vkwave.bots import SimpleLongPollBot
from vkwave.bots.core.dispatching import filters
from app import db
from app.config import config
from app.routers import home, registration
# Emit INFO-and-above log records from all libraries.
logging.basicConfig(level=logging.INFO)

# Swap in uvloop as the asyncio event loop implementation.
uvloop.install()
loop = asyncio.get_event_loop()
# Initialise the database before the bot starts polling.
loop.run_until_complete(db.init())

bot = SimpleLongPollBot(config.TOKENS, config.GROUP_ID)
# Only dispatch long-poll events of type "message_new" to the routers below.
bot.router.registrar.add_default_filter(filters.EventTypeFilter("message_new"))
# registration.router is added before home.router — NOTE(review): presumably so
# registration flows take precedence; confirm vkwave's router dispatch order.
bot.dispatcher.add_router(registration.router)
bot.dispatcher.add_router(home.router)

try:
    # Block forever; ignore_errors=True keeps one failing handler from
    # terminating the bot.
    bot.run_forever(ignore_errors=True)
except KeyboardInterrupt:
    exit()
| [
11748,
30351,
952,
198,
11748,
18931,
198,
198,
11748,
334,
85,
26268,
198,
6738,
410,
74,
19204,
13,
42478,
1330,
17427,
14617,
39176,
20630,
198,
6738,
410,
74,
19204,
13,
42478,
13,
7295,
13,
6381,
17147,
278,
1330,
16628,
198,
198,
... | 2.899563 | 229 |
import time
from grove.adc import ADC
adc = ADC()

# Poll the sensor every 10 seconds and print the raw reading.
while True:
    # Read the analog value from ADC channel 0 — presumably the soil-moisture
    # probe; verify against the actual wiring.
    soil_moisture = adc.read(0)
    print("Soil moisture:", soil_moisture)
    time.sleep(10)
11748,
640,
198,
6738,
7128,
303,
13,
324,
66,
1330,
49169,
198,
198,
324,
66,
796,
49169,
3419,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
9260,
62,
5908,
396,
495,
796,
512,
66,
13,
961,
7,
15,
8,
198,
220,
220,
220,
3601,
... | 2.358209 | 67 |
import casadi as cs
# plt.figure(1)
# plt.clf()
# plt.plot(sol.value(k))
# plt.figure(2)
# plt.clf()
# plt.plot(sol.value(eps_soft))
# plt.figure(3)
# plt.clf()
# plt.plot(sol.value(x)[3,:],label='infected')
# plt.plot(sol.value(x)[4,:],label='hospitalized')
# plt.plot(sol.value(x)[5,:],label='death')
# plt.legend()
# plt.show()
#pd.DataFrame(sol.value(x), index=['S','E','A','I','H','D','R']).to_csv('For_Emanuele.csv')
| [
11748,
6124,
9189,
355,
50115,
628,
198,
220,
220,
220,
220,
628,
198,
198,
2,
458,
83,
13,
26875,
7,
16,
8,
220,
198,
2,
458,
83,
13,
565,
69,
3419,
198,
2,
458,
83,
13,
29487,
7,
34453,
13,
8367,
7,
74,
4008,
198,
198,
2,
... | 1.977376 | 221 |
import os, sys, argparse, mlflow, yaml
import numpy as np
import torch
import torch.nn as nn
import segmentation_models_pytorch as smp
from torch.utils.data import DataLoader
from albumentations import (
Compose, PadIfNeeded, Normalize, HorizontalFlip, VerticalFlip, RandomBrightnessContrast,
CropNonEmptyMaskIfExists, GaussNoise, RandomResizedCrop, Rotate, GaussianBlur
)
from albumentations.pytorch import ToTensorV2
from resources.data import SegmentationData, FactorResize
from resources.train_utils import Trainer
from resources.utils import load_pretrained_state_for_unet, moco_to_unet_prefixes
augmentation_dict = {
'PadIfNeeded': PadIfNeeded, 'HorizontalFlip': HorizontalFlip, 'VerticalFlip': VerticalFlip,
'RandomBrightnessContrast': RandomBrightnessContrast, 'CropNonEmptyMaskIfExists': CropNonEmptyMaskIfExists,
'GaussNoise': GaussNoise, 'RandomResizedCrop': RandomResizedCrop, 'Rotate': Rotate,
'GaussianBlur': GaussianBlur
}
if __name__ == "__main__":
if 'snakemake' in globals():
args = snakemake_args()
else:
args = parse_args()
#set manual seed to ensure we always start with the same model parameters
torch.manual_seed(42)
with open(args['config'], 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config['config_file'] = args['config']
#overwrite the model_dir, pretraining, iterations, or finetuning layer
if args['md'] is not None:
config['model_dir'] = args['md']
if args['pf'] is not None:
config['pretraining'] = args['pf']
if args['n'] is not None:
config['iters'] = args['n']
if args['ft'] is not None:
config['finetune_layer'] = args['ft']
experiment = config['experiment_name']
pretraining = config['pretraining']
#if we're working with MoCo pretrained weights
#then we'll have to download them separately from the
#built-in pytorch function
if pretraining in ['imagenet_mocov2', 'cellemnet_mocov2']:
#this loads the state dict and adds the prefix "encoder."
#to the keys such that they match those in the UNet model
#it
state_dict, norms = load_pretrained_state_for_unet(config['encoder'], pretraining)
if norms == None:
gray_channels = 3
normalize = Normalize() #default is ImageNet means and standard deviations
else:
gray_channels = 1
normalize = Normalize(mean=norms[0], std=norms[1])
#create the Unet model and load the pretrained weights
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
msg = model.load_state_dict(state_dict, strict=False)
elif pretraining == 'imagenet_supervised':
#create the UNet with imagenet supervised weights which are
#automatically downloaded through smp
model = smp.Unet(config['encoder'], encoder_weights='imagenet', classes=config['num_classes'])
gray_channels = 3
normalize = Normalize() #default is ImageNet means and standard deviations
elif os.path.isfile(pretraining):
#it's also possible to directly pass a .pth file as the
#pretrained weights. In which case we assume that they
#were generated by the train_mocov2.py script and load them accordingly
checkpoint = torch.load(pretraining, map_location='cpu')
state_dict, norms = checkpoint['state_dict'], checkpoint['norms']
state_dict = moco_to_unet_prefixes(state_dict)
gray_channels = 1
normalize = Normalize(mean=norms[0], std=norms[1])
#create the Unet model and load the pretrained weights
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
msg = model.load_state_dict(state_dict, strict=False)
print(f'Successfully loaded parameters from {pretraining}')
else: #random initialization
print('No pretraining found. Using randomly initialized weights!')
gray_channels = 1
model = smp.Unet(config['encoder'], in_channels=gray_channels, encoder_weights=None, classes=config['num_classes'])
#use the norms defined for the dataset in the config file
normalize = Normalize(**config['norms'])
#importantly, we want to store the mean and std that we're
#using for training with theses weights. this eliminates
#any confusion during inference.
config['training_norms'] = [normalize.mean, normalize.std]
#freeze all encoder layers to start and only open
#them when specified
for param in model.encoder.parameters():
param.requires_grad = False
#unfreeze layers based on the finetune_layer argument
finetune_layer = config['finetune_layer']
encoder_groups = [mod[1] for mod in model.encoder.named_children()]
if finetune_layer != 'none':
#this indices should work for any ResNet model, but were specifically
#chosen for ResNet50
layer_index = {'all': 0, 'layer1': 4, 'layer2': 5, 'layer3': 6, 'layer4': 7}
start_layer = layer_index[finetune_layer]
#always finetune from the start layer to the last layer in the resnet
for group in encoder_groups[start_layer:]:
for param in group.parameters():
param.requires_grad = True
#in the MoCo paper, the authors suggest making the parameters
#in BatchNorm layers trainable to help account for the smaller
#magnitudes of weights that typically occur with unsupervised
#pretraining. we haven't found this to be beneficial for the
#OneCycle LR policy, it might be for other lr policies though.
if config['unfreeze_encoder_bn']:
#this makes all the batchnorm layers in the encoder trainable
model.encoder.apply(unfreeze_encoder_bn)
#print out the number of trainable parameters in the whole model
#unfreeze_encoder_bn adds about 50k more
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print(f'Using model with {params} trainable parameters!')
#construct the set of augmentations from config
dataset_augs = []
for aug_params in config['augmentations']:
aug_name = aug_params['aug']
#lookup aug_name and replace it with the
#correct augmentation class
aug = augmentation_dict[aug_name]
#delete the aug key and then the remaining
#dictionary items are kwargs
del aug_params['aug']
dataset_augs.append(aug(**aug_params))
#unpack the list of dataset specific augmentations
#into Compose, and then add normalization and tensor
#conversion, which apply universally
augs = Compose([
*dataset_augs,
normalize,
ToTensorV2()
])
#create the segmentation data for training
data_dir = config['data_dir']
train_dir = 'train/'
bsz = config['bsz']
trn_data = SegmentationData(os.path.join(data_dir, train_dir), tfs=augs, gray_channels=gray_channels,
segmentation_classes=config['num_classes'])
config['n_images'] = len(trn_data.fnames)
#create the dataloader
#NOTE: if using CPU, the pin_memory argument must be set to False
#In the future, we may add a "cpu" argument to the config; we expect
#that most users will have access to a GPU though.
train = DataLoader(trn_data, batch_size=bsz, shuffle=True, pin_memory=True, drop_last=True, num_workers=config['jobs'])
#check for a validation directory and use it if it exists
#if not, then we don't use any validation data
val_dir = 'valid/'
if os.path.isdir(os.path.join(data_dir, val_dir)):
#eval_augs are always the same.
#since we ultimately want to run our model on
#full size images and not cropped patches, we use
#FactorResize. This is a custom augmentation that
#simply resizes the image to the nearest multiple
#of 32 (which is necessary to work with the UNet model).
#if working with very large images that don't fit in memory
#it could be swapped out for a CenterCrop. the results will
#be less reflective of performance in the test case however.
eval_augs = Compose([
FactorResize(32),
normalize,
ToTensorV2()
])
val_data = SegmentationData(os.path.join(data_dir, val_dir), tfs=eval_augs, gray_channels=gray_channels,
segmentation_classes=config['num_classes'])
#using a batch size of 1 means that we report a per-image IoU score
valid = DataLoader(val_data, batch_size=1, shuffle=False, pin_memory=True, num_workers=config['jobs'])
else:
valid = None
#create model path ahead of time so that
#we don't try to save to a directory that doesn't
#exist later on
model_dir = config['model_dir']
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
#train the model using the parameters in the config file
#TODO: add a progress bar option to config
trainer = Trainer(config, model, train, valid)
trainer.train() | [
11748,
28686,
11,
25064,
11,
1822,
29572,
11,
285,
1652,
9319,
11,
331,
43695,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
10618,
341,
62,
27530,
62,
9078,
13165,
354,
... | 2.593155 | 3,623 |
from .exemplar_primitive_generators import EXEMPLAR_PRIMITIVE_GENERATORS
from .exemplar_derived_generators import EXEMPLAR_DERIVED_GENERATORS
from .exemplar_custom_generators import EXEMPLAR_CUSTOM_GENERATORS
# Combined registry: primitive, then derived, then custom exemplar generators.
EXEMPLAR_GENERATORS = EXEMPLAR_PRIMITIVE_GENERATORS + EXEMPLAR_DERIVED_GENERATORS + EXEMPLAR_CUSTOM_GENERATORS
| [
6738,
764,
1069,
18856,
283,
62,
19795,
1800,
62,
8612,
2024,
1330,
7788,
3620,
6489,
1503,
62,
4805,
3955,
2043,
9306,
62,
35353,
1137,
1404,
20673,
198,
6738,
764,
1069,
18856,
283,
62,
34631,
62,
8612,
2024,
1330,
7788,
3620,
6489,
... | 2.450382 | 131 |
from xml.etree import ElementTree
from bs4 import BeautifulSoup
import nltk
import json
import re
if __name__ == '__main__':
# parser = Parser('./data/ace_2005_td_v7/data/English/un/fp2/alt.gossip.celebrities_20041118.2331')
parser = Parser('./data/ace_2005_td_v7/data/English/un/timex2norm/alt.corel_20041228.0503')
data = parser.get_data()
with open('./output/debug.json', 'w') as f:
json.dump(data, f, indent=2)
# index = parser.sgm_text.find("Diego Garcia")
# print('index :', index)
# print(parser.sgm_text[1918 - 30:])
| [
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
299,
2528,
74,
198,
11748,
33918,
198,
11748,
302,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198... | 2.354167 | 240 |
import io
import unittest
from aero_util.tecio import *
from . import common
| [
11748,
33245,
198,
11748,
555,
715,
395,
198,
6738,
257,
3529,
62,
22602,
13,
36281,
952,
1330,
1635,
198,
6738,
764,
1330,
2219,
198
] | 3.208333 | 24 |
import logging
logger = logging.getLogger(__name__)
from monai.transforms import (Compose, EnsureChannelFirstd, LoadImaged,
ScaleIntensityRanged, Spacingd)
from monailabel.interfaces.tasks.infer import InferTask, InferType
from monailabel.scribbles.transforms import AddBackgroundScribblesFromROId
from monailabel.transform.post import BoundingBoxd, Restored
from lib.transforms import (AddBackgroundScribblesFromROIWithDropfracd,
ApplyGaussianSmoothing, ApplyGraphCutOptimisationd,
MakeLikelihoodFromScribblesDybaORFd,
MakeLikelihoodFromScribblesECONetd,
MakeLikelihoodFromScribblesGMMd,
MakeLikelihoodFromScribblesHistogramd, Timeit)
class ECONetPlusGraphCut(MyLikelihoodBasedSegmentor):
    """
    Defines Efficient Convolutional Online Likelihood Network (ECONet) based Online Likelihood training and inference method for
    COVID-19 lung lesion segmentation based on the following paper:

    Asad, Muhammad, Lucas Fidon, and Tom Vercauteren. "ECONet: Efficient Convolutional Online Likelihood Network
    for Scribble-based Interactive Segmentation."
    To be reviewed (preprint: https://arxiv.org/pdf/2201.04584.pdf).

    This task takes as input 1) original image volume and 2) scribbles from user
    indicating foreground and background regions. A likelihood volume is learned and inferred using ECONet method.
    numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
    and pairwise is the original input volume.

    This also implements variations of ECONet with hand-crafted features, referred as ECONet-Haar-Like in the paper.
    """
class DybaORFPlusGraphCut(MyLikelihoodBasedSegmentor):
"""
Defines Dynamically Balanced Online Random Forest (DybaORF) based Online Likelihood training and inference method for
COVID-19 lung lesion segmentation based on the following paper:
Wang, Guotai, et al. "Dynamically balanced online random forests for interactive scribble-based segmentation."
International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, Cham, 2016.
This task takes as input 1) original image volume and 2) scribbles from user
indicating foreground and background regions. A likelihood volume is learned and inferred using DybaORF-Haar-Like method.
numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
and pairwise is the original input volume.
"""
class GMMPlusGraphCut(MyLikelihoodBasedSegmentor):
"""
Defines Gaussian Mixture Model (GMM) based Online Likelihood generation method for COVID-19 lung lesion segmentation based on the following paper:
Rother, Carsten, Vladimir Kolmogorov, and Andrew Blake. "" GrabCut" interactive foreground extraction using iterated graph cuts."
ACM transactions on graphics (TOG) 23.3 (2004): 309-314.
This task takes as input 1) original image volume and 2) scribbles from user
indicating foreground and background regions. A likelihood volume is generated using GMM method.
numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
and pairwise is the original input volume.
"""
class HistogramPlusGraphCut(MyLikelihoodBasedSegmentor):
"""
Defines Histogram-based Online Likelihood generation method for COVID-19 lung lesion segmentation based on the following paper:
Boykov, Yuri Y., and M-P. Jolly. "Interactive graph cuts for optimal boundary & region segmentation of objects in ND images."
Proceedings eighth IEEE international conference on computer vision. ICCV 2001. Vol. 1. IEEE, 2001.
This task takes as input 1) original image volume and 2) scribbles from user
indicating foreground and background regions. A likelihood volume is generated using histogram method.
numpymaxflow's GraphCut layer is used to regularise the resulting likelihood, where unaries come from likelihood
and pairwise is the original input volume.
"""
| [
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
6738,
937,
1872,
13,
7645,
23914,
1330,
357,
7293,
577,
11,
48987,
29239,
5962,
67,
11,
8778,
3546,
1886,
11,
198,
220,
220,
220,... | 3.34655 | 1,261 |
'''
Author: zyq
Date: 2020-11-30 17:19:51
LastEditTime: 2020-12-09 17:24:59
LastEditors: Please set LastEditors
Description: 数值分析上机题 课本 P195 37题 3次样条插值
FilePath: /code/chapter4/q4-37-1.py
'''
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
import sys, os
'''
description:
param {*} x n+1 个插值点
param {*} y n+1 个插值点
return {*} n
'''
'''
description: 求三次样条差值的 4n 个方程
param: {x[0,n], y[0,n]} n+1 个插值点
param: Type 三次样条边界条件 1 or 2 or 3
return {A, B} [a0 b0 c0 d0 a1 b1 c1 d1 ... a(n-1) b(n-1) c(n-1) d(n-1)] = [B] 形式的方程组
'''
"""
功能:根据所给参数,计算三次函数的函数值:
参数:OriginalInterval为原始x的区间, parameters为二次函数的系数,x为自变量
返回值:为函数的因变量
"""
"""
功能:将函数绘制成图像
参数:data_x,data_y为离散的点.new_data_x,new_data_y为由拉格朗日插值函数计算的值。x为函数的预测值。
返回值:空
"""
if __name__ == "__main__":
# 获取当前文件路径
current_path = os.path.abspath(__file__)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(current_path), '../')))
# print(sys.path)
# 调用 chapter3 中的列主元高斯消去法
from chapter3.q3 import MGauss_Caculate
main()
| [
7061,
6,
198,
13838,
25,
1976,
88,
80,
198,
10430,
25,
12131,
12,
1157,
12,
1270,
1596,
25,
1129,
25,
4349,
198,
5956,
18378,
7575,
25,
12131,
12,
1065,
12,
2931,
1596,
25,
1731,
25,
3270,
198,
5956,
18378,
669,
25,
4222,
900,
458... | 1.298246 | 798 |
"""
This module provides methods to drop and re-create all tables.
"""
from db import create_session
import cql_queries
def create_database():
"""
Creates the database and establishes the connection.
"""
# connect to default database
cluster, session = create_session()
# create sparkify database with UTF8 encoding
session.execute(cql_queries.KEYSPACE_DROP)
session.execute(cql_queries.KEYSPACE_CREATE)
session.set_keyspace('sparkifydb')
return cluster, session
def drop_tables(session):
"""
Drops all tables.
"""
for query in cql_queries.DROP_TABLE_QUERIES:
session.execute(query)
def create_tables(session):
"""
Creates all tables.
"""
for query in cql_queries.CREATE_TABLE_QUERIES:
session.execute(query)
def main():
"""
First, creates databse and establishes connection.
Then, drops all tables and re-creates them.
"""
print("Creating connection...")
cluster, session = create_database()
print("Dropping old tables...")
drop_tables(session)
print("Creating new tables...")
create_tables(session)
print("Closing connection...")
session.shutdown()
cluster.shutdown()
print("Done.")
if __name__ == "__main__":
main()
| [
37811,
198,
1212,
8265,
3769,
5050,
284,
4268,
290,
302,
12,
17953,
477,
8893,
13,
198,
37811,
198,
6738,
20613,
1330,
2251,
62,
29891,
198,
11748,
269,
13976,
62,
421,
10640,
628,
198,
4299,
2251,
62,
48806,
33529,
198,
220,
220,
220... | 2.792576 | 458 |
match = "dc:title"
ns = {'dc': 'http://purl.org/dc/elements/1.1/'}
import xml.etree.ElementTree as ET
tree = ET.parse('country_data.xml') # $ decodeFormat=XML decodeInput='country_data.xml' decodeOutput=ET.parse(..) xmlVuln='XML bomb' getAPathArgument='country_data.xml'
root = tree.getroot()
root.find(match, namespaces=ns) # $ getXPath=match
root.findall(match, namespaces=ns) # $ getXPath=match
root.findtext(match, default=None, namespaces=ns) # $ getXPath=match
tree = ET.ElementTree()
tree.parse("index.xhtml") # $ decodeFormat=XML decodeInput="index.xhtml" decodeOutput=tree.parse(..) xmlVuln='XML bomb' getAPathArgument="index.xhtml"
tree.find(match, namespaces=ns) # $ getXPath=match
tree.findall(match, namespaces=ns) # $ getXPath=match
tree.findtext(match, default=None, namespaces=ns) # $ getXPath=match
parser = ET.XMLParser()
parser.feed("<foo>bar</foo>") # $ decodeFormat=XML decodeInput="<foo>bar</foo>" xmlVuln='XML bomb'
tree = parser.close() # $ decodeOutput=parser.close()
tree.find(match, namespaces=ns) # $ getXPath=match
| [
15699,
796,
366,
17896,
25,
7839,
1,
198,
5907,
796,
1391,
6,
17896,
10354,
705,
4023,
1378,
79,
6371,
13,
2398,
14,
17896,
14,
68,
3639,
14,
16,
13,
16,
14,
6,
92,
198,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12... | 2.673418 | 395 |
class DocumentPage(object,IDisposable):
"""
Represents a document page produced by a paginator.
DocumentPage(visual: Visual)
DocumentPage(visual: Visual,pageSize: Size,bleedBox: Rect,contentBox: Rect)
"""
def Dispose(self):
"""
Dispose(self: DocumentPage)
Releases all resources used by the System.Windows.Documents.DocumentPage.
"""
pass
def OnPageDestroyed(self,*args):
"""
OnPageDestroyed(self: DocumentPage,e: EventArgs)
Raises the System.Windows.Documents.DocumentPage.PageDestroyed event.
e: An System.EventArgs that contains the event data.
"""
pass
def SetBleedBox(self,*args):
"""
SetBleedBox(self: DocumentPage,bleedBox: Rect)
Sets the dimensions and location of the
System.Windows.Documents.DocumentPage.BleedBox.
bleedBox: An object that specifies the size and location of a rectangle.
"""
pass
def SetContentBox(self,*args):
"""
SetContentBox(self: DocumentPage,contentBox: Rect)
Sets the dimension and location of the
System.Windows.Documents.DocumentPage.ContentBox.
contentBox: An object that specifies the size and location of a rectangle.
"""
pass
def SetSize(self,*args):
"""
SetSize(self: DocumentPage,size: Size)
Sets the System.Windows.Documents.DocumentPage.Size of the physical page as it
will be after any cropping.
size: The size of the page.
"""
pass
def SetVisual(self,*args):
"""
SetVisual(self: DocumentPage,visual: Visual)
Sets the System.Windows.Documents.DocumentPage.Visual that depicts the page.
visual: The visual representation of the page.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,visual,pageSize=None,bleedBox=None,contentBox=None):
"""
__new__(cls: type,visual: Visual)
__new__(cls: type,visual: Visual,pageSize: Size,bleedBox: Rect,contentBox: Rect)
"""
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
BleedBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the area for print production-related bleeds,registration marks,and crop marks that may appear on the physical sheet outside the logical page boundaries.
Get: BleedBox(self: DocumentPage) -> Rect
"""
ContentBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the area of the page within the margins.
Get: ContentBox(self: DocumentPage) -> Rect
"""
Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the actual size of a page as it will be following any cropping.
Get: Size(self: DocumentPage) -> Size
"""
Visual=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""When overridden in a derived class,gets the visual representation of the page.
Get: Visual(self: DocumentPage) -> Visual
"""
Missing=None
PageDestroyed=None
| [
4871,
16854,
9876,
7,
15252,
11,
2389,
271,
1930,
540,
2599,
201,
198,
37227,
201,
198,
1432,
6629,
257,
3188,
2443,
4635,
416,
257,
42208,
20900,
13,
201,
201,
198,
220,
201,
201,
198,
16854,
9876,
7,
41464,
25,
15612,
8,
201,
201,... | 2.900245 | 1,223 |
from sqlalchemy import Boolean, Column, Integer, String
from db.session import Base
# JOBS MODEL
| [
6738,
44161,
282,
26599,
1330,
41146,
11,
29201,
11,
34142,
11,
10903,
198,
198,
6738,
20613,
13,
29891,
1330,
7308,
198,
198,
2,
32357,
4462,
19164,
3698,
628
] | 3.571429 | 28 |
from rest_framework import viewsets
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from tests import models, serializers
| [
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
6738,
1334,
62,
30604,
13,
79,
945,
364,
1330,
19449,
46677,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
1148,
47649,
3474,
198,
198,
6738,
5254,
1330,
4981,
11,
11389,
11341,
628
] | 4.317073 | 41 |
import yfinance as yf
import streamlit as st
import pandas as pd
import csv
import csv
tickers = []
with open(r'nasdaq_screener_1640497257523.csv') as f:
r = csv.reader(f)
header = next(r)
for row in r:
tickers.append([row[1],row[0]])
tname = []
for i in tickers:
tname.append(i[0])
st.write("""
# Simple Stock Price App
### Shows ***closing price*** and ***volume*** of Selected Company
***
""")
tickersymbol = ''
tickername = st.selectbox(
'Select Ticker',
tuple(tname))
for i in tickers:
if i[0] == tickername:
tickersymbol = i[1]
tickerdata = yf.Ticker(tickersymbol)
tickerdf = tickerdata.history(period='1d',start='2010-5-31',end='2020-5-31')
if not tickerdf.empty:
st.write("""
## Closing Price
""")
st.line_chart(tickerdf.Close)
st.write("""
## Volume Price
""")
st.line_chart(tickerdf.Volume)
else :
st.error("No data found for this company")
st.write("""
***
""") | [
11748,
331,
69,
14149,
355,
331,
69,
198,
11748,
4269,
18250,
355,
336,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
269,
21370,
198,
11748,
269,
21370,
198,
198,
83,
21630,
796,
17635,
198,
4480,
1280,
7,
81,
6,
24716,
48539,
... | 2.328502 | 414 |
import math
import random
def index_of_nearest(p, hot_points, distance_f=distance):
"""Given a point and a set of hot points it found the hot point
nearest to the given point. An arbitrary distance function can
be specified
:return the index of the nearest hot points, or None if the list of hot
points is empty
"""
min_dist = None
nearest_hp_i = None
for i, hp in enumerate(hot_points):
dist = distance_f(p, hp)
if min_dist is None or dist < min_dist:
min_dist = dist
nearest_hp_i = i
return nearest_hp_i
| [
11748,
10688,
198,
11748,
4738,
628,
628,
198,
4299,
6376,
62,
1659,
62,
710,
12423,
7,
79,
11,
3024,
62,
13033,
11,
5253,
62,
69,
28,
30246,
2599,
198,
220,
220,
220,
37227,
15056,
257,
966,
290,
257,
900,
286,
3024,
2173,
340,
1... | 2.588745 | 231 |
print('test321')
| [
4798,
10786,
9288,
36453,
11537,
198
] | 2.833333 | 6 |
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client
import os
import logging
LOGGER = logging.getLogger(__name__)
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
TWILIO_SERVICE_SID = os.environ.get('TWILIO_SERVICE_SID')
| [
198,
6738,
665,
346,
952,
13,
8692,
13,
1069,
11755,
1330,
1815,
346,
952,
19452,
16922,
198,
6738,
665,
346,
952,
13,
2118,
1330,
20985,
198,
11748,
28686,
198,
11748,
18931,
628,
198,
25294,
30373,
796,
18931,
13,
1136,
11187,
1362,
... | 2.462121 | 132 |
import pytest
from assertpy import assert_that
import year2020.day19.reader as reader
import year2020.day19.solver as solver
@pytest.mark.parametrize('word', ['aab', 'aba'])
@pytest.mark.parametrize('word', ['abba', 'abbb', 'bab'])
@pytest.mark.solution
@pytest.mark.solution
| [
11748,
12972,
9288,
198,
6738,
6818,
9078,
1330,
6818,
62,
5562,
198,
198,
11748,
614,
42334,
13,
820,
1129,
13,
46862,
355,
9173,
198,
11748,
614,
42334,
13,
820,
1129,
13,
82,
14375,
355,
1540,
332,
628,
198,
198,
31,
9078,
9288,
... | 2.627273 | 110 |
from plugin import plugin
import random
@plugin("give me advice")
| [
6738,
13877,
1330,
13877,
198,
11748,
4738,
628,
198,
31,
33803,
7203,
26535,
502,
5608,
4943,
198
] | 4 | 17 |
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba Python tests."""
import os
import ldb
import samba
import samba.auth
from samba import param
from samba.samdb import SamDB
import subprocess
import tempfile
# Other modules import these two classes from here, for convenience:
from testtools.testcase import (
TestCase as TesttoolsTestCase,
TestSkipped,
)
class TestCase(TesttoolsTestCase):
"""A Samba test case."""
class LdbTestCase(TesttoolsTestCase):
"""Trivial test case for running tests against a LDB."""
def set_modules(self, modules=[]):
"""Change the modules for this Ldb."""
m = ldb.Message()
m.dn = ldb.Dn(self.ldb, "@MODULES")
m["@LIST"] = ",".join(modules)
self.ldb.add(m)
self.ldb = samba.Ldb(self.filename)
def env_get_var_value(var_name):
"""Returns value for variable in os.environ
Function throws AssertionError if variable is defined.
Unit-test based python tests require certain input params
to be set in environment, otherwise they can't be run
"""
assert var_name in os.environ.keys(), "Please supply %s in environment" % var_name
return os.environ[var_name]
cmdline_credentials = None
class RpcInterfaceTestCase(TestCase):
"""DCE/RPC Test case."""
class BlackboxProcessError(subprocess.CalledProcessError):
"""This exception is raised when a process run by check_output() returns
a non-zero exit status. Exception instance should contain
the exact exit code (S.returncode), command line (S.cmd),
process output (S.stdout) and process error stream (S.stderr)"""
class BlackboxTestCase(TestCase):
"""Base test case for blackbox tests."""
def connect_samdb(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False):
"""Create SamDB instance and connects to samdb_url database.
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
Added value for tests is that we have a shorthand function
to make proper URL for ldb.connect() while using default
parameters for connection based on test environment
"""
samdb_url = samdb_url.lower()
if not "://" in samdb_url:
if not ldap_only and os.path.isfile(samdb_url):
samdb_url = "tdb://%s" % samdb_url
else:
samdb_url = "ldap://%s" % samdb_url
# use 'paged_search' module when connecting remotely
if samdb_url.startswith("ldap://"):
ldb_options = ["modules:paged_searches"]
elif ldap_only:
raise AssertionError("Trying to connect to %s while remote "
"connection is required" % samdb_url)
# set defaults for test environment
if lp is None:
lp = env_loadparm()
if session_info is None:
session_info = samba.auth.system_session(lp)
if credentials is None:
credentials = cmdline_credentials
return SamDB(url=samdb_url,
lp=lp,
session_info=session_info,
credentials=credentials,
flags=flags,
options=ldb_options)
def connect_samdb_ex(samdb_url, lp=None, session_info=None, credentials=None,
flags=0, ldb_options=None, ldap_only=False):
"""Connects to samdb_url database
:param samdb_url: Url for database to connect to.
:param lp: Optional loadparm object
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
:param flags: Optional LDB flags
:param ldap_only: If set, only remote LDAP connection will be created.
:return: (sam_db_connection, rootDse_record) tuple
"""
sam_db = connect_samdb(samdb_url, lp, session_info, credentials,
flags, ldb_options, ldap_only)
# fetch RootDse
res = sam_db.search(base="", expression="", scope=ldb.SCOPE_BASE,
attrs=["*"])
return (sam_db, res[0])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
33501,
9447,
33,
14,
34,
5064,
50,
7822,
13,
198,
2,
15069,
357,
34,
8,
449,
417,
647,
23092,
2238,
2926,
1279,
73,
417,
647,
31,
82,
31842,
13,
2398,
29,
4343,
12,
10333... | 2.688073 | 1,853 |
import subprocess
import os
import time
processes = []
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_s2v_ensemble_unlimited_neg_rew --model s2v --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_ensemble_unlimited_neg_rew --model mha --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_full_ensemble_unlimited_neg_rew --model mha_full --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_s2v_ensemble_unlimited_neg_rew --model s2v --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_ensemble_unlimited_neg_rew --model mha --use-cuda", shell=True))
time.sleep(10)
processes.append(subprocess.Popen("python examples/boxes_2D_dqn__fill_multidim.py --save --name stack_mha_full_ensemble_unlimited_neg_rew --model mha_full --use-cuda", shell=True))
while (len(processes)>0):
removal_list = []
for i in range(len(processes)):
poll = processes[i].poll()
if poll is None:
time.sleep(60)
else:
removal_list.append(i)
time.sleep(60)
if (len(removal_list)!=0):
correcting_counter = 0
for i in range(len(removal_list)):
print ("PROCESS " + str(removal_list[i]) + " FINISHED")
processes.pop(removal_list[i]-correcting_counter)
correcting_counter += 1
| [
11748,
850,
14681,
198,
11748,
28686,
198,
11748,
640,
198,
198,
14681,
274,
796,
17635,
198,
14681,
274,
13,
33295,
7,
7266,
14681,
13,
47,
9654,
7203,
29412,
6096,
14,
29305,
62,
17,
35,
62,
49506,
77,
834,
20797,
62,
16680,
312,
... | 2.441727 | 695 |
from In.boxer.box import Box, BoxThemer
class BoxMessagesList(Box):
''''''
title = s('Messages')
@IN.register('BoxMessagesList', type = 'Themer')
| [
6738,
554,
13,
3524,
263,
13,
3524,
1330,
8315,
11,
8315,
464,
647,
198,
198,
4871,
8315,
36479,
1095,
8053,
7,
14253,
2599,
198,
197,
39115,
7061,
198,
197,
7839,
796,
264,
10786,
36479,
1095,
11537,
628,
198,
31,
1268,
13,
30238,
... | 2.763636 | 55 |
import argparse
from convlab2.e2e.rnn_rollout.deal_or_not import DealornotAgent
from convlab2.e2e.rnn_rollout.deal_or_not.model import get_context_generator
from convlab2 import DealornotSession
import convlab2.e2e.rnn_rollout.utils as utils
import numpy as np
session_num = 20
# agent
alice_agent = DealornotAgent('Alice', rnn_model_args(), sel_model_args())
bob_agent = DealornotAgent('Bob', rnn_model_args(), sel_model_args())
agents = [alice_agent, bob_agent]
context_generator = get_context_generator(rnn_model_args().context_file)
# session
session = DealornotSession(alice_agent, bob_agent)
session_idx = 0
rewards = [[], []]
for ctxs in context_generator.iter():
print('session_idx', session_idx)
for agent, ctx, partner_ctx in zip(agents, ctxs, reversed(ctxs)):
agent.feed_context(ctx)
agent.feed_partner_context(partner_ctx)
last_observation = None
while True:
response = session.next_response(last_observation)
print('\t', ' '.join(response))
session_over = session.is_terminated()
if session_over:
break
last_observation = response
agree, [alice_r, bob_r] = session.get_rewards(ctxs)
print('session [{}] alice vs bos: {:.1f}/{:.1f}'.format(session_idx, alice_r, bob_r))
rewards[0].append(alice_r)
rewards[1].append(bob_r)
session.init_session()
session_idx += 1
# print(np.mean(rewards, axis=1))
| [
11748,
1822,
29572,
198,
198,
6738,
3063,
23912,
17,
13,
68,
17,
68,
13,
81,
20471,
62,
2487,
448,
13,
31769,
62,
273,
62,
1662,
1330,
15138,
1211,
313,
36772,
198,
6738,
3063,
23912,
17,
13,
68,
17,
68,
13,
81,
20471,
62,
2487,
... | 2.420784 | 587 |
import json
import os
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
try:
HOST = os.environ["GOLLY_PELICAN_TEST_MOCKAPI_HOST"]
PORT = int(os.environ["GOLLY_PELICAN_TEST_MOCKAPI_PORT"])
except KeyError:
raise Exception(
"Error: you must define GOLLY_PELICAN_TEST_MOCKAPI_{HOST,PORT}. Try running source environment.test"
)
except ValueError:
raise Exception(
"Error: you must provide an integer for GOLLY_PELICAN_TEST_MOCKAPI_PORT. Try running source environment.test"
)
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
4704,
278,
198,
6738,
2638,
13,
15388,
1330,
7308,
40717,
18453,
25060,
11,
38288,
18497,
628,
198,
28311,
25,
198,
220,
220,
220,
367,
10892,
796,
28686,
13,
268,
2268,
14692,
11230,
3069,
5... | 2.643902 | 205 |
import unittest;
import math;
from secant_root_solve import secant_root_solve;
class Test_secant_root_solve(unittest.TestCase):
'''
Test_secantrootsolve.m
Test case for the Secant Root Solver function. Based on the solution to
Problem 2 of Homework 1 f AOE 4404 Numerical Methods
Use Graphical technique, bisection method, false-position, fixed-point
iteration, Netwon method, and secant method to find the first root of
f(x) = x*exp(x) - cos(x)
@author: Matt Marti
@date: 2019-06-16
'''
def test_only(self):
'''Only test needed'''
# Define function
f = lambda x : math.cos(x) - x*math.exp(x);
# Parameters
a = 0; # Lower bound
b = 1; # Upper bound
errstop = 1e-12; # Stopping criteria
maxiter = 1000;
# Function call
x, niter, erra = secant_root_solve(f, a, b, maxiter, errstop);
# Check results
self.assertLess(abs(f(x)), errstop, \
'Results error not less than specified error');
self.assertLess(abs(erra), errstop, \
'Results error not less than specified error');
self.assertLess(niter, maxiter, \
'Took too many iterations, function could be bugged');
#
#
# | [
11748,
555,
715,
395,
26,
198,
11748,
10688,
26,
198,
6738,
792,
415,
62,
15763,
62,
82,
6442,
1330,
792,
415,
62,
15763,
62,
82,
6442,
26,
198,
198,
4871,
6208,
62,
2363,
415,
62,
15763,
62,
82,
6442,
7,
403,
715,
395,
13,
1440... | 2.224456 | 597 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
espider.*
------------------------------------------------------------
Package espider is a simply constructed web crawling and scrabing framework that is easy to use.
This package includes modules mentioned below:
|name |description |
|:-------------:|:---------------------------------------------------------------------------|
|spider |Scribe web sources automatically and save original sources |
|parser |Parse the sources that are scribed by spider |
|httphandler |Manipulate module that communicate with web server |
|proxy |A proxy handler provides Internet connection |
|selephan |Use selenium and phantomjs to load website instantly just like a browser do |
|mysql |Provide mysql service while saving data |
|log |Support configurable console and file logging |
|util |Including some useful functions the project need |
|config |Loading configuration from both config_default and config_override |
|config_default |Define default settings. You should always change configs in config_override|
You can refer to README.md for further instruction.
:Copyright (c) 2016 MeteorKepler
:license: MIT, see LICENSE for more details.
"""
__author__ = 'MeterKepler'
__version__ = '0.1.3'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
15024,
1304,
15885,
198,
220,
220,
220,
20368,
1783,
10541,
628,
220,
220,
220,
1... | 2.349229 | 713 |
from django.core import mail
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.http import Http404, QueryDict
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from organizations.tests.utils import request_factory_login
from organizations.backends.defaults import (BaseBackend, InvitationBackend,
RegistrationBackend)
from organizations.backends.tokens import RegistrationTokenGenerator
@override_settings(USE_TZ=True)
@override_settings(USE_TZ=True)
@override_settings(USE_TZ=True)
| [
6738,
42625,
14208,
13,
7295,
1330,
6920,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
... | 3.347594 | 187 |
import numpy as np
from scipy.optimize import newton
from collections import Counter
from sys import stderr, exit
FAST_EMBEDDING_AVAILABLE = True
try:
import hde_fast_embedding as fast_emb
except:
FAST_EMBEDDING_AVAILABLE = False
print("""
Error importing Cython fast embedding module. Continuing with slow Python implementation.\n
This may take a long time.\n
""", file=stderr, flush=True)
def get_set_of_scalings(past_range_T,
number_of_bins_d,
number_of_scalings,
min_first_bin_size,
min_step_for_scaling):
"""
Get scaling exponents such that the uniform embedding as well as
the embedding for which the first bin has a length of
min_first_bin_size (in seconds), as well as linearly spaced
scaling factors in between, such that in total
number_of_scalings scalings are obtained.
"""
min_scaling = 0
if past_range_T / number_of_bins_d <= min_first_bin_size or number_of_bins_d == 1:
max_scaling = 0
else:
# for the initial guess assume the largest bin dominates, so k is approx. log(T) / d
max_scaling = newton(lambda scaling: get_past_range(number_of_bins_d,
min_first_bin_size,
scaling)
- past_range_T,
np.log10(past_range_T
/ min_first_bin_size) / (number_of_bins_d - 1),
tol = 1e-04, maxiter = 500)
while np.linspace(min_scaling, max_scaling,
number_of_scalings, retstep = True)[1] < min_step_for_scaling:
number_of_scalings -= 1
return np.linspace(min_scaling, max_scaling, number_of_scalings)
def get_embeddings(embedding_past_range_set,
embedding_number_of_bins_set,
embedding_scaling_exponent_set):
"""
Get all combinations of parameters T, d, k, based on the
sets of selected parameters.
"""
embeddings = []
for past_range_T in embedding_past_range_set:
for number_of_bins_d in embedding_number_of_bins_set:
if not isinstance(number_of_bins_d, int) or number_of_bins_d < 1:
print("Error: numer of bins {} is not a positive integer. Skipping.".format(number_of_bins_d),
file=stderr, flush=True)
continue
if type(embedding_scaling_exponent_set) == dict:
scaling_set_given_T_and_d = get_set_of_scalings(past_range_T,
number_of_bins_d,
**embedding_scaling_exponent_set)
else:
scaling_set_given_T_and_d = embedding_scaling_exponent_set
for scaling_k in scaling_set_given_T_and_d:
embeddings += [(past_range_T, number_of_bins_d, scaling_k)]
return embeddings
def get_fist_bin_size_for_embedding(embedding):
"""
Get size of first bin for the embedding, based on the parameters
T, d and k.
"""
past_range_T, number_of_bins_d, scaling_k = embedding
return newton(lambda first_bin_size: get_past_range(number_of_bins_d,
first_bin_size,
scaling_k) - past_range_T,
0.005, tol = 1e-03, maxiter = 100)
def get_past_range(number_of_bins_d, first_bin_size, scaling_k):
"""
Get the past range T of the embedding, based on the parameters d, tau_1 and k.
"""
return np.sum([first_bin_size * 10**((number_of_bins_d - i) * scaling_k)
for i in range(1, number_of_bins_d + 1)])
def get_window_delimiters(number_of_bins_d, scaling_k, first_bin_size, embedding_step_size):
"""
Get delimiters of the window, used to describe the embedding. The
window includes both the past embedding and the response.
The delimiters are times, relative to the first bin, that separate
two consequent bins.
"""
bin_sizes = [first_bin_size * 10**((number_of_bins_d - i) * scaling_k)
for i in range(1, number_of_bins_d + 1)]
window_delimiters = [sum([bin_sizes[j] for j in range(i)])
for i in range(1, number_of_bins_d + 1)]
window_delimiters.append(window_delimiters[number_of_bins_d - 1] + embedding_step_size)
return window_delimiters
def get_median_number_of_spikes_per_bin(raw_symbols):
"""
Given raw symbols (in which the number of spikes per bin are counted,
ie not necessarily binary quantity), get the median number of spikes
for each bin, among all symbols obtained by the embedding.
"""
# number_of_bins here is number_of_bins_d + 1,
# as it here includes not only the bins of the embedding but also the response
number_of_bins = len(raw_symbols[0])
spike_counts_per_bin = [[] for i in range(number_of_bins)]
for raw_symbol in raw_symbols:
for i in range(number_of_bins):
spike_counts_per_bin[i] += [raw_symbol[i]]
return [np.median(spike_counts_per_bin[i]) for i in range(number_of_bins)]
def symbol_binary_to_array(symbol_binary, number_of_bins_d):
"""
Given a binary representation of a symbol (cf symbol_array_to_binary),
convert it back into its array-representation.
"""
# assert 2 ** number_of_bins_d > symbol_binary
spikes_in_window = np.zeros(number_of_bins_d)
for i in range(0, number_of_bins_d):
b = 2 ** (number_of_bins_d - 1 - i)
if b <= symbol_binary:
spikes_in_window[i] = 1
symbol_binary -= b
return spikes_in_window
def symbol_array_to_binary(spikes_in_window, number_of_bins_d):
"""
Given an array of 1s and 0s, representing spikes and the absence
thereof, read the array as a binary number to obtain a
(base 10) integer.
"""
# assert len(spikes_in_window) == number_of_bins_d
# TODO check if it makes sense to use len(spikes_in_window)
# directly, to avoid mismatch as well as confusion
# as number_of_bins_d here can also be number_of_bins
# as in get_median_number_of_spikes_per_bin, ie
# including the response
return sum([2 ** (number_of_bins_d - i - 1) * spikes_in_window[i]
for i in range(0, number_of_bins_d)])
def get_raw_symbols(spike_times,
embedding,
first_bin_size,
embedding_step_size):
"""
Get the raw symbols (in which the number of spikes per bin are counted,
ie not necessarily binary quantity), as obtained by applying the
embedding.
"""
past_range_T, number_of_bins_d, scaling_k = embedding
# the window is the embedding plus the response,
# ie the embedding and one additional bin of size embedding_step_size
window_delimiters = get_window_delimiters(number_of_bins_d,
scaling_k,
first_bin_size,
embedding_step_size)
window_length = window_delimiters[-1]
num_spike_times = len(spike_times)
last_spike_time = spike_times[-1]
raw_symbols = []
spike_index_lo = 0
# for time in np.arange(0, int(last_spike_time - window_length), embedding_step_size):
for time in np.arange(0, last_spike_time - window_length, embedding_step_size):
while(spike_index_lo < num_spike_times and spike_times[spike_index_lo] < time):
spike_index_lo += 1
spike_index_hi = spike_index_lo
while(spike_index_hi < num_spike_times and
spike_times[spike_index_hi] < time + window_length):
spike_index_hi += 1
spikes_in_window = np.zeros(number_of_bins_d + 1)
embedding_bin_index = 0
for spike_index in range(spike_index_lo, spike_index_hi):
while(spike_times[spike_index] > time + window_delimiters[embedding_bin_index]):
embedding_bin_index += 1
spikes_in_window[embedding_bin_index] += 1
raw_symbols += [spikes_in_window]
return raw_symbols
def get_symbol_counts(spike_times, embedding, embedding_step_size):
"""
Apply embedding to the spike times to obtain the symbol counts.
"""
if FAST_EMBEDDING_AVAILABLE:
return Counter(fast_emb.get_symbol_counts(spike_times, embedding, embedding_step_size))
past_range_T, number_of_bins_d, scaling_k = embedding
first_bin_size = get_fist_bin_size_for_embedding(embedding)
raw_symbols = get_raw_symbols(spike_times,
embedding,
first_bin_size,
embedding_step_size)
median_number_of_spikes_per_bin = get_median_number_of_spikes_per_bin(raw_symbols)
symbol_counts = Counter()
for raw_symbol in raw_symbols:
symbol_array = [int(raw_symbol[i] > median_number_of_spikes_per_bin[i])
for i in range(number_of_bins_d + 1)]
symbol = symbol_array_to_binary(symbol_array, number_of_bins_d + 1)
symbol_counts[symbol] += 1
return symbol_counts
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
649,
1122,
198,
6738,
17268,
1330,
15034,
198,
6738,
25064,
1330,
336,
1082,
81,
11,
8420,
198,
198,
37,
11262,
62,
3620,
33,
1961,
35,
2751,
62,
10116,
... | 2.068627 | 4,590 |
from unit_test_common import execute_csv2_request, initialize_csv2_request, ut_id, sanity_requests
from sys import argv
# lno: CV - error code identifier.
if __name__ == "__main__":
main(None)
| [
6738,
4326,
62,
9288,
62,
11321,
1330,
12260,
62,
40664,
17,
62,
25927,
11,
41216,
62,
40664,
17,
62,
25927,
11,
3384,
62,
312,
11,
34182,
62,
8897,
3558,
198,
6738,
25064,
1330,
1822,
85,
198,
198,
2,
300,
3919,
25,
26196,
532,
4... | 2.926471 | 68 |
#Project Euler Question 61
#Cyclical figurate numbers
oct_list = []
hept_list = []
hex_list = []
pent_list = []
squ_list = []
tri_list = []
n = 0
while True:
n += 1
oct_x = octagonal(n)
hept_x = heptagonal(n)
hex_x = hexagonal(n)
pent_x = pentagonal(n)
squ_x = sqaure(n)
tri_x = triangle(n)
if 10000 > oct_x >= 1000:
oct_list.append(oct_x)
if 10000 > hept_x >= 1000:
hept_list.append(hept_x)
if 10000 > hex_x >= 1000:
hex_list.append(hex_x)
if 10000 > pent_x >= 1000:
pent_list.append(pent_x)
if 10000 > squ_x >= 1000:
squ_list.append(squ_x)
if 10000 > tri_x >= 1000:
tri_list.append(tri_x)
elif oct_x >= 10000:
break
all_list = [hept_list, hex_list, pent_list, squ_list, tri_list]
print (cycle_numbers()) | [
2,
16775,
412,
18173,
18233,
8454,
198,
2,
20418,
565,
605,
2336,
15537,
3146,
628,
198,
38441,
62,
4868,
796,
17635,
198,
258,
457,
62,
4868,
796,
17635,
198,
33095,
62,
4868,
796,
17635,
198,
16923,
62,
4868,
796,
17635,
198,
16485,... | 2.083756 | 394 |
# Generated by Django 3.2.7 on 2022-01-10 12:12
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
22,
319,
33160,
12,
486,
12,
940,
1105,
25,
1065,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import config
import requests, bs4, urllib, os
dest_dir = os.path.join(config.DATASET_DIR, 'model_zip')
queries_dir = os.path.join(config.DATASET_DIR, 'queries_zip')
url = 'http://rll.berkeley.edu/bigbird/aliases/a47741b172/'
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if not os.path.exists(queries_dir):
os.makedirs(queries_dir)
page = requests.get(url)
soup = bs4.BeautifulSoup(page.content, 'html.parser')
download_links = [x.get('href') for x in soup.find_all('a') if x.get_text() == 'High res (.tgz)']
for d in download_links:
urllib.urlretrieve(url + d, os.path.join(dest_dir, d.split('/')[-2] + '.tgz'))
download_links = [x.get('href') for x in soup.find_all('a') if x.get_text() == 'RGB-D (.tgz)']
for d in download_links:
urllib.urlretrieve(url + d, os.path.join(queries_dir, d.split('/')[-2] + '.tgz'))
| [
11748,
4566,
198,
11748,
7007,
11,
275,
82,
19,
11,
2956,
297,
571,
11,
28686,
198,
198,
16520,
62,
15908,
796,
28686,
13,
6978,
13,
22179,
7,
11250,
13,
35,
1404,
1921,
2767,
62,
34720,
11,
705,
19849,
62,
13344,
11537,
198,
421,
... | 2.289189 | 370 |
import numpy as np
from matplotlib import pyplot as plt
import csv
import math
import pandas
if __name__=="__main__":
plot_log('result/log.csv')
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
269,
21370,
198,
11748,
10688,
198,
11748,
19798,
292,
628,
198,
361,
11593,
3672,
834,
855,
1,
834,
12417,
834,
1298,
198,
220,
... | 2.8 | 55 |
"""Config flow for 1-Wire component."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.device_registry import DeviceRegistry
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DEVICE_SUPPORT_OPTIONS,
DOMAIN,
INPUT_ENTRY_CLEAR_OPTIONS,
INPUT_ENTRY_DEVICE_SELECTION,
OPTION_ENTRY_DEVICE_OPTIONS,
OPTION_ENTRY_SENSOR_PRECISION,
PRECISION_MAPPING_FAMILY_28,
)
from .model import OWDeviceDescription
from .onewirehub import CannotConnect, OneWireHub
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST, default=DEFAULT_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
}
)
async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> dict[str, str]:
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
hub = OneWireHub(hass)
host = data[CONF_HOST]
port = data[CONF_PORT]
# Raises CannotConnect exception on failure
await hub.connect(host, port)
# Return info that you want to store in the config entry.
return {"title": host}
class OneWireFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle 1-Wire config flow."""
VERSION = 1
def __init__(self) -> None:
"""Initialize 1-Wire config flow."""
self.onewire_config: dict[str, Any] = {}
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle 1-Wire config flow start.
Let user manually input configuration.
"""
errors: dict[str, str] = {}
if user_input:
# Prevent duplicate entries
self._async_abort_entries_match(
{
CONF_HOST: user_input[CONF_HOST],
CONF_PORT: user_input[CONF_PORT],
}
)
self.onewire_config.update(user_input)
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
else:
return self.async_create_entry(
title=info["title"], data=self.onewire_config
)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors=errors,
)
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
"""Get the options flow for this handler."""
return OnewireOptionsFlowHandler(config_entry)
class OnewireOptionsFlowHandler(OptionsFlow):
"""Handle OneWire Config options."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize OneWire Network options flow."""
self.entry_id = config_entry.entry_id
self.options = dict(config_entry.options)
self.configurable_devices: dict[str, OWDeviceDescription] = {}
self.devices_to_configure: dict[str, OWDeviceDescription] = {}
self.current_device: str = ""
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
controller: OneWireHub = self.hass.data[DOMAIN][self.entry_id]
all_devices: list[OWDeviceDescription] = controller.devices # type: ignore[assignment]
if not all_devices:
return self.async_abort(reason="No configurable devices found.")
device_registry = dr.async_get(self.hass)
self.configurable_devices = {
self._get_device_long_name(device_registry, device.id): device
for device in all_devices
if device.family in DEVICE_SUPPORT_OPTIONS
}
return await self.async_step_device_selection(user_input=None)
async def async_step_device_selection(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Select what devices to configure."""
errors = {}
if user_input is not None:
if user_input.get(INPUT_ENTRY_CLEAR_OPTIONS):
# Reset all options
self.options = {}
return self._async_update_options()
selected_devices: list[str] = (
user_input.get(INPUT_ENTRY_DEVICE_SELECTION) or []
)
if selected_devices:
self.devices_to_configure = {
device_name: self.configurable_devices[device_name]
for device_name in selected_devices
}
return await self.async_step_configure_device(user_input=None)
errors["base"] = "device_not_selected"
return self.async_show_form(
step_id="device_selection",
data_schema=vol.Schema(
{
vol.Optional(
INPUT_ENTRY_CLEAR_OPTIONS,
default=False,
): bool,
vol.Optional(
INPUT_ENTRY_DEVICE_SELECTION,
default=self._get_current_configured_sensors(),
description="Multiselect with list of devices to choose from",
): cv.multi_select(
{device: False for device in self.configurable_devices}
),
}
),
errors=errors,
)
async def async_step_configure_device(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Config precision option for device."""
if user_input is not None:
self._update_device_options(user_input)
if self.devices_to_configure:
return await self.async_step_configure_device(user_input=None)
return self._async_update_options()
self.current_device, description = self.devices_to_configure.popitem()
data_schema = vol.Schema(
{
vol.Required(
OPTION_ENTRY_SENSOR_PRECISION,
default=self._get_current_setting(
description.id, OPTION_ENTRY_SENSOR_PRECISION, "temperature"
),
): vol.In(PRECISION_MAPPING_FAMILY_28),
}
)
return self.async_show_form(
step_id="configure_device",
data_schema=data_schema,
description_placeholders={"sensor_id": self.current_device},
)
@callback
def _async_update_options(self) -> FlowResult:
"""Update config entry options."""
return self.async_create_entry(title="", data=self.options)
@staticmethod
def _get_current_configured_sensors(self) -> list[str]:
"""Get current list of sensors that are configured."""
configured_sensors = self.options.get(OPTION_ENTRY_DEVICE_OPTIONS)
if not configured_sensors:
return []
return [
device_name
for device_name, description in self.configurable_devices.items()
if description.id in configured_sensors
]
def _get_current_setting(self, device_id: str, setting: str, default: Any) -> Any:
"""Get current value for setting."""
if entry_device_options := self.options.get(OPTION_ENTRY_DEVICE_OPTIONS):
if device_options := entry_device_options.get(device_id):
return device_options.get(setting)
return default
def _update_device_options(self, user_input: dict[str, Any]) -> None:
"""Update the global config with the new options for the current device."""
options: dict[str, dict[str, Any]] = self.options.setdefault(
OPTION_ENTRY_DEVICE_OPTIONS, {}
)
description = self.configurable_devices[self.current_device]
device_options: dict[str, Any] = options.setdefault(description.id, {})
if description.family == "28":
device_options[OPTION_ENTRY_SENSOR_PRECISION] = user_input[
OPTION_ENTRY_SENSOR_PRECISION
]
self.options.update({OPTION_ENTRY_DEVICE_OPTIONS: options})
| [
37811,
16934,
5202,
329,
352,
12,
29451,
7515,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
19720,
1330,
4377,
198,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1363,
562,
10167,
13,
11250,
62,
298... | 2.194785 | 3,912 |
import torch
import torch.nn as nn
from abc import ABC
| [
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
6738,
450,
66,
1330,
9738,
201,
198,
201
] | 2.809524 | 21 |
# coding: utf-8
# Back to the main [Index](../index.ipynb)
# ### Combine archives
# The experiment, i.e. model with the simulation description, can be stored as Combine Archive.
# In[1]:
#!!! DO NOT CHANGE !!! THIS FILE WAS CREATED AUTOMATICALLY FROM NOTEBOOKS !!! CHANGES WILL BE OVERWRITTEN !!! CHANGE CORRESPONDING NOTEBOOK FILE !!!
from __future__ import print_function
import tellurium as te
antimonyStr = """
model test()
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
phrasedmlStr = """
model0 = model "test"
sim0 = simulate uniform(0, 6, 100)
task0 = run sim0 on model0
plot "Timecourse test model" task0.time vs task0.S1
"""
# phrasedml experiment
exp = te.experiment(antimonyStr, phrasedmlStr)
exp.execute(phrasedmlStr)
# create Combine Archive
import tempfile
f = tempfile.NamedTemporaryFile()
exp.exportAsCombine(f.name)
# print the content of the Combine Archive
import zipfile
zip=zipfile.ZipFile(f.name)
print(zip.namelist())
# ### Create combine archive
# TODO
# In[2]:
import tellurium as te
import phrasedml
antTest1Str = """
model test1()
J0: S1 -> S2; k1*S1;
S1 = 10.0; S2=0.0;
k1 = 0.1;
end
"""
antTest2Str = """
model test2()
v0: X1 -> X2; p1*X1;
X1 = 5.0; X2 = 20.0;
k1 = 0.2;
end
"""
phrasedmlStr = """
model1 = model "test1"
model2 = model "test2"
model3 = model model1 with S1=S2+20
sim1 = simulate uniform(0, 6, 100)
task1 = run sim1 on model1
task2 = run sim1 on model2
plot "Timecourse test1" task1.time vs task1.S1, task1.S2
plot "Timecourse test2" task2.time vs task2.X1, task2.X2
"""
# phrasedml.setReferencedSBML("test1")
exp = te.experiment(phrasedmlList=[phrasedmlStr], antimonyList=[antTest1Str])
print(exp)
# set first model
phrasedml.setReferencedSBML("test1", te.antimonyToSBML(antTest1Str))
phrasedml.setReferencedSBML("test2", te.antimonyToSBML(antTest2Str))
sedmlstr = phrasedml.convertString(phrasedmlStr)
if sedmlstr is None:
raise Exception(phrasedml.getLastError())
print(sedmlstr)
# In[3]:
# In[3]:
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
5157,
284,
262,
1388,
685,
15732,
16151,
40720,
9630,
13,
541,
2047,
65,
8,
198,
198,
2,
44386,
29176,
22415,
198,
2,
383,
6306,
11,
1312,
13,
68,
13,
2746,
351,
262,
18640,
6764... | 2.42007 | 857 |
from six.moves import xrange
def get(obj, path):
"""
Looks up and returns a path in the object. Returns None if the path isn't there.
"""
for part in path:
try:
obj = obj[part]
except(KeyError, IndexError):
return None
return obj
def glob(obj, path, func, extra_arg):
"""
Resolves wildcards in `path`, calling func for all matching paths. Returns the number of
times that func was called.
obj - An object to scan.
path - Path to an item in an object or an array in obj. May contain the special key '*', which
-- for arrays only -- means "for all indices".
func - Will be called as func(subobj, key, fullPath, extraArg).
extra_arg - An arbitrary value to pass along to func, for convenience.
Returns count of matching paths, for which func got called.
"""
return _globHelper(obj, path, path, func, extra_arg)
def place(obj, path, value):
"""
Sets or deletes an object property in DocObj.
gpath - Path to an Object in obj.
value - Any value. Setting None will remove the selected object key.
"""
return glob(obj, path, _placeHelper, value)
def _checkIsArray(subobj, errPrefix, index, itemPath, isInsert):
"""
This is a helper for checking operations on arrays, and throwing descriptive errors.
"""
if subobj is None:
raise Exception(errPrefix + ": non-existent object at " + describe(dirname(itemPath)))
elif not _is_array(subobj):
raise Exception(errPrefix + ": not an array at " + describe(dirname(itemPath)))
else:
length = len(subobj)
validIndex = (isinstance(index, int) and index >= 0 and index < length)
validInsertIndex = (index is None or index == length)
if not (validIndex or (isInsert and validInsertIndex)):
raise Exception(errPrefix + ": invalid array index: " + describe(itemPath))
def insert(obj, path, value):
"""
Inserts an element into an array in DocObj.
gpath - Path to an item in an array in obj.
The new value will be inserted before the item pointed to by gpath.
The last component of gpath may be null, in which case the value is appended at the end.
value - Any value.
"""
return glob(obj, path, _insertHelper, value)
def update(obj, path, value):
"""
Updates an element in an array in DocObj.
gpath - Path to an item in an array in obj.
value - Any value.
"""
return glob(obj, path, _updateHelper, value)
def remove(obj, path):
"""
Removes an element from an array in DocObj.
gpath - Path to an item in an array in obj.
"""
return glob(obj, path, _removeHelper, None)
def dirname(path):
"""
Returns path without the last component, like a directory name in a filesystem path.
"""
return path[:-1]
def basename(path):
"""
Returns the last component of path, like base name of a filesystem path.
"""
return path[-1] if path else None
def describe(path):
"""
Returns a human-readable representation of path.
"""
return "/" + "/".join(str(p) for p in path)
| [
6738,
2237,
13,
76,
5241,
1330,
2124,
9521,
628,
198,
4299,
651,
7,
26801,
11,
3108,
2599,
198,
220,
37227,
198,
220,
29403,
510,
290,
5860,
257,
3108,
287,
262,
2134,
13,
16409,
6045,
611,
262,
3108,
2125,
470,
612,
13,
198,
220,
... | 3.126173 | 959 |
from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase
from mpf.tests.MpfGameTestCase import MpfGameTestCase
from unittest.mock import MagicMock
| [
6738,
29034,
69,
13,
41989,
13,
28861,
69,
49233,
8777,
14402,
20448,
1330,
337,
79,
69,
49233,
8777,
14402,
20448,
198,
198,
6738,
29034,
69,
13,
41989,
13,
28861,
69,
8777,
14402,
20448,
1330,
337,
79,
69,
8777,
14402,
20448,
198,
6... | 2.87037 | 54 |
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from .models import User
from .serializers import DefaultUserSerializer
client = APIClient()
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2149,
75,
1153,
11,
3486,
2043,
395,
20448,
198,
198,
6738,
764,
27530,
1330,
11787,
198,
6738,
76... | 3.606557 | 61 |
from django.contrib.messages import get_messages
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase, tag
from django.urls import reverse
from django.utils import timezone
from BookClub.models import User, Meeting, Club, ClubMembership
from BookClub.tests.helpers import LogInTester
@tag("views", "meeting", "leave_meeting")
class LeaveMeetingViewTestCase(TestCase, LogInTester):
"""Tests of the Join Meeting view."""
fixtures = [
'BookClub/tests/fixtures/default_users.json',
'BookClub/tests/fixtures/default_clubs.json',
'BookClub/tests/fixtures/default_meetings.json',
'BookClub/tests/fixtures/default_books.json',
]
def test_get_leave_meeting_redirects_to_list_of_meetings(self):
"""Test for redirecting user to available_clubs when used get method."""
self.client.login(username=self.user.username, password='Password123')
self.assertTrue(self._is_logged_in())
response = self.client.get(reverse('leave_meeting', kwargs={'club_url_name': self.club.club_url_name,
'meeting_id': self.future_meeting.id}))
redirect_url = reverse('meeting_list', kwargs={'club_url_name': self.club.club_url_name})
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=200)
| [
6738,
42625,
14208,
13,
3642,
822,
13,
37348,
1095,
1330,
651,
62,
37348,
1095,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
11,
7621,
198,... | 2.525455 | 550 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Baseline models for time series data."""
import os
import time
import warnings
from data.data_loader import ECL
from exp.exp_basic import ExpBasic
import matplotlib.pyplot as plt
from models.ar_net import ARNet
from models.linear import Linear
from models.lstm import LSTM
import numpy as np
import torch
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from utils.metrics import Metric as metric
from utils.tools import EarlyStopping
warnings.filterwarnings('ignore')
class ExpBaseline(ExpBasic):
"""Baseline experiments for time series data."""
def _get_dataset(self):
"""Function creates dataset based on data name in the parsers.
Returns:
Data: An instant of the dataset created
"""
if self.args.data == 'ECL':
data = ECL(self.args.root_path, self.args.seq_len, self.args.pred_len,
self.args.features, self.args.scale, self.args.num_ts)
else:
raise NotImplementedError
return data
def _build_model(self):
"""Function that creates a model instance based on the model name.
Here we only support LSTM, Linear and ARNet.
Returns:
model: An instance of the model.
"""
if self.args.model == 'LSTM':
model = LSTM(self.args.input_dim, self.args.pred_len, self.args.d_model,
self.args.layers, self.args.dropout, self.device).float()
elif self.args.model == 'Linear':
model = Linear(
self.args.pred_len * self.args.input_dim,
self.args.seq_len,
).float()
elif self.args.model == ' ARNet':
model = ARNet(
n_forecasts=self.args.pred_len * self.args.input_dim,
n_lags=self.args.seq_len,
device=self.device).float()
else:
raise NotImplementedError
# if multiple GPU are to be used parralize model
if self.args.use_multi_gpu and self.args.use_gpu:
model = nn.DataParallel(model, device_ids=self.args.device_ids)
return model
def _get_data(self, flag):
"""Function that creats a dataloader basd on flag.
Args:
flag: Flag indicating if we should return training/validation/testing
dataloader
Returns:
data_loader: Dataloader for the required dataset.
"""
args = self.args
if flag == 'test':
shuffle_flag = False
drop_last = True
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.test_x), torch.Tensor(self.data.test_y))
elif flag == 'pred':
shuffle_flag = False
drop_last = False
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.test_x), torch.Tensor(self.data.test_y))
elif flag == 'val':
shuffle_flag = False
drop_last = False
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.valid_x), torch.Tensor(self.data.valid_y))
else:
shuffle_flag = True
drop_last = True
batch_size = args.batch_size
data_set = TensorDataset(
torch.Tensor(self.data.train_x), torch.Tensor(self.data.train_y))
print('Data for', flag, 'dataset size', len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_loader
def _select_optimizer(self):
"""Function that returns the optimizer based on learning rate.
Returns:
model_optim: model optimizer
"""
model_optim = optim.Adam(
self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def vali(self, vali_loader, criterion):
"""Validation Function.
Args:
vali_loader: Validation dataloader
criterion: criterion used in for loss function
Returns:
total_loss: average loss
"""
self.model.eval()
total_loss = []
for (batch_x, batch_y) in vali_loader:
pred, true = self._process_one_batch(batch_x, batch_y, validation=True)
loss = criterion(pred.detach().cpu(), true.detach().cpu())
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
"""Training Function.
Args:
setting: Name used to save the model
Returns:
model: Trained model
"""
# Load different datasets
train_loader = self._get_data(flag='train')
vali_loader = self._get_data(flag='val')
test_loader = self._get_data(flag='test')
path = os.path.join(self.args.checkpoints, setting)
if not os.path.exists(path):
os.makedirs(path)
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
# Setting optimizer and loss functions
model_optim = self._select_optimizer()
criterion = nn.MSELoss()
all_training_loss = []
all_validation_loss = []
# Training Loop
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
epoch_time = time.time()
for i, (batch_x, batch_y) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
pred, true = self._process_one_batch(batch_x, batch_y)
loss = criterion(pred, true)
train_loss.append(loss.item())
if (i + 1) % 100 == 0:
print('\titers: {0}/{1}, epoch: {2} | loss: {3:.7f}'.format(
i + 1, train_steps, epoch + 1, loss.item()))
speed = (time.time() - time_now) / iter_count
left_time = speed * (
(self.args.train_epochs - epoch) * train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(
speed, left_time))
iter_count = 0
time_now = time.time()
loss.backward()
model_optim.step()
print('Epoch: {} cost time: {}'.format(epoch + 1,
time.time() - epoch_time))
train_loss = np.average(train_loss)
all_training_loss.append(train_loss)
vali_loss = self.vali(vali_loader, criterion)
all_validation_loss.append(vali_loss)
test_loss = self.vali(test_loader, criterion)
print(
'Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}'
.format(epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, path)
# Plotting train and validation loss
if ((epoch + 1) % 5 == 0 and self.args.plot):
check_folder = os.path.isdir(self.args.plot_dir)
# If folder doesn't exist, then create it.
if not check_folder:
os.makedirs(self.args.plot_dir)
plt.figure()
plt.plot(all_training_loss, label='train loss')
plt.plot(all_validation_loss, label='Val loss')
plt.legend()
plt.savefig(self.args.plot_dir + setting + '.png')
plt.show()
plt.close()
# If ran out of patience stop training
if early_stopping.early_stop:
if self.args.plot:
plt.figure()
plt.plot(all_training_loss, label='train loss')
plt.plot(all_validation_loss, label='Val loss')
plt.legend()
plt.savefig(self.args.plot_dir + setting + '.png')
plt.show()
print('Early stopping')
break
best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def predict(self, setting, load=False):
"""Prediction Function.
Args:
setting: Name used to be used for prediction
load: whether to load best model
Returns:
mae: Mean absolute error
mse: Mean squared error
rmse: Root mean squared error
mape: Mean absolute percentage error
mspe: Mean squared percentage error
"""
# Create prediction dataset
pred_loader = self._get_data(flag='pred')
# Load best model saved in the checkpoint folder
if load:
path = os.path.join(self.args.checkpoints, setting)
best_model_path = path + '/' + 'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
# Get model predictions
self.model.eval()
for i, (batch_x, batch_y) in enumerate(pred_loader):
pred, true = self._process_one_batch(batch_x, batch_y, validation=True)
if i == 0:
preds = pred.detach().cpu().numpy()
trues = true.detach().cpu().numpy()
else:
preds = np.concatenate((preds, pred.detach().cpu().numpy()), axis=0)
trues = np.concatenate((trues, true.detach().cpu().numpy()), axis=0)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
# save predictions made by model
folder_path = './results/' + setting + '/'
check_folder = os.path.isdir(folder_path)
if not check_folder:
os.makedirs(folder_path)
np.save(folder_path + 'real_prediction.npy', preds)
# Evaluate the model performance
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{}, rmse:{}'.format(mse, mae, rmse))
return mae, mse, rmse, mape, mspe, 0, 0
def _process_one_batch(self, batch_x, batch_y, validation=False):
"""Function to process batch and send it to model and get output.
Args:
batch_x: batch input
batch_y: batch target
validation: flag to determine if this process is done for training or
testing
Returns:
outputs: model outputs
batch_y: batch target
"""
# Reshape input for Linear and ARNet
if (self.model_type == 'Linear' or self.model_type == ' ARNet'):
batch_size, _, _ = batch_x.shape
batch_x = batch_x.reshape(batch_size, -1)
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float().to(self.device)
if (self.model_type == 'Linear' or self.model_type == ' ARNet'):
batch_y = batch_y[:, -self.args.pred_len:, 0]
else:
batch_y = batch_y[:, -self.args.pred_len:, 0].unsqueeze(-1)
if not validation:
if self.model_type == ' ARNet':
outputs = self.model(batch_x, batch_y)
else:
outputs = self.model(batch_x)
else:
if self.model_type == ' ARNet':
outputs = self.model.predict(batch_x)
else:
outputs = self.model(batch_x)
return outputs, batch_y
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33448,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.391119 | 4,684 |
import operator
import tempfile
import unittest
import fungraph
| [
11748,
10088,
198,
11748,
20218,
7753,
198,
11748,
555,
715,
395,
198,
198,
11748,
31317,
1470,
628,
198
] | 3.722222 | 18 |
from unittest import TestCase
from p14 import p14
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
279,
1415,
1330,
279,
1415,
198
] | 3.333333 | 15 |
from abc import ABC, abstractmethod
from typing import Union, Optional, Tuple, Dict, Any
from FaceEngine import DetectionFloat, HumanLandmark, HumanLandmarks17 # pylint: disable=E0611,E0401
from .image_utils.geometry import LANDMARKS, Point, Rect
class BaseEstimation(ABC):
"""
Base class for estimation structures.
Attributes:
_coreEstimation: core estimation
"""
__slots__ = ("_coreEstimation",)
@property
def coreEstimation(self):
"""
Get core estimation from init
Returns:
_coreEstimation
"""
return self._coreEstimation
@abstractmethod
def asDict(self) -> Union[dict, list]:
"""
Convert to a dict.
Returns:
dict from luna api
"""
pass
def __repr__(self) -> str:
"""
Representation.
Returns:
str(self.asDict())
"""
return str(self.asDict())
class Landmarks(BaseEstimation):
    """
    Base class for landmarks

    Attributes:
        _points (Optional[Tuple[Point[float]]]): lazy loaded attribute: core landmarks as a point tuple
    """

    __slots__ = ["_points", "_coreEstimation"]

    def __init__(self, coreLandmarks: LANDMARKS):
        """
        Init

        Args:
            coreLandmarks (LANDMARKS): core landmarks
        """
        super().__init__(coreLandmarks)
        self._points: Optional[Tuple[Point[float], ...]] = None

    @property
    def points(self) -> Tuple[Point[float], ...]:
        """
        Lazy points loader.

        Returns:
            tuple of points converted from the core landmarks
        """
        if self._points is None:
            self._points = tuple(
                Point.fromVector2(self._coreEstimation[index]) for index in range(len(self._coreEstimation))
            )
        return self._points

    def asDict(self) -> Tuple[Tuple[int, int], ...]:  # type: ignore
        """
        Convert to dict

        Returns:
            tuple of (x, y) integer coordinate pairs, one per landmark
        """
        pointCount = len(self._coreEstimation)
        points = self._coreEstimation
        # BUGFIX: the second element must be the y coordinate; the previous
        # version emitted point.x twice, silently dropping y.
        return tuple(((int(points[index].x), int(points[index].y)) for index in range(pointCount)))
class LandmarkWithScore(BaseEstimation):
    """
    A single landmark point together with its confidence score.
    """

    def __init__(self, landmark: HumanLandmark):  # pylint: disable=C0103
        """
        Init.

        Args:
            landmark: core landmark to wrap
        """
        super().__init__(landmark)

    @property
    def point(self) -> Point[float]:
        """
        Landmark coordinate.

        Returns:
            point converted from the core vector
        """
        return Point.fromVector2(self._coreEstimation.point)

    @property
    def score(self) -> float:
        """
        Landmark confidence.

        Returns:
            float[0,1]
        """
        return self._coreEstimation.score

    def asDict(self) -> dict:
        """
        Serialize to a dict (json); coordinates are truncated from float to int.

        Returns:
            dict with keys: score and point
        """
        coordinates = self.point
        return {"score": self._coreEstimation.score, "point": (int(coordinates.x), int(coordinates.y))}

    def __repr__(self) -> str:
        """
        Representation.

        Returns:
            "x = {self.point.x}, y = {self.point.y}, score = {self.score}"
        """
        return "x = {}, y = {}, score = {}".format(self.point.x, self.point.y, self.score)
class LandmarksWithScore(BaseEstimation):
    """
    Base class for landmarks with score

    Attributes:
        _points (Optional[Tuple[LandmarkWithScore, ...]]): lazily built tuple of scored points
    """

    __slots__ = ["_points", "_coreEstimation"]

    def __init__(self, coreLandmarks: HumanLandmarks17):
        """
        Init

        Args:
            coreLandmarks (LANDMARKS): core landmarks
        """
        super().__init__(coreLandmarks)
        self._points: Optional[Tuple[LandmarkWithScore, ...]] = None

    @property
    def points(self) -> Tuple[LandmarkWithScore, ...]:
        """
        Lazy load of points.

        Returns:
            tuple of scored points
        """
        if self._points is not None:
            return self._points
        estimation = self._coreEstimation
        self._points = tuple(LandmarkWithScore(estimation[idx]) for idx in range(len(estimation)))
        return self._points

    def asDict(self) -> Tuple[dict, ...]:  # type: ignore
        """
        Convert to dict

        Returns:
            tuple of per-point dicts
        """
        return tuple(item.asDict() for item in self.points)
class BoundingBox(BaseEstimation):
    """
    Detection bounding box, characterized by a rect and a score:

        - rect (Rect[float]): face bounding box
        - score (float): face score (0,1); the detection score is the measure
          of classification confidence and not the source image quality.
          It may be used to pick the most "*confident*" face of many.
    """

    # pylint: disable=W0235
    def __init__(self, boundingBox: DetectionFloat):
        """
        Init.

        Args:
            boundingBox: core bounding box
        """
        super().__init__(boundingBox)

    @property
    def score(self) -> float:
        """
        Detection confidence.

        Returns:
            number in range [0,1]
        """
        return self._coreEstimation.score

    @property
    def rect(self) -> Rect[float]:
        """
        Bounding rectangle.

        Returns:
            float rect converted from the core rect
        """
        return Rect.fromCoreRect(self._coreEstimation.rect)

    def asDict(self) -> Dict[str, Union[Dict[str, float], float]]:
        """
        Serialize to a dict.

        Returns:
            {"rect": self.rect.asDict(), "score": self.score}
        """
        serialized = {"rect": self.rect.asDict(), "score": self.score}
        return serialized
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
19720,
1330,
4479,
11,
32233,
11,
309,
29291,
11,
360,
713,
11,
4377,
198,
198,
6738,
15399,
13798,
1330,
46254,
43879,
11,
5524,
22342,
4102,
11,
5524,
22342,
14306,
1558,
220,
... | 2.245363 | 2,588 |
# Generated by Django 3.0.2 on 2020-01-30 15:54
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
17,
319,
12131,
12,
486,
12,
1270,
1315,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
# Generated by Django 3.2 on 2021-04-28 00:01
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
3023,
12,
2078,
3571,
25,
486,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
# Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
# Typing-only setup: define the Literal alias for the NEM importance-transfer
# mode field when typing support is available; silently skipped otherwise.
if __debug__:
    try:
        from typing import Dict, List  # noqa: F401
        from typing_extensions import Literal  # noqa: F401
        # Allowed wire values for the mode field; presumably 1/2 map to the
        # NEM activate/deactivate transfer modes -- TODO confirm against protocol spec.
        EnumTypeNEMImportanceTransferMode = Literal[1, 2]
    except ImportError:
        pass
| [
2,
17406,
4142,
7560,
416,
279,
65,
17,
9078,
198,
2,
46996,
25,
572,
198,
6738,
11485,
1330,
1237,
672,
3046,
355,
279,
198,
198,
361,
11593,
24442,
834,
25,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
220,
... | 2.475806 | 124 |
"""
Provides trees/bushes/etc.
"""
__copyright__ = """
MIT License
Copyright (c) 2019 tcdude
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
from math import ceil
from math import pi
import numpy as np
from panda3d import core
from .shapegen import shape
from . import common
# Module-level shape generator shared by the vegetation builders in this
# module (definitions not visible in this fragment -- TODO confirm usage).
sg = shape.ShapeGen()

# noinspection PyArgumentList


# noinspection PyArgumentList


# noinspection PyArgumentList


# noinspection PyArgumentList


# noinspection PyArgumentList
| [
37811,
198,
15946,
1460,
7150,
14,
10885,
956,
14,
14784,
13,
198,
37811,
198,
198,
834,
22163,
4766,
834,
796,
37227,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
13130,
256,
10210,
2507,
198,
198,
5990,
3411,
318,
29376,
7520,
11... | 3.643392 | 401 |
"""Managers."""
# Django
from django.db import models
class ActiveManager(models.Manager):
    """Active manager.

    Custom Django model manager; presumably scopes querysets to "active"
    rows, but no filtering override is visible in this fragment -- TODO confirm.
    """
| [
37811,
5124,
10321,
526,
15931,
198,
198,
2,
37770,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
4871,
14199,
13511,
7,
27530,
13,
13511,
2599,
198,
220,
220,
220,
37227,
13739,
4706,
526,
15931,
198
] | 3.243243 | 37 |
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.signal import fftconvolve
from librosa.core import load
from librosa.core import stft
from librosa.core import istft
from librosa import amplitude_to_db, db_to_amplitude
from librosa.display import specshow
from librosa.output import write_wav
from scipy.signal import butter, lfilter, csd
from scipy.linalg import svd, pinv
import scipy
import scipy.fftpack
from scipy.linalg import toeplitz
from scipy.signal import fftconvolve
from utils import apply_reverb, read_wav
import corpus
import mir_eval
from pypesq import pypesq
import pyroomacoustics as pra
import roomsimove_single
import olafilt
# Script entry point; `main` is expected to be defined earlier in the file
# (not visible in this fragment) -- TODO confirm.
if __name__ == '__main__':
    main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
629,
541,
88,
13,
952,
1330,
266,
615,
7753,
198,
6738,
629,
541,
88,
13,
12683,
282,
1330,
277,
701,
42946,
6442,
198,
198,
673... | 2.859922 | 257 |