content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""Test VirtualPIPoint calculus."""
# Copyright 2017 Hugo van den Berg, Stijn de Jong
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pragma pylint: disable=unused-import
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import (
bytes,
dict,
int,
list,
object,
range,
str,
ascii,
chr,
hex,
input,
next,
oct,
open,
pow,
round,
super,
filter,
map,
zip,
)
# pragma pylint: enable=unused-import
from .fakes import pi_point # pylint: disable=unused-import
class TestVirtualAddition:
"""Test VirtualPIPoint addition."""
def test_add_integer_current_value(self, pi_point):
"""Test adding an integer to a PIPoint via the current value."""
point2 = pi_point.point + 1
assert round(point2.current_value - (pi_point.values[-1] + 1), ndigits=7) == 0
def test_add_integer_reverse_current_value(self, pi_point):
"""Test adding a PIPoint to an integer via the current value."""
point2 = 1 + pi_point.point
assert round(point2.current_value - (pi_point.values[-1] + 1), ndigits=7) == 0
def test_add_pipoints_current_value(self, pi_point):
"""Test adding two PIPoints via the current value."""
total = pi_point.point + pi_point.point
assert (
round(
total.current_value - (pi_point.values[-1] + pi_point.values[-1]),
ndigits=7,
)
== 0
)
class TestVirtualMultiplication:
"""Test VirtualPIPoint addition."""
def test_multiply_integer_current_value(self, pi_point):
"""Test adding an integer to a PIPoint via the current value."""
point2 = pi_point.point * 1
assert round(point2.current_value - (pi_point.values[-1] * 1), ndigits=7) == 0
def test_multiply_integer_reverse_current_value(self, pi_point):
"""Test adding a PIPoint to an integer via the current value."""
point2 = 1 * pi_point.point
assert round(point2.current_value - (pi_point.values[-1] * 1), ndigits=7) == 0
def test_multiply_pipoints_current_value(self, pi_point):
"""Test adding two PIPoints via the current value."""
total = pi_point.point * pi_point.point
assert (
round(
total.current_value - (pi_point.values[-1] * pi_point.values[-1]),
ndigits=7,
)
== 0
)
def test_multiply_integer_two_current_value(self, pi_point):
"""Test adding an integer to a PIPoint via the current value."""
point2 = pi_point.point * 2
assert round(point2.current_value - (pi_point.values[-1] * 2), ndigits=7) == 0
def test_multiply_integer_two_reverse_current_value(self, pi_point):
"""Test adding a PIPoint to an integer via the current value."""
point2 = 2 * pi_point.point
assert round(point2.current_value - (pi_point.values[-1] * 2), ndigits=7) == 0
def test_multiply_float_two_current_value(self, pi_point):
"""Test adding an integer to a PIPoint via the current value."""
point2 = pi_point.point * 2.0
assert round(point2.current_value - (pi_point.values[-1] * 2.0), ndigits=7) == 0
def test_multiply_float_two_reverse_current_value(self, pi_point):
"""Test adding a PIPoint to an integer via the current value."""
point2 = 2.0 * pi_point.point
assert round(point2.current_value - (pi_point.values[-1] * 2.0), ndigits=7) == 0
| [
37811,
14402,
15595,
47,
4061,
1563,
41443,
526,
15931,
198,
2,
15069,
2177,
25930,
5719,
2853,
24626,
11,
520,
48848,
390,
17960,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
1... | 2.566761 | 1,775 |
"""
HTTPクライアントや認証、環境設定などの共通処理用ディレクトリ
"""
from . import config
from . import auth
from . import utils
from . import locust
from .locust import AppLocust
| [
37811,
198,
40717,
14099,
9263,
11482,
11839,
6527,
13298,
1792,
226,
45739,
235,
164,
101,
120,
23513,
163,
240,
108,
161,
95,
225,
164,
101,
255,
22522,
248,
26945,
2515,
102,
15474,
227,
109,
34460,
248,
49035,
99,
49426,
228,
18796,... | 1.876543 | 81 |
import numpy as np
import cmath
import torch
def rot(euler: tuple) -> torch.tensor:
"""
General rotation matrix
:param euler: (a, b, r) rotation in rad in ZYX
:return R: a rotation matrix R
"""
from math import sin, cos
a, b, r = euler[0], euler[1], euler[2]
row1 = torch.tensor([cos(a)*cos(b), cos(a)*sin(b)*sin(r)-sin(a)*cos(r), cos(a)*sin(b)*cos(r)+sin(a)*sin(r)])
row2 = torch.tensor([sin(a)*cos(b), sin(a)*sin(b)*sin(r)+cos(a)*cos(r), sin(a)*sin(b)*cos(r)-cos(a)*sin(r)])
row3 = torch.tensor([-sin(b), cos(b)*sin(r), cos(b)*cos(r)])
R = torch.stack((row1, row2, row3), 0)
assert cmath.isclose(torch.linalg.det(R), 1, rel_tol=1e-04), torch.linalg.det(R)
return R
def rot_to_euler(R: np.array) -> np.array:
"""
:return: Euler angles in rad in ZYX
"""
import cv2 as cv
if torch.is_tensor(R):
R = R.detach().cpu().numpy()
angles = np.radians(cv.RQDecomp3x3(R)[0])
angles[0], angles[2] = angles[2], angles[0]
return angles
class Human:
""" Implementation of Winter human model """
def check_constraints(self, bone, R: np.array, parent=None):
"""
Punish (by adding weights) if NN outputs are beyond joint rotation constraints.
"""
import torch.nn.functional as f
absolute_angles = rot_to_euler(R.reshape(3,-1))
if parent is not None:
parent_angles = rot_to_euler(parent.detach().cpu().numpy())
child_angles = absolute_angles
relative_angles = child_angles - parent_angles
aug_angles, punish_w = self.check_range(bone, relative_angles)
R = rot(aug_angles + parent_angles)
else:
aug_angles, punish_w = self.check_range(bone, absolute_angles)
R = rot(aug_angles)
return f.normalize(R.to(torch.float32).to(self.device)), punish_w
def sort_rot(self, elem: np.array):
"""
:param ang: a list of 144 elements (9 * 16)
process NN output to rotation matrix of 16 bones
"""
elem = elem.flatten()
assert len(elem) == 144, len(elem)
self.rot_mat, self.punish_list = {}, []
for k, bone in enumerate(self.constraints.keys()):
R = elem[9*k:9*(k+1)]
if bone in self.child.keys():
parent = self.child[bone]
self.rot_mat[bone], punish_w = self.check_constraints(bone, R, self.rot_mat[parent])
else:
self.rot_mat[bone], punish_w = self.check_constraints(bone, R)
self.punish_list.append(punish_w)
def update_bones(self, elem=None):
"""
Initiates a T-Pose human model and
rotate each bone using the rotation matrices if given
:return model: a numpy array of (17,3)
"""
self._init_bones()
if elem is not None:
elem = elem.detach().cpu().numpy() if torch.is_tensor(elem) else elem
self.sort_rot(elem)
self.bones = { bone: self.rot_mat[bone] @ self.bones[bone] for bone in self.constraints.keys() }
def update_pose(self, elem=None) -> torch.tensor:
"""
Assemble bones to make a human body
"""
self.update_bones(elem)
root = self.root
lower_spine = self.bones["lower_spine"]
neck = self.bones["upper_spine"] + lower_spine
chin = self.bones["neck"] + neck
nose = self.bones["head"] + chin
l_shoulder = self.bones["l_clavicle"] + neck
l_elbow = self.bones["l_upper_arm"] + l_shoulder
l_wrist = self.bones["l_lower_arm"] + l_elbow
r_shoulder = self.bones["r_clavicle"] + neck
r_elbow = self.bones["r_upper_arm"] + r_shoulder
r_wrist = self.bones["r_lower_arm"] + r_elbow
l_hip = self.bones["l_hip"]
l_knee = self.bones["l_thigh"] + l_hip
l_ankle = self.bones["l_calf"] + l_knee
r_hip = self.bones["r_hip"]
r_knee = self.bones["r_thigh"] + r_hip
r_ankle = self.bones["r_calf"] + r_knee
self.model = torch.stack((neck, lower_spine, root, chin, nose,
l_shoulder, l_elbow, l_wrist, r_shoulder, r_elbow, r_wrist,
l_hip, l_knee, l_ankle, r_hip, r_knee, r_ankle), 0)
return self.model
def vectorize(gt_3d) -> torch.tensor:
"""
process gt_3d (17,3) into a (16,4) that contains bone vector and length
:return bone_info: [unit bone vector (,3) + bone length (,1)]
"""
indices = (
(2,1), (1,0), (0,3), (3,4), # spine + head
(0,5), (5,6), (6,7),
(0,8), (8,9), (9,10), # arms
(2,11), (11,12), (12,13),
(2,14), (14,15), (15,16), # legs
)
num_bones = len(indices)
gt_3d_tensor = gt_3d if torch.is_tensor(gt_3d) \
else torch.from_numpy(gt_3d)
bone_info = torch.zeros([num_bones, 4], requires_grad=False) # (16, 4)
for i in range(num_bones):
vec = gt_3d_tensor[indices[i][1],:] - gt_3d_tensor[indices[i][0],:]
vec_len = torch.linalg.norm(vec)
unit_vec = vec/vec_len
bone_info[i,:3], bone_info[i,3] = unit_vec, vec_len
return bone_info
# functions below are for demonstration and debuggging purpose
if __name__ == "__main__":
rand_pose()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
11018,
198,
11748,
28034,
628,
198,
4299,
5724,
7,
68,
18173,
25,
46545,
8,
4613,
28034,
13,
83,
22854,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3611,
13179,
17593,
198,
220,
... | 2.056999 | 2,579 |
import pandas as pd
import numpy as np
import datetime
import math
import binning
from ..core.status import Status
config = Config()
analysis = Analysis(config)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
8079,
198,
11748,
10688,
198,
11748,
9874,
768,
198,
198,
6738,
11485,
7295,
13,
13376,
1330,
12678,
198,
198,
11250,
796,
17056,
3419,
198,
20930,
7... | 3.565217 | 46 |
import pytest
import sell_stats
| [
11748,
12972,
9288,
198,
11748,
3677,
62,
34242,
628,
628,
198
] | 3.272727 | 11 |
import re
import allure
from model.contact import Contact
| [
11748,
302,
198,
11748,
477,
495,
198,
6738,
2746,
13,
32057,
1330,
14039,
628,
628,
198
] | 3.875 | 16 |
# This program has been developed by students from the bachelor Computer Science at Utrecht University within the
# Software and Game project course
# ©Copyright Utrecht University Department of Information and Computing Sciences.
"""
Django settings for mofa project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import logging
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's52+4q(4zx)w9xw=@a^yagzq@79$^7=!&h+!v@)o*qzhn%xhe+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Set JQUERY_URL to true in order to work with smart_selects
JQUERY_URL = "https://code.jquery.com/jquery-3.4.1.min.js"
ALLOWED_HOSTS = ['host.docker.internal', 'dockerhost', 'localhost',
'127.0.0.1', '[::1]', '0.0.0.0']
# Application definition
INSTALLED_APPS = [
'assistants.apps.AssistantsConfig',
'scheduler.apps.SchedulerConfig',
'courses.apps.CoursesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'smart_selects',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mofa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'mofa', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mofa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'courses.User'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.getenv("TIME_ZONE")
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "mofa/static"),
]
# Logging
if os.getenv('TESTING'):
logging.basicConfig(level=logging.CRITICAL)
else:
log_format = '%(asctime)s: %(message)s'
logging.basicConfig(filename='../log.log', level=logging.WARNING, format=log_format)
if not os.getenv('TESTING'):
# Moodle
MOODLE_BASE_URL = os.getenv("MOODLE_BASE_URL") if not os.getenv("MOODLE_BASE_URL").endswith('/') \
else os.getenv("MOODLE_BASE_URL")[:-1]
MOODLE_BASE_IP = os.getenv("MOODLE_BASE_IP") if not os.getenv("MOODLE_BASE_IP").endswith('/') \
else os.getenv("MOODLE_BASE_IP")[:-1]
MOODLE_WEBSERVICE_URL = os.getenv("MOODLE_WEBSERVICE_URL")
MOODLE_TOKEN = os.getenv("MOODLE_TOKEN")
# Learning Locker
LL_URL = os.getenv("LL_URL")
LL_AUTH_KEY = os.getenv("LL_AUTH_KEY")
ORGANISATION = os.getenv("ORGANISATION")
# Django
DJANGO_PORT = os.getenv("DJANGO_PORT")
DJANGO_URL = os.getenv("DJANGO_URL")
SYNC_AGENT_URLS = {'course': f'{DJANGO_URL}:{DJANGO_PORT}/assistants/api/course_sync_agent/',
'user': f'{DJANGO_URL}:{DJANGO_PORT}/assistants/api/user_sync_agent/',
'question': f'{DJANGO_URL}:{DJANGO_PORT}/assistants/api/question_sync_agent/'}
| [
2,
770,
1430,
468,
587,
4166,
416,
2444,
422,
262,
29384,
13851,
5800,
379,
7273,
260,
21474,
2059,
1626,
262,
198,
2,
10442,
290,
3776,
1628,
1781,
198,
2,
10673,
15269,
7273,
260,
21474,
2059,
2732,
286,
6188,
290,
38589,
13473,
13,... | 2.307483 | 2,205 |
from conans import ConanFile, CMake, tools
import os
| [
6738,
369,
504,
1330,
31634,
8979,
11,
327,
12050,
11,
4899,
198,
11748,
28686,
628
] | 3.6 | 15 |
""" The (Linear) Integer Partition Problem into k equal parts """
l = [7, 3, 2, 1, 5, 4, 8]
k = 3
n = len(l)
mat = [[l[0] for j in range(k)] for i in range(n)]
div = [[0 for i in range(k)] for i in range(n)]
for i in range(n):
mat[i][0] = sum(l[:i+1])
for i in range(1, k):
for j in range(1, n):
x = list()
for m in range(0,j):
x.append(max(mat[m][i-1], sum(l[m+1:j+1])))
x.append(mat[j][i-1])
mat[j][i] = min(x)
div[j][i] = x.index(min(x))
print("Partitions : ")
partition(n-1, k-1, n) | [
37811,
383,
357,
14993,
451,
8,
34142,
2142,
653,
20647,
656,
479,
4961,
3354,
37227,
198,
75,
796,
685,
22,
11,
513,
11,
362,
11,
352,
11,
642,
11,
604,
11,
807,
60,
198,
74,
796,
513,
198,
198,
77,
796,
18896,
7,
75,
8,
198,... | 1.883562 | 292 |
from transformers import ViltProcessor, ViltModel
from PIL import Image
import requests
import os
# prepare image and text
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
text = "hello world"
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
model = ViltModel.from_pretrained("dandelin/vilt-b32-mlm")
inputs = processor(image, text, return_tensors="pt")
outputs = model(**inputs)
# last_hidden_states = outputs.last_hidden_state, | [
198,
6738,
6121,
364,
1330,
569,
2326,
18709,
273,
11,
569,
2326,
17633,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
7007,
198,
11748,
28686,
198,
2,
8335,
2939,
290,
2420,
198,
6371,
796,
366,
4023,
1378,
17566,
13,
66,
420,
375,
... | 2.812834 | 187 |
"""Honkai battle chronicle models."""
from __future__ import annotations
import datetime
import re
import typing
import pydantic
from genshin.models.honkai import battlesuit
from genshin.models.model import Aliased, APIModel, Unique
__all__ = [
"Boss",
"ELF",
"ElysianRealm",
"MemorialArena",
"MemorialBattle",
"OldAbyss",
"SuperstringAbyss",
]
REMEMBRANCE_SIGILS: typing.Dict[int, typing.Tuple[str, int]] = {
119301: ("The MOTH Insignia", 1),
119302: ("Home Lost", 1),
119303: ("False Hope", 1),
119304: ("Tin Flask", 1),
119305: ("Ruined Legacy", 1),
119306: ("Burden", 2),
119307: ("Gold Goblet", 2),
119308: ("Mad King's Mask", 2),
119309: ("Light as a Bodhi Leaf", 2),
119310: ("Forget-Me-Not", 2),
119311: ("Forbidden Seed", 2),
119312: ("Memory", 2),
119313: ("Crystal Rose", 2),
119314: ("Abandoned", 3),
119315: ("Good Old Days", 3),
119316: ("Shattered Shackles", 3),
119317: ("Heavy as a Million Lives", 3),
119318: ("Stained Sakura", 3),
119319: ("The First Scale", 3),
119320: ("Resolve", 3),
119321: ("Thorny Crown", 3),
}
# GENERIC
def get_competitive_tier_mi18n(tier: int) -> str:
"""Turn the tier returned by the API into the respective tier name displayed in-game."""
return "bbs/" + ("area1", "area2", "area3", "area4")[tier - 1]
class Boss(APIModel, Unique):
"""Represents a Boss encountered in Abyss or Memorial Arena."""
id: int
name: str
icon: str = Aliased("avatar")
@pydantic.validator("icon")
class ELF(APIModel, Unique):
"""Represents an ELF equipped for a battle."""
id: int
name: str
icon: str = Aliased("avatar")
rarity: str
upgrade_level: int = Aliased("star")
@pydantic.validator("rarity", pre=True)
# ABYSS
def get_abyss_rank_mi18n(rank: int, tier: int) -> str:
"""Turn the rank returned by the API into the respective rank name displayed in-game."""
if tier == 4:
mod = ("1", "2_1", "2_2", "2_3", "3_1", "3_2", "3_3", "4", "5")[rank - 1]
else:
mod = str(rank)
return f"bbs/level{mod}"
class BaseAbyss(APIModel):
"""Represents one cycle of abyss.
(3 days per cycle, 2 cycles per week)
"""
# somewhat feel like this is overkill
abyss_lang: str = "en-us"
raw_tier: int = Aliased("area")
score: int
lineup: typing.Sequence[battlesuit.Battlesuit]
boss: Boss
elf: typing.Optional[ELF]
@property
def tier(self) -> str:
"""The user's Abyss tier as displayed in-game."""
return self.get_tier()
def get_tier(self, lang: typing.Optional[str] = None) -> str:
"""Get the user's Abyss tier in a specific language."""
key = get_competitive_tier_mi18n(self.raw_tier)
return self._get_mi18n(key, lang or self.abyss_lang)
class OldAbyss(BaseAbyss):
"""Represents once cycle of Quantum Singularis or Dirac Sea.
Exclusive to players of level 80 and below.
"""
end_time: datetime.datetime = Aliased("time_second")
raw_type: str = Aliased("type")
result: str = Aliased("reward_type")
raw_rank: int = Aliased("level")
@pydantic.validator("raw_rank", pre=True)
@property
def rank(self) -> str:
"""The user's Abyss rank as displayed in-game."""
return self.get_rank()
def get_rank(self, lang: typing.Optional[str] = None) -> str:
"""Get the user's Abyss rank in a specific language."""
key = get_abyss_rank_mi18n(self.raw_rank, self.raw_tier)
return self._get_mi18n(key, lang or self.abyss_lang)
@property
def type(self) -> str:
"""The name of this cycle's abyss type."""
return self.get_type()
def get_type(self, lang: typing.Optional[str] = None) -> str:
"""Get the name of this cycle's abyss type in a specific language."""
key = "bbs/" + ("level_of_ow" if self.raw_type == "OW" else self.raw_type)
return self._get_mi18n(key, lang or self.abyss_lang)
class SuperstringAbyss(BaseAbyss):
"""Represents one cycle of Superstring Abyss, exclusive to players of level 81 and up."""
# NOTE endpoint: game_record/honkai3rd/api/latestOldAbyssReport
end_time: datetime.datetime = Aliased("updated_time_second")
raw_tier: int = 4 # Not returned by API, always the case
placement: int = Aliased("rank")
trophies_gained: int = Aliased("settled_cup_number")
end_trophies: int = Aliased("cup_number")
raw_start_rank: int = Aliased("level")
raw_end_rank: int = Aliased("settled_level")
@property
def start_rank(self) -> str:
"""The rank the user started the abyss cycle with, as displayed in-game."""
return self.get_start_rank()
def get_start_rank(self, lang: typing.Optional[str] = None) -> str:
"""Get the rank the user started the abyss cycle with in a specific language."""
key = get_abyss_rank_mi18n(self.raw_start_rank, self.raw_tier)
return self._get_mi18n(key, lang or self.abyss_lang)
@property
def end_rank(self) -> str:
"""The rank the user ended the abyss cycle with, as displayed in-game."""
return self.get_end_rank()
def get_end_rank(self, lang: typing.Optional[str] = None) -> str:
"""Get the rank the user ended the abyss cycle with in a specific language."""
key = get_abyss_rank_mi18n(self.raw_end_rank, self.raw_tier)
return self._get_mi18n(key, lang or self.abyss_lang)
@property
# MEMORIAL ARENA
def prettify_MA_rank(rank: int) -> str: # Independent of mi18n
"""Turn the rank returned by the API into the respective rank name displayed in-game."""
brackets = (0, 0.20, 2, 7, 17, 35, 65)
return f"{brackets[rank - 1]:1.2f} ~ {brackets[rank]:1.2f}"
class MemorialBattle(APIModel):
"""Represents weekly performance against a single Memorial Arena boss."""
score: int
lineup: typing.Sequence[battlesuit.Battlesuit]
elf: typing.Optional[ELF]
boss: Boss
class MemorialArena(APIModel):
"""Represents aggregate weekly performance for the entire Memorial Arena rotation."""
ma_lang: str = "en-us"
score: int
ranking: float = Aliased("ranking_percentage")
raw_rank: int = Aliased("rank")
raw_tier: int = Aliased("area")
end_time: datetime.datetime = Aliased("time_second")
battle_data: typing.Sequence[MemorialBattle] = Aliased("battle_infos")
@property
def rank(self) -> str:
"""The user's Memorial Arena rank as displayed in-game."""
return prettify_MA_rank(self.raw_rank)
@property
def tier(self) -> str:
"""The user's Memorial Arena tier as displayed in-game."""
return self.get_tier()
def get_tier(self, lang: typing.Optional[str] = None) -> str:
"""Get the user's Memorial Arena tier in a specific language."""
key = get_competitive_tier_mi18n(self.raw_tier)
return self._get_mi18n(key, lang or self.ma_lang)
# ELYSIAN REALMS
# TODO: Implement a way to link response_json["avatar_transcript"] data to be added to
# ER lineup data; will require new Battlesuit subclass.
class Condition(APIModel):
"""Represents a debuff picked at the beginning of an Elysian Realms run."""
name: str
description: str = Aliased("desc")
difficulty: int
class Signet(APIModel):
"""Represents a buff Signet picked in an Elysian Realms run."""
id: int
icon: str
number: int
@property
class RemembranceSigil(APIModel):
"""Represents a Remembrance Sigil from Elysian Realms."""
icon: str
@property
@property
@property
class ElysianRealm(APIModel):
"""Represents one completed run of Elysean Realms."""
completed_at: datetime.datetime = Aliased("settle_time_second")
floors_cleared: int = Aliased("level")
score: int
difficulty: int = Aliased("punish_level")
conditions: typing.Sequence[Condition]
signets: typing.Sequence[Signet] = Aliased("buffs")
leader: battlesuit.Battlesuit = Aliased("main_avatar")
supports: typing.Sequence[battlesuit.Battlesuit] = Aliased("support_avatars")
elf: typing.Optional[ELF]
remembrance_sigil: RemembranceSigil = Aliased("extra_item_icon")
@pydantic.validator("remembrance_sigil", pre=True)
@property
| [
37811,
29478,
32765,
3344,
16199,
1548,
4981,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
11748,
19720,
198,
198,
11748,
279,
5173,
5109,
198,
198,
6738,
308,
641,
20079,
13,
... | 2.614998 | 3,187 |
import download_genomes
import process_Gnomon
import generate_gene_map
import gene_clustering
from gene_modelling_utils import resolve_args
if __name__=='__main__':
args = resolve_args()
if 'download_genomes' in args.scripts:
download_genomes.main()
if 'process_Gnomon' in args.scripts:
process_Gnomon.main()
if 'generate_gene_map' in args.scripts:
generate_gene_map.main()
if 'gene_clustering' in args.scripts:
gene_clustering.main() | [
11748,
4321,
62,
5235,
2586,
198,
11748,
1429,
62,
38,
77,
16698,
198,
11748,
7716,
62,
70,
1734,
62,
8899,
198,
11748,
9779,
62,
565,
436,
1586,
198,
6738,
9779,
62,
4666,
9417,
62,
26791,
1330,
10568,
62,
22046,
198,
198,
361,
115... | 2.497436 | 195 |
from pyxbos.process import run_loop
from pyxbos.drivers import pbc
import logging
logging.basicConfig(level="INFO", format='%(asctime)s - %(name)s - %(message)s')
import random
class democontroller(pbc.LPBCProcess):
"""
To implement a LPBC, subclass pbc.LPBCProcess
and implement the step() method as documented below
"""
def step(self, c37_frame, p_target, q_target):
"""
Step is called every 'rate' seconds with the most recent c37 frame from the upmu
and the latest P and Q targets given by the SPBC.
It runs its control loop to determine the actuation, performs it is 'self.control_on' is True
and returns the status
C37 frame looks like
{
"stationName": "ENERGIZE_1",
"idCode": 1,
"phasorChannels": [
{
"channelName": "L1MagAng",
"unit": "Volt",
"data": [
{
"time": "1559231114799996800",
"angle": 193.30149788923268,
"magnitude": 0.038565948605537415
},
{
"time": "1559231114899996400",
"angle": 195.50249902851263,
"magnitude": 0.042079225182533264
}
]
}
]
}
"""
print(c37_frame)
# do measurements
self.measured_p = random.randint(0,100)
self.measured_q = random.randint(0,100)
p_diff = self.measured_p - p_target
q_diff = self.measured_q - q_target
print(f'controller called. P diff: {p_diff}, Q diff: {q_diff}')
if self.control_on:
print("DO CONTROL HERE")
# return error message (default to empty string), p, q and boolean saturated value
return ("error message", self.measured_p, self.measured_q, self.saturated)
cfg = {
'namespace': "GyCetklhSNcgsCKVKXxSuCUZP4M80z9NRxU1pwfb2XwGhg==",
'name': 'lpbc1', # name of lpbc
'upmu': 'L1', # name + other info for uPMU
'rate': 2, # number of seconds between calls to 'step'
}
lpbc1 = democontroller(cfg)
run_loop()
| [
6738,
12972,
30894,
418,
13,
14681,
1330,
1057,
62,
26268,
198,
6738,
12972,
30894,
418,
13,
36702,
1330,
279,
15630,
198,
11748,
18931,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
2625,
10778,
1600,
5794,
11639,
4,
7,
292,
310,
524,
... | 1.837121 | 1,320 |
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
"""Responsible AI Utilities package."""
from .version import name, version
__name__ = name
__version__ = version
| [
2,
15069,
357,
66,
8,
5413,
10501,
198,
2,
49962,
739,
262,
17168,
13789,
13,
198,
198,
37811,
19309,
684,
856,
9552,
41086,
5301,
526,
15931,
198,
198,
6738,
764,
9641,
1330,
1438,
11,
2196,
198,
198,
834,
3672,
834,
796,
1438,
198... | 3.836735 | 49 |
import requests
import json
from datetime import datetime, timedelta
import dateutil.parser
import pytz
API_SERVER_URI = 'https://api.copyleaks.com'
IDENTITY_SERVER_URI = 'https://id.copyleaks.com'
USER_AGENT = 'python-sdk/3.0'
| [
11748,
7007,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
3128,
22602,
13,
48610,
198,
11748,
12972,
22877,
628,
198,
17614,
62,
35009,
5959,
62,
47269,
796,
705,
5450,
1378,
15042,
13,
22163,
... | 2.77381 | 84 |
"""Unit testing with python coding as following Plurasight course*
* Unit Testing with Python
https://app.pluralsight.com/library/courses/ec92942a-62c7-466e-ba92-56201eaf900f/table-of-contents
By Emily Bache
""" | [
37811,
26453,
4856,
351,
21015,
19617,
355,
1708,
1345,
17786,
432,
1781,
9,
198,
198,
9,
11801,
23983,
351,
11361,
198,
5450,
1378,
1324,
13,
489,
333,
874,
432,
13,
785,
14,
32016,
14,
66,
39975,
14,
721,
24,
1959,
3682,
64,
12,
... | 2.90411 | 73 |
import numpy as np
from scipy.special import logsumexp
import params
def forward_backward(lls, tr, ip):
"""
Inputs:
lls - matrix of per-frame log HMM state output probabilities
tr - transition probability matrix
ip - vector of initial state probabilities (i.e. statrting in the state)
Outputs:
sp - matrix of per-frame state occupation posteriors
tll - total (forward) log-likelihood
lfw - log forward probabilities
lfw - log backward probabilities
"""
ltr = np.log(tr)
lfw = np.empty_like(lls)
lbw = np.empty_like(lls)
lfw[:] = -np.inf
lbw[:] = -np.inf
lfw[0] = lls[0] + np.log(ip)
lbw[-1] = 0.0
for ii in range(1, len(lls)):
lfw[ii] = lls[ii] + logsumexp(lfw[ii - 1] + ltr.T, axis=1)
for ii in reversed(range(len(lls) - 1)):
lbw[ii] = logsumexp(ltr + lls[ii + 1] + lbw[ii + 1], axis=1)
tll = logsumexp(lfw[-1])
sp = np.exp(lfw + lbw - tll)
return sp, tll, lfw, lbw
def mean_filter(arr, k):
"""Process mean filter over array of k-elements on each side,
changing filter size on start and end of array to smoother output"""
kernel = np.ones(2 * k + 1) / (2 * k + 1)
if kernel.shape[0] > arr.shape[0]:
kernel = np.zeros(arr.shape[0])
front = np.empty(k)
back = np.empty(k)
for i in range(k):
front[i] = np.mean(arr[0: +i + k + 1])
back[i] = np.mean(arr[arr.shape[0] - k - 1 - i:])
out = np.convolve(arr, kernel, mode='same')
out[0:k] = front
out[arr.shape[0] - k:] = np.flip(back)
return out
def segments_filter(arr, filter_size, value_to_filter):
"""Remove segments containing provided value shorter than filter_size"""
if filter_size <= 0:
return arr
filter_size = int(filter_size / params.window_stride)
segment_start = np.empty(arr.shape, dtype=bool)
segment_start[0] = True
segment_start[1:] = np.not_equal(arr[:-1], arr[1:])
segment_indexes = np.argwhere(segment_start).reshape(-1)
segment_indexes = np.append(segment_indexes, arr.shape)
segments = np.append(segment_indexes[:-1][:, np.newaxis], segment_indexes[1:][:, np.newaxis], axis=1)
value_to_replace = 1 - value_to_filter
for index in range(segments.shape[0]):
segment = segments[index]
segment_width = segment[1] - segment[0]
if arr[segment[0]] == value_to_filter and segment_width < filter_size:
if value_to_replace == 0 or index == 0 or index == segments.shape[0] - 1:
arr[segment[0]:segment[1]] = value_to_replace
else:
pre_segment_len = segments[index - 1][1] - segments[index - 1][0]
post_segment_len = segments[index + 1][1] - segments[index + 1][0]
if post_segment_len > filter_size and pre_segment_len > filter_size:
arr[segment[0]:segment[1]] = value_to_replace
return arr
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
20887,
1330,
2604,
16345,
11201,
198,
11748,
42287,
628,
198,
4299,
2651,
62,
1891,
904,
7,
297,
82,
11,
491,
11,
20966,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220... | 2.260337 | 1,306 |
from typing import List
from pytest import fixture
from pytest_bdd import scenarios, given, when, then
from pytest_bdd.parsers import parse
from game import Game
from puzzle import HintType, Puzzle
from dictionary import Dictionary
@fixture(scope="session")
scenarios("")
@given(
parse('a puzzle where the correct answer is "{solution}"'),
target_fixture="game",
)
@given(
parse('the hints [{hint_words}]'),
converters={
'hint_words': lambda s: [h.strip() for h in s.split(',')]
},
)
@when(
parse('the player guesses a non-word "{word}"'),
target_fixture='guess'
)
@when(
parse('the player guesses "{word}"'),
target_fixture='guess'
)
@given(
parse('the game does {won} say the player has won'),
converters = {
'won': lambda s: {'': True, 'not': False}[s]
}
)
@then(
parse('the player sees a {won} message.'),
converters = {
'won': lambda s: {'win': True, 'lose': False}[s]
}
)
@then(
parse('the hint does {is_registered} get registered in the list of guesses.'),
converters = {
'is_registered': lambda s: {'yes': True, 'not': False}[s]
}
)
@then(
parse('the hint for "{hint_word}" shows that letters [{indices}] are {hint_type}'),
converters={
'indices': lambda ss: ([int(ee) for ee in ss.split(',')] if ss != 'None' else []),
'hint_type': {
'correct': HintType.CORRECT,
'in the wrong position': HintType.WRONG_PLACE,
'not present': HintType.NOT_PRESENT,
}.get
}
)
@given(
parse('the current round is {round}'),
converters={'round': int},
)
@then(
parse('the current round is {round}'),
converters={'round': int},
) | [
6738,
19720,
1330,
7343,
198,
198,
6738,
12972,
9288,
1330,
29220,
198,
6738,
12972,
9288,
62,
65,
1860,
1330,
13858,
11,
1813,
11,
618,
11,
788,
198,
6738,
12972,
9288,
62,
65,
1860,
13,
79,
945,
364,
1330,
21136,
198,
198,
6738,
9... | 2.389344 | 732 |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 11:27:49 2020
@author: cmccurley
"""
"""
***********************************************************************
* File: Out_of_Sample.py
*
* Desc: This file contains code for out-of-sample embedding methods.
*
* Written by: Connor H. McCurley
*
* Latest Revision: 2020-04-16
*
**********************************************************************
"""
######################################################################
######################### Import Packages ############################
######################################################################
# General packages
import numpy as np
import scipy.io
import random
import math
from itertools import combinations, combinations_with_replacement
from scipy.spatial import distance_matrix
from numpy import linalg as LA
from sklearn.neighbors import kneighbors_graph
from cvxopt import solvers, matrix
######################################################################
####################### Function Definitions #########################
######################################################################
def lse(A, b, B, d):
    """
    Equality-constrained least squares.

    Minimizes ||Ax - b|| subject to the constraint Bx = d, following
    Golub & Van Loan, "Matrix Computations", algorithm 12.1.2.

    Parameters
    ----------
    A : array-like, shape=[m, n]
    B : array-like, shape=[p, n]
    b : array-like, shape=[m]
    d : array-like, shape=[p]

    Examples
    --------
    >>> A = np.array([[0, 1], [2, 3], [3, 4.5]])
    >>> b = np.array([1, 1])
    >>> # equality constraint: ||x|| = 1.
    >>> B = np.ones((1, 3))
    >>> d = np.ones(1)
    >>> lse(A.T, b, B, d)
    array([-0.5,  3.5, -2. ])
    """
    from scipy import linalg

    # Older scipy releases shipped only linalg.solve.
    solve_triangular = getattr(linalg, 'solve_triangular', linalg.solve)

    A, b, B, d = (np.asanyarray(arr) for arr in (A, b, B, d))
    num_constraints = B.shape[0]

    # QR-factor the constraint matrix; solve for the constrained component.
    Q, R = linalg.qr(B.T)
    y = solve_triangular(R[:num_constraints, :num_constraints].T, d)

    # Solve the unconstrained component in the orthogonal complement.
    A = np.dot(A, Q)
    residual = b - np.dot(A[:, :num_constraints], y)
    z = linalg.lstsq(A[:, num_constraints:], residual)[0].ravel()

    return np.dot(Q[:, :num_constraints], y) + np.dot(Q[:, num_constraints:], z)
def unmix_cvxopt(data, endmembers, gammaConst=0, P=None):
    """
    ******************************************************************
    Estimate the abundance proportions of each endmember for every pixel
    by solving one quadratic program (QP) per pixel with CVXOPT.

    Syntax: P2 = unmix(data, endmembers, gammaConst, P)

    This product is Copyright (c) 2013 University of Missouri and University
    of Florida
    All rights reserved.

    CVXOPT's solvers.qp is used here; its parameters P, q, G, h, A, b
    correspond to our H, F, G, h, Aeq, beq respectively. The element-wise
    lower/upper bound constraints (lb, ub) are folded into G and h.

    Inputs:
        data        = DxN matrix of N data points of dimensionality D
        endmembers  = DxM matrix of M endmembers with D spectral bands
                      (endmembers are column vectors)
        gammaConst  = Gamma constant for the sparsity-promoting (SPT) term
        P           = NxM matrix of abundances corresponding to N input
                      pixels and M endmembers (optional warm start /
                      sparsity weighting; defaults to uniform 1/M)

    Returns:
        P2 = NxM matrix of new abundances corresponding to N input pixels
             and M endmembers, clipped to be non-negative
    ******************************************************************
    """
    solvers.options['show_progress'] = False
    X = data
    M = endmembers.shape[1]  # number of endmembers # endmembers should be column vectors
    N = X.shape[1]  # number of pixels
    # Equality constraint Aeq*x = beq:
    # abundances must sum to 1 (X1+X2+...+XM = 1)
    Aeq = np.ones((1, M))
    beq = np.ones((1, 1))
    # Boundary constraints ub >= x >= lb:
    # every abundance lies in [0, 1] (0 <= X1, 0 <= X2, ..., 0 <= XM)
    lb = 0
    ub = 1
    g_lb = np.eye(M) * -1
    g_ub = np.eye(M)

    # import pdb; pdb.set_trace()
    # Stack the bound constraints into a single inequality G*x <= h.
    G = np.concatenate((g_lb, g_ub), axis=0)
    h_lb = np.ones((M, 1)) * lb
    h_ub = np.ones((M, 1)) * ub
    h = np.concatenate((h_lb, h_ub), axis=0)

    if P is None:
        P = np.ones((M, 1)) / M

    # Per-endmember gamma weights for the sparsity term in the linear cost.
    gammaVecs = np.divide(gammaConst, sum(P))

    # Quadratic term of the QP objective: 2 * E^T E (shared by all pixels).
    H = 2 * (endmembers.T @ endmembers)
    cvxarr = np.zeros((N,M))
    for i in range(N):
        # Linear term for pixel i: -2 x_i^T E plus the sparsity weights.
        F = ((np.transpose(-2 * X[:, i]) @ endmembers) + gammaVecs).T
        cvxopt_ans = solvers.qp(P=matrix(H.astype(np.double)), q=matrix(F.astype(np.double)), G=matrix(G.astype(np.double)), h=matrix(h.astype(np.double)), A=matrix(Aeq.astype(np.double)), b=matrix(beq.astype(np.double)))
        cvxarr[i, :] = np.array(cvxopt_ans['x']).T

    # Clamp tiny negative values produced by solver tolerance.
    cvxarr[cvxarr < 0] = 0
    return cvxarr
def embed_out_of_sample(X_train, X_manifold, X_out, K, beta, neighbor_measure):
    """
    ******************************************************************
    *
    * Func: embed_out_of_sample(X_train, X_manifold, X_out, K, beta, neighbor_measure)
    *
    * Desc: Embeds out-of-sample points into lower-dimensional space.
    *       Each point is reconstructed as a constrained least-squares
    *       combination of its k nearest training points; the same
    *       weights are then applied to the training embeddings.
    *
    * Inputs:
    *   X_train - NxD matrix of training data coordinates
    *   X_manifold - NxK matrix of low-dimensional training data coordinates
    *   X_out - MxD data matrix of out-of-sample points
    *   K - dimensionality of embedding space
    *   beta - bandwidth of RBF affinity function
    *   neighbor_measure - number of neighbors to consider in k-NN graph
    *
    * Outputs:
    *   Z_out - MxK data matrix of embedded out of sample points
    *
    ******************************************************************
    """

    print("\nEmbedding out of sample data...")

    num_out_sample = np.shape(X_out)[0]  # number of out-of-sample points
    Z_out = np.zeros((num_out_sample, K))  # embedded coordinates, filled below

    ##### Affinity of out-of-sample with training set #####
    print("Computing affinity matrices...")

    # Pairwise L2 distances between out-of-sample and training points.
    W_neighbors = distance_matrix(X_out, X_train, p=2)

    # NOTE(review): this RBF affinity is computed but never used below;
    # kept for parity with the original code -- confirm whether the
    # reconstruction was meant to be affinity-weighted.
    W_total = np.exp((-1 / beta) * (W_neighbors ** 2))

    print("Embedding out-of-sample points...")
    for idx in range(0, num_out_sample):

        ## Indices of nearest neighbors according to L2 distance
        ## (argpartition puts the neighbor_measure smallest first, unordered).
        valid_ind = np.argpartition(W_neighbors[idx, :], neighbor_measure)

        ##### Find reconstruction weights of current out of sample, NO bias #####
        X_recon = X_train[valid_ind[0:neighbor_measure], :].T
        x_current = X_out[idx, :].astype(np.double)
        # BUG FIX: the original wrote "X_recon - X_recon.astype(np.double)",
        # a discarded expression; the cast must be assigned back.
        X_recon = X_recon.astype(np.double)
        w_recon = unmix_cvxopt(np.expand_dims(x_current, axis=1), X_recon,
                               gammaConst=0, P=None)
        w_recon = np.squeeze(w_recon)

        ## Embed sample as reconstruction of low-dimensional training embeddings.
        Z_recon = X_manifold[valid_ind[0:neighbor_measure], :].T
        Z_out[idx, :] = np.dot(Z_recon, w_recon)

    print('Done!')

    return Z_out
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2758,
1467,
1367,
25,
1983,
25,
2920,
12131,
198,
198,
31,
9800,
25,
12067,
535,
333,
1636,
198,
37811,
198,
198,
37811,
198,
17174,
17174... | 2.47168 | 3,125 |
# Copyright 2021 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from datetime import timedelta
import pandas as pd
import pytest
from pytest_lazyfixture import lazy_fixture
from testcontainers.core.container import DockerContainer
from testcontainers.core.waiting_utils import wait_for_logs
from feast import Feature, FileSource, RequestSource
from feast.data_format import ParquetFormat
from feast.entity import Entity
from feast.errors import FeatureViewNotFoundException
from feast.feature_view import FeatureView
from feast.field import Field
from feast.infra.registry_stores.sql import SqlRegistry
from feast.on_demand_feature_view import on_demand_feature_view
from feast.repo_config import RegistryConfig
from feast.types import Array, Bytes, Float32, Int32, Int64, String
from feast.value_type import ValueType
POSTGRES_USER = "test"
POSTGRES_PASSWORD = "test"
POSTGRES_DB = "test"
logger = logging.getLogger(__name__)
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.mark.skipif(
sys.platform == "darwin", reason="does not run on mac github actions"
)
@pytest.mark.parametrize(
"sql_registry", [lazy_fixture("mysql_registry"), lazy_fixture("pg_registry")],
)
@pytest.mark.skipif(
sys.platform == "darwin", reason="does not run on mac github actions"
)
@pytest.mark.parametrize(
"sql_registry", [lazy_fixture("mysql_registry"), lazy_fixture("pg_registry")],
)
@pytest.mark.skipif(
sys.platform == "darwin", reason="does not run on mac github actions"
)
@pytest.mark.parametrize(
"sql_registry", [lazy_fixture("mysql_registry"), lazy_fixture("pg_registry")],
)
@pytest.mark.skipif(
sys.platform == "darwin", reason="does not run on mac github actions"
)
@pytest.mark.parametrize(
"sql_registry", [lazy_fixture("mysql_registry"), lazy_fixture("pg_registry")],
)
@pytest.mark.parametrize(
"request_source_schema",
[[Field(name="my_input_1", dtype=Int32)], {"my_input_1": ValueType.INT32}],
)
@pytest.mark.skipif(
sys.platform == "darwin", reason="does not run on mac github actions"
)
@pytest.mark.integration
@pytest.mark.parametrize(
"sql_registry", [lazy_fixture("mysql_registry"), lazy_fixture("pg_registry")],
)
| [
2,
15069,
33448,
383,
42936,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
74... | 3.010917 | 916 |
""" Module providing unit-testing for the `~halotools.mock_observables.angular_tpcf` function.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.tests.helper import pytest
from astropy.utils.misc import NumpyRNGContext
from ..angular_tpcf import angular_tpcf
from ....utils import sample_spherical_surface
from ....custom_exceptions import HalotoolsError
slow = pytest.mark.slow
__all__ = ('test_angular_tpcf1', )
fixed_seed = 43
| [
37811,
19937,
4955,
4326,
12,
33407,
329,
262,
4600,
93,
14201,
313,
10141,
13,
76,
735,
62,
672,
3168,
2977,
13,
21413,
62,
34788,
12993,
63,
2163,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
... | 3.226415 | 159 |
# Traffic flow
#
# Copyright (c) 2018 Yurii Khomiak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import convertor as conv
import generator as gen
import physics as phys

# General constants
number_of_vehicles = 500
number_of_lanes = 2
road_interval = conv.km_to_m(3.5)  # 3.5 km converted to meters; original comment said "км/год" (km/h) -- likely a copy-paste slip, TODO confirm units
unit_length = 25
vehicle_length = 2.0  # (m)
# Capacity: total lane length divided by the length of one vehicle.
max_number_of_vehicles = (road_interval*number_of_lanes) / vehicle_length
average_number_of_vehicles = max_number_of_vehicles * 0.5

# Time constants
number_of_time_stamps = 100
time_step = 2.0  # (s)
time_stamps = gen.generate_time_stamps(number_of_time_stamps, time_step)

# Speed constants
mean_speed = conv.km_per_h_to_m_per_sec(55.0)  # (km/h converted to m/s)
speed_deviation = conv.km_per_h_to_m_per_sec(10.0)  # (km/h converted to m/s)

# Density constants
max_density = phys.max_density(number_of_lanes, vehicle_length)
tabled_density_values = gen.generate_tabled_density_values(max_density)

# Statistical constants
significance_level = 0.05
number_of_paramaters = 2
| [
2,
23624,
5202,
198,
2,
198,
2,
15069,
357,
66,
8,
2864,
575,
333,
4178,
5311,
12753,
461,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428,
3788,
290,
391... | 2.723901 | 728 |
from onto.context import Context as CTX
CTX.load()
engine = CTX.services.engine
from onto.view import Mediator
# class TodoMediatorLc(Mediator):
#
# from onto.source.leancloud import hook
# from onto.sink.json_rpc import sink
#
# src = hook('Todo')
# snk = sink(uri=f'{JSONRPC_URI}/todo')
#
# @src.triggers.after_save
# def call_after_save_rpc(self, ref, snapshot):
# self.snk.emit('after_save', ref=str(ref), snapshot=snapshot)
# #
# # @src.triggers.before_save
# # def fb_before_todo_save(self, ref, snapshot):
# # from onto.database.firestore import FirestoreReference
# # CTX.dbs.firestore.set(ref=FirestoreReference.from_str(str(ref)), snapshot=snapshot)
# # raise ValueError(f"{str(ref)} {str(snapshot)}")
#
# @classmethod
# def start(cls):
# cls.src.start()
| [
6738,
4291,
13,
22866,
1330,
30532,
355,
16356,
55,
198,
198,
4177,
55,
13,
2220,
3419,
198,
198,
18392,
796,
16356,
55,
13,
30416,
13,
18392,
628,
198,
6738,
4291,
13,
1177,
1330,
2019,
38585,
628,
198,
2,
1398,
309,
24313,
9921,
3... | 2.258575 | 379 |
"""
Work with folders
---------------------
Please also see the entry on files. After files, folders are the other fundamental operating system primitive users might find themselves working with. The Flyte IDL's support of folders take the form of `multi-part blobs <https://github.com/lyft/flyteidl/blob/cee566b2e6e109120f1bb34c980b1cfaf006a473/protos/flyteidl/core/types.proto#L50>`__.
"""
import pathlib
import os
import urllib.request
import cv2
import flytekit
from flytekit import task, workflow
from flytekit.types.directory import FlyteDirectory
# %%
# Playing on the same example used in the File chapter, this first task downloads a bunch of files into a directory,
# and then returns a Flyte object referencing them.
default_images = [
"https://upload.wikimedia.org/wikipedia/commons/a/a8/Fractal_pyramid.jpg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/a/ad/Julian_fractal.jpg/256px-Julian_fractal.jpg",
]
# %%
# This task downloads the two files above using non-Flyte libraries, and returns the path to the folder, in a FlyteDirectory object.
@task
# %%
# Purely Python function, no Flyte components here.
def rotate(local_image: str):
    """
    Rotate the image stored at ``local_image`` by 180 degrees, in place.
    """
    img = cv2.imread(local_image, 0)
    if img is None:
        raise Exception("Failed to read image")
    height, width = img.shape[:2]
    pivot = (width / 2, height / 2)
    rotation = cv2.getRotationMatrix2D(pivot, 180, 1)
    rotated = cv2.warpAffine(img, rotation, (width, height))
    cv2.imwrite(local_image, rotated)
# %%
# This task accepts the previously downloaded folder, and calls the rotate function above on each. Since the rotate function does the image manipulation in place, we just create a new FlyteDirectory object pointed to the same place.
@task
def rotate_all(img_dir: FlyteDirectory) -> FlyteDirectory:
    """
    Rotate every image inside ``img_dir`` by 180 degrees, in place.
    """
    for entry in os.listdir(img_dir):
        rotate(os.path.join(img_dir, entry))
    return FlyteDirectory(path=img_dir.path)
@workflow
if __name__ == "__main__":
print(f"Running {__file__} main...")
print(f"Running main {download_and_rotate()}")
| [
37811,
198,
12468,
351,
24512,
198,
19351,
12,
198,
198,
5492,
635,
766,
262,
5726,
319,
3696,
13,
2293,
3696,
11,
24512,
389,
262,
584,
7531,
5361,
1080,
20049,
2985,
1244,
1064,
2405,
1762,
351,
13,
383,
13575,
660,
4522,
43,
338,
... | 2.918421 | 760 |
"""Analyzes the contents of ntuple dicts, track property dicts, and
value lists.
Do things like get the efficiency of an ntuple dict, bin values and
take a measure on each set of binned values, and create custom value
lists that wouldn't be found in the original ntuple.
Also contains a function for finding the error of a prediction given
the prediction and the real values. This can be a prediction as per
efficiency of track finding or a prediction by an ML model.
"""
from . import operations as ndops
from .operations import select as sel
from numpy import linspace
from math import sqrt
from statistics import stdev
def get_proportion_selected(val_list, selector, norm=True):
    """Proportion (or count) of values in val_list passing selector.

    Returns zero when the value list is empty.

    Args:
        val_list: a list of values of a track property, such as
            tp_pt or trk_chi2rphi.
        selector: a condition the values can satisfy, for example
            "lambda trk_eta: trk_eta <= 2.4".
        norm: if True (the default), divide the number of values
            meeting the condition by the total number of values;
            if False, return the raw count.

    Returns:
        Either the count or the proportion of values meeting the
        condition, depending on norm; 0 for an empty list.
    """
    if not val_list:
        return 0
    passing = sum(selector(value) for value in val_list)
    if norm:
        return float(passing) / len(val_list)
    return passing
def make_bins(bin_specifier, binning_values):
    """Normalize a bin specifier into an explicit list of bin edges.

    ``bin_specifier`` may be:
      - an int: that many evenly spaced bins spanning the min/max of
        ``binning_values``;
      - a 3-tuple ``(low_bound, high_bound, num_bins)``: evenly spaced
        bins between the two bounds;
      - a list of numbers: returned as-is, its last element being the
        upper edge of the last bin.

    Args:
        bin_specifier: int, 3-tuple, or list as described above.
        binning_values: values forming the basis for the bins (used
            only when bin_specifier is an int).

    Returns:
        A list of bin edges, one longer than the number of bins.

    Raises:
        ValueError: if bin_specifier is not an int, tuple, or list.
    """
    spec = bin_specifier
    if isinstance(spec, int):
        # Span the full range of the values being binned.
        spec = (min(binning_values), max(binning_values), spec)
    if isinstance(spec, tuple):
        edge_spec = list(spec)
        edge_spec[2] += 1  # one more edge than there are bins
        spec = list(linspace(*edge_spec))
    if isinstance(spec, list):
        return spec
    raise ValueError("Expected int, tuple, or list as arg 'bin_specifier', "
                     "but received {}.".format(str(spec)))
def take_measure_by_bin(track_prop_dict, bin_property, measure, bins=30):
    """Bin a track properties dict by one of its properties, then apply
    a measure to the tracks falling in each bin.

    For example, track_prop_dict could be of tracking particles and
    contain nmatch, and the measure could be eff_from_track_prop_dict.

    Args:
        track_prop_dict: a track properties dict.
        bin_property: a property in track_prop_dict used to split it
            into bins; preferably continuous-valued.
        measure: a function taking a track properties dict and
            returning a (value, error) pair.
        bins: an int, a (low, high, num_bins) 3-tuple, or a list of
            edges -- see make_bins() for details.

    Returns:
        The bin edges, the per-bin measure values, and the per-bin
        errors.
    """
    bin_edges = make_bins(bins, track_prop_dict[bin_property])

    bin_heights = []
    bin_errs = []
    # Each bin selects values in [low_edge, high_edge); the upper edge
    # itself is excluded. Edge values are bound via lambda defaults so
    # each cut captures its own bin.
    for low_edge, high_edge in zip(bin_edges[:-1], bin_edges[1:]):
        cut = {bin_property:
               lambda val, lo=low_edge, hi=high_edge: lo <= val < hi}
        height, err = measure(ndops.cut_track_prop_dict(track_prop_dict, cut))
        bin_heights.append(height)
        bin_errs.append(err)

    return bin_edges, bin_heights, bin_errs
def pred_error(domain_size, num_selected):
    """Error of a prediction given the size of the domain and the
    number of correct predictions in it.

    Returns 0 whenever the formula would divide by zero (empty domain,
    or every / no element selected)."""
    try:
        spread = num_selected * (1 - (num_selected / domain_size))
        return 1 / (domain_size * sqrt(spread))
    except ZeroDivisionError:
        return 0
def eff_from_ntuple_dict(ntuple_dict, tp_selector_dict=None):
    """Finds the efficiency of an ntuple dict and its standard deviation.

    Thin wrapper: delegates to eff_from_track_prop_dict on the tracking
    particle ("tp") entry of the ntuple dict. Restrictions can be made
    on the tracking particles by performing a cut on the ntuple. Note
    that the ntuple must contain pt.

    Args:
        ntuple_dict: an ntuple dictionary containing a tracking
            particle track property dict under the "tp" key.
        tp_selector_dict: a dictionary from tp properties
            ("pt", "eta", etc.) to conditions (lambda pt: pt < 2, etc.).

    Returns:
        A tuple containing the efficiency of the tracking algorithm for
        the tracks in the given ntuple dict and the standard deviation.
    """

    return eff_from_track_prop_dict(ntuple_dict["tp"], tp_selector_dict)
def eff_from_track_prop_dict(track_prop_dict_tp, selector_dict=None):
    """Finds the efficiency with prediction error of a track properties
    dict.

    Restrictions can be made on the tracking particles by performing a
    cut. Note that the track properties dictionary must be of tracking
    particles. A track counts as matched when its "nmatch" entry is
    nonzero.

    Args:
        track_prop_dict_tp: a track properties dict carrying value
            lists from tracking particles.
        selector_dict: a dictionary from tp properties
            ("pt", "eta", etc.) to conditions (lambda pt: pt < 2, etc.).
            If None, no cut is applied.

    Returns:
        A tuple containing the efficiency of the tracking algorithm for
        the tracks in the given dict and its prediction error. Returns
        (0, 0) if no tracks survive the cut.
    """
    # BUG FIX: the original only assigned the cut dict inside the
    # "selector_dict is not None" branch, raising NameError when no
    # selector was given.
    if selector_dict is not None:
        track_prop_dict_tp = ndops.cut_track_prop_dict(
            track_prop_dict_tp, selector_dict)
    num_tps = ndops.track_prop_dict_length(track_prop_dict_tp)
    if num_tps == 0:
        # Avoid division by zero on an empty (or fully cut) dict.
        return 0, 0
    num_matched_tps = num_tps - track_prop_dict_tp["nmatch"].count(0)

    return num_matched_tps / num_tps, pred_error(num_tps, num_matched_tps)
class StubInfo(object):
    """Converts eta and hitpattern into data about stubs for a single
    track.

    The only directly accessible info from this class are boolean
    lists, all of which are indexed by layer/disk:

    - indices 0 - 5 in the lists correspond to layers 1 - 6.
    - indices 6 - 10 in the list correspond to disks 1 - 5.

    Any information you could want about stubs can be found from these
    three lists, sum(), map(), and lambda.
    For example, to find the number of missing 2S layers:

        def missing_2S_layers(stub_info):
            return sum(map(lambda expected, hit, ps_2s:
                not ps_layer and expected and not hit,
                stub_info.get_expected(),
                stub_info.get_hit(),
                stub_info.get_ps_2s()))

    Down below, there are convenience functions process_stub_info and
    basic_process_stub_info for processing instances of this class.

    Note that these definitions are in accordance with the expected and
    missed definitions in the TrackTrigger's Kalman filter used to
    originally create hitpattern. One consequence of this is that there
    will never be a hit stub that was not expected.
    """

    def __init__(self, eta, hitpattern):
        """Stores expected, hit, and PS (False for 2S) as tuples of
        boolean values. Only abs(eta) is used, so the sign of eta is
        irrelevant."""

        # Order matters: _gen_hit reads self._expected.
        self._gen_expected(abs(eta))
        self._gen_hit(hitpattern)
        self._gen_ps_2s(abs(eta))

    def _gen_expected(self, abseta):
        """Sets a tuple of boolean values indicating whether the
        Kalman filter expects a hit on a layer/disk for some absolute
        eta. If eta is greater than 2.4, the list will be all False.

        Args:
            abseta: the absolute value of a pseudorapidity measurement
        """

        # eta regions for and indices of expected layers/disks;
        # region i spans [eta_regions[i], eta_regions[i+1]] and maps to
        # layer_maps[i] (1-based layer/disk numbers).
        eta_regions = [0., 0.2, 0.41, 0.62, 0.9, 1.26, 1.68, 2.08, 2.4]
        num_layers_disks = 11
        layer_maps = [[1, 2, 3, 4, 5, 6],
                      [1, 2, 3, 4, 5, 6],
                      [1, 2, 3, 4, 5, 6],
                      [1, 2, 3, 4, 5, 6],
                      [1, 2, 3, 4, 5, 6],
                      [1, 2, 3, 7, 8, 9, 10],
                      [1, 2, 8, 9, 10, 11],
                      [1, 7, 8, 9, 10, 11]]

        expected_layers = []
        # First matching region wins (break), so a boundary value falls
        # into the lower region.
        for eta_low, eta_high, layer_map in zip(
                eta_regions[:-1],
                eta_regions[1:],
                layer_maps):
            if eta_low <= abseta <= eta_high:
                expected_layers = layer_map
                break

        # Convert 1-based layer numbers into an 11-slot boolean mask.
        self._expected = tuple(map(lambda index: index + 1 in expected_layers,
                                   range(num_layers_disks)))

    def _gen_hit(self, hitpattern):
        """Generates a tuple of the same form as the expected hits tuple
        using the hitpattern variable and the expected hits list. Each
        True value in this list represents a hit. The _gen_expected()
        method must be run first.

        Args:
            hitpattern: a number that, when in base two, corresponds to
                a list of zeroes or ones that indicate whether each
                layer in a set of six or seven expected layers were hit.
        """

        def gen_hits_iter(hitpattern, num_expected):
            """Return an iterator through hitpattern by converting it
            into a list of boolean values, ordered by ascending
            magnitude in the original hitpattern. Falses are included
            at the end of the list until it is the same length as the
            expected number of values (6 or 7)."""

            # bin(...)[-1:1:-1] reverses the binary string and drops the
            # "0b" prefix, yielding least-significant bit first.
            hits_bool = [bool(int(i)) for i in bin(hitpattern)[-1:1:-1]]
            return iter(hits_bool + (num_expected - len(hits_bool)) * [False])

        hits_iter = gen_hits_iter(hitpattern, len(self._expected))
        # Consume one hit bit per expected layer; non-expected layers
        # are always False and do not advance the iterator
        # (short-circuit "and").
        self._hit = tuple(map(lambda expected:
                              expected and next(hits_iter),
                              self.get_expected()))

    def _gen_ps_2s(self, abseta):
        """Generates a tuple indexed by layer for which each boolean
        value represents whether a layer or disk is PS (True) or 2S
        (False). This is necessary because a given disk has PS and 2S
        modules, separated by eta.

        Args:
            abseta: the absolute value of a pseudorapidity measurement
        """

        # Barrel layers: first three are PS, last three are 2S.
        layer_ps_2s = 3 * (True,) + 3 * (False,)
        disk_ps_2s_cuts = [1.45, 1.6, 1.8, 1.975, 2.15]
        # ps above, 2s below
        disk_ps_2s = tuple(map(lambda disk_ps_2s_cut:
                               abseta > disk_ps_2s_cut,
                               disk_ps_2s_cuts))
        self._ps_2s = layer_ps_2s + disk_ps_2s

    def get_expected(self):
        """Returns a list of booleans representing which layers/disks
        were expected to be hit by the Kalman filter."""

        return list(self._expected)

    def get_hit(self):
        """Returns a list of booleans representing which layers/disks
        were hit, within the layers/disks expected by the Kalman
        filter."""

        return list(self._hit)

    def get_ps_2s(self):
        """Returns a list of booleans indexed by layer/disk indicating
        if the layer or disk with that index is PS (True) or 2S
        (False)."""

        return list(self._ps_2s)
def create_stub_info_list(track_prop_dict, process_stub_info):
    """Build a StubInfo per track from eta and hitpattern, then map each
    through a processing function.

    Args:
        track_prop_dict: a track properties dict with track properties
            eta and hitpattern. Must represent either trk or matchtrk,
            as only those have the hitpattern track property.
        process_stub_info: a function or lambda expression that accepts
            StubInfos.

    Returns:
        A list of processed StubInfos indexed by track.
    """
    return [process_stub_info(StubInfo(eta_val, pattern))
            for eta_val, pattern in zip(track_prop_dict["eta"],
                                        track_prop_dict["hitpattern"])]
def basic_process_stub_info(process_layer):
    """Build a StubInfo processing function that is agnostic towards
    layer indices, which covers most use cases.

    For example, a function counting missing 2S layers in a StubInfo:

        basic_process_stub_info(lambda expected, hit, ps_2s:
            not ps_2s and expected and not hit)

    Args:
        process_layer: a function from a single layer's expected bool,
            hit bool, and ps/2s bool (in that order) to a boolean.

    Returns:
        A function accepting a StubInfo that counts the layers for
        which process_layer returns True.
    """
    def count_matching_layers(stub_info):
        # Walk the three parallel per-layer lists in lockstep.
        return sum(map(process_layer,
                       stub_info.get_expected(),
                       stub_info.get_hit(),
                       stub_info.get_ps_2s()))

    return count_matching_layers
| [
37811,
37702,
12271,
262,
10154,
286,
299,
83,
29291,
8633,
82,
11,
2610,
3119,
8633,
82,
11,
290,
198,
8367,
8341,
13,
198,
198,
5211,
1243,
588,
651,
262,
9332,
286,
281,
299,
83,
29291,
8633,
11,
9874,
3815,
290,
198,
20657,
257,... | 2.510431 | 5,656 |
import os
from os import listdir
import matplotlib.pyplot as plt
import numpy as np
plots = {}
plotnames_x = []
plotnames_y = []
plotnames_rot = []


def _read_values(filename):
    """Read one float per line from a text file (closes the file,
    unlike the original open() without close())."""
    with open(filename, 'r') as txt:
        return [float(line) for line in txt.readlines()]


# Collect pose traces from the .txt dumps in the current directory,
# grouped by suffix (_x / _y / _rot).
for file in listdir():
    if file.endswith('_x.txt'):
        name = file[:-4]
        plotnames_x.append(name)
        plots[name] = _read_values(file)
    elif file.endswith('_y.txt'):
        name = file[:-4]
        plotnames_y.append(name)
        plots[name] = _read_values(file)
    elif file.endswith('_rot.txt'):
        name = file[:-4]
        plotnames_rot.append(name)
        # Wrap angles below -100 degrees into [0, 360) so the rotation
        # trace stays continuous.
        plots[name] = [v if v > -100 else 360 + v
                       for v in _read_values(file)]

# Common x axis: one sample index per odometry reading.
time = range(len(plots['odom_x']))
# First two IMU rotation samples are unreliable; zero them out.
plots['imu_rot'][0:2] = [0, 0]


def _draw_figure(fig_num, names, title):
    """Plot every named trace on one figure with legend and grid."""
    plt.figure(fig_num)
    for name in names:
        plt.plot(time, plots[name], label=name)
    plt.title(title)
    plt.legend()
    plt.grid()


_draw_figure(1, plotnames_x, 'pose_x')
_draw_figure(2, plotnames_y, 'pose_y')
_draw_figure(3, plotnames_rot, 'pose_rot')

plt.show()
| [
11748,
28686,
198,
6738,
28686,
1330,
1351,
15908,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
489,
1747,
796,
23884,
198,
29487,
14933,
62,
87,
796,
17635,
198,
29487... | 1.92625 | 800 |
"""Methods which send SQL queries to the MariaDB docker database."""
from docker.models.containers import Container, ExecResult
def _get_epidata_db_size(container: Container) -> ExecResult:
    """
    Query the size of each schema in the MariaDB instance, in megabytes.

    Parameters
    ----------
    container: Container
        Docker Container object where the MariaDB database is running.

    Returns
    -------
    ExecResult from exec_run, which will be the exit code and the query result as a bytestring.
    """
    size_query = (
        'mysql -uuser -ppass -e '
        '"SELECT table_schema db, sum(data_length + index_length)/1024/1024 size_mb '
        'FROM information_schema.TABLES GROUP BY table_schema ORDER BY table_schema;"'
    )
    return container.exec_run(size_query)
def _get_covidcast_rows(container: Container) -> ExecResult:
    """
    Query the row count of the epidata.covidcast table.

    Parameters
    ----------
    container: Container
        Docker Container object where the MariaDB database is running.

    Returns
    -------
    ExecResult from exec_run, which will be the exit code and the query result as a bytestring.
    """
    count_query = (
        'mysql -uuser -ppass -e '
        '"SELECT count(*) FROM epidata.covidcast;"'
    )
    return container.exec_run(count_query)
def _clear_cache(container: Container) -> ExecResult:
    """
    Clear MariaDB cache so query times can be measured independently.

    https://mariadb.com/kb/en/query-cache/#emptying-and-disabling-the-query-cache

    Parameters
    ----------
    container: Container
        Docker Container object where the MariaDB database is running.

    Returns
    -------
    ExecResult from exec_run, which will be the exit code and any output. No output means
    the command was successful.
    """
    flush_command = 'mysql -uroot -ppass -e "FLUSH TABLES; RESET QUERY CACHE;"'
    return container.exec_run(flush_command)
def _clear_db(container: Container) -> ExecResult:
    """
    Clear tables and cache so the covidcast tables and caches are reset.

    Deletes all rows from the covidcast data and metadata tables, then runs
    _clear_cache() so the flushed state is what gets returned first.

    Parameters
    ----------
    container: Container
        Docker Container object where the MariaDB database is running.

    Returns
    -------
    2-Tuple of ExecResults from exec_run, which will be the exit code and any output.
    No output means the command was successful. The first entry of the tuple is the ExecResult of
    _clear_cache() and the second entry will be the ExecResult from the table clearing query.
    """
    delete_command = (
        'mysql -uroot -ppass -e '
        '"USE epidata; '
        'DELETE FROM covidcast; '
        'DELETE FROM covidcast_meta_cache;"'
    )
    # Tables are cleared first; the cache flush runs afterwards inside the
    # returned-tuple construction, matching the documented return order.
    tables_result = container.exec_run(delete_command)
    return _clear_cache(container), tables_result
| [
37811,
46202,
543,
3758,
16363,
20743,
284,
262,
14200,
11012,
36253,
6831,
526,
15931,
198,
6738,
36253,
13,
27530,
13,
3642,
50221,
1330,
43101,
11,
8393,
23004,
628,
198,
4299,
4808,
1136,
62,
538,
312,
1045,
62,
9945,
62,
7857,
7,
... | 2.961702 | 940 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GetNatGatewayResult',
'AwaitableGetNatGatewayResult',
'get_nat_gateway',
]
@pulumi.output_type
class GetNatGatewayResult:
    """
    A collection of values returned by getNatGateway.
    """
    # NOTE: the previous version was missing the `name` and
    # `resource_group_name` getters -- their decorators were stacked directly on
    # the following getters, corrupting those properties. Both fields are read
    # by get_nat_gateway() below, so they are restored here.

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> int:
        """
        The idle timeout in minutes which is used for the NAT Gateway.
        """
        return pulumi.get(self, "idle_timeout_in_minutes")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The location where the NAT Gateway exists.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the NAT Gateway.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="publicIpAddressIds")
    def public_ip_address_ids(self) -> Sequence[str]:
        """
        A list of existing Public IP Address resource IDs which the NAT Gateway is using.
        """
        return pulumi.get(self, "public_ip_address_ids")

    @property
    @pulumi.getter(name="publicIpPrefixIds")
    def public_ip_prefix_ids(self) -> Sequence[str]:
        """
        A list of existing Public IP Prefix resource IDs which the NAT Gateway is using.
        """
        return pulumi.get(self, "public_ip_prefix_ids")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> str:
        """
        The name of the Resource Group where the NAT Gateway exists.
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The Resource GUID of the NAT Gateway.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter(name="skuName")
    def sku_name(self) -> str:
        """
        The SKU used by the NAT Gateway.
        """
        return pulumi.get(self, "sku_name")

    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        A mapping of tags assigned to the resource.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def zones(self) -> Sequence[str]:
        """
        A list of Availability Zones which the NAT Gateway exists in.
        """
        return pulumi.get(self, "zones")
# pylint: disable=using-constant-test
def get_nat_gateway(name: Optional[str] = None,
                    public_ip_address_ids: Optional[Sequence[str]] = None,
                    public_ip_prefix_ids: Optional[Sequence[str]] = None,
                    resource_group_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNatGatewayResult:
    """
    Use this data source to access information about an existing NAT Gateway.

    :param str name: Specifies the Name of the NAT Gateway.
    :param Sequence[str] public_ip_address_ids: A list of existing Public IP Address resource IDs which the NAT Gateway is using.
    :param Sequence[str] public_ip_prefix_ids: A list of existing Public IP Prefix resource IDs which the NAT Gateway is using.
    :param str resource_group_name: Specifies the name of the Resource Group where the NAT Gateway exists.
    """
    # Invoke arguments keyed by the provider's camelCase property names.
    __args__ = {
        'name': name,
        'publicIpAddressIds': public_ip_address_ids,
        'publicIpPrefixIds': public_ip_prefix_ids,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke('azure:network/getNatGateway:getNatGateway', __args__, opts=opts, typ=GetNatGatewayResult).value

    return AwaitableGetNatGatewayResult(
        id=invoke_result.id,
        idle_timeout_in_minutes=invoke_result.idle_timeout_in_minutes,
        location=invoke_result.location,
        name=invoke_result.name,
        public_ip_address_ids=invoke_result.public_ip_address_ids,
        public_ip_prefix_ids=invoke_result.public_ip_prefix_ids,
        resource_group_name=invoke_result.resource_group_name,
        resource_guid=invoke_result.resource_guid,
        sku_name=invoke_result.sku_name,
        tags=invoke_result.tags,
        zones=invoke_result.zones)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.429484 | 1,879 |
# Abdulnaser Sheikh
# https://www.linkedin.com/in/abdulnasersheikh/
# ZIP password cracker
# This script iterates through a user provided dictionary and finds the password for the encrypted
import zipfile
'''
zipfile has a method call extractall().
extractall(self, path=None, members=None, pwd=None)
Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
'''
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this file -- running the
    # script as-is raises NameError. Presumably the dictionary-attack driver
    # (iterating candidate passwords and calling ZipFile.extractall(pwd=...))
    # was meant to be defined above; confirm against the original source.
    main()
2,
23547,
77,
6005,
30843,
201,
198,
2,
3740,
1378,
2503,
13,
25614,
259,
13,
785,
14,
259,
14,
397,
67,
377,
77,
19865,
258,
13848,
14,
201,
198,
201,
198,
2,
42977,
9206,
8469,
263,
201,
198,
201,
198,
2,
770,
4226,
11629,
689... | 3.106383 | 188 |
import os
import sys
import soundfile as sf
import librosa

# LibriSpeech speaker and chapter IDs selecting the source utterances.
id1, id2 = 61, 70968
# sys.argv[1] is expected to be the LibriSpeech root directory.
input_dir = os.path.join(sys.argv[1], "test-clean", str(id1), str(id2))
output_dir = os.path.join("..", "test_data")
# Transcript file named "<speaker>-<chapter>.trans.txt" inside the chapter dir.
transcript_file = os.path.join(input_dir, "%d-%d.trans.txt" % (id1, id2))
output_file = os.path.join(output_dir, "transcript.txt")
sample_rate = 16000  # target sampling rate in Hz

os.makedirs(output_dir, exist_ok=True)
with open(transcript_file, 'rt') as f:
    with open(output_file, 'wt') as outf:
        for line in f:
            # Each transcript line is "<utterance-id> <TEXT>".
            name, _, text = line.rstrip('\r\n').partition(" ")
            text = text.lower()
            audio_file = os.path.join(input_dir, name + ".flac")
            wav_file = os.path.join(output_dir, name + ".wav")
            x, orig_sample_rate = sf.read(audio_file)
            assert x.ndim == 1  # mono audio expected
            # NOTE(review): positional orig_sr/target_sr arguments were removed
            # in librosa 0.10 -- confirm the installed librosa accepts this call.
            x = librosa.resample(x, orig_sample_rate, sample_rate)
            print("Writing %s..." % (wav_file,))
            # Output manifest format: "<file>.wav|<lower-cased transcript>".
            outf.write("%s.wav|%s\n" % (name, text))
            sf.write(wav_file, x, samplerate=sample_rate, subtype="PCM_16")
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
2128,
7753,
355,
264,
69,
198,
11748,
9195,
4951,
64,
198,
198,
312,
16,
11,
4686,
17,
796,
8454,
11,
767,
2931,
3104,
198,
15414,
62,
15908,
796,
28686,
13,
6978,
13,
22179,
7,
17597,
13... | 2.087824 | 501 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import uuid
import rospy
import cv2
import os
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from pyuwds3.reasoning.detection.foreground_detector import ForegroundDetector
from pyuwds3.reasoning.tracking.multi_object_tracker import MultiObjectTracker, iou_cost, centroid_cost
# Default queue size for ROS sensor topic subscribers/publishers.
DEFAULT_SENSOR_QUEUE_SIZE = 10

if __name__ == "__main__":
    rospy.init_node("object_recorder", anonymous=False)
    # NOTE(review): ObjectRecorderNode is not defined in this chunk --
    # presumably defined earlier in the file; confirm, otherwise this raises
    # NameError at runtime.
    recorder = ObjectRecorderNode().run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
11748,
334,
27112,
198,
11748,
686,
2777,
88,
198,
11748,
269,
85,
17,
198,
11748,
28686,
198,
6738,
269,
85,
62,
9458,
... | 2.680628 | 191 |
try:
from collections.abc import Sequence as Sequence_co, Callable as Callable_co
except ImportError:
from collections import Sequence as Sequence_co, Callable as Callable_co
from datetime import datetime
from typing import (
Any,
Callable,
ClassVar,
Collection,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
# Prefer the standard-library Literal (Python 3.8+), falling back to the
# typing_extensions backport. The previous fallback re-imported from
# typing_extensions itself (`_Literal as Literal`), so it could never succeed
# when the first import failed.
try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal
import pytest
from runtime_type_checker import check_type, check_types
from runtime_type_checker.utils import get_func_type_hints
from .fixtures import (
T_bound,
T_constraint,
MyClass,
MyDerived,
NewList,
NewString,
my_func,
MyTpDict,
MyGeneric,
MyGenericImpl,
PYTHON_38,
PYTHON_39,
ListOfString,
DictOfStringToInt,
)
# Markers to skip tests that exercise features introduced in newer interpreters.
skip_before_3_8 = pytest.mark.skipif(not PYTHON_38, reason="feature exists only in python 3.8")
# Fixed copy-paste defect: this marker guards 3.9-only features, so the reason
# string now says 3.9 instead of 3.8.
skip_before_3_9 = pytest.mark.skipif(not PYTHON_39, reason="feature exists only in python 3.9")
@pytest.mark.parametrize(
"type_or_hint, instance, raises",
[
pytest.param(Any, None, False, id="any"),
pytest.param(None, None, False, id="none"),
pytest.param(type(None), None, False, id="none__type"),
pytest.param(Optional[int], 1, False, id="optional"),
pytest.param(Optional[int], None, False, id="optional__none_value"),
pytest.param(Union[int, str], "a", False, id="union"),
pytest.param(Union[int, str], 3.1, True, id="union__wrong_val"),
pytest.param(Union[List[str], Mapping[str, int]], ["a", "b"], False, id="union__nested"),
pytest.param(Union[List[str], Mapping[str, int]], {"a": "a"}, True, id="union__nested_wrong_item"),
pytest.param(Tuple, tuple(), False, id="tuple__no_subscription"),
pytest.param(Tuple, (3,), False, id="tuple__no_subscription"),
pytest.param(Tuple[int], (3,), False, id="tuple__single_type"),
pytest.param(Tuple[int], ("a",), True, id="tuple__wrong_type"),
pytest.param(Tuple[int], (3, 2), True, id="tuple__wrong_length"),
pytest.param(Tuple[int, str], (3, "a"), False, id="tuple_variadic"),
pytest.param(Tuple[int, str], (3, 4), True, id="tuple_variadic__wrong_type"),
pytest.param(Tuple[int, str], (3, "a", "b"), True, id="tuple_variadic__wrong_length"),
pytest.param(Tuple[int, ...], tuple(), False, id="tuple_ellipsis__empty"),
pytest.param(Tuple[int, ...], (3, 4, 5), False, id="tuple_ellipsis__values"),
pytest.param(Tuple[int, ...], (3, "a"), True, id="tuple_ellipsis__wrong_type"),
pytest.param(Mapping[str, int], {"a": 1}, False, id="mapping__abstract"),
pytest.param(Dict[str, int], {"a": 1}, False, id="mapping__concrete"),
pytest.param(Dict, {"a": 1}, False, id="mapping__non_parametrized"),
pytest.param(Dict, {"a", 1}, True, id="mapping__non_parametrized_wrong_type"),
pytest.param(dict, {"a": 1}, False, id="mapping__plain"),
pytest.param(DictOfStringToInt, {"a": 1}, False, id="mapping__generic_w_concrete", marks=skip_before_3_9),
pytest.param(
DictOfStringToInt, {"a": "a"}, True, id="mapping__generic_w_concrete_wrong", marks=skip_before_3_9
),
pytest.param(Dict[str, int], {1: 1}, True, id="mapping__wrong_key"),
pytest.param(Dict[str, int], {"a": "a"}, True, id="mapping__wrong_key"),
pytest.param(Collection[str], frozenset(["a", "b"]), False, id="collection__abstract"),
pytest.param(Collection[str], frozenset(), False, id="collection__abstract_no_item"),
pytest.param(Sequence[str], ("a", "b", "c"), False, id="collection__tuple"),
pytest.param(Sequence_co, ["a", "b"], False, id="collection__concrete_sequence"),
pytest.param(List[str], ["a", "b", "c"], False, id="collection__concrete"),
pytest.param(List[str], {"a", "b"}, True, id="collection__wrong_type"),
pytest.param(List[str], ["a", 1, "b"], True, id="collection__wrong_item"),
pytest.param(List[List["MyClass"]], [[MyClass()]], False, id="collection__nested"),
pytest.param(List, ["a", 1], False, id="collection__non_parametrized"),
pytest.param(list, ["a", 1], False, id="collection__plain"),
pytest.param(ListOfString, ["a", "b"], False, id="collection__generic_w_concrete", marks=skip_before_3_9),
pytest.param(ListOfString, ["a", 1], True, id="collection__generic_w_concrete_wrong", marks=skip_before_3_9),
pytest.param(T_bound, datetime(2020, 1, 1), False, id="type_variable__bound_date"),
pytest.param(T_bound, "2020__01__01", False, id="type_variable__bound_str"),
pytest.param(T_bound, 1, True, id="type_variable__bound_int"),
pytest.param(T_constraint, datetime(2020, 1, 1), False, id="type_variable__constraint_date"),
pytest.param(T_constraint, None, False, id="type_variable__constraint_none"),
pytest.param(T_constraint, 1, False, id="type_variable__int"),
pytest.param(T_constraint, None, False, id="type_variable__none"),
pytest.param(T_constraint, [1], True, id="type_variable__none"),
pytest.param("int", 1, False, id="forward_reference__literal"),
pytest.param("MyClass", MyClass(), False, id="forward_reference__class"),
pytest.param(Optional["MyClass"], None, False, id="forward_reference__optional"),
pytest.param(NewString, NewString("1"), False, id="new_type"),
pytest.param(str, NewString("1"), False, id="new_type__string"),
pytest.param(NewList, NewList(["1"]), False, id="new_type__nested"),
pytest.param(ClassVar[int], MyClass.t, False, id="ClassVar"),
pytest.param(Type[int], int, False, id="type"),
pytest.param(Type[int], 1, True, id="type__wrong_argument"),
pytest.param(Type["MyClass"], MyClass, False, id="type__forward_ref"),
pytest.param(Type[Union[List[str], Mapping[str, int]]], list, False, id="type__nested_union"),
pytest.param(Callable[[int], int], lambda x: 1, False, id="callable"),
pytest.param(Callable_co, lambda x: 1, False, id="callable__concrete"),
pytest.param(MyClass, MyClass(2, ("a", "c"), MyClass()), False, id="class"),
pytest.param(MyDerived, MyDerived(2, d=0), False, id="class__inherited"),
pytest.param(MyClass, MyDerived(), False, id="class__inherited_from_base"),
pytest.param(MyClass, 1, True, id="class__wrong_type"),
pytest.param(MyTpDict, {"a": "a", "b": MyClass()}, False, id="typed_dict", marks=skip_before_3_8),
pytest.param(
MyTpDict, {"a": "a", "b": MyClass(), "c": 1}, True, id="typed_dict__extra_key", marks=skip_before_3_8
),
pytest.param(MyTpDict, {"a": "a"}, True, id="typed_dict__too_few_keys", marks=skip_before_3_8),
pytest.param(MyTpDict, {"a": "a", "b": 2}, True, id="typed_dict__wrong_val_type", marks=skip_before_3_8),
pytest.param(MyGeneric[str], MyGeneric("a"), False, id="generic__concrete"),
pytest.param(MyGeneric, MyGeneric("a"), False, id="generic__concrete_no_typevar"),
pytest.param(Literal[1, 2, 3], 1, False, id="literal"),
pytest.param(Literal[1, 2, 3], 4, True, id="literal__wrong_val"),
pytest.param(Literal[1, 2, 3], "1", True, id="literal__wrong_type"),
],
)
@pytest.mark.parametrize(
"func, expected",
[
pytest.param(lambda: 1, {"return": Any}, id="empty"),
pytest.param(
my_func,
{
"a": Any,
"args": Sequence[str],
"b": int,
"c": Optional[MyClass],
"d": str,
"kwargs": Mapping[str, float],
"return": int,
},
id="full_function",
),
],
)
@pytest.mark.parametrize(
"kls, args, kwargs, raises",
[
pytest.param(MyDerived, tuple(), {}, False, id="no_args"),
pytest.param(MyDerived, ("a",), {}, True, id="wrong_arg"),
pytest.param(MyDerived, tuple(), {"c": MyClass(c=MyClass())}, False, id="forward_ref__ok"),
pytest.param(MyDerived, tuple(), {"c": MyClass(c=MyClass("a")), "d": "str"}, True, id="forward_ref__wrong"),
pytest.param(MyGeneric, ("1",), {}, False, id="generic"),
pytest.param(MyGeneric, (1,), {}, True, id="generic__wrong_args"),
pytest.param(MyGenericImpl, ("1",), {}, False, id="generic_impl"),
pytest.param(MyGenericImpl, (1,), {}, True, id="generic_impl__wrong_args"),
],
)
@pytest.mark.parametrize(
"func, args, kwargs, raises",
[
pytest.param(my_func, ("a", 1, None, "x", "y"), {"n": 1.1}, False, id="all_args"),
pytest.param(my_func, ("a", 1, MyClass(), 1), {}, True, id="wrong_vararg"),
pytest.param(my_func, ("a", 1), {"x": 1}, True, id="wrong_kwarg"),
pytest.param(lambda x: 1, ("a",), {}, False, id="lambda"),
pytest.param(MyClass().my_method, (1,), {}, False, id="method"),
pytest.param(MyClass().my_method, ("a",), {}, True, id="method__wrong_arg"),
pytest.param(MyClass.my_class_method, (1,), {}, False, id="class_method"),
pytest.param(MyClass.my_class_method, ("a",), {}, True, id="class_method__wrong_arg"),
pytest.param(MyClass.my_static_method, (1,), {}, False, id="static_method"),
pytest.param(MyClass.my_static_method, ("a",), {}, True, id="static_method__wrong_arg"),
],
)
| [
28311,
25,
198,
220,
220,
220,
422,
17268,
13,
39305,
1330,
45835,
355,
45835,
62,
1073,
11,
4889,
540,
355,
4889,
540,
62,
1073,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
17268,
1330,
45835,
355,
45835,
62,
1073,
11,
4... | 2.275442 | 4,186 |
"""
entradas
salidas
"""
i=0
l=[]
for i in range(1,101,2):
if(i%7!=0):
l.append(i)
continue
print(l)
| [
37811,
198,
298,
6335,
292,
220,
198,
198,
82,
10751,
292,
198,
198,
37811,
198,
72,
28,
15,
198,
75,
28,
21737,
198,
1640,
1312,
287,
2837,
7,
16,
11,
8784,
11,
17,
2599,
198,
220,
220,
220,
611,
7,
72,
4,
22,
0,
28,
15,
25... | 1.631579 | 76 |
"""Image normalization related functions"""
import numpy as np
import sys
from skimage.exposure import equalize_adapthist
def zscore(input_image, im_mean=None, im_std=None):
    """
    Performs z-score normalization. Adds epsilon in denominator for robustness

    :param np.array input_image: input image for intensity normalization
    :param float/None im_mean: Image mean (computed from the image if None)
    :param float/None im_std: Image std (computed from the image if None)
    :return np.array norm_img: z score normalized image
    """
    # `is None` instead of truthiness: a caller-supplied mean/std of 0.0 is a
    # legitimate override and must not trigger recomputation.
    if im_mean is None:
        im_mean = np.nanmean(input_image)
    if im_std is None:
        im_std = np.nanstd(input_image)
    # np.float64(...) accepts both plain Python floats and numpy scalars;
    # the previous im_mean.astype(...) crashed on plain floats.
    norm_img = (input_image - np.float64(im_mean)) /\
        (im_std + sys.float_info.epsilon)
    return norm_img
def unzscore(im_norm, zscore_median, zscore_iqr):
    """
    Invert the z-score normalization applied during preprocessing. Necessary
    before computing SSIM.

    :param im_norm: Normalized image for un-zscore
    :param zscore_median: Image median
    :param zscore_iqr: Image interquartile range
    :return im: image at its original scale
    """
    # Epsilon mirrors the one added in zscore(), so the round trip cancels.
    scale = zscore_iqr + sys.float_info.epsilon
    return im_norm * scale + zscore_median
def hist_clipping(input_image, min_percentile=2, max_percentile=98):
    """Clip image intensities to the given percentile range.

    Wraps np.clip with an input sanity check on the percentile bounds.

    :param np.array input_image: input image for intensity normalization
    :param int/float min_percentile: min intensity percentile
    :param int/float max_percentile: max intensity percentile
    :return: np.float, intensity clipped image
    """
    assert min_percentile < max_percentile and max_percentile <= 100
    lower, upper = np.percentile(input_image, (min_percentile, max_percentile))
    return np.clip(input_image, lower, upper)
def hist_adapteq_2D(input_image, kernel_size=None, clip_limit=None):
    """CLAHE (contrast limited adaptive histogram equalization) on 2D images.

    skimage.exposure.equalize_adapthist works only for 2D. Extend to 3D or use
    openCV? Not ideal, as it enhances noise in homogeneous areas.

    :param np.array input_image: input image for intensity normalization
    :param int/list kernel_size: Neighbourhood to be used for histogram
        equalization. If none, use default of 1/8th image size.
    :param float clip_limit: Clipping limit, normalized between 0 and 1
        (higher values give more contrast, ~ max percent of voxels in any
        histogram bin, if > this limit, the voxel intensities are redistributed).
        if None, default=0.01
    """
    height, width = input_image.shape
    # Validate the kernel specification before handing off to skimage.
    if kernel_size is not None:
        if isinstance(kernel_size, (list, tuple)):
            assert len(kernel_size) == len(input_image.shape)
        elif isinstance(kernel_size, int):
            assert kernel_size < min(height, width)
        else:
            raise ValueError('kernel size invalid: not an int / list / tuple')
    if clip_limit is not None:
        assert 0 <= clip_limit <= 1, \
            "Clip limit {} is out of range [0, 1]".format(clip_limit)
    return equalize_adapthist(
        input_image, kernel_size=kernel_size, clip_limit=clip_limit
    )
| [
37811,
5159,
3487,
1634,
3519,
5499,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
6738,
1341,
9060,
13,
1069,
26205,
1330,
4961,
1096,
62,
324,
499,
400,
396,
628,
198,
4299,
1976,
26675,
7,
15414,
62,
9060,
11,
... | 2.739574 | 1,175 |
#!/usr/bin/python
#coding=utf-8
import sys
import pexpect
import random
if __name__ == '__main__':
    # Python 2 syntax throughout (print statements, `except Exception, e`);
    # this script will not run under Python 3.
    try:
        # NOTE(review): changepassword() is not defined in this chunk --
        # presumably defined above; it appears to take a username and a sudo
        # password (the literals below are Chinese placeholders meaning
        # "username" and "sudo password") and return the new password. Confirm.
        newPassword = changepassword('用户名','sudo 密码')
        print "IP:xxx.xxx.xxx.xxx"
        print "Port:端口"
        print "UserName:用户名"
        print "NewPassword: %s" %(newPassword)
    except Exception,e:
        # Best-effort error report; 9999 presumably signals failure to the
        # caller parsing stdout -- TODO confirm.
        print(str(e))
        print 9999
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
66,
7656,
28,
40477,
12,
23,
198,
198,
11748,
25064,
198,
11748,
613,
87,
806,
198,
11748,
4738,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
... | 1.92228 | 193 |
import logging
import datetime
from src.summarize_schedules.calendar_summarizer import GoogleCalendarSummarizer
| [
11748,
18931,
198,
11748,
4818,
8079,
198,
198,
6738,
12351,
13,
16345,
3876,
1096,
62,
1416,
704,
5028,
13,
9948,
9239,
62,
16345,
3876,
7509,
1330,
3012,
9771,
9239,
13065,
3876,
7509,
628,
628
] | 3.411765 | 34 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from os.path import join
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
from gym.spaces.box import Box
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
18931,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
11748,
299,
32152,
355,
45941,
... | 3.852941 | 68 |
# Generated by Django 2.1.5 on 2019-04-24 17:05
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
20,
319,
13130,
12,
3023,
12,
1731,
1596,
25,
2713,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python3
import sys
from datetime import datetime
from pathlib import Path
from PyQt5.QtCore import QSize
from PyQt5.QtWidgets import (QAction, QApplication, QDialog, QFileDialog, QGridLayout, QLabel,
QLineEdit, QMainWindow, QPushButton, QTableWidget, QTableWidgetItem, qApp)
from vcards import Vcard
from vcf_parser import export_ab, import_ab
# Maps short vCard field names to the fully qualified property names used in
# the exported/imported vCard files.
MAIN = {'EMAIL': 'EMAIL;TYPE=HOME,INTERNET', 'TEL': 'TEL;TYPE=CELL', 'X-JABBER': 'X-JABBER'}

if __name__ == '__main__':
    app = QApplication(sys.argv)
    # NOTE(review): Contacts (the main window class) is not defined in this
    # chunk -- presumably defined above; confirm.
    ex = Contacts()
    sys.exit(app.exec_())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1195,
10699,
198,
6738,
... | 2.393574 | 249 |
from ..util import register, circle
from PIL import Image
import os
# Absolute directory of this plugin module; used to locate bundled assets.
plugin_dir = os.path.dirname(os.path.abspath(__file__))
@register(["ori", "拥抱光明", "奥日", "奥里"], "制作Ori拥抱光明图", '''\
/ori - 机器人拥抱光明
/ori <对方> - 对方拥抱光明
可以使用头像,也可以使用图片链接''')
| [
6738,
11485,
22602,
1330,
7881,
11,
9197,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
28686,
198,
198,
33803,
62,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
198,
19... | 1.422619 | 168 |
#!/usr/bin/env python
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
from pkg_resources import resource_filename
try:
from BurstCube.bcSim import simFile
except ImportError:
pass
try:
from BurstCube.bcSim import simFiles
except ImportError:
pass
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
# Don't need since files are installed in package
# def test_setPath():
# from BurstCube.utils import setPath
# assert(not setPath())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
6738,
6468,
28338,
13,
41989,
13,
2978,
525,
1330,
12972,
9288,
198,
6738,
279,
1... | 2.983516 | 182 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# xuehaiyang: xuehaiyang@sogou-inc.com
#
"""
Eval Test
"""
import sys
import os
import threading
if __name__ == "__main__":
checkpoint_path = "/search/odin/haiyang/fairseq_exp/e2e_trans/fairseq/demo/cp"
# item = ["asr_baseline_lr03_noise02/checkpoint17.pt",
# "asr_our_fuen_03_noise02/checkpoint17.pt","asr_our_fuen_03_noise_char/checkpoint6.pt",
# "asr_our_fuen_03_noise_char_mgpu/checkpoint6.pt","asr_our_fuen_03_noise_char_mgpu/checkpoint16.pt",
# "asr_our_fuen_03_noiseall_mgpu/checkpoint6.pt"]
item = ["multi_gpu_fuen_noiseall_alpha0.3/checkpoint4.pt",
"multi_gpu_fuen_noiseall_alpha0.5/checkpoint4.pt", "multi_gpu_fuen_noiseall_alpha0.7/checkpoint4.pt"
]
# "asr_our_fuen_03_noiseall_mgpu/checkpoint3.pt"
# "asr_our_fuen_03_noiseall_mgpu/checkpoint6.pt"
gpu_ids = ["0", "1", "2", "3", "4", "5", "6", "7"]
tasks = [ "audio_translation", "audio_translation", "audio_translation", "audio_translation", "audio_translation","audio_translation","audio_translation"]
decode_file(checkpoint_path, item, gpu_ids, tasks)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
198,
2,
2124,
518,
44488,
17859,
25,
2124,
518,
44488,
17859,
31,
82,
519,
280,
12,
1939,
13,
785,
198,
2,
198,
198... | 2.118613 | 548 |
import html4Symbols

# HTML4 elements that were removed in HTML5.
REMOVED_SYMBOLS = ['acronym','applet','basefont','big','center','dir','font','frame','frameset','noframes','strike','tt']

# Elements newly introduced by HTML5.
NEW_ELEMENTS = [
    'canvas',
    'audio' ,
    'video',
    'source',
    'embed',
    'track',
    'datalist', #Specifies a list of pre-defined options for input controls
    'keygen', #Defines a key-pair generator field (for forms)
    'output', #Defines the result of a calculation
    'article', #Defines an article
    'aside',#Defines content aside from the page content
    'bdi',#Isolates a part of text that might be formatted in a different direction from other text outside it
    'command',#Defines a command button that a user can invoke
    'details',#Defines additional details that the user can view or hide
    'dialog',#Defines a dialog box or window
    'summary',#Defines a visible heading for a 'details' element
    'figure',#Specifies self-contained content, like illustrations, diagrams, photos, code listings, etc.
    'figcaption',#Defines a caption for a 'figure' element
    'footer',#Defines a footer for a document or section
    'header',#Defines a header for a document or section
    'mark',#Defines marked/highlighted text
    'meter',#Defines a scalar measurement within a known range (a gauge)
    'nav',#Defines navigation links
    'progress',#Represents the progress of a task
    'ruby',#Defines a ruby annotation (for East Asian typography)
    'rt',#Defines an explanation/pronunciation of characters (for East Asian typography)
    'rp',#Defines what to show in browsers that do not support ruby annotations
    'section',#Defines a section in a document
    'time',#Defines a date/time
    'wbr'#Defines a possible line-break
]

# Derived HTML5 symbol tables: start from the HTML4 tables, drop removed
# elements, and (where applicable) append the HTML5 additions.
# NOTE(review): diff() is not defined or imported in this chunk -- presumably a
# list-difference helper available at module scope; confirm.
CLOSING_TAGS = diff(html4Symbols.CLOSING_TAGS,REMOVED_SYMBOLS) + NEW_ELEMENTS
LINE_BREAK_AFTER = diff(html4Symbols.LINE_BREAK_AFTER,REMOVED_SYMBOLS) + NEW_ELEMENTS
NON_CLOSING_TAGS = diff(html4Symbols.NON_CLOSING_TAGS,REMOVED_SYMBOLS)
ONE_LINE = diff(html4Symbols.ONE_LINE,REMOVED_SYMBOLS)
11748,
27711,
19,
13940,
2022,
10220,
198,
198,
40726,
8874,
1961,
62,
23060,
10744,
3535,
50,
796,
37250,
330,
1313,
4948,
41707,
1324,
1616,
41707,
8692,
10331,
41707,
14261,
41707,
16159,
41707,
15908,
41707,
10331,
41707,
14535,
41707,
... | 3.089869 | 612 |
import os
import shutil
import time
from flask_apscheduler import APScheduler
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from PIL import Image
import gdown
from flask import Flask, render_template, request, redirect, flash, send_from_directory
from werkzeug.utils import secure_filename
# Maps the classifier's output class index (0-41) to a human-readable
# "Crop: Condition" label.
# NOTE(review): the ordering must match the label encoding used when the
# Keras model was trained — confirm against the training pipeline.
disease_map = {
    0: 'Apple: Apple Scab',
    1: 'Apple: Black Rot',
    2: 'Apple: Cedar Rust',
    3: 'Apple: Healthy',
    4: 'Blueberry: Healthy',
    5: 'Cherry: Powdery Mildew',
    6: 'Cherry: Healthy',
    7: 'Corn (Maize): Grey Leaf Spot',
    8: 'Corn (Maize): Common Rust of Maize',
    9: 'Corn (Maize): Northern Leaf Blight',
    10: 'Corn (Maize): Healthy',
    11: 'Grape: Black Rot',
    12: 'Grape: Black Measles (Esca)',
    13: 'Grape: Leaf Blight (Isariopsis Leaf Spot)',
    14: 'Grape: Healthy',
    15: 'Orange: Huanglongbing (Citrus Greening)',
    16: 'Peach: Bacterial spot',
    17: 'Peach: Healthy',
    18: 'Bell Pepper: Bacterial Spot',
    19: 'Bell Pepper: Healthy',
    20: 'Potato: Early Blight',
    21: 'Potato: Late Blight',
    22: 'Potato: Healthy',
    23: 'Raspberry: Healthy',
    24: 'Rice: Brown Spot',
    25: 'Rice: Hispa',
    26: 'Rice: Leaf Blast',
    27: 'Rice: Healthy',
    28: 'Soybean: Healthy',
    29: 'Squash: Powdery Mildew',
    30: 'Strawberry: Leaf Scorch',
    31: 'Strawberry: Healthy',
    32: 'Tomato: Bacterial Spot',
    33: 'Tomato: Early Blight',
    34: 'Tomato: Late Blight',
    35: 'Tomato: Leaf Mold',
    36: 'Tomato: Septoria Leaf Spot',
    37: 'Tomato: Spider Mites (Two-spotted Spider Mite)',
    38: 'Tomato: Target Spot',
    39: 'Tomato: Yellow Leaf Curl Virus',
    40: 'Tomato: Mosaic Virus',
    41: 'Tomato: Healthy'
}
# Maps each label from `disease_map` to a three-item list of display text:
# [overview, symptoms, reference URL].
# NOTE(review): for "Healthy" classes the three slots instead hold generic
# care messages ('...healthy...', 'Healthy Crops', 'Just take care...') —
# confirm how the template consumes slot 2/3 in that case.
details_map = {
    'Apple: Apple Scab': [
        'A serious disease of apples and ornamental crabapples, apple scab (Venturia inaequalis) attacks both leaves and fruit. The fungal disease forms pale yellow or olive-green spots on the upper surface of leaves. Dark, velvety spots may appear on the lower surface. Severely infected leaves become twisted and puckered and may drop early in the summer.',
        'Symptoms on fruit are similar to those found on leaves. Scabby spots are sunken and tan and may have velvety spores in the center. As these spots mature, they become larger and turn brown and corky. Infected fruit becomes distorted and may crack allowing entry of secondary organisms. Severely affected fruit may drop, especially when young.',
        'https://www.planetnatural.com/pest-problem-solver/plant-disease/apple-scab'],
    'Apple: Black Rot': [
        'Black rot is occasionally a problem on Minnesota apple trees. This fungal disease causes leaf spot, fruit rot and cankers on branches. Trees are more likely to be infected if they are: Not fully hardy in Minnesota, Infected with fire blight or Stressed by environmental factors like drought.',
        'Large brown rotten areas can form anywhere on the fruit but are most common on the blossom end. Brown to black concentric rings can often be seen on larger infections. The flesh of the apple is brown but remains firm. Infected leaves develop "frog-eye leaf spot". These are circular spots with purplish or reddish edges and light tan interiors.',
        'https://extension.umn.edu/plant-diseases/black-rot-apple'],
    'Apple: Cedar Rust': [
        'Cedar apple rust (Gymnosporangium juniperi-virginianae) is a fungal disease that requires juniper plants to complete its complicated two year life-cycle. Spores overwinter as a reddish-brown gall on young twigs of various juniper species. In early spring, during wet weather, these galls swell and bright orange masses of spores are blown by the wind where they infect susceptible apple and crab-apple trees. The spores that develop on these trees will only infect junipers the following year. From year to year, the disease must pass from junipers to apples to junipers again; it cannot spread between apple trees.',
        'On apple and crab-apple trees, look for pale yellow pinhead sized spots on the upper surface of the leaves shortly after bloom. These gradually enlarge to bright orange-yellow spots which make the disease easy to identify. Orange spots may develop on the fruit as well. Heavily infected leaves may drop prematurely.',
        'https://www.planetnatural.com/pest-problem-solver/plant-disease/cedar-apple-rust'],
    'Apple: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Blueberry: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Cherry: Powdery Mildew': [
        'Powdery mildew of sweet and sour cherry is caused by Podosphaera clandestina, an obligate biotrophic fungus. Mid- and late-season sweet cherry (Prunus avium) cultivars are commonly affected, rendering them unmarketable due to the covering of white fungal growth on the cherry surface. Season long disease control of both leaves and fruit is critical to minimize overall disease pressure in the orchard and consequently to protect developing fruit from accumulating spores on their surfaces.',
        'Initial symptoms, often occurring 7 to 10 days after the onset of the first irrigation, are light roughly-circular, powdery looking patches on young, susceptible leaves (newly unfolded, and light green expanding leaves). Older leaves develop an age-related (ontogenic) resistance to powdery mildew and are naturally more resistant to infection than younger leaves. Look for early leaf infections on root suckers, the interior of the canopy or the crotch of the tree where humidity is high.',
        'http://treefruit.wsu.edu/crop-protection/disease-management/cherry-powdery-mildew'],
    'Cherry: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Corn (Maize): Grey Leaf Spot': [
        'Gray leaf spot (GLS) is a common fungal disease in the United States caused by the pathogen Cercospora zeae-maydis in corn. Disease development is favored by warm temperatures, 80°F or 27 °C; and high humidity, relative humidity of 90% or higher for 12 hours or more. Cercospora zeae-maydis overwinters in corn residue, allowing inoculum to build up from year to year in fields. Cropping systems with reduced- or no-till and/or continuous corn are at higher risk for gray leaf spot outbreaks.',
        'Gray leaf spot lesions begin as small necrotic pinpoints with chlorotic halos, these are more visible when leaves are backlit. Coloration of initial lesions can range from tan to brown before sporulation begins. Because early lesions are ambiguous, they are easily confused with other foliar diseases such as anthracnose leaf blight, eyespot, or common rust. As infection progresses, lesions begin to take on a more distinct shape. Lesion expansion is limited by parallel leaf veins, resulting in the blocky shaped “spots”. As sporulation commences, the lesions take on a more gray coloration.',
        'https://www.pioneer.com/us/agronomy/gray_leaf_spot_cropfocus.html'],
    'Corn (Maize): Common Rust of Maize': [
        'Common rust is caused by the fungus Puccinia sorghi. Late occurring infections have limited impact on yield. The fungus overwinters on plants in southern states and airborne spores are wind-blown to northern states during the growing season. Disease development is favored by cool, moist weather (60 – 70◦ F).',
        'Symptoms of common rust often appear after silking. Small, round to elongate brown pustules form on both leaf surfaces and other above ground parts of the plant. As the pustules mature they become brown to black. If disease is severe, the leaves may yellow and die early.',
        'https://fieldcrops.cals.cornell.edu/corn/diseases-corn/common-rust'],
    'Corn (Maize): Northern Leaf Blight': [
        'Northern corn leaf blight caused by the fungus Exerohilum turcicum is a common leaf blight. If lesions begin early (before silking), crop loss can result. Late infections may have less of an impact on yield. Northern corn leaf blight is favored by wet humid cool weather typically found later in the growing season. Spores of the fungus that causes this disease can be transported by wind long distances from infected fields. Spread within and between fields locally also relies on wind blown spores.',
        'The tan lesions of northern corn leaf blight are slender and oblong tapering at the ends ranging in size between 1 to 6 inches. Lesions run parallel to the leaf margins beginning on the lower leaves and moving up the plant. They may coalesce and cover the enter leaf. Spores are produced on the underside of the leaf below the lesions giving the appearance of a dusty green fuzz.',
        'https://fieldcrops.cals.cornell.edu/corn/diseases-corn/northern-corn-leaf-blight'],
    'Corn (Maize): Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Grape: Black Rot': [
        'Black rot is one of the most damaging diseases of grapes. The disease is caused by the fungus Guignardia bidwellii. The fungus can infect the leaves, shoots, berries, tendrils, rachises and cluster stems (peduncles) of grapes. If the disease is not managed early in the season, the impact on grape clusters can be devastating, resulting in complete crop losses.',
        'Disease development is favored by warm and humid weather. Symptoms of black rot first appear as small yellow spots on leaves. Enlarged spots (lesions) have a dark brownish-red border with tan to dark brown centers. As the infection develops, tiny black dots appear in the lesion, usually in a ring pattern near the border of the lesion. These dots are fungal structures (pycnidia), which contain thousands of spores (conidia) that can infect new tissue. New infections can occur in less than 10 hours at temperatures between 60 to 85 degrees Fahrenheit.',
        'https://ohioline.osu.edu/factsheet/plpath-fru-24'],
    'Grape: Black Measles (Esca)': [
        'Grapevine measles, also called esca, black measles or Spanish measles, has long plagued grape growers with its cryptic expression of symptoms and, for a long time, a lack of identifiable causal organism(s). The name "measles" refers to the superficial spots found on the fruit. During the season, the spots may coalesce over the skin surface, making berries black in appearance. Spotting can develop anytime between fruit set and a few days prior to harvest.',
        'Leaf symptoms are characterized by a "tiger stripe" pattern when infections are severe from year to year. Mild infections can produce leaf symptoms that can be confused with other diseases or nutritional deficiencies. White cultivars will display areas of chlorosis followed by necrosis, while red cultivars are characterized by red areas followed by necrosis. Early spring symptoms include shoot tip dieback, leaf discoloration and complete defoliation in severe cases.',
        'https://grapes.extension.org/grapevine-measles'],
    'Grape: Leaf Blight (Isariopsis Leaf Spot)': [
        'Common in tropical and subtropical grapes. The disease appear late in the season. Cynthiana and Cabernet Sauvignon are susceptible to this pathogen.',
        'On leaf surface we will see lesions which are irregularly shaped (2 to 25 mm in diameter). Initially lesions are dull red to brown in color turn black later. If disease is severe this lesions may coalesce. On berries we can see symptom similar to black rot but the entire clusters will collapse.',
        'https://plantvillage.psu.edu/topics/grape/infos'],
    'Grape: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Orange: Huanglongbing (Citrus Greening)': [
        'Huanglongbing (HLB) or citrus greening is the most severe citrus disease, currently devastating the citrus industry worldwide. The presumed causal bacterial agent Candidatus Liberibacter spp. affects tree health as well as fruit development, ripening and quality of citrus fruits and juice. Fruit from infected orange trees can be either symptomatic or asymptomatic. Symptomatic oranges are small, asymmetrical and greener than healthy fruit. Furthermore, symptomatic oranges show higher titratable acidity and lower soluble solids, solids/acids ratio, total sugars, and malic acid levels.',
        'In the early stages of the disease, it is difficult to make a clear diagnosis. McCollum and Baldwin (2017) noted that HLB symptoms are more apparent during cooler seasons, more so than in warmer months. It is uncertain how long a tree can be infected before showing the symptoms of the disease but, when it eventually becomes symptomatic, symptoms appear on different parts of the tree. Infected trees generally develop some canopy thinning, with twig dieback and discolored leaves, which appear in contrast to the other healthy or symptomless parts of the tree.',
        'https://www.frontiersin.org/articles/10.3389/fpls.2018.01976/full'],
    'Peach: Bacterial spot': [
        'Bacterial spot affects peaches, nectarines, apricots, plums, prunes and cherries. The disease is widespread throughout all fruit growing states east of the Rocky Mountains. Bacterial spot can affect leaves, twigs, and fruit. Severe infection results in reduced fruit quality and yield. Fruit infection is most serious on late-maturing varieties. If proper environmental conditions occur, up to 50 percent or more of the fruit of susceptible varieties may have to be discarded.',
        'Small (1/25 to 1/5 inch) spots form in the leaves. Spots are irregular to angular and have a deep purple to rusty-brown or black color. In time, the centers dry and tear away leaving ragged "shot-holes". When several spots merge, the leaf may appear scorched, blighted or ragged. Badly infected leaves may turn yellow and drop early. Early defoliation is most common on trees deficient in nitrogen or where the disease is further complicated by pesticide injury.',
        'https://ohioline.osu.edu/factsheet/plpath-fru-38'],
    'Peach: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Bell Pepper: Bacterial Spot': [
        'Bacterial leaf spot, caused by Xanthomonas campestris pv. vesicatoria, is the most common and destructive disease for peppers in the eastern United States. It is a gram-negative, rod-shaped bacterium that can survive in seeds and plant debris from one season to another. Different strains or races of the bacterium are cultivar-specific, causing disease symptoms in certain varieties due to stringent host specificity. Bacterial leaf spot can devastate a pepper crop by early defoliation of infected leaves and disfiguring fruit.',
        'Disease symptoms can appear throughout the above-ground portion of the plant, which may include leaf spot, fruit spot and stem canker. However, early symptoms show up as water-soaked lesions on leaves that can quickly change from green to dark brown and enlarge into spots that are up to 1/4 inch in diameter with slightly raised margins. Over time, these spots can dry up in less humid weather, which allows the damaged tissues to fall off, resulting in a tattered appearance on the affected leaves.',
        'https://extension.wvu.edu/lawn-gardening-pests/plant-disease/fruit-vegetable-diseases/bacterial-leaf-spot-of-pepper'],
    'Bell Pepper: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Potato: Early Blight': [
        'Common on tomato and potato plants, early blight is caused by the fungus Alternaria solani. Symptoms first appear on the lower, older leaves as small brown spots with concentric rings that form a "bull’s eye" pattern. As the disease matures, it spreads outward on the leaf surface causing it to turn yellow, wither and die. Eventually the stem, fruit and upper portion of the plant will become infected. Crops can be severely damaged.',
        'Early blight overwinters on infected plant tissue and is spread by splashing rain, irrigation, insects and garden tools. The disease is also carried on tomato seeds and in potato tubers. In spite of its name, early blight can occur any time throughout the growing season. High temperatures (80-85˚F.) and wet, humid conditions promote its rapid spread. In many cases, poorly nourished or stressed plants are attacked.',
        'https://www.planetnatural.com/pest-problem-solver/plant-disease/early-blight'],
    'Potato: Late Blight': [
        'Late blight (Phytophthora infestans) fungus is in the same genus as the fungus causing pink rot (P. erythroseptica). Late blight was responsible for the Irish potato famine in the mid-nineteenth century (Daly, 1996). In the late twentieth century, there have been major re-occurrences and concern around the world over this pathogen and its disease due to recent mutations (Fry and Goodwin, 1997). These mutations, most notably strain US-8, have made the pathogen resistant to control by metalaxyl, the stand-by fungicide for many years.',
        'Late blight will first appear as water-soaked spots, usually at the tips or edges of lower leaves where water or dew tends to collect. Under moist, cool conditions, water-soaked spots rapidly enlarge and a broad yellow halo may be seen surrounding the lesion (Mohan et al., 1996). On the leaf underside, a spore-producing zone of white moldy growth approximately 0.1 - 0.2 inches wide may appear at the border of the lesion. Under continuously wet conditions, the disease progresses rapidly and warm, dry weather will slow or stop disease development.',
        'https://cropwatch.unl.edu/potato/late_blights'],
    'Potato: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Raspberry: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Rice: Brown Spot': [
        'Brown Spot is called as sesame leaf spot or Helminthosporiose or fungal blight. The fungus attacks the crop from seedling in nursery to milk stage in main field.',
        'The disease appears first as minute brown dots, later becoming cylindrical or oval to circular (resemble sesame seed). Spots measures 0.5 to 2.0mm in breadth - coalesce to form large patches. Then several spots coalesce and the leaf dries up. Infection also occurs on panicle, neck with brown colour appearance. Seeds also infected (black or brown spots on glumes spots are covered by olivaceous velvety growth)',
        'http://www.agritech.tnau.ac.in/expert_system/paddy/cpdisbrownspot.html'],
    'Rice: Hispa': [
        'The mining of the grubs will be clearly seen on the leaves. Scraping of the upper surface of the leaf blade leaving only the lower epidermis as white streaks parallel to the midrib. Tunneling of larvae through leaf tissue causes irregular translucent white patches that are parallel to the leaf veins. Damaged leaves wither off. Rice field appears burnt when severely infested.',
        'The grub mines into the leaf blade and feed on the green tissue between the veins. Adults also feed in the green tissue; they scrape green matter of the tender leaves. Generally the plants are affected in the young stage.',
        'http://www.agritech.tnau.ac.in/expert_system/paddy/cppests_ricehispa.html'],
    'Rice: Leaf Blast': [
        'Blast, also called rotten neck, is one of the most destructive diseases of Missouri rice. Losses due to this disease have been on the increase since 2000. Blast does not develop every year but is very destructive when it occurs. Rice blast can be controlled by a combination of preventive measures and foliar fungicides applied when rice is in the late boot stage and again when it is 80 to 90 percent headed.',
        'Blast symptoms can occur on leaves, leaf collars, nodes and panicles. Leaf spots are typically elliptical (football shaped), with gray-white centers and brown to red-brown margins. Fully developed leaf lesions are approximately 0.4 to 0.7 inch long and 0.1 to 0.2 inch wide. Both the shape and the color vary depending on the environment, age of the lesion and rice variety. Lesions on leaf sheaths, which rarely develop, resemble those on leaves.',
        'https://extension.missouri.edu/publications/mp645'],
    'Rice: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Soybean: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Squash: Powdery Mildew': [
        'Powdery mildew, mainly caused by the fungus Podosphaera xanthii, infects all cucurbits, including muskmelons, squash, cucumbers, gourds, watermelons and pumpkins. In severe cases, powdery mildew can cause premature death of leaves, and reduce yield and fruit quality.',
        'The first sign of powdery mildew is pale yellow leaf spots. White powdery spots can form on both upper and lower leaf surfaces, and quickly expand into large blotches. The large blotches can cover entire leaf, petiole and stem surfaces. When powdery mildew infects the majority of the foliage, the plant weakens and the fruit ripens prematurely.',
        'https://extension.umn.edu/diseases/powdery-mildew-cucurbits'],
    'Strawberry: Leaf Scorch': [
        'In addition to leaves, leaf scorch (Diplocarpon earlianum) can infect petioles, runners, fruit stalks and berry caps. If unchecked, plants can be significantly weakened reducing the growth of all plant parts. Severely infected plants are weakened and can die from other stresses such as drought or extreme temperatures.',
        'Dark purple, angular to round spots appear on the upper surface of the leaf. As the disease progresses the tissues around these spots turn reddish or purple. In severe cases, the infected area dries to a tan color and the leaf curls upward looking scorched. Lesions remain reddish purple and do not turn tan or gray in the center.',
        'https://extension.umn.edu/fruit/growing-strawberries-home-garden#gray-mold%2C-leaf-blight%2C-leaf-scorch-and-leaf-spot--1008160'],
    'Strawberry: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.'],
    'Tomato: Bacterial Spot': [
        'Bacterial spot can be a devastating disease when the weather is warm and humid. The disease can affect all above-ground parts of tomato and pepper plants: stems, petioles, leaves, and fruits. Fruit spots commonly result in unmarketable fruit, not only for fresh market but also for processing because the spots make the fruit difficult to peel.',
        'Tomato leaves have small (<1/8 inch), brown, circular spots surrounded by a yellow halo. The center of the leaf spots often falls out resulting in small holes. Small, brown, circular spots may also occur on stems and the fruit calyx. Fruit spots are ¼ inch, slightly raised, brown and scabby. Tomato fruit often have a waxy white halo surrounding the fruit spot.',
        'https://extension.umn.edu/diseases/bacterial-spot-tomato-and-pepper'],
    'Tomato: Early Blight': [
        'Early blight is one of the most common tomato diseases, occurring nearly every season wherever tomatoes are grown. It affects leaves, fruits and stems and can be severely yield limiting when susceptible cultivars are used and weather is favorable. Severe defoliation can occur and result in sunscald on the fruit. Early blight is common in both field and high tunnel tomato production in Minnesota.',
        'Initially, small dark spots form on older foliage near the ground. Leaf spots are round, brown and can grow up to half inch in diameter. Larger spots have target-like concentric rings. The tissue around spots often turns yellow. Severely infected leaves turn brown and fall off, or dead, dried leaves may cling to the stem.',
        'https://extension.umn.edu/diseases/early-blight-tomato'],
    'Tomato: Late Blight': [
        'Late blight is a potentially devastating disease of tomato and potato, infecting leaves, stems and fruits of tomato plants. The disease spreads quickly in fields and can result in total crop failure if untreated. Late blight of potato was responsible for the Irish potato famine of the late 1840s.',
        'Leaves have large, dark brown blotches with a green gray edge; not confined by major leaf veins. Infections progress through leaflets and petioles, resulting in large sections of dry brown foliage. Stem infections are firm and dark brown with a rounded edge.',
        'https://extension.umn.edu/diseases/late-blight'],
    'Tomato: Leaf Mold': [
        'Leaf mold is not normally a problem in field-grown tomatoes in northern climates. It can cause losses in tomatoes grown in greenhouses or high tunnels due to the higher humidity found in these environments. Foliage is often the only part of the plant infected and will cause infected leaves to wither and die, indirectly affecting yield. In severe cases, blossoms and fruit can also be infected, directly reducing yield.',
        'The oldest leaves are infected first. Pale greenish-yellow spots, usually less than 1/4 inch, with no definite margins, form on upper sides of leaves. Olive-green to brown velvety mold forms on the lower leaf surface below leaf spots. Leaf spots grow together and turn brown. Leaves wither and die but often remain attached to the plant.',
        'https://extension.umn.edu/diseases/leaf-mold-tomato'],
    'Tomato: Septoria Leaf Spot': [
        'Septoria leaf spot is a very common disease of tomatoes. It is caused by a fungus (Septoria lycopersici) and can affect tomatoes and other plants in the Solanaceae family, especially potatoes and eggplant, just about anywhere in the world. Although Septoria leaf spot is not necessarily fatal for your tomato plants, it spreads rapidly and can quickly defoliate and weaken the plants, rendering them unable to bear fruit to maturity.',
        'Septoria leaf spots start off somewhat circular and first appear on the undersides of older leaves, at the bottom of the plant. They are small, 1/16 to 1/8 inches (1.6 to 3.2 millimeters) in diameter, with a dark brown margin and lighter gray or tan centers. A yellow halo may surround the spot.',
        'https://www.thespruce.com/identifying-and-controlling-septoria-leaf-spot-of-tomato-1402974'],
    'Tomato: Spider Mites (Two-spotted Spider Mite)': [
        'Many species of the spider mite (family: Tetranychidae), so common in North America, attack both indoor and outdoor plants. They can be especially destructive in greenhouses. Spider mites are not true insects, but are classed as a type of arachnid, relatives of spiders, ticks and scorpions.',
        'Spider mites, almost too small to be seen, pass into our gardens without notice. No matter how few, each survives by sucking material from plant cells. Large infestations cause visible damage. Leaves first show patterns of tiny spots or stipplings. They may change color, curl and fall off. The mites activity is visible in the tight webs that are formed under leaves and along stems.',
        'https://www.planetnatural.com/pest-problem-solver/houseplant-pests/spider-mite-control'],
    'Tomato: Target Spot': [
        'Also known as early blight, target spot of tomato is a fungal disease that attacks a diverse assortment of plants, including papaya, peppers, snap beans, potatoes, cantaloupe, and squash as well as passion flower and certain ornamentals. Target spot on tomato fruit is difficult to control because the spores, which survive on plant refuse in the soil, are carried over from season to season.',
        'Target spot on tomato fruit is difficult to recognize in the early stages, as the disease resembles several other fungal diseases of tomatoes. However, as diseased tomatoes ripen and turn from green to red, the fruit displays circular spots with concentric, target-like rings and a velvety black, fungal lesions in the center. The "targets" become pitted and larger as the tomato matures.',
        'https://www.gardeningknowhow.com/edible/vegetables/tomato/target-spot-on-tomatoes.htm'],
    'Tomato: Yellow Leaf Curl Virus': [
        'Tomato yellow leaf curl virus is undoubtedly one of the most damaging pathogens of tomato, and it limits production of tomato in many tropical and subtropical areas of the world. It is also a problem in many countries that have a Mediterranean climate such as California. Thus, the spread of the virus throughout California must be considered as a serious potential threat to the tomato industry.',
        'Infected tomato plants initially show stunted and erect or upright plant growth; plants infected at an early stage of growth will show severe stunting. However, the most diagnostic symptoms are those in leaves.',
        'https://www2.ipm.ucanr.edu/agriculture/tomato/tomato-yellow-leaf-curl'],
    'Tomato: Mosaic Virus': [
        'Tomato mosaic virus (ToMV) and Tobacco mosaic virus (TMV) are hard to distinguish. Tomato mosaic virus (ToMV) can cause yellowing and stunting of tomato plants resulting in loss of stand and reduced yield. ToMV may cause uneven ripening of fruit, further reducing yield.',
        'Mottled light and dark green on leaves. If plants are infected early, they may appear yellow and stunted overall. Leaves may be curled, malformed, or reduced in size. Spots of dead leaf tissue may become apparent with certain cultivars at warm temperatures. Fruits may ripen unevenly. Reduced fruit number and size.',
        'https://extension.umn.edu/diseases/tomato-mosaic-virus-and-tobacco-mosaic-virus'],
    'Tomato: Healthy': [
        'Your crops are healthy. You took good care of it.',
        'Healthy Crops',
        'Just take care of it as you usually do.']
}
# Path to the trained Keras classifier weights (HDF5).  The file is fetched
# from Google Drive on first run so the repository does not ship the model.
MODEL_PATH = 'AgentCropKeras_v1.h5'
if not os.path.exists(MODEL_PATH):
    url = 'https://drive.google.com/uc?id=1JNggWQ9OJFYnQpbsFXMrVu-E-sR3VnCu'
    gdown.download(url, MODEL_PATH, quiet=False)
# Load the model once at import time; prediction handlers reuse `model`.
model = load_model(MODEL_PATH)
# Staging directory for uploaded images; exist_ok avoids the racy
# exists()-then-makedirs() pattern of the original code.
os.makedirs('./static/test', exist_ok=True)
# Flask application setup.
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024 # maximum upload size is 50 MB
# NOTE(review): hard-coded secret key — consider loading it from an
# environment variable before deploying.
app.secret_key = "agentcrop"
# Image types accepted for upload.
ALLOWED_EXTENSIONS = {'png', 'jpeg', 'jpg'}
# NOTE(review): these appear to track per-upload working folders (presumably
# under ./static/test) — confirm against the request handlers, which are not
# visible in this chunk.
folder_num = 0
folders_list = []
# Background scheduler used for periodic cleanup of uploaded files.
scheduler = APScheduler()
scheduler.api_enabled = True
scheduler.init_app(app)
# Adding Interval Job to delete folder
@scheduler.task('interval', id='clean', seconds=1800, misfire_grace_time=900)
scheduler.start()
@app.route('/', methods=['GET', 'POST'])
@app.route('/favicon.ico')
#API requests are handled here
@app.route('/api/predict', methods=['POST']) | [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
640,
198,
6738,
42903,
62,
499,
1416,
704,
18173,
1330,
3486,
50,
1740,
18173,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
41927,
292,
13,
3866,
... | 3.52889 | 8,913 |
# Sanity-check the beta and theta tensors exported from an ETM topic model.
# Fix: `import torch.tensor` relies on a private submodule that was removed
# in modern PyTorch; plain `import torch` is the supported way to get
# torch.sum and friends.
import torch
import joblib
import numpy as np


def _sanity_check(tensor):
    """Print sanity checks for a row-stochastic tensor: its contents, the
    per-row sums (each should be 1 if rows are normalized) and any negative
    entries (there should be none)."""
    print(tensor)
    print(tensor.size())
    row_sum = torch.sum(tensor, 1)
    print(row_sum)  # rows sum to 1, showing they are normalized
    print(row_sum.size())
    arr = tensor.numpy()
    print(arr[arr < 0])  # empty output => the tensor has no negative entries


tensor_dict = joblib.load("TENSORS_FILE")
# beta: obtained from model.get_beta() in the ETM; dimensions appear to be
# K x V (topics x vocabulary) — TODO confirm against the exporting script.
_sanity_check(tensor_dict["beta"])
print("*" * 20)
# Reloaded as in the original script (same file; the reload is redundant but
# harmless and kept for fidelity).
tensor_dict = joblib.load("TENSORS_FILE")
# theta: obtained from model.get_theta(); dimensions appear to be
# D x K (documents x topics) — TODO confirm.
_sanity_check(tensor_dict["theta"])
| [
11748,
28034,
13,
83,
22854,
198,
11748,
1693,
8019,
198,
11748,
299,
32152,
355,
45941,
198,
198,
83,
22854,
62,
11600,
796,
1693,
8019,
13,
2220,
7203,
51,
16938,
20673,
62,
25664,
4943,
220,
198,
4798,
7,
83,
22854,
62,
11600,
1469... | 2.612091 | 397 |
import os
import neptune
from neptunecontrib.versioning.data import log_data_version
from neptunecontrib.api.utils import get_filepaths
from src.features.const import V0_CAT_COLS
from src.utils import read_config, check_env_vars
from src.features.utils import load_and_merge
# Fail fast if the required environment variables are missing.
check_env_vars()
# Experiment configuration file location comes from the environment.
CONFIG = read_config(config_path=os.getenv('CONFIG_PATH'))
neptune.init(project_qualified_name=CONFIG.project)
# Input/output locations taken from the experiment config.
RAW_DATA_PATH = CONFIG.data.raw_data_path
FEATURES_DATA_PATH = CONFIG.data.features_data_path
# Identifier for this feature-set version.
FEATURE_NAME = 'v0'
# Row limit for quick test runs; None reads the full dataset.
NROWS = None
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this chunk — it is
    # presumably defined elsewhere in the original file; confirm before running.
    main()
| [
11748,
28686,
198,
198,
11748,
497,
457,
1726,
198,
6738,
497,
457,
403,
721,
756,
822,
13,
9641,
278,
13,
7890,
1330,
2604,
62,
7890,
62,
9641,
198,
6738,
497,
457,
403,
721,
756,
822,
13,
15042,
13,
26791,
1330,
651,
62,
7753,
6... | 2.71831 | 213 |
"""Support for Goal Zero Yeti Sensors."""
from __future__ import annotations
from typing import cast
from goalzero import Yeti
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_NAME,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
SIGNAL_STRENGTH_DECIBELS,
TEMP_CELSIUS,
TIME_MINUTES,
TIME_SECONDS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import YetiEntity
from .const import DATA_KEY_API, DATA_KEY_COORDINATOR, DOMAIN
# Descriptions of every sensor exposed by a Goal Zero Yeti device.
# `key` matches the field name in the Yeti API payload.
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
    SensorEntityDescription(
        key="wattsIn",
        name="Watts In",
        device_class=SensorDeviceClass.POWER,
        native_unit_of_measurement=POWER_WATT,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key="ampsIn",
        name="Amps In",
        device_class=SensorDeviceClass.CURRENT,
        native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="wattsOut",
        name="Watts Out",
        device_class=SensorDeviceClass.POWER,
        native_unit_of_measurement=POWER_WATT,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key="ampsOut",
        name="Amps Out",
        device_class=SensorDeviceClass.CURRENT,
        native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="whOut",
        name="WH Out",
        device_class=SensorDeviceClass.ENERGY,
        native_unit_of_measurement=ENERGY_WATT_HOUR,
        # Energy delivered only ever grows, hence TOTAL_INCREASING.
        state_class=SensorStateClass.TOTAL_INCREASING,
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="whStored",
        name="WH Stored",
        device_class=SensorDeviceClass.ENERGY,
        native_unit_of_measurement=ENERGY_WATT_HOUR,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    SensorEntityDescription(
        key="volts",
        name="Volts",
        device_class=SensorDeviceClass.VOLTAGE,
        native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
        entity_registry_enabled_default=False,
    ),
    SensorEntityDescription(
        key="socPercent",
        name="State of Charge Percent",
        device_class=SensorDeviceClass.BATTERY,
        native_unit_of_measurement=PERCENTAGE,
    ),
    SensorEntityDescription(
        key="timeToEmptyFull",
        name="Time to Empty/Full",
        # NOTE(review): TIME_MINUTES is a unit-of-measurement constant, not a
        # SensorDeviceClass — confirm this device_class is intended.
        device_class=TIME_MINUTES,
        native_unit_of_measurement=TIME_MINUTES,
    ),
    SensorEntityDescription(
        key="temperature",
        name="Temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        native_unit_of_measurement=TEMP_CELSIUS,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
    SensorEntityDescription(
        key="wifiStrength",
        name="Wifi Strength",
        device_class=SensorDeviceClass.SIGNAL_STRENGTH,
        native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS,
        entity_registry_enabled_default=False,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
    SensorEntityDescription(
        key="timestamp",
        name="Total Run Time",
        native_unit_of_measurement=TIME_SECONDS,
        entity_registry_enabled_default=False,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
    SensorEntityDescription(
        key="ssid",
        name="Wi-Fi SSID",
        entity_registry_enabled_default=False,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
    SensorEntityDescription(
        key="ipAddr",
        name="IP Address",
        entity_registry_enabled_default=False,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up the Goal Zero Yeti sensor."""
    device_name = entry.data[CONF_NAME]
    domain_data = hass.data[DOMAIN][entry.entry_id]
    # Build one YetiSensor entity per description in SENSOR_TYPES.
    entities = []
    for description in SENSOR_TYPES:
        entities.append(
            YetiSensor(
                domain_data[DATA_KEY_API],
                domain_data[DATA_KEY_COORDINATOR],
                device_name,
                description,
                entry.entry_id,
            )
        )
    # Second argument requests an initial state update before adding.
    async_add_entities(entities, True)
class YetiSensor(YetiEntity, SensorEntity):
    """Representation of a Goal Zero Yeti sensor."""
    def __init__(
        self,
        api: Yeti,
        coordinator: DataUpdateCoordinator,
        name: str,
        description: SensorEntityDescription,
        server_unique_id: str,
    ) -> None:
        """Initialize a Goal Zero Yeti sensor.

        api: Yeti API client; coordinator: shared update coordinator;
        name: configured device name; description: static sensor metadata;
        server_unique_id: per-device id used to namespace entity unique_ids.
        """
        super().__init__(api, coordinator, name, server_unique_id)
        # Display name combines the device name with the sensor's own name.
        self._attr_name = f"{name} {description.name}"
        self.entity_description = description
        # Namespacing by device id lets multiple Yeti devices coexist.
        self._attr_unique_id = f"{server_unique_id}/{description.key}"
    @property
    def native_value(self) -> StateType:
        """Return the state."""
        # The description key matches the field name in the Yeti API payload.
        return cast(StateType, self.api.data[self.entity_description.key])
| [
37811,
15514,
329,
25376,
12169,
6430,
72,
14173,
669,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
19720,
1330,
3350,
198,
198,
6738,
3061,
22570,
1330,
6430,
72,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
39... | 2.359264 | 2,391 |
#!/usr/bin/python3
# WallGen v0.2
# Requires the yaml / pyyaml modules.
from util import parser

# Parse the user's command-line arguments (template and rules file paths).
template_filename, rules_filename = parser.arguments()

# Load the firewall rules from the rules directory.
try:
    rules_wall = parser.Get_config(rules_filename)
except Exception as e:
    print(" log error in config parser rules: " + str(e))
    # BUG FIX: previously exit(0) — signalled success to the shell on failure.
    exit(1)

# Load the templates and generate the firewall configuration.
try:
    parser.start_generator(template_filename, rules_wall)
except Exception as e:
    print(" log error in rule generator: " + str(e))
    # BUG FIX: previously exit(0) — signalled success to the shell on failure.
    exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
5007,
13746,
410,
15,
13,
17,
26488,
6,
198,
2,
1639,
299,
67,
331,
43695,
11,
12972,
88,
43695,
13103,
986,
220,
198,
6738,
7736,
1330,
30751,
198,
198,
28243,
62,
34345,
33151,
1... | 2.994898 | 196 |
from html.parser import HTMLParser
from unittest import mock
import pytest
from duffy.app.main import app, init_model
from duffy.exceptions import DuffyConfigurationError
from ..util import noop_context
@pytest.mark.client_auth_as(None)
@pytest.mark.asyncio
| [
6738,
27711,
13,
48610,
1330,
11532,
46677,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
288,
15352,
13,
1324,
13,
12417,
1330,
598,
11,
2315,
62,
19849,
198,
6738,
288,
15352,
13,
1069,
11755,
... | 3.207317 | 82 |
# This program saves a list of strings to a file.
# Call the main function.
# NOTE(review): main() is not defined in this chunk — it must be defined
# earlier in the full file; confirm.
main()
| [
2,
770,
1430,
16031,
257,
1351,
286,
13042,
284,
257,
2393,
13,
201,
198,
201,
198,
2,
4889,
262,
1388,
2163,
13,
201,
198,
12417,
3419,
201,
198
] | 3.142857 | 28 |
#!/usr/bin/env python
import pytest
from sonic_package_manager.database import PackageEntry
from sonic_package_manager.errors import (
PackageNotFoundError,
PackageAlreadyExistsError,
PackageManagerError
)
from sonic_package_manager.version import Version
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
36220,
62,
26495,
62,
37153,
13,
48806,
1330,
15717,
30150,
198,
6738,
36220,
62,
26495,
62,
37153,
13,
48277,
1330,
357,
198,
220,
220,
220,
1... | 3.426829 | 82 |
'''
MIT License
Copyright (c) 2018 Stanford Computational Imaging Lab
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import numpy as np
from numpy.fft import ifftn, fftn
import matplotlib.pyplot as plt
import util.lct as lct
import pickle
from tqdm import tqdm
from scipy.signal import firwin, lfilter
import scipy.signal
import csv
import os
from util.pickle_util import *
import sys
import time
# Use a dark plotting theme for all figures produced by this script.
plt.style.use('dark_background')
# NOTE(review): the comments below appear to describe routines of the
# AcousticNLOSReconstruction class used in __main__, whose definition is
# not visible in this chunk.
# retrieve calibration information from text files for the measurement microphones
# get the low-pass/high-pass filter parameters used for processing the raw measurements
# define the transmit chirp signal sent over the speakers
# demodulate the recording of the scene response to the FMCW transmit signal
if __name__ == '__main__':
    # Reconstruct one or more captured scenes named on the command line.
    reconstruction = AcousticNLOSReconstruction()
    valid_scenes = ['double', 'letter_H', 'corner_reflectors',
                    'psf', 'resolution_corner1m', 'resolution_corner2m',
                    'resolution_plane1m', 'resolution_plane2m', 'letters_LT']
    scene = sys.argv[1:]
    if len(scene) == 0:
        reconstruction.usage()
    if scene == ['all']:
        # BUG FIX: previously assigned to an unused name ("scenes"), so
        # "all" fell through to the loop and hit usage() instead of
        # running every scene.
        scene = valid_scenes
    for s in scene:
        if s not in valid_scenes:
            reconstruction.usage()
            break
        reconstruction.run(s)
| [
7061,
6,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
2864,
13863,
22476,
864,
48656,
3498,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
3917,
10314,
... | 3.183702 | 724 |
# Django settings for testproj project.
import os
import sys
# import source code dir so the project modules resolve when run in place
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
SITE_ID = 300
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ROOT_URLCONF = "urls"
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
# Run the test suite through django-nose.
TEST_RUNNER = "django_nose.run_tests"
here = os.path.abspath(os.path.dirname(__file__))
# Modules excluded from coverage measurement.
COVERAGE_EXCLUDE_MODULES = ("celery.__init__",
                            "celery.conf",
                            "celery.tests.*",
                            "celery.management.*",
                            "celery.contrib.*",
                            "celery.bin.celeryinit",
                            "celery.bin.celerybeat",
                            "celery.utils.patch",
                            "celery.utils.compat",
                            "celery.task.rest",
                            "celery.platform",  # FIXME
                            "celery.backends.mongodb",  # FIXME
                            "celery.backends.tyrant",  # FIXME
                            )
NOSE_ARGS = [os.path.join(here, os.pardir, "celery", "tests"),
            "--cover3-package=celery",
            "--cover3-branch",
            "--cover3-exclude=%s" % ",".join(COVERAGE_EXCLUDE_MODULES)]
# AMQP broker connection settings.
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_VHOST = "/"
BROKER_USER = "guest"
BROKER_PASSWORD = "guest"
# Tokyo Tyrant backend settings.
TT_HOST = "localhost"
TT_PORT = 1978
CELERY_DEFAULT_EXCHANGE = "testcelery"
CELERY_DEFAULT_ROUTING_KEY = "testcelery"
CELERY_DEFAULT_QUEUE = "testcelery"
CELERY_QUEUES = {"testcelery": {"binding_key": "testcelery"}}
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'
# BUG FIX: ':memory' (missing trailing colon) makes sqlite create an on-disk
# file literally named ":memory"; ':memory:' selects the in-memory database.
DATABASE_NAME = ':memory:'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django_nose',
    'celery',
    'someapp',
    'someappwotask',
)
CELERY_SEND_TASK_ERROR_EMAILS = False
| [
2,
37770,
6460,
329,
1332,
1676,
73,
1628,
13,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
2,
1330,
2723,
2438,
26672,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
1136,
66,
16993,
28955,
198,
17597,
13,
6978,
13,
2846... | 1.830154 | 1,101 |
# NOTE(review): Python 2 script (statement-form print); it also opens .cat
# files without closing them — acceptable for a throwaway check, but confirm.
import nomad
import numpy as np
# This was a one-time use script to confirm that the RA swatch ranges are >= and <.
# Found:
# LOW MATCH:
# 0021 18.0 18.0 18.2499899074
# LOW MATCH:
# 0064 15.25 15.25 15.4999305556
# LOW MATCH:
# 73 4.75 4.75 4.99976037037
# before I then cut off the run. Good enough for me, showing that
# RA swatch ranges are >= and <
# (which makes logical sense)
# Iterate over every declination file and every 0.25h RA swatch within it.
for cur_dec_filenum in np.arange(1799):
    nomad_filenum_str = '%04i' % cur_dec_filenum
    print nomad_filenum_str
    for ra_swatch in np.arange(0, 24, 0.25):
        # Record range (inclusive) covering this swatch in the catalog file.
        records_to_retrieve = nomad._determine_record_numbers_to_retrieve(ra_swatch, ra_swatch, cur_dec_filenum)[0]
        f = open(nomad._nomad_dir + nomad_filenum_str[0:3] + '/m' + nomad_filenum_str + '.cat', 'rb')
        f.seek((records_to_retrieve[0] - 1) * nomad._nomad_record_length_bytes)
        raw_byte_data = f.read((records_to_retrieve[1] - records_to_retrieve[0] + 1) * nomad._nomad_record_length_bytes)
        nomad_ids = [nomad_filenum_str + '-' + ('%07i' % a) for a in range(records_to_retrieve[0], records_to_retrieve[1] + 1)]
        stars = nomad._apply_proper_motion(nomad._convert_raw_byte_data_to_dataframe(raw_byte_data, nomad_ids=nomad_ids),
                                           epoch=2000.0)
        # RAJ2000 is in degrees; dividing by 15 converts to hours.
        if ra_swatch == stars['RAJ2000'].min() / 15.:
            print 'LOW MATCH:'
            print nomad_filenum_str, ra_swatch, stars['RAJ2000'].min() / 15., stars['RAJ2000'].max() / 15.
        if (ra_swatch + 0.25) == stars['RAJ2000'].max() / 15.:
            print 'HIGH MATCH:'
            print nomad_filenum_str, ra_swatch, stars['RAJ2000'].min() / 15., stars['RAJ2000'].max() / 15.
| [
11748,
4515,
324,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
770,
373,
257,
530,
12,
2435,
779,
4226,
284,
6216,
326,
262,
17926,
1509,
963,
16069,
389,
18189,
290,
1279,
13,
198,
198,
2,
220,
4062,
25,
198,
2,
46663,
337,
1... | 2.164295 | 773 |
import json
import logging
import os
import sys
import traceback
from typing import Union, Dict, List
import requests
from teams_logger import TeamsHandler, Office365CardFormatter
import validators
from pigeons.filter import TeamsFilter
def init_logger(
    endpoint: Union[str, Dict[str, str]],
    endpoint_key: str = None,
    name: str = None,
    level: int = logging.INFO,
    log_to_teams: bool = True,
    tf_capture_flags: List[str] = None,
    tf_regex: bool = False,
):
    """
    Initialize a logger that can forward records to an MS Teams webhook.

    The returned logger has a ``TeamsFilter`` attached and, unless disabled,
    a ``TeamsHandler`` pointing at the resolved webhook endpoint.

    Parameters
    ----------
    endpoint : str or dict
        Teams webhook URL, a filesystem path to a file containing it, or a
        dict of endpoints selected via ``endpoint_key``.
    endpoint_key : str, default=None
        Ignored if ``endpoint`` is a URL; otherwise the key used to pick the
        endpoint out of the file contents or dict.
    name : str, default=None
        Logger name (the root logger when None).
    level : int, default=logging.INFO
        Level set on the logger.
    log_to_teams : bool, default=True
        Whether to send logs to MS Teams or not.
    tf_capture_flags : List[str], default=None
        Flags to capture in log records for the specified level.
    tf_regex : bool, default=False
        Whether the capture flags are regular expressions.

    Returns
    -------
    logger : logging.Logger
        Configured logger object.
    """
    if isinstance(endpoint, str):
        if os.path.exists(endpoint):
            # presumably get_endpoint_from_file is defined elsewhere in this
            # module and returns a URL or a dict of endpoints — confirm.
            endpoint = get_endpoint_from_file(filepath=endpoint, key=endpoint_key)
    if isinstance(endpoint, dict):
        endpoint = endpoint.get(endpoint_key)
    _check_url(endpoint)  # validates the resolved webhook URL (defined elsewhere)
    logger = logging.getLogger(name)
    logger.setLevel(level)
    if log_to_teams:
        th = TeamsHandler(url=endpoint, level=logging.INFO)
        logger.addHandler(th)
        # Card shows logger name, level name and line number as facts.
        cf = Office365CardFormatter(facts=["name", "levelname", "lineno"])
        th.setFormatter(cf)
    tf = TeamsFilter(capture_flags=tf_capture_flags, regex=tf_regex)
    logger.addFilter(tf)
    return logger
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
6738,
19720,
1330,
4479,
11,
360,
713,
11,
7343,
198,
198,
11748,
7007,
198,
6738,
3466,
62,
6404,
1362,
1330,
24690,
25060,
11,
4452,
... | 2.555695 | 799 |
# Read an integer from the user and report whether it is prime.
n = int(input("Enter a number: "))
statement = "is a prime number."
if n < 2:
    # BUG FIX: 0, 1 and negative numbers are not prime by definition;
    # the original loop range(2, n) was empty for them and reported prime.
    statement = 'is not a prime number.'
else:
    # Trial division up to sqrt(n) suffices; break on the first divisor.
    for x in range(2, int(n ** 0.5) + 1):
        if n % x == 0:
            statement = 'is not a prime number.'
            break
print(n, statement)
| [
201,
198,
77,
796,
493,
7,
15414,
7203,
17469,
257,
1271,
25,
366,
4008,
201,
198,
26090,
796,
366,
271,
257,
6994,
1271,
526,
201,
198,
1640,
2124,
287,
2837,
7,
17,
11,
77,
2599,
201,
198,
220,
220,
220,
611,
299,
4,
87,
6624,... | 2.271605 | 81 |
"""Friedrich Schotte, May 1, 2015 - May 1, 2015"""
from ftplib import FTP
from io import BytesIO
from struct import pack
data = ""
data += pack(">bbHIII",0x03,0x000,0x0001,0xF0FFB044,0x00000001,0x00000000)
data += pack(">bbHIII",0x03,0x000,0x0001,0xF0FFB044,0x00000001,0x00000001)
##file("/tmp/sequence.bin","w").write(data) # for debugging
f = BytesIO()
f.write(data)
f.seek(0)
ftp = FTP("pico25.niddk.nih.gov","root","root")
##ftp.storbinary ("STOR /tmp/sequence.bin",f) # for debugging
ftp.storbinary ("STOR /dev/sequencer",f)
ftp.close()
| [
37811,
37,
2228,
7527,
3059,
11404,
11,
1737,
352,
11,
1853,
532,
1737,
352,
11,
1853,
37811,
198,
6738,
10117,
489,
571,
1330,
45854,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
2878,
1330,
2353,
198,
198,
7890,
796,
13538,
... | 2.339056 | 233 |
from plugins.adversary.app.commands import cmd
from plugins.adversary.app.operation.operation import Step, OPFile, OPHost, OPRat, OPVar
class DirListCollection(Step):
    """
    Description:
        This step enumerates files on the target machine. Specifically, it looks for files with 'password' or
        'admin' in the name.
    Requirements:
        This step only requires the existence of a RAT on a host in order to run.
    """
    # ATT&CK technique/tactic pairs this step maps to.
    attack_mapping = [("T1005", "Collection"), ("T1083", "Discovery"), ('T1106', 'Execution')]
    display_name = "list_files"
    summary = "Enumerate files locally with a for loop and the dir command recursively"
    # Planner preconditions: a RAT and the host it runs on.
    preconditions = [('rat', OPRat),
                     ('host', OPHost(OPVar("rat.host")))]
    # Postcondition: files tagged for collection on that host.
    postconditions = [('file_g', OPFile({'use_case': 'collect',
                                         'host': OPVar("host")}))]
    significant_parameters = ['host']  # no need to do this more than once per host
    postproperties = ['file_g.path']
    # NOTE(review): the bodies of the three decorated methods appear to have
    # been stripped from this chunk — the dangling @staticmethod decorators
    # below are not valid Python on their own; confirm against the full file.
    @staticmethod
    @staticmethod
    @staticmethod
| [
6738,
20652,
13,
324,
690,
560,
13,
1324,
13,
9503,
1746,
1330,
23991,
198,
6738,
20652,
13,
324,
690,
560,
13,
1324,
13,
27184,
13,
27184,
1330,
5012,
11,
13349,
8979,
11,
440,
11909,
455,
11,
440,
4805,
265,
11,
13349,
19852,
628,... | 2.62069 | 406 |
# lec10memo1.py
# Code shown in Lecture 10, memo 1
# An iterative "Pythonic" search procedure:
# The recursive way:
# A recursive "Pythonic" binary search procedure:
| [
2,
443,
66,
940,
11883,
78,
16,
13,
9078,
198,
198,
2,
6127,
3402,
287,
31209,
495,
838,
11,
16155,
352,
198,
198,
2,
1052,
11629,
876,
366,
37906,
291,
1,
2989,
8771,
25,
628,
198,
2,
383,
45115,
835,
25,
628,
198,
2,
317,
45... | 3.127273 | 55 |
#%%
import os
import tarfile
from io import BytesIO
from pathlib import Path
from typing import Iterator, Tuple

import pandas as pd
from PIL import Image
# Repository root (two levels up from this file).
basefolder_loc = Path(__file__).parents[1]
# Module-level handle on the training archive; kept open for the lifetime
# of the process and shared by load_img / get_all_images below.
TARFILE = tarfile.open(
    os.path.join(basefolder_loc, "1.download-data", "data", "training.tar.gz"),
    "r:gz",
)
def load_img(
    target: str = "adrenoceptor",
    plate: str = "P1",
    cell_id: int = 1,
    replicate: int = 1,
    well: str = "C10",
    field: int = 1,
) -> Image.Image:
    """
    Load one cell image from the training archive.

    All the parameters can be found in the csv files:
    - /1.download-data/data/training_data.csv
    - /1.download-data/data/validation_data.csv
    They match the headers:
    target (str): Name of the mechanism of action e.g. "adrenoceptor"
    plate (str): Name of the plate e.g. "P1"
    cell_id (int): Identification number of the cell e.g. 1
    replicate (int): Number of replication e.g. 1
    well (str): Name of the well relative to the wellplate e.g. "C10"
    field (int): Number of the field e.g. 1

    Returns: the single PIL image matching all given parameters.

    WARNING:
        Use this only if you need to load a single image.
        Every time you run this function it seeks inside the whole archive.
        If you need all images use get_all_images.
    """
    # Archive member names use "S" where the CSV metadata uses "P".
    newPlateName = plate.replace("P", "S")
    extracted_file = TARFILE.extractfile(
        f"training/{target}/211_11_17_X_Man_LOPAC_X5_LP_{newPlateName}_{replicate}_{well}_{field}_{cell_id}.tiff"
    )
    b = extracted_file.read()
    img = Image.open(BytesIO(b))
    return img
def get_all_images(metadata: pd.DataFrame) -> Iterator[Tuple[Image.Image, str, str]]:
    """
    Stream every archive image that matches a row of *metadata*.

    metadata (pd.DataFrame):
        This is a list of the images that will be loaded.
        Each row has target, cell_id, well, plate, field and replicate.
    The data is compressed in /1.download-data/data/training.tar.gz.
    Every image in the data matching a row in metadata will be loaded.

    returns: Generator (https://wiki.python.org/moin/Generators)
        Each iteration yields (img, cell_code, target).
    """
    for member in TARFILE.getmembers():
        path, name = os.path.split(member.name)
        if not name.endswith(".tiff"):
            continue
        # The parent directory of the member is the target (mechanism name).
        path, target = os.path.split(path)
        # Member file names encode the acquisition metadata, "_"-separated.
        (
            l211,
            l11,
            l17,
            X,
            Man,
            LOPAC,
            X5,
            LP,
            newPlateName,
            replicate,
            wellName,
            field,
            cell_id,
        ) = name.replace(".tiff", "").split("_")
        # Archive uses "S"-prefixed plate names; metadata uses "P".
        plateName = newPlateName.replace("S", "P")
        rows = metadata.loc[
            (metadata["target"] == target)
            & (metadata["cell_id"] == int(cell_id))
            & (metadata["well"] == wellName)
            & (metadata["plate"] == plateName)
            & (metadata["field"] == int(field))
            & (metadata["replicate"] == int(replicate))
        ]
        if len(rows) == 0:
            continue
        elif len(rows) > 1:
            print("To many rows", rows)
        extracted_file = TARFILE.extractfile(member)
        b = extracted_file.read()
        img = Image.open(BytesIO(b))
        yield img, list(rows.cell_code)[0], target
| [
2,
16626,
198,
11748,
13422,
7753,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
8692,
43551,
62,
17946... | 2.261905 | 1,428 |
"""Resource files for INDRA CoGEx."""
from pathlib import Path
from typing import List
# Public API of this resource module.
__all__ = [
    "ensure_disprot",
]
HERE = Path(__file__).parent.resolve()
#: URL for downloading most recent version of DisProt
DISPROT_URL = "https://www.disprot.org/api/search?release=current&show_ambiguous=true&show_obsolete=false&format=tsv&namespace=all&get_consensus=false"
#: Local cache of the processed DisProt HGNC identifier list.
DISPROT_PATH = HERE.joinpath("disprot_hgnc_ids.txt")
#: A set of genes that have *too* much information (e.g., TP53, IL-6)
#: that will be excluded
DISPROT_SKIP = {
    "1678",  # CD4 usually misgrounded to CD4 T cells
    "6018",
    "11998",
}
def main():
    """Rebuild all resources."""
    # NOTE(review): ensure_disprot is declared in __all__ but its definition
    # is not visible in this chunk — presumably defined elsewhere; confirm.
    ensure_disprot(refresh=True)
if __name__ == "__main__":
    main()
| [
37811,
26198,
3696,
329,
24413,
3861,
1766,
38,
3109,
526,
15931,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
7343,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
641,
495,
62,
6381,
11235,
1600,
198,... | 2.626335 | 281 |
from aetherling.modules.fifo import DefineFIFO
from magma import *
from magma.bitutils import *
from magma.simulator.coreir_simulator import CoreIRSimulator
from magma.scope import Scope
| [
6738,
257,
6750,
1359,
13,
18170,
13,
32041,
78,
1330,
2896,
500,
37,
5064,
46,
198,
6738,
2153,
2611,
1330,
1635,
198,
6738,
2153,
2611,
13,
2545,
26791,
1330,
1635,
198,
6738,
2153,
2611,
13,
14323,
8927,
13,
7295,
343,
62,
14323,
... | 3.298246 | 57 |
# -*- coding: utf-8 -*-
"""
:Created: 3/12/14
:Author: timic
"""
import datetime
import os
from lxml import etree
from .._base import BaseSmev, BaseSmevWsdl
from .._utils import EmptyCtx, el_name_with_ns
from .. fault import ApiError as _ApiError
from .. import _xmlns as ns
from model import MessageType, ServiceType, HeaderType, AppDocument
try:
from spyne.protocol.xml.model import complex_from_element as _spyne_cfe
except ImportError:
# spyne>=2.11.0
else:
# spyne<=2.10.10
class Smev256(BaseSmev):
    """
    Implementation of the SMEV protocol, version 2.5.6.
    """
    # XSD schema used to validate SMEV 2.5.6 envelopes.
    _smev_schema_path = os.path.join(
        os.path.dirname(__file__), '../xsd', 'smev256.xsd')
    _ns = ns.nsmap256
    _interface_document_type = Smev256Wsdl
    def _create_message_element(self, ctx):
        """
        Build the skeleton for the ``smev:Message`` element.

        Each field is filled from the outgoing message context when set,
        falling back to configured ``smev_params`` and then to the incoming
        message, mirroring the SMEV 2.5.6 routing rules.

        :param ctx: end-to-end method context
        :rtype: lxml.etree.Element
        """
        # TODO: implement proper binding instead of manual element assembly
        if getattr(ctx, "udc", None) is None:
            ctx.udc = EmptyCtx()
        if not getattr(ctx.udc, "out_smev_message", None):
            ctx.udc.out_smev_message = EmptyCtx()
        SMEV = el_name_with_ns(self._ns["smev"])
        root = etree.Element(SMEV("Message"), nsmap={"smev": self._ns["smev"]})
        sender = etree.SubElement(root, SMEV("Sender"))
        etree.SubElement(sender, SMEV("Code")).text = (
            ctx.udc.out_smev_message.Sender.Code
            or self.smev_params.get("SenderCode", ""))
        etree.SubElement(sender, SMEV("Name")).text = (
            ctx.udc.out_smev_message.Sender.Name
            or self.smev_params.get("SenderName", ""))
        recipient = etree.SubElement(root, SMEV("Recipient"))
        etree.SubElement(recipient, SMEV("Code")).text = (
            ctx.udc.out_smev_message.Recipient.Code
            or self.smev_params.get("RecipientCode", "")
            or ctx.udc.in_smev_message.Sender.Code or "")
        etree.SubElement(recipient, SMEV("Name")).text = (
            ctx.udc.out_smev_message.Recipient.Name
            or self.smev_params.get("RecipientName", "")
            or ctx.udc.in_smev_message.Sender.Name or "")
        # Originator is optional; emitted only when explicitly provided.
        if ctx.udc.out_smev_message.Originator:
            originator = etree.SubElement(root, SMEV("Originator"))
            etree.SubElement(originator, SMEV(
                "Code")).text = ctx.udc.out_smev_message.Originator.Code or ""
            etree.SubElement(originator, SMEV(
                "Name")).text = ctx.udc.out_smev_message.Originator.Name or ""
        service = etree.SubElement(root, SMEV("Service"))
        etree.SubElement(service, SMEV("Mnemonic")).text = (
            ctx.udc.out_smev_message.Service.Mnemonic
            or self.smev_params.get("Mnemonic", "")
            or (ctx.udc.in_smev_message.Service
                and ctx.udc.in_smev_message.Service.Mnemonic
                or ""))
        etree.SubElement(service, SMEV("Version")).text = (
            ctx.udc.out_smev_message.Service.Version
            or self.smev_params.get("Version", "")
            or (ctx.udc.in_smev_message.Service
                and ctx.udc.in_smev_message.Service.Version)
            or "1.00")
        etree.SubElement(root, SMEV(
            "TypeCode")).text = ctx.udc.out_smev_message.TypeCode or "GSRV"
        # API-level errors map to INVALID (or the error's own status).
        if ctx.out_error and isinstance(ctx.out_error, _ApiError):
            status = getattr(ctx.out_error, "Status", None) or "INVALID"
        else:
            status = "RESULT"
        etree.SubElement(root, SMEV(
            "Status")).text = ctx.udc.out_smev_message.Status or status
        etree.SubElement(
            root, SMEV("Date")).text = datetime.datetime.utcnow().isoformat()
        # NOTE(review): `unicode` below is Python 2 only — confirm the file's
        # target interpreter.
        exchange_type = (
            self.smev_params.get("ExchangeType") or
            unicode(ctx.udc.in_smev_message.ExchangeType) or
            "0")
        etree.SubElement(root, SMEV("ExchangeType")).text = exchange_type
        request_id_ref = (
            ctx.udc.out_smev_message.RequestIdRef
            or ctx.udc.in_smev_header.MessageId)
        if request_id_ref:
            etree.SubElement(root, SMEV("RequestIdRef")).text = request_id_ref
        origin_request_id_ref = (
            ctx.udc.out_smev_message.OriginRequestIdRef or
            ctx.udc.in_smev_message.OriginRequestIdRef or request_id_ref)
        if origin_request_id_ref:
            etree.SubElement(
                root, SMEV("OriginRequestIdRef")).text = origin_request_id_ref
        service_code = (
            ctx.udc.out_smev_message.ServiceCode or
            self.smev_params.get("ServiceCode") or
            ctx.udc.in_smev_message.ServiceCode)
        if service_code:
            etree.SubElement(root, SMEV("ServiceCode")).text = service_code
        case_number = (
            ctx.udc.out_smev_message.CaseNumber or
            ctx.udc.in_smev_message.CaseNumber)
        if case_number:
            etree.SubElement(
                root, SMEV("CaseNumber")
            ).text = case_number or ""
        if "OKTMO" in self.smev_params:
            etree.SubElement(
                root, SMEV("OKTMO")).text = self.smev_params.get("OKTMO", "")
        test_msg = (
            ctx.udc.out_smev_message.TestMsg
            or ctx.udc.in_smev_message.TestMsg or None)
        if test_msg:
            etree.SubElement(root, SMEV("TestMsg")).text = test_msg
        return root
    def _create_message_data_element(self, ctx):
        """
        Build the skeleton for the ``MessageData`` element.

        :param ctx: end-to-end method context
        :rtype: lxml.etree.Element
        """
        SMEV = el_name_with_ns(self._ns["smev"])
        root = etree.Element(
            SMEV("MessageData"), nsmap={"smev": self._ns["smev"]})
        etree.SubElement(root, SMEV("AppData"))
        # AppDocument is emitted only when a binary attachment is present.
        if ctx.udc.out_smev_appdoc.BinaryData:
            app_document = etree.SubElement(root, SMEV("AppDocument"))
            etree.SubElement(
                app_document, SMEV("RequestCode")
            ).text = ctx.udc.out_smev_appdoc.RequestCode
            etree.SubElement(
                app_document, SMEV("BinaryData")
            ).text = ctx.udc.out_smev_appdoc.BinaryData
        return root
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
25,
41972,
25,
513,
14,
1065,
14,
1415,
198,
25,
13838,
25,
4628,
291,
198,
37811,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
6738,
300,
198... | 1.901967 | 3,254 |
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from api.models.mixins.DisplayOrder import DisplayOrder
from api.models.mixins.EffectiveDates import EffectiveDates
from auditable.models import Auditable
from api.managers.OrganizationStatusManager import OrganizationStatusManager
| [
37811,
198,
220,
220,
220,
30617,
7824,
43925,
329,
262,
40034,
24958,
6998,
10504,
25469,
15678,
628,
220,
220,
220,
383,
15198,
13333,
1424,
29595,
4482,
318,
852,
3562,
284,
4269,
1370,
11846,
6447,
329,
9358,
5252,
20499,
287,
10213,
... | 3.94863 | 292 |
import os
import datetime as dt
import moviepy.video.io.ImageSequenceClip

# Folder of PNG frames and the playback rate for the rendered clip.
image_folder = 'images/natural'
fps = 15

beg = dt.datetime.now()
# BUG FIX: os.listdir() returns entries in arbitrary order; sort so frames
# appear in the video in filename order (assumes frames are named so that
# lexicographic order is the intended sequence — confirm naming scheme).
image_files = sorted(
    image_folder + '/' + img for img in os.listdir(image_folder) if img.endswith(".png")
)
print(image_files)
clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(image_files, fps=fps)
clip.write_videofile('my_video.mp4')
end = dt.datetime.now()
# Report wall-clock duration of the encode.
print(beg, end, end - beg, sep='-')
11748,
28686,
201,
198,
11748,
4818,
8079,
355,
288,
83,
201,
198,
11748,
3807,
9078,
13,
15588,
13,
952,
13,
5159,
44015,
594,
2601,
541,
201,
198,
9060,
62,
43551,
796,
705,
17566,
14,
11802,
6,
201,
198,
29647,
796,
1315,
201,
19... | 2.494505 | 182 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == "__main__":
    # Map numbers to musketeer names, then print the inverted mapping.
    diction = {1: 'Атос', 2: 'Портос', 3: 'Арамис'}
    print(diction)
    diction_swap = dict(zip(diction.values(), diction.keys()))
    print(diction_swap)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
48589,
796,
1391,
16,
25,
705,
... | 1.890756 | 119 |
"""Test min heap."""
import pytest
import random
@pytest.fixture
def empty_heap():
    """Provide a fresh, empty BinHeap for testing."""
    from binheap import BinHeap
    return BinHeap()
@pytest.fixture
def random_heap():
    """Generate a list for use in a heap."""
    from binheap import BinHeap
    # Up to 500 random ints in [0, 200]; set() removes duplicates so every
    # popped minimum is unique, list order is arbitrary.
    iterable = list(
        set(
            [random.randint(0, 200) for _ in range(random.randrange(500))]
        )
    )
    min_heap = BinHeap(iterable)
    return min_heap
@pytest.fixture
def full_heap():
    """Provide a BinHeap pre-populated from a fixed list."""
    from binheap import BinHeap
    return BinHeap([67, 5, 32, 1, 0, 2, 4, 101, 94, 72])
def test_heap_initialization_empty_heap(empty_heap):
    """The fixture should hand back a genuine (empty) BinHeap instance."""
    from binheap import BinHeap
    assert isinstance(empty_heap, BinHeap)
def test_heap_type_error():
    """Ensure TypeError if we pass anything but a list or None."""
    from binheap import BinHeap
    with pytest.raises(TypeError):
        # Call for the side effect only; the original bound the result to an
        # unused local, which ruff/flake8 flag (F841).
        BinHeap(1, 2, 3, 4)
def test_heap_initialized_with_list(full_heap):
    """A list-initialized heap holds a valid min-heap layout of its input."""
    from binheap import BinHeap
    assert isinstance(full_heap, BinHeap)
    # Expected array form of the heapified input list.
    assert full_heap._iterable == [0, 1, 4, 2, 5, 67, 32, 101, 94, 72]
def test_heap_push_none(empty_heap):
    """Calling push() with no value must raise TypeError."""
    with pytest.raises(TypeError):
        empty_heap.push()
def test_len(full_heap):
    """len() reports the number of stored elements."""
    assert len(full_heap) == 10
def test_empty_heap_pop(empty_heap):
    """Popping an empty heap must raise TypeError."""
    with pytest.raises(TypeError):
        empty_heap.pop()
def test_successful_pop(full_heap):
    """Repeated pops yield the minimums 0, 1, 2 with the next root exposed."""
    expectations = ((0, 1), (1, 2), (2, 4))
    for smallest, next_root in expectations:
        assert full_heap.pop() == smallest
        assert full_heap._iterable[0] == next_root
    assert len(full_heap) == 7
def test_successful_push(empty_heap):
    """Pushes keep the smallest value at the root of the heap."""
    for value, expected_root in ((2, 2), (55, 2), (1, 1)):
        empty_heap.push(value)
        assert empty_heap._iterable[0] == expected_root
    # Final internal layout after the three pushes.
    assert empty_heap._iterable == [1, 55, 2]
def test_push_and_pop_dont_screw_with_each_other(full_heap):
    """Make sure they don't interfere with each other.

    Interleaves pops and pushes, checking the root stays the minimum.
    """
    assert full_heap.pop() == 0
    assert full_heap._iterable[0] == 1
    full_heap.push(67)
    assert full_heap._iterable[0] == 1
    full_heap.push(0)
    assert full_heap._iterable[0] == 0
def test_big_random_heap(random_heap):
    """Make sure it works for a big ass heap.

    Pops every element, checking each pop() returns the current minimum.
    """
    # BUG FIX: the original iterated random_heap._iterable directly while
    # pop() was shrinking that same list, which skips elements and ends the
    # loop early.  Snapshot the element count first and pop that many times.
    for _ in range(len(random_heap._iterable)):
        current_min = min(random_heap._iterable)
        assert random_heap.pop() == current_min
| [
37811,
14402,
949,
24575,
526,
15931,
198,
11748,
12972,
9288,
198,
11748,
4738,
628,
198,
31,
9078,
9288,
13,
69,
9602,
198,
4299,
6565,
62,
258,
499,
33529,
198,
220,
220,
220,
37227,
49933,
9386,
257,
24575,
329,
4856,
526,
15931,
... | 2.427393 | 1,212 |
import json
import logging
import os
import boto3
import pandas as pd
import numpy as np
import base64
import io
# Root logger for the Lambda runtime (AWS pre-configures its handlers).
logger = logging.getLogger()
client = boto3.client("sagemaker-runtime")
# Deployment configuration comes from environment variables; an unset key
# raises KeyError at import time.
region = os.environ["region"]
endpoint_name = os.environ["endpoint_name"]
content_type = os.environ["content_type"]
fg_name = os.environ["fg_name"]
# Region-pinned session reused for both runtime clients below.
boto_session = boto3.Session(region_name=region)
featurestore_runtime = boto_session.client(
    service_name="sagemaker-featurestore-runtime", region_name=region
)
# NOTE(review): `client` above and `client_sm` are both "sagemaker-runtime"
# clients (the former without an explicit region) -- presumably one is
# redundant; confirm which one the handler actually uses.
client_sm = boto_session.client("sagemaker-runtime", region_name=region)
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
11748,
275,
2069,
18,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2779,
2414,
198,
11748,
33245,
198,
198,
6404,
1362,
796,
18931,
1... | 2.923858 | 197 |
import os
import sys
import matplotlib.pyplot as plt
# Make the package root importable when this file runs from its own directory.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                '..')))
from galaxy_model.galaxy import Galaxy  # noqa
# Module-level Galaxy instance shared by the test functions run below.
gal = Galaxy()
if __name__ == "__main__":
    # NOTE(review): these test_* functions are not defined in this excerpt;
    # presumably their definitions were removed from this chunk -- verify
    # against the original file before running.
    test_add_and_remove_coords()
    test_plot_galaxy_basic()
    test_on_spur()
    test_on_spiral_arm()
    test_on_anything()
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
159... | 2.05 | 200 |
from abc import ABCMeta, abstractmethod
if __name__ == "__main__":
    # NOTE(review): `Concrete` is not defined in this excerpt; presumably an
    # ABC-derived class whose definition was stripped from this chunk.
    c = Concrete()
| [
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
269,
796,
1482,
38669,
3419,
198
] | 2.8125 | 32 |
'''input
huaauhahhuahau
'''
# Read one line, keep only its vowels, and report whether that vowel
# subsequence is a palindrome ("S" for sim) or not ("N" for nao).
inp = input('')
vowel = "".join(ch for ch in inp if ch in ("a", "i", "u", "e", "o"))
print("S" if vowel == vowel[::-1] else "N")
7061,
6,
15414,
198,
33061,
559,
71,
993,
13415,
993,
559,
198,
7061,
6,
198,
259,
79,
796,
5128,
7,
7061,
8,
198,
85,
322,
417,
796,
13538,
198,
1640,
1312,
287,
287,
79,
25,
198,
197,
361,
1312,
6624,
366,
64,
1,
393,
1312,
... | 1.98 | 100 |
"""
This connects to an IRC network/channel and launches an 'bot' onto it.
The bot then pipes what is being said between the IRC channel and one or
more Evennia channels.
"""
# TODO: This is deprecated!
from twisted.words.protocols import irc
from twisted.internet import protocol
from twisted.internet import reactor
from django.conf import settings
from src.irc.models import IRCChannelMapping
#from src import comsys
from src.utils import logger
#store all irc channels
IRC_CHANNELS = []
def cemit_info(message):
    """
    Send info to default info channel

    NOTE(review): `comsys` is never imported -- the `from src import comsys`
    line above is commented out -- so calling this raises NameError.  Confirm
    whether the import should be restored (this module is marked deprecated).
    """
    comsys.send_cmessage(settings.COMMCHAN_IRC_INFO, 'IRC: %s' % message)
def connect_to_IRC(irc_network,irc_port,irc_channel,irc_bot_nick ):
    "Create the bot instance and connect to the IRC network and channel."
    # NOTE(review): IRC_BotFactory is not defined in this excerpt --
    # presumably a twisted protocol.Factory subclass defined elsewhere in
    # this module; verify before use.
    connect = reactor.connectTCP(irc_network, irc_port,
                                 IRC_BotFactory(irc_channel,irc_network,irc_bot_nick))
| [
37811,
201,
198,
1212,
20417,
284,
281,
30039,
3127,
14,
17620,
290,
18617,
281,
705,
13645,
6,
4291,
340,
13,
201,
198,
464,
10214,
788,
19860,
644,
318,
852,
531,
1022,
262,
30039,
6518,
290,
530,
393,
201,
198,
3549,
3412,
18142,
... | 2.713499 | 363 |
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
# importing Magics module
from Magics.macro import *
ref = 'obsjson'
# Setting of the output file name
# NOTE: this rebinds the Magics `output` macro with the driver object it
# returns, so `output(...)` cannot be called again afterwards.
output = output(output_formats=['png'],
                output_name_first_page_number='off',
                output_name=ref)
# Setting the coordinates of the geographical area
# (cylindrical projection over roughly 20W-50E, 25N-65N)
projection = mmap(
    subpage_x_length=24.,
    subpage_upper_right_longitude=50.00,
    subpage_upper_right_latitude=65.00,
    subpage_lower_left_latitude=25.00,
    subpage_lower_left_longitude=-20.0,
    subpage_map_projection='cylindrical',
)
# Coastlines setting
coast = mcoast(map_grid='on', map_grid_colour='grey',
               map_grid_thickness=2,
               map_coastline_colour='RGB(0.4,0.4,0.4)',
               map_coastline_thickness=3)
obs = mobs(
obsjson_info_list = ['{"type": "ersagun", "identifier": "era1", "temperature": -3.0, \
"pressure_after": 1008.0, "pressure_before": 1008.0,\
"pressure": 1010.0, "longitude": 0.3, \
"latitude": 49.5, "temperature_before": -2.0}',
'{"type": "ersagun","identifier": "era2", "temperature": -5.0, \
"pressure_after": 1038.0, "pressure_before": 999.0,\
"pressure": 1010.0, "longitude": 5.39, \
"latitude": 55., "temperature_before": -2.0}'
],
obs_template_file_name = "obs.template",
obs_size=0.3,
obs_ring_size=0.2,
obs_distance_apart = 0.
)
# Page title text block.
title = mtext(text_lines=["Observation plotting ..." ],
              text_justification='left', text_font_size=0.8,
              text_colour='charcoal')
# To the plot
# Argument order defines layer order: projection first, observations and
# coastlines drawn over it, title last.
plot(
    output,
    projection,
    obs,
    coast,
    title,
)
| [
2,
357,
34,
8,
15069,
8235,
12,
5304,
13182,
14326,
37,
13,
198,
2,
220,
198,
2,
770,
3788,
318,
11971,
739,
262,
2846,
286,
262,
24843,
10483,
594,
10628,
362,
13,
15,
198,
2,
543,
460,
307,
6492,
379,
2638,
1378,
2503,
13,
430... | 2.37455 | 833 |
# source: https://github.com/awjuliani/DeepRL-Agents/blob/master/Vanilla-Policy.ipynb
# https://medium.com/@awjuliani/super-simple-reinforcement-learning-tutorial-part-2-ded33892c724
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import gym
import matplotlib as plt
env = gym.make('CartPole-v0')
render = True
gamma = .99
tf.reset_default_graph()
player = agent(lr=1e-2,s_size=4,a_size=2,h_size=8)
total_episodes = 2000
max_ep = 999
update_frequency = 5
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
i = 0
total_reward = []
total_length = []
gradBuffer = sess.run(tf.trainable_variables())
for ix,grad in enumerate(gradBuffer):
gradBuffer[ix] = grad * 0
while i < total_episodes:
s = env.reset()
if render: env.render()
running_reward = 0
ep_history = []
for j in range(max_ep):
# pick an action given outputs
a_dist = sess.run(player.output,feed_dict={player.state_in:[s]})
a = np.random.choice(a_dist[0],p=a_dist[0])
a = np.argmax(a_dist == a)
s1,r,d,_ = env.step(a)
ep_history.append([s,a,r,s1])
s = s1
running_reward += r
if d == True:
ep_history = np.array(ep_history)
ep_history[:,2] = discount_rewards(ep_history[:,2])
feed_dict={player.reward:ep_history[:,2],
player.action:ep_history[:,1],player.state_in:np.vstack(ep_history[:,0])}
grads = sess.run(player.gradients, feed_dict=feed_dict)
for idx,grad in enumerate(grads):
gradBuffer[idx] += grad
if i % update_frequency == 0 and i != 0:
feed_dict= dictionary = dict(zip(player.gradient_holders, gradBuffer))
_ = sess.run(player.update_batch, feed_dict=feed_dict)
for ix,grad in enumerate(gradBuffer):
gradBuffer[ix] = grad * 0
total_reward.append(running_reward)
total_length.append(j)
break
if i % 100 == 0:
print(np.mean(total_reward[-100:]))
i += 1
| [
2,
2723,
25,
3740,
1378,
12567,
13,
785,
14,
707,
73,
377,
25111,
14,
29744,
7836,
12,
10262,
658,
14,
2436,
672,
14,
9866,
14,
25298,
5049,
12,
36727,
13,
541,
2047,
65,
198,
2,
3740,
1378,
24132,
13,
785,
14,
31,
707,
73,
377,... | 1.974048 | 1,156 |
# coding: utf-8
__version__ = "0.3.3"
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
834,
9641,
834,
796,
366,
15,
13,
18,
13,
18,
1,
198
] | 1.857143 | 21 |
from tkinter import *
import tkinter.filedialog
import os
import os.path
from check_input import Input, InputError
import chroma_clade
from PIL import Image, ImageTk
# colour choices:
#https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
# py2app saves data files in "<project>.app/Contents/Resources/", which is also where app's main file resides
# therefore we can use the path to this file to get the path to the data files
# this will work also for CLI version, since the data files reside in same dir as source code.
# By contast, PyInstaller creates a temp folder and stores path in _MEIPASS when using --onefile on windows
def get_resource(filename): # https://stackoverflow.com/questions/7674790/bundling-data-files-with-pyinstaller-onefile
    """ Get absolute path to resource, works for dev and for PyInstaller.

    PyInstaller's --onefile mode unpacks bundled data files into a temp
    directory whose path it stores in sys._MEIPASS; outside a frozen app
    that attribute does not exist and we fall back to this file's directory.

    :param filename: resource file name (relative to the resource dir)
    :return: path to the resource
    """
    # BUG FIX: this module never imports sys, so `sys._MEIPASS` always raised
    # NameError (silently swallowed by the former `except Exception`), meaning
    # frozen builds never used the bundled resource directory.  Import sys
    # locally and catch only the expected AttributeError.
    import sys
    try:
        base_path = sys._MEIPASS
    except AttributeError:
        # Not running from a frozen bundle: resources sit next to this file.
        base_path = os.path.split(__file__)[0]
    return os.path.join(base_path, filename)
root = Tk()
gui = GuiInput()
root.title("ChromaClade")
title_image = ImageTk.PhotoImage(Image.open(get_resource("title.png")))
WIDTH = title_image.width()
HEIGHT = WIDTH*1.5
#root.minsize(int(WIDTH), int(HEIGHT))
root.resizable(False, False)
root.geometry("%dx%d"%(round(WIDTH), round(HEIGHT)))
root.configure(bg="gray")
if os.name == "nt": # if windows
try:
root.wm_iconbitmap(get_resource("tree_256.ico"))
except Exception as e:
pass
# ================ window layout ===============
f_title = Frame(root, height=HEIGHT*0.1, width=WIDTH*1.0, bg="cyan")
f_input = Frame(root, height=HEIGHT*0.50, width=WIDTH*0.5, bg="white") # nice pale cyan:
f_image = Frame(root, height=HEIGHT*0.30, width=WIDTH*0.5, bg="white") # nice pale cyan: #9BFBFB
f_messages = Frame(root, height=HEIGHT*0.1, width=WIDTH*1.0, bg="cyan")
root.grid_rowconfigure(0, weight=1)
root.grid_rowconfigure(1, weight=1)
root.grid_rowconfigure(2, weight=1)
root.grid_rowconfigure(3, weight=1)
root.grid_columnconfigure(0, weight=1)
# place large frames on root grid
f_title.grid(column=0, row=0, sticky="nesw")
f_input.grid(column=0, row=1, sticky="nesw")
f_image.grid(column=0, row=2, sticky="nesw")
f_messages.grid(column=0, row=3, sticky="nesw")
propagate = False
f_title.grid_propagate(propagate)
f_input.grid_propagate(propagate)
f_image.grid_propagate(propagate)
f_messages.grid_propagate(propagate)
# ================ title ===============
f_title.grid_rowconfigure(0, weight=1)
f_title.grid_columnconfigure(0, weight=1)
l_title = Label(f_title, image=title_image, bg="cyan") # #9BFBFB
l_title.image = title_image # PIL docs say to keep a reference of image
#l_title = Label(f_title, text="title", bg="cyan") # #9BFBFB
l_title.grid(column=0, row=0, sticky="nsew")
# ================ file input ===============
# two columns in f_input, for tree and alignment panels
for i in range(13):
f_input.grid_rowconfigure(i, weight=1)
for j in range(9):
f_input.grid_columnconfigure(j, weight=1)
#L, M, R = left, middle, right
L_COL = 3
M_COL = L_COL + 1
R_COL = M_COL + 1
L_BG = "white" # label background colour
L_FG = "darkgray" # label text colour for file choices
# CHOOSE TREE
l_tree = Label(f_input, text="Tree:", bg=L_BG)
l_tree.grid(column=L_COL, row=0, sticky="")
b_tree = Button(f_input, text="Choose file", bg=L_BG, command=gui.set_tree)
b_tree.grid(column=M_COL, row=0, sticky="")
l_tree_file = Label(f_input, textvariable=gui.get_tree_file(), fg=L_FG, bg=L_BG, width=GuiInput.MAX_FILE_LEN)
l_tree_file.grid(column=R_COL, row=0, sticky="")
# TREE FORMAT
l_tree_format = Label(f_input, text="Format:", bg=L_BG)
l_tree_format.grid(column=L_COL, row=1, sticky="")
o_tree_format = OptionMenu(f_input, gui.get_tree_format(), *gui.tree_choices)
o_tree_format.config(bg=L_BG)
o_tree_format.grid(column=M_COL, row=1)
# BLANK ROW
Label(f_input, text="", bg=L_BG).grid(column=M_COL, row=2, sticky="nesw")
# CHOOSE ALIGN
l_align = Label(f_input, text="Alignment:", bg=L_BG)
l_align.grid(column=L_COL, row=3, sticky="")
b_align = Button(f_input, text="Choose file", bg=L_BG, command=gui.set_align)
b_align.grid(column=M_COL, row=3)
l_align_file = Label(f_input, textvariable=gui.get_align_file(), fg=L_FG, bg=L_BG, width=GuiInput.MAX_FILE_LEN)
l_align_file.grid(column=R_COL, row=3, sticky="")
l_align_format = Label(f_input, text="Format:", bg=L_BG)
l_align_format.grid(column=L_COL, row=4, sticky="")
o_align = OptionMenu(f_input, gui.get_align_format(), *gui.align_choices)
o_align.config(bg=L_BG)
o_align.grid(column=M_COL, row=4)
# ================ image ===============
f_image.grid_rowconfigure(0, weight=1)
f_image.grid_columnconfigure(0, weight=1)
plain_image = ImageTk.PhotoImage(Image.open(get_resource("tree.png")))
col_image = ImageTk.PhotoImage(Image.open(get_resource("col.tree.png")))
l_image = Label(f_image, image=plain_image, bg=L_BG)
l_image.image = plain_image # PIL docs say to keep a reference
l_image.grid(column=0, row=0, sticky="nesw")
# ================ options ===============
# BLANK ROW
Label(f_input, text="", bg=L_BG).grid(column=M_COL, row=5, sticky="nesw")
# COLOUR BRANCHES
cb_branches = Checkbutton(f_input, text="Colour branches", bg=L_BG, command=image_callback, variable=gui.get_colour_branches())
cb_branches.grid(column=M_COL, row=6, sticky="w")
# BLANK ROW
Label(f_input, text="", bg=L_BG).grid(column=M_COL, row=7, sticky="nesw")
# CHOOSE ALIGNMENT SITES
e_sites = Entry(f_input, textvariable=gui.get_site_range_str(), state="disabled", fg="gray")
e_sites.grid(column=R_COL, row=9)
r_all_sites = Radiobutton(f_input, text="All sites", bg=L_BG, variable=gui.get_all_sites(), value=True, command=restore_site_example)
r_all_sites.grid(column=M_COL, row=8, sticky="w")
r_range_sites = Radiobutton(f_input, text="Choose sites:", bg=L_BG, variable=gui.get_all_sites(), value=False, command=clear_site_example)
r_range_sites.grid(column=M_COL, row=9, sticky="w")
# BLANK ROW
Label(f_input, text="", bg=L_BG).grid(column=M_COL, row=10, sticky="nesw")
# output format
o_out_format = OptionMenu(f_input, gui.get_save_format(), *GuiInput.save_choices)
o_out_format.config(bg=L_BG)
o_out_format.grid(column=M_COL, row=11)
l_out_format = Label(f_input, text="Output format:", bg=L_BG)
l_out_format.grid(column=L_COL, row=11, sticky="")
# output file
b_outfile = Button(f_input, text="Save as", bg=L_BG, command=gui.set_save)
b_outfile.grid(column=M_COL, row=12, sticky="")
l_outfile = Label(f_input, text="Destination:", bg=L_BG)
l_outfile.grid(column=L_COL, row=12, sticky="")
l_outfile = Label(f_input, textvariable=gui.get_save_file(), fg=L_FG, bg=L_BG, width=GuiInput.MAX_FILE_LEN)
l_outfile.grid(column=R_COL, row=12, sticky="")
# go button
b_run = Button(f_input, text="Go", bg=L_BG, command=go)
b_run.grid(column=M_COL, row=13, sticky="")
# ================ messages ===============
f_messages.grid_columnconfigure(0, weight=1)
for i in range(1):
f_messages.grid_rowconfigure(i, weight=1)
l_messages = Label(f_messages, font=("Helvetica", 16), textvariable=gui.get_message(), bg="cyan") # #9BFBFB
l_messages.grid(column=0, row=0, sticky="news")
#event loop
root.mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
198,
11748,
256,
74,
3849,
13,
69,
3902,
498,
519,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
6738,
2198,
62,
15414,
1330,
23412,
11,
23412,
12331,
198,
11748,
15358,
64,
62,
565,
671,
198,
... | 2.486505 | 2,927 |
from __future__ import print_function
import os
import re
import numpy as np
import pdb
from scipy import stats
# Aggregate test RMSE across seeds for each dataset and print mean +/- std.
line_num = -1  # read the last line of each log file
seed_range = range(1, 6)
datasets = ['yahoo_music', 'douban', 'flixster']
prefixs = ['_s']
print()
for prefix in prefixs:
    print('Results of ' + prefix)
    for dataset in datasets:
        res_base = 'results/' + dataset + prefix
        RMSE = []
        for seed in seed_range:
            # Final log line is assumed to end with the test RMSE value.
            res_dir = res_base + str(seed) + '_testmode/log.txt'
            with open(res_dir, 'r') as f:
                line = f.readlines()[line_num]
                rmse = float(line.split(' ')[-1])
            RMSE.append(rmse)
        RMSE = np.array(RMSE)
        print('\033[91m Results of ' + dataset + '\033[00m')
        print(RMSE)
        print('Mean and std of test rmse:')
        # BUG FIX: raw string -- '\p' is an invalid escape sequence and
        # triggers a SyntaxWarning on modern Python; the printed LaTeX-style
        # text ("$\pm$") is unchanged.
        print(r'%.4f$\pm$%.4f' %
              (np.around(np.mean(RMSE), 4), np.around(np.std(RMSE), 4)))
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
279,
9945,
198,
6738,
629,
541,
88,
1330,
9756,
198,
198,
1370,
62,
22510,
796,
532,
16,
198,
198,
2882... | 2.060948 | 443 |
import random
import sys
import time
import re
import os
from contextlib import contextmanager
from uuid import uuid4
import logbook
import click
from .bootstrapping import requires_env
_DATABASE_URI_RE = re.compile(r"(?P<driver>(?P<db_type>sqlite|postgresql)(\+.*)?):\/\/(?P<host>[^/]*)\/(?P<db>.+)")
@click.group()
@db.command()
@requires_env("app")
@db.command()
@db.command()
@requires_env("app")
@db.command()
@requires_env("app")
@db.command()
@requires_env("app")
@db.command()
@requires_env("app")
@contextmanager
@db.command()
@requires_env("app", "develop")
@contextmanager
| [
11748,
4738,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
302,
198,
11748,
28686,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
11748,
2604,
2070,
198,
198,
11748,
3904,
198,
198,
67... | 2.626609 | 233 |
# Bank Program
import sqlite3
import os.path
# Gets the directory path where the db is located and links the program to the db
dirPath = os.path.dirname(os.path.abspath(__file__))
db = os.path.join(dirPath, "bankari.db")
conn = sqlite3.connect(db)
source = conn.execute(''' SELECT user, checking, savings FROM accounts ''')
# Startup Function
# Deposit Function
# Withdraw Function
# Transfer Function
# Exit Function
# Interface Actions Function
# Program Spin Up
# Pulls in data from db, formats and assigns to variables
# NOTE(review): the function bodies belonging under the section headers
# above (including `start`, called at the bottom) are missing from this
# excerpt; as shown, the final call raises NameError.  Also note: if the
# accounts table is empty, `checking`/`savings` below are never bound.
for row in source:
    checking = "{:.2f}".format(row[1])
    savings = "{:.2f}".format(row[2])
user = input('Please enter your username: ')
start(checking, savings)
2,
5018,
6118,
201,
198,
201,
198,
11748,
44161,
578,
18,
201,
198,
11748,
28686,
13,
6978,
201,
198,
201,
198,
2,
29620,
262,
8619,
3108,
810,
262,
20613,
318,
5140,
290,
6117,
262,
1430,
284,
262,
20613,
201,
198,
15908,
15235,
79... | 2.885375 | 253 |
# -----------------------------------------------------------------------------
# A Three-Pronged Approach to Exploring the Limits of Static Malware Analyses:
# Callsite Parameter Cardinality (CPC) Counting: ida_cpc_extract.py
#
# The driver for ennumerating CPC for a Linux AMD64 binary using IDA Pro.
#
# Luke Jones (luke.t.jones.814@gmail.com)
#
# The MIT License (MIT)
# Copyright (c) 2016 Chthonian Cyber Services
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# -----------------------------------------------------------------------------
# Notes
# * "arg regs" are often referenced. These are the "argument registers" used by
# the System V calling convention. They can be found in asm_helper.py
# * "caller" variables are abbreviated with "er" and "callee" with "ee"
# * "ea" stands for "effective address"
# * Dictionaries are in the form "key_type"_to_"value_type"
# * Lists are in a pluralized form, "ea" becomes "eas" etc.
# -----------------------------------------------------------------------------
from idaapi import *
from idautils import *
from idc import *
import re
import sys
import copy
import asm_helper
import callee_context
import caller_context
import operands
idaapi.require("asm_helper")
idaapi.require("callee_context")
idaapi.require("caller_context")
idaapi.require("operands")
BATCH_MODE = True # switch to false if testing manually in IDA
MAX_CALLEE_RECURSION = 4 # how far to pursue child contexts in callee
# analysis
MAX_CALLEE_SWEEP = 1000 # how many bytes past function start to analyze
# for callee analysis
MAX_ARG_REGS = 14
INVALID_CPC = -1
DICT_OUTPUT = False # output function name to cpc dictionary
CPC_OUTPUT = False # output cpc chains
NAME_DEBUG = False # include function name with cpc chain
SPLIT_CPC = False # split CPC value into integer and float parts
# (more correct but harder to debug as split)
# set to true if using testing framework
CALLER_CPC_THRESH = 0.75 # What percentage of caller determined cpcs
# must agree for value to be considered as cpc
CALLER_CONTEXT_REFRESH = 15 # how many instructions w/o arg reg before context reset
SEP = "," # what to print between cpc chains
f_ea_to_ee_ctx = dict() # function ea -> resulting context from callee
# analysis
f_ea_to_er_ctxs = dict() # function ea -> list of resulting contexts
# from caller analysis at each callsite
f_eas, f_names, f_ea_to_name = list(), list(), dict()
def caller_arg_analysis(ea):
    """
    Linearly proceeds through whole binary, spinning off callee analyses at
    callsites and recording possible CPCs at each callsite
    :param ea: Effective address where analysis is started
    :return: Linear list of all called effective addresses
    """
    # SEP markers are appended at every function boundary so the caller can
    # split the flat list back into per-function CPC chains.  Relies on the
    # module-level f_eas / f_names lists being sorted and populated.
    dst_eas = list()
    er_ctx = caller_context.CallerContext()
    i_nextf = 0
    i_ins = 0
    for h_ea in Heads(SegStart(ea), SegEnd(ea)):
        if h_ea >= f_eas[i_nextf]: # have we reached the next function?
            if NAME_DEBUG:
                dst_eas.append(SEP + f_names[i_nextf] + ": ")
            else:
                dst_eas.append(SEP)
            er_ctx.reset()
            i_nextf += 1
        if i_ins >= CALLER_CONTEXT_REFRESH: # have we passed so many instructions without a set arg reg?
            i_ins = 0
            er_ctx.reset()
        # Only decode actual instruction heads; data heads are skipped.
        if isCode(GetFlags(h_ea)):
            mnem = GetMnem(h_ea)
            ops = operands.Operands(h_ea)
            i_curf = i_nextf-1
            if asm_helper.is_jmp(mnem) or asm_helper.is_call(mnem):
                er_ctx, dst_eas = caller_add_contexts(h_ea, mnem, ops, i_curf, er_ctx, dst_eas)
            er_ctx, i_ins = caller_update_context(h_ea, mnem, ops, er_ctx, i_ins)
            i_ins += 1
    return dst_eas
def caller_add_contexts(h_ea, mnem, ops, i_curf, er_ctx, dst_eas):
    """
    At a function call, adds a caller context and callee context for the
    callsite. Multiple caller contexts are created but only one callee context
    is created
    :param h_ea: effective address of the call instruction
    :param mnem: mnemonic of call instruction
    :param ops: operands object of the call instruction
    :param i_curf: index of the current function
    :param er_ctx: caller context
    :param dst_eas: destination or called effective addresses
    :return: er_ctx, dst_eas

    Side effects: memoizes callee results in the module-level f_ea_to_ee_ctx
    dict and appends a snapshot of the caller context to f_ea_to_er_ctxs.
    """
    if is_addr(ops.o1.type):
        called_ea = ops.o1.val
        # Only direct calls to functions inside this binary are analyzed.
        if called_ea in f_eas:
            #debug target func names of cpc chain
            if f_names[i_curf] == '/debug_function/':
                print("%x: %s" % (h_ea, f_ea_to_name[called_ea]))
            # Callee analysis is memoized: run once per target function.
            ee_ctx = f_ea_to_ee_ctx.get(called_ea, None)
            if ee_ctx is None:
                j_f = f_eas.index(called_ea)
                j_nextf = j_f + 1
                #debug callee analysis
                if f_ea_to_name[called_ea] == '/debug_function/':
                    ee_ctx = callee_arg_analysis(called_ea, True, f_eas[j_nextf], 0)
                else:
                    ee_ctx = callee_arg_analysis(called_ea, False, f_eas[j_nextf], 0)
                f_ea_to_ee_ctx[called_ea] = ee_ctx
            # ltj: move this out one indent to make er contexts for all calls,
            # not just internal calls.
            # ------------------------------------------------------
            if called_ea != f_eas[i_curf]: #called_ea not recursive
                l = f_ea_to_er_ctxs.get(called_ea, None)
                if l is None:
                    f_ea_to_er_ctxs[called_ea] = list()
                # Snapshot the caller context at this callsite (shallow copy).
                cur_context = copy.copy(er_ctx)
                f_ea_to_er_ctxs[called_ea].append(cur_context)
                er_ctx.reset()
            else:
                #print skipped functions:
                #print("called_ea: %x. func: %s" % (called_ea,func_name_list[i_nextf-1]))
                pass
            dst_eas.append(called_ea)
    # ------------------------------------------------------
    if asm_helper.is_call(mnem):
        # ltj:keeping this in case parsing plt at beginning doesn't always work
        # add target function name to dictionary
        # try:
        #    func_dict[called_ea]
        # except KeyError:
        #    func_dict[called_ea] = GetFunctionName(called_ea)
        er_ctx.reset()
    return er_ctx, dst_eas
def caller_update_context(h_ea, mnem, ops, er_ctx, i_ins):
    """
    Updates the caller context with appropriate registers set and used
    :param h_ea: effective address of the instruction we're updating with
    :param mnem: mnemonic of the instruction we're updating with
    :param ops: operands of the instruction we're updating with
    :param er_ctx: caller context to update
    :param i_ins: count of instructions since an arg reg setter has been seen
    :return: er_ctx, i_ins
    """
    # NOTE(review): `debug` is not a parameter here and no module-level
    # definition is visible in this excerpt -- confirm it exists, otherwise
    # each `if debug:` test raises NameError.
    # i_ins is reset to 0 whenever this instruction *writes* an arg register,
    # which keeps the caller context alive (see CALLER_CONTEXT_REFRESH).
    if ops.count == 0:
        if debug:
            print("%x: %s" % (h_ea, mnem))
    if ops.count == 1:
        if debug:
            print("%x: %s %s" % (h_ea, mnem, ops.o1.text))
        if ops.o1.type == o_reg:
            if asm_helper.is_arg_reg(ops.o1.text):
                if mnem in asm_helper.r_group:
                    er_ctx.add_src_arg(ops.o1.text)
                elif mnem in asm_helper.w_group or mnem in asm_helper.rw_group:
                    er_ctx.add_set_arg(ops.o1.text)
                    i_ins = 0
                else:
                    print("Unrecognized mnemonic: %x: %s %s" % (h_ea, mnem, ops.o1.text))
        if ops.o1.type == o_phrase or ops.o1.type == o_displ: #o_displ is part of idaapi - more details
            for arg in arg_extract(ops.o1.text):
                er_ctx.add_src_arg(arg)
    if ops.count == 2:
        if debug:
            print("%x: %s %s %s" % (h_ea, mnem, ops.o1.text, ops.o2.text))
        # XOR REG1 REG1 case:
        if ops.o1.text == ops.o2.text:
            if mnem in asm_helper.xor_insts or mnem in asm_helper.xorx_insts:
                er_ctx.add_set_arg(ops.o1.text)
                i_ins = 0
        # Second operand is a source: register use or memory-operand regs.
        if ops.o2.type == o_reg:
            if asm_helper.is_arg_reg(ops.o2.text):
                er_ctx.add_src_arg(ops.o2.text)
        elif ops.o2.type == o_phrase or ops.o2.type == o_displ:
            for arg in arg_extract(ops.o2.text):
                er_ctx.add_src_arg(arg)
        # First operand may be written, read, or both depending on mnemonic.
        if ops.o1.type == o_reg:
            if asm_helper.is_arg_reg(ops.o1.text):
                if mnem in asm_helper.w_r_group or mnem in asm_helper.rw_r_group:
                    er_ctx.add_set_arg(ops.o1.text)
                    i_ins = 0
                elif mnem in asm_helper.r_r_group:
                    er_ctx.add_src_arg(ops.o1.text)
                else:
                    print("Unrecognized mnemonic: %x: %s %s %s" % (h_ea, mnem, ops.o1.text, ops.o2.text))
        elif ops.o1.type == o_phrase or ops.o1.type == o_displ:
            for arg in arg_extract(ops.o1.text):
                er_ctx.add_src_arg(arg)
    if ops.count == 3:
        if debug:
            print("%x: %s %s %s %s" % (h_ea, mnem, ops.o1.text, ops.o2.text, ops.o3.text))
        # Three-operand forms: first operand treated as destination, the
        # remaining operands as sources.
        if ops.o1.type == o_reg:
            if asm_helper.is_arg_reg(ops.o1.text):
                er_ctx.add_set_arg(ops.o1.text)
                i_ins = 0
        elif ops.o1.type == o_phrase or ops.o1.type == o_displ:
            for arg in arg_extract(ops.o1.text):
                er_ctx.add_src_arg(arg)
        if ops.o2.type == o_reg:
            if asm_helper.is_arg_reg(ops.o2.text):
                er_ctx.add_src_arg(ops.o2.text)
        elif ops.o2.type == o_phrase or ops.o2.type == o_displ:
            for arg in arg_extract(ops.o2.text):
                er_ctx.add_src_arg(arg)
        if ops.o3.type == o_reg:
            if asm_helper.is_arg_reg(ops.o3.text):
                er_ctx.add_src_arg(ops.o3.text)
        elif ops.o3.type == o_phrase or ops.o3.type == o_displ:
            for arg in arg_extract(ops.o3.text):
                er_ctx.add_src_arg(arg)
    return er_ctx, i_ins
def callee_arg_analysis(cur_f_ea, debug, next_f_ea, depth):
    """
    Analyzing a callee for number of arguments
    :param cur_f_ea: effective address that callee starts at
    :param debug: enable debugging or not
    :param next_f_ea: effective address of next function
    :param depth: how deep in recursion this call is
    :return: ee_ctx, a callee context
    """
    if debug:
        print("next_func_ea:%x" % next_f_ea)
    ee_ctx = callee_context.CalleeContext()
    stack_args = list()
    f = idaapi.get_func(cur_f_ea)
    # Register variables alias arg regs under IDA-renamed names; fold the
    # aliases into the context.  NOTE(review): add_aliased_regs is not
    # defined in this excerpt -- presumably elsewhere in the module.
    if f.regvarqty > 0:
        add_aliased_regs(f, cur_f_ea, ee_ctx, f.regvarqty)
    # Sweep at most MAX_CALLEE_SWEEP bytes of the function body.
    for h_ea in Heads(cur_f_ea, cur_f_ea+MAX_CALLEE_SWEEP):
        # if we've reached the next function
        if h_ea >= next_f_ea:
            break
        mnem = GetMnem(h_ea)
        ops = operands.Operands(h_ea)
        if "+arg_" in ops.o2.text:
            stack_args = add_stack_arg(stack_args, ops, debug)
        # NOTE(review): this branch checks ops.o3 but add_stack_arg always
        # records ops.o2.text -- looks like it should record the third
        # operand; verify against the original intent.
        if "+arg_" in ops.o3.text:
            stack_args = add_stack_arg(stack_args, ops, debug)
        if asm_helper.is_jmp(mnem) or asm_helper.is_call(mnem):
            b, ee_ctx = callee_add_child_context(ops, ee_ctx, depth)
            if b:
                break
        ee_ctx = callee_update_context(h_ea, mnem, ops, ee_ctx, debug)
    if debug:
        print("stack_args len: %d" % len(stack_args))
    ee_ctx.stack_arg_count = len(stack_args)
    if debug:
        ee_ctx.print_arg_regs()
    return ee_ctx
def callee_add_child_context(ops, ee_ctx, depth):
    """
    Add child callee context at new function call to parent callee context
    :param ops: operands of call instruction
    :param ee_ctx: parent callee context
    :param depth: depth of recursion
    :return: b, ee_ctx (b is boolean on whether to break callee arg analysis loop)
    """
    # NOTE(review): `debug` below is neither a parameter nor visibly defined
    # at module scope in this excerpt -- confirm it exists before relying on
    # the recursion path.
    b = False
    if is_addr(ops.o1.type):
        called_ea = ops.o1.val
        if called_ea in f_eas:
            # Recurse into internal callees up to MAX_CALLEE_RECURSION deep,
            # memoizing each result in f_ea_to_ee_ctx.
            if depth < MAX_CALLEE_RECURSION:
                child_ee_ctx = f_ea_to_ee_ctx.get(called_ea, None)
                if child_ee_ctx is None:
                    j_f = f_eas.index(called_ea)
                    j_nextf = j_f + 1
                    if f_ea_to_name[called_ea] == '/debug_func_name/':
                        child_ee_ctx = callee_arg_analysis(called_ea, True, f_eas[j_nextf], depth + 1)
                    else:
                        child_ee_ctx = callee_arg_analysis(called_ea, False, f_eas[j_nextf], depth + 1)
                    f_ea_to_ee_ctx[called_ea] = child_ee_ctx
                cpc = child_ee_ctx.calculate_cpc()
                if debug:
                    print("child cpc: %d" % cpc)
                if cpc < 14: # ltj: imprecise checking for varargs
                    ee_ctx.add_child_context(child_ee_ctx)
        b = True # whether to break callee_arg_analysis loop
    return b, ee_ctx
def callee_update_context(h_ea, mnem, ops, ee_ctx, debug):
    """
    Updates callee context with arg regs used but not set
    :param h_ea: effective address of instruction updating context
    :param mnem: mnemonic of instruction updating context
    :param ops: operands of instruction updating context
    :param ee_ctx: callee context
    :param debug: debug or not
    :return: ee_ctx
    """
    # Mirrors caller_update_context, but records into the callee context:
    # a register read before being written counts as an incoming argument.
    if ops.count == 0:
        if debug:
            print("%x: %s" % (h_ea, mnem))
    # Add source and set register arguments for instruction with 1 operand
    if ops.count == 1:
        if debug:
            print("%x: %s %s" % (h_ea, mnem, ops.o1.text))
        if ops.o1.type == o_reg:
            if asm_helper.is_arg_reg(ops.o1.text):
                if mnem in asm_helper.r_group or mnem in asm_helper.rw_group:
                    added = ee_ctx.add_src_arg(ops.o1.text)
                    if debug and added:
                        print("%s added" % ops.o1.text)
                elif mnem in asm_helper.w_group:
                    ee_ctx.add_set_arg(ops.o1.text)
                else:
                    print("Unrecognized mnemonic: %x: %s %s" % (h_ea, mnem, ops.o1.text))
        if ops.o1.type == o_phrase or ops.o1.type == o_displ:
            for arg in arg_extract(ops.o1.text):
                added = ee_ctx.add_src_arg(arg)
                if debug and added:
                    print("%s arg added" % arg)
    # Add source and set register arguments for instruction with 2 operands
    if ops.count == 2:
        if debug:
            print("%x: %s %s %s" % (h_ea, mnem, ops.o1.text, ops.o2.text))
        # XOR REG1 REG1 case:
        if ops.o1.text == ops.o2.text:
            if mnem in asm_helper.xor_insts or mnem in asm_helper.xorx_insts:
                ee_ctx.add_set_arg(ops.o1.text)
        if ops.o2.type == o_reg:
            if asm_helper.is_arg_reg(ops.o2.text):
                added = ee_ctx.add_src_arg(ops.o2.text)
                if debug and added:
                    print("%s added" % ops.o2.text)
        elif ops.o2.type == o_phrase or ops.o2.type == o_displ:
            for arg in arg_extract(ops.o2.text):
                added = ee_ctx.add_src_arg(arg)
                if debug and added:
                    print("%s arg added" % arg)
        if ops.o1.type == o_reg:
            if asm_helper.is_arg_reg(ops.o1.text):
                if mnem in asm_helper.w_r_group:
                    ee_ctx.add_set_arg(ops.o1.text)
                elif mnem in asm_helper.r_r_group or mnem in asm_helper.rw_r_group:
                    added = ee_ctx.add_src_arg(ops.o1.text)
                    if debug and added:
                        print("%s added" % ops.o1.text)
                else:
                    print("Unrecognized mnemonic: %x: %s %s %s" % (h_ea, mnem, ops.o1.text, ops.o2.text))
        elif ops.o1.type == o_phrase or ops.o1.type == o_displ:
            for arg in arg_extract(ops.o1.text):
                added = ee_ctx.add_src_arg(arg)
                if debug and added:
                    print("%s arg added" % arg)
    # Add source and set register arguments for instruction with 3 operands
    if ops.count == 3:
        if debug:
            print("%x: %s %s %s %s" % (h_ea, mnem, ops.o1.text, ops.o2.text, ops.o3.text))
        if ops.o1.type == o_reg:
            if asm_helper.is_arg_reg(ops.o1.text):
                ee_ctx.add_set_arg(ops.o1.text)
        elif ops.o1.type == o_phrase or ops.o1.type == o_displ:
            for arg in arg_extract(ops.o1.text):
                added = ee_ctx.add_src_arg(arg)
                if debug and added:
                    print("%s arg added" % arg)
        if ops.o2.type == o_reg:
            if asm_helper.is_arg_reg(ops.o2.text):
                added = ee_ctx.add_src_arg(ops.o2.text)
                if debug and added:
                    print("%s added" % ops.o2.text)
        elif ops.o2.type == o_phrase or ops.o2.type == o_displ:
            for arg in arg_extract(ops.o2.text):
                added = ee_ctx.add_src_arg(arg)
                if debug and added:
                    print("%s arg added" % arg)
        if ops.o3.type == o_reg:
            if asm_helper.is_arg_reg(ops.o3.text):
                added = ee_ctx.add_src_arg(ops.o3.text)
                if debug and added:
                    print("%s added" % ops.o3.text)
        elif ops.o3.type == o_phrase or ops.o3.type == o_displ:
            for arg in arg_extract(ops.o3.text):
                added = ee_ctx.add_src_arg(arg)
                if debug and added:
                    print("%s arg added" % arg)
    return ee_ctx
def add_stack_arg(stack_args, ops, debug):
"""
Add second operand to stack_args
:param stack_args: current arguments from stack
:param ops: operands with second operand to add to stack
:param debug: debug prints or not
:return: stack_args
"""
if ops.o2.text not in stack_args:
stack_args.append(ops.o2.text)
if debug:
print("stack arg: %s" % ops.o2.text)
return stack_args
def arg_extract(opnd):
"""
Extracts all argument registers found in an operand
:param opnd: the operand to search for argument registers
:return: list of arguments found in operand.
"""
arg_list = list()
arg_rdi = check_arg(asm_helper.arg_reg_rdi, opnd)
arg_rsi = check_arg(asm_helper.arg_reg_rsi, opnd)
arg_rdx = check_arg(asm_helper.arg_reg_rdx, opnd)
arg_rcx = check_arg(asm_helper.arg_reg_rcx, opnd)
arg_r10 = check_arg(asm_helper.arg_reg_r10, opnd)
arg_r8 = check_arg(asm_helper.arg_reg_r8, opnd)
arg_r9 = check_arg(asm_helper.arg_reg_r9, opnd)
arg_xmm0 = check_arg(asm_helper.arg_reg_xmm0, opnd)
arg_xmm1 = check_arg(asm_helper.arg_reg_xmm1, opnd)
arg_xmm2 = check_arg(asm_helper.arg_reg_xmm2, opnd)
arg_xmm3 = check_arg(asm_helper.arg_reg_xmm3, opnd)
arg_xmm4 = check_arg(asm_helper.arg_reg_xmm4, opnd)
arg_xmm5 = check_arg(asm_helper.arg_reg_xmm5, opnd)
arg_xmm6 = check_arg(asm_helper.arg_reg_xmm6, opnd)
arg_xmm7 = check_arg(asm_helper.arg_reg_xmm7, opnd)
if arg_rdi != "":
arg_list.append(arg_rdi)
if arg_rsi != "":
arg_list.append(arg_rsi)
if arg_rdx != "":
arg_list.append(arg_rdx)
if arg_rcx != "":
arg_list.append(arg_rcx)
if arg_r10 != "":
arg_list.append(arg_r10)
if arg_r8 != "":
arg_list.append(arg_r8)
if arg_r9 != "":
arg_list.append(arg_r9)
if arg_xmm0 != "":
arg_list.append(arg_xmm0)
if arg_xmm1 != "":
arg_list.append(arg_xmm1)
if arg_xmm2 != "":
arg_list.append(arg_xmm2)
if arg_xmm3 != "":
arg_list.append(arg_xmm3)
if arg_xmm4 != "":
arg_list.append(arg_xmm4)
if arg_xmm5 != "":
arg_list.append(arg_xmm5)
if arg_xmm6 != "":
arg_list.append(arg_xmm6)
if arg_xmm7 != "":
arg_list.append(arg_xmm7)
return arg_list
def check_arg(arg_regs, opnd):
"""
Check for argument register text in various possible formats
:param arg_regs: list of argument registers
:param opnd: operand to search for matches
:return: register text if found in opnd
"""
for reg in arg_regs:
# if reg in opnd:
m = re.search('[+*\[]'+reg+'[+*\]]', opnd)
if m is not None:
return reg
return ""
def add_aliased_regs(f, ea, context):
"""
Goes through every possible argument register and determines if function
is calling it something else. Adds them as src args
:param f: idaapi function
:param ea: effective address of function
:param context: context to add arg regs to
:return: none
"""
for reg in asm_helper.arg_regs_all:
rv = idaapi.find_regvar(f, ea, reg)
if rv is not None:
# ltj: simplistic way is assuming that this regvar is used as src
# ltj: make this more robust by just adding it to list of possible
# names of arg reg for this function.
context.add_src_arg(reg)
def is_addr(op_type):
"""
Is op_type an address type?
:param op_type: op_type to check
:return: Bool
"""
if op_type == o_near or op_type == o_far:
return True
else:
return False
def construct_cpc_aggregate(dst_eas):
"""
Chooses between caller(s) or callee CPC to use as final output
:param dst_eas: All the called functions
:return: dst_eas and a dictionary of function ea to cpc
"""
dst_cpcs, f_ea_to_cpc = "", dict()
for ea in f_ea_to_ee_ctx:
ee_cpc = f_ea_to_ee_ctx[ea].calculate_cpc()
ee_cpcspl = f_ea_to_ee_ctx[ea].calculate_cpc_split()
try:
er_cpcs, er_cpcspls = list(), list()
for er_cxt in f_ea_to_er_ctxs[ea]:
er_cpcs.append(er_cxt.calculate_cpc())
er_cpcspls.append(er_cxt.calculate_cpc_split())
del f_ea_to_er_ctxs[ea] # so remainder can be handled later
maj, er_cpc, er_cpcspl = find_most_frequent_cpc(er_cpcs, er_cpcspls)
if ee_cpc >= MAX_ARG_REGS:
ee_cpc = INVALID_CPC
else:
if maj < CALLER_CPC_THRESH:
er_cpc = INVALID_CPC
if er_cpc > ee_cpc:
if SPLIT_CPC:
f_ea_to_cpc[ea] = er_cpcspl
else:
f_ea_to_cpc[ea] = er_cpc
else:
if SPLIT_CPC:
f_ea_to_cpc[ea] = ee_cpcspl
else:
f_ea_to_cpc[ea] = ee_cpc
except KeyError: #TODO: what could throw this exception?
if SPLIT_CPC:
f_ea_to_cpc[ea] = ee_cpcspl
else:
f_ea_to_cpc[ea] = ee_cpc
# now check remaining contexts in caller_context_dict
for ea in f_ea_to_er_ctxs:
er_cpcs, er_cpcspls = list(), list()
for er_cxt in f_ea_to_er_ctxs[ea]:
er_cpcs.append(er_cxt.calculate_cpc())
er_cpcspls.append(er_cxt.calculate_cpc_split())
maj, er_cpc, er_cpcspl = find_most_frequent_cpc(er_cpcs, er_cpcspls)
if SPLIT_CPC:
f_ea_to_cpc[ea] = er_cpcspl
else:
f_ea_to_cpc[ea] = er_cpc
for ea in dst_eas:
if SEP in str(ea):
dst_cpcs += ea
else:
dst_cpcs += str(f_ea_to_cpc[ea])
return dst_cpcs, f_ea_to_cpc
def find_most_frequent_cpc(er_cpcs, er_cpcspls):
"""
Out of all the caller cpcs, find the most common ont
:param er_cpcs: caller cpcs
:param er_cpcspls: caller cpcs, split between integer and float arguments
:return: the percentage that the most common cpc takes up, and the chosen cpc
"""
max_num = 0
er_cpc = -1
er_cpcspl = ""
for i in range(0,len(er_cpcs)):
cpc = er_cpcs[i]
if er_cpcs.count(cpc) > max_num:
max_num = er_cpcs.count(cpc)
er_cpc = cpc
er_cpcspl = er_cpcspls[i]
maj = float(max_num) / float(len(er_cpcs))
return maj, er_cpc, er_cpcspl
def output_cpc(dst_cpcs, f_ea_to_cpc):
"""
Output results as either list of cpcs or dictionary
:param dst_cpcs: all the called function's cpcs
:param f_ea_to_cpc: dictionary of function ea to cpc
:return: none
"""
if CPC_OUTPUT:
filename = GetInputFilePath() + ".cpc." + ext
f = open(filename, 'w')
f.write(dst_cpcs)
f.close()
elif DICT_OUTPUT:
dict_out = ""
for ea in f_ea_to_cpc:
try:
dict_out += f_ea_to_name[ea] + ": " + str(f_ea_to_cpc[ea]) + "\n"
except KeyError:
pass
# debug:
# dict_out += str(ea) + " not found as start of function"
print dict_out
filename = GetInputFilePath() + ".cpc." + ext
f = open(filename, 'w')
f.write(dict_out)
f.close()
def get_functions_in_section(ea):
"""
Fill in function eas list, function names list and function ea to name
dictionary
:param ea: effective address of section to start finding functions
:return: f_eas, f_names, f_ea_to_name
"""
for f_ea in Functions(SegStart(ea), SegEnd(ea)):
f_eas.append(f_ea)
f_names.append(GetFunctionName(f_ea))
f_ea_to_name[f_ea] = GetFunctionName(f_ea)
return f_eas, f_names, f_ea_to_name
if __name__ == '__main__':
if BATCH_MODE:
if ARGV[1] == '-c':
SEP = ","
CPC_OUTPUT = True
ext = "chain"
elif ARGV[1] == '-f':
SEP = "\n"
NAME_DEBUG = True
CPC_OUTPUT = True
ext = "func"
elif ARGV[1] == '-l':
SEP = "\n"
CPC_OUTPUT = True
ext = "feature"
elif ARGV[1] == '-d':
DICT_OUTPUT = True
ext = "dict"
else:
print("Must pass -c (chain), -f (per function), -l (list), or -d (dictionary)")
sys.exit(1)
debug = False
autoWait()
print("Starting")
textSel = SegByName(".text")
textEa = SegByBase(textSel)
pltSel = SegByName(".plt")
pltEa = SegByBase(pltSel)
# find functions so we can easily tell function boundaries, debug specific
# functions and find jumps to functions
f_eas, f_names, f_ea_to_name = get_functions_in_section(textEa)
f_eas, f_names, f_ea_to_name = get_functions_in_section(pltEa)
f_eas.append(sys.maxint)
# visit every callsite, start callee analyses at callsites,
# build context dicts, return called addresses chained per function
dst_eas = caller_arg_analysis(debug, textEa)
dst_cpcs, f_ea_to_cpc = "", dict()
dst_cpcs, f_ea_to_cpc = construct_cpc_aggregate(dst_eas)
output_cpc(dst_cpcs, f_ea_to_cpc)
print("Finished")
if BATCH_MODE:
Exit(0) | [
2,
16529,
32501,
198,
2,
317,
7683,
12,
6836,
506,
276,
38066,
284,
5905,
3255,
262,
44943,
286,
36125,
4434,
1574,
1052,
43710,
25,
198,
2,
4889,
15654,
25139,
2357,
25564,
414,
357,
34,
5662,
8,
2764,
278,
25,
220,
3755,
62,
13155... | 1.989154 | 14,107 |
import graphene
from graphene_django import DjangoObjectType
from graphql import GraphQLError
from django.db.models import Q
from .models import Track, Like
from users.schema import UserType
| [
11748,
42463,
198,
6738,
42463,
62,
28241,
14208,
1330,
37770,
10267,
6030,
198,
6738,
4823,
13976,
1330,
29681,
48,
2538,
81,
1472,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
198,
6738,
764,
27530,
1330,
17762,
11,
... | 3.636364 | 55 |
############################################################
# GPU Implementation of Random Forest Classifier - Training
# v0.1
# Seymour Knowles-Barley
############################################################
# Based on c code from:
# http://code.google.com/p/randomforest-matlab/
# License: GPLv2
############################################################
import numpy as np
import sys
import h5py
import glob
import mahotas
import pycuda.autoinit
import pycuda.driver as cu
import pycuda.compiler as nvcc
import pycuda.gpuarray as gpuarray
gpu_randomforest_train_source = """
#include "curand_kernel.h"
#define NODE_TERMINAL -1
#define NODE_TOSPLIT -2
#define NODE_INTERIOR -3
__device__ void movedata() {
}
__device__ void sampledata(const int nclass, const int* nsamples, const int* samplefrom,
const int maxnsamples, int* bagstart, curandState_t *randstate)
{
//Select random samples
int iclass, isamp;
for (iclass=0; iclass < nclass; ++iclass) {
for (isamp=0; isamp < nsamples[iclass]; ++isamp) {
bagstart[isamp + iclass*maxnsamples] = curand(randstate) % samplefrom[iclass];
}
}
}
__device__ void sortbagbyx(
const float *baggedxstart, int totsamples, int mdim, int featurei, int *bagstart, int ndstart, int ndend, int *tempbagstart)
{
//Sort elements of bagstart (from ndstart to ndend) according to x values
//Write results into bagstart
int length = ndend-ndstart+1;
if (length == 1)
{
return;
}
int xstart = featurei * totsamples;
int *inbag = bagstart;
int *outbag = tempbagstart;
//For-loop merge sort
int i = 1;
int start1, start2, end1, end2, p1, p2, output;
while (i < length)
{
for (start1 = ndstart; start1 <= ndend; start1 += i*2)
{
end1 = start1 + i - 1;
start2 = start1 + i;
end2 = start2 + i - 1;
p1 = start1; p2 = start2;
output = start1;
while (p1 <= end1 && p1 <= ndend && p2 <= end2 && p2 <= ndend && output <= ndend)
{
if (baggedxstart[xstart + inbag[p1]] < baggedxstart[xstart + inbag[p2]])
{
outbag[output] = inbag[p1];
++p1;
}
else
{
outbag[output] = inbag[p2];
++p2;
}
++output;
}
while (p1 <= end1 && p1 <= ndend)
{
outbag[output] = inbag[p1];
++p1;
++output;
}
while (p2 <= end2 && p2 <= ndend)
{
outbag[output] = inbag[p2];
++p2;
++output;
}
}
//swap for next run
if (inbag == bagstart)
{
inbag = tempbagstart;
outbag = bagstart;
}
else
{
inbag = bagstart;
outbag = tempbagstart;
}
//Loop again with larger chunks
i *= 2;
}
//Copy output to bagstart (if necessary)
if (inbag == tempbagstart)
{
for (p1 = ndstart; p1 <= ndend; ++p1)
{
bagstart[p1] = tempbagstart[p1];
}
}
}
__device__ void findBestSplit(
const float *baggedxstart, const int *baggedclassstart, int mdim, int nclass, int *bagstart,
int totsamples, int k, int ndstart, int ndend, int *ndendl,
int *msplit, float *gini_score, float *best_split, int *best_split_index, bool *isTerminal,
int mtry, int idx, int maxTreeSize, int *classpop, float* classweights,
curandState_t *randstate,
int *wlstart, int *wrstart, int *dimtempstart, int *tempbagstart)
{
//Compute initial values of numerator and denominator of Gini
float gini_n = 0.0;
float gini_d = 0.0;
float gini_rightn, gini_rightd, gini_leftn, gini_leftd;
int ctreestart = k * nclass + nclass * idx * maxTreeSize;
int i;
for (i = 0; i < nclass; ++i)
{
gini_n += classpop[i + ctreestart] * classpop[i + ctreestart];
gini_d += classpop[i + ctreestart];
}
float gini_crit0 = gini_n / gini_d;
//start main loop through variables to find best split
float gini_critmax = -1.0e25;
float crit;
int trynum, featurei;
int maxfeature = mdim;
for (i = 0; i < mdim; ++i)
{
dimtempstart[i] = i;
}
*msplit = -1;
//for (trynum = 0; trynum < 1; ++trynum)
for (trynum = 0; trynum < mtry && trynum < mdim; ++trynum)
{
//Choose a random feature
i = curand(randstate) % maxfeature;
featurei = dimtempstart[i];
dimtempstart[i] = dimtempstart[maxfeature-1];
dimtempstart[maxfeature-1] = featurei;
--maxfeature;
//Sort according to this feature
sortbagbyx(baggedxstart, totsamples, mdim, featurei, bagstart, ndstart, ndend, tempbagstart);
//Split on numerical predictor featurei
gini_rightn = gini_n;
gini_rightd = gini_d;
gini_leftn = 0;
gini_leftd = 0;
for (i = 0; i < nclass; ++i)
{
wrstart[i] = classpop[i + ctreestart];
wlstart[i] = 0;
}
int splitpoint;
int splitxi;
float split_weight, thisx, nextx;
int split_class;
int ntie = 1;
//Loop through all possible split points
for (splitpoint = ndstart; splitpoint <= ndend-1; ++splitpoint)
{
//Get split details
splitxi = bagstart[splitpoint];
//Determine class based on index and nsamples vector
split_class = baggedclassstart[splitxi]-1;
split_weight = classweights[split_class];
//Update neumerator and demominator
gini_leftn += split_weight * (2 * wlstart[split_class] + split_weight);
gini_rightn += split_weight * (-2 * wrstart[split_class] + split_weight);
gini_leftd += split_weight;
gini_rightd -= split_weight;
wlstart[split_class] += split_weight;
wrstart[split_class] -= split_weight;
//Check if the next value is the same (no point splitting)
thisx = baggedxstart[splitxi + totsamples * featurei];
nextx = baggedxstart[bagstart[splitpoint+1] + totsamples * featurei];
if (thisx != nextx)
{
//Check if either node is empty (or very small to allow for float errors)
if (gini_rightd > 1.0e-5 && gini_leftd > 1.0e-5)
{
//Check the split
crit = (gini_leftn / gini_leftd) + (gini_rightn / gini_rightd);
if (crit > gini_critmax)
{
*best_split = (thisx + nextx) / 2;
*best_split_index = splitpoint;
gini_critmax = crit;
*msplit = featurei;
*ndendl = splitpoint;
ntie = 1;
}
else if (crit == gini_critmax)
{
++ntie;
//Break ties at random
if ((curand(randstate) % ntie) == 0)
{
*best_split = (thisx + nextx) / 2;
*best_split_index = splitpoint;
gini_critmax = crit;
*msplit = featurei;
*ndendl = splitpoint;
}
}
}
}
} // end splitpoint for
} // end trynum for
if (gini_critmax < -1.0e10 || *msplit == -1)
{
//We could not find a suitable split - mark as a terminal node
*isTerminal = true;
}
else if (*msplit != featurei)
{
//Resort for msplit (if necessary)
sortbagbyx(baggedxstart, totsamples, mdim, *msplit, bagstart, ndstart, ndend, tempbagstart);
}
*gini_score = gini_critmax - gini_crit0;
}
extern "C" __global__ void trainKernel(
const float *x, int n, int mdim, int nclass,
const int *classes, const int *classindex,
const int *nsamples, const int *samplefrom,
int maxnsamples,
unsigned long long seed, unsigned long long sequencestart,
int ntree, int maxTreeSize, int mtry, int nodeStopSize,
int *treemap, int *nodestatus, float *xbestsplit,
int *bestvar, int *nodeclass, int *ndbigtree,
int *nodestart, int *nodepop,
int *classpop, float *classweights,
int *weight_left, int *weight_right,
int *dimtemp, int *bagspace, int *tempbag, float *baggedx, int *baggedclass)
{
// Optional arguments for debug (place after xbestsplit): int *nbestsplit, float *bestgini,
int idx = threadIdx.x + blockDim.x * blockIdx.x;
//Make sure we don't overrun
if (idx < ntree) {
//Init random number generators (one for each thread)
curandState_t state;
curand_init(seed, sequencestart + idx, 0, &state);
int i,j,k,cioffset,bioffset;
int totsamples = 0;
for (i = 0; i < nclass; ++i){
totsamples += nsamples[i];
}
//Choose random samples for all classes
int *bagstart = bagspace + idx * nclass * maxnsamples;
int *tempbagstart = tempbag + idx * nclass * maxnsamples;
float *baggedxstart = baggedx + idx * mdim * totsamples;
int *baggedclassstart = baggedclass + idx * totsamples;
//TODO: offset weightleft, weightright and dimtemp !
sampledata(nclass, nsamples, samplefrom, maxnsamples, bagstart, &state);
//Remove gaps and index into x (instead of into class)
k = 0;
cioffset = 0;
bioffset = 0;
for (i = 0; i < nclass; ++i){
for (j = 0; j < nsamples[i]; ++j) {
//Move memory into local block?
int xindex = classindex[bagstart[j + i * maxnsamples] + cioffset];
int dimindex;
for (dimindex = 0; dimindex < mdim; ++dimindex){
baggedxstart[j + bioffset + totsamples * dimindex] = x[xindex + n * dimindex];
}
baggedclassstart[j + bioffset] = classes[xindex];
bagstart[k] = j + bioffset;
++k;
}
cioffset += samplefrom[i];
bioffset += nsamples[i];
classpop[i + idx * nclass * maxTreeSize] = nsamples[i];
}
//Wipe other values
for (;k < nclass * maxnsamples; ++k) {
bagstart[k] = -1;
}
int ndstart, ndend, ndendl;
int msplit, best_split_index;
float best_split, gini_score;
//Repeat findbestsplit until the tree is complete
int ncur = 0;
int treeoffset1 = idx * maxTreeSize;
int treeOffset2 = idx * 2 * maxTreeSize;
nodestart[treeoffset1] = 0;
nodepop[treeoffset1] = totsamples;
nodestatus[treeoffset1] = NODE_TOSPLIT;
for (k = 0; k < maxTreeSize-2; ++k) {
//Check for end of tree
if (k > ncur || ncur >= maxTreeSize - 2) break;
//Skip nodes we don't need to split
if (nodestatus[treeoffset1+k] != NODE_TOSPLIT) continue;
/* initialize for next call to findbestsplit */
ndstart = nodestart[treeoffset1 + k];
ndend = ndstart + nodepop[treeoffset1 + k] - 1;
bool isTerminal = false;
gini_score = 0.0;
best_split_index = -1;
findBestSplit(baggedxstart, baggedclassstart, mdim, nclass, bagstart, totsamples, k, ndstart, ndend, &ndendl,
&msplit, &gini_score, &best_split, &best_split_index, &isTerminal, mtry, idx, maxTreeSize, classpop, classweights,
&state, weight_left + nclass * idx, weight_right + nclass * idx, dimtemp + mdim * idx, tempbagstart);
if (isTerminal) {
/* Node is terminal: Mark it as such and move on to the next. */
nodestatus[k] = NODE_TERMINAL;
//bestvar[treeoffset1 + k] = 0;
//xbestsplit[treeoffset1 + k] = 0;
continue;
}
// this is a split node - prepare for next round
bestvar[treeoffset1 + k] = msplit + 1;
//bestgini[treeoffset1 + k] = gini_score;
xbestsplit[treeoffset1 + k] = best_split;
//nbestsplit[treeoffset1 + k] = best_split_index;
nodestatus[treeoffset1 + k] = NODE_INTERIOR;
//varUsed[msplit - 1] = 1;
//tgini[msplit - 1] += decsplit;
int leftk = ncur + 1;
int rightk = ncur + 2;
nodepop[treeoffset1 + leftk] = ndendl - ndstart + 1;
nodepop[treeoffset1 + rightk] = ndend - ndendl;
nodestart[treeoffset1 + leftk] = ndstart;
nodestart[treeoffset1 + rightk] = ndendl + 1;
// Check for terminal node conditions
nodestatus[treeoffset1 + leftk] = NODE_TOSPLIT;
if (nodepop[treeoffset1 + leftk] <= nodeStopSize) {
nodestatus[treeoffset1 + leftk] = NODE_TERMINAL;
}
nodestatus[treeoffset1 + rightk] = NODE_TOSPLIT;
if (nodepop[treeoffset1 + rightk] <= nodeStopSize) {
nodestatus[treeoffset1 + rightk] = NODE_TERMINAL;
}
//Calculate class populations
int nodeclass = 0;
int ctreestart_left = leftk * nclass + idx * nclass * maxTreeSize;
int ctreestart_right = rightk * nclass + idx * nclass * maxTreeSize;
for (i = ndstart; i <= ndendl; ++i) {
nodeclass = baggedclassstart[bagstart[i]]-1;
classpop[nodeclass + ctreestart_left] += classweights[nodeclass];
}
for (i = ndendl+1; i <= ndend; ++i) {
nodeclass = baggedclassstart[bagstart[i]]-1;
classpop[nodeclass + ctreestart_right] += classweights[nodeclass];
}
for(i = 0; i < nclass; ++i)
{
if (classpop[i + ctreestart_left] == nodepop[treeoffset1 + leftk])
{
nodestatus[treeoffset1 + leftk] = NODE_TERMINAL;
}
if (classpop[i + ctreestart_right] == nodepop[treeoffset1 + rightk])
{
nodestatus[treeoffset1 + rightk] = NODE_TERMINAL;
}
}
//Update treemap offset (indexed from 1 rather than 0)
treemap[treeOffset2 + k*2] = ncur + 2;
treemap[treeOffset2 + 1 + k*2] = ncur + 3;
ncur += 2;
}
//Tidy up
//TODO: Check results - should not be necessary to go up to maxTreeSize
ndbigtree[idx] = ncur+1;
//ndbigtree[idx] = maxTreeSize;
for(k = maxTreeSize-1; k >= 0; --k)
{
//if (nodestatus[treeoffset1 + k] == 0)
// --ndbigtree[idx];
if (nodestatus[treeoffset1 + k] == NODE_TOSPLIT)
nodestatus[treeoffset1 + k] = NODE_TERMINAL;
}
//Calculate prediction for terminal nodes
for (k = 0; k < maxTreeSize; ++k)
{
treeoffset1 = idx * maxTreeSize;
if (nodestatus[treeoffset1 + k] == NODE_TERMINAL)
{
int toppop = 0;
int ntie = 1;
for (i = 0; i < nclass; ++i)
{
int ctreeoffset = k * nclass + idx * nclass * maxTreeSize;
if (classpop[i + ctreeoffset] > toppop)
{
nodeclass[treeoffset1 + k] = i+1;
toppop = classpop[i + ctreeoffset];
}
//Break ties at random
if (classpop[i + ctreeoffset] == toppop)
{
++ntie;
if ((curand(&state) % ntie) == 0)
{
nodeclass[treeoffset1 + k] = i+1;
toppop = classpop[i + ctreeoffset];
}
}
}
}
}
//ndbigtree[idx] = idx;
}
}
"""
# input_image_folder = 'D:\\dev\\Rhoana\\classifierTraining\\membraneDetectionECSx4ds2\\'
# input_image_suffix = '_train.png'
# input_features_suffix = '_rhoanafeatures.hdf5'
# output_path = 'D:\\dev\\Rhoana\\classifierTraining\\membraneDetectionECSx4ds2\\rhoana_forest.hdf5'
input_image_folder = 'D:\\dev\\Rhoana\\classifierTraining\\Miketraining\\training2\\'
input_image_suffix = '_labeled_update.tif'
input_features_suffix = '.hdf5'
output_path = 'D:\\dev\\Rhoana\\classifierTraining\\Miketraining\\training2\\rhoana_forest_3class.hdf5'
# Prep the gpu function
gpu_train = nvcc.SourceModule(gpu_randomforest_train_source, no_extern_c=True).get_function('trainKernel')
# Load training data
files = sorted( glob.glob( input_image_folder + '\\*' + input_image_suffix ) )
# 2 Class
#class_colors = [[255,0,0], [0,255,0]]
#class_colors = [[255,85,255], [255,255,0]]
# 3 Class
#class_colors = [[255,0,0], [0,255,0], [0,0,255]]
#class_colors = [[255,85,255], [255,255,0], [0,255,255]]
class_colors = [0, 1, 2]
nclass = len(class_colors)
training_x = np.zeros((0,0), dtype=np.float32)
training_y = np.zeros((0,1), dtype=np.int32)
print 'Found {0} training images.'.format(len(files))
# Loop through all images
for file in files:
training_image = mahotas.imread(file)
for classi in range(nclass):
this_color = class_colors[classi]
# Find pixels for this class
class_indices = np.nonzero(np.logical_and(
training_image[:,:,this_color] > training_image[:,:,(this_color + 1) % 3],
training_image[:,:,this_color] > training_image[:,:,(this_color + 2) % 3]))
# class_indices = np.nonzero(np.logical_and(
# training_image[:,:,0] == this_color[0],
# training_image[:,:,1] == this_color[1],
# training_image[:,:,2] == this_color[2]))
# Add features to x and classes to y
training_y = np.concatenate((training_y, np.ones((len(class_indices[0]), 1), dtype=np.int32) * (classi + 1)))
# Load the features
f = h5py.File(file.replace(input_image_suffix, input_features_suffix), 'r')
nfeatures = len(f.keys())
train_features = np.zeros((nfeatures, len(class_indices[0])), dtype=np.float32)
for i,k in enumerate(f.keys()):
feature = f[k][...]
train_features[i,:] = feature[class_indices[0], class_indices[1]]
f.close()
if training_x.size > 0:
training_x = np.concatenate((training_x, train_features), axis=1)
else:
training_x = train_features
for classi in range(nclass):
print 'Class {0}: {1} training pixels.'.format(classi, np.sum(training_y == classi + 1))
# Train on GPU
ntree = np.int32(512)
mtry = np.int32(np.floor(np.sqrt(training_x.shape[0])))
#nsamples = np.ones((1,nclass), dtype=np.int32) * (training_x.shape[1] / nclass)
nsamples = np.ones((1,nclass), dtype=np.int32) * 1000
classweights = np.ones((1,nclass), dtype=np.float32)
# Sanity check
assert(training_x.shape[1] == training_y.shape[0])
# Random number seeds
seed = np.int64(42)
sequencestart = np.int64(43)
samplefrom = np.zeros((nclass), dtype=np.int32)
maxTreeSize = np.int32(2 * np.sum(nsamples) + 1)
nodeStopSize = np.int32(1)
for classi in range(nclass):
samplefrom[classi] = np.sum(training_y == (classi + 1))
maxnsamples = np.max(nsamples)
classindex = -1 * np.ones((np.max(samplefrom) * nclass), dtype=np.int32)
cioffset = 0
for classi in range(nclass):
classindex[cioffset:cioffset + samplefrom[classi]] = np.nonzero(training_y == (classi + 1))[0]
cioffset = cioffset + samplefrom[classi]
bagmem = -1 * np.ones((ntree, maxnsamples * nclass), dtype=np.int32)
d_bagspace = gpuarray.to_gpu(bagmem)
d_tempbag = gpuarray.to_gpu(bagmem)
bagmem = None
d_treemap = gpuarray.zeros((long(ntree * 2), long(maxTreeSize)), np.int32)
d_nodestatus = gpuarray.zeros((long(ntree), long(maxTreeSize)), np.int32)
d_xbestsplit = gpuarray.zeros((long(ntree), long(maxTreeSize)), np.float32)
#d_nbestsplit = gpuarray.zeros((long(ntree), long(maxTreeSize)), np.int32)
#d_bestgini = gpuarray.zeros((long(ntree), long(maxTreeSize)), np.float32)
d_bestvar = gpuarray.zeros((long(ntree), long(maxTreeSize)), np.int32)
d_nodeclass = gpuarray.zeros((long(ntree), long(maxTreeSize)), np.int32)
d_ndbigtree = gpuarray.zeros((long(ntree), 1), np.int32)
d_nodestart = gpuarray.zeros((long(ntree), long(maxTreeSize)), np.int32)
d_nodepop = gpuarray.zeros((long(ntree), long(maxTreeSize)), np.int32)
d_classpop = gpuarray.zeros((long(ntree), long(maxTreeSize*nclass)), np.int32)
d_classweights = gpuarray.to_gpu(classweights)
d_weight_left = gpuarray.zeros((long(ntree), long(nclass)), np.int32)
d_weight_right = gpuarray.zeros((long(ntree), long(nclass)), np.int32)
d_dimtemp = gpuarray.zeros((long(ntree), long(training_x.shape[0])), np.int32)
d_baggedx = gpuarray.zeros((long(np.sum(nsamples)*training_x.shape[0]), long(ntree)), np.float32)
d_baggedclass = gpuarray.zeros((long(ntree), long(np.sum(nsamples))), np.int32)
d_training_x = gpuarray.to_gpu(training_x)
d_training_y = gpuarray.to_gpu(training_y)
d_classindex = gpuarray.to_gpu(classindex)
d_nsamples = gpuarray.to_gpu(nsamples)
d_samplefrom = gpuarray.to_gpu(samplefrom)
threadsPerBlock = 32
block = (32, 1, 1)
grid = (int(ntree / block[0] + 1), 1)
gpu_train(d_training_x, np.int32(training_x.shape[1]), np.int32(training_x.shape[0]), np.int32(nclass),
d_training_y, d_classindex, d_nsamples, d_samplefrom,
np.int32(maxnsamples), seed, sequencestart, np.int32(ntree), np.int32(maxTreeSize), np.int32(mtry), np.int32(nodeStopSize),
d_treemap, d_nodestatus, d_xbestsplit,
d_bestvar, d_nodeclass, d_ndbigtree,
d_nodestart, d_nodepop,
d_classpop, d_classweights,
d_weight_left, d_weight_right,
d_dimtemp, d_bagspace, d_tempbag, d_baggedx, d_baggedclass,
block=block, grid=grid)
treemap = d_treemap.get()
nodestatus = d_nodestatus.get()
xbestsplit = d_xbestsplit.get()
bestvar = d_bestvar.get()
nodeclass = d_nodeclass.get()
ndbigtree = d_ndbigtree.get()
# Save results
out_hdf5 = h5py.File(output_path, 'w')
out_hdf5['/forest/treemap'] = treemap
out_hdf5['/forest/nodestatus'] = nodestatus
out_hdf5['/forest/xbestsplit'] = xbestsplit
out_hdf5['/forest/bestvar'] = bestvar
out_hdf5['/forest/nodeclass'] = nodeclass
out_hdf5['/forest/ndbigtree'] = ndbigtree
out_hdf5['/forest/nrnodes'] = maxTreeSize
out_hdf5['/forest/ntree'] = ntree
out_hdf5['/forest/nclass'] = nclass
out_hdf5['/forest/classweights'] = classweights
out_hdf5['/forest/mtry'] = mtry
out_hdf5.close()
| [
29113,
14468,
7804,
4242,
198,
2,
11362,
46333,
286,
14534,
9115,
5016,
7483,
532,
13614,
198,
2,
410,
15,
13,
16,
198,
2,
42843,
9365,
829,
12,
10374,
1636,
198,
29113,
14468,
7804,
4242,
198,
2,
13403,
319,
269,
2438,
422,
25,
198... | 2.314016 | 8,490 |
import fire
import pandas as pd
if __name__ == '__main__':
fire.Fire(splicing_exon_position)
| [
11748,
2046,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2046,
13,
13543,
7,
22018,
6345,
62,
1069,
261,
62,
9150,
8,
198
] | 2.564103 | 39 |
import os
from codecs import open
from setuptools import find_packages, setup
import backend
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(ROOT_DIR, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
install_requires = read_requirements('requirements.txt')
dev_requires = read_requirements('requirements-dev.txt')
setup(
name='flask_base',
version=backend.__version__,
description=backend.__doc__,
long_description=long_description,
url=backend.__homepage__,
author=backend.__author__,
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
packages=find_packages(exclude=['ansible', 'tests']),
install_requires=install_requires,
extras_require={'test': dev_requires, 'docs': dev_requires},
include_package_data=True,
zip_safe=False,
entry_points='''
[console_scripts]
flask=manage:main
''',
)
| [
11748,
28686,
198,
6738,
40481,
82,
1330,
1280,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
11748,
30203,
198,
198,
13252,
2394,
62,
34720,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
... | 2.591324 | 438 |
from rest_framework import viewsets, filters, permissions
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from core.serializers import (
CardCreateSerializer,
CardSerializer,
UserExtendedSerializer,
NfcCardCreateSerializer,
NfcCardSerializer,
GroupSerializer,
)
from core.models import Card, User, NfcCard, Group
from core.filters import CardFilter, UserFilter, NfcCardFilter
from core.permissions import CardPermission
| [
6738,
1334,
62,
30604,
1330,
5009,
1039,
11,
16628,
11,
21627,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
1148,
47649,
3474,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
198,
6... | 3.559524 | 168 |
#!/usr/bin/env python3-64
from __future__ import absolute_import, division, print_function, unicode_literals
# Install TensorFlow
import numpy as np
import networkx as nx
import json
import os
import sys
import yaml
loader = yaml.Loader
topology = nx.Graph()
LABEL_END = "_erlang"
with open("../Yml/topology.yml") as file:
data = yaml.load(file, Loader=loader)
nodes = [node["name"] for node in data["nodes"]]
topology.add_nodes_from(nodes)
for node in nodes:
topology.nodes[node]["volTTL"] = 0
links = [key for key in list(data["links"].keys())]
topology.add_edges_from(links)
for link in links:
topology[link[0]][link[1]]["weight"] = data["links"][link]["length"]
# adj = nx.adjacency_matrix(topology)
# identity = np.identity(26)
# a_ca = adj + identity
# print(nx.normalized_laplacian_matrix(topology).A)
file_list = []
DATA_PATH = "../Test_Data/From_Liam/REAL-DATA-1"
for item in os.listdir(DATA_PATH):
file_list.append(item)
grouped_files = list(zip(file_list[::2], file_list[1::2]))
for item in grouped_files:
demand_file = item[0]
erlang = 300 + (grouped_files.index(item) * 100)
with open(os.path.join(DATA_PATH, demand_file)) as file:
data = json.load(file)
# print(data[0])
demands = [key[list(key.keys())[0]] for key in data]
# print(demands)
for tick in range(555):
for node in topology.nodes:
topology.nodes[node]["volTTL"] = 0
batch = []
lower = tick * 180
upper = (tick + 1) * 180
grouped = demands[lower:upper]
for demand in grouped:
demand["initialttl"] -= 180 - tick
if demand["initialttl"] > 0:
batch.append(demand)
for demand in batch:
erl = demand["initialttl"] * demand["volume"]
if "source" in demand:
shortest_path = nx.dijkstra_path(
topology,
demand["source"]["name"],
demand["destination"]["name"],
weight=calc_weight,
)
for node in shortest_path:
topology.nodes[node]["volTTL"] += erl
with open(f"data\erlang_{erlang}\edge_list_{tick}.txt", "w") as file:
for node, data_dict in topology.adj.items():
for nbr, length_dict in data_dict.items():
data_line = " ".join(
[
str(node)[5::],
str(nbr)[5::],
str(
topology.nodes[node].get("volTTL")
+ topology.nodes[nbr].get("volTTL")
),
]
)
file.write(f"{data_line}\n")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
12,
2414,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
2,
15545,
309,
22854,
37535,
628,
198,
117... | 1.767749 | 1,817 |
from behave import given, when
from flask import json
@when("the last returned thread is closed")
@given("the last returned thread is closed")
def step_impl_the_response_message_thread_is_closed(context):
    """Close the conversation thread of the most recently saved message.

    Sends a PATCH with ``is_closed=True`` to the thread endpoint and stores
    the HTTP response on ``context.response`` for later assertion steps.
    """
    helper = context.bdd_helper
    # The first saved response holds the thread to close.
    target_thread = helper.single_message_responses_data[0]["thread_id"]
    payload = json.dumps({"is_closed": True})
    context.response = context.client.patch(
        helper.thread_get_url.format(target_thread),
        data=payload,
        headers=helper.headers,
    )
| [
6738,
17438,
1330,
1813,
11,
618,
198,
6738,
42903,
1330,
33918,
628,
198,
31,
12518,
7203,
1169,
938,
4504,
4704,
318,
4838,
4943,
198,
31,
35569,
7203,
1169,
938,
4504,
4704,
318,
4838,
4943,
198,
4299,
2239,
62,
23928,
62,
1169,
62... | 2.691589 | 214 |
from __future__ import print_function
import os
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from PIL import Image
import matplotlib.pyplot as plt
from src.nets_test import *
# ---------------- Command-line flags ----------------
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_dir", type=str, default='./data_scene_flow/training', help="where the dataset is stored")
parser.add_argument("--save_root", type=str, default='./dataset', help="Where to dump the data")
parser.add_argument("--checkpoint_dir", type=str, default='./saved_models/kitti_b128_3pxloss', help="Where the ckpt files are")
parser.add_argument("--checkpoint_file", type=str, default='edlsm_38000.ckpt', help="checkpoint file name to load")
parser.add_argument("--resize_image", type=str, default='True', help="Resize image")
parser.add_argument("--test_num", type=int, default=80, help="Image number to do inference")
parser.add_argument("--disp_range", type=int, default=128, help="Search range for disparity")
parser.add_argument("--use_gpu", type=int, default=1, help="Check to use GPU")
args = parser.parse_args()
print('----------------------------------------')
print('FLAGS:')
for arg in vars(args):
    print("'", arg,"'", ": ", getattr(args, arg))
print('----------------------------------------')
print('Inference....')
# Useful functions
#################################### Main #####################################
# Input Channels
nChannel = 3
# Search range
disp_range = args.disp_range
# Trained model file
model_fn = os.path.join(args.checkpoint_dir, args.checkpoint_file)
# Build Test Graph
# NOTE(review): Net and load_and_resize_l_and_r_image come from the
# `from src.nets_test import *` above.
net = Net(nChannel)
# Loading the trained model
net.load_state_dict(torch.load(model_fn))
net.eval()
print(net)
print('Model Loaded')
# Check to use GPU
if args.use_gpu:
    net = net.cuda()
# Load the images
ll_image, rr_image, ll_image1, rr_image1 = load_and_resize_l_and_r_image(args.test_num)
# Normalize images. All the patches used for training were normalized.
l_img = (ll_image - ll_image.mean())/(ll_image.std())
r_img = (rr_image - rr_image.mean())/(rr_image.std())
# Tensors are (channel, height, width) here -- size(1)/size(2) are H/W.
img_h = l_img.size(1)
img_w = l_img.size(2)
print('Image size:', img_h, img_w)
# Convert to batch x channel x height x width format
l_img = l_img.view(1, l_img.size(0), l_img.size(1), l_img.size(2))
r_img = r_img.view(1, r_img.size(0), r_img.size(1), r_img.size(2))
if args.use_gpu:
    l_img = l_img.cuda()
    r_img = r_img.cuda()
# Forward pass. extract deep features
left_feat = net(Variable(l_img, requires_grad=False))
# forward pass right image
right_feat = net(Variable(r_img, requires_grad=False))
# output tensor
# NOTE(review): `output` is allocated here but never used below.
output = torch.Tensor(img_h, img_w, disp_range).zero_()
start_id = 0
end_id = img_w -1
total_loc = disp_range
# Output tensor
# Cost volumes: per-pixel, per-candidate-disparity matching score
# (elementwise product of left/right features summed over channels).
unary_vol = torch.Tensor(img_h, img_w, total_loc).zero_()
right_unary_vol = torch.Tensor(img_h, img_w, total_loc).zero_()
# NOTE(review): with start_id=0 and the last statement of the loop body
# setting start_id = end_id + 1, this while executes exactly one iteration.
while start_id <= end_id:
    for loc_idx in range(0, total_loc):
        x_off = -loc_idx + 1 # always <= 0
        if end_id+x_off >= 1 and img_w >= start_id+x_off:
            # Overlapping column windows of the left/right feature maps,
            # shifted against each other by the candidate disparity.
            l = left_feat[:, :, :, np.max([start_id, -x_off+1]): np.min([end_id, img_w-x_off])]
            r = right_feat[:, :, :, np.max([1, x_off+start_id]) : np.min([img_w, end_id+x_off])]
            p = torch.mul(l,r)
            q = torch.sum(p, 1)
            unary_vol[:, np.max([start_id, -x_off+1]): np.min([end_id, img_w-x_off]) ,loc_idx] = q.data.view(q.data.size(1), q.data.size(2))
            right_unary_vol[:, np.max([1, x_off+start_id]) : np.min([img_w, end_id+x_off]) ,loc_idx] = q.data.view(q.data.size(1), q.data.size(2))
    start_id = end_id + 1
#misc.imsave('pred_disp_' + str(test_img_num) + '.png', pred_disp)
# Winner-takes-all: pick the disparity index with the highest score.
max_disp1, pred_1 = torch.max(unary_vol, 2)
max_disp2, pred_2 = torch.max(right_unary_vol, 2)
# image_path_1 = '%s/cost_img/%06d_10.t7' % ('./save_disp', test_img_num)
# image_path_2 = '%s/cost_img_r/%06d_10.t7' % ('./save_disp', test_img_num)
# torch.save(unary_vol, image_path_1)
# torch.save(right_unary_vol, image_path_2)
# disparity map (height x width)
pred_disp1 = pred_1.view(unary_vol.size(0), unary_vol.size(1))
pred_disp2 = pred_2.view(unary_vol.size(0), unary_vol.size(1))
# Display the images
plt.subplot(411)
plt.imshow(ll_image1)
plt.title('Left Image')
plt.axis('off')
plt.subplot(412)
plt.imshow(rr_image1)
plt.title('Right Image')
plt.axis('off')
plt.subplot(413)
plt.imshow(pred_disp1, cmap='gray')
plt.title('Predicted Disparity')
plt.axis('off')
plt.subplot(414)
plt.imshow(pred_disp2, cmap='gray')
plt.title('Right Disparity')
plt.axis('off')
plt.show()
print('Complete!')
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
... | 2.421351 | 1,939 |
import logging
from fraud_detection.settings import FILE_DIRS
# Module-level logger named after this module, per standard logging practice.
logger = logging.getLogger(__name__)
| [
198,
11748,
18931,
198,
198,
6738,
7394,
62,
15255,
3213,
13,
33692,
1330,
45811,
62,
34720,
50,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
628,
198
] | 3.028571 | 35 |
"""Contains tests for the query module"""
import unittest
from cubejsclientasync import (
Cube,
DateRange,
Order,
Query,
TimeDimension,
TimeGranularity,
)
from cubejsclientasync.enums import FilterOperator
from cubejsclientasync.filters import And, Filter
class QueryTests(unittest.TestCase):
    """Tests Query"""

    def test_basic(self):
        """Should serialize a query"""
        cube = Cube("c__app-123__us_accidents")
        # Build the query pieces as named intermediates for readability.
        time_dim = TimeDimension(
            cube.dimension("time"),
            DateRange(relative="last year"),
            granularity=TimeGranularity.month,
        )
        state_filter = And(
            Filter(cube.dimension("state"), FilterOperator.equals, ["WA"])
        )
        query = Query(
            measures=[cube.measure("foo")],
            dimensions=[cube.dimension("bar")],
            time_dimensions=[time_dim],
            filters=[state_filter],
            order=[(cube.dimension("bar"), Order.asc)],
        )
        expected = {
            "measures": ["c__app-123__us_accidents.foo"],
            "timeDimensions": [
                {
                    "dimension": "c__app-123__us_accidents.time",
                    "dateRange": "last year",
                    "granularity": "month",
                }
            ],
            "filters": [
                {
                    "and": [
                        {
                            "member": "c__app-123__us_accidents.state",
                            "operator": "equals",
                            "values": ["WA"],
                        }
                    ]
                }
            ],
            "limit": 10000,
            "offset": 0,
            "timezone": "UTC",
            "ungrouped": False,
            "dimensions": ["c__app-123__us_accidents.bar"],
            "order": [("c__app-123__us_accidents.bar", "asc")],
        }
        self.assertEqual(query.serialize(), expected)
| [
37811,
4264,
1299,
5254,
329,
262,
12405,
8265,
37811,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
23441,
8457,
16366,
292,
13361,
1330,
357,
198,
220,
220,
220,
23315,
11,
198,
220,
220,
220,
7536,
17257,
11,
198,
220,
220,
220,
... | 1.680421 | 1,236 |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import enum
import functools
import json
from collections import defaultdict
from datetime import datetime
from typing import Any, Iterable, Dict
from aiohttp import web
from sqlalchemy import insert, select
from tglib.clients import APIServiceClient, MySQLClient
from .models import NetworkStatsHealth, NetworkHealthExecution
routes = web.RouteTableDef()


def _health_legend() -> Dict:
    """Return a fresh legend block mapping health values to display colors.

    Values: 1=Excellent, 2=Good, 4=Poor, 5=Unknown.
    NOTE(review): value 3 is skipped in the original legend -- confirm it is
    never produced by the health computation.
    """
    return {
        "items": [
            {"color": "#00dd44", "label": "Excellent", "value": 1},
            {"color": "#ffdd00", "label": "Good", "value": 2},
            {"color": "#dd0000", "label": "Poor", "value": 4},
            {"color": "#999999", "label": "Unknown", "value": 5},
        ]
    }


@routes.get("/health/latest")
async def handle_get_network_health(request: web.Request) -> web.Response:
    """
    ---
    description: Return latest health of links and nodes of the requested network.
    tags:
    - Network Health Service
    parameters:
    - in: query
      name: network_name
      description: The name of the network.
      type: string
    produces:
    - application/json
    responses:
      "200":
        description: Successful operation.
      "400":
        description: Invalid filter parameters.
    """
    # Validate the query parameter before doing any I/O.
    network_name = request.rel_url.query.get("network_name")
    if network_name is None:
        raise web.HTTPBadRequest(text="Missing required 'network_name' param")
    if network_name not in APIServiceClient.network_names():
        raise web.HTTPBadRequest(text=f"Invalid network name: {network_name}")

    # Fetch the topology to map node names to their site names.
    topology = await APIServiceClient(timeout=1).request(network_name, "getTopology")
    node_to_site_name = {node["name"]: node["site_name"] for node in topology["nodes"]}

    async with MySQLClient().lease() as sa_conn:
        # Most recent health-computation run.
        query = (
            select([NetworkHealthExecution.id])
            .order_by(NetworkHealthExecution.id.desc())
            .limit(1)
        )
        cursor = await sa_conn.execute(query)
        execution_row = await cursor.first()
        # NOTE(review): if no execution rows exist, `.id` raises
        # AttributeError (HTTP 500) -- behavior preserved from the original.
        latest_execution_id = execution_row.id

        # All link/node health rows belonging to that run for this network.
        query = select(
            [
                NetworkStatsHealth.link_name,
                NetworkStatsHealth.node_name,
                NetworkStatsHealth.stats_health,
            ]
        ).where(
            (NetworkStatsHealth.execution_id == latest_execution_id)
            & (NetworkStatsHealth.network_name == network_name)
        )
        cursor = await sa_conn.execute(query)
        network_stats_health: Iterable = await cursor.fetchall()

    # The three legends were identical literals in the original; they are now
    # deduplicated through _health_legend() (fresh copies, same content).
    results: Dict = {
        "data": {"links": {}, "nodes": {}, "sites": {}},
        "legend": {
            "links": _health_legend(),
            "nodes": _health_legend(),
            "sites": _health_legend(),
        },
    }
    for row in network_stats_health:
        if row.link_name is not None:
            results["data"]["links"][row.link_name] = {
                "value": row.stats_health["overall_health"],
                "metadata": row.stats_health["stats"],
            }
        if row.node_name is not None:
            results["data"]["nodes"][row.node_name] = {
                "value": row.stats_health["overall_health"],
                "metadata": row.stats_health["stats"],
            }
            # Site health mirrors the health of the node(s) at that site.
            results["data"]["sites"][node_to_site_name[row.node_name]] = {
                "value": row.stats_health["overall_health"],
                "metadata": row.stats_health["stats"],
            }
    # NOTE(review): custom_serializer is defined elsewhere in this module;
    # confirm it handles all non-JSON-native values in `results`.
    return web.json_response(
        results, dumps=functools.partial(json.dumps, default=custom_serializer)
    )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
5472,
12,
25579,
3203,
13,
1439,
6923,
33876,
13,
198,
198,
11748,
33829,
198,
11748,
1257,
310,
10141,
198,
11748,
33918,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
... | 2.163664 | 1,998 |
from setuptools import setup
# Runtime dependencies are read from requirements.txt so the pip metadata
# below cannot drift from the requirements file.
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(
    install_requires=required,
    name='monitoringHisto',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1',
    description='FIWARE Historical monitoring collector',
    long_description='',
    # The project's main homepage.
    url='https://github.com/SmartInfrastructures/FIWARELab-monitoringAPI',
    # Author details
    author='Daniele Santoro',
    author_email='',
    # Choose your license
    license='Apache v2.0',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=['monitoringHisto'],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={'console_scripts': [
        'monitoringHisto=monitoringHisto.monitoringHisto:main',
    ],
    },
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
10786,
8897,
18883,
13,
14116,
11537,
355,
277,
25,
198,
220,
220,
220,
2672,
796,
277,
13,
961,
22446,
35312,
6615,
3419,
198,
198,
40406,
7,
628,
220,
220,
220,
2721,
62,
... | 3.099751 | 401 |
# All content Copyright (C) 2018 Genomics plc
from wecall.bamutils.sequence_quality import SequenceQuality
import pysam
from wecall.bamutils.sample_bank import SampleBank
from wecall_test_drivers.ascii_wecall_runner import DEFAULT_SAMPLE_NAME
from wecall_test_drivers.base_test import BaseTest
from wecall_test_drivers.variant_caller_builder import VariantCallerBuilderFromSampleBank
import os
| [
2,
1439,
2695,
15069,
357,
34,
8,
2864,
5215,
31994,
458,
66,
198,
6738,
356,
13345,
13,
65,
321,
26791,
13,
43167,
62,
13237,
1330,
45835,
35013,
198,
11748,
279,
893,
321,
198,
6738,
356,
13345,
13,
65,
321,
26791,
13,
39873,
62,
... | 3.405172 | 116 |
import cv2
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# import image
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn import svm
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
import math
import collections
from matplotlib import pyplot as plt
import pandas as pa
#import Image
import os
# extract x & y of kp
# Python 2 script (note the `print x` statements): detect reference logos in
# a video with SIFT features and draw bounding polygons on each frame.
if __name__ == '__main__':
    # read in whole dataset here !
    # use sift to extract kp & des
    sift = cv2.xfeatures2d.SIFT_create()
    path = "/Users/muyunyan/Documents/Pycharm/EC500/sprint4/DHL1"
    # Keypoints/descriptors for each reference logo image under `path`.
    # NOTE(review): logo_des, bf_knnmatches and extrct_ROI are defined
    # elsewhere in this file/project -- not visible here.
    kp1, des1 = logo_des(path)
    cap = cv2.VideoCapture('DHL.mp4')
    count = 0
    # roiBox = None
    roiBox = []
    roiHistt = []
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 360))
    # use brute-force knn to match the kps
    bf = cv2.BFMatcher()
    while( True ):
        # Capture frame-by-frame, each frame is 1080 pixels
        ret, frame = cap.read()
        orig = frame
        # bar = np.array([50,50,50,50])
        flag1 = 0
        # update ROI every 5 frames
        roiBox = []
        Box = []
        pic = frame
        gray_trg = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)
        kp2, des2 = sift.detectAndCompute(gray_trg, None)
        if des2 is not None:
            # Compare this frame against every reference logo image.
            os.chdir(path)
            a = os.listdir(".")
            # NOTE(review): slicing off first/last entries looks like an
            # attempt to drop hidden/system files -- fragile, confirm intent.
            a = a[1:-1]
            index1 = 0
            for i in a:
                img = cv2.imread(i, 0)
                matches = bf.knnMatch(des1[index1], des2, k=2)
                if matches is not None:
                    kp_trg = bf_knnmatches(matches, img, kp1[index1], kp2)
                    # if not detecting logo in the image, just skip the tracking and show the original frame.
                    if len(kp_trg) >= 4:
                        roiPts = np.array(kp_trg)
                        s = roiPts.sum(axis=1)
                        tl, ld, ru, br = extrct_ROI(s)
                        # roiBox .append([tl[0], tl[1], br[0], br[1]])
                        roiBox = (tl[0], tl[1], br[0], br[1])
                        if min(roiBox) > 0 and len(roiBox) > 0:
                            Box.append([np.int32([ld, tl, ru, br])])
                            print index1
                # NOTE(review): this second print duplicates the one above for
                # every candidate -- likely leftover debug output.
                print index1
                index1 += 1
            os.chdir("..")
        # Draw each detected logo's bounding polygon on the frame.
        for k in range(len(Box)):
            frame = cv2.polylines(frame, Box[k], True, 255, 3, cv2.LINE_AA)
        # Display the resulting frame
        cv2.imshow('frame', frame)
        # write the frame to be .avi
        # out.write(frame)
        # if the 'q' key is pressed, stop the loop
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
        count += 1
    # When everything done, release the capture
    cap.release()
    out.release()
    cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
201,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
201,
198,
11748,
2603,
29487,
8019,
13,
9060,
355,
29034,
9600,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
2,
1330,
2939,
201,
198,
11748... | 1.877119 | 1,652 |
"""Support for Roku selects."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from rokuecp import Roku
from rokuecp.models import Device as RokuDevice
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import roku_exception_handler
from .const import DOMAIN
from .coordinator import RokuDataUpdateCoordinator
from .entity import RokuEntity
from .helpers import format_channel_name
@dataclass
class RokuSelectEntityDescriptionMixin:
    """Mixin for required keys."""
    # Produces the list of selectable option labels from the device snapshot.
    options_fn: Callable[[RokuDevice], list[str]]
    # Produces the currently selected option, or None when nothing is active.
    value_fn: Callable[[RokuDevice], str | None]
    # Coroutine that applies the chosen option to the device via the client.
    set_fn: Callable[[RokuDevice, Roku, str], Awaitable[None]]
@dataclass
class RokuSelectEntityDescription(
    SelectEntityDescription, RokuSelectEntityDescriptionMixin
):
    """Describes Roku select entity.

    Combines Home Assistant's generic select description with the required
    Roku-specific callbacks from the mixin.
    """
# Select entities created for every Roku device. The *_fn callbacks
# (_launch_application, _get_application_name, _get_applications) are module
# helpers defined elsewhere in this file.
ENTITIES: tuple[RokuSelectEntityDescription, ...] = (
    RokuSelectEntityDescription(
        key="application",
        name="Application",
        icon="mdi:application",
        set_fn=_launch_application,
        value_fn=_get_application_name,
        options_fn=_get_applications,
        # Disabled by default; users opt in via the entity registry.
        entity_registry_enabled_default=False,
    ),
)
# Channel selector, added only for devices that report a channel list
# (see async_setup_entry). Callbacks are module helpers defined elsewhere.
CHANNEL_ENTITY = RokuSelectEntityDescription(
    key="channel",
    name="Channel",
    icon="mdi:television",
    set_fn=_tune_channel,
    value_fn=_get_channel_name,
    options_fn=_get_channels,
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Roku select based on a config entry."""
    coordinator: RokuDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    device: RokuDevice = coordinator.data
    serial_number = device.info.serial_number

    # Every device gets the standard selectors; the channel selector is only
    # offered when the device actually reports channels.
    descriptions = list(ENTITIES)
    if len(device.channels) > 0:
        descriptions.append(CHANNEL_ENTITY)

    async_add_entities(
        [
            RokuSelectEntity(
                device_id=serial_number,
                coordinator=coordinator,
                description=description,
            )
            for description in descriptions
        ]
    )
class RokuSelectEntity(RokuEntity, SelectEntity):
    """Defines a Roku select entity."""
    # Narrow the description type for type checkers.
    entity_description: RokuSelectEntityDescription
    @property
    def current_option(self) -> str | None:
        """Return the current value."""
        # Delegate to the description's value_fn with the latest device data.
        return self.entity_description.value_fn(self.coordinator.data)
    @property
    def options(self) -> list[str]:
        """Return a set of selectable options."""
        return self.entity_description.options_fn(self.coordinator.data)
    # roku_exception_handler (from the package __init__) presumably maps Roku
    # client errors to HA-friendly handling -- confirm its contract there.
    @roku_exception_handler
    async def async_select_option(self, option: str) -> None:
        """Set the option."""
        await self.entity_description.set_fn(
            self.coordinator.data,
            self.coordinator.roku,
            option,
        )
        # Refresh coordinator data so the new selection is reflected at once.
        await self.coordinator.async_request_refresh()
| [
37811,
15514,
329,
46194,
40573,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
17268,
13,
39305,
1330,
5851,
4548,
540,
11,
4889,
540,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
686,
74... | 2.644162 | 1,259 |
from dataclasses import replace
import datetime
import secrets
from neuro_sdk import ResourceNotFound
from typing import (
AbstractSet,
AsyncIterator,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
)
from yarl import URL
from neuro_flow.storage.base import (
Attempt,
AttemptStorage,
Bake,
BakeImage,
BakeImageStorage,
BakeMeta,
BakeStorage,
CacheEntry,
CacheEntryStorage,
ConfigFile,
ConfigFileStorage,
ConfigsMeta,
LiveJob,
LiveJobStorage,
Project,
ProjectStorage,
Storage,
Task,
TaskStatusItem,
TaskStorage,
_Unset,
)
from neuro_flow.types import FullID, ImageStatus, TaskStatus
| [
6738,
4818,
330,
28958,
1330,
6330,
198,
198,
11748,
4818,
8079,
198,
11748,
13141,
198,
6738,
7669,
62,
21282,
74,
1330,
20857,
3673,
21077,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
27741,
7248,
11,
198,
220,
220,
220,
1081,
... | 2.550173 | 289 |
# Generated by Django 3.0.2 on 2020-01-24 10:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
17,
319,
12131,
12,
486,
12,
1731,
838,
25,
940,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208... | 3.019231 | 52 |
import base64 as b64
import re
import threading
import time
import flask as f
import requests
import samehadaku as s
# Flask application; templates are served from the current directory.
app = f.Flask(__name__, template_folder='.')
# In-process cache dict -- NOTE(review): unbounded; consider eviction if the
# process is long-lived.
app.cache = {}
# Process start timestamp -- presumably for uptime/cache-age checks; confirm
# against the handlers defined below.
app.init_time = time.time()
# Global semaphore capping concurrency at 12 across all requests.
app.bounded_semaphore = threading.BoundedSemaphore(12)
# Per-client semaphores -- presumably keyed by client identity; confirm.
app.client_bsemaphores = {}
@app.before_request
@app.after_request
@app.route('/')
@app.route('/<q>')
@app.route('/_/dl/<link>')
# Script entry point: listen on all interfaces on port 20001 with threading.
# NOTE(review): the built-in Flask server is not intended for production.
if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=False, threaded=True, port=20001)
| [
11748,
2779,
2414,
355,
275,
2414,
198,
11748,
302,
198,
11748,
4704,
278,
198,
11748,
640,
198,
198,
11748,
42903,
355,
277,
198,
11748,
7007,
198,
198,
11748,
976,
18108,
8719,
355,
264,
198,
198,
1324,
796,
277,
13,
7414,
2093,
7,
... | 2.532995 | 197 |
from aiohttp import ClientSession
from aiohttp.client import ClientTimeout
from aiohttp.web_exceptions import HTTPError, HTTPForbidden, HTTPNotFound
from holobot.sdk.ioc.decorators import injectable
from holobot.sdk.lifecycle import StartableInterface
from holobot.sdk.logging import LogInterface
from holobot.sdk.network import HttpClientPoolInterface
from holobot.sdk.network.exceptions import HttpStatusError, ImATeapotError, TooManyRequestsError
from multidict import CIMultiDict
from typing import Any, Callable, Dict
# Default total timeout (seconds) applied to outgoing aiohttp requests.
DEFAULT_TIMEOUT = ClientTimeout(total=5)
# https://julien.danjou.info/python-and-fast-http-clients/
@injectable(StartableInterface)
@injectable(HttpClientPoolInterface)
| [
6738,
257,
952,
4023,
1330,
20985,
36044,
198,
6738,
257,
952,
4023,
13,
16366,
1330,
20985,
48031,
198,
6738,
257,
952,
4023,
13,
12384,
62,
1069,
11755,
1330,
14626,
12331,
11,
14626,
1890,
37978,
11,
14626,
3673,
21077,
198,
6738,
60... | 3.183486 | 218 |
###########################################################################################
# assets - javascript and css asset handling
#
# Date Author Reason
# ---- ------ ------
# 03/04/20 Lou King Create
#
# Copyright 2020 Lou King. All rights reserved
#
###########################################################################################
'''
assets - javascript and css asset handling
===================================================
'''
from flask_assets import Bundle, Environment
# ---------------------------------------------------------------------------
# pinned versions of the third-party browser assets bundled below;
# each version string is interpolated into the static-file paths
# ---------------------------------------------------------------------------

# jQuery / jQuery UI
jq_ver = '3.5.1'
jq_ui_ver = '1.12.1'

# DataTables core and extensions
dt_buttons_ver = '1.6.5'        # also used for colvis and html5
dt_datatables_ver = '1.10.22'
dt_editor_ver = '1.9.6+discussion-59060'
dt_fixedcolumns_ver = '3.3.1'
dt_responsive_ver = '2.2.6'
dt_rowreorder_ver = '1.2.7'
dt_select_ver = '1.3.1-preXhr-patch'
jszip_ver = '2.5.0'

# select2
# NOTE: patch to jquery ui required, see https://github.com/select2/select2/issues/1246#issuecomment-17428249
#       (currently applied in datatables.js)
s2_ver = '4.0.12'

# smartmenus menu library
sm_ver = '1.1.0'

# yadcf (yet another datatables column filter)
yadcf_ver = '0.9.4.beta.45+lk-date_custom_func'

# miscellaneous libraries
moment_ver = '2.24.0'           # moment.js (see https://momentjs.com/)
lodash_ver = '4.17.15'          # lodash.js (see https://lodash.com)
d3_ver = '7.1.1'                # d3js.org (see https://d3js.org/)
d3_tip_ver = '1.1'              # https://github.com/VACLab/d3-tip
fa_ver = '5.13.0'               # https://fontawesome.com/
nunjucks_ver = '3.2.0'          # https://mozilla.github.io/nunjucks/

# ckeditor 5 (see https://ckeditor.com/ckeditor-5/)
cke_type = 'classic'
cke_ver = '26.0.0-members-414'
# frontend_common_js: JavaScript shared by all frontend (public) views,
# minified with jsmin into a single gen/frontendcommon.js.
# NOTE: the entries are concatenated in listed order and several libraries
# depend on their predecessors (jquery first; datatables before yadcf /
# extensions; select2 before editor.select2.js) -- do not reorder.
frontend_common_js = Bundle(
    'js/jquery-{ver}/jquery-{ver}.js'.format(ver=jq_ver),
    'js/jquery-ui-{ver}.custom/jquery-ui.js'.format(ver=jq_ui_ver),
    'js/lodash-{ver}/lodash.js'.format(ver=lodash_ver),
    'js/smartmenus-{ver}/jquery.smartmenus.js'.format(ver=sm_ver),
    # datatables / yadcf
    'js/DataTables-{ver}/js/jquery.dataTables.js'.format(ver=dt_datatables_ver),
    'js/DataTables-{ver}/js/dataTables.jqueryui.js'.format(ver=dt_datatables_ver),
    'js/yadcf-{ver}/jquery.dataTables.yadcf.js'.format(ver=yadcf_ver),
    'js/FixedColumns-{ver}/js/dataTables.fixedColumns.js'.format(ver=dt_fixedcolumns_ver),
    'js/Responsive-{ver}/js/dataTables.responsive.js'.format(ver=dt_responsive_ver),
    'js/Responsive-{ver}/js/responsive.jqueryui.js'.format(ver=dt_responsive_ver),
    'js/Editor-{ver}/js/dataTables.editor.js'.format(ver=dt_editor_ver),
    'js/Editor-{ver}/js/editor.jqueryui.js'.format(ver=dt_editor_ver),
    'js/Select-{ver}/js/dataTables.select.js'.format(ver=dt_select_ver),
    # select2 is required for use by Editor forms and interest navigation
    'js/select2-{ver}/js/select2.full.js'.format(ver=s2_ver),
    # the order here is important
    'js/FieldType-Select2/editor.select2.js',
    # date time formatting
    'js/moment-{ver}/moment.js'.format(ver=moment_ver),
    # d3
    'js/d3-{ver}/d3.js'.format(ver=d3_ver),
    'js/d3-tip-{ver}/d3-tip.js'.format(ver=d3_tip_ver),
    'frontend/beforedatatables.js',
    'admin/layout.js', # TODO: smartmenus initialization, should be moved to layout.js
    'layout.js',
    'utils.js', # from loutilities
    'editor.select2.mymethods.js', # from loutilities
    'datatables.js', # from loutilities
    'datatables.dataRender.ellipsis.js', # from loutilities
    'datatables.dataRender.datetime.js', # from loutilities
    'editor.buttons.editrefresh.js', # from loutilities
    'editor.buttons.editchildrowrefresh.js',# from loutilities
    'filters.js', # from loutilities
    'user/admin/groups.js', # from loutilities
    'admin/afterdatatables.js', # TODO: should move common bits up a level and pieces to frontend/afterdatatables
    filters='jsmin',
    output='gen/frontendcommon.js',
)
# frontend_members: JavaScript used only by the frontend membership views;
# minified with jsmin into gen/frontendmembers.js and layered on top of the
# common frontend bundle (see asset_bundles['frontendmembers_js']).
frontend_members = Bundle(
    'frontend/membership-stats.js',
    output='gen/frontendmembers.js',
    filters='jsmin',
)
# asset_bundles: bundle name -> Bundle, registered with the webassets
# Environment elsewhere in the app. Frontend JS bundles reuse the prebuilt
# frontend_common_js; admin bundles wrap each file in its own Bundle so the
# jsmin / cssrewrite filters can be applied (or skipped) per file.
# NOTE: file order within each bundle is load order in the browser -- do not
# reorder entries.
asset_bundles = {

    'frontend_js': Bundle(
        frontend_common_js,
        ),

    'frontendmembers_js': Bundle(
        frontend_common_js,
        frontend_members,
        ),

    'frontend_css': Bundle(
        'js/jquery-ui-{ver}.custom/jquery-ui.css'.format(ver=jq_ui_ver),
        'js/jquery-ui-{ver}.custom/jquery-ui.structure.css'.format(ver=jq_ui_ver),
        'js/jquery-ui-{ver}.custom/jquery-ui.theme.css'.format(ver=jq_ui_ver),
        'js/DataTables-{ver}/css/dataTables.jqueryui.css'.format(ver=dt_datatables_ver),
        'js/Buttons-{ver}/css/buttons.jqueryui.css'.format(ver=dt_buttons_ver),
        'js/FixedColumns-{ver}/css/fixedColumns.jqueryui.css'.format(ver=dt_fixedcolumns_ver),
        'js/Responsive-{ver}/css/responsive.dataTables.css'.format(ver=dt_responsive_ver),
        'js/Responsive-{ver}/css/responsive.jqueryui.css'.format(ver=dt_responsive_ver),
        'js/Select-{ver}/css/select.jqueryui.css'.format(ver=dt_select_ver),
        'js/select2-{ver}/css/select2.css'.format(ver=s2_ver),
        'js/yadcf-{ver}/jquery.dataTables.yadcf.css'.format(ver=yadcf_ver),
        'js/fontawesome-{ver}/css/fontawesome.css'.format(ver=fa_ver),
        'js/fontawesome-{ver}/css/solid.css'.format(ver=fa_ver),
        'datatables.css', # from loutilities
        'editor.css', # from loutilities
        'filters.css', # from loutilities
        'branding.css', # from loutilities
        'js/smartmenus-{ver}/css/sm-core-css.css'.format(ver=sm_ver),
        'js/smartmenus-{ver}/css/sm-blue/sm-blue.css'.format(ver=sm_ver),
        'style.css',
        'admin/style.css', # TODO: some of this is for smartmenus, should be in style.css
        output='gen/frontend.css',
        # cssrewrite helps find image files when ASSETS_DEBUG = False
        filters=['cssrewrite', 'cssmin'],
    ),

    'admin_js': Bundle(
        Bundle('js/jquery-{ver}/jquery-{ver}.js'.format(ver=jq_ver), filters='jsmin'),
        Bundle('js/jquery-ui-{ver}.custom/jquery-ui.js'.format(ver=jq_ui_ver), filters='jsmin'),
        Bundle('js/smartmenus-{ver}/jquery.smartmenus.js'.format(ver=sm_ver), filters='jsmin'),
        Bundle('js/lodash-{ver}/lodash.js'.format(ver=lodash_ver), filters='jsmin'),
        Bundle('js/JSZip-{ver}/jszip.js'.format(ver=jszip_ver), filters='jsmin'),
        Bundle('js/DataTables-{ver}/js/jquery.dataTables.js'.format(ver=dt_datatables_ver), filters='jsmin'),
        Bundle('js/DataTables-{ver}/js/dataTables.jqueryui.js'.format(ver=dt_datatables_ver), filters='jsmin'),
        Bundle('js/Editor-{ver}/js/dataTables.editor.js'.format(ver=dt_editor_ver), filters='jsmin'),
        Bundle('js/Editor-{ver}/js/editor.jqueryui.js'.format(ver=dt_editor_ver), filters='jsmin'),
        Bundle('js/Buttons-{ver}/js/dataTables.buttons.js'.format(ver=dt_buttons_ver), filters='jsmin'),
        Bundle('js/Buttons-{ver}/js/buttons.jqueryui.js'.format(ver=dt_buttons_ver), filters='jsmin'),
        Bundle('js/Buttons-{ver}/js/buttons.colVis.js'.format(ver=dt_buttons_ver), filters='jsmin'),
        Bundle('js/Buttons-{ver}/js/buttons.html5.js'.format(ver=dt_buttons_ver), filters='jsmin'),
        Bundle('js/FixedColumns-{ver}/js/dataTables.fixedColumns.js'.format(ver=dt_fixedcolumns_ver), filters='jsmin'),
        Bundle('js/Responsive-{ver}/js/dataTables.responsive.js'.format(ver=dt_responsive_ver), filters='jsmin'),
        Bundle('js/RowReorder-{ver}/js/dataTables.rowReorder.js'.format(ver=dt_rowreorder_ver), filters='jsmin'),
        Bundle('js/Select-{ver}/js/dataTables.select.js'.format(ver=dt_select_ver), filters='jsmin'),
        Bundle('js/yadcf-{ver}/jquery.dataTables.yadcf.js'.format(ver=yadcf_ver), filters='jsmin'),
        # select2 is required for use by Editor forms and interest navigation
        Bundle('js/select2-{ver}/js/select2.full.js'.format(ver=s2_ver), filters='jsmin'),
        # the order here is important
        Bundle('js/FieldType-Select2/editor.select2.js', filters='jsmin'),
        # date time formatting for datatables editor, per https://editor.datatables.net/reference/field/datetime
        Bundle('js/moment-{ver}/moment.js'.format(ver=moment_ver), filters='jsmin'),
        # d3
        Bundle('js/d3-{ver}/d3.js'.format(ver=d3_ver), filters='jsmin'),
        # ckeditor (note this is already minimized, and filter through jsmin causes problems)
        'js/ckeditor5-build-{type}-{ver}/build/ckeditor.js'.format(ver=cke_ver, type=cke_type),
        Bundle('admin/layout.js', filters='jsmin'),
        Bundle('layout.js', filters='jsmin'),
        # must be before datatables
        Bundle('editor-saeditor.js', filters='jsmin'), # from loutilities
        Bundle('js/nunjucks-{ver}/nunjucks.js'.format(ver=nunjucks_ver), filters='jsmin'),
        Bundle('admin/nunjucks/templates.js', filters='jsmin'),
        Bundle('editor.fieldType.display.js', filters='jsmin'), # from loutilities
        Bundle('editor.ckeditor5.js', filters='jsmin'), # from loutilities
        Bundle('admin/beforedatatables.js', filters='jsmin'),
        Bundle('editor.googledoc.js', filters='jsmin'), # from loutilities
        Bundle('datatables.dataRender.googledoc.js', filters='jsmin'), # from loutilities
        Bundle('user/admin/beforedatatables.js', filters='jsmin'), # from loutilities
        Bundle('editor.select2.mymethods.js', filters='jsmin'), # from loutilities
        Bundle('editor.displayController.onPage.js', filters='jsmin'), # from loutilities
        Bundle('datatables-childrow.js', filters='jsmin'), # from loutilities
        Bundle('datatables.js', filters='jsmin'), # from loutilities
        # must be after datatables.js
        Bundle('datatables.dataRender.ellipsis.js', filters='jsmin'), # from loutilities
        Bundle('datatables.dataRender.datetime.js', filters='jsmin'), # from loutilities
        Bundle('editor.buttons.editrefresh.js', filters='jsmin'), # from loutilities
        Bundle('editor.buttons.editchildrowrefresh.js', filters='jsmin'), # from loutilities
        Bundle('editor.buttons.separator.js', filters='jsmin'), # from loutilities
        Bundle('filters.js', filters='jsmin'), # from loutilities
        Bundle('utils.js', filters='jsmin'), # from loutilities
        Bundle('user/admin/groups.js', filters='jsmin'), # from loutilities
        # Bundle('admin/editor.buttons.invites.js', filters='jsmin'),
        Bundle('admin/afterdatatables.js', filters='jsmin'),
        output='gen/admin.js',
    ),

    'admin_css': Bundle(
        Bundle('js/jquery-ui-{ver}.custom/jquery-ui.css'.format(ver=jq_ui_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/jquery-ui-{ver}.custom/jquery-ui.structure.css'.format(ver=jq_ui_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/jquery-ui-{ver}.custom/jquery-ui.theme.css'.format(ver=jq_ui_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/smartmenus-{ver}/css/sm-core-css.css'.format(ver=sm_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/smartmenus-{ver}/css/sm-blue/sm-blue.css'.format(ver=sm_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/DataTables-{ver}/css/dataTables.jqueryui.css'.format(ver=dt_datatables_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/Editor-{ver}/css/editor.dataTables.css'.format(ver=dt_editor_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/Editor-{ver}/css/editor.jqueryui.css'.format(ver=dt_editor_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/Buttons-{ver}/css/buttons.jqueryui.css'.format(ver=dt_buttons_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/FixedColumns-{ver}/css/fixedColumns.jqueryui.css'.format(ver=dt_fixedcolumns_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/Responsive-{ver}/css/responsive.jqueryui.css'.format(ver=dt_responsive_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/RowReorder-{ver}/css/rowReorder.jqueryui.css'.format(ver=dt_rowreorder_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/Select-{ver}/css/select.jqueryui.css'.format(ver=dt_select_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/select2-{ver}/css/select2.css'.format(ver=s2_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/yadcf-{ver}/jquery.dataTables.yadcf.css'.format(ver=yadcf_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/fontawesome-{ver}/css/fontawesome.css'.format(ver=fa_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('js/fontawesome-{ver}/css/solid.css'.format(ver=fa_ver), filters=['cssrewrite', 'cssmin']),
        Bundle('datatables.css', filters=['cssrewrite', 'cssmin']), # from loutilities
        Bundle('editor.css', filters=['cssrewrite', 'cssmin']), # from loutilities
        Bundle('filters.css', filters=['cssrewrite', 'cssmin']), # from loutilities
        Bundle('branding.css', filters=['cssrewrite', 'cssmin']), # from loutilities
        # this doesn't look like it's needed, was testing for #284
        # Bundle('js/ckeditor5-build-{type}-{ver}/sample/styles.css'.format(ver=cke_ver, type=cke_type),
        #     filters=['cssrewrite', 'cssmin']),
        Bundle('style.css', filters=['cssrewrite', 'cssmin']),
        Bundle('admin/style.css', filters=['cssrewrite', 'cssmin']),
        output='gen/admin.css',
        # cssrewrite helps find image files when ASSETS_DEBUG = False
        # filters=['cssrewrite', 'cssmin'],
    )
}
asset_env = Environment()
| [
29113,
29113,
14468,
7804,
21017,
198,
2,
6798,
532,
44575,
290,
269,
824,
11171,
9041,
198,
2,
198,
2,
220,
220,
220,
220,
220,
220,
7536,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
6434,
220,
220,
220,
220,
220,
220,... | 2.25602 | 6,105 |