content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from trac.test import EnvironmentStub
from trac.ticket.roadmap import *
from trac.core import ComponentManager
import unittest
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| [
6738,
491,
330,
13,
9288,
1330,
9344,
1273,
549,
201,
198,
6738,
491,
330,
13,
43350,
13,
6344,
8899,
1330,
1635,
201,
198,
6738,
491,
330,
13,
7295,
1330,
35100,
13511,
201,
198,
201,
198,
11748,
555,
715,
395,
201,
198,
201,
198,
... | 2.723684 | 76 |
import pandas as pd
import numpy as np
#sample code
ONES = pd.DataFrame(np)
ZEROES = pd.DataFrame(np.zeros(50))
#sample functions | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
628,
198,
2,
39873,
2438,
198,
39677,
796,
279,
67,
13,
6601,
19778,
7,
37659,
8,
198,
57,
34812,
1546,
796,
279,
67,
13,
6601,
19778,
7,
37659,
13,
9107,
418,
7... | 2.607843 | 51 |
"""Config flow to configure the Luxtronik heatpump controller integration."""
# region Imports
from __future__ import annotations
from typing import Any
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.dhcp import HOSTNAME, IP_ADDRESS
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from .const import (CONF_CONTROL_MODE_HOME_ASSISTANT,
CONF_HA_SENSOR_INDOOR_TEMPERATURE,
CONF_LANGUAGE_SENSOR_NAMES, CONF_LOCK_TIMEOUT, CONF_SAFE,
CONF_UPDATE_IMMEDIATELY_AFTER_WRITE,
DEFAULT_PORT, DOMAIN,
LANG_DEFAULT, LANGUAGES_SENSOR_NAMES, LOGGER)
from .helpers.lux_helper import discover
# endregion Imports
class LuxtronikFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Luxtronik heatpump controller config flow."""
VERSION = 1
_hassio_discovery = None
_discovery_host = None
_discovery_port = None
async def async_step_dhcp(self, discovery_info: dict):
"""Prepare configuration for a DHCP discovered Luxtronik heatpump."""
LOGGER.info(
"Found device with hostname '%s' IP '%s'",
discovery_info.get(HOSTNAME),
discovery_info[IP_ADDRESS],
)
# Validate dhcp result with socket broadcast:
broadcast_discover_ip, broadcast_discover_port = discover()
if broadcast_discover_ip != discovery_info[IP_ADDRESS]:
return
await self.async_set_unique_id(discovery_info.get(HOSTNAME))
self._abort_if_unique_id_configured()
self._discovery_host = discovery_info[IP_ADDRESS]
self._discovery_port = (
DEFAULT_PORT if broadcast_discover_port is None else broadcast_discover_port
)
self.discovery_schema = self._get_schema()
return await self.async_step_user()
async def _show_setup_form(
self, errors: dict[str, str] | None = None
) -> FlowResult:
"""Show the setup form to the user."""
return self.async_show_form(
step_id="user",
data_schema=self._get_schema(),
errors=errors or {},
)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by the user."""
if user_input is None:
return await self._show_setup_form(user_input)
data = {
CONF_HOST: user_input[CONF_HOST],
CONF_PORT: user_input[CONF_PORT],
CONF_SAFE: False,
CONF_LOCK_TIMEOUT: 30,
CONF_UPDATE_IMMEDIATELY_AFTER_WRITE: True,
CONF_CONTROL_MODE_HOME_ASSISTANT: user_input[
CONF_CONTROL_MODE_HOME_ASSISTANT
],
CONF_HA_SENSOR_INDOOR_TEMPERATURE: user_input[
CONF_HA_SENSOR_INDOOR_TEMPERATURE
],
CONF_LANGUAGE_SENSOR_NAMES: user_input[CONF_LANGUAGE_SENSOR_NAMES],
}
self._async_abort_entries_match(data)
return self.async_create_entry(title=user_input[CONF_HOST], data=data)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get default options flow."""
return LuxtronikOptionsFlowHandler(config_entry)
class LuxtronikOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a Luxtronik options flow."""
def __init__(self, config_entry):
"""Initialize."""
self.config_entry = config_entry
def _get_options_schema(self):
"""Return a schema for Luxtronik configuration options."""
return vol.Schema(
{
vol.Optional(
CONF_CONTROL_MODE_HOME_ASSISTANT,
default=self._get_value(CONF_CONTROL_MODE_HOME_ASSISTANT, False),
): bool,
vol.Optional(
CONF_HA_SENSOR_INDOOR_TEMPERATURE,
default=self._get_value(CONF_HA_SENSOR_INDOOR_TEMPERATURE, ""),
): str,
vol.Optional(
CONF_LANGUAGE_SENSOR_NAMES,
default=self._get_value(CONF_LANGUAGE_SENSOR_NAMES, LANG_DEFAULT),
): vol.In(LANGUAGES_SENSOR_NAMES),
}
)
async def async_step_init(self, _user_input=None):
"""Manage the options."""
return await self.async_step_user(_user_input)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="user", data_schema=self._get_options_schema()
)
| [
37811,
16934,
5202,
284,
17425,
262,
6026,
742,
1313,
1134,
4894,
79,
931,
10444,
11812,
526,
15931,
198,
2,
3814,
1846,
3742,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
19720,
1330,
4377,
198,
198,
11748,
1363,
562,
101... | 2.121983 | 2,320 |
from django.conf import settings
if 'django_select2' in settings.INSTALLED_APPS:
try:
from django_select2.fields import AutoModelSelect2Field
except ImportError:
pass
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
361,
705,
28241,
14208,
62,
19738,
17,
6,
287,
6460,
13,
38604,
7036,
1961,
62,
2969,
3705,
25,
198,
220,
220,
220,
1949,
25,
628,
220,
220,
220,
220,
220,
220,
220,
422,
42625,
14208,... | 2.694444 | 72 |
import torch
from torch import nn
if __name__=="__main__":
#test
net_encoder = encoder_extract(dim_bottleneck=64*64*3, ch=64).cuda()
net_decoder = decoder_extract(dim_bottleneck=64*64*3, ch=64).cuda()
net_encoder = nn.DataParallel(net_encoder)
net_decoder = nn.DataParallel(net_decoder)
x = torch.randn(10, 3, 64,64).cuda()
f = net_encoder(x)
xh, yh = net_decoder(f)
print(f.size())
print(xh.size())
print(yh.size())
| [
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
628,
628,
628,
198,
361,
11593,
3672,
834,
855,
1,
834,
12417,
834,
1298,
198,
220,
220,
220,
1303,
9288,
628,
220,
220,
220,
2010,
62,
12685,
12342,
796,
2207,
12342,
62,
2302,
974,
7,... | 2.122172 | 221 |
import ctypes
import msvcrt
import os
import sys
import code
import windows
from .. import winproxy
from ..generated_def import windef
from ..generated_def.winstructs import *
# Function resolution !
def create_file_from_handle(handle, mode="r"):
"""Return a Python :class:`file` arround a windows HANDLE"""
fd = msvcrt.open_osfhandle(handle, os.O_TEXT)
return os.fdopen(fd, mode, 0)
def get_handle_from_file(f):
"""Get the windows HANDLE of a python :class:`file`"""
return msvcrt.get_osfhandle(f.fileno())
def create_console():
"""Create a new console displaying STDOUT
Useful in injection of GUI process"""
winproxy.AllocConsole()
stdout_handle = winproxy.GetStdHandle(windef.STD_OUTPUT_HANDLE)
console_stdout = create_file_from_handle(stdout_handle, "w")
sys.stdout = console_stdout
stdin_handle = winproxy.GetStdHandle(windef.STD_INPUT_HANDLE)
console_stdin = create_file_from_handle(stdin_handle, "r+")
sys.stdin = console_stdin
stderr_handle = winproxy.GetStdHandle(windef.STD_ERROR_HANDLE)
console_stderr = create_file_from_handle(stderr_handle, "w")
sys.stderr = console_stderr
def enable_privilege(lpszPrivilege, bEnablePrivilege):
"""Enable of disable a privilege: enable_privilege(SE_DEBUG_NAME, True)"""
tp = TOKEN_PRIVILEGES()
luid = LUID()
hToken = HANDLE()
winproxy.OpenProcessToken(winproxy.GetCurrentProcess(), TOKEN_ALL_ACCESS, byref(hToken))
winproxy.LookupPrivilegeValueA(None, lpszPrivilege, byref(luid))
tp.PrivilegeCount = 1
tp.Privileges[0].Luid = luid
if bEnablePrivilege:
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED
else:
tp.Privileges[0].Attributes = 0
winproxy.AdjustTokenPrivileges(hToken, False, byref(tp), sizeof(TOKEN_PRIVILEGES))
winproxy.CloseHandle(hToken)
if winproxy.GetLastError() == windef.ERROR_NOT_ALL_ASSIGNED:
raise ValueError("Failed to get privilege {0}".format(lpszPrivilege))
return True
def check_is_elevated():
"""Return True if process is Admin"""
hToken = HANDLE()
elevation = TOKEN_ELEVATION()
cbsize = DWORD()
winproxy.OpenProcessToken(winproxy.GetCurrentProcess(), TOKEN_ALL_ACCESS, byref(hToken))
winproxy.GetTokenInformation(hToken, TokenElevation, byref(elevation), sizeof(elevation), byref(cbsize))
winproxy.CloseHandle(hToken)
return elevation.TokenIsElevated
def check_debug():
"""Check that kernel is in debug mode
beware of NOUMEX (https://msdn.microsoft.com/en-us/library/windows/hardware/ff556253(v=vs.85).aspx#_______noumex______)"""
hkresult = HKEY()
cbsize = DWORD(1024)
bufferres = (c_char * cbsize.value)()
winproxy.RegOpenKeyExA(HKEY_LOCAL_MACHINE, "System\\CurrentControlSet\\Control", 0, KEY_READ, byref(hkresult))
winproxy.RegGetValueA(hkresult, None, "SystemStartOptions", RRF_RT_REG_SZ, None, byref(bufferres), byref(cbsize))
winproxy.RegCloseKey(hkresult)
control = bufferres[:]
if "DEBUG" not in control:
# print "[-] Enable debug boot!"
# print "> bcdedit /debug on"
return False
if "DEBUG=NOUMEX" not in control:
pass
# print "[*] Warning noumex not set!"
# print "> bcdedit /set noumex on"
return True
def pop_shell():
"""Pop a console with an InterativeConsole"""
create_console()
FixedInteractiveConsole(locals()).interact()
class VirtualProtected(object):
"""A context manager usable like `VirtualProtect` that will restore the old protection at exit
Example::
with utils.VirtualProtected(IATentry.addr, ctypes.sizeof(PVOID), windef.PAGE_EXECUTE_READWRITE):
IATentry.value = 0x42424242
"""
class DisableWow64FsRedirection(object):
"""A context manager that disable the Wow64 Fs Redirection"""
| [
11748,
269,
19199,
198,
11748,
13845,
85,
6098,
83,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
2438,
198,
198,
11748,
9168,
198,
6738,
11485,
1330,
1592,
36436,
198,
6738,
11485,
27568,
62,
4299,
1330,
2344,
891,
198,
6738,
11485,
... | 2.565333 | 1,500 |
import os
import shutil
import subprocess
import sys
def run(*args, env: dict = None, check=True):
"""Runs command and exits script gracefully on errors."""
print(f'+ {" ".join(args)}')
if env is None:
env = os.environ
else:
env = {**env, **os.environ}
result = subprocess.run(args, env=env)
if check:
try:
result.check_returncode()
except subprocess.CalledProcessError as err:
if result.stderr:
print(result.stderr.decode('utf-8'))
print(err)
sys.exit(1)
def get_output(*args):
"""Gets output from command"""
try:
return subprocess.run(args, check=True, stdout=subprocess.PIPE).stdout.decode('utf-8')
except subprocess.CalledProcessError as err:
print(err)
sys.exit(1)
def require(*commands: str):
"""Checks that required commands are available somewhere on $PATH."""
# Allow syntax of `command:snap-package` to control the name of the
# snap package to tell the user to install.
commands = [c.rsplit(':', 1) for c in commands]
# Check that the commands exist.
missing = [c for c in commands if shutil.which(c[0]) is None]
if missing:
print('Some dependencies were not found. Please install them with:\n')
for command in missing:
print(f' sudo snap install {command[-1]} --classic')
print()
sys.exit(1)
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
850,
14681,
198,
11748,
25064,
628,
198,
4299,
1057,
46491,
22046,
11,
17365,
25,
8633,
796,
6045,
11,
2198,
28,
17821,
2599,
198,
220,
220,
220,
37227,
10987,
82,
3141,
290,
30151,
4226,... | 2.432886 | 596 |
""" Investment adviser module """
import os
import json
import config
from model.bank_account import get_next_investment_account
_INVEST_FILE = "invest.json"
class InvestmentAdviser:
""" Investment adviser class """
def advise(self, amount: float) -> []:
""" Advise investment """
result = []
for inv in self._invest:
if inv["percentage"] <= 0:
continue
entry_amount = amount * inv["percentage"] / 100
entry = {"bank": "", "account": "", "amount": entry_amount}
if inv["type"] == "CURRENCY":
inv_bank, inv_acc = get_next_investment_account()
entry["bank"] = inv_bank
entry["account"] = inv_acc
elif inv["type"] == "STOCK":
entry["bank"] = inv["company"]
entry["account"] = inv["type"]
elif inv["type"] == "CRYPTO":
entry["bank"] = inv["company"]
entry["account"] = inv["type"]
else:
raise Exception("Unknown investment type: " + inv["type"])
result.append(entry)
return result
| [
37811,
20877,
12534,
8265,
37227,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
4566,
198,
6738,
2746,
13,
17796,
62,
23317,
1330,
651,
62,
19545,
62,
24859,
434,
62,
23317,
198,
198,
62,
1268,
53,
6465,
62,
25664,
796,
366,
24859,
... | 2.179775 | 534 |
import numpy as np
import matplotlib.pyplot as plt
import time | [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
640
] | 3.263158 | 19 |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from goodtables.checks.blank_header import blank_header
import goodtables.cells
# Check
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,... | 3.441558 | 77 |
import numpy as np
import pandas as pd
d1 = {
'c1':[1,2,3,4],
'c2':[444,555,666,444],
'c3':'abc def hij lmn'.split()}
d2 = {
'c1':[1,2,3],
'c4':'x y z'.split()
}
print (d1)
df1 = pd.DataFrame(d1)
print (df1)
print(df1['c2'].unique())
df2 = pd.DataFrame(d2)
print (pd.merge(df1,df2,how="inner",on='c1'))
print (df1['c2'].value_counts())
print (df1.index.names ) | [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
67,
16,
796,
1391,
198,
6,
66,
16,
10354,
58,
16,
11,
17,
11,
18,
11,
19,
4357,
198,
6,
66,
17,
10354,
58,
30272,
11,
31046,
11,
27310,
11,
30272,
... | 1.867347 | 196 |
__version__ = '36.1.0'
| [
834,
9641,
834,
796,
705,
2623,
13,
16,
13,
15,
6,
198
] | 1.916667 | 12 |
# -*- coding: utf-8 -*-
'''
for database schema migration.
Memo for Usage:
migrate.migrate(torcms_migrator.rename_table('e_layout', 'mablayout'))
migrate.migrate(torcms_migrator.drop_column('tabtag', 'role_mask'))
'''
from playhouse import migrate
from playhouse.postgres_ext import BinaryJSONField
import config
def run_migrate(*args):
'''
running some migration.
:return:
'''
print('Begin migrate ...')
torcms_migrator = migrate.PostgresqlMigrator(config.DB_CON)
version_field = migrate.IntegerField(null = False, default=1)
try:
migrate.migrate(torcms_migrator.add_column('mabgson', 'version', version_field))
except:
pass
print('Migration finished.')
if __name__ == '__main__':
run_migrate('aa')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
1640,
6831,
32815,
13472,
13,
198,
198,
13579,
78,
329,
29566,
25,
198,
220,
220,
220,
32492,
13,
76,
42175,
7,
13165,
46406,
62,
76,
3692,
1352,
... | 2.574257 | 303 |
from asyncorm.apps.app_config import AppConfig
| [
6738,
30351,
579,
13,
18211,
13,
1324,
62,
11250,
1330,
2034,
16934,
628
] | 3.692308 | 13 |
import ast
import itertools
import textwrap
from pytype import config
from pytype.tests import test_base
from pytype.tools.annotate_ast import annotate_ast
import six
test_base.main(globals(), __name__ == '__main__')
| [
11748,
6468,
198,
11748,
340,
861,
10141,
198,
11748,
2420,
37150,
198,
198,
6738,
12972,
4906,
1330,
4566,
198,
6738,
12972,
4906,
13,
41989,
1330,
1332,
62,
8692,
198,
6738,
12972,
4906,
13,
31391,
13,
34574,
378,
62,
459,
1330,
24708... | 3.157143 | 70 |
from sqlalchemy.orm.session import make_transient, make_transient_to_detached
from app.utils.settings import app_config
from flask_allows import Not, Permission
from app.utils.requirements import IsAdmin
from flask_jwt_extended import current_user
from app.user.models import User, Group
from ..schemas import UsersSchema, UserSchema, UserAddSchema, UserUpdateSchema
from app.extensions import db
from app.core.exceptions import ValidationError
| [
6738,
44161,
282,
26599,
13,
579,
13,
29891,
1330,
787,
62,
7645,
1153,
11,
787,
62,
7645,
1153,
62,
1462,
62,
15255,
2317,
198,
6738,
598,
13,
26791,
13,
33692,
1330,
598,
62,
11250,
198,
6738,
42903,
62,
47205,
1330,
1892,
11,
244... | 3.596774 | 124 |
import dash
import dash_core_components as dcc
import dash_html_components as html
# object Dash app
app = dash.Dash()
app.layout = html.Div([
# Div untuk Dropdown
html.Div([
html.Label(["Length Unit"]),
dcc.Dropdown(id='my-dropdown',
options=[{'label':'cm', 'value': 'centimeter'},
{'label':'m', 'value': 'meter'},
{'label':'km', 'value': 'kilometer'},
{'label':'ft', 'value': 'feet'}
],
value='feet'
)
], style={'width': '100px'}),
# Div untuk Input
html.Div([
html.Label(["Length Value"]),
dcc.Input(id='my-input',
placeholder='Masukkan Nilai',
type='number', # tipe bisa "text", "number", "password", "email"
value=0 # default value yang menyesuaikan tipe
)
], style={'width': '100px'}),
# Div untuk RadioItems
html.Div([
html.Label(["Type of Unit"]),
dcc.RadioItems(id='my-radio',
options=[{'label':'Length', 'value': 'length'},
{'label':'Temperature', 'temperature': ''},
{'label':'Pressure', 'value': 'pressure'},
{'label':'Angle', 'value': 'angle'}
],
value='length'
)
], style={'width': '100%'}),
# Div untuk Button
html.Div([
html.Label(["Push the button !"]),
html.Button(["Click Me"],id='my-button')
], style={'width': '200px'}),
])
if __name__ == '__main__':
app.run_server() | [
11748,
14470,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
220,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
198,
2,
2134,
16189,
598,
198,
1324,
796,
14470,
13,
43041,
3419,
220,
198,
198,
1324,
13,
... | 2.224252 | 602 |
# coding: UTF-8
'''
Created on Nov 13, 2018
@author: Yusuke_Tokugawa
'''
import dataclasses
from typing import List
@dataclasses.dataclass | [
2,
19617,
25,
41002,
12,
23,
198,
7061,
6,
198,
41972,
319,
5267,
1511,
11,
2864,
198,
198,
31,
9800,
25,
41749,
4649,
62,
19042,
1018,
6909,
198,
7061,
6,
198,
11748,
4818,
330,
28958,
198,
6738,
19720,
1330,
7343,
198,
198,
31,
... | 2.8 | 50 |
# -*- encoding: utf-8 -*-
#Written by: Karim shoair - D4Vinci ( Cr3dOv3r )
import os,time,subprocess,pkg_resources
from . import updater
from .color import *
banner = """{G}
/T /I
/ |/ | .-~/
T\ Y I |/ / _
/T | \I | I Y.-~/
I l /I T\ | | l | T /
__ | \l \l \I l __l l \ ` _. |
\ ~-l `\ `\ \ \\ ~\ \ `. .-~ |
\ ~-. "-. ` \ ^._ ^. "-. / \ |
.--~-._ ~- ` _ ~-_.-"-." ._ /._ ." ./
>--. ~-. ._ ~>-" "\\\ 7 7 ]
^.___~"--._ ~-( .-~ . `\ Y . / |
<__ ~"-. ~ /_/ \ \I Y : |
^-.__ ~(_/ \ >._: | l______
^--.,___.-~" /_/ ! `-.~"--l_ / ~"-.
(_/ . ~( /' "~"--,Y -{W}=b{G}-. _) ______ _ ___ _
(_/ . \ : / l c"~o \\ | ___ \ | | |_ | | |
\ / `. . .^ \_.-~"~--. ) | |_/ /_ _ ___| |_ ___ | | __ _ ___| | _____ _ __
(_/ . ` / / ! )/ | __/ _` / __| __/ _ \ | |/ _` |/ __| |/ / _ \ '__|
/ / _. '. .': / ' | | | (_| \__ \ || __/\__/ / (_| | (__| < __/ |
~(_/ . / _ ` .-<_ \_| \__,_|___/\__\___\____/ \__,_|\___|_|\_\___|_|
/_/ . ' .-~" `. / \ \ ,z=. /────────────────────────────────────────────────\\
~( / ' : | K "-.~-.______// {W}[{Y}=>{W}] Add PasteJacking to web-delivery attacks [{Y}<={W}]{G}
"-,. l I/ \_ __(--->._(==. {W}[{Y}=>{W}] {B}Created by: {R}Karim Shoair (D4Vinci) {W}[{Y}<={W}]{G}
//( \ < ~"~" // {W}[{Y}=>{W}] {B}Version: {R}{version} {W}[{Y}<={W}]{G}
/' /\ \ \ ,v=. (( {W}[{Y}=>{W}] {B}Codename:{R} Hijack {W}[{Y}<={W}]{G}
.^. / /\ " )__ //===- ` {W}[{Y}=>{W}] {B}Follow me on Twitter: {R}@D4Vinci1 {W}[{Y}<={W}]{G}
/ / ' ' "-.,__ (---(==- {W}[{Y}=>{W}] [{Y}<={W}]{G}
.^ ' : T ~" ll {W}[{Y}=>{W}] CHOOSE A TARGET TO BEGIN [{Y}<={W}]{G}
/ . . . : | :! \\ \________________________________________________/
(_/ / | | j-" ~^
~-<_(_.^-~"
"""
core_dir = pkg_resources.resource_filename('PasteJacker', 'Core')
templates_dir = pkg_resources.resource_filename('PasteJacker', 'templates')
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
25354,
416,
25,
9375,
320,
7354,
958,
532,
360,
19,
53,
259,
979,
357,
3864,
18,
67,
46,
85,
18,
81,
1267,
198,
11748,
28686,
11,
2435,
11,
7266,
14681,
11,
353... | 1.436447 | 1,959 |
import numpy as np
from ..feat_selectors import FeatureSelector
| [
11748,
299,
32152,
355,
45941,
198,
6738,
11485,
27594,
62,
19738,
669,
1330,
27018,
17563,
273,
628
] | 3.823529 | 17 |
import os
import csv
import codecs
import yaml
import time
import numpy as np
import nltk
from nltk.translate import bleu_score
import pickle
import gzip
def read_config(path):
'''读取config文件'''
return AttrDict(yaml.load(open(path, 'r')))
| [
11748,
28686,
201,
198,
11748,
269,
21370,
201,
198,
11748,
40481,
82,
201,
198,
11748,
331,
43695,
201,
198,
11748,
640,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
299,
2528,
74,
201,
198,
6738,
299,
2528,
74,
13,
7645... | 2.049645 | 141 |
# -*- coding: utf-8 -*-
"""Add jinja-evaluated types to lektor.
"""
import jinja2
from lektor.environment import (
Expression,
FormatExpression,
)
from lektor.pluginsystem import Plugin
from lektor.types import Type
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
4550,
474,
259,
6592,
12,
18206,
6605,
3858,
284,
443,
74,
13165,
13,
198,
37811,
198,
11748,
474,
259,
6592,
17,
198,
6738,
443,
74,
13165,
13,
38986,
1330,
35... | 2.709302 | 86 |
'''
Non-business-logic utility functions
'''
def firsts(rows):
'''
Returns the set of first elements of all rows:
>>> sorted(firsts([(7, 1, 2),\
[5, 4, 0, 3],\
[8, 4],\
[5, 4]]))
[5, 7, 8]
'''
return set(r[0] for r in rows)
def pack_by(l, n):
'''
Yields elements from l in successive lists of size n
>>> list(pack_by(list(range(10)), 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
'''
rest = l
while rest:
curr, rest = rest[:n], rest[n:]
yield curr
def append_each(l, to_append):
'''
>>> append_each(['a', 'b', 'c'], tuple(range(2))) # doctest: +NORMALIZE_WHITESPACE
[('a', 0, 1),
('b', 0, 1),
('c', 0, 1)]
'''
return [(element, ) + to_append for element in l]
| [
7061,
6,
198,
15419,
12,
22680,
12,
6404,
291,
10361,
5499,
198,
7061,
6,
198,
198,
4299,
717,
82,
7,
8516,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
16409,
262,
900,
286,
717,
4847,
286,
477,
15274,
25,
628,
220,
... | 2.005115 | 391 |
from stuff import *
import json,os
from requests import get
import numpy as np
from random import random
from math import sqrt,log
np.set_printoptions(precision=4,suppress=True)
np.set_printoptions(edgeitems=30, linewidth=100000)
adir='casesbyspecdate'
mindate='2021-02-24'# Can't get archive data earlier than 2021-02-24
now=datetime.datetime.utcnow().strftime('%Y-%m-%d')
infinity=7# Assume cases have stabilised after this many days
minday=datetoday(mindate)
maxday=datetoday(now)# exclusive
monday=datetoday('2021-06-07')
cases=[]
for day in range(minday,maxday):
date=daytodate(day)
fn=os.path.join(adir,date)
if not os.path.isfile(fn):
print("Loading cases as at",date)
url='https://api.coronavirus.data.gov.uk/v2/data?areaType=nation&areaCode=E92000001&metric=newCasesBySpecimenDate&format=json&release='+date
response=get(url,timeout=10)
if not response.ok: raise RuntimeError(f'Request failed: '+response.text)
data=response.json()['body']
with open(fn,'w') as fp: json.dump(data,fp,indent=2)
with open(fn,'r') as fp:
a=json.load(fp)
l=[d['newCasesBySpecimenDate'] for d in a if d['date']!=date]# 2021-03-15 has erroneous entry for 2021-03-15
cases.append(l)
# cases[x][y] = cases from specimen day minday+x-(y+1), as reported on day minday+x
# Specimen day minday+s, report day minday+r --> cases[r][r-s-1]
# transfer[d][r'] = number of cases on weekday d that get reported by (specimen date)+r'. r'=1, ..., infinity
transfer=np.zeros([7,infinity+1],dtype=int)
p0=np.zeros([7],dtype=int)
p1=np.zeros([7,infinity+1])
p2=np.zeros([7,infinity+1])
l1=np.zeros([7,infinity+1])
l2=np.zeros([7,infinity+1])
for s in range(len(cases)-infinity):
d=(minday+s-monday)%7
p0[d]+=1
for r in range(s+1,s+infinity+1):
transfer[d][r-s]+=cases[r][r-s-1]
p=cases[r][r-s-1]/cases[s+infinity][infinity-1]
p1[d][r-s]+=p
p2[d][r-s]+=p*p
l1[d][r-s]+=log(p)
l2[d][r-s]+=log(p)**2
print("Across = number of days after specimen day that result is reported")
print("Down = day of the week of the specimen day, starting at Monday")
print()
print(transfer)
print()
mu0=transfer/transfer[:,infinity][:,None]
print(mu0)
print()
print("mean(p)")
mu=p1/p0[:,None]
print(mu)
print("mean(p) Python format")
print(np.array2string(mu,separator=','))
sd=np.sqrt((p2-p1**2/p0[:,None])/(p0[:,None]-1))
print()
print("sd(p)")
print(sd)
print()
#sdq=np.sqrt(mu*(1-mu)/p0[:,None])
#print(sdq)
#print()
#print(sd/sdq)
sdl=np.sqrt((l2-l1**2/p0[:,None])/(p0[:,None]-1))
print("sd(logp)")
print(sdl)
print()
| [
6738,
3404,
1330,
1635,
198,
11748,
33918,
11,
418,
198,
6738,
7007,
1330,
651,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4738,
1330,
4738,
198,
6738,
10688,
1330,
19862,
17034,
11,
6404,
198,
198,
37659,
13,
2617,
62,
4798,
25811,... | 2.301259 | 1,112 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Concentric Transmon.
"""
from math import sin, cos
import numpy as np
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import BaseQubit
class TransmonConcentric(BaseQubit):
"""The base `TrasmonConcentric` class .
Inherits `BaseQubit` class.
Metal transmon object consisting of a circle surrounding by a concentric
ring. There are two Josephson Junction connecting the circle to the ring;
one at the south end and one at the north end. There is a readout resonator.
.. image::
transmon_concentric.png
.. meta::
Transmon Concentric
BaseQubit Default Options:
* connection_pads: empty Dict -- the dictionary which contains all active
connection lines for the qubit.
* _default_connection_pads: empty Dict -- the default values for the
(if any) connection lines of the qubit.
Default Options:
* width: '1000um' -- Width of transmon pocket
* height: '1000um' -- Height of transmon pocket
* rad_o: '170um' -- Outer radius
* rad_i: '115um' -- Inner radius
* gap: '35um' -- Radius of gap between two pads
* jj_w: '10um' -- Josephson Junction width
* res_s: '100um' -- Space between top electrode and readout resonator
* res_ext: '100um' -- Extension of readout resonator in x-direction
beyond midpoint of transmon
* fbl_rad: '100um' -- Radius of the flux bias line loop
* fbl_sp: '100um' -- Spacing between metal pad and flux bias loop
* fbl_gap: '80um' -- Space between parallel lines of the flux bias loop
* fbl_ext: '300um' -- Run length of flux bias line between circular
loop and edge of pocket
* pocket_w: '1500um' -- Transmon pocket width
* pocket_h: '1000um' -- Transmon pocket height
* cpw_width: '10.0um' -- Width of the readout resonator and flux bias line
"""
# default drawing options
default_options = Dict(
width='1000um', # width of transmon pocket
height='1000um', # height of transmon pocket
rad_o='170um', # outer radius
rad_i='115um', # inner radius
gap='35um', # radius of gap between two pads
jj_w='10um', # Josephson Junction width
res_s='100um', # space between top electrode and readout resonator
res_ext=
'100um', # extension of readout resonator in x-direction beyond midpoint of transmon
fbl_rad='100um', # radius of the flux bias line loop
fbl_sp='100um', # spacing between metal pad and flux bias loop
fbl_gap='80um', # space between parallel lines of the flux bias loop
fbl_ext=
'300um', # run length of flux bias line between circular loop and edge of pocket
pocket_w='1500um', # transmon pocket width
pocket_h='1000um', # transmon pocket height
cpw_width='10.0um', # width of the readout resonator and flux bias line
inductor_width='5.0um' # width of the Josephson Junctions
)
"""Default drawing options"""
TOOLTIP = """The base `TrasmonConcentric` class."""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
# draw the concentric pad regions
outer_pad = draw.Point(0, 0).buffer(p.rad_o)
space = draw.Point(0, 0).buffer((p.gap + p.rad_i))
outer_pad = draw.subtract(outer_pad, space)
inner_pad = draw.Point(0, 0).buffer(p.rad_i)
#gap = draw.subtract(space, inner_pad)
#pads = draw.union(outer_pad, inner_pad)
# draw the top Josephson Junction
jj_t = draw.LineString([(0.0, p.rad_i), (0.0, p.rad_i + p.gap)])
# draw the bottom Josephson Junction
jj_b = draw.LineString([(0.0, -1.0 * p.rad_i),
(0.0, -1.0 * p.rad_i - 1.0 * p.gap)])
# draw the readout resonator
qp1a = (-0.5 * p.pocket_w, p.rad_o + p.res_s
) # the first (x,y) coordinate is qpin #1
qp1b = (p.res_ext, p.rad_o + p.res_s
) # the second (x,y) coordinate is qpin #1
rr = draw.LineString([qp1a, qp1b])
# draw the flux bias line
a = (0.5 * p.pocket_w, -0.5 * p.fbl_gap)
b = (0.5 * p.pocket_w - p.fbl_ext, -0.5 * p.fbl_gap)
c = (p.rad_o + p.fbl_sp + p.fbl_rad, -1.0 * p.fbl_rad)
d = (p.rad_o + p.fbl_sp + 0.2929 * p.fbl_rad, 0.0 - 0.7071 * p.fbl_rad)
e = (p.rad_o + p.fbl_sp, 0.0)
f = (p.rad_o + p.fbl_sp + 0.2929 * p.fbl_rad, 0.0 + 0.7071 * p.fbl_rad)
g = (p.rad_o + p.fbl_sp + p.fbl_rad, p.fbl_rad)
h = (0.5 * p.pocket_w - p.fbl_ext, 0.5 * p.fbl_gap)
i = (0.5 * p.pocket_w, 0.5 * p.fbl_gap)
fbl = draw.LineString([a, b, c, d, e, f, g, h, i])
# draw the transmon pocket bounding box
pocket = draw.rectangle(p.pocket_w, p.pocket_h)
# Translate and rotate all shapes
objects = [outer_pad, inner_pad, jj_t, jj_b, pocket, rr, fbl]
objects = draw.rotate(objects, p.orientation, origin=(0, 0))
objects = draw.translate(objects, xoff=p.pos_x, yoff=p.pos_y)
[outer_pad, inner_pad, jj_t, jj_b, pocket, rr, fbl] = objects
# define a function that both rotates and translates the qpin coordinates
# rotate and translate the qpin coordinates
qp1a = qpin_rotate_translate(qp1a)
qp1b = qpin_rotate_translate(qp1b)
a = qpin_rotate_translate(a)
b = qpin_rotate_translate(b)
h = qpin_rotate_translate(h)
i = qpin_rotate_translate(i)
##############################################################
# Use the geometry to create Metal QGeometry
geom_rr = {'path1': rr}
geom_fbl = {'path2': fbl}
geom_outer = {'poly1': outer_pad}
geom_inner = {'poly2': inner_pad}
geom_jjt = {'poly4': jj_t}
geom_jjb = {'poly5': jj_b}
geom_pocket = {'poly6': pocket}
self.add_qgeometry('path',
geom_rr,
layer=1,
subtract=False,
width=p.cpw_width)
self.add_qgeometry('path',
geom_fbl,
layer=1,
subtract=False,
width=p.cpw_width)
self.add_qgeometry('poly', geom_outer, layer=1, subtract=False)
self.add_qgeometry('poly', geom_inner, layer=1, subtract=False)
self.add_qgeometry('junction',
geom_jjt,
layer=1,
subtract=False,
width=p.inductor_width)
self.add_qgeometry('junction',
geom_jjb,
layer=1,
subtract=False,
width=p.inductor_width)
self.add_qgeometry('poly', geom_pocket, layer=1, subtract=True)
###########################################################################
# Add Qpin connections
self.add_pin('pin1',
points=np.array([qp1b, qp1a]),
width=0.01,
input_as_norm=True)
self.add_pin('pin2',
points=np.array([b, a]),
width=0.01,
input_as_norm=True)
self.add_pin('pin3',
points=np.array([h, i]),
width=0.01,
input_as_norm=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
2177,
11,
33448,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
... | 2.099458 | 3,871 |
###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2014 Justin Lovinger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""Helpful functions for most metaheuristics."""
import random
def random_binary_solution(solution_size):
    """Make a list of random 0s and 1s.

    Each position is drawn independently and uniformly from {0, 1}.
    """
    bits = []
    for _ in range(solution_size):
        bits.append(random.randint(0, 1))
    return bits
def random_real_solution(solution_size, lower_bounds, upper_bounds):
    """Make a list of random real numbers between lower and upper bounds.

    Position i is drawn uniformly from [lower_bounds[i], upper_bounds[i]].
    """
    values = []
    for i in range(solution_size):
        values.append(random.uniform(lower_bounds[i], upper_bounds[i]))
    return values
def make_population(population_size, solution_generator, *args, **kwargs):
    """Make a population with the supplied generator.

    The generator is invoked once per member with the given extra
    arguments, so every member is an independent object.
    """
    population = []
    for _ in range(population_size):
        population.append(solution_generator(*args, **kwargs))
    return population
| [
29113,
29113,
7804,
4242,
21017,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
1946,
10799,
39911,
3889,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
... | 3.586643 | 554 |
import csv
import json
from itertools import zip_longest

# Keywords searched for when excerpting and counting, in priority order.
KEYWORDS = ('power', 'battery', 'energy', 'sustainab', 'green')
CONTEXT_CHARS = 45  # characters of context kept on each side of a keyword hit


def join_or_blank(parts):
    """Join a list of strings into one; tolerate missing data (None) as ''."""
    try:
        return ''.join(parts)
    except TypeError:
        return ''


def keyword_snippet(text):
    """Return a short excerpt of *text* centred on the first keyword found.

    The first keyword (in KEYWORDS priority order) present in the text
    anchors the excerpt: up to CONTEXT_CHARS characters on each side of its
    first occurrence are kept.  If no keyword occurs, the first
    2 * CONTEXT_CHARS characters of the text are returned instead.
    """
    for keyword in KEYWORDS:
        if keyword in text:
            before, after = text.split(keyword, 1)
            return before[-CONTEXT_CHARS:] + keyword + after[:CONTEXT_CHARS]
    return text[:2 * CONTEXT_CHARS]


# Load the scraped ros-discourse threads.
with open('../energy_detectors/data/ros-discourse_data.json') as f:
    rosd_data = json.load(f)

rosd_url = [item.get('url') for item in rosd_data]
rosd_title = [item.get('title') for item in rosd_data]

# Synthetic row ids and the collection label, one per thread.
rosd_id = ['ROSD%d' % i for i in range(len(rosd_url))]
collection_name = ['ROSDiscourse'] * len(rosd_url)

# Flatten each thread's contents/details lists into single strings; the
# details field can be missing (None), in which case it contributes ''.
rosd_tcontents_new = [join_or_blank(item.get('thread_contents'))
                      for item in rosd_data]
rosd_tdetails_new = [join_or_blank(item.get('thread_details'))
                     for item in rosd_data]

# Full searchable text per thread (contents + details).  This was previously
# hard-coded to the first 197 threads; derive the count from the data.
raw_contents = [contents + details
                for contents, details in zip(rosd_tcontents_new,
                                             rosd_tdetails_new)]

# Short keyword-centred excerpt written to the CSV.
raw_contents_final = [keyword_snippet(text) for text in raw_contents]

# Per-thread keyword frequencies.  'batter' also matches 'batteries'.
rosd_battery = [text.count('batter') for text in raw_contents]
rosd_power = [text.count('power') for text in raw_contents]
rosd_energy = [text.count('energy') for text in raw_contents]
rosd_sustain = [text.count('sustainab') for text in raw_contents]
rosd_green = [text.count('green') for text in raw_contents]

rosd_list = [rosd_id,
             rosd_url,
             collection_name,
             rosd_title,
             raw_contents_final,
             rosd_battery,
             rosd_energy,
             rosd_power,
             rosd_sustain,
             rosd_green
             ]

# Transpose the column lists into rows and append them to the shared CSV.
export_data = zip_longest(*rosd_list, fillvalue='')
with open('data/energy_data.csv', 'a', newline='') as myfile:
    wr = csv.writer(myfile)
    wr.writerows(export_data)
| [
11748,
269,
21370,
198,
11748,
33918,
198,
6738,
340,
861,
10141,
1330,
19974,
62,
6511,
395,
198,
198,
4480,
1280,
10786,
40720,
22554,
62,
15255,
478,
669,
14,
7890,
14,
4951,
12,
15410,
9047,
62,
7890,
13,
17752,
11537,
355,
277,
2... | 2.095627 | 1,715 |
import tensorflow as tf
import numpy as np
def mu_law(x, mu=255, int8=False):
  """A TF implementation of Mu-Law encoding.

  Args:
    x: The audio samples to encode between [-1, 1]
    mu: The Mu to use in our Mu-Law.
    int8: Use int8 encoding.

  Returns:
    out: The Mu-Law encoded int8 data [-128, 127].
  """
  # Clip just below 1 so floor(compressed * 128) stays within [-128, 127].
  clipped = tf.clip_by_value(x, -1, 0.999)
  compressed = tf.sign(clipped) * tf.log(1 + mu * tf.abs(clipped)) / np.log(1 + mu)
  quantized = tf.floor(compressed * 128)
  return tf.cast(quantized, tf.int8) if int8 else quantized
def mu_law_numpy(x, mu=255, int8=False):
  """A NumPy implementation of Mu-Law encoding.

  Args:
    x: The audio samples to encode between [-1, 1] (scalar or ndarray).
    mu: The Mu to use in our Mu-Law.
    int8: Use int8 encoding.

  Returns:
    out: The Mu-Law encoded data [-128, 127].
  """
  out = np.clip(x, -1, 0.999)
  out = np.sign(out) * np.log(1 + mu * np.abs(out)) / np.log(1 + mu)
  out = np.floor(out * 128)
  if int8:
    # Bug fix: this previously called tf.cast(out, tf.int8), pulling
    # TensorFlow into the NumPy code path and returning a tensor here.
    out = out.astype(np.int8)
  return out
def inv_mu_law(x, mu=255, name=None):
  """A TF implementation of inverse Mu-Law.

  Args:
    x: The Mu-Law samples to decode.
    mu: The Mu we used to encode these samples.
    name: Optional name for the final op.

  Returns:
    out: The decoded data.
  """
  # this method expects input x as an int between [-128, 127]
  x = tf.cast(x, tf.float32)
  # Undo the floor-quantisation by taking bin centres (x + 0.5).
  out = (x + 0.5) * 2. / (mu + 1)
  # TODO I think it should be the following, to have out \in [-1,1]
  # out = (x + 0.5) * 2. / mu
  out = tf.sign(out) / mu * ((1 + mu)**tf.abs(out) - 1)
  # Preserve exact zeros from the input (x is already float 0 there).
  out = tf.where(tf.equal(x, 0), x, out, name=name)
  return out
def condition(x, encoding):
  """Condition the input on the encoding.

  Args:
    x: The [mb, length, channels] float tensor input.
    encoding: The [mb, encoding_length, channels] float tensor encoding.

  Returns:
    The output after broadcasting the encoding to x's shape and adding them.
  """
  mb = tf.shape(x)[0]
  length = tf.shape(x)[1]
  channels = x.get_shape().as_list()[2]
  enc_mb = tf.shape(encoding)[0]
  enc_length = tf.shape(encoding)[1]
  enc_channels = encoding.get_shape().as_list()[2]
  # Channel counts must agree statically; batch sizes are checked at runtime.
  assert enc_channels == channels
  with tf.control_dependencies([tf.assert_equal(enc_mb, mb)]):
    # Insert a broadcast axis so each encoding step is added to a run of
    # length // enc_length consecutive input steps (the reshape to -1
    # assumes length is a multiple of enc_length — TODO confirm callers).
    encoding = tf.reshape(encoding, [mb, enc_length, 1, channels])
    x = tf.reshape(x, [mb, enc_length, -1, channels])
    x += encoding
    x = tf.reshape(x, [mb, length, channels])
  return x
def shift_right(x):
  """Shift the input over by one and a zero to the front.

  Args:
    x: The [mb, time, channels] tensor input.

  Returns:
    x_sliced: The [mb, time, channels] tensor output.
  """
  num_channels = x.get_shape().as_list()[2]
  time_steps = tf.shape(x)[1]
  # Pad one zero step at the front of the time axis, then drop the last
  # step so the overall length is unchanged.
  padded = tf.pad(x, [[0, 0], [1, 0], [0, 0]])
  return tf.slice(padded, [0, 0, 0], tf.stack([-1, time_steps, num_channels]))
def pool1d(x, pool_size, name, mode='avg', stride=None):
  """1D pooling function that supports multiple different modes.

  Args:
    x: The [mb, time, channels] float tensor that we are going to pool over.
    pool_size: The amount of samples we pool over.
    name: The name of the scope for the variables.
    mode: The type of pooling, either avg or max.
    stride: The stride length. Defaults to pool_size (non-overlapping).

  Returns:
    pooled: The [mb, time // stride, channels] float tensor result of pooling.

  Raises:
    TypeError: If mode is neither 'avg' nor 'max'.
  """
  if mode == 'avg':
    pool_fn = tf.layers.average_pooling1d
  elif mode == 'max':
    pool_fn = tf.layers.max_pooling1d
  else:
    raise TypeError("No such pooling function")
  stride = stride or pool_size
  # batch_size, length, num_channels = x.get_shape().as_list()
  length = tf.shape(x)[1]
  # Length is only known at runtime, so the static asserts below are
  # replaced by graph-level assertions via control_dependencies:
  # assert length % window_length == 0
  # assert length % stride == 0
  with tf.control_dependencies([tf.assert_equal(tf.mod(length, pool_size), 0),
                                tf.assert_equal(tf.mod(length, stride), 0)
                                ]):
    pooled = pool_fn(x, pool_size, stride, padding='VALID', name=name)
  return pooled
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
38779,
62,
6270,
7,
87,
11,
38779,
28,
13381,
11,
493,
23,
28,
25101,
2599,
198,
220,
220,
220,
37227,
32,
24958,
7822,
286,
8252,
12,
16966,
... | 2.337486 | 1,766 |
import unittest
from TestUtils import TestParser
| [
11748,
555,
715,
395,
198,
6738,
6208,
18274,
4487,
1330,
6208,
46677,
198
] | 3.769231 | 13 |
# Program: read the side of a square, compute its area, and show the user
# twice that area.
from decimal import Decimal

side = Decimal(input("Informe o lado do quadrado:"))
area = side ** 2
print("O dobro da área do quadrado é", area * 2)
2,
50110,
50041,
23781,
6118,
64,
8358,
2386,
23172,
257,
6184,
94,
21468,
390,
23781,
15094,
81,
4533,
11,
198,
2,
795,
384,
5162,
3755,
749,
260,
267,
466,
7957,
2244,
64,
6184,
94,
21468,
31215,
267,
514,
84,
6557,
27250,
13,
220... | 2.55102 | 98 |
#!/usr/bin/env python3
# IBM_PROLOG_BEGIN_TAG
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2022
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
#
# @package OpTestVIOS
# This class contains common functions for Virtual IO Server(VIOS)
import os
import re
import time
import common
from common.Exceptions import CommandFailed
import OpTestLogger
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class OpTestVIOS():
    '''
    Utility and functions of Virtual I/O Server(VIOS) object
    '''

    def gather_logs(self, list_of_commands=None, output_dir=None):
        '''
        Gather logs - collects default information (kernel version,
        ioslevel, errlog, snap) plus any custom commands supplied by the
        caller, writing one log file per command.

        :param list_of_commands: list of extra commands whose output should
            be logged (default: only the built-in set)
        :param output_dir: string, directory to store the gathered logs
            (default: a timestamped directory under the host results dir)
        :returns: True on success, CommandFailed exception on failure
        '''
        if not output_dir:
            output_dir = "Vios_Logs_%s" % (time.asctime(time.localtime())).replace(" ", "_")
        output_dir = os.path.join(self.conf.host().results_dir, output_dir, self.name)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # Bug fix: the old signature used a mutable default ([]) and
        # extended it in place, so default commands accumulated across
        # calls and a caller-supplied list was mutated.  Build a fresh
        # list instead.
        commands = list(list_of_commands or [])
        commands.extend(['cat /proc/version', 'ioslevel', 'errlog', 'snap'])
        try:
            for cmd in set(commands):
                output = "\n".join(self.run_command(r"%s" % cmd, timeout=600))
                # Derive a filesystem-safe file name from the command text.
                filename = "%s.log" % '-'.join((re.sub(r'[^a-zA-Z0-9]', ' ', cmd)).split())
                filepath = os.path.join(output_dir, filename)
                with open(filepath, 'w') as f:
                    f.write(output)
            # 'snap' leaves snap.pax.Z behind on the VIOS; rename it with a
            # timestamp so repeated runs do not overwrite each other.
            snap_backup_filename = time.strftime("%d_%m_%Y_%H_%M_%S") + "_snap.pax.Z"
            self.run_command("mv snap.pax.Z %s" % snap_backup_filename)
            log.warn("Please collect the snap logs. snap.pax.Z renamed to %s." % snap_backup_filename)
            return True
        except CommandFailed as cmd_failed:
            raise cmd_failed

    def run_command(self, cmd, timeout=60):
        '''
        Wrapper for running ssh.run_command

        :param cmd: string, command to run
        :param timeout: number, number of seconds for timeout
        :returns: output of the underlying ssh session (list of lines)
        '''
        return self.ssh.run_command(cmd, timeout)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19764,
62,
4805,
33462,
62,
33,
43312,
62,
42197,
198,
2,
4946,
47,
36048,
17406,
515,
6208,
4935,
198,
2,
198,
2,
25767,
669,
406,
6347,
10383,
532,
27975,
38162,
9947,
33160... | 2.517617 | 1,192 |
import mido
from pythonosc import udp_client
import time

# Endpoint of the OSC server (e.g. Traktor) we send beats to.
osc_host = "127.0.0.1"
osc_port = 31337

osc = udp_client.SimpleUDPClient(osc_host, osc_port)

# Alternative mode: forward real MIDI events instead of a fixed clock.
# with mido.open_input() as inport:
#     while True:
#         for msg in inport:
#             osc.send_message("/traktor/beat", msg.type)

# Emit an empty "/traktor/beat" message roughly 25 times per second.
while True:
    time.sleep(0.04)
    osc.send_message("/traktor/beat", [])
11748,
3095,
78,
198,
6738,
21015,
17500,
1330,
334,
26059,
62,
16366,
198,
11748,
640,
628,
198,
17500,
541,
796,
366,
16799,
13,
15,
13,
15,
13,
16,
1,
198,
17500,
634,
796,
35897,
2718,
628,
198,
16366,
796,
334,
26059,
62,
16366... | 2.347561 | 164 |
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
import datetime
from mediagoblin import messages, mg_globals
from mediagoblin.db.models import User
from mediagoblin.tools.response import render_to_response, redirect, render_404
from mediagoblin.tools.translate import pass_to_ugettext as _
from mediagoblin.auth import lib as auth_lib
from mediagoblin.auth import forms as auth_forms
from mediagoblin.auth.lib import send_verification_email, \
send_fp_verification_email
def email_debug_message(request):
    """
    Tell the user where to look for "sent" mail when the server runs in
    email debug mode (which is the current default), since no real email
    leaves the process in that mode.
    """
    if not mg_globals.app_config['email_debug_mode']:
        return
    # Developer-facing DEBUG text, so deliberately not translated.
    messages.add_message(
        request, messages.DEBUG,
        u"This instance is running in email debug mode. "
        u"The email will be on the console of the server process.")
def register(request):
    """
    Your classic registration view!

    GET renders the registration form; POST validates it, checks for
    username/email collisions, creates the user, logs them in, and sends
    the verification email.
    """
    # Redirects to indexpage if registrations are disabled
    if not mg_globals.app_config["allow_registration"]:
        messages.add_message(
            request,
            messages.WARNING,
            _('Sorry, registration is disabled on this instance.'))
        return redirect(request, "index")
    register_form = auth_forms.RegistrationForm(request.form)
    if request.method == 'POST' and register_form.validate():
        # TODO: Make sure the user doesn't exist already
        username = unicode(request.form['username'].lower())
        # Only the domain half of the email is case-insensitive, so split
        # on the first "@" and lowercase just that part.
        em_user, em_dom = unicode(request.form['email']).split("@", 1)
        em_dom = em_dom.lower()
        email = em_user + "@" + em_dom
        # Uniqueness checks beyond what the form itself can validate.
        users_with_username = User.query.filter_by(username=username).count()
        users_with_email = User.query.filter_by(email=email).count()
        extra_validation_passes = True
        if users_with_username:
            register_form.username.errors.append(
                _(u'Sorry, a user with that name already exists.'))
            extra_validation_passes = False
        if users_with_email:
            register_form.email.errors.append(
                _(u'Sorry, a user with that email address already exists.'))
            extra_validation_passes = False
        if extra_validation_passes:
            # Create the user
            user = User()
            user.username = username
            user.email = email
            user.pw_hash = auth_lib.bcrypt_gen_password_hash(
                request.form['password'])
            user.verification_key = unicode(uuid.uuid4())
            user.save()
            # log the user in
            request.session['user_id'] = unicode(user.id)
            request.session.save()
            # send verification email
            email_debug_message(request)
            send_verification_email(user, request)
            # redirect the user to their homepage... there will be a
            # message waiting for them to verify their email
            return redirect(
                request, 'mediagoblin.user_pages.user_home',
                user=user.username)
    # GET, or a POST that failed validation: re-render the form with errors.
    return render_to_response(
        request,
        'mediagoblin/auth/register.html',
        {'register_form': register_form})
def login(request):
    """
    MediaGoblin login view.

    If you provide the POST with 'next', it'll redirect to that view.
    On bad credentials the same generic failure is shown whether the
    username or the password was wrong.
    """
    login_form = auth_forms.LoginForm(request.form)
    login_failed = False
    if request.method == 'POST' and login_form.validate():
        user = User.query.filter_by(username=request.form['username'].lower()).first()
        if user and user.check_login(request.form['password']):
            # set up login in session
            request.session['user_id'] = unicode(user.id)
            request.session.save()
            if request.form.get('next'):
                return redirect(request, location=request.form['next'])
            else:
                return redirect(request, "index")
        else:
            # Prevent detecting who's on this system by testing login
            # attempt timings
            auth_lib.fake_login_attempt()
            login_failed = True
    # Falls through here on GET, invalid form, or failed credentials.
    return render_to_response(
        request,
        'mediagoblin/auth/login.html',
        {'login_form': login_form,
         'next': request.GET.get('next') or request.form.get('next'),
         'login_failed': login_failed,
         'allow_registration': mg_globals.app_config["allow_registration"]})
def verify_email(request):
    """
    Email verification view

    validates GET parameters against database and unlocks the user account, if
    you are lucky :)
    """
    # If we don't have userid and token parameters, we can't do anything; 404
    if not 'userid' in request.GET or not 'token' in request.GET:
        return render_404(request)
    user = User.query.filter_by(id=request.args['userid']).first()
    # Bug fix: an unknown userid previously fell through to the redirect
    # below and crashed on user.username (None); treat it as a 404.
    if user is None:
        return render_404(request)
    if user.verification_key == unicode(request.GET['token']):
        # Token matches: activate the account and invalidate the key so it
        # is single-use.
        user.status = u'active'
        user.email_verified = True
        user.verification_key = None
        user.save()
        messages.add_message(
            request,
            messages.SUCCESS,
            _("Your email address has been verified. "
              "You may now login, edit your profile, and submit images!"))
    else:
        messages.add_message(
            request,
            messages.ERROR,
            _('The verification key or user id is incorrect'))
    return redirect(
        request, 'mediagoblin.user_pages.user_home',
        user=user.username)
def resend_activation(request):
    """
    The reactivation view

    Resend the activation email.
    """
    if request.user is None:
        messages.add_message(
            request,
            messages.ERROR,
            _('You must be logged in so we know who to send the email to!'))
        return redirect(request, 'mediagoblin.auth.login')
    if request.user.email_verified:
        messages.add_message(
            request,
            messages.ERROR,
            _("You've already verified your email address!"))
        # Consistency fix: use attribute access like the rest of this
        # module; the old request.user['username'] dict-style lookup does
        # not work on the SQL-backed User model.
        return redirect(request, "mediagoblin.user_pages.user_home",
                        user=request.user.username)
    # Issue a fresh verification key, invalidating any older email link.
    request.user.verification_key = unicode(uuid.uuid4())
    request.user.save()
    email_debug_message(request)
    send_verification_email(request.user, request)
    messages.add_message(
        request,
        messages.INFO,
        _('Resent your verification email.'))
    return redirect(
        request, 'mediagoblin.user_pages.user_home',
        user=request.user.username)
def forgot_password(request):
    """
    Forgot password view

    Sends an email with an url to renew forgotten password.
    The form field accepts either a username or an email address.
    """
    fp_form = auth_forms.ForgotPassForm(request.form,
                                        username=request.GET.get('username'))
    if request.method == 'POST' and fp_form.validate():
        # '$or' not available till mongodb 1.5.3
        # Look the account up by username first, then fall back to email.
        user = User.query.filter_by(username=request.form['username']).first()
        if not user:
            user = User.query.filter_by(email=request.form['username']).first()
        if user:
            if user.email_verified and user.status == 'active':
                # Issue a recovery token valid for 10 days.
                user.fp_verification_key = unicode(uuid.uuid4())
                user.fp_token_expire = datetime.datetime.now() + \
                                       datetime.timedelta(days=10)
                user.save()
                send_fp_verification_email(user, request)
                messages.add_message(
                    request,
                    messages.INFO,
                    _("An email has been sent with instructions on how to "
                      "change your password."))
                email_debug_message(request)
            else:
                # special case... we can't send the email because the
                # username is inactive / hasn't verified their email
                messages.add_message(
                    request,
                    messages.WARNING,
                    _("Could not send password recovery email as "
                      "your username is inactive or your account's "
                      "email address has not been verified."))
                return redirect(
                    request, 'mediagoblin.user_pages.user_home',
                    user=user.username)
            return redirect(request, 'mediagoblin.auth.login')
        else:
            messages.add_message(
                request,
                messages.WARNING,
                _("Couldn't find someone with that username or email."))
            return redirect(request, 'mediagoblin.auth.forgot_password')
    # GET, or invalid form: render the forgot-password form.
    return render_to_response(
        request,
        'mediagoblin/auth/forgot_password.html',
        {'fp_form': fp_form})
def verify_forgot_password(request):
    """
    Check the forgot-password verification and possibly let the user
    change their password because of it.

    Requires both 'userid' and 'token' in the request data; the token must
    match the stored recovery key, be unexpired, and belong to an active,
    email-verified account.
    """
    # get form data variables, and specifically check for presence of token
    formdata = _process_for_token(request)
    if not formdata['has_userid_and_token']:
        return render_404(request)
    formdata_token = formdata['vars']['token']
    formdata_userid = formdata['vars']['userid']
    formdata_vars = formdata['vars']
    # check if it's a valid user id
    user = User.query.filter_by(id=formdata_userid).first()
    if not user:
        return render_404(request)
    # check if we have a real user and correct token
    if ((user and user.fp_verification_key and
         user.fp_verification_key == unicode(formdata_token) and
         datetime.datetime.now() < user.fp_token_expire
         and user.email_verified and user.status == 'active')):
        cp_form = auth_forms.ChangePassForm(formdata_vars)
        if request.method == 'POST' and cp_form.validate():
            user.pw_hash = auth_lib.bcrypt_gen_password_hash(
                request.form['password'])
            # Single-use token: clear it as soon as the password changes.
            user.fp_verification_key = None
            user.fp_token_expire = None
            user.save()
            messages.add_message(
                request,
                messages.INFO,
                _("You can now log in using your new password."))
            return redirect(request, 'mediagoblin.auth.login')
        else:
            # GET (or invalid change form): show the change-password form.
            return render_to_response(
                request,
                'mediagoblin/auth/change_fp.html',
                {'cp_form': cp_form})
    # in case there is a valid id but no user with that id in the db
    # or the token expired
    else:
        return render_404(request)
def _process_for_token(request):
"""
Checks for tokens in formdata without prior knowledge of request method
For now, returns whether the userid and token formdata variables exist, and
the formdata variables in a hash. Perhaps an object is warranted?
"""
# retrieve the formdata variables
if request.method == 'GET':
formdata_vars = request.GET
else:
formdata_vars = request.form
formdata = {
'vars': formdata_vars,
'has_userid_and_token':
'userid' in formdata_vars and 'token' in formdata_vars}
return formdata
| [
2,
22961,
6343,
38,
672,
2815,
1377,
28062,
515,
11,
18284,
2056,
13662,
198,
2,
15069,
357,
34,
8,
2813,
11,
2321,
6343,
38,
672,
2815,
20420,
13,
220,
4091,
37195,
20673,
13,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
... | 2.372453 | 5,104 |
def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
    """Round number of filters based on depth multiplier.

    A falsy multiplier (0 or None) leaves the channel count untouched;
    otherwise the scaled count is snapped to a friendly multiple via
    make_divisible.
    """
    if not multiplier:
        return channels
    return make_divisible(channels * multiplier, divisor, channel_min)
| [
628,
198,
4299,
2835,
62,
354,
8961,
7,
354,
8961,
11,
33090,
28,
16,
13,
15,
11,
2659,
271,
273,
28,
23,
11,
6518,
62,
1084,
28,
14202,
2599,
198,
220,
220,
220,
37227,
22685,
1271,
286,
16628,
1912,
319,
6795,
33090,
526,
15931,... | 2.956522 | 92 |
import unittest
from module import webpage_get
# URL expected to resolve and return a page successfully.
NORMAL_URL_LIST = ["http://www.baidu.com"]
# Malformed URL used to exercise the error path of webpage_get.
ABNORMAL_URL_LIST = ["aaa"]
# Seconds to wait before giving up on a fetch.
DEFAULT_TIMEOUT = 1
| [
11748,
555,
715,
395,
198,
6738,
8265,
1330,
35699,
62,
1136,
198,
198,
35510,
42126,
62,
21886,
62,
45849,
796,
14631,
4023,
1378,
2503,
13,
65,
1698,
84,
13,
785,
8973,
198,
6242,
35510,
42126,
62,
21886,
62,
45849,
796,
14631,
4607... | 2.660377 | 53 |
#
# Copyright (c) 2015 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'eperez'
from bson import ObjectId
from datetime import datetime
from flask import current_app, request
from eduid_action.common.action_abc import ActionPlugin
from eduid_userdb.tou import ToUEvent
from eduid_userdb.actions.tou import ToUUserDB, ToUUser
| [
2,
198,
2,
15069,
357,
66,
8,
1853,
399,
12532,
3118,
316,
317,
14,
50,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
220,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
198,
2,
220,
220,
1231,
176... | 3.228769 | 577 |
import time
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from throttle.zones.remoteip import RemoteIP
from throttle.exceptions import ThrottleZoneNotDefined, ThrottleImproperlyConfigured, RateLimitExceeded
from throttle.utils import load_class_from_path, serialize_bucket_key
from throttle.backends import get_backend
# Honour an explicit THROTTLE_ENABLED setting; by default throttle only
# when DEBUG is off, so local development is unthrottled.
THROTTLE_ENABLED = getattr(settings, 'THROTTLE_ENABLED', not settings.DEBUG)
# Cache of instantiated throttle zones, keyed by zone name.
_THROTTLE_ZONES = {}
| [
11748,
640,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
29976,
13,
89,
1952,
13,
47960,
541,
1330,
21520,
4061,
198,
6738,
29976... | 3.28169 | 142 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Mingjun Zhou <mingjun.zhou@gmail.com>
# Licence: BSD 3 clause
import numpy as np
def potvariables(pots):
    """Returns information about all variables in a set of potentials

    Return the variables and their number of states.
    If there is a dimension mismatch in the table then return con = 0.
    convec[i] = 0 reports that variable i has conflicting dimensions.

    Args:
        pots: a set of potentials

    Returns:
        variables: A list of all variables in pots
        nstates: A list of integers. nstates[idx] = number of dimension of
                 variables[idx]
        con: con = 0 if there is a dimension mismatch in the table;
             con = 1 otherwise
        convec: convec[i] = 0 reports that variable i has conflicting
                dimensions

    Raises:
        NameError: An error occured accessing a None (or empty) set of
            potentials
        ValueError: An error occurred accessing pots with None field or
            different size in table and variables field
    """
    if not pots:
        raise NameError('potentials should not be None')
    # Validate every potential up front before aggregating.
    for i, pot in enumerate(pots):
        if pot.variables.size == 0:
            # Bug fix: messages below were built as ValueError(msg, i),
            # which never interpolated %d; format them explicitly.
            raise ValueError('No.%d field of variables should not be None' % i)
        # Bug fix: this comparison used "is 0" (identity on an int literal),
        # which is incorrect idiom; use equality.
        if len(pot.table) == 0:
            raise ValueError('No.%d field of table should not be None' % i)
        if len(pot.variables) != len(pot.table.shape):
            raise ValueError('No.%d field of table and variables should not '
                             'be different size' % i)
    # Seed the result with the first potential, then merge the rest.
    variables = list(pots[0].variables)
    nstates = list(pots[0].table.shape)
    con = 1
    convec = list(np.ones(len(variables), 'int8'))
    for pot in pots[1:]:
        vs = pot.variables
        ns = list(pot.table.shape)
        for j, v in enumerate(vs):
            if v in variables:
                # Known variable: its dimension must agree everywhere.
                idx_va = variables.index(v)
                if ns[j] != nstates[idx_va]:
                    convec[idx_va] = 0
                    con = 0
            else:
                # New variable: record it with its dimension.
                variables.append(v)
                nstates.append(ns[j])
                convec.append(1)
    return variables, nstates, con, convec
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
6434,
25,
26980,
29741,
32222,
1279,
2229,
29741,
13,
38536,
31,
14816,
13,
785,
29,
198,
2,
10483,
594,
25,
... | 2.28446 | 1,139 |
"""
-*- coding: utf-8 -*-
Time : 2019/7/13 15:51
Author : Hansybx
"""
import re
import requests
from bs4 import BeautifulSoup
from flask import jsonify
from app.models.error import PasswordFailed
from app.models.student_info import StudentInfo
from app.utils.common_utils import put_to_mysql
# Browser-like request headers (plus VPN origin) so the portal treats us
# as a normal browser session.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
    , 'Origin': 'https://vpn.just.edu.cn',
    'Upgrade-Insecure-Requests': '1'
}
if __name__ == '__main__':
    # NOTE(review): hard-coded real-looking credentials for manual testing;
    # externalise or remove before sharing.  `student_info` is presumably
    # defined elsewhere in this module -- confirm.
    student_info('182210711114', 'hanzy2000')
| [
37811,
628,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7575,
220,
220,
220,
1058,
13130,
14,
22,
14,
1485,
1315,
25,
4349,
198,
13838,
220,
1058,
13071,
88,
65,
87,
198,
198,
37811,
198,
198,
11748,
302,
198,
198,... | 2.55 | 240 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import Column, MetaData, String, Table
from sysinv.common import constants
# Storage engine and character set applied to tables created by this
# database migration.
ENGINE = 'InnoDB'
CHARSET = 'utf8'
| [
2,
43907,
25,
7400,
11338,
28,
19,
6482,
10394,
28,
19,
2705,
8658,
11338,
28,
19,
198,
2,
198,
2,
15069,
357,
66,
8,
2211,
12,
5304,
3086,
5866,
11998,
11,
3457,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
... | 2.804124 | 97 |
# flake8: noqa
from importlib_metadata import version # type: ignore
from songpal.common import SongpalException
from songpal.device import Device
from songpal.notification import (
ConnectChange,
ContentChange,
Notification,
PowerChange,
VolumeChange,
)
# Package version, resolved from the installed distribution's metadata.
__version__ = version("python-songpal")
| [
2,
781,
539,
23,
25,
645,
20402,
198,
6738,
1330,
8019,
62,
38993,
1330,
2196,
220,
1303,
2099,
25,
8856,
198,
6738,
3496,
18596,
13,
11321,
1330,
10940,
18596,
16922,
198,
6738,
3496,
18596,
13,
25202,
1330,
16232,
198,
6738,
3496,
1... | 3.268041 | 97 |
from gym.envs.registration import register
from .wrappers import *
from .logger import *
from .envs import *
# Multi-armed bandit environments with 2, 4 and 8 arms respectively.
register(
    id='BanditsX2-v0',
    kwargs = {'num_bandits' : 2},
    entry_point='torch_rl.envs:BanditEnv',
)
register(
    id='BanditsX4-v0',
    kwargs = {'num_bandits' : 4},
    entry_point='torch_rl.envs:BanditEnv',
)
register(
    id='BanditsX8-v0',
    kwargs = {'num_bandits' : 8},
    entry_point='torch_rl.envs:BanditEnv',
)
# Roboschool-backed environments are optional: skip registration when the
# roboschool dependency is not installed.
try:
    from .roboschool_envs import *
    register(
        id='TRLRoboschoolReacher-v1',
        kwargs = {},
        entry_point='torch_rl.envs:RoboschoolReacher',
        max_episode_steps=150,
        reward_threshold=18.0,
        tags={ "pg_complexity": 1*1000000 },
    )
except ImportError as e:
    print('Roboschool environments excluded, import error')
# OpenSim musculoskeletal environments are likewise optional.
try:
    from .opensim_envs import *
    register(
        id='OsimArm2D-v1',
        kwargs={'visualize': False},
        entry_point='osim.env:Arm2DEnv'
    )
    register(
        id='OsimArm3D-v1',
        kwargs={'visualize': False},
        entry_point='osim.env:Arm3DEnv'
    )
    register(
        id='OsimRun3D-v1',
        kwargs={'visualize': False},
        entry_point='osim.env:Run3DEnv'
    )
except ImportError as e:
    print('Opensim environments excluded, import error ', e)
| [
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
6738,
764,
29988,
11799,
1330,
1635,
198,
6738,
764,
6404,
1362,
1330,
1635,
198,
6738,
764,
268,
14259,
1330,
1635,
198,
198,
30238,
7,
198,
220,
220,
220,
4686,
11639,
3... | 2.069337 | 649 |
import dash_html_components as html
import dash_core_components as dcc
import dash_table as dt
import pandas as pd
heading_2 = html.Header(
html.H2(
"Classify Emails (Spam or Ham)",
style={
"text-align": "center",
"margin": "10px",
"font-weight": "lighter",
}
)
)
para = html.P(
"Welcome to the spam classifier! Enter the email you wish to classify below:",
style={"margin": "10px", "font-weight": "lighter", }
)
heading_3_1 = html.H2(
"Email",
style={
"text-align": "left",
"margin": "10px",
"font-weight": "lighter",
}
)
heading_3_2 = html.H2(
"Prediction History",
style={
"text-align": "left",
"margin": "10px",
"font-weight": "lighter",
}
)
input_text = dcc.Textarea(
id="predict-input",
placeholder="Copy and Paste Email Here...",
style={
"width": "100%",
"margin": "10px",
"height": "300px"
}
)
button_back = dcc.Link(
html.Button('Back'), href="/",
style={"margin": "10px"}
)
input = html.Div(
[input_text, button_back],
style={"margin": "10px"}
)
df = pd.DataFrame({'Spam': [], 'Text': []})
output = dt.DataTable(
style_cell={
"text-align": "left",
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 0
},
id='predict-output',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
sort_action="native"
)
loading_wrapper_output = dcc.Loading(
id="loading-index",
type="circle",
children=[output]
)
predict_layout = html.Div(
[
heading_2,
heading_3_1,
para,
input,
heading_3_2,
loading_wrapper_output,
],
style={
"font-family": 'Palatino, "Palatino Linotype", "Palatino LT STD"',
}
)
| [
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
11487,
355,
288,
83,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
33878,
62,
17,
796,
27711,
1... | 2.096882 | 898 |
from PIL import Image
import numpy as np
label_colours = [(178, 45, 45), (153, 115, 115), (64, 36, 32), (255, 68, 0), (89, 24, 0), (191, 121, 96), (191, 102, 0),
(76, 41, 0), (153, 115, 38), (102, 94, 77), (242, 194, 0), (191, 188, 143), (226, 242, 0),
(119, 128, 0), (59, 64, 0), (105, 191, 48), (81, 128, 64), (0, 255, 0), (0, 51, 7), (191, 255, 208),
(96, 128, 113), (0, 204, 136), (13, 51, 43), (0, 191, 179), (0, 204, 255), (29, 98, 115), (0, 34, 51),
(163, 199, 217), (0, 136, 255), (41, 108, 166), (32, 57, 128), (0, 22, 166), (77, 80, 102),
(119, 54, 217), (41, 0, 77), (222, 182, 242), (103, 57, 115), (247, 128, 255), (191, 0, 153),
(128, 96, 117), (127, 0, 68), (229, 0, 92), (76, 0, 31), (255, 128, 179), (242, 182, 198)]
| [
6738,
350,
4146,
1330,
7412,
198,
11748,
299,
32152,
355,
45941,
628,
198,
18242,
62,
4033,
4662,
796,
47527,
23188,
11,
4153,
11,
4153,
828,
357,
21395,
11,
12279,
11,
12279,
828,
357,
2414,
11,
4570,
11,
3933,
828,
357,
13381,
11,
... | 1.922374 | 438 |
import sys
from fractions import *
sys.stdin = open('input.txt')
numTest = int(input())
for itertest in range(numTest):
n = int(input())
print 'Case %d: %s' % (itertest + 1, Fraction(n * (n - 1), 4))
| [
11748,
25064,
198,
6738,
49876,
1330,
1635,
198,
198,
17597,
13,
19282,
259,
796,
1280,
10786,
15414,
13,
14116,
11537,
198,
22510,
14402,
796,
493,
7,
15414,
28955,
198,
1640,
340,
861,
395,
287,
2837,
7,
22510,
14402,
2599,
198,
220,
... | 2.518072 | 83 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" test cases for LDA implementation """
import unittest
import os
from sparktkregtests.lib import sparktk_test
from sparktkregtests.lib import scoring_utils
if __name__ == '__main__':
unittest.main()
| [
2,
43907,
25,
900,
21004,
28,
40477,
12,
23,
198,
198,
2,
220,
15069,
1849,
7,
66,
8,
1849,
5304,
8180,
1849,
10606,
1819,
341,
1849,
198,
2,
198,
2,
220,
49962,
1849,
4625,
1849,
1169,
1849,
25189,
4891,
1849,
34156,
11,
1849,
14... | 2.591331 | 323 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import collections
from typing import Callable
import torch.nn as nn
from ..modules import DropBlock
class LinearDownsample(nn.Sequential):
'''
Downsample class with linear mapping.
This is a default donwsample mudule for ResNets.
'''
| [
11748,
17268,
198,
6738,
19720,
1330,
4889,
540,
198,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
11485,
18170,
1330,
14258,
12235,
628,
198,
4871,
44800,
8048,
39873,
7,
20471,
13,
44015,
1843,
2599,
198,
220,
220,
220,... | 3.227848 | 79 |
Cellname="???rk"
NodeName="???dea"
#Query()
#RemoveCertificate()
#ListKeystores()
RemoveKeystores2()
print "Saving configuration"
AdminConfig.save()
| [
28780,
3672,
2625,
28358,
81,
74,
1,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.113953 | 860 |
# This script is identical to the on for BFSongRepository but with canaries
from collections import defaultdict
import json
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
import vak.device
import vak.files
from vak.labeled_timebins import lbl_tb2segments, majority_vote_transform, lbl_tb_segment_inds_list, \
remove_short_segments
from vak import config, io, models, transforms
from vak.datasets.vocal_dataset import VocalDataset
def compute_metrics(metrics, y_true, y_pred, y_true_labels, y_pred_labels):
"""helper function to compute metrics
Parameters
----------
metrics : dict
where keys are metric names and values are callables that compute the metric
given ground truth and prediction
y_true : torch.Tensor
vector of labeled time bins
y_pred : torch.Tensor
vector of labeled time bins
y_true_labels : str
sequence of segment labels
y_pred_labels : str
sequence of segment labels
Returns
-------
metric_vals : defaultdict
"""
metric_vals = {}
for metric_name, metric_callable in metrics.items():
if metric_name == 'acc':
metric_vals[metric_name] = metric_callable(y_pred, y_true)
elif metric_name == 'levenshtein':
metric_vals[metric_name] = metric_callable(y_pred_labels, y_true_labels)
elif metric_name == 'segment_error_rate':
metric_vals[metric_name] = metric_callable(y_pred_labels, y_true_labels)
return metric_vals
ALPHANUMERIC = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
def remap(labelmap):
"""map integer labels to alphanumeric characters so we can compute edit distance metrics.
The mapping can be arbitrary as long as it is constant across all times we compute the metric.
"""
return {ALPHANUMERIC[ind]: val for ind, (key, val) in enumerate(labelmap.items())}
def map_number_labels_to_alphanumeric(labelvec):
"""Take a vector of 'str' labels that are all numbers and replace them with a string of characters
"""
return ''.join([ALPHANUMERIC[int(x)] for x in labelvec])
def metrics_df_from_toml_path(toml_path,
min_segment_dur,
device='cuda',
spect_key='s',
timebins_key='t'):
"""computes evaluation metrics on a dataset from a config.toml file
computes the metrics without and with transforms used for prediction
Parameters
----------
toml_path
min_segment_dur
device
spect_key
timebins_key
Returns
-------
df : pandas.Dataframe
"""
toml_path = Path(toml_path)
cfg = config.parse.from_toml(toml_path)
# spect_standardizer = joblib.load(cfg.eval.spect_scaler_path)
with cfg.eval.labelmap_path.open('r') as f:
labelmap = json.load(f)
model_config_map = config.models.map_from_path(toml_path, cfg.eval.models)
# ---- make eval dataset that we'll use to compute metrics
# each batch will give us dict with 'spect', 'annot' and 'spect_path'
# we can use 'spect_path' to find prediction in pred_dict and then compare to target
# dict also includes 'padding_mask' so we can "unpad" the prediction vectors
item_transform = transforms.get_defaults('eval',
spect_standardizer=None,
window_size=cfg.dataloader.window_size,
return_padding_mask=True,
)
eval_dataset = VocalDataset.from_csv(csv_path=cfg.eval.csv_path,
split='test',
labelmap=labelmap,
spect_key=spect_key,
timebins_key=timebins_key,
item_transform=item_transform,
)
eval_data = torch.utils.data.DataLoader(dataset=eval_dataset,
shuffle=False,
# batch size 1 because each spectrogram reshaped into a batch of windows
batch_size=1,
num_workers=cfg.eval.num_workers)
# get timebin dur to use when converting labeled timebins to labels, onsets and offsets
timebin_dur = io.dataframe.validate_and_get_timebin_dur(
pd.read_csv(cfg.eval.csv_path)
)
input_shape = eval_dataset.shape
# if dataset returns spectrogram reshaped into windows,
# throw out the window dimension; just want to tell network (channels, height, width) shape
if len(input_shape) == 4:
input_shape = input_shape[1:]
models_map = models.from_model_config_map(
model_config_map,
num_classes=len(labelmap),
input_shape=input_shape
)
if device is None:
device = vak.device.get_default_device()
records = defaultdict(list) # will be used with pandas.DataFrame.from_records to make output csv
to_long_tensor = transforms.ToLongTensor()
for model_name, model in models_map.items():
model.load(cfg.eval.checkpoint_path)
metrics = model.metrics # metric name -> callable map we use below in loop
pred_dict = model.predict(pred_data=eval_data,
device=device)
error_position_distribution = [] # will accumulate error time differences from syllable edges
num_err_bin = [] # will accumulate total number of error frames for normalization
progress_bar = tqdm(eval_data)
for ind, batch in enumerate(progress_bar):
y_true, padding_mask, spect_path = batch['annot'], batch['padding_mask'], batch['spect_path']
# need to convert spect_path to tuple for match in call to index() below
spect_path = tuple(spect_path)
records['spect_path'].append(spect_path[0]) # remove str from tuple
y_true = y_true.to(device)
y_true_np = np.squeeze(y_true.cpu().numpy())
t_vec = vak.files.spect.load(spect_path[0])['t']
y_true_labels, t_ons_s, t_offs_s = lbl_tb2segments(y_true_np,
labelmap,
t_vec)
y_true_labels = map_number_labels_to_alphanumeric(y_true_labels)
y_pred_ind = spect_path[0] # pred_dict['y'].index(spect_path)
y_pred = pred_dict[y_pred_ind] # pred_dict['y_pred'][y_pred_ind]
y_pred = torch.argmax(y_pred, dim=1) # assumes class dimension is 1
y_pred = torch.flatten(y_pred)
y_pred = y_pred.unsqueeze(0)[padding_mask]
y_pred_np = np.squeeze(y_pred.cpu().numpy())
y_pred_labels, _, _ = lbl_tb2segments(y_pred_np,
labelmap,
t_vec,
min_segment_dur=None,
majority_vote=False)
y_pred_labels = map_number_labels_to_alphanumeric(y_pred_labels)
metric_vals_batch = compute_metrics(metrics, y_true, y_pred, y_true_labels, y_pred_labels)
for metric_name, metric_val in metric_vals_batch.items():
records[metric_name].append(metric_val)
# --- apply majority vote and min segment dur transforms separately
# need segment_inds_list for both transforms
segment_inds_list = lbl_tb_segment_inds_list(y_pred_np,
unlabeled_label=labelmap['unlabeled'])
# ---- majority vote transform
y_pred_np_mv = majority_vote_transform(y_pred_np, segment_inds_list)
y_pred_mv = to_long_tensor(y_pred_np_mv).to(device)
y_pred_mv_labels, _, _ = lbl_tb2segments(y_pred_np_mv,
labelmap,
t_vec,
min_segment_dur=None,
majority_vote=False)
y_pred_mv_labels = map_number_labels_to_alphanumeric(y_pred_mv_labels)
metric_vals_batch_mv = compute_metrics(metrics, y_true, y_pred_mv,
y_true_labels, y_pred_mv_labels)
for metric_name, metric_val in metric_vals_batch_mv.items():
records[f'{metric_name}_majority_vote'].append(metric_val)
# ---- min segment dur transform
y_pred_np_mindur, _ = remove_short_segments(y_pred_np,
segment_inds_list,
timebin_dur=timebin_dur,
min_segment_dur=min_segment_dur,
unlabeled_label=labelmap['unlabeled'])
y_pred_mindur = to_long_tensor(y_pred_np_mindur).to(device)
y_pred_mindur_labels, _, _ = lbl_tb2segments(y_pred_np_mindur,
labelmap,
t_vec,
min_segment_dur=None,
majority_vote=False)
y_pred_mindur_labels = map_number_labels_to_alphanumeric(y_pred_mindur_labels)
metric_vals_batch_mindur = compute_metrics(metrics, y_true, y_pred_mindur,
y_true_labels, y_pred_mindur_labels)
for metric_name, metric_val in metric_vals_batch_mindur.items():
records[f'{metric_name}_min_segment_dur'].append(metric_val)
# ---- and finally both transforms, in same order we apply for prediction
y_pred_np_mindur_mv, segment_inds_list = remove_short_segments(y_pred_np,
segment_inds_list,
timebin_dur=timebin_dur,
min_segment_dur=min_segment_dur,
unlabeled_label=labelmap[
'unlabeled'])
y_pred_np_mindur_mv = majority_vote_transform(y_pred_np_mindur_mv,
segment_inds_list)
y_pred_mindur_mv = to_long_tensor(y_pred_np_mindur_mv).to(device)
y_pred_mindur_mv_labels, _, _ = lbl_tb2segments(y_pred_np_mindur_mv,
labelmap,
t_vec,
min_segment_dur=None,
majority_vote=False)
y_pred_mindur_mv_labels = map_number_labels_to_alphanumeric(y_pred_mindur_mv_labels)
metric_vals_batch_mindur_mv = compute_metrics(metrics, y_true, y_pred_mindur_mv,
y_true_labels, y_pred_mindur_mv_labels)
for metric_name, metric_val in metric_vals_batch_mindur_mv.items():
records[f'{metric_name}_min_dur_maj_vote'].append(metric_val)
# ---- accumulate error distances from true segment edges
num_err_bin.append(sum(y_true_np - y_pred_np_mindur_mv != 0))
err = (y_true_np - y_pred_np_mindur_mv != 0) & ((y_true_np == 0) | (y_pred_np_mindur_mv == 0))
error_position_distribution.append(
[min(np.abs(np.concatenate((t_ons_s, t_offs_s)) - tm)) for tm in t_vec[err == True]])
error_position_distribution = np.concatenate(error_position_distribution)
df = pd.DataFrame.from_records(records)
t1 = t_vec[1]
return df, error_position_distribution, num_err_bin, t1
CONFIG_ROOT = Path('src\\configs\\Canaries')
BIRD_ID_MIN_SEGMENT_DUR_MAP = {'llb3': 0.005,
'llb11': 0.005,
'llb16': 0.005}
if __name__ == '__main__':
main()
| [
2,
770,
4226,
318,
10411,
284,
262,
319,
329,
347,
10652,
506,
6207,
13264,
475,
351,
460,
3166,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
... | 1.829617 | 6,996 |
import json
from test_plus.test import TestCase
from instanotifier.feedsource.models import FeedSource
from instanotifier.feedsource.forms import FeedSourceForm
| [
11748,
33918,
198,
198,
6738,
1332,
62,
9541,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
916,
272,
313,
7483,
13,
12363,
10459,
13,
27530,
1330,
18272,
7416,
198,
6738,
916,
272,
313,
7483,
13,
12363,
10459,
13,
23914,
1330,
18272,
... | 3.727273 | 44 |
import pytest
from temperature import celsius_to_fahrenheit | [
11748,
12972,
9288,
198,
6738,
5951,
1330,
269,
32495,
62,
1462,
62,
69,
993,
34032
] | 3.933333 | 15 |
txt = str('S')
desconto = int(0)
while txt == 'S':
valor = float(input('Insira o valor do carro (sem desconto): '))
ano = int(input('Insira o ano de fabricação do veículo: '))
if ano <= 2010:
desconto = float(valor * 0.2)
elif ano <= 2020:
desconto = float(valor * 0.15)
elif ano > 2020:
desconto = float(valor * 0.1)
print(f'Com um desconto de R${desconto:.2f} o carro passa a custar R${valor - desconto:.2f}')
txt = str(input('Deseja continuar [S/N]? ')).upper()
while txt != 'S' and txt != 'N':
txt = str(input('Opção inválida\nDeseja continuar [S/N]? ')).upper()
print('\033[1;31mFIM DO PROGRAMA') | [
14116,
796,
965,
10786,
50,
11537,
198,
8906,
3642,
78,
796,
493,
7,
15,
8,
198,
198,
4514,
256,
742,
6624,
705,
50,
10354,
198,
220,
220,
220,
1188,
273,
796,
12178,
7,
15414,
10786,
20376,
8704,
267,
1188,
273,
466,
1097,
305,
3... | 2.121019 | 314 |
# -*- coding: utf-8 -*-
import abc
import os
import torchvision.transforms as transforms
from DLtorch.base import BaseComponent
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
450,
66,
198,
11748,
28686,
198,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
198,
6738,
23641,
13165,
354,
13,
8692,
1330,
7308,
21950,
628
] | 3.069767 | 43 |
from __future__ import division
"""Implementation of naive and inefficient mandelbrot calculator."""
import mandelbrot
import math
logger = mandelbrot.get_logger(__name__)
class NaiveCalculator(mandelbrot.MandelbrotCalculator):
"""See parrent."""
file_name_data = "naive_data.csv"
file_name_plot = "naive_plot.png"
def calculate(self):
"""See parrent."""
ms = list()
im_span = self.pim_max-self.pim_min
im_step = im_span / self.Pim
re_span = self.pre_max-self.pre_min
re_step = re_span / self.Pre
for i_im in range(self.Pim):
im = i_im*im_step + self.pim_min
row = list()
for i_re in range(self.Pre):
c = i_re*re_step + self.pre_min + im*1j
i = 0
z = 0+0j
while math.sqrt(abs(z)) <= self.T and i < self.I:
z = z**2 + c
i += 1
row.append(i/self.I)
ms.append(row)
return ms
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
37811,
3546,
32851,
286,
24354,
290,
30904,
6855,
417,
7957,
83,
28260,
526,
15931,
198,
198,
11748,
6855,
417,
7957,
83,
198,
11748,
10688,
198,
198,
6404,
1362,
796,
6855,
417,
7957,
83,
13,
... | 1.882784 | 546 |
import pytest
from listlookup import ListLookup
sample_list = [
{"id": 1, "country": "us", "name": "Atlanta"},
{"id": 2, "country": "us", "name": "Miami"},
{"id": 3, "country": "uk", "name": "Britain"},
{"id": 5, "country": "uk", "name": "Bermingham"},
{"id": 4, "country": "ca", "name": "Barrie"},
]
def test_lookup_does_not_modify_indexes():
"""
There was a bug that modified index after lookup
"""
cities = ListLookup(sample_list)
cities.index("country", lambda d: d['country'])
cities.index("name", lambda d: d['name'])
result = list(cities.lookup(country='us', name='Miami'))
assert len(result) == 1
second_res = list(cities.lookup(country='us', name='Atlanta'))
assert len(second_res) == 1
| [
11748,
12972,
9288,
198,
198,
6738,
1351,
5460,
929,
1330,
7343,
8567,
929,
198,
198,
39873,
62,
4868,
796,
685,
198,
220,
220,
220,
19779,
312,
1298,
352,
11,
366,
19315,
1298,
366,
385,
1600,
366,
3672,
1298,
366,
43482,
25719,
198,... | 2.564784 | 301 |
from uniclass_to_nf_ea_com_source.b_code.configurations.common_constants.uniclass_bclearer_constants import \
UNICLASS2015_TOP_LEVEL_OBJECTS_TABLE_NAME
| [
6738,
28000,
31172,
62,
1462,
62,
77,
69,
62,
18213,
62,
785,
62,
10459,
13,
65,
62,
8189,
13,
11250,
20074,
13,
11321,
62,
9979,
1187,
13,
46903,
31172,
62,
65,
2375,
11258,
62,
9979,
1187,
1330,
3467,
198,
220,
220,
220,
4725,
2... | 2.532258 | 62 |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.callbacks import History, EarlyStopping
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv1D, MaxPooling1D, Input
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.utils import to_categorical
from kerastuner.tuners import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters
import time
LOG_DIR = f"{int(time.time())}"
if __name__ == "__main__":
main()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
8997,
3351,
36213,
198,
198,
6738,
... | 3.2 | 200 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_colmet_collector
----------------------------------
Tests for `colmet_collector` module.
"""
import unittest
from colmet_collector import colmet_collector
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
198,
9288,
62,
4033,
4164,
62,
33327,
273,
198,
3880,
438,
198,
198,
51,
3558,
329,
4600,
4033,
4164... | 2.918919 | 74 |
# Python stubs generated by omniidl from ..\..\..\..\..\idl\COS\CosNaming.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
#
# Start of module "CosNaming"
#
__name__ = "CosNaming"
_0_CosNaming = omniORB.openModule("CosNaming", r"..\..\..\..\..\idl\COS\CosNaming.idl")
_0_CosNaming__POA = omniORB.openModule("CosNaming__POA", r"..\..\..\..\..\idl\COS\CosNaming.idl")
# typedef ... Istring
_0_CosNaming.Istring = Istring
_0_CosNaming._d_Istring = (omniORB.tcInternal.tv_string,0)
_0_CosNaming._ad_Istring = (omniORB.tcInternal.tv_alias, Istring._NP_RepositoryId, "Istring", (omniORB.tcInternal.tv_string,0))
_0_CosNaming._tc_Istring = omniORB.tcInternal.createTypeCode(_0_CosNaming._ad_Istring)
omniORB.registerType(Istring._NP_RepositoryId, _0_CosNaming._ad_Istring, _0_CosNaming._tc_Istring)
del Istring
# struct NameComponent
_0_CosNaming.NameComponent = omniORB.newEmptyClass()
_0_CosNaming.NameComponent = NameComponent
_0_CosNaming._d_NameComponent = (omniORB.tcInternal.tv_struct, NameComponent, NameComponent._NP_RepositoryId, "NameComponent", "id", omniORB.typeMapping["IDL:omg.org/CosNaming/Istring:1.0"], "kind", omniORB.typeMapping["IDL:omg.org/CosNaming/Istring:1.0"])
_0_CosNaming._tc_NameComponent = omniORB.tcInternal.createTypeCode(_0_CosNaming._d_NameComponent)
omniORB.registerType(NameComponent._NP_RepositoryId, _0_CosNaming._d_NameComponent, _0_CosNaming._tc_NameComponent)
del NameComponent
# typedef ... Name
_0_CosNaming.Name = Name
_0_CosNaming._d_Name = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNaming/NameComponent:1.0"], 0)
_0_CosNaming._ad_Name = (omniORB.tcInternal.tv_alias, Name._NP_RepositoryId, "Name", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNaming/NameComponent:1.0"], 0))
_0_CosNaming._tc_Name = omniORB.tcInternal.createTypeCode(_0_CosNaming._ad_Name)
omniORB.registerType(Name._NP_RepositoryId, _0_CosNaming._ad_Name, _0_CosNaming._tc_Name)
del Name
# enum BindingType
_0_CosNaming.nobject = omniORB.EnumItem("nobject", 0)
_0_CosNaming.ncontext = omniORB.EnumItem("ncontext", 1)
_0_CosNaming.BindingType = omniORB.Enum("IDL:omg.org/CosNaming/BindingType:1.0", (_0_CosNaming.nobject, _0_CosNaming.ncontext,))
_0_CosNaming._d_BindingType = (omniORB.tcInternal.tv_enum, _0_CosNaming.BindingType._NP_RepositoryId, "BindingType", _0_CosNaming.BindingType._items)
_0_CosNaming._tc_BindingType = omniORB.tcInternal.createTypeCode(_0_CosNaming._d_BindingType)
omniORB.registerType(_0_CosNaming.BindingType._NP_RepositoryId, _0_CosNaming._d_BindingType, _0_CosNaming._tc_BindingType)
# struct Binding
_0_CosNaming.Binding = omniORB.newEmptyClass()
_0_CosNaming.Binding = Binding
_0_CosNaming._d_Binding = (omniORB.tcInternal.tv_struct, Binding, Binding._NP_RepositoryId, "Binding", "binding_name", omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], "binding_type", omniORB.typeMapping["IDL:omg.org/CosNaming/BindingType:1.0"])
_0_CosNaming._tc_Binding = omniORB.tcInternal.createTypeCode(_0_CosNaming._d_Binding)
omniORB.registerType(Binding._NP_RepositoryId, _0_CosNaming._d_Binding, _0_CosNaming._tc_Binding)
del Binding
# typedef ... BindingList
_0_CosNaming.BindingList = BindingList
_0_CosNaming._d_BindingList = (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNaming/Binding:1.0"], 0)
_0_CosNaming._ad_BindingList = (omniORB.tcInternal.tv_alias, BindingList._NP_RepositoryId, "BindingList", (omniORB.tcInternal.tv_sequence, omniORB.typeMapping["IDL:omg.org/CosNaming/Binding:1.0"], 0))
_0_CosNaming._tc_BindingList = omniORB.tcInternal.createTypeCode(_0_CosNaming._ad_BindingList)
omniORB.registerType(BindingList._NP_RepositoryId, _0_CosNaming._ad_BindingList, _0_CosNaming._tc_BindingList)
del BindingList
# forward interface BindingIterator;
_0_CosNaming._d_BindingIterator = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosNaming/BindingIterator:1.0", "BindingIterator")
omniORB.typeMapping["IDL:omg.org/CosNaming/BindingIterator:1.0"] = _0_CosNaming._d_BindingIterator
# interface NamingContext
_0_CosNaming._d_NamingContext = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosNaming/NamingContext:1.0", "NamingContext")
omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContext:1.0"] = _0_CosNaming._d_NamingContext
_0_CosNaming.NamingContext = omniORB.newEmptyClass()
_0_CosNaming.NamingContext = NamingContext
_0_CosNaming._tc_NamingContext = omniORB.tcInternal.createTypeCode(_0_CosNaming._d_NamingContext)
omniORB.registerType(NamingContext._NP_RepositoryId, _0_CosNaming._d_NamingContext, _0_CosNaming._tc_NamingContext)
# NamingContext operations and attributes
NamingContext._d_bind = ((omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"]), (), {_0_CosNaming.NamingContext.NotFound._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotFound, _0_CosNaming.NamingContext.CannotProceed._NP_RepositoryId: _0_CosNaming.NamingContext._d_CannotProceed, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName, _0_CosNaming.NamingContext.AlreadyBound._NP_RepositoryId: _0_CosNaming.NamingContext._d_AlreadyBound})
NamingContext._d_rebind = ((omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"]), (), {_0_CosNaming.NamingContext.NotFound._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotFound, _0_CosNaming.NamingContext.CannotProceed._NP_RepositoryId: _0_CosNaming.NamingContext._d_CannotProceed, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName})
NamingContext._d_bind_context = ((omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContext:1.0"]), (), {_0_CosNaming.NamingContext.NotFound._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotFound, _0_CosNaming.NamingContext.CannotProceed._NP_RepositoryId: _0_CosNaming.NamingContext._d_CannotProceed, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName, _0_CosNaming.NamingContext.AlreadyBound._NP_RepositoryId: _0_CosNaming.NamingContext._d_AlreadyBound})
NamingContext._d_rebind_context = ((omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContext:1.0"]), (), {_0_CosNaming.NamingContext.NotFound._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotFound, _0_CosNaming.NamingContext.CannotProceed._NP_RepositoryId: _0_CosNaming.NamingContext._d_CannotProceed, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName})
NamingContext._d_resolve = ((omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"], ), {_0_CosNaming.NamingContext.NotFound._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotFound, _0_CosNaming.NamingContext.CannotProceed._NP_RepositoryId: _0_CosNaming.NamingContext._d_CannotProceed, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName})
NamingContext._d_unbind = ((omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], ), (), {_0_CosNaming.NamingContext.NotFound._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotFound, _0_CosNaming.NamingContext.CannotProceed._NP_RepositoryId: _0_CosNaming.NamingContext._d_CannotProceed, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName})
NamingContext._d_new_context = ((), (omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContext:1.0"], ), None)
NamingContext._d_bind_new_context = ((omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContext:1.0"], ), {_0_CosNaming.NamingContext.NotFound._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotFound, _0_CosNaming.NamingContext.CannotProceed._NP_RepositoryId: _0_CosNaming.NamingContext._d_CannotProceed, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName, _0_CosNaming.NamingContext.AlreadyBound._NP_RepositoryId: _0_CosNaming.NamingContext._d_AlreadyBound})
NamingContext._d_destroy = ((), (), {_0_CosNaming.NamingContext.NotEmpty._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotEmpty})
NamingContext._d_list = ((omniORB.tcInternal.tv_ulong, ), (omniORB.typeMapping["IDL:omg.org/CosNaming/BindingList:1.0"], omniORB.typeMapping["IDL:omg.org/CosNaming/BindingIterator:1.0"]), None)
# NamingContext object reference
omniORB.registerObjref(NamingContext._NP_RepositoryId, _objref_NamingContext)
_0_CosNaming._objref_NamingContext = _objref_NamingContext
del NamingContext, _objref_NamingContext
# NamingContext skeleton
__name__ = "CosNaming__POA"
NamingContext._omni_skeleton = NamingContext
_0_CosNaming__POA.NamingContext = NamingContext
omniORB.registerSkeleton(NamingContext._NP_RepositoryId, NamingContext)
del NamingContext
__name__ = "CosNaming"
# interface BindingIterator
_0_CosNaming._d_BindingIterator = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosNaming/BindingIterator:1.0", "BindingIterator")
omniORB.typeMapping["IDL:omg.org/CosNaming/BindingIterator:1.0"] = _0_CosNaming._d_BindingIterator
_0_CosNaming.BindingIterator = omniORB.newEmptyClass()
_0_CosNaming.BindingIterator = BindingIterator
_0_CosNaming._tc_BindingIterator = omniORB.tcInternal.createTypeCode(_0_CosNaming._d_BindingIterator)
omniORB.registerType(BindingIterator._NP_RepositoryId, _0_CosNaming._d_BindingIterator, _0_CosNaming._tc_BindingIterator)
# BindingIterator operations and attributes
BindingIterator._d_next_one = ((), (omniORB.tcInternal.tv_boolean, omniORB.typeMapping["IDL:omg.org/CosNaming/Binding:1.0"]), None)
BindingIterator._d_next_n = ((omniORB.tcInternal.tv_ulong, ), (omniORB.tcInternal.tv_boolean, omniORB.typeMapping["IDL:omg.org/CosNaming/BindingList:1.0"]), None)
BindingIterator._d_destroy = ((), (), None)
# BindingIterator object reference
omniORB.registerObjref(BindingIterator._NP_RepositoryId, _objref_BindingIterator)
_0_CosNaming._objref_BindingIterator = _objref_BindingIterator
del BindingIterator, _objref_BindingIterator
# BindingIterator skeleton
__name__ = "CosNaming__POA"
BindingIterator._omni_skeleton = BindingIterator
_0_CosNaming__POA.BindingIterator = BindingIterator
omniORB.registerSkeleton(BindingIterator._NP_RepositoryId, BindingIterator)
del BindingIterator
__name__ = "CosNaming"
# interface NamingContextExt
_0_CosNaming._d_NamingContextExt = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosNaming/NamingContextExt:1.0", "NamingContextExt")
omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContextExt:1.0"] = _0_CosNaming._d_NamingContextExt
_0_CosNaming.NamingContextExt = omniORB.newEmptyClass()
_0_CosNaming.NamingContextExt = NamingContextExt
_0_CosNaming._tc_NamingContextExt = omniORB.tcInternal.createTypeCode(_0_CosNaming._d_NamingContextExt)
omniORB.registerType(NamingContextExt._NP_RepositoryId, _0_CosNaming._d_NamingContextExt, _0_CosNaming._tc_NamingContextExt)
# NamingContextExt operations and attributes
NamingContextExt._d_to_string = ((omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContextExt/StringName:1.0"], ), {_0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName})
NamingContextExt._d_to_name = ((omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContextExt/StringName:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CosNaming/Name:1.0"], ), {_0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName})
NamingContextExt._d_to_url = ((omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContextExt/Address:1.0"], omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContextExt/StringName:1.0"]), (omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContextExt/URLString:1.0"], ), {_0_CosNaming.NamingContextExt.InvalidAddress._NP_RepositoryId: _0_CosNaming.NamingContextExt._d_InvalidAddress, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName})
NamingContextExt._d_resolve_str = ((omniORB.typeMapping["IDL:omg.org/CosNaming/NamingContextExt/StringName:1.0"], ), (omniORB.typeMapping["IDL:omg.org/CORBA/Object:1.0"], ), {_0_CosNaming.NamingContext.NotFound._NP_RepositoryId: _0_CosNaming.NamingContext._d_NotFound, _0_CosNaming.NamingContext.CannotProceed._NP_RepositoryId: _0_CosNaming.NamingContext._d_CannotProceed, _0_CosNaming.NamingContext.InvalidName._NP_RepositoryId: _0_CosNaming.NamingContext._d_InvalidName, _0_CosNaming.NamingContext.AlreadyBound._NP_RepositoryId: _0_CosNaming.NamingContext._d_AlreadyBound})
# NamingContextExt object reference
omniORB.registerObjref(NamingContextExt._NP_RepositoryId, _objref_NamingContextExt)
_0_CosNaming._objref_NamingContextExt = _objref_NamingContextExt
del NamingContextExt, _objref_NamingContextExt
# NamingContextExt skeleton
__name__ = "CosNaming__POA"
NamingContextExt._omni_skeleton = NamingContextExt
_0_CosNaming__POA.NamingContextExt = NamingContextExt
omniORB.registerSkeleton(NamingContextExt._NP_RepositoryId, NamingContextExt)
del NamingContextExt
__name__ = "CosNaming"
#
# End of module "CosNaming"
#
__name__ = "CosNaming_idl"
_exported_modules = ( "CosNaming", )
# The end.
| [
2,
11361,
17071,
82,
7560,
416,
39030,
8461,
312,
75,
422,
11485,
59,
492,
59,
492,
59,
492,
59,
492,
59,
312,
75,
59,
34,
2640,
59,
36734,
45,
3723,
13,
312,
75,
201,
198,
2,
8410,
5626,
48483,
12680,
45811,
0,
201,
198,
201,
... | 2.475428 | 5,494 |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 07:52:07 2020
@author: SungJun Won
This code is written based on WEC-sim.
wonsungjun0000@gmail.com
Note: readData method is included to import data type of both .txt and .mat
Note: irregularWaveSpectrum method has been modified for the faster computation.
The original method from WEC-sim is commented.
Note: MATLAB column vectors are changed to array for faster computation
Note: waveElevationGrid method and write_paraview_vtp_wave method is moved to
paraviewClass.py
Note: Values are equal to tolerance rtol=1e-07, atol=0
Values will not be exact to WEC-sim due to difference in significant figures
or the way some methods are used.
(e.g)
integrate.cumtrapz(S_f,freq) and MATLAB cumtrapz(freq,S_f) does not produce
same results but silimar values to rtol=1e-06
Note: "RuntimeWarning: overflow encountered in sinh"
When using irregular wave or spectrumImport, Equal Energy with wDepth value
over 100 can generate "RuntimeWarning: overflow encountered in sinh" as the
value of sinh in waveSetup method reaches infinity.
"""
from scipy import integrate
import matplotlib.pyplot as plt
import numpy as np
import numpy.matlib
import warnings
import scipy.io as sio
def arange_MATLAB(start, end, step):
    """Replicate MATLAB's colon sequence (start:step:end) for float steps.

    The bounds are divided by ``step`` so the range is built over
    integer-valued positions (matching MATLAB's element count), then
    scaled back by ``step``.
    """
    n_stop = np.floor(end / step)
    return np.arange(start / step, n_stop) * step
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2758,
220,
513,
8753,
25,
4309,
25,
2998,
12131,
198,
198,
31,
9800,
25,
29802,
22396,
23306,
198,
198,
1212,
2438,
318,
3194,
1912,
319,
... | 3.060669 | 478 |
"""
AOC2020 - day1
"""
import sys;
FILEPATH = "./day1.txt";
with open(FILEPATH) as fp:
lines = fp.readlines();
EXISTING = [];
for line in lines:
val = int(line);
## part 1
# for i in EXISTING:
# if val + i == 2020:
# print(val * i);
# sys.exit();
# EXISTING.append(val);
for i, v1 in enumerate(EXISTING):
for j, v2 in enumerate(EXISTING, i):
if val + v1 + v2 == 2020:
print(val * v1 * v2);
sys.exit();
EXISTING.append(val);
| [
37811,
201,
198,
32,
4503,
42334,
532,
1110,
16,
201,
198,
37811,
201,
198,
11748,
25064,
26,
201,
198,
201,
198,
25664,
34219,
796,
366,
19571,
820,
16,
13,
14116,
8172,
201,
198,
201,
198,
4480,
1280,
7,
25664,
34219,
8,
355,
277,... | 1.708108 | 370 |
import itertools
import torch
class Kernel:
    """Common ancestor for kernel implementations; concrete kernels subclass this."""
| [
11748,
340,
861,
10141,
198,
11748,
28034,
198,
198,
4871,
32169,
25,
198,
220,
220,
220,
37227,
7308,
1398,
329,
50207,
37227,
198
] | 3.478261 | 23 |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workarounds for compatibility issues between versions and libraries."""
import functools
import importlib
import os
import re
import sys
import traceback
import warnings
from types import ModuleType
from typing import Any, Callable, Optional, Dict, Tuple, Type, Set
import numpy as np
import pandas as pd
import sympy
def proper_repr(value: Any) -> str:
    """Overrides sympy and numpy returning repr strings that don't parse.

    Produces an eval-able repr for sympy expressions, numpy arrays and
    pandas indexes/frames; falls back to the built-in repr otherwise.
    """
    # NOTE(review): the isinstance order below matters — pd.MultiIndex is a
    # subclass of pd.Index, so it must be tested before pd.Index.
    if isinstance(value, sympy.Basic):
        result = sympy.srepr(value)
        # HACK: work around https://github.com/sympy/sympy/issues/16074
        # (only handles a few cases)
        # Plain text substitution: rewrites e.g. "Symbol('x')" into
        # "sympy.Symbol('x')" so the result evals under `import sympy`.
        fixed_tokens = ['Symbol', 'pi', 'Mul', 'Pow', 'Add', 'Mod', 'Integer', 'Float', 'Rational']
        for token in fixed_tokens:
            result = result.replace(token, 'sympy.' + token)
        return result
    if isinstance(value, np.ndarray):
        # datetime64 dtypes have no np.<name> literal, so the dtype's own repr
        # form (np.dtype('<M8[ns]')) is emitted via !r instead.
        if np.issubdtype(value.dtype, np.datetime64):
            return f'np.array({value.tolist()!r}, dtype=np.{value.dtype!r})'
        return f'np.array({value.tolist()!r}, dtype=np.{value.dtype})'
    if isinstance(value, pd.MultiIndex):
        return f'pd.MultiIndex.from_tuples({repr(list(value))}, names={repr(list(value.names))})'
    if isinstance(value, pd.Index):
        return (
            f'pd.Index({repr(list(value))}, '
            f'name={repr(value.name)}, '
            f'dtype={repr(str(value.dtype))})'
        )
    if isinstance(value, pd.DataFrame):
        # Emit columns/index/data separately (recursing for the axes) so the
        # DataFrame round-trips through eval.
        cols = [value[col].tolist() for col in value.columns]
        rows = list(zip(*cols))
        return (
            f'pd.DataFrame('
            f'\n    columns={proper_repr(value.columns)}, '
            f'\n    index={proper_repr(value.index)}, '
            f'\n    data={repr(rows)}'
            f'\n)'
        )
    return repr(value)
def proper_eq(a: Any, b: Any) -> bool:
    """Equality check that copes with types whose `==` is not boolean.

    numpy arrays broadcast `==` into an element-wise array, and pandas
    containers behave similarly, so those are routed to their dedicated
    equality helpers. Tuples and lists are compared element by element
    via a recursive call; everything else falls back to plain `==`.
    """
    if type(a) != type(b):
        return a == b
    if isinstance(a, np.ndarray):
        return np.array_equal(a, b)
    if isinstance(a, (pd.DataFrame, pd.Index, pd.MultiIndex)):
        return a.equals(b)
    if isinstance(a, (tuple, list)):
        return len(a) == len(b) and all(proper_eq(x, y) for x, y in zip(a, b))
    return a == b
def deprecated(
    *, deadline: str, fix: str, name: Optional[str] = None
) -> Callable[[Callable], Callable]:
    """Marks a function as deprecated.
    Args:
        deadline: The version where the function will be deleted. It should be a minor version
            (e.g. "v0.7").
        fix: A complete sentence describing what the user should be using
            instead of this particular function (e.g. "Use cos instead.")
        name: How to refer to the function.
            Defaults to `func.__qualname__`.
    Returns:
        A decorator that decorates functions with a deprecation warning.
    """
    # `_validate_deadline` is defined elsewhere in this module (not in this excerpt).
    _validate_deadline(deadline)
    # NOTE(review): the inner `decorator` function is missing here — its
    # definition appears to have been elided; confirm against the full source.
    return decorator
def deprecated_class(
    *, deadline: str, fix: str, name: Optional[str] = None
) -> Callable[[Type], Type]:
    """Marks a class as deprecated.
    Args:
        deadline: The version where the function will be deleted. It should be a minor version
            (e.g. "v0.7").
        fix: A complete sentence describing what the user should be using
            instead of this particular function (e.g. "Use cos instead.")
        name: How to refer to the class.
            Defaults to `class.__qualname__`.
    Returns:
        A decorator that decorates classes with a deprecation warning.
    """
    # `_validate_deadline` is defined elsewhere in this module (not in this excerpt).
    _validate_deadline(deadline)
    # NOTE(review): the inner `decorator` function is missing here — its
    # definition appears to have been elided; confirm against the full source.
    return decorator
def deprecated_parameter(
    *,
    deadline: str,
    fix: str,
    func_name: Optional[str] = None,
    parameter_desc: str,
    match: Callable[[Tuple[Any, ...], Dict[str, Any]], bool],
    rewrite: Optional[
        Callable[[Tuple[Any, ...], Dict[str, Any]], Tuple[Tuple[Any, ...], Dict[str, Any]]]
    ] = None,
) -> Callable[[Callable], Callable]:
    """Marks a function parameter as deprecated.
    Also handles rewriting the deprecated parameter into the new signature.
    Args:
        deadline: The version where the function will be deleted. It should be a minor version
            (e.g. "v0.7").
        fix: A complete sentence describing what the user should be using
            instead of this particular function (e.g. "Use cos instead.")
        func_name: How to refer to the function.
            Defaults to `func.__qualname__`.
        parameter_desc: The name and type of the parameter being deprecated,
            e.g. "janky_count" or "janky_count keyword" or
            "positional janky_count".
        match: A lambda that takes args, kwargs and determines if the
            deprecated parameter is present or not. This determines whether or
            not the deprecation warning is printed, and also whether or not
            rewrite is called.
        rewrite: Returns new args/kwargs that don't use the deprecated
            parameter. Defaults to making no changes.
    Returns:
        A decorator that decorates functions with a parameter deprecation
            warning.
    """
    # `_validate_deadline` is defined elsewhere in this module (not in this excerpt).
    _validate_deadline(deadline)
    # NOTE(review): the inner `decorator` function is missing here — its
    # definition appears to have been elided; confirm against the full source.
    return decorator
def deprecate_attributes(module: ModuleType, deprecated_attributes: Dict[str, Tuple[str, str]]):
    """Wrap a module with deprecated attributes that give warnings.
    Args:
        module: The module to wrap.
        deprecated_attributes: A dictionary from attribute name to a tuple of
            strings, where the first string gives the version that the attribute
            will be removed in, and the second string describes what the user
            should do instead of accessing this deprecated attribute.
    Returns:
        Wrapped module with deprecated attributes. Use of these attributes
        will cause a warning for these deprecated attributes.
    """
    # Validate every deadline up front so a bad entry fails fast.
    for (deadline, _) in deprecated_attributes.values():
        _validate_deadline(deadline)
    # NOTE(review): the `Wrapped` class (presumably a ModuleType subclass that
    # warns on attribute access) is not defined in this excerpt — confirm
    # against the full source.
    return Wrapped(module.__name__, module.__doc__)
class DeprecatedModuleLoader(importlib.abc.Loader):
    """A Loader for deprecated modules.
    It wraps an existing Loader instance, to which it delegates the loading. On top of that
    it ensures that the sys.modules cache has both the deprecated module's name and the
    new module's name pointing to the same exact ModuleType instance.
    Args:
        loader: the loader to be wrapped
        old_module_name: the deprecated module's fully qualified name
        new_module_name: the new module's fully qualified name
    """
    def __init__(self, loader: Any, old_module_name: str, new_module_name: str):
        """A module loader that uses an existing module loader and intercepts
        the execution of a module.
        """
        self.loader = loader
        # Only forward the optional loader hooks the wrapped loader actually has.
        # NOTE(review): `self._wrap_exec_module` / `self._wrap_load_module` are
        # not defined in this excerpt — their method bodies appear to have been
        # elided; confirm against the full source.
        if hasattr(loader, 'exec_module'):
            # mypy#2427
            self.exec_module = self._wrap_exec_module(loader.exec_module)  # type: ignore
        # while this is rare and load_module was deprecated in 3.4
        # in older environments this line makes them work as well
        if hasattr(loader, 'load_module'):
            # mypy#2427
            self.load_module = self._wrap_load_module(loader.load_module)  # type: ignore
        if hasattr(loader, 'create_module'):
            # mypy#2427
            self.create_module = loader.create_module  # type: ignore
        self.old_module_name = old_module_name
        self.new_module_name = new_module_name
def _is_internal(filename: str) -> bool:
"""Returns whether filename is internal to python.
This is similar to how the built-in warnings module differentiates frames from internal modules.
It is specific to CPython - see
https://github.com/python/cpython/blob/41ec17e45d54473d32f543396293256f1581e44d/Lib/warnings.py#L275.
"""
return 'importlib' in filename and '_bootstrap' in filename
# Fully qualified names of deprecated modules already warned about, so each
# deprecation is only emitted once per process when deduping is active.
_warned: Set[str] = set()
def _should_dedupe_module_deprecation() -> bool:
    """Whether module deprecation warnings should be deduped or not.
    We should always dedupe when not called from test.
    We should only dedupe during tests if forced.
    """
    # `_called_from_test` is defined elsewhere in this module (not in this excerpt).
    force_dedupe = "CIRQ_FORCE_DEDUPE_MODULE_DEPRECATION" in os.environ
    return not _called_from_test() or force_dedupe
# TODO(#3388) Add documentation for Args.
# pylint: disable=missing-param-doc
class DeprecatedModuleFinder(importlib.abc.MetaPathFinder):
    """A module finder to handle deprecated module references.
    It sends a deprecation warning when a deprecated module is asked to be found.
    It is meant to be used as a wrapper around existing MetaPathFinder instances.
    Args:
        finder: the finder to wrap.
        new_module_name: the new module's fully qualified name
        old_module_name: the deprecated module's fully qualified name
        deadline: the deprecation deadline
    """
    def __init__(
        self,
        finder: Any,
        new_module_name: str,
        old_module_name: str,
        deadline: str,
        broken_module_exception: Optional[BaseException],
    ):
        """An aliasing module finder that uses an existing module finder to find a python
        module spec and intercept the execution of matching modules.
        """
        self.finder = finder
        self.new_module_name = new_module_name
        self.old_module_name = old_module_name
        self.deadline = deadline
        # If set, finding the deprecated module raises this instead of loading.
        self.broken_module_exception = broken_module_exception
        # to cater for metadata path finders
        # https://docs.python.org/3/library/importlib.metadata.html#extending-the-search-algorithm
        # NOTE(review): `find_distributions` and `invalidate_caches` are bare
        # names here, not attributes of `finder` — their definitions (likely
        # nested functions) are missing from this excerpt; confirm against the
        # full source.
        if hasattr(finder, "find_distributions"):
            self.find_distributions = find_distributions
        if hasattr(finder, "invalidate_caches"):
            # mypy#2427
            self.invalidate_caches = invalidate_caches  # type: ignore
    def find_spec(self, fullname: str, path: Any = None, target: Any = None) -> Any:
        """Finds the specification of a module.
        This is an implementation of the importlib.abc.MetaPathFinder.find_spec method.
        See https://docs.python.org/3/library/importlib.html#importlib.abc.MetaPathFinder.
        Args:
            fullname: name of the module.
            path: if presented, this is the parent module's submodule search path.
            target: When passed in, target is a module object that the finder may use to make a more
                educated guess about what spec to return. We don't use it here, just pass it along
                to the wrapped finder.
        """
        if fullname != self.old_module_name and not fullname.startswith(self.old_module_name + "."):
            # if we are not interested in it, then just pass through to the wrapped finder
            return self.finder.find_spec(fullname, path, target)
        if self.broken_module_exception is not None:
            raise self.broken_module_exception
        # warn for deprecation
        # NOTE(review): `_deduped_module_warn_or_error` is defined elsewhere in
        # this module (not in this excerpt).
        _deduped_module_warn_or_error(self.old_module_name, self.new_module_name, self.deadline)
        new_fullname = self.new_module_name + fullname[len(self.old_module_name) :]
        # find the corresponding spec in the new structure
        if fullname == self.old_module_name:
            # this is the first time the deprecated module is being found
            # which means that the new parent needs to be found first and under
            # the new parent's path, we should be able to find the new name of
            # the deprecated module
            # this code is heavily inspired by importlib.util.find_spec
            parent_name = new_fullname.rpartition('.')[0]
            if parent_name:
                parent = __import__(parent_name, fromlist=['__path__'])
                # note that compared to importlib.util.find_spec we don't handle
                # AttributeError here because it is not expected to happen in case
                # of a DeprecatedModuleLoader - the new parent should exist and be
                # a proper package
                parent_path = parent.__path__
            else:
                parent_path = None
            spec = self.finder.find_spec(new_fullname, parent_path, None)
        else:
            # we are finding a submodule of the parent of the deprecated module,
            # which means that the parent was already found, and thus, `path` is
            # correctly pointing to the module's parent in the new hierarchy
            spec = self.finder.find_spec(
                new_fullname,
                path=path,
                target=target,
            )
        # if the spec exists, return the DeprecatedModuleLoader that will do the loading as well
        # as set the alias(es) in sys.modules as necessary
        if spec is not None:
            # change back the name to the deprecated module name
            spec.name = fullname
            # some loaders do a check to ensure the module's name is the same
            # as the loader was created for
            if getattr(spec.loader, "name", None) == new_fullname:
                setattr(spec.loader, "name", fullname)
            spec.loader = DeprecatedModuleLoader(spec.loader, fullname, new_fullname)
        return spec
# pylint: enable=missing-param-doc
# TODO(#3388) Add documentation for Args.
# pylint: disable=missing-param-doc
def deprecated_submodule(
    *, new_module_name: str, old_parent: str, old_child: str, deadline: str, create_attribute: bool
):
    """Creates a deprecated module reference recursively for a module.
    For `new_module_name` (e.g. cirq_google) creates an alias (e.g cirq.google) in Python's module
    cache. It also recursively checks for the already imported submodules (e.g. cirq_google.api) and
    creates the alias for them too (e.g. cirq.google.api). With this method it is possible to create
    an alias that really looks like a module, e.g you can do things like
    `from cirq.google import api` - which would be otherwise impossible.
    Note that this method will execute `new_module_name` in order to ensure that it is in the module
    cache.
    Args:
        new_module_name: absolute module name for the new module
        old_parent: the current module that had the original submodule
        old_child: the submodule that is being relocated
        create_attribute: if True, the submodule will be added as a deprecated attribute to the
            old_parent module
    Returns:
        None
    """
    # NOTE(review): `_validate_deadline`, `_setup_deprecated_submodule_attribute`,
    # `DeprecatedModuleImportError`, `_BrokenModule` and the local `wrap`
    # function used below are not defined in this excerpt (in the upstream
    # source `wrap` is defined inline here) — confirm against the full file.
    _validate_deadline(deadline)
    old_module_name = f"{old_parent}.{old_child}"
    broken_module_exception = None
    if create_attribute:
        try:
            new_module = importlib.import_module(new_module_name)
            _setup_deprecated_submodule_attribute(
                new_module_name, old_parent, old_child, deadline, new_module
            )
        except ImportError as ex:
            # The new module could not be imported: keep the alias, but make any
            # later lookup of the deprecated name raise a descriptive error.
            msg = (
                f"{new_module_name} cannot be imported. The typical reasons are"
                f" that\n 1.) {new_module_name} is not installed, or"
                f"\n 2.) when developing Cirq, you don't have your PYTHONPATH "
                f"setup. In this case run `source dev_tools/pypath`.\n\n You can "
                f"check the detailed exception above for more details or run "
                f"`import {new_module_name} to reproduce the issue."
            )
            broken_module_exception = DeprecatedModuleImportError(msg)
            broken_module_exception.__cause__ = ex
            _setup_deprecated_submodule_attribute(
                new_module_name,
                old_parent,
                old_child,
                deadline,
                _BrokenModule(new_module_name, broken_module_exception),
            )
    # Wrap every registered meta-path finder so the deprecated name resolves.
    sys.meta_path = [wrap(finder) for finder in sys.meta_path]
# pylint: enable=missing-param-doc
| [
2,
15069,
2864,
383,
21239,
80,
34152,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,... | 2.561576 | 6,496 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import random
import os
import pandas as pd
import torch
import torch.utils.data
from torchvision import transforms
import slowfast.utils.logging as logging
from .build import DATASET_REGISTRY
from .epickitchens_record import EpicKitchensVideoRecord
from . import autoaugment as autoaugment
from . import transform as transform
from . import utils as utils
from .frame_loader import pack_frames_to_video_clip
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
19... | 3.348315 | 178 |
# Copyright (c) 2015 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from kingbird.objects import base as obj_base
from kingbird.tests import base
from oslo_versionedobjects import fields as obj_fields
| [
2,
15069,
357,
66,
8,
1853,
7651,
16528,
9564,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
... | 3.724138 | 203 |
import copy
from collections import OrderedDict
from typing import List, Dict, Any
from spine_json_lib.data.data_types.base_type import SpineData
| [
11748,
4866,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
4377,
198,
198,
6738,
19656,
62,
17752,
62,
8019,
13,
7890,
13,
7890,
62,
19199,
13,
8692,
62,
4906,
1330,
1338,
500,
6601,
6... | 3.386364 | 44 |
"""This module tests config.py."""
from typing import Dict
import pytest
from alias_cd import config
@pytest.fixture
def config_data() -> str:
    """Sample config data in yaml format.

    A tree of directories where each "_alias" entry names the shortcut
    for its containing directory.
    """
    return """---
"~":
  _alias: root
  my_long_directory_1:
    _alias: d1
    my_sub_directory_1:
      _alias: sd1
    my_sub_directory_2:
      my_sub_directory_3:
        _alias: sd3"""
@pytest.fixture
def config_yaml() -> Dict:
    """Sample config data as a dictionary.

    The parsed-YAML equivalent of the `config_data` fixture string.
    """
    return {
        "~": {
            "_alias": "root",
            "my_long_directory_1": {
                "_alias": "d1",
                "my_sub_directory_1": {"_alias": "sd1"},
                "my_sub_directory_2": {"my_sub_directory_3": {"_alias": "sd3"}},
            },
        },
    }
@pytest.fixture
def config_obj() -> config.Config:
    """Sample config data as a Config object.

    The expected alias -> absolute-path mapping for the sample tree.
    """
    return config.Config(
        aliases={
            "root": "~",
            "d1": "~/my_long_directory_1",
            "sd1": "~/my_long_directory_1/my_sub_directory_1",
            "sd3": "~/my_long_directory_1/my_sub_directory_2/my_sub_directory_3",
        },
    )
def test_yaml_parsing(config_data, config_yaml):
    """Test that the config_data fixture matches the config_yaml fixture."""
    # Exercises the private YAML loader directly, pinning the raw parse result.
    assert config._load_yaml(config_data) == config_yaml
def test_config_parsing(config_yaml, config_obj):
    """Test that the _get_config creates the Config object correctly."""
    # Builds a Config from the already-parsed mapping (no file access involved).
    assert config._get_config(config_yaml=config_yaml) == config_obj
| [
37811,
1212,
8265,
5254,
4566,
13,
9078,
526,
15931,
198,
198,
6738,
19720,
1330,
360,
713,
198,
11748,
12972,
9288,
198,
6738,
16144,
62,
10210,
1330,
4566,
628,
198,
31,
9078,
9288,
13,
69,
9602,
198,
4299,
4566,
62,
7890,
3419,
461... | 2.186178 | 709 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Purpose: check whether all the values in a list are unique.

Explanation:
Build a set() from the given list so only the distinct values remain,
then compare the number of unique values (via len()) against the
length of the original list.
"""
# Examples
# NOTE(review): `all_unique` itself is not defined in this excerpt — it is
# presumably `lambda lst: len(lst) == len(set(lst))`; confirm against the
# full file before running.
x = [1, 2, 3, 4, 5, 6]
y = [1, 2, 2, 3, 4, 5]
print(all_unique(x))
print(all_unique(y))
# output:
# True
# False
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
27950,
253,
47797,
121,
22522,
252,
163,
236,
108,
171,
120,
248,
162,
96,
222,
162,
253,
98,
26344,
245,... | 1.151111 | 225 |
from output.models.ms_data.regex.specials_xsd.specials import Doc

# Re-export the generated `Doc` binding as this package's public API.
__all__ = [
    "Doc",
]
| [
6738,
5072,
13,
27530,
13,
907,
62,
7890,
13,
260,
25636,
13,
20887,
82,
62,
87,
21282,
13,
20887,
82,
1330,
14432,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
23579,
1600,
198,
60,
198
] | 2.358974 | 39 |
{%- from "taiga/map.jinja" import server with context -%}
# -*- coding: utf-8 -*-
from kombu import Queue
broker_url = 'amqp{% if server.message_queue.get('ssl', False) %}s{% endif %}://{{ server.message_queue.user }}:{{ server.message_queue.password }}@{{ server.message_queue.host }}:{{ server.message_queue.get('port', 5672) }}/{{ server.message_queue.get('virtual_host', '/') }}'
result_backend = 'redis://localhost:6379/0'
accept_content = ['pickle',] # Values are 'pickle', 'json', 'msgpack' and 'yaml'
task_serializer = "pickle"
result_serializer = "pickle"
timezone = '{{ pillar.linux.system.timezone|default("UTC") }}'
task_default_queue = 'tasks'
task_queues = (
Queue('tasks', routing_key='task.#'),
Queue('transient', routing_key='transient.#', delivery_mode=1)
)
task_default_exchange = 'tasks'
task_default_exchange_type = 'topic'
task_default_routing_key = 'task.default'
{#-
vim: syntax=jinja
-#}
| [
90,
33963,
422,
366,
8326,
13827,
14,
8899,
13,
18594,
6592,
1,
1330,
4382,
351,
4732,
532,
4,
92,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
479,
2381,
84,
1330,
4670,
518,
198,
198,
7957,
61... | 2.663793 | 348 |
import glob, os, json
import logging
import numpy as np
logger = logging.Logger("vdb")
def seg2bb(obj_mask):
    '''Convert a binary segmentation mask of an object to a bounding box.

    Args:
        obj_mask: 2D boolean array, True on object pixels.

    Returns:
        [x_min, x_max, y_min, y_max] pixel coordinates (inclusive).
        (The previous docstring claimed "(x0, y0, x1, y1)" order, but the
        returned list has always been x-range first, then y-range.)

    Raises:
        ValueError: if the mask contains no True pixel (min/max of empty array).
    '''
    # `np.where(obj_mask == True)` was redundant for a boolean mask.
    y, x = np.where(obj_mask)
    bb = [x.min(), x.max(), y.min(), y.max()]
    return bb
def get_obj_mask(seg_im, color):
    '''Return a boolean mask of pixels whose RGB value equals *color*.

    *seg_im* is an H x W x 3 color-coded segmentation image; *color* is
    either a [R, G, B] list or a {'R': ..., 'G': ..., 'B': ...} dict.
    '''
    # Fold the three channels into one integer code per pixel: R*256^2 + G*256 + B.
    encoded = np.array(seg_im[:, :, 0] * (256 ** 2) + seg_im[:, :, 1] * 256 + seg_im[:, :, 2])
    if isinstance(color, list):
        r, g, b = color
    if isinstance(color, dict):
        r, g, b = color['R'], color['G'], color['B']
    target = r * (256 ** 2) + g * 256 + b
    return np.equal(encoded, target)
| [
11748,
15095,
11,
28686,
11,
33918,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6404,
1362,
796,
18931,
13,
11187,
1362,
7203,
85,
9945,
4943,
628,
198,
4299,
384,
70,
17,
11848,
7,
26801,
62,
27932,
2599,
198,
22... | 2.229102 | 323 |
import tushare as ts
import matplotlib.pyplot as plt
import matplotlib.finance as mpf
from matplotlib.pylab import date2num
import datetime
import time
import os
import pandas as pd
import sys
from multiprocessing.dummy import Pool as ThreadPool
# Module-level state shared by the worker functions.
stockBasicInfo=None   # cached basic-info table for all stocks (filled elsewhere)
myG={}                # NOTE(review): purpose unclear from this excerpt — confirm
workingStock=[]       # stocks currently being processed
blackList=["000033"]  # stock codes to skip
lastTradeDay=""
last2TradeDay=""
todayData=None
#getStockInfo("600848")
if __name__=="__main__" :
    # Dispatch on an optional single command-line argument selecting the job.
    # NOTE(review): none of the worker functions called below
    # (updateDataWorkLoop, threadpoolwork, initStockBasic, analyseWorkLoop,
    # checkStocksLoop, ...) are defined in this excerpt — confirm they exist
    # earlier in the full file.
    print(sys.argv)
    if len(sys.argv)==2:
        workType=sys.argv[1]
        print(workType)
        if workType=="getdatafast":
            updateDataWorkLoop(False,True)
        if workType=="getdata":
            updateDataWorkLoop()
        if workType=="getdataR":
            updateDataWorkLoop(True)
        if workType=="threadGetData":
            threadpoolwork()
        if workType=="getbasicInfo":
            initStockBasic(True)
        if workType=="getpicR":
            analyseWorkLoop(True)
        if workType=="checkStock":
            checkStocksLoop()
    else:
        # Default job when run without arguments.
        #fastUpdateData()
        #updateDataWorkLoop(False,True)
        analyseWorkLoop()
        #threadpoolworkPic()
        #updateDataWorkLoop(True)
        #threadpoolwork()
        #initStockBasic()
        #updateStockDataWork("600601",True)
    print("done")
| [
171,
119,
123,
11748,
256,
1530,
533,
355,
40379,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
69,
14149,
355,
29034,
69,
198,
6738,
2603,
29487,
8019,
13,
79,
2645,
397,
1330,
3128,... | 2.173109 | 595 |
# Built in python libs
import os
import time
# Additional libs
import numpy as np
import cv2
from numba import jit
# Custom imports
try:
from logger.logger import Logger
import utilities.exceptions
from cameras.CaptureManager import CaptureManager, createCaptureSourceData
from cameras.DisplayManager import DisplayManager, createDisplaySourceData
from utilities.exceptions import CameraReadError
except ImportError:
from Source.logger.logger import Logger
from Source.utilities import exceptions
from Source.cameras.CaptureManager import CaptureManager, createCaptureSourceData
from Source.cameras.DisplayManager import DisplayManager, createDisplaySourceData
from Source.utilities.exceptions import CameraReadError
# gets the camera frames from the captureManager
# makes grayscale images of the bgr_images returned by readCameras
# @jit(forceobj=True)
# Function makes a window which displays both camera feeds next to each other
# Takes the images as two arguments: left, right images
# Has no return value
@jit(forceobj=True)
# gets the camera images from the capture manager
# converts the images to grayscale
# shows the images
# creates the cameras sources for ThreadedCapture and runs them into CaptureManager
# closes the camera sources
# loads all files from data that the robot needs
# Function to write K matrix and dist coeffs to npz files
# K matrix is a 3x3 and dist coeffs is of length 4
# # Function to get the new frames from both cameras
# # "Safe" such that it will throw an exception if the cameras do not yield frames
# # Takes both cameras as left and right
# # Returns both image in leftImage, rightImage
# # Left image in return tuple corresponds to left camera number in return tuple
# # @jit(forceobj=True) # forceobj is used here since the opencv videoCaptures cannot be compiled
# def readCameras(left, right):
# # Got image boolean and retrieved image
# gotLeft, gotRight = left.grab(), right.grab()
# # Ensure images were received
# if not gotLeft:
# raise exceptions.CameraReadError("Left")
# if not gotRight:
# raise exceptions.CameraReadError("Right")
# # Return images
# return left.retrieve()[1], right.retrieve()[1]
#
# # Convenience function which will read and show the images given by readCameras and showCameras
# # Will pass on exceptions
# def readAndShowCameras(leftCam, rightCam, leftK, rightK, leftDistC, rightDistC, show=True):
# try:
# leftImage, rightImage = readCameras(leftCam, rightCam)
# undistLeft, undistRight = undistortImages(leftImage, rightImage, leftK, rightK, leftDistC, rightDistC)
# if show:
# showCameras(undistLeft, undistRight)
# return undistLeft, undistRight
# except Exception as e:
# raise e
#
# def writeCameraImages(cameraPath, leftImage, rightImage, cameraLocks):
# cameraLocks[0].acquire()
# cv2.imwrite(cameraPath + "left_image.jpg", leftImage)
# cameraLocks[0].release()
# cameraLocks[1].acquire()
# cv2.imwrite(cameraPath + "right_image.jpg", rightImage)
# cameraLocks[1].release()
#
# # Function for undistorting the read in images
# # Utilizes pre-saved camera coefficient matrices and dist coeff arrays
# # Takes two images(np arrays of shape (w,h,c)) as parameters
# # returns the undistorted images or raises an exception
# def undistortImages(left, right, leftK, rightK, leftDistC, rightDistC):
# try:
# leftNewK, _ = cv2.getOptimalNewCameraMatrix(leftK, leftDistC, (left.shape[1], left.shape[0]), 1, (left.shape[1], left.shape[0]))
# rightNewK, _ = cv2.getOptimalNewCameraMatrix(rightK, rightDistC, (right.shape[1], right.shape[0]), 1, (right.shape[1], right.shape[0]))
# return cv2.undistort(left, leftK, leftDistC, None, leftNewK), cv2.undistort(right, rightK, rightDistC, None, rightNewK)
# except FileNotFoundError:
# raise FileNotFoundError("Cannot load calibration data in undistortImages -> cameras.py")
# except:
# raise exceptions.UndistortImageError("undistortImages function error")
#
# def readAndWriteCameras(cameraPath, leftCam, rightCam, leftK, rightK, leftDistC, rightDistC, cameraLocks):
# leftImg, rightImg = readCameras(leftCam, rightCam)
# undistortedLeft, undistortedRight = undistortImages(leftImg, rightImg, leftK, rightK, leftDistC, rightDistC)
# writeCameraImages(cameraPath, undistortedLeft, undistortedRight, cameraLocks)
#
# def cameraProcess(cameraPath, leftCam, rightCam, leftK, rightK, leftDistC, rightDistC, cameraLocks):
# leftCamera = cv2.VideoCapture(leftCam)
# rightCamera = cv2.VideoCapture(rightCam)
# while True:
# try:
# readAndWriteCameras(cameraPath, leftCamera, rightCamera, leftK, rightK, leftDistC, rightDistC, cameraLocks)
# except exceptions.CameraReadError as e:
# Logger.log(e)
# except:
# Logger.log("Uncaught exception in readAndWriteCameras")
# finally:
# time.sleep(0.064)
| [
2,
28477,
287,
21015,
9195,
82,
198,
11748,
28686,
198,
11748,
640,
198,
198,
2,
15891,
9195,
82,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
6738,
997,
7012,
1330,
474,
270,
198,
198,
2,
8562,
220,
17944,
198,... | 2.894495 | 1,744 |
import retro | [
11748,
12175
] | 6 | 2 |
import csv
import os
import re
import time
from selenium.webdriver.support.select import Select
from Data.parameters import Data
from filenames import file_extention
from get_dir import pwd
from reuse_func import GetData
| [
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
640,
198,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
19738,
1330,
9683,
198,
198,
6738,
6060,
13,
17143,
7307,
1330,
6060,
198,
6738,
1226,
268,
1047,... | 3.555556 | 63 |
num1 = float(input('Digite o primeiro número: '))
num2 = float(input('Digite o segundo: '))
num3 = float(input('Digite o terceiro: '))

# Largest of the three values:
maior = max(num1, num2, num3)
print(f'O maior número é {maior}')

# Smallest of the three values:
menor = min(num1, num2, num3)
print(f'E o menor número é {menor}')
| [
22510,
16,
796,
12178,
7,
15414,
10786,
19511,
578,
267,
6994,
7058,
299,
21356,
647,
78,
25,
705,
4008,
198,
22510,
17,
796,
12178,
7,
15414,
10786,
19511,
578,
267,
384,
70,
41204,
25,
705,
4008,
198,
22510,
18,
796,
12178,
7,
154... | 2.18593 | 199 |
"""
Write a program to construct aBayesian network considering medical data. Use this model to demonstrate the diagnosis of heart patients using standard Heart Disease Data Set. You can use Java/Python ML library classes/API.
"""
import numpy as np
import pandas as pd
import csv
from pgmpy.estimators import MaximumLikelihoodEstimator
from pgmpy.models import BayesianModel
from pgmpy.inference import VariableElimination
lines = list(csv.reader(open('heart_disease.csv','r')))
attribute = lines[0]
heartDisease = pd.read_csv('heart_disease.csv')
heartDisease = heartDisease.replace('?',np.nan)
#print("Few examples from dataset are :-")
#print(heartDisease.head())
print("Attributes and datatypes")
print(heartDisease.dtypes)
model = BayesianModel([('age','trestbps'),('age','fbs'),('sex', 'trestbps'), ('sex', 'trestbps'), ('exang', 'trestbps'),('trestbps','heartdisease'),('fbs','heartdisease'),
('heartdisease','restecg'),('heartdisease','thalach'),('heartdisease','chol')])
print("Learning CPDs using max lilelihood estimatos")
model.fit(heartDisease,estimator = MaximumLikelihoodEstimator)
print("Inferencing with bayesian network")
HeartDisease_infer = VariableElimination(model)
q = HeartDisease_infer.query(variables = ['heartdisease'], evidence = {'age':28})
print(q)
print(q['heartdisease'])
print("2. Probability of Heart disease given chol = 100")
q = HeartDisease_infer.query(variables = ['heartdisease'], evidence = {'chol':100})
print(q['heartdisease'])
| [
37811,
198,
198,
16594,
257,
1430,
284,
5678,
257,
15262,
35610,
3127,
6402,
3315,
1366,
13,
5765,
428,
2746,
284,
10176,
262,
13669,
286,
2612,
3871,
1262,
3210,
8894,
17344,
6060,
5345,
13,
921,
460,
779,
7349,
14,
37906,
10373,
5888,... | 3.016227 | 493 |
import cv2
import numpy as np
from keras.models import load_model
from skimage.transform import resize, pyramid_reduce
model = load_model('model.h5')
while True:
cam_capture = cv2.VideoCapture(0)
_, image_frame = cam_capture.read()
# Select ROI
im2 = crop_image(image_frame, 300,300,300,300)
image_grayscale = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
image_grayscale_blurred = cv2.GaussianBlur(image_grayscale, (15,15), 0)
#resized_img = image_resize(image_grayscale_blurred, width = 28, height = 28, inter = cv2.INTER_AREA)
#resized_img = keras_process_image(image_grayscale_blurred)
resized_img = cv2.resize(image_grayscale_blurred,(28,28))
#ar = np.array(resized_img)
ar = resized_img.reshape(1,784)
pred_probab, pred_class = keras_predict(model, ar )
print(pred_class, pred_probab)
# Display cropped image
cv2.imshow("Image2",im2)
cv2.imshow("Image4",resized_img)
cv2.imshow("Image3",image_grayscale_blurred)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
cam_capture.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
13,
27530,
1330,
3440,
62,
19849,
198,
6738,
1341,
9060,
13,
35636,
1330,
47558,
11,
27944,
62,
445,
7234,
198,
198,
19849,
796,
3440,
62,
19849,
10786,
19... | 2.195817 | 526 |
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip\
as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__)) | [
11748,
302,
198,
11748,
28686,
198,
198,
6738,
15881,
88,
13,
2777,
1304,
1330,
7308,
41294,
198,
6738,
15881,
88,
13,
19738,
273,
1330,
367,
20369,
55,
15235,
17563,
273,
198,
6738,
15881,
88,
13,
4023,
1330,
19390,
11,
367,
20369,
3... | 2.869822 | 169 |
import tensorflow as tf
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
from collections import defaultdict
import pandas as pd
import numpy as np
import string
from itertools import combinations, permutations
from sklearn.preprocessing import OrdinalEncoder
from sklearn.metrics import accuracy_score
from sklearn.ensemble import StackingClassifier, AdaBoostClassifier
from xgboost import XGBRFClassifier
from sklearn.model_selection import train_test_split
import os
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
1330,
406,
2257,
44,
11,
360,
1072,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
384,
11925,
150... | 3.690583 | 223 |
from .one_hot import one_hot
from .get_file import get_file
from .tensor_type import TensorType
from .list_recursive_subclasses import list_recursive_concrete_subclasses
| [
6738,
764,
505,
62,
8940,
1330,
530,
62,
8940,
198,
6738,
764,
1136,
62,
7753,
1330,
651,
62,
7753,
198,
6738,
764,
83,
22854,
62,
4906,
1330,
309,
22854,
6030,
198,
6738,
764,
4868,
62,
8344,
30753,
62,
7266,
37724,
1330,
1351,
62,... | 3.269231 | 52 |
import re
from subprocess import Popen, PIPE
from board.Board import BLACK, NONE, getOtherColor, getPieceSymbol, WHITE, getDirection, Board
from move.Move import MoveNode
from move.MovementFactory import generate_moves
from players.Player import Player
| [
11748,
302,
198,
6738,
850,
14681,
1330,
8099,
268,
11,
350,
4061,
36,
198,
6738,
3096,
13,
29828,
1330,
31963,
11,
399,
11651,
11,
651,
6395,
10258,
11,
651,
47,
8535,
13940,
23650,
11,
44925,
11,
651,
35,
4154,
11,
5926,
198,
6738... | 3.657143 | 70 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
BASE_URL = 'http://codeforces.com/api/'
contest_standings = method('contest.standings')
user_info = method('user.info')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
7007,
628,
198,
33,
11159,
62,
21886,
796,
705,
4023,
1378,
19815,
891,
273,
728,
13,
785,
14,
15042,
14,
... | 2.506667 | 75 |
"""
This encoding is an interface between neural networks and the robot blueprint
It is used to create a 'tree' structure that is interpreted as a robot.
"""
import numpy as np
import copy
import random
from Encodings import abstract_encoding as enc
import Tree as tree_structure
from NeuralNetwork import NEAT_NN
from Encodings import cellular_encoding
from enum import Enum
MAX_MODULES = 20
"""
Container Module : This is used to store arbitrary information of the module
which the L-System uses as a placeholder to create the tree structure
"""
# This module class is a duplicate from the L-System encoding. TODO: change location of this class to
class NN_enc(enc.Encoding):
'''
The neural networks that are being used to create the robot
directed tree blueprints can both have a genotypic and
phenotypic part to them. For the cellular encoding,
the genotypic part is mutable, it is the short set of
rules that creates a neural network. The phenotype is
the network that is actually created from these rules.
'''
def create(self, treedepth):
"""
creating a tree structure from the neural network is done in a similar manner as the L-System;
intead of rewriting the tree structure a few times using the rules of the L-System
the neural network will try to expand the tree structure every rewrite iteration
"""
# when using NEAT, a phenotype first needs ot be created out of a genotype.
# Since we will only use the phenotype for constructing the robot tree,
# we discard the phenotype after we're done
self.maxTreeDepth = treedepth
if (self.networkType == NETWORK_TYPE.CE):
self.nn_g.create()
self.nn_p = self.nn_g
elif (self.networkType == NETWORK_TYPE.CPPN):
self.nn_p = self.nn_g.getPhenotype()
# 1: first create the container module dependecy
axiom = C_Module(0,self.moduleList[0],-1)
axiom.controller = copy.deepcopy(self.moduleList[0].controller)
index = 0
axiom.children = []
axiom.index = index
index+=1
base = axiom
for i in range(treedepth): # number of times iterated over the L-System
index = self.iterate(base, index,0)
# remove nn_p
self.nn_p = None
# 1: create the tree from the container modules
# transform the string into a usable tree structure
tree = tree_structure.Tree(self.moduleList)
self.recursiveNodeGen(-1,base,tree,0)
# print("number of nodes is : ",len(tree.nodes))
return tree
# return super().create()
# NOTE: The function below is copied from the L-System. Should be defined in abstract class.
| [
37811,
198,
1212,
21004,
318,
281,
7071,
1022,
17019,
7686,
290,
262,
9379,
30881,
198,
1026,
318,
973,
284,
2251,
257,
705,
21048,
6,
4645,
326,
318,
16173,
355,
257,
9379,
13,
220,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941... | 3.24968 | 781 |
from django.conf import settings
from .sender_controller import TaskSender
from sparrow_cloud.registry.service_discovery import consul_service
from sparrow_cloud.restclient.exception import HTTPException
from functools import lru_cache
import time
#
# @lru_cache(maxsize=None)
# def get_tasks_sender_object(message_backend):
# task_sender = TaskSender(message_backend)
# return task_senderml
def get_settings_value(name):
"""获取settings中的配置"""
value = getattr(settings, name, None)
if value == '' or value is None:
raise NotImplementedError("没有配置这个参数%s" % name)
return value
def send_task(exchange, routing_key, message_code, retry_times=3, *args, **kwargs):
"""
发送实时任务
参数:
exchange/routing_key/message_code, 创建消息服务时返回的配置信息
*args
**kwargs
settings配置:
MESSAGE_SENDER_CONF = {
"SERVICE_CONF": {
"ENV_NAME": "DLJFLS_LSDK_LDKEND",
"VALUE": "xxxxx-svc",
},
"API_PATH": "/api/sparrow_task/producer/send/",
}
"""
message_conf = get_settings_value("MESSAGE_SENDER_CONF")
service_addr = consul_service(message_conf['SERVICE_CONF'])
message_backend = "http://{}{}".format(service_addr, message_conf['API_PATH'])
task_sender = TaskSender(message_backend)
# 发送任务出现异常时的初始重试时间间隔
interval_time = 1
error_message = None
for _ in range(retry_times):
try:
task_result = task_sender.send_task(
exchange=exchange,
routing_key=routing_key,
message_code=message_code,
*args,
**kwargs
)
return task_result
except Exception as ex:
time.sleep(interval_time)
error_message = ex.__str__()
raise Exception("消息发送失败,失败原因{},重试次数{},消息内容message_code={},消息参数{}{}".format(
error_message, retry_times, message_code, args, kwargs))
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
764,
82,
2194,
62,
36500,
1330,
15941,
50,
2194,
198,
6738,
599,
6018,
62,
17721,
13,
2301,
4592,
13,
15271,
62,
67,
40821,
1330,
762,
377,
62,
15271,
198,
6738,
599,
6018,
62,
1... | 1.797166 | 1,129 |
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
from . import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of14']
port_desc_prop.subtypes[65535] = experimenter
experimenter.subtypes[6035143] = bsn
bsn.subtypes[3] = bsn_breakout
bsn.subtypes[7] = bsn_driver_info_json
bsn.subtypes[8] = bsn_extended_capabilities
bsn.subtypes[2] = bsn_forward_error_correction
bsn.subtypes[1] = bsn_generation_id
bsn.subtypes[5] = bsn_misc_capabilities
bsn.subtypes[6] = bsn_sff_json
bsn.subtypes[4] = bsn_speed_capabilities
bsn.subtypes[0] = bsn_uplink
port_desc_prop.subtypes[0] = ethernet
port_desc_prop.subtypes[1] = optical
| [
2,
15069,
357,
66,
8,
3648,
383,
5926,
286,
9870,
2841,
286,
383,
406,
8822,
13863,
20000,
2059,
198,
2,
15069,
357,
66,
8,
2813,
11,
2321,
4946,
7311,
278,
5693,
198,
2,
15069,
357,
66,
8,
2321,
11,
2211,
4403,
14645,
27862,
11,
... | 2.747899 | 357 |
from classes.IP.IPGrepr import IPGrepr
from classes.IP.utils import handle_mask_or_no_mask
| [
6738,
6097,
13,
4061,
13,
4061,
43887,
1050,
1330,
6101,
43887,
1050,
198,
6738,
6097,
13,
4061,
13,
26791,
1330,
5412,
62,
27932,
62,
273,
62,
3919,
62,
27932,
198
] | 3.033333 | 30 |
from rlkit.torch.sac.policies import ScriptPolicy
import argparse
import json
import torch
from torch.utils.data import Dataset, DataLoader
import os
import pandas as pd
import numpy as np
from collections import deque
import cv2
import albumentations as A
import copy
from clothmanip.envs.template_renderer import TemplateRenderer
from clothmanip.utils import mujoco_model_kwargs
import mujoco_py
import random
import cv2
import re
if __name__ == "__main__":
parser = argparse.ArgumentParser("Parser")
parser.add_argument('folder', type=str)
parser.add_argument('frame_stack_size', type=int)
parser.add_argument('save_every_epoch', type=int)
args = parser.parse_args()
main(args) | [
6738,
374,
75,
15813,
13,
13165,
354,
13,
30584,
13,
79,
4160,
444,
1330,
12327,
36727,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
11,
6060,
17401,
198,
... | 2.923695 | 249 |
""" http://adventofcode.com/2015/day/4 """
from hashlib import md5
# pylint: disable=inconsistent-return-statements
def validate_hash(input_str, num_zeros):
"""Check if hex md5 starts with '00000'"""
if md5(input_str.encode('utf-8')).hexdigest().startswith('0'*num_zeros):
return input_str
def find_min_suffix(prefix, num_zeros, suffix=0):
"""Find min string that is prefixINT and hash starts with 00000"""
result = None
while not result:
suffix += 1
result = validate_hash('%s%s' % (prefix, suffix), num_zeros)
return suffix
| [
37811,
2638,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
4626,
14,
820,
14,
19,
37227,
198,
6738,
12234,
8019,
1330,
45243,
20,
628,
198,
2,
279,
2645,
600,
25,
15560,
28,
1939,
684,
7609,
12,
7783,
12,
14269,
3196,
198,
4299,
26571,
... | 2.603604 | 222 |
from DoubleLinkedList import DLinkedList as dList
class Mt:
"""
:raise Exception("BAD DELTA ELEMENT")
"""
@classmethod
def from_text(cls, text: str):
"""
:raise Exception("BAD DELTA ELEMENT")
"""
obj = cls()
lines = text.splitlines()
flag = 'alphabet'
for line in lines:
if flag == 'alphabet':
if line == '####':
flag = 'spec_alphabet'
continue
obj.alphabet.add(line)
continue
if flag == 'spec_alphabet':
if line == '####':
flag = 'states'
continue
obj.alphabet_spec.add(line)
continue
if flag == 'states':
if line == '####':
flag = 'start_state'
continue
obj.states.add(line)
continue
if flag == 'start_state':
if line == '####':
flag = 'final_states'
continue
obj.start_state = line
continue
if flag == 'final_states':
if line == '####':
flag = 'delta'
continue
obj.final_states.add(line)
continue
if flag == 'delta':
if line == '####':
flag = 'stop'
continue
delta_ln = line.split(' ')
if not (delta_ln[0] in obj.states and delta_ln[1] in obj.alphabet and
delta_ln[2] in obj.states and delta_ln[3] in obj.alphabet and
delta_ln[4] in obj.memory_step):
print("Bad delta")
raise Exception("BAD DELTA ELEMENT")
obj.delta[(delta_ln[0], delta_ln[1])] = (delta_ln[2], delta_ln[3], delta_ln[4])
continue
return obj
| [
6738,
11198,
11280,
276,
8053,
1330,
360,
11280,
276,
8053,
355,
288,
8053,
628,
198,
4871,
18632,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1058,
40225,
35528,
7203,
33,
2885,
28163,
5603,
40342,
10979,
4943,
198,
220,
220,
... | 1.696817 | 1,194 |
#!/usr/bin/env python
# encoding: utf-8
"""
The database connection management
"""
from __future__ import print_function
import datetime
import json
import logging
import os
import re
import subprocess
import tempfile
from mongokit import Connection
from .common import hash_password
from .play import Play, PlayMigration
from .users import User
class Database(object):
"""The database connection"""
@staticmethod
def get_db():
"""Return the database based on DATABASE_URI env var
:rtype: Database
"""
if 'DATABASE_URI' in os.environ:
uri = os.environ['DATABASE_URI']
return Database(uri=uri)
raise EnvironmentError('DATABASE_URI environment variable is missing')
def __init__(self, uri):
"""Init the Database using given uri
:param uri: The URI to connect to, such as
mongodb://LOGIN:PASSWORD@SERVER:PORT/DB_NAME
"""
self.uri = uri
self.connect(uri)
self.dbname = uri.split('/')[-1]
logging.info('dbname is %s', self.dbname)
def connect(self, uri):
"""Connect to given uri
:param uri: The URI to connect to, such as
mongodb://LOGIN:PASSWORD@SERVER:PORT/DB_NAME
"""
logging.info('Connecting to uri %s', uri)
self.connection = Connection(host=uri)
self.connection.register([User, Play])
return self.connection
# pylint: disable=C0103
@property
def db(self):
"""Return the pymongo's db object using the database name"""
return self.connection[self.dbname]
def add_user(self, login, name, passwd, email):
"""Add a user
:param login: The user login
:param name: The user complete name
:param passwd: The user password, will be hashed
:param email: The user email"""
# must not already exist
if self.get_user(login=login):
msg = 'A user with login "%s" has already been declared' % login
raise ValueError(msg)
user = self.db.User()
user['login'] = login
user['name'] = name
user['email'] = email
user['passwd'] = hash_password(passwd)
user.save()
def delete_user(self, login):
"""Delete the user with the given login"""
user = self.get_user(login=login)
if user:
user.delete()
def drop(self):
"""Drop the database"""
self.connection.drop_database(self.dbname)
# pylint: disable=R0201
def authenticate_user(self, user, passwd):
"""Authenticate the user
"""
hashed_passwd = hash_password(passwd)
user.authauthenticate(hashed_passwd)
def get_user(self, login):
"""Retrieve the user with given login or None"""
return self.db.User.one({'login': login})
def add_play(self, date, game, creator):
"""Add a play
:type date: datetime.datetime
:type game: basestring
:rtype: Play"""
play = self.db.Play()
play.set_date(date)
play.set_game(game)
play.set_created_by(creator)
play.save()
return play
def add_play_from_json(self, json_play):
"""Adds a play from a json definition
:type json_play: dict|basestring
:rtype: Play"""
# TODO: improve typecheck
if type(json_play) == dict:
json_play = json.dumps(json_play)
play = self.db.Play.from_json(json_play)
play.save()
return play
def get_plays(self):
"""Return all plays"""
return [play for play in self.db.Play.find()]
def migrate_all(self):
"""Runs the migration rules in bulk"""
migration_play = PlayMigration(Play)
migration_play.migrate_all(self.db.plays) # pylint: disable=E1101
def dump(self, dump_folder=None):
"""Dump the database in the given dump_file
Use the archive option to compress
if uri is None, will use DATABASE_URI env var
if dump_folder is None, will use a timetagged folder"""
logging.info('mongodumping')
info = Database.get_uri_info(uri=self.uri)
if dump_folder is None:
timetag = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
dump_foldername = '{}_{}'.format(timetag, info['db_name'])
dump_folder = os.path.join('dump', dump_foldername)
info['dump_folder'] = dump_folder
info['temp_folder'] = tempfile.mkdtemp()
logging.info('mongodump on %s', info)
cmd = '' \
'mongodump -h {host} --port {port} -u {user} -p {password}' \
' --db {db_name} --out={temp_folder}'.format(**info)
logging.info(cmd)
if dump_folder != '' and not os.path.exists(dump_folder):
os.makedirs(dump_folder)
rcode = subprocess.call(cmd.split(' '))
if rcode == 0:
logging.info('dumped to %s', dump_folder)
os.rename(os.path.join(info['temp_folder'], info['db_name']),
dump_folder)
else:
logging.fatal('Failed to dump! - return code is %s', rcode)
def restore(self, dump_folder, delete=False):
"""Restore a dump saved using mongodump in the given database"""
logging.info('mongorestoring')
if not os.path.exists(dump_folder):
raise RuntimeError('dump folder does not exist %s' % dump_folder)
info = Database.get_uri_info(uri=self.uri)
info['dump_folder'] = dump_folder
logging.info('mongorestore on %s', info)
if delete:
self.drop()
cmd = '' \
'mongorestore -h {host} --port {port} -u {user} -p {password}' \
' --db {db_name} {dump_folder}'.format(**info)
logging.info(cmd)
rcode = subprocess.call(cmd.split(' '))
if rcode == 0:
logging.info('restored from %s', dump_folder)
else:
logging.fatal('Failed to restore! - return code is %s', rcode)
@staticmethod
def get_uri_info(uri):
"""Return configured UriInfo (host, port, username, password, dbname)
based on the configured DATABASE_URI env var
:rtype: tuple
"""
if uri is None and 'DATABASE_URI' not in os.environ:
msg = 'Must give uri or have os.environ[\'DATABASE_URI\']'
raise RuntimeError(msg)
elif uri is None:
uri = os.environ['DATABASE_URI']
return Database.parse_uri(uri)
@staticmethod
def parse_uri(uri):
"""Return the elements of the uri:
(host, port, username, password, dbname)
"""
match = re.match(
(r'mongodb://(?P<user>[^:]+):(?P<password>[^@]+)'
r'@(?P<host>[^:]+):(?P<port>\d+)/(?P<db_name>\w+)'), uri)
if match:
return {
'host': match.group('host'),
'port': match.group('port'),
'user': match.group('user'),
'password': match.group('password'),
'db_name': match.group('db_name')
}
raise RuntimeError('Failed to parse uri: {}'.format(uri))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
464,
6831,
4637,
4542,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
4818,
8079,
198,
11748,
... | 2.19 | 3,300 |
from Board import Board
class Engine(object):
"""
Takes a board position and returns the best move
"""
INF = 1000000
def evaluate(self, depth=0):
"""
Returns a numeric evaluation of the position
Written from the perspective of Tiger
"""
winner = self.board.winner
if not winner:
return 300 * self.board.movable_tigers() + 700 * self.board.deadGoats\
- 700 * self.board.no_of_closed_spaces - depth
if winner == Board.Player.G:
return -Engine.INF
elif winner == Board.Player.T:
return Engine.INF
| [
6738,
5926,
1330,
5926,
628,
198,
4871,
7117,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
33687,
257,
3096,
2292,
290,
5860,
262,
1266,
1445,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
45594,
796,
1802,
2388,
... | 2.345588 | 272 |
# coding=utf-8
from contracts import contract
from geometry.utils import assert_allclose
import numpy as np
from .matrix_linear_space import MatrixLinearSpace
__all__ = ['Euclidean', 'R', 'R1', 'R2', 'R3']
class Euclidean(MatrixLinearSpace):
'''
This is the usual Euclidean space of finite dimension;
this is mostly used for debugging.
There is no proper Haar measure; as an arbitrary choice,
the :py:func:`sample_uniform`
returns a sample from a Gaussian distribution centered at 0.
'''
@contract(x='array')
@contract(returns='belongs')
R1 = Euclidean(1)
R2 = Euclidean(2)
R3 = Euclidean(3)
R = {1: R1, 2: R2, 3: R3}
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
8592,
1330,
2775,
198,
6738,
22939,
13,
26791,
1330,
6818,
62,
439,
19836,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
6759,
8609,
62,
29127,
62,
13200,
1330,
24936,
14993,
451,
141... | 2.590909 | 264 |
from django.urls import path
import mainapp.views as mainapp
app_name = 'mainapp'
urlpatterns = [
path('', mainapp.index, name='index'),
path('cabinet/', mainapp.cabinet, name='cabinet'),
path('cabinet/profile/', mainapp.profile, name='profile'),
path('cabinet/profile/edit/', mainapp.edit_profile, name='edit_profile'),
path('cabinet/profile/edit/change_password/', mainapp.change_password, name='change_password'),
path('about/', mainapp.about, name='about'),
path('organizations/', mainapp.organizations, name='organizations'),
path('participants/', mainapp.participants, name='participants'),
path('cabinet/group/index/<int:pk>/', mainapp.group_info, name='group_info'),
path('cabinet/group/create/', mainapp.create_group, name='create_group'),
path('cabinet/group/edit/<int:pk>/', mainapp.edit_group, name='edit_group'),
path('cabinet/group/delete/<int:pk>/', mainapp.delete_group, name='delete_group'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
11748,
1388,
1324,
13,
33571,
355,
1388,
1324,
198,
198,
1324,
62,
3672,
796,
705,
12417,
1324,
6,
198,
198,
6371,
33279,
82,
796,
685,
628,
220,
220,
220,
3108,
10786,
3256,
1... | 2.731638 | 354 |
#!/usr/bin/env python3
"""
Building Skills in Object-Oriented Design V4
The blackjack module includes the Suit class and Card class hierarchy.
:author: S. Lott
:license: http://creativecommons.org/licenses/by-nc-nd/3.0/us/
"""
from typing import Any
import enum
class Suit(enum.Enum):
"""Enumerated suit names and values."""
Clubs = u"\N{BLACK CLUB SUIT}"
Diamonds = u"\N{WHITE DIAMOND SUIT}"
Hearts = u"\N{WHITE HEART SUIT}"
Spades = u"\N{BLACK SPADE SUIT}"
class Card:
"""A single playing card, suitable for Blackjack or
Poker. While a suit is retained, it doesn't figure into
the ordering of cards, as it would in Bridge.
.. note:: Aces and Facecards.
Ace and Facecards are separate subclasses.
.. attribute:: rank
The numeric rank of the card. 2-13, ace has an effective
rank of 14 when used in Poker.
.. attribute:: suit
The string suit of the card. This should be from the
named constants (Clubs, Diamonds, Hearts, Spades).
At the class level, there are four constants that can
make code look a little nicer.
:var: Jack
:var: Queen
:var: King
:var: Ace
"""
Jack = 11
Queen = 12
King = 13
Ace = 1
def __init__(self, rank: int, suit: Suit) -> None:
"""Build a card with a given rank and suit.
:param rank: numeric rank, 2-10. Aces and FaceCards are separate.
:type rank: integer in the range 2 to 10 inclusive.
:param suit: suit, a value from the Suit enum
:type suit: Suit
"""
assert isinstance(suit, Suit)
self.rank = rank
self.suit = suit
self.points = rank
def hardValue(self) -> int:
"""For blackjack, the hard value of this card.
:returns: int
"""
return self.points
def softValue(self) -> int:
"""For blackjack, the soft value of this card.
:returns: int
"""
return self.points
def __eq__(self, other: Any) -> bool:
"""Compare cards, ignoring suit.
>>> from blackjack_doc import Card, Suit
>>> Card(2, Suit.Diamonds) == Card(2, Suit.Spades)
True
>>> Card(2, Suit.Diamonds) == Card(10, Suit.Spades)
False
"""
return self.rank == other.rank
def __lt__(self, other: Any) -> bool:
"""Compare cards, ignoring suit.
>>> from blackjack_doc import Card, Suit
>>> Card(2, Suit.Diamonds) < Card(3, Suit.Spades)
True
>>> Card(10, Suit.Diamonds) < Card(10, Suit.Spades)
False
"""
return self.rank < other.rank
def __str__(self) -> str:
"""
>>> from blackjack_doc import Card, Suit
>>> str(Card(2, Suit.Diamonds))
' 2♢'
"""
return f"{self.rank:2d}{self.suit.value}"
def __repr__(self) -> str:
"""
>>> from blackjack_doc import Card, Suit
>>> repr(Card(2, Suit.Diamonds))
"Card(rank=2, suit=<Suit.Diamonds: '♢'>)"
"""
return f"{self.__class__.__name__}(rank={self.rank!r}, suit={self.suit!r})"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
25954,
20389,
287,
9515,
12,
46,
380,
4714,
8495,
569,
19,
198,
198,
464,
2042,
19650,
8265,
3407,
262,
28871,
1398,
290,
5172,
1398,
18911,
13,
198,
198,
25,
9800,
2... | 2.358744 | 1,338 |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Kerrlib
=======
Provides numerical routines for the propagation of mean fields and phase sensitive
and insensitive moments in a (linearized) Kerr medium and some extra utility function.
"""
import numpy as np
from scipy.linalg import expm
# Pulse Shapes
def gaussian(z):
    r"""Returns a Gaussian function in z

    Args:
        z (array): Input values

    Returns:
        (array): Output array, element-wise exp(-z**2 / 2). This is a
        scalar if z is a scalar.
    """
    exponent = -(z ** 2) / 2.0
    return np.exp(exponent)
def sech(z):
    r"""Returns a hyperbolic secant function in z

    Args:
        z (array): Input values

    Returns:
        (array): Output array, element-wise sech of z (1 / cosh(z)).
        This is a scalar if z is a scalar.
    """
    return np.divide(1.0, np.cosh(z))
def rect(z, w=2 * np.sqrt(2 * np.log(2))):
    r"""Returns a rectangular (top hat) function in z

    The default full width ``w`` equals :math:`2\sqrt{2\ln 2}`, the FWHM
    of a unit-variance Gaussian, so the default top hat matches the
    Gaussian pulse width used elsewhere in this module.

    Args:
        z (array): Input values
        w (float): Full width of the top hat function

    Returns:
        (array): Output array, element-wise top hat of z: 1 where
        ``abs(z) <= w / 2``, 0 elsewhere. This is a scalar if z is a
        scalar.
    """
    return np.where(abs(z) <= 0.5 * w, 1, 0)
def lorentzian(z):
    r"""Returns a Lorentzian-shaped amplitude function in z

    Implements :math:`1/\sqrt{1 + z^2}`, i.e. the square root of the
    Lorentzian intensity profile :math:`1/(1 + z^2)`.

    Args:
        z (array): Input values

    Returns:
        (array): Output array, element-wise 1 / sqrt(1 + z**2). This is
        a scalar if z is a scalar.
    """
    denominator = np.sqrt(1.0 + z ** 2)
    return np.divide(1.0, denominator)
# Helper For Determining Mean-Field Widths
def FWHM(X, Y):
    r""" Calculates the Full Width at Half Maximum of the function Y=f(X)

    Locates the samples where Y crosses half its maximum (sign changes
    between adjacent samples) and returns the abscissa distance between
    the falling and rising crossings.

    Args:
        X (array): Abscissae in which the function f( ) was sampled
        Y (array): Ordinate values, Y=f(X)

    Returns:
        (float): FWHM of the function Y=f(X)
    """
    yarr = np.array(Y)
    half = np.max(yarr) / 2.0
    # Sign difference between consecutive samples: >0 at an upward
    # crossing of the half maximum, <0 at a downward crossing.
    crossings = np.sign(half - yarr[0:-1]) - np.sign(half - yarr[1:])
    rising = np.where(crossings > 0)[0]
    falling = np.where(crossings < 0)[0]
    return X[falling] - X[rising]
# Fourier Transform Functions
def myfft(z, dz):
    r""" Numerical fourier transform of z=f(t) with t sampled at intervals dz

    Scales the discrete FFT by dz / sqrt(2*pi) so it approximates the
    continuous (symmetric-convention) Fourier transform, and shifts the
    DC component to the centre of the spectrum.

    Args:
        z (array): The function evaluated on a real space grid of points
        dz (float): The spacing between the grid points

    Returns:
        (array): The fourier transform of z=f(t)
    """
    spectrum = np.fft.fft(z) * dz / np.sqrt(2.0 * np.pi)
    return np.fft.fftshift(spectrum)
def myifft(k, dk, n):
    r""" Numerical inverse fourier transform of k=f(s) with s sampled at intervals dk
    for a total of n grid points

    Scales the discrete inverse FFT by dk * n / sqrt(2*pi) so it
    approximates the continuous inverse transform, then undoes the
    centre-DC ordering produced by :func:`myfft`.

    Args:
        k (array): The function evaluated on a real space grid of points
        dk (float): The spacing between the grid points
        n (int): Number of sampling points

    Returns:
        (array): The inverse fourier transform of k=f(s)
    """
    signal = np.fft.ifft(k) * dk * n / np.sqrt(2.0 * np.pi)
    return np.fft.ifftshift(signal)
# Split-Step Fourier Operators For Mean-Field Evolution
def opD(u, TD, G, kk, dt):
    r"""Short time "kinetic" or "dispersive" propagator. It applies
    exp(1j dt*(1/2*TD) d^2/dx^2) to u(x). The differential operator is applied
    as multiplication in reciprocal space using fast Fourier transforms.

    Also applies half a step of linear loss exp(-G dt/4) in real space.

    Args:
        u (array): The function evaluated on a real space grid of points
        TD (float): Dispersion time
        G (float): Loss rate
        kk (array): Grid of reciprocal space points with DC point at start
        dt (float): Size of time steps

    Returns:
        (array): The propagated array u by amount dt/2 (note the factor of 1/2)
    """
    spectrum = np.fft.fft(u)
    # Dispersion acts multiplicatively in k-space over half a time step.
    dispersed = np.fft.ifft(np.exp(dt / 2.0 * (1j * kk ** 2 / (2.0 * TD))) * spectrum)
    # Loss acts multiplicatively in real space over the same half step.
    return dispersed * np.exp(dt / 2.0 * (-G / 2.0))
def opN(u, TN, ui, dt):
    r"""Short time "potential" or "nonlinear" propagator. It applies
    exp(1j dt*(TN) |ui(x)|^2) to u(x).

    Args:
        u (array): The initial function evaluated on a real space grid of points
        TN (float): Nonlinear time
        ui (array): Square root of the potential
        dt (float): Size of time steps

    Returns:
        (array): The propagated array u by amount dt
    """
    nonlinear_phase = np.exp(dt * 1j / TN * np.abs(ui) ** 2)
    return nonlinear_phase * u
# Mean-Field Evolution
def P_mean_field(u, TD, TN, G, zz, dz, kk, N, dt):
    r"""Propagates the wavefunction u by time N*dt under both dispersion and nonlinearity.

    Uses a symmetric (Strang) split-step scheme per time step: half a
    dispersive step, a full nonlinear step evaluated with the field from
    the start of the step, then another half dispersive step.

    Args:
        u (array): The initial function evaluated on a real space grid of points
        TD (float): Dispersion time
        TN (float): Nonlinear time
        G (float): Loss rate
        zz (array): Grid of real space points
        dz (float): Size of discretization in real space
        kk (array): Grid of reciprocal space points with DC point at start
        N (int): Number of time steps
        dt (float): Size of time steps

    Returns:
        (array): The time evolved wavefunction after N*dt time.
    """
    field = u
    for _step in range(N):
        # Nonlinear step uses the field as it was at the start of this step.
        start_of_step = field
        field = opD(field, TD, G, kk, dt)
        field = opN(field, TN, start_of_step, dt)
        field = opD(field, TD, G, kk, dt)
    return field
# Matrices For Fluctuation Evolution
def cal_S(u, TN, dz):
    r""" Constructs the \mathcal{S} array for fluctuation propagation

    Fourier transform of the squared (phase-sensitive) mean field,
    divided by the nonlinear time.

    Args:
        u (array): Mean field values evaluated on a real space grid of points
        TN (float): Nonlinear time
        dz (float): Size of discretization in real space

    Returns:
        (array): cal_S array
    """
    squared_field = u ** 2
    return myfft(squared_field, dz) / TN
def cal_M(u, TN, dz):
    r""" Constructs the \mathcal{M} array for fluctuation propagation

    Fourier transform of the mean-field intensity |u|^2, divided by the
    nonlinear time.

    Args:
        u (array): Mean field values evaluated on a real space grid of points
        TN (float): Nonlinear time
        dz (float): Size of discretization in real space

    Returns:
        (array): cal_M array
    """
    intensity = np.abs(u) ** 2
    return myfft(intensity, dz) / TN
def R(u, TD, TN, dz, ks, dk, im, n):
    r"""Construct the R matrix for fluctuation propagation.

    The R matrix combines the dispersion diagonal with the nonlinear
    coupling sampled at the i-j index map.

    Args:
        u (array): Mean field values evaluated on a real space grid of points
        TD (float): Dispersion time
        TN (float): Nonlinear time
        dz (float): Size of discretization in real space
        ks (array): Grid of reciprocal space points with DC point at centre
        dk (float): Size of discretization in reciprocal space
        im (int(n,n)): 2D array of integers (i,j) mapping to the k-space grid
            points for i-j (clipped to [0, n-1] so as not to fall off the grid).
        n (int): Size of the output matrix

    Returns:
        (array): R matrix
    """
    coupling = cal_M(u, TN, dz)[im]
    dispersion = np.diag(np.full(n, ks ** 2 / (2.0 * TD)))
    return dispersion + 2.0 * dk * coupling / np.sqrt(2.0 * np.pi)
def S(u, TN, dz, dk, ip):
    r"""Construct the S matrix for fluctuation propagation.

    Args:
        u (array): Mean field values evaluated on a real space grid of points
        TN (float): Nonlinear time
        dz (float): Size of discretization in real space
        dk (float): Size of discretization in reciprocal space
        ip (int(n,n)): 2D array of integers (i,j) mapping to the k-space grid
            points for i+j (clipped to [0, n-1] so as not to fall off the grid).

    Returns:
        (array): S matrix
    """
    spectrum = cal_S(u, TN, dz)[ip]
    return dk * spectrum / np.sqrt(2.0 * np.pi)
def Q(u, TD, TN, dz, ks, dk, im, ip, n):
    r"""Construct the block matrix Q for fluctuation propagation.

    Assembles the 2n x 2n symplectic-style generator from the R and S
    blocks: [[R, S], [-S^†, -R*]].

    Args:
        u (array): Mean field values evaluated on a real space grid of points
        TD (float): Dispersion time
        TN (float): Nonlinear time
        dz (float): Size of discretization in real space
        ks (array): Grid of reciprocal space points with DC point at centre
        dk (float): Size of discretization in reciprocal space
        im (int(n,n)): index map for i-j (clipped to [0, n-1])
        ip (int(n,n)): index map for i+j (clipped to [0, n-1])
        n (int): Size of the output matrix Q

    Returns:
        (array): Q matrix
    """
    r_block = R(u, TD, TN, dz, ks, dk, im, n)
    s_block = S(u, TN, dz, dk, ip)
    top_row = [r_block, s_block]
    bottom_row = [-s_block.conj().T, -r_block.conj()]
    return np.block([top_row, bottom_row])
# Lossless Propagation
def P_no_loss(u, TD, TN, dz, kk, ks, dk, im, ip, tf, dt, n):
    r"""Lossless propagation of the mean field and its fluctuations in a
    Kerr medium.

    Args:
        u (array): Mean field values evaluated on a real space grid of points
        TD (float): Dispersion time
        TN (float): Nonlinear time
        dz (float): Size of discretization in real space
        kk (array): Grid of reciprocal space points with DC point at start
        ks (array): Grid of reciprocal space points with DC point at centre
        dk (float): Size of discretization in reciprocal space
        im (int(n,n)): index map for i-j (clipped to [0, n-1])
        ip (int(n,n)): index map for i+j (clipped to [0, n-1])
        tf (int): Number of time steps
        dt (int): Size of time steps
        n (int): Size of the output matrices

    Returns:
        (tuple): (u, M, N), the first (u) and second order moments (M, N).
    """
    M = np.zeros(n)
    N = np.zeros(n)
    K = np.identity(2 * n)  # accumulated linearized fluctuation propagator
    for _step in range(tf):
        previous = u
        # symmetric split step for the mean field; loss rate fixed at 0
        u = opD(u, TD, 0, kk, dt)
        u = opN(u, TN, previous, dt)
        u = opD(u, TD, 0, kk, dt)
        # fold this step's generator into the accumulated propagator
        K = expm(1j * dt * Q(u, TD, TN, dz, ks, dk, im, ip, n)) @ K
        U = K[0:n, 0:n]
        W = K[0:n, n:2 * n]
        M = U @ W.T
        N = W.conj() @ W.T
    return u, M, N
# Lossy Propagation
def P_loss(u, TD, TN, G, dz, kk, ks, dk, im, ip, tf, dt, n):
    r"""Lossy propagation of the mean and fluctuations in a Kerr medium.

    Args:
        u (array): Mean field values evaluated on a real space grid of points
        TD (float): Dispersion time
        TN (float): Nonlinear time
        G (float): Loss rate
        dz (float): Size of discretization in real space
        kk (array): Grid of reciprocal space points with DC point at start
        ks (array): Grid of reciprocal space points with DC point at centre
        dk (float): Size of discretization in reciprocal space
        im (int(n,n)): 2D array of integers (i,j) corresponding to the k-space
            gridpoints associated with i-j (clipped to be between 0 and n-1
            so as not to fall off the grid).
        ip (int(n,n)): 2D array of integers (i,j) corresponding to the k-space
            gridpoints associated with i+j (clipped to be between 0 and n-1
            so as not to fall off the grid).
        tf (int): Number of time steps
        dt (int): Size of time steps
        n (int): Size of the output matrices

    Returns:
        (tuple): (u,M,N), the first (u) and second order moments (M,N).
    """
    M = np.zeros(n)
    N = np.zeros(n)
    for _ in range(tf):
        # Mean-field update: symmetric split step with loss rate G.
        ui = u
        u = opD(u, TD, G, kk, dt)
        u = opN(u, TN, ui, dt)
        u = opD(u, TD, G, kk, dt)
        # One-step linearized fluctuation propagator for the updated field.
        K = expm(1j * dt * Q(u, TD, TN, dz, ks, dk, im, ip, n))
        U = K[0:n, 0:n]
        W = K[0:n, n:2 * n]
        # NOTE(review): M is overwritten here, and the expression for N below
        # reads the UPDATED M rather than the pre-update value (contrast with
        # a simultaneous update, which would save the old M first). Confirm
        # this sequential ordering is intentional.
        M = U @ M @ (U.T) + W @ (M.conj()) @ (W.T) + W @ N @ (U.T) + U @ (N.T) @ (W.T) + U @ (W.T)
        N = (
            W.conj() @ M @ (U.T) +
            U.conj() @ (M.conj()) @ (W.T) +
            U.conj() @ N @ (U.T) +
            W.conj() @ (N.T) @ (W.T) +
            W.conj() @ (W.T)
        )
        # Apply linear loss decay to both second-order moments each step.
        M = (1 - G * dt) * M
        N = (1 - G * dt) * N
    return u, M, N
def expected_squeezing_g(n_phi):
    r"""Expected squeezing (dB) of a Gaussian pulse for lossless,
    dispersionless propagation with maximal nonlinear phase shift n_phi,
    per JOSA B 7, 30 (1990).

    Args:
        n_phi (float): Maximal nonlinear phase shift.

    Returns:
        Associated squeezing in dB.
    """
    numerator = np.sqrt(2) * n_phi + 2 * np.sqrt(2) * n_phi**3 / 3
    argument = 1 + 2 * n_phi**2 / np.sqrt(3) - numerator / np.sqrt(1 + 2 * n_phi**2 / 3)
    return 10 * np.log10(argument)
def expected_squeezing_r(n_phi):
    r"""Expected squeezing (dB) of a rectangular pulse for lossless,
    dispersionless propagation with maximal nonlinear phase shift n_phi,
    per JOSA B 7, 30 (1990).

    Args:
        n_phi (float): Maximal nonlinear phase shift.

    Returns:
        Associated squeezing in dB.
    """
    numerator = 2 * n_phi + 2 * n_phi**3
    argument = 1 + 2 * n_phi**2 - numerator / np.sqrt(1 + n_phi**2)
    return 10 * np.log10(argument)
def expected_squeezing_s(n_phi):
    r"""Expected squeezing (dB) of a sech pulse for lossless,
    dispersionless propagation with maximal nonlinear phase shift n_phi,
    per JOSA B 7, 30 (1990).

    Args:
        n_phi (float): Maximal nonlinear phase shift.

    Returns:
        Associated squeezing in dB.
    """
    numerator = 4 * n_phi / 3 + 64 * n_phi**3 / 75
    argument = 1 + 16 * n_phi**2 / 15 - numerator / np.sqrt(1 + 16 * n_phi**2 / 25)
    return 10 * np.log10(argument)
def expected_squeezing_l(n_phi):
    r"""Expected squeezing (dB) of a Lorentzian pulse for lossless,
    dispersionless propagation with maximal nonlinear phase shift n_phi,
    per JOSA B 7, 30 (1990).

    Args:
        n_phi (float): Maximal nonlinear phase shift.

    Returns:
        Associated squeezing in dB.
    """
    numerator = n_phi + 9 * n_phi**3 / 16
    argument = 1 + 3 * n_phi**2 / 4 - numerator / np.sqrt(1 + 9 * n_phi**2 / 16)
    return 10 * np.log10(argument)
| [
2,
15069,
13130,
47482,
324,
84,
29082,
21852,
3457,
13,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 2.341603 | 6,127 |
# This solution counts how many times the words "fizz" and "buzz" appear in a range provided by the user
# The input has to be a valid positive integer
# The current count of the word is shown each time the word appears, and a random expression for FizzBuzz
# Author: @moisesjsalmeida

import random

# Random exclamations prepended to each FizzBuzz line.
interjections = [
    "Wow! ",
    "Yay! ",
    "Ooh-la-la! ",
    "Whoa! ",
    "Yeah! ",
    "Eureka! ",
    "Voila! ",
    "Yipee! ",
    "Boo-ya! ",
]

fizz_total = 0
buzz_total = 0
fizzbuzz_total = 0

# Keep prompting until the user supplies a positive integer.
while True:
    try:
        limit = int(input("Type the range of the fizzbuzz count: "))
        if limit < 1:
            raise ValueError
    except ValueError:
        print("\nEnter a valid positive integer!")
    else:
        break

# NOTE(review): the upper bound itself is excluded (range stops at limit - 1);
# confirm this matches the intended meaning of "range" before changing it.
for number in range(1, limit):
    if number % 3 == 0 and number % 5 == 0:
        fizzbuzz_total += 1
        output = random.choice(interjections) + "It's a FizzBuzz! #" + str(fizzbuzz_total)
    elif number % 3 == 0:
        fizz_total += 1
        output = "Fizz #" + str(fizz_total)
    elif number % 5 == 0:
        buzz_total += 1
        output = "Buzz #" + str(buzz_total)
    else:
        output = number
    print(output)

print("\n")
print("Total Fizzes: " + str(fizz_total))
print("Total Buzzes: " + str(buzz_total))
print("Total FizzBuzzes: " + str(fizzbuzz_total))
| [
2,
770,
4610,
9853,
703,
867,
1661,
262,
2456,
366,
69,
6457,
1,
290,
366,
65,
4715,
1,
1656,
287,
257,
2837,
2810,
416,
262,
2836,
198,
2,
383,
5128,
468,
284,
307,
257,
4938,
3967,
18253,
198,
2,
383,
1459,
954,
286,
262,
1573... | 2.229091 | 550 |
#! python3
import sys
import json
import serial
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QSpinBox, \
QGridLayout, QPushButton, QGroupBox, QVBoxLayout
from PyQt5.QtCore import QTimer
# Serial device path for the attached controller board.
# NOTE(review): macOS-style USB-modem device name — adjust per host; COMPORT
# is not referenced in the visible portion of this file, so confirm where
# (presumably inside CtrlTestGui) it is consumed.
COMPORT = "/dev/cu.usbmodemfd121"
if __name__ == "__main__":
    # Build the Qt application and show the control-test window, then enter
    # the Qt event loop (blocks until the window is closed).
    # NOTE(review): CtrlTestGui is not defined in the visible portion of this
    # file — confirm it is defined or imported above before running.
    app = QApplication(sys.argv)
    w = CtrlTestGui()
    w.show()
    app.exec_()
| [
2,
0,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
11389,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
38300,
11,
1195,
33986,
11,
1195,
4561,
259,
14253,
11,
3467,
198,
... | 2.414966 | 147 |
"""A port of dweetio-client's (the official javascript one, to python)
"""
# stdlib imports
import os
import subprocess
import unittest
import uuid
# local imports
import ts_dweepy
# Sample payload sent during the tests: mixes a short string and a large
# integer to exercise JSON round-tripping.
test_data = {
    'hello': "world",
    'somenum': 6816513845,
}
# Credentials come from the environment so they stay out of the repository;
# each is None when the corresponding variable is unset.
test_lock = os.environ.get('DWEET_LOCK')
test_key = os.environ.get('DWEET_KEY')
# Server-side alert condition (JavaScript source evaluated by the dweet
# service): emits a message when alertValue is above or below 10.
test_alert_condition = "if(dweet.alertValue > 10) return 'TEST: Greater than 10'; if(dweet.alertValue < 10) return 'TEST: Less than 10';"
| [
37811,
32,
2493,
286,
288,
7277,
952,
12,
16366,
338,
357,
1169,
1743,
44575,
530,
11,
284,
21015,
8,
198,
37811,
198,
2,
14367,
8019,
17944,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
555,
715,
395,
198,
11748,
334,
27112... | 2.685393 | 178 |