content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A protocol for implementing high performance clifford tableau evolutions
for Clifford Simulator."""
from typing import Any, Dict, TYPE_CHECKING, List, Sequence
import numpy as np
from cirq.ops import common_gates
from cirq.ops import global_phase_op
from cirq.sim.clifford.act_on_stabilizer_args import ActOnStabilizerArgs
if TYPE_CHECKING:
import cirq
class ActOnCliffordTableauArgs(ActOnStabilizerArgs):
"""State and context for an operation acting on a clifford tableau."""
def __init__(
self,
tableau: 'cirq.CliffordTableau',
prng: np.random.RandomState,
log_of_measurement_results: Dict[str, Any],
qubits: Sequence['cirq.Qid'] = None,
):
"""Inits ActOnCliffordTableauArgs.
Args:
tableau: The CliffordTableau to act on. Operations are expected to
perform inplace edits of this object.
qubits: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
prng: The pseudo random number generator to use for probabilistic
effects.
log_of_measurement_results: A mutable object that measurements are
being recorded into.
"""
super().__init__(prng, qubits, log_of_measurement_results)
self.tableau = tableau
def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:
"""Returns the measurement from the tableau."""
return [self.tableau._measure(self.qubit_map[q], self.prng) for q in qubits]
| [
2,
15069,
2864,
383,
21239,
80,
34152,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,... | 2.766667 | 810 |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : python้ฉฑๅจpyog
Case Name : ๆฎ้ๆจกๅผ่ฟๆฅๆฐๆฎๅบ๏ผ่ฟๆฅไฟกๆฏ้่ฏฏ
Description :
1.ๅๅปบๅบใ็จๆท๏ผ็จๆทๅฏ็ ไธๅซ็นๆฎๅญ็ฌฆ๏ผๅนถ่ตๆ
2.้
็ฝฎpg_hbaๅ
ฅๅฃ
3.่ฟๆฅๆฐๆฎๅบ
Expect :
1.ๆง่กๆๅ
2.ๆง่กๆๅ
3.่ฟๆฅๅคฑ่ดฅ๏ผๆ็ธๅบๆ็คบไฟกๆฏ
History :
"""
import os
import re
import unittest
import py_opengauss
from py_opengauss.exceptions import AuthenticationSpecificationError, \
ClientCannotConnectError, ConnectionRejectionError, ServerNotReadyError
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
| [
37811,
198,
15269,
357,
66,
8,
33160,
43208,
21852,
1766,
1539,
43,
8671,
13,
198,
198,
9654,
35389,
1046,
318,
11971,
739,
17996,
272,
6599,
43,
410,
17,
13,
198,
1639,
460,
779,
428,
3788,
1864,
284,
262,
2846,
290,
3403,
286,
262... | 2.142308 | 520 |
from itertools import combinations
import numpy as np
import matplotlib.pyplot as plt
from utils import *
import configparser
import torch
class CurveEnvironment:
"""
Curve ํ๊ฒฝ, ๊ฐํ ํ์ต ๋ชจ๋ธ์ด ์์ฑํ action์ ๊ธฐ๋ฐ์ผ๋ก ์ปค๋ธ์ ์์๋ฅผ ๋ฐ๊พธ๊ณ , ๋ฐ๋ ์ปค๋ธ๋ฅผ ๊ธฐ๋ฐ์ผ๋ก reward๋ฅผ ์ธก์
"""
def __init__(self, order=3, dim=2, data_size=10, init_curve='zig-zag', normalize=True, life=10, seed=1234):
"""
:param order: Curve iteration ๊ฐ์
:param dim: ์ฐจ์ ์
:param data_size: ํ์ฑํ ๋ฐ์ดํฐ ๊ฐ์
:param init_curve: ์ด๊ธฐ ์ปค๋ธ, ์ด ์ปค๋ธ์ ์์๋ฅผ ๋ฐ๊พธ๋ฉด์ ์ต์ ์ ์ปค๋ธ๋ฅผ ์ฐพ์
:param normalize: ์ฃผ์ด์ง coordinate๋ฅผ normalize ํ ๊ฒ์ธ์ง?
:param life: ํ episode ๋น ์ฃผ์ด์ง๋ ๋ชฉ์จ
:param seed: ํ์ฑํ ๋ฐ์ดํฐ ์์ฑ ์๋
"""
self.order = order
self.dim = dim
self.data_size = data_size
self.total_grid = 2 ** (order * dim)
self.side = int(np.sqrt(self.total_grid)) # grid ์ธ๋ก ๋๋ ๊ฐ๋ก ๊ฐ์
self.init_curve = init_curve
self.normalized = normalize
self.debug = dict() # ๋๋ฒ๊ทธ์ฉ ์ ๋ณด๊ฐ ๋ด๊ธด dictionary. ์ฃผ๋ก, cost ์ ๋ณด๋ฅผ ๋ด์
np.random.seed(seed)
# ์์์ ๋ฐ์ดํฐ ๋ถํฌ ์์ฑ
self.data_index = np.random.choice(self.total_grid, size=data_size, replace=False)
self.data_coord = np.array(
list(map(lambda x: list([x // self.side, x % self.side]), self.data_index))) # ์์ฑ๋ ๋ฐ์ดํฐ์ ์ขํ ๊ตฌ์ฑ
# episode ์ข
๋ฃ ๊ธฐ์ค
self.life = life # life ๊ฐ 0์ ๋๋ฌํ๋ฉด episode ์ข
๋ฃ
self.ori_life = life
# ์ปค๋ธ ์์ฑ
self.curve_coord = self.reset()
# reward ์ธก์ ์ฉ ๊ธฐ์ค
self.min_cost = self.get_l2_norm_locality()
self.prev_cost = self.min_cost
@staticmethod
def build_init_coords(self):
"""
์ด๊ธฐ ๊ณก์ ํ์
์ ๋ฐ๋ฅธ n ์ฐจ์ ์ขํ list๋ฅผ ๋ง๋๋ ํจ์, list ๋ด ์ขํ ๋ฐฐ์น ์์๋ ๊ณก์ ํ์
์ ๋ฐ๋ฆ
:return:
"""
coords = None
try:
if self.init_curve == 'zig-zag':
whole_index = np.arange(self.total_grid)
coords = np.array(list(map(lambda x: list([x // self.side, x % self.side]), whole_index)))
elif self.init_curve == 'hilbert':
coords = HilbertCurve(dimension=self.dim).getCoords(order=self.order)
elif self.init_curve == 'z':
coords = ZCurve(dimension=self.dim).getCoords(order=self.order)
else:
raise Exception('Curve type must be "zig-zag" or "hilbert" or "z".')
except Exception as e:
print(e)
finally:
return coords
def reset(self):
"""
n ์ฐจ์ ๊ณก์ ์ขํ list๋ฅผ ์์ฑํ๊ณ , ํด๋น ์ขํ์ ํ์ฑํ ๋ฐ์ดํฐ ์ฌ๋ถ๋ฅผ ํ์ํ๋ ํจ์
๋ํ reward ์ธก์ ์ ์ํ ๊ธฐ์ค์ ์ด๊ธฐํํจ
:return:
"""
self.curve_coord = self.build_init_coords() # ๊ณก์ ์ n ์ฐจ์ ์ขํ list๋ก ๊ตฌ์ฑ
avail = np.zeros(shape=(self.total_grid, 1), dtype=np.int)
# ์ด๋ฏธ ์์ฑ๋ ํ์ฑํ ๋ฐ์ดํฐ์ ์ขํ๊ฐ ์ผ์น๋๋ ๊ณณ์ ํ์ฑํ
for index in map(lambda x: np.where(np.all(self.curve_coord == x, axis=1)), self.data_coord):
avail[index] = 1 # ํ์ฑํ ๋ฐ์ดํฐ ์ฌ๋ถ ํ์
self.curve_coord = np.concatenate((avail, self.curve_coord), axis=1)
if self.normalized: # do feature scaling
self.curve_coord = CurveEnvironment.normalize_state(self.curve_coord)
self.min_cost = self.get_l2_norm_locality()
self.prev_cost = self.min_cost
self.life = self.ori_life
return self.curve_coord
def get_l2_norm_locality(self):
"""
l2 norm ratio locality ์ธก์ ํจ์
sum(1 - (l2 norm/ l1 norm)) ์ ํํ
:return:
"""
avail_data = []
for idx, point in enumerate(self.curve_coord):
if point[0] == 1: # ํ์ฑํ ๋ฐ์ดํฐ์ธ ๊ฒฝ์ฐ
avail_data.append([point[1], point[2], idx])
cost = 0
# ํ์ฑํ๋ ๋ฐ์ดํฐ๋ง ๋ชจ์, ๊ฒฐ๊ณผ๋ (x, y, ๋ฐ์ดํฐ ์์)
for (x, y) in combinations(avail_data, 2):
dist_2d = np.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2)
dist_1d = np.abs(x[2] - y[2])
# Locality Ratio ๊ฐ 1๊ณผ ๊ฐ๊น์ด์ง ์ธก์
cost += np.abs(1 - (dist_1d / dist_2d))
return cost
def get_reward(self):
"""
๋ณด์ ์ธก์ ํจ์, l2_norm_locality๊ฐ ๊ฐ์ํ ๊ฒฝ์ฐ positive reward๋ฅผ ๋ถ์ฌํ๋ค. ๊ทธ ์ธ์๋ 0 ๋๋ negative reward
:return:
"""
curr_cost = self.get_l2_norm_locality()
reward = 0
self.debug['cost'] = curr_cost
if self.min_cost < curr_cost: # ์ต์ cost ๋ณด๋ค ์์์ง์ง ๋ชปํ ๊ฒฝ์ฐ
if self.prev_cost < curr_cost:
self.life -= 1
reward = -1
elif self.prev_cost > curr_cost: # ์ต์ cost ๋ณด๋ค ์์์ง์ง ๋ชปํ์ง๋ง, ์ด์ ์ปค๋ธ cost ๋ณด๋ค๋ ์์์ก์ ๊ฒฝ์ฐ
reward = 0
else:
reward = 0
elif self.prev_cost == curr_cost:
reward = 0
else:
reward = max(1, abs(curr_cost - self.min_cost))
self.min_cost = curr_cost # ์ต์ cost ๊ฐฑ์
self.prev_cost = curr_cost # ์ด์ cost ๊ฐฑ์
return reward
if '__main__' == __name__:
test_env = CurveEnvironment()
for curve_name in ['z', 'hilbert', 'zig-zag']:
test_env = CurveEnvironment(init_curve=curve_name)
print(test_env.get_l2_norm_locality())
print(test_env.get_reward())
| [
6738,
340,
861,
10141,
1330,
17790,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
3384,
4487,
1330,
1635,
198,
11748,
4566,
48610,
198,
11748,
28034,
628,
198,
4871,
46... | 1.466155 | 3,516 |
from email import utils
import re
import time
import urllib
def strftime(datetime, formatstr):
"""
Uses Python's strftime with some tweaks
"""
return datetime.strftime(formatstr).lstrip("0").replace(" 0", " ")
# def slugify(s):
# """
# Use Django's slugify method
# """
# return defaultfilters.slugify(s)
| [
6738,
3053,
1330,
3384,
4487,
198,
11748,
302,
198,
11748,
640,
198,
11748,
2956,
297,
571,
628,
628,
198,
4299,
965,
31387,
7,
19608,
8079,
11,
5794,
2536,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
36965,
11361,
338,
965,
... | 2.621212 | 132 |
"""OpenMediaVault Controller."""
import asyncio
import time
from datetime import timedelta
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .const import DOMAIN
from .helper import parse_api
from .omv_api import OpenMediaVaultAPI
# ---------------------------
# OMVControllerData
# ---------------------------
class OMVControllerData(object):
"""OMVControllerData Class."""
def __init__(self, hass, config_entry):
"""Initialize OMVController."""
self.hass = hass
self.config_entry = config_entry
self.name = config_entry.data[CONF_NAME]
self.host = config_entry.data[CONF_HOST]
self.data = {
"hwinfo": {},
"disk": {},
"fs": {},
# "service": {},
}
self.listeners = []
self.lock = asyncio.Lock()
self.api = OpenMediaVaultAPI(
hass,
config_entry.data[CONF_HOST],
config_entry.data[CONF_USERNAME],
config_entry.data[CONF_PASSWORD],
config_entry.data[CONF_SSL],
config_entry.data[CONF_VERIFY_SSL],
)
self._force_update_callback = None
self._force_hwinfo_update_callback = None
# ---------------------------
# async_init
# ---------------------------
# ---------------------------
# signal_update
# ---------------------------
@property
def signal_update(self):
"""Event to signal new data."""
return f"{DOMAIN}-update-{self.name}"
# ---------------------------
# async_reset
# ---------------------------
async def async_reset(self):
"""Reset dispatchers."""
for unsub_dispatcher in self.listeners:
unsub_dispatcher()
self.listeners = []
return True
# ---------------------------
# connected
# ---------------------------
def connected(self):
"""Return connected state."""
return self.api.connected()
# ---------------------------
# force_hwinfo_update
# ---------------------------
@callback
async def force_hwinfo_update(self, _now=None):
"""Trigger update by timer."""
await self.async_hwinfo_update()
# ---------------------------
# async_hwinfo_update
# ---------------------------
async def async_hwinfo_update(self):
"""Update OpenMediaVault hardware info."""
try:
await asyncio.wait_for(self.lock.acquire(), timeout=30)
except:
return
await self.hass.async_add_executor_job(self.get_hwinfo)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_disk)
self.lock.release()
# ---------------------------
# force_update
# ---------------------------
@callback
async def force_update(self, _now=None):
"""Trigger update by timer."""
await self.async_update()
# ---------------------------
# async_update
# ---------------------------
async def async_update(self):
"""Update OMV data."""
if self.api.has_reconnected():
await self.async_hwinfo_update()
try:
await asyncio.wait_for(self.lock.acquire(), timeout=10)
except:
return
await self.hass.async_add_executor_job(self.get_hwinfo)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_fs)
if self.api.connected():
await self.hass.async_add_executor_job(self.get_smart)
# await self.hass.async_add_executor_job(self.get_service)
async_dispatcher_send(self.hass, self.signal_update)
self.lock.release()
# ---------------------------
# get_hwinfo
# ---------------------------
def get_hwinfo(self):
"""Get hardware info from OMV."""
self.data["hwinfo"] = parse_api(
data=self.data["hwinfo"],
source=self.api.query("System", "getInformation"),
vals=[
{"name": "hostname", "default": "unknown"},
{"name": "version", "default": "unknown"},
{"name": "cpuUsage", "default": 0},
{"name": "memTotal", "default": 0},
{"name": "memUsed", "default": 0},
{"name": "uptime", "default": "0 days 0 hours 0 minutes 0 seconds"},
{"name": "configDirty", "type": "bool", "default": False},
{"name": "rebootRequired", "type": "bool", "default": False},
{"name": "pkgUpdatesAvailable", "type": "bool", "default": False},
],
ensure_vals=[{"name": "memUsage", "default": 0}],
)
if int(self.data["hwinfo"]["version"].split(".")[0])>5:
tmp = self.data["hwinfo"]["uptime"]
pos = abs( int(tmp) )
day = pos / (3600*24)
rem = pos % (3600*24)
hour = rem / 3600
rem = rem % 3600
mins = rem / 60
secs = rem % 60
res = '%d days %02d hours %02d minutes %02d seconds' % (day, hour, mins, secs)
if int(tmp) < 0:
res = "-%s" % res
tmp = res.split(" ")
else:
tmp = self.data["hwinfo"]["uptime"].split(" ")
self.data["hwinfo"]["uptimeEpoch"] = int(tmp[0]) * 24 + int(tmp[2])
self.data["hwinfo"]["cpuUsage"] = round(self.data["hwinfo"]["cpuUsage"], 1)
if int(self.data["hwinfo"]["memTotal"]) > 0:
mem = (
int(self.data["hwinfo"]["memUsed"])
/ int(self.data["hwinfo"]["memTotal"])
) * 100
else:
mem = 0
self.data["hwinfo"]["memUsage"] = round(mem, 1)
# ---------------------------
# get_disk
# ---------------------------
def get_disk(self):
"""Get all filesystems from OMV."""
self.data["disk"] = parse_api(
data=self.data["disk"],
source=self.api.query("DiskMgmt", "enumerateDevices"),
key="devicename",
vals=[
{"name": "devicename"},
{"name": "canonicaldevicefile"},
{"name": "size", "default": "unknown"},
{"name": "israid", "type": "bool", "default": False},
{"name": "isroot", "type": "bool", "default": False},
],
ensure_vals=[
{"name": "devicemodel", "default": "unknown"},
{"name": "serialnumber", "default": "unknown"},
{"name": "firmwareversion", "default": "unknown"},
{"name": "sectorsize", "default": "unknown"},
{"name": "rotationrate", "default": "unknown"},
{"name": "writecacheis", "default": "unknown"},
{"name": "smartsupportis", "default": "unknown"},
{"name": "Raw_Read_Error_Rate", "default": "unknown"},
{"name": "Spin_Up_Time", "default": "unknown"},
{"name": "Start_Stop_Count", "default": "unknown"},
{"name": "Reallocated_Sector_Ct", "default": "unknown"},
{"name": "Seek_Error_Rate", "default": "unknown"},
{"name": "Load_Cycle_Count", "default": "unknown"},
{"name": "Temperature_Celsius", "default": "unknown"},
{"name": "UDMA_CRC_Error_Count", "default": "unknown"},
{"name": "Multi_Zone_Error_Rate", "default": "unknown"},
],
)
for uid in self.data["disk"]:
tmp_data = parse_api(
data={},
source=self.api.query(
"Smart",
"getInformation",
{"devicefile": self.data["disk"][uid]["canonicaldevicefile"]},
),
vals=[
{"name": "devicemodel", "default": "unknown"},
{"name": "serialnumber", "default": "unknown"},
{"name": "firmwareversion", "default": "unknown"},
{"name": "sectorsize", "default": "unknown"},
{"name": "rotationrate", "default": "unknown"},
{"name": "writecacheis", "type": "bool", "default": False},
{"name": "smartsupportis", "type": "bool", "default": False},
],
)
if not tmp_data:
continue
self.data["disk"][uid]["devicemodel"] = tmp_data["devicemodel"]
self.data["disk"][uid]["serialnumber"] = tmp_data["serialnumber"]
self.data["disk"][uid]["firmwareversion"] = tmp_data["firmwareversion"]
self.data["disk"][uid]["sectorsize"] = tmp_data["sectorsize"]
self.data["disk"][uid]["rotationrate"] = tmp_data["rotationrate"]
self.data["disk"][uid]["writecacheis"] = tmp_data["writecacheis"]
self.data["disk"][uid]["smartsupportis"] = tmp_data["smartsupportis"]
# ---------------------------
# get_smart
# ---------------------------
# ---------------------------
# get_fs
# ---------------------------
def get_fs(self):
"""Get all filesystems from OMV."""
self.data["fs"] = parse_api(
data=self.data["fs"],
source=self.api.query("FileSystemMgmt", "enumerateFilesystems"),
key="uuid",
vals=[
{"name": "uuid"},
{"name": "parentdevicefile", "default": "unknown"},
{"name": "label", "default": "unknown"},
{"name": "type", "default": "unknown"},
{"name": "mountpoint", "default": "unknown"},
{"name": "available", "default": "unknown"},
{"name": "size", "default": "unknown"},
{"name": "percentage", "default": "unknown"},
{"name": "_readonly", "type": "bool", "default": False},
{"name": "_used", "type": "bool", "default": False},
],
skip=[{"name": "type", "value": "swap"}],
)
for uid in self.data["fs"]:
self.data["fs"][uid]["size"] = round(
int(self.data["fs"][uid]["size"]) / 1073741824, 1
)
self.data["fs"][uid]["available"] = round(
int(self.data["fs"][uid]["available"]) / 1073741824, 1
)
# ---------------------------
# get_service
# ---------------------------
# def get_service(self):
# """Get OMV services status"""
# self.data["service"] = parse_api(
# data=self.data["service"],
# source=self.api.query("Services", "getStatus"),
# key="name",
# vals=[
# {"name": "name"},
# {"name": "title", "default": "unknown"},
# {"name": "enabled", "type": "bool", "default": False},
# {"name": "running", "type": "bool", "default": False},
# ],
# )
| [
37811,
11505,
13152,
53,
1721,
22741,
526,
15931,
198,
198,
11748,
30351,
952,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
6738,
1363,
562,
10167,
13,
9979,
1330,
357,
198,
220,
220,
220,
7102,
37,
62,
39,
10... | 2.088696 | 5,423 |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
76,
29487,
18,
67,
1330,
12176,
274,
18,
35,
198,
6738,
2603,
29487,
8019,
1330,
12067,
628,
6... | 2.790698 | 43 |
from math import log
import operator
# ่ฎก็ฎ้ฆๅ็ต
# axis ่กจ็คบ็ฌฌๅ ๅ็็บฌๅบฆindex ๏ผvalue่กจ็คบ่ฟไธช็บฌๅบฆ็ๅผไฝไธบๅ็
# ๅคๆฐ่กจๅณ
# ๅๅปบๆ ็ๅฝๆฐไปฃ็
| [
6738,
10688,
1330,
2604,
198,
11748,
10088,
198,
198,
2,
5525,
106,
94,
163,
106,
245,
165,
99,
247,
37863,
250,
163,
228,
113,
628,
198,
2,
16488,
5525,
94,
101,
163,
97,
118,
163,
105,
105,
49035,
254,
26344,
245,
21410,
163,
11... | 1.008772 | 114 |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 20:00:44 2021
@author: qizhe
"""
if __name__ == '__main__':
solu = Solution()
input_List = [[5,1,9,11],[2,4,8,10],[13,3,6,7],[15,14,12,16]]
input_List = [[0,1,2,3],[4,5,6,7],[8,9,10,11],[12,13,14,15]]
# input_List = 1
result = solu.solveNQueens(5)
output_Str = ' result = ' + str(result)
print(output_Str)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
2556,
1467,
1160,
25,
405,
25,
2598,
33448,
198,
198,
31,
9800,
25,
10662,
528,
258,
198,
37811,
198,
361,
11593,
3672,
834,
6624,
705,
8... | 1.969849 | 199 |
import h5py
import numpy as np
'''
if 't1.'
i = 0
seq_name = 't1'
elif 't2.' in imagefile:
i = 1
seq_name = 't2'
elif 't1ce.' in imagefile:
i = 2
seq_name = 't1ce'
elif 'flair.' in imagefile:
i = 3
seq_name = 'flair'
'''
import platform
# to make the code portable even on cedar,you need to add conditions here
node_name = platform.node()
if node_name == 'XPS15':
# this is my laptop, so the cedar-rm directory is at a different place
mount_path_prefix = '/home/anmol/'
hdf5_filepath = mount_path_prefix + 'BRATS_Combined.h5'
save_path = '/home/anmol/mounts/cedar-rm/scratch/asa224/Datasets/BRATS2018/mm_synthesis/validation_data/'
elif 'computecanada' in node_name: # we're in compute canada, maybe in an interactive node, or a scheduler node.
hdf5_filepath = '/scratch/asa224/asa224/Datasets/BRATS2018/HDF5_Datasets/' + 'BRATS2018.h5'
save_path = "/scratch/asa224/asa224/Datasets/BRATS2018/mm_synthesis/validation_data/"
hdf5_file = h5py.File(hdf5_filepath, 'r')
hf = hdf5_file['original_data']
hgg_data = hf['validation_data']
pat_names = hf['validation_data_pat_name'][:]
#save the patient names first
np.save(open(save_path + 'pat_names_validation.npz', 'wb'), pat_names)
t1 = hgg_data[:,0,...]
t1 = np.swapaxes(t1, 3, 2)
t1 = np.swapaxes(t1, 2, 1)
np.save(open(save_path + 'T1.npz', 'wb'), t1)
del t1
t2 = hgg_data[:,1,...]
t2 = np.swapaxes(t2, 3, 2)
t2 = np.swapaxes(t2, 2, 1)
np.save(open(save_path + 'T2.npz', 'wb'), t2)
del t2
t1ce = hgg_data[:,2,...]
t1ce = np.swapaxes(t1ce, 3, 2)
t1ce = np.swapaxes(t1ce, 2, 1)
np.save(open(save_path + 'T1CE.npz', 'wb'), t1ce)
del t1ce
t2flair = hgg_data[:,3,...]
t2flair = np.swapaxes(t2flair, 3, 2)
t2flair = np.swapaxes(t2flair, 2, 1)
np.save(open(save_path + 'T2FLAIR.npz', 'wb'), t2flair)
del t2flair
print('Done!')
| [
11748,
289,
20,
9078,
198,
11748,
299,
32152,
355,
45941,
198,
198,
7061,
6,
198,
361,
705,
83,
16,
2637,
220,
198,
220,
220,
220,
1312,
796,
657,
198,
220,
220,
220,
33756,
62,
3672,
796,
705,
83,
16,
6,
198,
417,
361,
705,
83,... | 2.12877 | 862 |
import numpy as np
import pyspawn
pyspawn.import_methods.into_hessian(pyspawn.potential.terachem_cas)
hess = pyspawn.hessian()
ndims = 18
istate = 0
pos = np.asarray([ 0.000000000, 0.000000000, 0.101944554,
0.000000000, 0.000000000, 2.598055446,
0.000000000, 1.743557978, 3.672987826,
0.000000000, -1.743557978, 3.672987826,
0.000000000, 1.743557978, -0.972987826,
0.000000000, -1.743557978, -0.972987826])
dr = 0.001
atoms = ['C', 'C', 'H', 'H', 'H', 'H']
tc_options = {
"method": 'hf',
"basis": '6-31g**',
"atoms": atoms,
"charge": 0,
"spinmult": 1,
"closed_shell": True,
"restricted": True,
"precision": "double",
"threall": 1.0e-20,
}
hess.set_numdims(ndims)
hess.set_istate(istate)
hess.set_positions(pos)
hess.set_tc_options(tc_options)
hess.build_hessian_hdf5_semianalytical(dr)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
279,
893,
79,
3832,
220,
220,
220,
220,
220,
220,
220,
220,
198,
198,
79,
893,
79,
3832,
13,
11748,
62,
24396,
82,
13,
20424,
62,
33979,
666,
7,
79,
893,
79,
3832,
13,
13059,
1843,
13,
... | 1.733668 | 597 |
import sys
import os
from setuptools import setup
from setuptools.command.test import test as TestCommand
import conch
if __name__ == '__main__':
setup(name='conch-sounds',
version=conch.__version__,
description='Analyze acoustic similarity in Python',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Text Processing :: Linguistic',
],
keywords='phonetics, acoustics similarity',
url='https://github.com/mmcauliffe/Conch',
download_url='https://github.com/mmcauliffe/Conch/tarball/{}'.format(
conch.__version__),
author='Michael McAuliffe',
author_email='michael.e.mcauliffe@gmail.com',
packages=['conch',
'conch.analysis',
'conch.analysis.amplitude_envelopes',
'conch.analysis.formants',
'conch.analysis.intensity',
'conch.analysis.mfcc',
'conch.analysis.pitch',
'conch.distance'],
package_data={'conch.analysis.pitch': ['*.praat'],
'conch.analysis.formants': ['*.praat'],
'conch.analysis.intensity': ['*.praat'],
'conch.analysis.mfcc': ['*.praat']},
install_requires=[
'numpy',
'scipy',
'praatio ~= 5.0',
'librosa',
'pyraat'
],
cmdclass={'test': PyTest},
extras_require={
'testing': ['pytest'],
}
)
| [
11748,
25064,
198,
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
900,
37623,
10141,
13,
21812,
13,
9288,
1330,
1332,
355,
6208,
21575,
198,
198,
11748,
369,
354,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,... | 1.885685 | 971 |
import aioreactive as rx
from aioreactive import AsyncAnonymousObserver
from aioreactive.subject import AsyncSubject
from ..utils import dropargs, asyncinit
@asyncinit | [
11748,
257,
72,
382,
5275,
355,
374,
87,
198,
6738,
257,
72,
382,
5275,
1330,
1081,
13361,
20660,
31310,
18497,
198,
6738,
257,
72,
382,
5275,
13,
32796,
1330,
1081,
13361,
19776,
198,
198,
6738,
11485,
26791,
1330,
4268,
22046,
11,
3... | 3.44898 | 49 |
#!/usr/bin/python
# Layer 7 Router
#
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import json, re, os, subprocess
PORT_NUMBER = 8080
# Writes config for a given service
globaladdons = {
'example': [
'ec2-52-16-13-243.eu-west-1.compute.amazonaws.com',
],
}
for name, machines in globaladdons.iteritems():
data = {
'name': name,
'machines': machines,
}
writeservice('addon', data)
writedefaultvcl()
#This class will handles any incoming request from
#the browser
try:
#Create a web server and define the handler to manage the
#incoming request
server = HTTPServer(('', PORT_NUMBER), myHandler)
print 'Started httpserver on port ' , PORT_NUMBER
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
34398,
767,
48538,
198,
2,
198,
6738,
7308,
6535,
28820,
18497,
1330,
7308,
40717,
18453,
25060,
11,
6535,
28820,
18497,
198,
11748,
33918,
11,
302,
11,
28686,
11,
850,
14681,
628,
198,
15... | 2.882979 | 282 |
from pypred import ast, compact
| [
6738,
279,
4464,
445,
1330,
6468,
11,
16001,
628
] | 3.666667 | 9 |
#!/usr/bin/env python
""" Build CLI help pages to RST for dynamic inclusion of help messages
This solves the problem of not being able to install YATSM on readthedocs
because of its complicated dependencies without the need to mock out
basically every import. Just run this script before pushing any new changes
to the documentation to make sure the ``yatsm [subcommand] --help`` usage
is up to date.
"""
from contextlib import contextmanager
import errno
import os
import subprocess
import sys
import click
import click_plugins
from yatsm.cli.main import cli as yatsm_cli
# Add YATSM to sys.path
here = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(here, '..'))
@contextmanager
def redirect_stdout(stream):
""" Redirect stdout to file to capture click's printouts
NOTE:
Available as contextlib.redirect_stdout in Python 3.4, but
re-coded here for compatibility with Python 2.7.
See https://bugs.python.org/issue15805
"""
old_stream = sys.stdout
sys.stdout = stream
try:
yield
finally:
sys.stdout = old_stream
if __name__ == '__main__':
help_docs_dst = make_destination()
# CLICK COMMAND LINE
for cmd in [yatsm_cli] + yatsm_cli.commands.values():
if isinstance(cmd, click_plugins.core.BrokenCommand):
continue
name = 'yatsm {}'.format(cmd.name) if cmd.name != 'cli' else 'yatsm'
dst = os.path.join(help_docs_dst,
'{}.txt'.format(name.replace(' ', '_')))
cmd_help_to_rst(cmd, dst, name)
# SCRIPTS IN yatsm/scripts
script_dir = os.path.join(here, '..', 'scripts')
os.environ['PATH'] += '{sep}{path}'.format(sep=os.pathsep, path=script_dir)
for script in os.listdir(script_dir):
script_name = os.path.splitext(script)[0]
dst = os.path.join(help_docs_dst, '{}.txt'.format(script_name))
with open(dst, 'w') as fid:
fid.write('$ {} -h\n'.format(script))
fid.flush()
subprocess.Popen([script, '-h'], stdout=fid).communicate()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
10934,
43749,
1037,
5468,
284,
371,
2257,
329,
8925,
14900,
286,
1037,
6218,
198,
198,
1212,
39107,
262,
1917,
286,
407,
852,
1498,
284,
2721,
575,
1404,
12310,
319,
1100,
83,
7... | 2.489183 | 832 |
try:
foobar
except: #"catch all", highly discourage due to not being able to identify what went wrong
print("PROBLEM")
print("after the try") | [
28311,
25,
198,
220,
220,
220,
11511,
30973,
198,
16341,
25,
220,
1303,
1,
40198,
477,
1600,
4047,
27518,
2233,
284,
407,
852,
1498,
284,
5911,
644,
1816,
2642,
220,
198,
220,
220,
220,
3601,
7203,
4805,
9864,
2538,
44,
4943,
198,
4... | 3.145833 | 48 |
import re
import string
# obtained and modified from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/python/ops/special_math_ops.py#L311
def einsum_parse_and_resolve_equation(equation, input_shapes):
"""Helper for einsum() that splits/resolves inputs & outputs.
Args:
equation: Equation string given as argument to einsum().
input_shapes: List of the shapes of all inputs given to einsum()
Returns:
input_axis_labels, output_axis_labels where:
input_axis_labels: List of length len(input_shapes) of strings
representing the character label for each dimension of each given input,
resolving any broadcast (...) axes,
output_axis_labels: A string of character labels for each axes of output
tensor, filling in missing output subscripts and broadcast axes.
Raises:
ValueError: If equation is in the uncorrect format, incorrect number of
inputs given or broadcast axes "..." or output axes could not be resolved.
"""
equation = equation.replace(' ', '')
match = re.match('^([a-zA-Z,.]+)(->[a-zA-Z.]*)?$', equation)
if not match:
raise ValueError('Indices have incorrect format: %s' % equation)
input_axis_labels = match.group(1).split(',')
output_axis_labels = match.group(2)[2:] if match.group(2) else None
if len(input_shapes) != len(input_axis_labels):
raise ValueError('Got %d arguments for equation "%s", expecting %d' %
(len(input_shapes), equation, len(input_axis_labels)))
# Resolve Ellipsis
# Assign axes labels for unspecified dimensions in inputs. Labels taken
# from unused labels. Follow numpy einsum broadcasting conventions for
# tensors of different length and unlabeled output.
ellipsis_axes = ''
if '...' in equation:
unused = ''.join([c for c in string.ascii_letters
if c not in ''.join(input_axis_labels)])
for i, ax in enumerate(input_axis_labels):
if '...' in ax:
parts = ax.split('...')
if len(parts) != 2:
raise ValueError(
'Unable to resolve ellipsis. Excess number found.')
n = len(input_shapes[i]) - len(''.join(parts))
if n < 0:
raise ValueError('Ellipses lengths do not match.')
if len(unused) < n:
raise ValueError(
'Unable to resolve ellipsis, too many distinct labels.')
replace_axes = unused[-n:] if n > 0 else ''
input_axis_labels[i] = input_axis_labels[i].replace(
'...', replace_axes)
if len(replace_axes) > len(ellipsis_axes):
ellipsis_axes = replace_axes
if any(['.' in ax for ax in input_axis_labels]):
raise ValueError('period "." found outside of ellipsis')
if output_axis_labels is not None:
output_axis_labels = output_axis_labels.replace(
'...', ellipsis_axes)
if '.' in output_axis_labels:
raise ValueError('period "." found outside of ellipsis')
if output_axis_labels is None:
# infer the output subscripts if not given, assume alphabetical order,
# but always place ellipsis axes before given.
axis_labels = set(''.join(input_axis_labels)) - set(ellipsis_axes)
indices = ''.join(sorted(axis_labels))
counts = {ax: 0 for ax in indices}
for axes_ in input_axis_labels:
for ax in axes_:
if ax not in ellipsis_axes:
counts[ax] += 1
output_axis_labels = ellipsis_axes + ''.join(
sorted(ax for ax in axis_labels if counts[ax] == 1))
return input_axis_labels, output_axis_labels
| [
11748,
302,
198,
11748,
4731,
198,
198,
2,
6492,
290,
9518,
422,
198,
2,
3740,
1378,
12567,
13,
785,
14,
83,
22854,
11125,
14,
83,
22854,
11125,
14,
2436,
672,
14,
36993,
67,
21,
68,
891,
22,
68,
6420,
64,
21,
64,
22,
32321,
66,... | 2.284457 | 1,705 |
๏ปฟ#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rhinoscriptsyntax as rs
import Rhino
import os
color_palette = {"cut":(153,204,255),"plunge":(254,184,0),"point":(153,204,255)}
LAYER_NAME = "vector_from_gcode"
if __name__=="__main__":
main() | [
171,
119,
123,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
9529,
11996,
6519,
1837,
41641,
355,
44608,
198,
11748,
47759,
198,
11748,
28686,
198,
8043,
... | 2.20339 | 118 |
# A dictionary whose values are themselves dictionaries: each username
# maps to a small profile record.
users = {
    'aeinstein': {
        'first': 'albert',
        'last': 'einstein',
        'location': 'princeton',
    },
    'mcurie': {
        'first': 'mary',
        'last': 'curie',
        'location': 'paris',
    },
}

# Report each stored user, title-casing the name and location for display.
for name, profile in users.items():
    print(f'Username: {name}')
    full_name = f"{profile['first']} {profile['last']}"
    location = profile['location']
    print(f'Full name:{full_name.title()}')
    print(f'Location:{location.title()}')
2,
32,
22155,
287,
257,
28261,
201,
198,
18417,
796,
1391,
201,
198,
197,
6,
3609,
11962,
10354,
90,
201,
198,
197,
197,
6,
11085,
10354,
6,
282,
4835,
3256,
201,
198,
197,
197,
6,
12957,
10354,
6,
68,
11962,
3256,
201,
198,
197,
... | 2.288557 | 201 |
import os
import sys
try:
import tkinter
import time
import yaml
from tkinter import messagebox
from utils.serial_comm import SerialIface
from datetime import datetime
from utils.test_utils import util_logger
from matplotlib import pyplot as plt
from robot.libraries.BuiltIn import BuiltIn
import shutil
import inspect
import logging
import easygui
import subprocess
from utils import instr_lib
from utils.cli_map import CLIMap
from utils import cli_map
import threading
except Exception as e:
print("Import Exception! Details:", e)
# Adding CLI destination path to sys path in order to import the module
# curr_dir = os.path.join(os.path.abspath(__file__), '../')
# cli_dir = os.path.join(curr_dir, '../adi_study_watch/cli/m2m2/tools')
# sys.path.insert(0, cli_dir)
# import CLI
# from adi_study_watch_cli import CLI
# **********************************************************************
# Initializing TkInter for showing dialog pop ups
root = tkinter.Tk()
root.withdraw()  # hide the root window; only message boxes are shown
# **********************************************************************
# ********************** Test Variables ********************************
# Port / instrument-address globals below are filled in from the station
# config yaml by read_station_cfg().
arduino_port = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port_ble = None # This variable will be updated from station config file [read_station_Cfg()]
fg_instr_addr = None # This variable will be updated from station config file [read_station_Cfg()]
watch_port_type = None # This variable will be updated from station config file [read_station_Cfg()]
sm_instr_addr = None # This variable will be updated from station config file [read_station_Cfg()]
# Handles populated by initialize_setup()/helpers at runtime.
arduino = None
watch_shell = None
ts_mismatch_tolerance = None
fg, sm = None, None
matlab_eng = None
pcb_name_default = 'A1H1'
shared_drive_path = r'\\wilmnas4\Local Programs\FDWatch_TestData\Data_Testsetup\DVT1_Test_Results'
# Default csv file names produced by the various streams.
ecg_stream_file_name = 'ecg.csv'
bia_stream_file_name = "bia.csv"
ppg_stream_file_name = 'ppg.csv'
syncppg_stream_file_name = 'sync_ppg.csv'
adxl_stream_file_name = 'adxl.csv'
temperature_stream_file_name = 'temp.csv'
adpd_stream_file_name = 'adpd6.csv'
eda_stream_file_name = 'eda.csv'
volt_scale_range = (0, 5)  # (min, max) volts of the signal source
# The switch map dictionary maps the various switches to the arduino digital pins (24-42)
switch_map = {'SNOISE1': 22, 'SNOISE2': 23, 'ECG_NEGSEL': 24, 'ECG_POSSEL': 25}
close_plot_mode_global = True
test_report_dir = None
copy_results_to_shared_drive = True
save_plots = False
DVT_version = None  # 0 for DVT1, 1 for DVT2; set by update_dvt_version()
adpd_clk_calib = None
cm = None # CLI Map
ble_mac_addr = None
current_watch_mode = None
test_level_handeler = 0
# **********************************************************************
# ********************* Configure Logging ******************************
test_logger = logging.getLogger('test_logger')
logging_format = "[%(levelname)s] : %(message)s"
date_str = "%m/%d/%Y %I:%M:%S %p"
logger_formatter = logging.Formatter(logging_format, date_str)
test_stream_handler = logging.StreamHandler()
test_logger.setLevel(logging.INFO)
test_logger.addHandler(test_stream_handler)
# logging_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
# logging.basicConfig(# filename='output.log',
#                     level=logging.INFO,
#                     # filemode='w',
#                     format=logging_format,
#                     datefmt=date_str)
# **********************************************************************
# ********************* Common Functions *******************************
class ConditionCheckFailure(RuntimeError):
    """Failure raised from test cases for Robot Framework.

    Raising this (instead of a plain RuntimeError) lets Robot Framework
    record the failure and still continue with the next test case, which
    is controlled by the class attribute below.
    """

    # Tells Robot Framework to keep executing subsequent test cases.
    ROBOT_CONTINUE_ON_FAILURE = True
def update_robot_suite_doc(doc):
    """Set the Robot Framework suite documentation to *doc*.

    Outside of a Robot Framework run, BuiltIn() raises; in that case the
    update is skipped with a warning instead of failing the caller.

    :param doc: documentation string for the current robot suite
    :return: None
    """
    try:
        BuiltIn().set_suite_documentation(doc)
    except Exception:
        # Logger.warn is deprecated; warning() is the supported spelling.
        test_logger.warning('Skipping robot documentation update!')
def write_analysis_report(result_dict, report_file=None, header='Analysis Section', append_report=False):
    """Write a key/value analysis report to a text file.

    Each entry of *result_dict* is written as '<key> = <value>' under a
    '<<< header >>>' section line, followed by a blank separator line.

    :param result_dict: dict of result name -> value pairs to write
    :param report_file: output path; defaults to 'analysis_report.txt'
    :param header: section header, written as '<<< header >>>'
    :param append_report: append to an existing report instead of overwriting
    :return: absolute path of the report file
    """
    report_file = report_file or 'analysis_report.txt'
    file_mode = 'a' if append_report else 'w'
    with open(report_file, file_mode) as f_ref:
        f_ref.write('<<< {} >>>\n'.format(header))
        for k, v in result_dict.items():
            f_ref.write('{} = {}\n'.format(k, v))
        # Blank separator line.  The original passed 'header' to a
        # placeholder-free format string, which was a silent no-op.
        f_ref.write('\n')
    return os.path.abspath(report_file)
def analyze_wfm(file_path, mode='ecg', file_mode='cli', gen_filtered_ppg='1'):
    """Run the BioSigProcess LabVIEW app on a waveform csv and collect results.

    The app writes '<mode>_extracted_results.txt' next to the input file;
    each line is parsed as '<name> - <value>' into the returned dict.

    :param file_path: waveform *.csv file path
    :param mode: 'ecg' | 'ppg'
    :param file_mode: 'cli' | 'awt'
    :param gen_filtered_ppg: '1' to also generate filtered ppg output
    :return: dict of extracted result name -> value, or None when the
        input file is missing
    """
    if not os.path.isfile(file_path):
        # Logger.warn is deprecated; warning() is the supported spelling.
        test_logger.warning('Input File not found! {}'.format(file_path))
        return None
    results_dict = {}
    subprocess.call(['utils/lv/builds/bio_sig_process/BioSigProcess.exe',
                     mode, file_path, file_mode, gen_filtered_ppg])
    time.sleep(2)  # allow the app to finish writing the results file
    result_file_path = os.path.join(file_path, '../{}_extracted_results.txt'.format(mode))
    with open(result_file_path, 'r') as f_ref:
        for line in f_ref.readlines():
            result_list = line.split(' - ')
            results_dict[result_list[0]] = result_list[1].strip()
    return results_dict
def quick_start_ecg(samp_freq_hz=100):
    """Set the ECG sampling rate (lcfg address 0) and start the ECG stream.

    :param samp_freq_hz: sampling frequency in Hz (written as hex)
    :return: None
    """
    watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
    watch_shell.quick_start('ecg', 'ecg')
def quick_start_bia(samp_freq_hz=100):
    """Set the BIA sampling rate (lcfg address 0) and start the BIA stream.

    :param samp_freq_hz: sampling frequency in Hz (written as hex)
    :return: None
    """
    watch_shell.do_lcfg('w bia 0:{}'.format(hex(samp_freq_hz)))
    watch_shell.quick_start('bia', 'bia')
def set_ecg_stream_freq(samp_freq_hz=100):
    """Set the ECG sampling rate without starting the stream.

    :param samp_freq_hz: sampling frequency in Hz (written as hex)
    :return: None
    """
    watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
def set_eda_stream_freq(samp_freq_hz=4):
    """Set the EDA sampling rate without starting the stream.

    Also writes lcfg address 0x2 depending on the rate (0x2 for rates up
    to 16 Hz, 0x1 above) -- presumably a decimation/mode setting; confirm
    against the EDA lcfg documentation.

    :param samp_freq_hz: sampling frequency in Hz (written as hex)
    :return: None
    """
    watch_shell.do_lcfg('w eda 0:{}'.format(hex(samp_freq_hz)))
    if samp_freq_hz <= 16:
        watch_shell.do_lcfg("w eda 0x2:0x2")
    else:
        watch_shell.do_lcfg("w eda 0x2:0x1")
def quick_start_eda(samp_freq_hz=4):
    """Optionally set the EDA sampling rate, then start the EDA stream.

    :param samp_freq_hz: sampling frequency in Hz; falsy values (0/None)
        leave the current rate unchanged
    :return: None
    """
    if samp_freq_hz:
        set_eda_stream_freq(samp_freq_hz)
    watch_shell.quick_start('eda', 'eda')
def quick_start_eda_fs(samp_freq_hz=4):
    """Start the EDA stream logged to the watch file system.

    :param samp_freq_hz: sampling frequency in Hz; falsy values (0/None)
        leave the current rate unchanged
    :return: None
    """
    if samp_freq_hz:
        set_eda_stream_freq(samp_freq_hz)
    watch_shell.quick_start("eda", "eda", fs=True)
    watch_shell.do_start_logging("")
def quick_start_bia_fs(samp_freq_hz=4):
    """Set the BIA rate and start the BIA stream logged to the file system.

    :param samp_freq_hz: sampling frequency in Hz (written as hex)
    :return: None
    """
    watch_shell.do_lcfg('w bia 0:{}'.format(hex(samp_freq_hz)))
    watch_shell.quick_start("bia", "bia", fs=True)
    watch_shell.do_start_logging("")
def quick_start_ecg_fs(samp_freq_hz=4):
    """Set the ECG rate and start the ECG stream logged to the file system.

    :param samp_freq_hz: sampling frequency in Hz (written as hex)
    :return: None
    """
    watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
    watch_shell.quick_start("ecg", "ecg", fs=True)
    watch_shell.do_start_logging("")
def quick_start_adpd_fs(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
    """Configure the ADPD for the given LED and start it logged to flash.

    Loads the ADPD device config and calibrates the clock (unless
    *skip_load_cfg*), toggles AGC, programs the sampling rate and starts
    the stream with file-system logging enabled.

    :param samp_freq_hz: 50 | 100 | 500 (anything else raises)
    :param agc_state: 1 to enable AGC, 0 to disable
    :param led: 'G' | 'R' | 'IR' | 'B' | 'MWL' (case-insensitive)
    :param skip_load_cfg: skip load_adpd_cfg + clock calibration
    :return: None
    :raises RuntimeError: for unsupported sampling frequencies
    """
    # Per-LED config: adpd cfg id, clock-calibration arg, stream sub-slot
    # and AGC control id.
    cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
                'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
                'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
                'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'},
                'MWL': {'adpd_cfg': '5', 'clk_calib': adpd_clk_calib, 'sub': '10', 'agc_ctrl_id': '5'}}
    led = led.upper()
    if not skip_load_cfg:
        watch_shell.do_load_adpd_cfg("1")
        watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
    if agc_state:
        watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
    else:
        watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
    # ADPD register 0xD values map to 50/100/500 Hz respectively.
    if samp_freq_hz == 50:
        watch_shell.do_reg("w adpd 0xD:0x4e20")
    elif samp_freq_hz == 100:
        watch_shell.do_reg("w adpd 0xD:0x2710")
    elif samp_freq_hz == 500:
        watch_shell.do_reg("w adpd 0xD:0x07D0")
    else:
        raise RuntimeError("Sampling Frequency Not Supported!")
    watch_shell.quick_start("adpd", "adpd{}".format(cfg_dict[led]['sub']), fs=True)
    watch_shell.do_start_logging("")
def config_adpd_stream(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
    """Configure the ADPD for the given LED without starting the stream.

    Same configuration steps as quick_start_adpd_fs() (cfg load, clock
    calibration, AGC, sampling rate) but leaves the stream stopped.

    :param samp_freq_hz: 50 | 100 | 500 (anything else raises)
    :param agc_state: 1 to enable AGC, 0 to disable
    :param led: 'G' | 'R' | 'IR' | 'B' | 'MWL' (case-insensitive)
    :param skip_load_cfg: skip load_adpd_cfg + clock calibration
    :return: None
    :raises RuntimeError: for unsupported sampling frequencies
    """
    cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
                'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
                'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
                'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'},
                'MWL': {'adpd_cfg': '5', 'clk_calib': adpd_clk_calib, 'sub': '10', 'agc_ctrl_id': '5'}}
    led = led.upper()
    if not skip_load_cfg:
        watch_shell.do_load_adpd_cfg("1")
        watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
    if agc_state:
        watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
    else:
        watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
    # ADPD register 0xD values map to 50/100/500 Hz respectively.
    if samp_freq_hz == 50:
        watch_shell.do_reg("w adpd 0xD:0x4e20")
    elif samp_freq_hz == 100:
        watch_shell.do_reg("w adpd 0xD:0x2710")
    elif samp_freq_hz == 500:
        watch_shell.do_reg("w adpd 0xD:0x07D0")
    else:
        raise RuntimeError("Sampling Frequency Not Supported!")
def quick_start_adxl(samp_freq_hz=100):
    """Start the ADXL stream, then program its sampling frequency.

    NOTE(review): unlike the adpd helpers, the stream is started *before*
    the rate register is written -- confirm this ordering is intentional.

    :param samp_freq_hz: 12.5 | 25 | 50 | 100 | 200 | 400 Hz
    :return: None
    :raises RuntimeError: for unsupported sampling frequencies
    """
    watch_shell.quick_start("adxl", "adxl")
    # ADXL register 0x2C low nibble selects the output data rate.
    if samp_freq_hz == 12.5:
        watch_shell.do_reg("w adxl 0x2C:0x98")
    elif samp_freq_hz == 25:
        watch_shell.do_reg("w adxl 0x2C:0x99")
    elif samp_freq_hz == 50:
        watch_shell.do_reg("w adxl 0x2C:0x9A")
    elif samp_freq_hz == 100:
        watch_shell.do_reg("w adxl 0x2C:0x9B")
    elif samp_freq_hz == 200:
        watch_shell.do_reg("w adxl 0x2C:0x9C")
    elif samp_freq_hz == 400:
        watch_shell.do_reg("w adxl 0x2C:0x9F")
    else:
        raise RuntimeError("Sampling Frequency Not Supported!")
def set_adxl_stream_freq(samp_freq_hz=100):
    """Program the ADXL sampling frequency without starting the stream.

    :param samp_freq_hz: 12.5 | 25 | 50 | 100 | 200 | 400 Hz
    :return: None
    :raises RuntimeError: for unsupported sampling frequencies
    """
    # ADXL register 0x2C low nibble selects the output data rate.
    if samp_freq_hz == 12.5:
        watch_shell.do_reg("w adxl 0x2C:0x98")
    elif samp_freq_hz == 25:
        watch_shell.do_reg("w adxl 0x2C:0x99")
    elif samp_freq_hz == 50:
        watch_shell.do_reg("w adxl 0x2C:0x9A")
    elif samp_freq_hz == 100:
        watch_shell.do_reg("w adxl 0x2C:0x9B")
    elif samp_freq_hz == 200:
        watch_shell.do_reg("w adxl 0x2C:0x9C")
    elif samp_freq_hz == 400:
        watch_shell.do_reg("w adxl 0x2C:0x9F")
    else:
        raise RuntimeError("Sampling Frequency Not Supported!")
def quick_start_adpd(samp_freq_hz=50, agc_state=0, led='G', skip_load_cfg=False):
    """Configure the ADPD for the given LED and start live streaming.

    :param samp_freq_hz: 50 | 100 | 500; None leaves the rate unchanged
    :param agc_state: [0 | 1]
    :param led: ['G' | 'R' | 'IR' | 'B'] (case-insensitive; no MWL here)
    :param skip_load_cfg: skip load_adpd_cfg + clock calibration
    :return: stream_file_name ('adpd<sub>.csv') for the selected LED
    :raises RuntimeError: for unsupported sampling frequencies
    """
    cfg_dict = {'G': {'adpd_cfg': '1', 'clk_calib': adpd_clk_calib, 'sub': '6', 'agc_ctrl_id': '1'},
                'R': {'adpd_cfg': '2', 'clk_calib': adpd_clk_calib, 'sub': '7', 'agc_ctrl_id': '2'},
                'IR': {'adpd_cfg': '3', 'clk_calib': adpd_clk_calib, 'sub': '8', 'agc_ctrl_id': '3'},
                'B': {'adpd_cfg': '4', 'clk_calib': adpd_clk_calib, 'sub': '9', 'agc_ctrl_id': '4'}}
    led = led.upper()
    if not skip_load_cfg:
        watch_shell.do_load_adpd_cfg("1")
        watch_shell.do_calibrate_clock(cfg_dict[led]['clk_calib'])
    if agc_state:
        watch_shell.do_enable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
    else:
        watch_shell.do_disable_agc('{}'.format(cfg_dict[led]['agc_ctrl_id']))
    # ADPD register 0xD values map to 50/100/500 Hz respectively.
    if samp_freq_hz == 50:
        watch_shell.do_reg("w adpd 0xD:0x4e20")
    elif samp_freq_hz == 100:
        watch_shell.do_reg("w adpd 0xD:0x2710")
    elif samp_freq_hz == 500:
        watch_shell.do_reg("w adpd 0xD:0x07D0")
    elif samp_freq_hz is None:
        pass
    else:
        raise RuntimeError("Sampling Frequency Not Supported!")
    watch_shell.quick_start('adpd', "adpd{}".format(cfg_dict[led]['sub']))
    stream_file_name = 'adpd{}.csv'.format(cfg_dict[led]['sub'])
    return stream_file_name
def quick_stop_adpd(led='G'):
    """Stop the ADPD stream associated with the given LED.

    :param led: one of 'G', 'R', 'IR', 'B' (case-insensitive)
    :return: None
    """
    # Map each LED colour to its ADPD sub-stream name.
    slot_by_led = {
        'G': 'adpd6',
        'R': 'adpd7',
        'IR': 'adpd8',
        'B': 'adpd9',
    }
    watch_shell.quick_stop('adpd', slot_by_led[led.upper()])
def quick_start_ppg(samp_freq_hz=50, agc_state=0):
    """Configure and start the PPG stream.

    Loads the ADPD config, calibrates the clock, sets the PPG lcfg,
    programs the sampling rate, toggles AGC via ppg lcfg 0x4, then starts
    streaming.  Unsupported rates are silently left unchanged.

    :param samp_freq_hz: 50 | 100 | 500 Hz
    :param agc_state: 1 to enable AGC (lcfg 0x4=0x1210), 0 to disable (0x1010)
    :return: None
    """
    watch_shell.do_load_adpd_cfg("1")
    watch_shell.do_calibrate_clock(adpd_clk_calib)
    watch_shell.do_set_ppg_lcfg("5")
    # ADPD register 0xD values map to 50/100/500 Hz respectively.
    if samp_freq_hz == 50:
        watch_shell.do_reg("w adpd 0xD:0x4e20")
    elif samp_freq_hz == 100:
        watch_shell.do_reg("w adpd 0xD:0x2710")
    elif samp_freq_hz == 500:
        watch_shell.do_reg("w adpd 0xD:0x07D0")
    if agc_state:
        watch_shell.do_lcfg("w ppg 0x4:0x1210")
    else:
        watch_shell.do_lcfg("w ppg 0x4:0x1010")
    watch_shell.quick_start('ppg', 'ppg')
def set_adpd_stream_freq(samp_freq_hz=50):
    """Program the ADPD sampling frequency without starting the stream.

    NOTE(review): unlike the other rate setters, unsupported values are
    silently ignored here rather than raising -- confirm intentional.

    :param samp_freq_hz: 50 | 100 | 500 Hz
    :return: None
    """
    # ADPD register 0xD values map to 50/100/500 Hz respectively.
    if samp_freq_hz == 50:
        watch_shell.do_reg("w adpd 0xD:0x4e20")
    elif samp_freq_hz == 100:
        watch_shell.do_reg("w adpd 0xD:0x2710")
    elif samp_freq_hz == 500:
        watch_shell.do_reg("w adpd 0xD:0x07D0")
def quick_stop_ppg(samp_freq_hz=50):
    """Stop the PPG stream.

    :param samp_freq_hz: unused; kept for signature symmetry with the
        quick-start helpers
    :return: None
    """
    watch_shell.quick_stop('ppg', 'ppg')
# def set_ecg_samp_freq(samp_freq_hz=100):
# """
#
# :param samp_freq_hz:
# :return:
# """
# watch_shell.do_lcfg('w ecg 0:{}'.format(hex(samp_freq_hz)))
def dcb_cfg(mode='w', dev='adxl', file_name=''):
    """Write, read or delete a device configuration block (DCB) on the watch.

    :param mode: 'w'| 'r' | 'd'
    :param dev: 'adpd' | 'adxl' | 'ecg' | 'eda' (also 'bia' for reads)
    :param file_name: '*_dcb_config.cfg'; used only in mode 'w' and looked
        up inside the local 'dcb_cfg' directory
    :return: (err_stat, dcb_cfg_dir) -- err_stat from check_err_stat (1 on
        failure/unknown mode), dcb_cfg_dir the local 'dcb_cfg' path
    :raises RuntimeError: in mode 'w' when the config file is missing
    """
    curr_dir = os.getcwd()
    dcb_cfg_dir = os.path.join(curr_dir, 'dcb_cfg')
    if not os.path.exists(dcb_cfg_dir):
        os.mkdir(dcb_cfg_dir)
        test_logger.warning("DCG Config Dir Not Found! Creating empty directory 'dcb_cfg'")
    if mode == 'w':
        if os.path.exists(os.path.join(dcb_cfg_dir, file_name)) and os.path.isfile(os.path.join(dcb_cfg_dir, file_name)):
            pkt = watch_shell.do_write_dcb('{} {}'.format(dev, os.path.join(dcb_cfg_dir, file_name)))
            if dev == "adpd":
                # adpd DCBs span several packets; stop at the first error.
                for pkt_element in pkt:
                    err_stat = watch_shell.check_err_stat(pkt_element)
                    if err_stat == 1:
                        break
            else:
                err_stat = watch_shell.check_err_stat(pkt)
        else:
            err_stat = 1
            test_logger.error("DCB Config file not found!")
            raise RuntimeError("DCB Config file not found!\n{}".format(os.path.join(dcb_cfg_dir, file_name)))
    elif mode == 'r':
        pkt = watch_shell.do_read_dcb('{}'.format(dev))
        # lcfg-style devices get an .lcfg dump, the rest a .dcfg dump.
        if dev in ["ecg", "eda", "bia"]:
            file_name = r".\dcb_cfg\{}_dcb_get.lcfg".format(dev)
        else:
            file_name = r".\dcb_cfg\{}_dcb_get.dcfg".format(dev)
        with open(file_name, "w") as fs:
            if dev == "adpd":
                for pkt_element in pkt:
                    for index, data in enumerate(pkt_element["payload"]["data"]):
                        # join() below needs strings: a raw int in the
                        # first row is rendered as hex text first.
                        if index == 0 and type(data[0]) is int:
                            convert_2_hex = True
                        else:
                            convert_2_hex = False
                        if convert_2_hex:
                            data = [hex(data[0]), data[1]]
                        fs.write(" ".join(data))
                        fs.write("\n")
                    err_stat = watch_shell.check_err_stat(pkt_element)
            else:
                for index, data in enumerate(pkt["payload"]["data"]):
                    if index == 0 and type(data[0]) is int:
                        convert_2_hex = True
                    else:
                        convert_2_hex = False
                    if convert_2_hex:
                        data = [hex(data[0]), data[1]]
                    fs.write(" ".join(data))
                    fs.write("\n")
                err_stat = watch_shell.check_err_stat(pkt)
        # NOTE(review): the log always says .dcfg even for the .lcfg
        # devices above -- confirm before relying on this message.
        test_logger.info('DCB File Name: dcb_cfg\{}_dcb_get.dcfg'.format(dev))
    elif mode == 'd':
        pkt = watch_shell.do_delete_dcb('{}'.format(dev))
        err_stat = watch_shell.check_err_stat(pkt)
    else:
        err_stat = 1
    return err_stat, dcb_cfg_dir
def plot_and_save_png(f_path, col_idx=1, row_offset=1):
    """Plot one column of a csv file and save the figure as a png.

    The png is written to a 'plots' subdirectory next to the csv file;
    that directory is assumed to exist (init_test_report_dir creates a
    local one -- TODO confirm for other locations).

    :param f_path: csv file path
    :param col_idx: zero-based column index to plot
    :param row_offset: number of header rows to skip
    :return: plot_path of the saved png
    """
    data_list = read_csv_col(f_path, col_idx, row_offset)
    f_name = os.path.splitext(os.path.split(f_path)[-1])[0]
    plot_path = os.path.join(os.path.split(f_path)[0], 'plots', f_name+'.png')
    plt.plot(data_list)
    plt.xlabel('time')
    plt.ylabel('amplitude')
    plt.savefig(plot_path)
    plt.close()  # free the figure so plots don't accumulate
    return plot_path
def update_arduino(in_obj):
    """Replace the module-level ``arduino`` handle.

    Usually called from an initialize/teardown routine with the serial
    interface object, or None to clear it.

    :param in_obj: new value for the global arduino handle
    :return: None
    """
    global arduino
    arduino = in_obj
def update_watch_shell(in_obj):
    """Replace the module-level ``watch_shell`` handle.

    Usually called from an initialize/teardown routine with the CLI map
    object, or None to clear it.

    :param in_obj: new value for the global watch_shell handle
    :return: None
    """
    global watch_shell
    watch_shell = in_obj
def update_dvt_version():
    """Detect the watch hardware revision and set the DVT_version global.

    Reads the ADPD chip id (index 2): 0xc0 means DVT1 (DVT_version = 0),
    anything else is treated as DVT2 (DVT_version = 1).

    :return: None
    """
    global DVT_version
    err_stat, chip_id = watch_shell.get_chip_id("2")  # ADPD chip ID index is 2
    if chip_id == 0xc0:
        test_logger.info("DVT1 Watch Connected")
        DVT_version = 0
    else:
        test_logger.info("DVT2 Watch Connected")
        DVT_version = 1
    # else:
    #     raise RuntimeError("Unknown DVT Watch version ADPD Chip ID-{}".format(str(chip_id)))
def read_station_cfg():
    """Load the station config yaml and update the port/address globals.

    Reads <APPDATA>/watch_test.yaml; if it does not exist it is created
    with default values.  Missing newer keys ('watch_port_ble',
    'ble_mac') only produce a warning, so older config files keep working.

    :return: None
    """
    # Default values
    cfg_dict = {'arduino_port': 'COM7', 'watch_port': 'COM13', 'watch_port_ble': 'COM7',
                'fg_instr_addr': 'USB0::0x0957::0x2C07::MY52802639::0::INSTR',
                'sm_instr_addr': 'GPIB0::23::INSTR', 'watch_port_type': 'USB', 'ble_mac': '6B-28-88-26-52-C3'}
    station_cfg_path = os.path.join(os.getenv('APPDATA'), 'watch_test.yaml')
    if os.path.exists(station_cfg_path) and os.path.isfile(station_cfg_path):
        with open(station_cfg_path, 'r') as f_ref:
            cfg_dict = yaml.load(f_ref, Loader=yaml.FullLoader)
    else:
        # First run on this station: persist the defaults.
        with open(station_cfg_path, 'w') as f_ref:
            yaml.dump(cfg_dict, f_ref)
    missing_keys = []
    global arduino_port, watch_port, watch_port_ble, fg_instr_addr, sm_instr_addr, watch_port_type, ble_mac_addr
    if 'watch_port_ble' not in cfg_dict.keys():
        missing_keys.append("watch_port_ble")
        watch_port_ble = ""
    else:
        watch_port_ble = cfg_dict['watch_port_ble']
    if 'ble_mac' not in cfg_dict.keys():
        missing_keys.append("ble_mac")
        ble_mac_addr = ""
    else:
        ble_mac_addr = cfg_dict['ble_mac']
    if len(missing_keys) != 0:
        test_logger.warning("Please add the {} values in the {} file".format(" and ".join(missing_keys),
                                                                             os.path.join(os.getenv('APPDATA'),
                                                                                          'watch_test.yaml')))
        # raise ConditionCheckFailure("Please add the {} values in the {} file"
        #                             "".format(" and ".join(missing_keys), os.path.join(os.getenv('APPDATA'),
        #                                                                                'watch_test.yaml')))
    arduino_port = cfg_dict['arduino_port']
    watch_port = cfg_dict['watch_port']
    fg_instr_addr = cfg_dict['fg_instr_addr']
    sm_instr_addr = cfg_dict['sm_instr_addr']
    if 'watch_port_type' in cfg_dict.keys():
        watch_port_type = cfg_dict['watch_port_type']
    else:
        watch_port_type = 'USB'
def close_plot_after_run(plot_name_list, close_plot_mode=False):
    """Close plot and cmd windows that the test opened (Windows taskkill).

    Windows are only closed when either the *close_plot_mode* argument or
    the module-level close_plot_mode_global flag is set.

    :param plot_name_list: list of plot window title strings
    :param close_plot_mode: force closing regardless of the global flag
    :return: None
    """
    if close_plot_mode or close_plot_mode_global:
        for plot_name in plot_name_list:
            os.system('taskkill /fi "WINDOWTITLE eq {}"'.format(plot_name))
            time.sleep(0.25)
        # Raw string: the Windows path backslashes were previously relying
        # on invalid escape sequences passing through unchanged.
        os.system(r'taskkill /fi "WINDOWTITLE eq C:\WINDOWS\system32\cmd.exe"')
def init_matlab_engine():
    """Import and start the MATLAB engine, adding the matlab_utils path.

    The MATLAB engine package must be installed from
    <matlab_root>/extern/engine/python ("python setup.py install").

    :return: the started matlab engine, or None if it could not be loaded
    """
    global matlab_eng
    try:
        import matlab.engine
        matlab_eng = matlab.engine.start_matlab()
    except Exception:
        # Narrowed from a bare except: still covers ImportError and
        # engine start-up failures without swallowing SystemExit etc.
        print("Error loading MATLAB Engine!")
    if matlab_eng:
        matlab_dir = os.path.join(os.path.abspath('.'), 'utils', 'matlab_utils')
        matlab_eng.addpath(matlab_dir, nargout=0)
    return matlab_eng
def initialize_setup(ts_tolerance=10, com_port="NA", mode="NA", ble_mac="NA", ble_com_port="NA"):
    """Initialize the test setup: CLI shell, watch link and test globals.

    Reads the station config, connects to the watch over USB or BLE,
    creates the time-stamped test report directory, records the firmware
    version in the robot suite doc and refreshes the DVT/clock/tolerance
    globals.  Arduino and instrument setup is currently commented out.

    :param ts_tolerance: timestamp mismatch tolerance passed on to
        update_ts_mismatch_tolerance()
    :param com_port: USB COM port override ("NA" keeps the config value)
    :param mode: 'USB' or 'BLE' override ("NA" keeps the config value)
    :param ble_mac: BLE MAC address override ("NA" keeps the config value)
    :param ble_com_port: BLE COM port override ("NA" keeps the config value)
    :return: None
    :raises RuntimeError: if the watch does not answer get_system_info()
    """
    global fg, sm, cm
    global test_report_dir
    read_station_cfg()
    # Instantiating watch shell
    gui_signaller = cli_map.cli.QtSignaller()
    # threading.Thread(target=cli_map.cli._init_gui, args=(gui_signaller,), daemon=True).start()
    watch_shell_obj = CLIMap(gui_signaller, testing=True)
    if com_port != "NA" and "COM" in com_port:
        global watch_port
        watch_port = com_port
    if ble_com_port != "NA" and "COM" in ble_com_port:
        global watch_port_ble
        watch_port_ble = ble_com_port
    if ble_mac != "NA":
        global ble_mac_addr
        ble_mac_addr = ble_mac
    if mode != "NA":
        global watch_port_type
        watch_port_type = mode
    if watch_port_type == 'USB':
        watch_shell_obj.do_connect_usb('{}'.format(watch_port))
    else:
        watch_shell_obj.do_connect_ble('{} {}'.format(watch_port_ble, ble_mac_addr))
    # cm = CLIMap(watch_shell_obj)
    # Creating test report directory
    err_stat, sys_info_dict = watch_shell_obj.get_system_info()
    if err_stat:
        raise RuntimeError('Unable to communicate with the watch!')
    pcb_name = str(sys_info_dict['mac_addr'])
    if not pcb_name:
        # Fall back to asking the operator when the watch reports no MAC.
        pcb_name = easygui.enterbox('PCB Number:', 'Enter PCB Number')
    test_report_dir = init_test_report_dir(pcb_name)
    test_logger.info('Test Results Directory: {}'.format(test_report_dir))
    err_stat, fw_ver_info_dict = watch_shell_obj.get_version_cli()
    if not err_stat:
        ver_info_str = 'Firmware Version: V{}.{}.{} | Build Info: {}'.format(fw_ver_info_dict['major'],
                                                                             fw_ver_info_dict['minor'],
                                                                             fw_ver_info_dict['patch'],
                                                                             fw_ver_info_dict['build'])
        update_robot_suite_doc(ver_info_str)
    # Instantiating Arduino
    #arduino_obj = SerialIface(port=arduino_port)
    #arduino_obj.serial_write('!CfgIOMap\r')
    # watch_shell_obj.do_toggleSaveCSV('')
    #update_arduino(arduino_obj)
    update_watch_shell(watch_shell_obj)
    # TODO: Enable below code to configure instruments
    #fg = instr_lib.KeysightFG33522B()
    #fg.instr_connect(fg_instr_addr)
    #sm = instr_lib.KeithleySM2400()
    #sm.instr_connect(sm_instr_addr)
    update_dvt_version()
    update_adpd_clock_calibration_value()
    update_ts_mismatch_tolerance(int(ts_tolerance))
def initialize_setup_nk(ts_tolerance=0, usb_com_port="NA", mode="NA",
                        ble_mac="NA", ble_com_port="NA", clear_flash=0, test_level=0, flash_reset=0):
    """Initialize the test setup (NK variant) with optional flash handling.

    Like initialize_setup(), but connects via the shared connect() helper
    and can additionally format/reset the watch flash and set the test
    level.

    :param ts_tolerance: timestamp mismatch tolerance
    :param usb_com_port: USB COM port override ("NA" keeps the config value)
    :param mode: 'USB' or 'BLE' override ("NA" keeps the config value)
    :param ble_mac: BLE MAC address override ("NA" keeps the config value)
    :param ble_com_port: BLE COM port override ("NA" keeps the config value)
    :param clear_flash: truthy -> format the watch file system
    :param test_level: forwarded to test_level_update()
    :param flash_reset: truthy -> reset the watch flash
    :return: None
    :raises RuntimeError: if the watch does not answer get_system_info()
    """
    global test_report_dir
    read_station_cfg()
    # Instantiating watch shell
    gui_signaller = cli_map.cli.QtSignaller()
    # threading.Thread(target=cli_map.cli._init_gui, args=(gui_signaller,), daemon=True).start()
    watch_shell_obj = CLIMap(gui_signaller, testing=True)
    update_watch_shell(watch_shell_obj)
    if usb_com_port != "NA" and "COM" in usb_com_port:
        global watch_port
        watch_port = usb_com_port
    if ble_com_port != "NA" and "COM" in ble_com_port:
        global watch_port_ble
        watch_port_ble = ble_com_port
    if ble_mac != "NA":
        global ble_mac_addr
        ble_mac_addr = ble_mac
    if mode != "NA":
        global watch_port_type
        watch_port_type = mode
    connect(watch_port_type)
    # Creating Test Report Directory
    err_stat, sys_info_dict = watch_shell_obj.get_system_info()
    if err_stat:
        raise RuntimeError('Unable to communicate with the watch!')
    pcb_name = str(sys_info_dict['mac_addr'])
    if not pcb_name:
        pcb_name = easygui.enterbox('PCB Number:', 'Enter PCB Number')
    test_report_dir = init_test_report_dir(pcb_name)
    test_logger.info('Test Results Directory: {}'.format(test_report_dir))
    err_stat, fw_ver_info_dict = watch_shell_obj.get_version_cli()
    if not err_stat:
        ver_info_str = 'Firmware Version: V{}.{}.{} | Build Info: {}'.format(fw_ver_info_dict['major'],
                                                                             fw_ver_info_dict['minor'],
                                                                             fw_ver_info_dict['patch'],
                                                                             fw_ver_info_dict['build'])
        update_robot_suite_doc(ver_info_str)
    update_dvt_version()
    update_adpd_clock_calibration_value()
    update_ts_mismatch_tolerance(int(ts_tolerance))
    if clear_flash:
        watch_shell.do_fs_format('')
    if flash_reset:
        watch_shell.do_flash_reset('')
    test_level_update(test_level)
def init_test_report_dir(pcb_name):
    """Create (if needed) the per-PCB report dir and a time-stamped run dir.

    On the shared drive a '<PCB>_test_result/<timestamp>' directory is
    created and its path persisted to 'output_dir.tmp'.  The local
    'plots' directory is always cleared and recreated.

    :param pcb_name: PCB identifier; falls back to pcb_name_default
    :return: path of the time-stamped report dir ('' when shared-drive
        copying is disabled)
    :raises Exception: if the shared drive path is not accessible
    """
    if copy_results_to_shared_drive:
        if not pcb_name:
            pcb_name = pcb_name_default
        pcb_name = pcb_name.upper()
        pcb_test_dir_path = os.path.join(shared_drive_path, pcb_name+'_test_result')
        if not os.path.exists(shared_drive_path):
            raise Exception('Unable to access shared drive path!')
        if not os.path.exists(pcb_test_dir_path):
            # Creating PCB test directory
            os.mkdir(pcb_test_dir_path)
        now = datetime.now()
        dt_str = now.strftime("%m_%d_%y_%H_%M_%S")
        # Creating time-stamped test directory
        test_report_dir = os.path.join(pcb_test_dir_path, dt_str)
        os.mkdir(test_report_dir)
        with open('output_dir.tmp', 'w') as f_ref:
            f_ref.write(test_report_dir)
    else:
        test_report_dir = ''
    # Clearing plots folder
    if os.path.isdir('plots'):
        shutil.rmtree('plots')
        time.sleep(1)  # give Windows time to release the directory handle
    os.mkdir('plots')
    return test_report_dir
@util_logger
def close_setup():
    """Tear down the test setup.

    Clears the arduino and watch_shell globals and force-closes the ECG
    plot window.  The watch exit command and function-generator close are
    still disabled (see TODOs below).

    :return: None
    """
    # common.watch_shell.do_exit('') # TODO: Need to enable this after exit command is fixed
    update_arduino(None)
    update_watch_shell(None)
    close_plot_after_run(['ECG Data Plot'], True)
    # TODO: Enable below code to configure function generator
    # fg.close()
@util_logger
def set_switch(name, state):
    """Drive one of the mapped relay switches through the Arduino.

    Looks up the digital IO pin for *name* in switch_map and sends the
    corresponding '!SetIO' serial command.

    :param name: switch name; must be a key of switch_map
    :param state: IO state to set
    :return: None
    :raises Exception: when name is not present in switch_map
    """
    if name not in switch_map:
        raise Exception('Invalid switch name! Unable to find the provided switch name in the switch map')
    pin = switch_map[name]
    arduino.serial_write('!SetIO {} {}\r'.format(pin, state))
def rename_stream_file(old_file_name, suffix='', row_offset=1, col_idx=1,
                       copy_to_shared_drive=copy_results_to_shared_drive, plot=save_plots):
    """Rename a captured stream file, optionally plotting/copying it.

    NOTE(review): the defaults for copy_to_shared_drive/plot are bound to
    the module globals at import time, so later changes to those globals
    do not affect the defaults -- pass them explicitly if needed.

    :param old_file_name: stream file to rename
    :param suffix: appended to the file's base name (extension dropped)
    :param row_offset: If there is header on row 0 of csv data, row_offset can be 1
    :param col_idx: If the data is on column 2, col_idx will be 1
    :param copy_to_shared_drive: copy result (and plot) into the per-test
        group folder under test_report_dir
    :param plot: True/False -- also save a png via plot_and_save_png()
    :return: the new file name ('' when old_file_name did not exist)
    """
    if os.path.exists(old_file_name): # Renaming stream file for each iteration
        new_name = os.path.splitext(old_file_name)[0] + suffix
        if os.path.exists(new_name) and os.path.isfile(new_name):
            os.remove(new_name)
            time.sleep(0.5)
        os.rename(old_file_name, new_name)
    else:
        new_name = ''
    if plot:
        plot_path = plot_and_save_png(new_name, col_idx, row_offset)
    if copy_to_shared_drive:
        total_retry = 1
        for retry in range(total_retry): # has to be multiple iteration but limiting due to the delay
            try:
                # The caller's module name (one stack frame up) names the
                # test-group folder inside the report directory.
                test_group_name = inspect.getmodule(inspect.stack()[1][0]).__name__.split('.')[-1]
                test_group_dir = os.path.join(test_report_dir, test_group_name)
                if not os.path.exists(test_group_dir):
                    os.mkdir(test_group_dir)
                file_name = os.path.split(new_name)[-1]
                shutil.copyfile(new_name, os.path.join(test_group_dir, file_name))
                if plot:
                    plot_dir = os.path.join(test_group_dir, 'plots')
                    if not os.path.exists(plot_dir):
                        os.mkdir(plot_dir)
                    plot_name = os.path.split(plot_path)[-1]
                    new_plot_path = os.path.join(plot_dir, plot_name)
                    shutil.copyfile(plot_path, new_plot_path)
                break
            except WindowsError:
                # WindowsError only exists on Windows; retried copies are
                # expected when the shared drive is briefly locked.
                test_logger.info("Trying to copy the file; Attempts remaining: {}".format(total_retry - retry - 1))
        else:
            test_logger.warning("*** File Copy Failed ***")
    return new_name
def amp_volts_to_percentage(in_volt):
    """Convert an amplitude in volts to an Arduino amplitude percentage.

    Full scale is half of the configured voltage span (volt_scale_range),
    and the result is capped at 100%.

    :param in_volt: amplitude in volts
    :return: amplitude percentage (float, at most 100.0)
    """
    full_scale_amp = float(volt_scale_range[1]) / 2
    return min((in_volt * 100) / full_scale_amp, 100.0)
def read_csv_file(file_path, num_cols=2):
    """Read numeric csv data and return it as column tuples.

    The first line is treated as a header and skipped; every remaining
    line must contain comma-separated numeric values.

    :param file_path: path of the csv file to read
    :param num_cols: number of leading columns to return
    :return: list of column tuples, at most num_cols long
    """
    with open(file_path, 'r') as f_ref:
        lines = f_ref.readlines()
    lines.pop(0)  # drop the header row
    rows = [list(map(float, line.split(','))) for line in lines]
    # zip() is a lazy iterator in Python 3; materialize it so the slice
    # below works (the original sliced the zip object directly, which
    # raises TypeError).
    cols = list(zip(*rows))
    return cols[:num_cols]
def read_csv_col(file_path, col_idx=0, row_offset=1):
    """Read a single column of a csv file as floats.

    :param file_path: path of the csv file
    :param col_idx: zero-based index of the column to extract
    :param row_offset: number of leading (header) rows to skip
    :return: list of float values from the requested column
    """
    with open(file_path, 'r') as f_ref:
        line_list = f_ref.readlines()
    col_data_list = []
    last_line = len(line_list) - 1
    for i, line in enumerate(line_list):
        # Skip header rows and completely empty lines.  (The original
        # also called line.strip() here and discarded the result - a
        # no-op that has been removed.)
        if i < row_offset or not any(line):
            continue
        fields = line.split(",")
        # Skip the last row when its target cell is empty.
        if last_line == i and not any(fields[col_idx]):
            continue
        if i <= last_line - 7:
            # The last adpd packet can hold partial data (e.g. only
            # slots H, I when streaming F, G, H, I), so rows missing the
            # requested column are skipped instead of raising.
            try:
                any(fields[col_idx])
            except IndexError:
                continue
        col_data_list.append(float(fields[col_idx]))
    return col_data_list
# **********************************************************************
| [
11748,
28686,
198,
11748,
25064,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
256,
74,
3849,
198,
220,
220,
220,
1330,
640,
198,
220,
220,
220,
1330,
331,
43695,
198,
220,
220,
220,
422,
256,
74,
3849,
1330,
3275,
3524,
198,
220,
... | 2.105476 | 16,307 |
import collections
import copy
from typing import Iterable
import dgl
import torch
import torchtext
| [
11748,
17268,
198,
11748,
4866,
198,
6738,
19720,
1330,
40806,
540,
198,
198,
11748,
288,
4743,
198,
11748,
28034,
198,
11748,
28034,
5239,
628,
628
] | 4.16 | 25 |
# chesstab-4-1-1_castling-option-correction.py
# Copyright 2020 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Read games from a ChessTab database and report games with FENs where the
castling options are not consistent with the piece placement, and attempt
correction on request.
The database must be compatible with ChessTab-4.1.
"""
import tkinter
import tkinter.ttk
import tkinter.filedialog
import tkinter.messagebox
import os
from ast import literal_eval
import time
# This module must have the PGN class from pgn-read-1.3.1 and the PGNUpdate
# class from ChessTab-4.1 so both are copied here, rather than imported, as
# PGN131 along with PGNError131.
# The fitting pgn_read constants module is copied too.
# The two chessql constants are declared here too.
# All docstrings removed from the copied classes and modules.
# The names are modified to indicate their reliance on pgn-read-1.3.1.
# One constant is copied from chesstab.core.chessrecord.
# A regular expession is copied from chesstab.core.querystatement.
# The PGN class from pgn-read-1.3.2 is used to verify any corrected FENs are
# valid so it is copied here, rather than imported, as PGN132 along with
# PGNError132.
# PGN131 and PGN132 use the same version of .constants module
import re
from solentware_base import modulequery
from solentware_base.core.record import KeyData, Value, Record
from pgn_read.core import parser
from .. import (
APPLICATION_DATABASE_MODULE,
FULL_POSITION_MODULE,
ANALYSIS_MODULE,
)
from ..core.chessrecord import ChessDBrecordGameUpdate, ChessDBrecordAnalysis
# These have to be same at both versions of ChessTab so use the current ones.
from ..core.filespec import (
FileSpec,
POSITIONS_FIELD_DEF,
SOURCE_FIELD_DEF,
PIECESQUAREMOVE_FIELD_DEF,
PIECEMOVE_FIELD_DEF,
SQUAREMOVE_FIELD_DEF,
GAMES_FILE_DEF,
REPERTOIRE_FILE_DEF,
OPENING_ERROR_FIELD_DEF,
PGN_DATE_FIELD_DEF,
VARIATION_FIELD_DEF,
ENGINE_FIELD_DEF,
PARTIALPOSITION_NAME_FIELD_DEF,
RULE_FIELD_DEF,
COMMAND_FIELD_DEF,
ANALYSIS_FILE_DEF,
)
# start of attributes copied from pgn_read.core.constants at version 1.3.1
# pgn specification values
TAG_EVENT = "Event"
TAG_SITE = "Site"
TAG_DATE = "Date"
TAG_ROUND = "Round"
TAG_WHITE = "White"
TAG_BLACK = "Black"
TAG_RESULT = "Result"
TAG_FEN = "FEN"
SEVEN_TAG_ROSTER = {
TAG_EVENT: "?",
TAG_SITE: "?",
TAG_DATE: "????.??.??",
TAG_ROUND: "?",
TAG_WHITE: "?",
TAG_BLACK: "?",
TAG_RESULT: "*",
}
SEVEN_TAG_ROSTER_DISPLAY_ORDER = (
TAG_SITE,
TAG_ROUND,
TAG_EVENT,
TAG_DATE,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
SEVEN_TAG_ROSTER_EXPORT_ORDER = (
TAG_EVENT,
TAG_SITE,
TAG_DATE,
TAG_ROUND,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
# Allow for decorators to do special cases for Date and Round sorting
SPECIAL_TAG_DATE = ("?", "0")
SPECIAL_TAG_ROUND = {"?": 1, "-": 2}
NORMAL_TAG_ROUND = 3
SEVEN_TAG_ROSTER_ARCHIVE_SORT1 = (
TAG_EVENT,
TAG_SITE,
TAG_DATE,
)
SEVEN_TAG_ROSTER_ARCHIVE_SORT2 = (
TAG_ROUND,
TAG_WHITE,
TAG_BLACK,
TAG_RESULT,
)
WHITE_WIN = "1-0"
BLACK_WIN = "0-1"
DRAW = "1/2-1/2"
UNKNOWN_RESULT = "*"
RESULT_SET = {WHITE_WIN, BLACK_WIN, DRAW, UNKNOWN_RESULT}
# Repertoire Tags (non-standard)
TAG_OPENING = "Opening"
REPERTOIRE_TAG_ORDER = (TAG_OPENING, TAG_RESULT)
REPERTOIRE_GAME_TAGS = {
TAG_OPENING: "?",
TAG_RESULT: UNKNOWN_RESULT,
}
PGN_PAWN = ""
PGN_KING = "K"
PGN_QUEEN = "Q"
PGN_ROOK = "R"
PGN_BISHOP = "B"
PGN_KNIGHT = "N"
PGN_FROM_SQUARE_DISAMBIGUATION = frozenset((PGN_QUEEN, PGN_BISHOP, PGN_KNIGHT))
# Refugees from old PGN regular expression pattern matching strings.
O_O_O = "O-O-O"
O_O = "O-O"
PLAIN_MOVE = ""
CAPTURE_MOVE = "x"
LINEFEED = "\n"
CARRIAGE_RETURN = "\r"
NEWLINE = "".join((LINEFEED, CARRIAGE_RETURN))
SPACE = " "
HORIZONTAL_TAB = "\t"
FORMFEED = "\f"
VERTICAL_TAB = "\v"
# PGN regular expression pattern matching strings
# Building blocks
ANYTHING_ELSE = "."
WHITESPACE = "\s+"
FULLSTOP = "."
PERIOD = "\\" + FULLSTOP
INTEGER = "[1-9][0-9]*"
TERMINATION = "|".join((WHITE_WIN, BLACK_WIN, DRAW, "\\" + UNKNOWN_RESULT))
START_TAG = "["
END_TAG = "]"
SYMBOL = "([A-Za-z0-9][A-Za-z0-9_+#=:-]*)"
STRING = r'"((?:[^\\"]|\\.)*)"'
TAG_PAIR = "".join(
(
"(\\",
START_TAG,
")\s*",
SYMBOL,
"\s*",
STRING,
"\s*",
"(\\",
END_TAG,
")",
)
)
START_COMMENT = "{"
END_COMMENT = "}"
COMMENT = "".join(
("\\", START_COMMENT, "[^", END_COMMENT, "]*\\", END_COMMENT)
)
LEFT_ANGLE_BRACE = "<"
RIGHT_ANGLE_BRACE = ">"
RESERVED = "".join(
(LEFT_ANGLE_BRACE, "[^", RIGHT_ANGLE_BRACE, "]*", RIGHT_ANGLE_BRACE)
)
COMMENT_TO_EOL = ";(?:[^\n]*)\n"
PERCENT = "%"
ESCAPE_LINE = PERCENT.join(("(?:\A|(?<=\n))", "(?:[^\n]*)\n"))
NAG = "\$[0-9]+(?!/|-)"
START_RAV = "("
END_RAV = ")"
# KQRBN are replaced by PGN_KING, ..., constants; not WKING, ..., constants.
FNR = "a-h"
RNR = "1-8"
PAWN_PROMOTE = "".join(
(
"(?:([" + FNR + "])(x))?([" + FNR + "][18])(=[",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])",
)
)
PAWN_CAPTURE = "([" + FNR + "])(x)([" + FNR + "][2-7])"
PIECE_CAPTURE = "".join(
(
"(?:(",
PGN_KING,
")|(?:([",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([" + FNR + "]?[" + RNR + "]?)))",
"(x)([" + FNR + "][" + RNR + "])",
)
)
PIECE_CHOICE_MOVE = "".join(
(
"([",
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([",
FNR + RNR + "])([" + FNR + "][" + RNR + "])",
)
)
PIECE_MOVE = "".join(
(
"([",
PGN_KING,
PGN_BISHOP,
PGN_KNIGHT,
PGN_QUEEN,
PGN_ROOK,
"])([" + FNR + "][" + RNR + "])",
)
)
PAWN_MOVE = "([" + FNR + "][" + RNR + "])"
CASTLES = "(O-O(?:-O)?)"
CHECK = "([+#]?)"
ANNOTATION = "([!?][!?]?)?"
# Regular expression to detect full games in import format; export format is a
# subset of import format. The text stored on database is captured.
IMPORT_FORMAT = "".join(
(
"(?:",
TAG_PAIR,
")",
"|",
"(?:",
"(?:",
"(?:",
PAWN_PROMOTE,
")",
"|",
"(?:",
PAWN_CAPTURE,
")",
"|",
"(?:",
PIECE_CAPTURE,
")",
"|",
"(?:",
PIECE_CHOICE_MOVE,
")",
"|",
"(?:",
PIECE_MOVE,
")",
"|",
"(?:",
PAWN_MOVE,
")",
"|",
"(?:",
CASTLES,
")",
")",
"(?:",
CHECK,
")",
"(?:",
ANNOTATION,
")",
")",
"|",
"(",
COMMENT,
")",
"|",
"(",
NAG,
")",
"|",
"(",
COMMENT_TO_EOL,
")",
"|",
"(",
TERMINATION,
")",
"|",
INTEGER,
"|",
PERIOD,
"|",
WHITESPACE,
"|",
"(\\",
START_RAV,
")",
"|",
"(\\",
END_RAV,
")",
"|",
RESERVED,
"|",
ESCAPE_LINE,
"|",
"(",
ANYTHING_ELSE,
")",
)
)
# Regular expressions to disambiguate moves: move text like 'Bc4d5' is the only
# kind which could need to be interpreted as one move rather than two.
DISAMBIGUATE_FORMAT = "".join(
(
"[" + PGN_BISHOP + PGN_KNIGHT + PGN_QUEEN + "]",
"[" + FNR + "][" + RNR + "]",
"[" + FNR + "][" + RNR + "]",
)
)
UNAMBIGUOUS_FORMAT = ".*"
# Regular expression to detect possible beginning of move in an error sequence,
# "Bxa" for example while typing "Bxa6".
# No constants for partial castling moves.
POSSIBLE_MOVE = "".join(
(
"[O",
PGN_KING,
PGN_BISHOP,
PGN_KNIGHT,
PGN_ROOK,
PGN_QUEEN,
FNR,
"][-O",
FNR,
RNR,
"+#?!=]* *",
)
)
#
# Group offsets for IMPORT_FORMAT matches
#
IFG_START_TAG = 1
IFG_TAG_SYMBOL = 2
# IFG_TAG_STRING = 3
IFG_TAG_STRING_VALUE = 3
# IFG_TAG_END = 4
IFG_PAWN_PROMOTE_FROM_FILE = 5
IFG_PAWN_TAKES_PROMOTE = 6
IFG_PAWN_PROMOTE_SQUARE = 7
IFG_PAWN_PROMOTE_PIECE = 8
IFG_PAWN_CAPTURE_FROM_FILE = 9
IFG_PAWN_TAKES = 10
IFG_PAWN_CAPTURE_SQUARE = 11
IFG_KING_CAPTURE = 12
IFG_PIECE_CAPTURE = 13
IFG_PIECE_CAPTURE_FROM = 14
IFG_PIECE_TAKES = 15
IFG_PIECE_CAPTURE_SQUARE = 16
IFG_PIECE_CHOICE = 17
IFG_PIECE_CHOICE_FILE_OR_RANK = 18
IFG_PIECE_CHOICE_SQUARE = 19
IFG_PIECE_MOVE = 20
IFG_PIECE_SQUARE = 21
IFG_PAWN_SQUARE = 22
IFG_CASTLES = 23
IFG_CHECK = 24
IFG_ANNOTATION = 25
IFG_COMMENT = 26
IFG_NAG = 27
IFG_COMMENT_TO_EOL = 28
IFG_TERMINATION = 29
IFG_START_RAV = 30
IFG_END_RAV = 31
IFG_ANYTHING_ELSE = 32
#
# Parser states
#
PGN_SEARCHING = 0
PGN_SEARCHING_AFTER_ERROR_IN_RAV = 1
PGN_SEARCHING_AFTER_ERROR_IN_GAME = 2
PGN_COLLECTING_TAG_PAIRS = 3
PGN_COLLECTING_MOVETEXT = 4
PGN_COLLECTING_NON_WHITESPACE_WHILE_SEARCHING = 5
PGN_DISAMBIGUATE_MOVE = 6
#
# numeric annotation glyphs (just validation for now)
#
NAG_TRANSLATION = {"$" + str(o): None for o in range(1, 499)}
#
# Square constants and flags
#
BOARDSIDE = 8
BOARDSQUARES = BOARDSIDE * BOARDSIDE
SQUARE_BITS = [1 << i for i in range(BOARDSQUARES)]
ALL_SQUARES = sum(SQUARE_BITS)
EN_PASSANT_TO_SQUARES = sum([SQUARE_BITS[s] for s in range(24, 40)])
EN_PASSANT_FROM_SQUARES = sum([SQUARE_BITS[s] for s in range(8, 16)]) | sum(
[SQUARE_BITS[s] for s in range(48, 56)]
)
# Pieces
# Encoding positions is more efficient (key length) if pawns are encoded with
# a value less than 4 with either the white or the black pawn encoded as 0 and
# the squares that cannot host a pawn include 0..3 as their encodings (bytes
# \x01..\x03 which arises naturally as the second byte of the 2-byte encodings
# ), typically the squares b1 c1 and d1. The other two values less than 4 are
# best used for the kings which are always present. Absence of a piece is best
# encoded with the highest value, which will be 12 if using lists, wherever
# possible, rather than dictionaries for mappings.
NOPIECE = ""
WKING = "K"
WQUEEN = "Q"
WROOK = "R"
WBISHOP = "B"
WKNIGHT = "N"
WPAWN = "P"
BKING = "k"
BQUEEN = "q"
BROOK = "r"
BBISHOP = "b"
BKNIGHT = "n"
BPAWN = "p"
PIECES = frozenset(
(
WKING,
WQUEEN,
WROOK,
WBISHOP,
WKNIGHT,
WPAWN,
BKING,
BQUEEN,
BROOK,
BBISHOP,
BKNIGHT,
BPAWN,
)
)
# Define white and black pieces and map to values used in database records
WPIECES = frozenset((WKING, WQUEEN, WROOK, WBISHOP, WKNIGHT, WPAWN))
BPIECES = frozenset((BKING, BQUEEN, BROOK, BBISHOP, BKNIGHT, BPAWN))
# The default initial board, internal representation.
INITIAL_BOARD = (
BROOK,
BKNIGHT,
BBISHOP,
BQUEEN,
BKING,
BBISHOP,
BKNIGHT,
BROOK,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
BPAWN,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
NOPIECE,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WPAWN,
WROOK,
WKNIGHT,
WBISHOP,
WQUEEN,
WKING,
WBISHOP,
WKNIGHT,
WROOK,
)
INITIAL_OCCUPIED_SQUARES = (
frozenset(
(48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)
),
frozenset((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)),
)
INITIAL_BOARD_BITMAP = sum(
[sum([SQUARE_BITS[o] for o in s]) for s in INITIAL_OCCUPIED_SQUARES]
)
INITIAL_PIECE_LOCATIONS = {
k: v
for k, v in (
(WKING, (60,)),
(WQUEEN, (59,)),
(WROOK, (56, 63)),
(WBISHOP, (58, 61)),
(WKNIGHT, (57, 62)),
(WPAWN, (48, 49, 50, 51, 52, 53, 54, 55)),
(BKING, (4,)),
(BQUEEN, (3,)),
(BROOK, (0, 7)),
(BBISHOP, (2, 5)),
(BKNIGHT, (1, 6)),
(BPAWN, (8, 9, 10, 11, 12, 13, 14, 15)),
)
}
# White and black side
WHITE_SIDE = 0
BLACK_SIDE = 1
OTHER_SIDE = BLACK_SIDE, WHITE_SIDE
SIDE_KING = WKING, BKING
# Map PGN piece file and rank names to internal representation
MAPPIECE = (
{
PGN_PAWN: WPAWN,
PGN_KING: WKING,
PGN_QUEEN: WQUEEN,
PGN_ROOK: WROOK,
PGN_BISHOP: WBISHOP,
PGN_KNIGHT: WKNIGHT,
},
{
PGN_PAWN: BPAWN,
PGN_KING: BKING,
PGN_QUEEN: BQUEEN,
PGN_ROOK: BROOK,
PGN_BISHOP: BBISHOP,
PGN_KNIGHT: BKNIGHT,
},
) # not sure if this should be set or tuple or dict
MAPFILE = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6, "h": 7}
MAPRANK = {
"8": 0,
"7": 8,
"6": 16,
"5": 24,
"4": 32,
"3": 40,
"2": 48,
"1": 56,
}
MAPROW = {"8": 0, "7": 1, "6": 2, "5": 3, "4": 4, "3": 5, "2": 6, "1": 7}
# {'a8':0, 'b8':1, ..., 'g1':62, 'h1':63}, the order squares are listed in
# Forsyth-Edwards Notation and the square numbers used internally.
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER = {
"".join((f, r)): fn + rn
for f, fn in MAPFILE.items()
for r, rn in MAPRANK.items()
}
# FEN constants
FEN_WHITE = "w"
FEN_BLACK = "b"
FEN_FIELD_DELIM = " "
FEN_RANK_DELIM = "/"
FEN_NULL = "-"
FEN_INITIAL_HALFMOVE_COUNT = 0
FEN_INITIAL_FULLMOVE_NUMBER = 1
FEN_INITIAL_CASTLING = WKING + WQUEEN + BKING + BQUEEN
FEN_STARTPOSITION = FEN_FIELD_DELIM.join(
(
FEN_RANK_DELIM.join(
(
"".join(
(
BROOK,
BKNIGHT,
BBISHOP,
BQUEEN,
BKING,
BBISHOP,
BKNIGHT,
BROOK,
)
),
"".join(
(BPAWN, BPAWN, BPAWN, BPAWN, BPAWN, BPAWN, BPAWN, BPAWN)
),
str(len(MAPFILE)),
str(len(MAPFILE)),
str(len(MAPFILE)),
str(len(MAPFILE)),
"".join(
(WPAWN, WPAWN, WPAWN, WPAWN, WPAWN, WPAWN, WPAWN, WPAWN)
),
"".join(
(
WROOK,
WKNIGHT,
WBISHOP,
WQUEEN,
WKING,
WBISHOP,
WKNIGHT,
WROOK,
)
),
)
),
FEN_WHITE,
FEN_INITIAL_CASTLING,
FEN_NULL,
str(FEN_INITIAL_HALFMOVE_COUNT),
str(FEN_INITIAL_FULLMOVE_NUMBER),
)
)
FEN_FIELD_COUNT = 6
FEN_SIDES = {FEN_WHITE: WHITE_SIDE, FEN_BLACK: BLACK_SIDE}
FEN_TOMOVE = FEN_WHITE, FEN_BLACK
# Map FEN square names to board square numbers for en passant move and capture
FEN_WHITE_MOVE_TO_EN_PASSANT = {
"a6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a6"],
"b6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b6"],
"c6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c6"],
"d6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d6"],
"e6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e6"],
"f6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f6"],
"g6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g6"],
"h6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h6"],
}
FEN_BLACK_MOVE_TO_EN_PASSANT = {
"a3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a3"],
"b3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b3"],
"c3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c3"],
"d3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d3"],
"e3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e3"],
"f3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f3"],
"g3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g3"],
"h3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h3"],
}
FEN_WHITE_CAPTURE_EN_PASSANT = {
"a6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a5"],
"b6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b5"],
"c6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c5"],
"d6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d5"],
"e6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e5"],
"f6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f5"],
"g6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g5"],
"h6": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h5"],
}
FEN_BLACK_CAPTURE_EN_PASSANT = {
"a3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a4"],
"b3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b4"],
"c3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c4"],
"d3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d4"],
"e3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e4"],
"f3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f4"],
"g3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g4"],
"h3": MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h4"],
}
FEN_EN_PASSANT_TARGET_RANK = {"5": "6", "4": "3"}
# Specification of conditions to be met to permit castling and changes to make
# to board to display move in internal representation.
# The square to which the king moves is not included in the set of squares
# that must not be under attack because this condition is checked for all moves
# after being played provisionally on the board. The special additional thing
# about castling is that the king cannot move out of or through check; for all
# types of move the king must not be under attack after playing the move. But
# as currently implemented there is no harm except waste in including the test.
CASTLING_W = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e1"]
CASTLING_WK = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h1"]
CASTLING_WQ = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a1"]
CASTLING_B = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["e8"]
CASTLING_BK = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["h8"]
CASTLING_BQ = MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["a8"]
CASTLING_AVAILABITY_SQUARES = (
SQUARE_BITS[CASTLING_WQ]
| SQUARE_BITS[CASTLING_W]
| SQUARE_BITS[CASTLING_WK]
| SQUARE_BITS[CASTLING_BQ]
| SQUARE_BITS[CASTLING_B]
| SQUARE_BITS[CASTLING_BK]
)
CASTLING_SQUARES = {
WKING: (
CASTLING_W,
CASTLING_WK,
(
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f1"],
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g1"],
),
(),
WROOK,
WKING,
),
WQUEEN: (
CASTLING_W,
CASTLING_WQ,
(
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d1"],
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c1"],
),
(MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b1"],),
WROOK,
WKING,
),
BKING: (
CASTLING_B,
CASTLING_BK,
(
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["f8"],
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["g8"],
),
(),
BROOK,
BKING,
),
BQUEEN: (
CASTLING_B,
CASTLING_BQ,
(
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["d8"],
MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["c8"],
),
(MAP_PGN_SQUARE_NAME_TO_FEN_ORDER["b8"],),
BROOK,
BKING,
),
}
# FEN validation
FEN_CASTLING_OPTION_REPEAT_MAX = 1
FEN_PIECE_COUNT_PER_SIDE_MAX = 16
FEN_KING_COUNT = 1
FEN_PAWN_COUNT_MAX = 8
FEN_QUEEN_COUNT_INITIAL = 1
FEN_ROOK_COUNT_INITIAL = 2
FEN_BISHOP_COUNT_INITIAL = 2
FEN_KNIGHT_COUNT_INITIAL = 2
FEN_MAXIMUM_PIECES_GIVING_CHECK = 2
# variation markers and non-move placeholders
NON_MOVE = None
MOVE_ERROR = False
MOVE_AFTER_ERROR = 0
MOVE_TEXT = True
# Maximum line length in PGN file for movetext excluding EOL ('\n')
# Some PGN Tags are allowed to exceed this
# The rule may not be enforcable for comments, especially any re-exported,
# without disturbing any formatting attempts with EOL and spaces.
PGN_MAX_LINE_LEN = 79
# Piece moves and line definitions
_RANKS = [
sum([SQUARE_BITS[s + r * BOARDSIDE] for s in range(BOARDSIDE)])
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
_FILES = [
sum([SQUARE_BITS[s * BOARDSIDE + f] for s in range(BOARDSIDE)])
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
_TOPLEFT_TO_BOTTOMRIGHT = [
sum(
[
SQUARE_BITS[
((f + c) % BOARDSIDE) + ((r + c) % BOARDSIDE) * BOARDSIDE
]
for c in range(BOARDSIDE)
if (
f + c < BOARDSIDE
and r + c < BOARDSIDE
or f + c >= BOARDSIDE
and r + c >= BOARDSIDE
)
]
)
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
_BOTTOMLEFT_TO_TOPRIGHT = [
sum(
[
SQUARE_BITS[
((f - c) % BOARDSIDE) + ((r + c) % BOARDSIDE) * BOARDSIDE
]
for c in range(BOARDSIDE)
if f >= c and r + c < BOARDSIDE or c > f and r + c >= BOARDSIDE
]
)
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
RANKS = [_RANKS[r * BOARDSIDE] for r in range(BOARDSIDE)]
FILES = _FILES[:BOARDSIDE]
ROOK_MOVES = [(_RANKS[k] | _FILES[k]) - s for k, s in enumerate(SQUARE_BITS)]
BISHOP_MOVES = [
(_TOPLEFT_TO_BOTTOMRIGHT[k] | _BOTTOMLEFT_TO_TOPRIGHT[k]) - s
for k, s in enumerate(SQUARE_BITS)
]
QUEEN_MOVES = [(BISHOP_MOVES[s] | ROOK_MOVES[s]) for s in range(BOARDSQUARES)]
KNIGHT_MOVES = [
(
(
sum(
_FILES[kf + r * BOARDSIDE]
for kf in range(f - 2, f + 3)
if kf >= 0 and kf < BOARDSIDE
)
& sum(
_RANKS[f + kr * 8]
for kr in range(r - 2, r + 3)
if kr >= 0 and kr < BOARDSIDE
)
)
& ~(
_RANKS[f + r * BOARDSIDE]
| _FILES[f + r * BOARDSIDE]
| _TOPLEFT_TO_BOTTOMRIGHT[f + r * BOARDSIDE]
| _BOTTOMLEFT_TO_TOPRIGHT[f + r * BOARDSIDE]
)
)
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
KING_MOVES = [
(
QUEEN_MOVES[f + r * BOARDSIDE]
& (
sum(
_FILES[kf + r * BOARDSIDE]
for kf in range(f - 1, f + 2)
if kf >= 0 and kf < BOARDSIDE
)
& sum(
_RANKS[f + kr * 8]
for kr in range(r - 1, r + 2)
if kr >= 0 and kr < BOARDSIDE
)
)
)
for r in range(BOARDSIDE)
for f in range(BOARDSIDE)
]
WHITE_PAWN_MOVES_TO_SQUARE = []
for s in range(BOARDSQUARES):
if s < BOARDSQUARES - BOARDSIDE * 2:
WHITE_PAWN_MOVES_TO_SQUARE.append(SQUARE_BITS[s + BOARDSIDE])
else:
WHITE_PAWN_MOVES_TO_SQUARE.append(0)
for s in range(BOARDSQUARES - BOARDSIDE * 4, BOARDSQUARES - BOARDSIDE * 3):
WHITE_PAWN_MOVES_TO_SQUARE[s] |= SQUARE_BITS[s + BOARDSIDE * 2]
BLACK_PAWN_MOVES_TO_SQUARE = []
for s in range(BOARDSQUARES):
if s < BOARDSIDE * 2:
BLACK_PAWN_MOVES_TO_SQUARE.append(0)
else:
BLACK_PAWN_MOVES_TO_SQUARE.append(SQUARE_BITS[s - BOARDSIDE])
for s in range(BOARDSIDE * 3, BOARDSIDE * 4):
BLACK_PAWN_MOVES_TO_SQUARE[s] |= SQUARE_BITS[s - BOARDSIDE * 2]
# 'b1' for black, and 'b8' for white, are allowed as pawn move specifications
# to disambiguate queen moves like 'Qd1f1'.
# PAWN_MOVE_DESITINATION filters them out.
PAWN_MOVE_DESITINATION = [0, 0]
for s in range(BOARDSQUARES):
if s < BOARDSIDE:
pass
elif s < BOARDSIDE * 2:
PAWN_MOVE_DESITINATION[0] |= SQUARE_BITS[s]
elif s < BOARDSQUARES - BOARDSIDE * 2:
PAWN_MOVE_DESITINATION[0] |= SQUARE_BITS[s]
PAWN_MOVE_DESITINATION[1] |= SQUARE_BITS[s]
elif s < BOARDSQUARES - BOARDSIDE:
PAWN_MOVE_DESITINATION[1] |= SQUARE_BITS[s]
WHITE_PAWN_CAPTURES_TO_SQUARE = []
for s in range(BOARDSQUARES):
if s > BOARDSQUARES - BOARDSIDE * 2 - 1:
WHITE_PAWN_CAPTURES_TO_SQUARE.append(0)
elif s % BOARDSIDE == 0:
WHITE_PAWN_CAPTURES_TO_SQUARE.append(SQUARE_BITS[s + BOARDSIDE + 1])
elif s % BOARDSIDE == BOARDSIDE - 1:
WHITE_PAWN_CAPTURES_TO_SQUARE.append(SQUARE_BITS[s + BOARDSIDE - 1])
else:
WHITE_PAWN_CAPTURES_TO_SQUARE.append(
SQUARE_BITS[s + BOARDSIDE - 1] | SQUARE_BITS[s + BOARDSIDE + 1]
)
BLACK_PAWN_CAPTURES_TO_SQUARE = []
for s in range(BOARDSQUARES):
if s < BOARDSIDE * 2:
BLACK_PAWN_CAPTURES_TO_SQUARE.append(0)
elif s % BOARDSIDE == 0:
BLACK_PAWN_CAPTURES_TO_SQUARE.append(SQUARE_BITS[s - BOARDSIDE + 1])
elif s % BOARDSIDE == BOARDSIDE - 1:
BLACK_PAWN_CAPTURES_TO_SQUARE.append(SQUARE_BITS[s - BOARDSIDE - 1])
else:
BLACK_PAWN_CAPTURES_TO_SQUARE.append(
SQUARE_BITS[s - BOARDSIDE - 1] | SQUARE_BITS[s - BOARDSIDE + 1]
)
GAPS = []
for f in range(BOARDSQUARES):
GAPS.append(list())
for t in range(BOARDSQUARES):
aligned = (
(_RANKS[f] & _RANKS[t])
| (_FILES[f] & _FILES[t])
| (_TOPLEFT_TO_BOTTOMRIGHT[f] & _TOPLEFT_TO_BOTTOMRIGHT[t])
| (_BOTTOMLEFT_TO_TOPRIGHT[f] & _BOTTOMLEFT_TO_TOPRIGHT[t])
)
if not aligned:
if SQUARE_BITS[t] & KNIGHT_MOVES[f]:
GAPS[f].append(0)
else:
GAPS[f].append(ALL_SQUARES)
else:
gap = (
aligned
& sum(SQUARE_BITS[min(f, t) : max(f, t) + 1])
& ~(SQUARE_BITS[f] | SQUARE_BITS[t])
)
if gap:
GAPS[f].append(gap)
elif f == t:
GAPS[f].append(ALL_SQUARES)
else:
GAPS[f].append(0)
del _TOPLEFT_TO_BOTTOMRIGHT
del _BOTTOMLEFT_TO_TOPRIGHT
del _FILES
del _RANKS
del f, t, gap, aligned
PIECE_CAPTURE_MAP = {
k: v
for k, v in (
(WKING, KING_MOVES),
(WQUEEN, QUEEN_MOVES),
(WROOK, ROOK_MOVES),
(WBISHOP, BISHOP_MOVES),
(WKNIGHT, KNIGHT_MOVES),
(WPAWN, WHITE_PAWN_CAPTURES_TO_SQUARE),
(BKING, KING_MOVES),
(BQUEEN, QUEEN_MOVES),
(BROOK, ROOK_MOVES),
(BBISHOP, BISHOP_MOVES),
(BKNIGHT, KNIGHT_MOVES),
(BPAWN, BLACK_PAWN_CAPTURES_TO_SQUARE),
)
}
PIECE_MOVE_MAP = {
k: v
for k, v in (
(WKING, KING_MOVES),
(WQUEEN, QUEEN_MOVES),
(WROOK, ROOK_MOVES),
(WBISHOP, BISHOP_MOVES),
(WKNIGHT, KNIGHT_MOVES),
(WPAWN, WHITE_PAWN_MOVES_TO_SQUARE),
(BKING, KING_MOVES),
(BQUEEN, QUEEN_MOVES),
(BROOK, ROOK_MOVES),
(BBISHOP, BISHOP_MOVES),
(BKNIGHT, KNIGHT_MOVES),
(BPAWN, BLACK_PAWN_MOVES_TO_SQUARE),
)
}
# Lookup tables for string representation of square and move numbers.
MAP_FEN_ORDER_TO_PGN_SQUARE_NAME = [
t[-1]
for t in sorted(
(v, k) for k, v in MAP_PGN_SQUARE_NAME_TO_FEN_ORDER.items()
)
]
MOVE_NUMBER_KEYS = tuple(
["0"] + [str(len(hex(i)) - 2) + hex(i)[2:] for i in range(1, 256)]
)
# Error markers for PGN display.
ERROR_START_COMMENT = START_COMMENT + "Error: "
ESCAPE_END_COMMENT = "::" + START_COMMENT + START_COMMENT + "::"
# end of attributes copied from pgn_read.core.constants
# Defined in chesstab.core.chessrecord.
PLAYER_NAME_TAGS = frozenset((TAG_WHITE, TAG_BLACK))
# Imported from chesstab.core.querystatement.
re_normalize_player_name = re.compile("([^,\.\s]+)(?:[,\.\s]*)")
# The two chessql.core.constants attributes needed.
ANY_WHITE_PIECE_NAME = r"A"
ANY_BLACK_PIECE_NAME = r"a"
MAP_PGN_PIECE_TO_CQL_COMPOSITE_PIECE = {
WKING: ANY_WHITE_PIECE_NAME,
WQUEEN: ANY_WHITE_PIECE_NAME,
WROOK: ANY_WHITE_PIECE_NAME,
WBISHOP: ANY_WHITE_PIECE_NAME,
WKNIGHT: ANY_WHITE_PIECE_NAME,
WPAWN: ANY_WHITE_PIECE_NAME,
BKING: ANY_BLACK_PIECE_NAME,
BQUEEN: ANY_BLACK_PIECE_NAME,
BROOK: ANY_BLACK_PIECE_NAME,
BBISHOP: ANY_BLACK_PIECE_NAME,
BKNIGHT: ANY_BLACK_PIECE_NAME,
BPAWN: ANY_BLACK_PIECE_NAME,
}
re_tokens = re.compile(IMPORT_FORMAT)
# Avoid re.fullmatch() method while compatibility with Python 3.3 is important.
re_disambiguate_error = re.compile(DISAMBIGUATE_FORMAT.join(("^", "$")))
re_disambiguate_non_move = re.compile(UNAMBIGUOUS_FORMAT.join(("^", "$")))
re_possible_move = re.compile(POSSIBLE_MOVE.join(("(^", "$)")))
# for runtime "from <db|dpt>results import ChessDatabase" and similar
_ChessDB = "ChessDatabase"
_FullPositionDS = "FullPositionDS"
_AnalysisDS = "AnalysisDS"
# Subclass PGN131 to collect inconsistent FENs.
# Subclass PGN132 to collect inconsistent FENs: meaning verify they do not
# exist for PGN copied from pgn_read.core.parser version 1.3.2.
# Versions of the classes in core.chessrecord which use PGNUpdate modified to
# use PGNUpdate131, defined in this module above, so the records which have the
# inconsistent castling options can be deleted in full.
# Replaces ChessDBvaluePGNUpdate and ChessDBvalueGameImport which had been
# identical for a considerable time.
# Decided that PGNUpdate should remain in pgn.core.parser because that code
# generates data while this code updates a database.
# ChessDBvalueGameImport had this comment:
# Implication of original is encode_move_number not supported and load in
# ChessDBvaluePGN superclass is used.
if __name__ == "__main__":
Main().root.mainloop()
| [
2,
269,
956,
39029,
12,
19,
12,
16,
12,
16,
62,
2701,
1359,
12,
18076,
12,
10215,
8243,
13,
9078,
198,
2,
15069,
12131,
13637,
9786,
198,
2,
10483,
594,
25,
4091,
38559,
18310,
357,
21800,
17098,
8,
198,
198,
37811,
5569,
1830,
42... | 1.859855 | 15,855 |
'''
made by YeahKun in 2017-7-22 11:50:42
็ๅญ่ฐๆธธๆ2.0
ๅขๅ ไบๆ็คบ๏ผๆฉๅฑไบๅ่ฏ็็ง็ฑปๅๆฐ้
'''
import random
HANGMANPICS = [
'''
=====
+---+
| |
|
|
|
|
=====''',
'''
=====
+---+
| |
O |
|
|
|
=====''',
'''
=====
+---+
| |
O |
| |
|
|
=====''',
'''
======
+---+
| |
O |
/| |
|
|
======''',
'''
======
+---+
| |
O |
/|\ |
|
|
======''',
'''
======
+---+
| |
O |
/|\ |
/ |
|
======''',
'''
======
+---+
| |
O |
/|\ |
/ \ |
|
======''',
'''
======
+---+
| |
(O |
/|\ |
/ \ |
|
======''',
'''
======
+---+
| |
(O) |
/|\ |
/ \ |
|
======'''
]
# ๅๆฌๆๆ็ฅ็งๅ่ฏ็ๅญๅ
ธ
words = {
'Colors': 'red blue pink yellow green white gray black purple orange clear tan'.split(),
'Fruits': 'tomato orange banana berry mango pear cherry melon plum jackfrult grape'.split(),
'Animals': 'tiger deer lion sheep dog cat horse monkey snake frog fox pig ox duck chicken elephant'.split()
}
if __name__ == '__main__':
print('H A N G M A N')
missedLetters = '' # ็ฉๅฎถๅทฒ็ป็่ฟ็ไธๅฑไบ็ฅ็งๅ่ฏ็ๅญ็ฌฆไธฒ
correctLetters = '' # ็ฉๅฎถๅทฒ็ป็่ฟ็ๅฑไบ็ฅ็งๅ่ฏ็ๅญ็ฌฆไธฒ
serectWord, wordKey = getRandomWord(words) # ่ทๅพ้ๆบ็็ฅ็งๅ่ฏ
gameIsDone = False
while True:
disPlayGround(HANGMANPICS, missedLetters,
correctLetters, serectWord) # ๆพ็คบๆธธๆ็
# ็ฉๅฎถ่พๅ
ฅ็ๆตๅญๆฏ
guess = getGuess(missedLetters + correctLetters) # ็ฉๅฎถ่พๅ
ฅ่ฟ็ๅญๆฏๆๆ็ๅญ็ฌฆไธฒ
# ๅคๆญๅญๆฏๆฏๅฆๅฑไบ็ฅ็งๅ่ฏไธญ
if guess in serectWord: # ๅฆๆๅฑไบ
correctLetters = correctLetters + guess
# ๅคๆญ็ฉๅฎถๆฏๅฆ่ท่
foundAllLetters = True
for i in range(len(serectWord)):
if serectWord[i] not in correctLetters:
foundAllLetters = False
break
if foundAllLetters:
print("Yes! The secret word is " +
serectWord + "! You have won!")
gameIsDone = True
else:
missedLetters = missedLetters + guess
#
if len(missedLetters) == len(HANGMANPICS) - 1:
disPlayGround(HANGMANPICS, missedLetters,
correctLetters, serectWord)
print("\nYou have run out of guesses!\n " + "The secret word is " + serectWord + "\nAfter " + str(len(missedLetters)) + " missed guesses and " +
str(len(correctLetters)) + " correct guesses, the word was" + serectWord)
gameIsDone = True
if gameIsDone:
if playAgain():
missedLetters = ''
correctLetters = ''
gameIsDone = False
serectWord = getRandomWord(words)
else:
break
| [
7061,
6,
201,
198,
220,
220,
220,
925,
416,
9425,
42,
403,
287,
2177,
12,
22,
12,
1828,
1367,
25,
1120,
25,
3682,
201,
198,
220,
220,
220,
13328,
234,
250,
27764,
245,
164,
108,
250,
162,
116,
116,
22755,
237,
17,
13,
15,
201,
... | 1.597095 | 1,859 |
import sys
import math
import re
from collections import OrderedDict
import random
import numpy
import matplotlib.pyplot as plt
features = OrderedDict({})
trainingData = []
testFeatures = OrderedDict({})
testData = []
class_attribute = []
# returns
# feature index of the best feature
# information gain by that feature in data
# ----------------------------------------------------------------------------------------
# feature = None # slope
# parent = None # anotehr node
# children = [] # some nodes
# feature_values = [] # [up, down, flat] # TODO think this way or store conditions
# class_type = None # negative or positive
# negPosCount = None # number of +ves and -ves at this node
# head = None # head node for the tree
# def add_node(self, node_to_add, nodes_parent):
# nodes_parent.
# eg: (56,'male','atyp_angina',120,236,'f','normal',178,'no',0.8,'up',0,'normal') => (negative)
def get_class_type(data, features=None):
'''
gets the class type if this is a stopping phase of data
:param data:
:param features:
:return: positive or negative class if this is a stopping phase, else None
'''
posNegCounts = findPosNegCounts(data)
if posNegCounts[0] == 0:
return 'positive'
elif posNegCounts[1] == 0:
return 'negative'
return None
if __name__ == '__main__':
main() | [
11748,
25064,
198,
11748,
10688,
198,
11748,
302,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
4738,
198,
11748,
299,
32152,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
40890,
796,
14230,
1068,
35,
71... | 2.995708 | 466 |
if __name__ == '__main__':
P = coordinate()
print(P)
P = coordinate(x=2)
print(P)
P = coordinate(y=3)
print(P)
P = coordinate(x=5, y=7)
print(P)
| [
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
47,
796,
20435,
3419,
198,
197,
4798,
7,
47,
8,
198,
197,
47,
796,
20435,
7,
87,
28,
17,
8,
198,
197,
4798,
7,
47,
8,
198,
197,
47,
796,
20435,
7,
... | 2.025974 | 77 |
#ClickKaleidoscope.py
import random
import turtle
t = turtle.Pen()
t.speed(0)
t.hideturtle()
turtle.bgcolor("black")
colors = ["red", "yellow", "blue", "green", "orange", "purple",
"white", "gray"]
turtle.onscreenclick(draw_kaleido)
| [
2,
8164,
42,
1000,
312,
40326,
13,
9078,
198,
198,
11748,
4738,
198,
11748,
28699,
198,
83,
796,
28699,
13,
25553,
3419,
198,
83,
13,
12287,
7,
15,
8,
198,
83,
13,
49675,
316,
17964,
3419,
198,
83,
17964,
13,
35904,
8043,
7203,
13... | 2.474747 | 99 |
#!/usr/bin/env python
import unittest
from tests.base import PyangBindTestCase
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
5254,
13,
8692,
1330,
9485,
648,
36180,
14402,
20448,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
... | 2.62 | 50 |
# -*- coding: utf-8 -*-
from copy import deepcopy
from typing import List, Optional
__author__ = 'lundberg'
# Models for filtering out unneeded or unwanted data from eduID database objects
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
19720,
1330,
7343,
11,
32233,
198,
198,
834,
9800,
834,
796,
705,
37525,
3900,
6,
628,
198,
2,
32329,
329,
25431,
503,
555,
2... | 3.322581 | 62 |
''' Serialization interface for custom DataFrame objects. Allows to save/load
for memory streams or files. Because one cannot serialize DataFrames with
custom attributes, this uses an intermediate object for that process. Plan
it implement pickling saved methods later (requires more work). These are meant to
supplant the DataFrame's save() and load() methods when custom attributes must persist.
Note, this program assesses custom attributes by inspecting your DataFrame's
attributes using Python's builting function, dir(). It compares these to the
attributes of an empty DataFrame. This adds a bit of overhead, but should allow
this program to work with new versions of pandas, as Dataframe's methods and attributes
are likely to change. Is there are better way to do this?
The following four functions are defined:
df_dumps: Serialize a DataFrame into memory. Returns serialized stream.
df_dump: Serialize a DataFrame into a file. Returns None.
df_loads: Return a DataFrame from a serialized stream.
df_load: Return a Dataframe from a serialized file.
See bottom of file for test cases: '''
__author__ = "Adam Hughes"
__maintainer__ = "Adam Hughes"
__email__ = "hugadams@gwmail.gwu.edu"
__status__ = "Prototype"
import cPickle
from operator import attrgetter
from pandas import DataFrame
### For testing ###
from numpy.random import randn
class TempDump(object):
''' Temporary class to dump DataFrame object with custom attributes. Custom attrubutes are
passed in as a dictionary and then temporarily stored upon serialization as _metadict. Upon
deserialization, the attributes and values are re-appended to the DataFrame automatically.'''
dfempty=DataFrame()
defattrs=dir(dfempty)
def print_customattr(df):
'''Formatted output of all custom attributes found in a DataFrame. For all
attributes and methods, use dir(df).'''
metadict=_get_metadict(df)
if len(metadict) > 0:
print '\nFound %s custom attributes:\n'%len(metadict)
print '\n'.join([(k+'\t'+v) for k,v in sorted(metadict.items())])
else:
print 'No custom attributes found'
def _get_metadict(df):
''' Returns dictionary of attributes in a dataframe not found in the default frame.'''
attrs=dir(df)
newattr=[att for att in attrs if att not in defattrs] #if not is type(instancemethod?)
if len(newattr) > 1:
fget=attrgetter(*newattr)
return dict(zip(newattr, fget(df)))
else:
return {}
def df_dumps(df):
''' Save dataframe as a stream into memory.'''
metadict=_get_metadict(df)
return cPickle.dumps(TempDump(df, metadict )) #Dumps writes the object to memory
def df_dump(df, outfile):
''' Save dataframe as a file.'''
outstream=df_dumps(df) #Dumps writes the object to memory
f=open(outfile, 'w') #Should this be 'wb'
f.write(outstream)
f.close()
return None #Should I return none or stream?
def df_load(infile):
'''Returns dataframe from a serialized file '''
f=open(infile, 'r')
tempobj=cPickle.load(f)
f.close()
df=tempobj.dataframe
for attr, value in tempobj._metadict.items():
setattr(df, attr, value)
return df
def df_loads(stream):
''' Returns dataframe from a serialized stream'''
tempobj=cPickle.loads(stream) #loads not load
df=tempobj.dataframe
for attr, value in tempobj._metadict.items():
setattr(df, attr, value)
return df
if __name__ == '__main__':
### Make a random dataframe, add some attributes
df=DataFrame(((randn(3,3))), columns=['a','b','c'])
print_customattr(df)
print 'adding some attributes'
df.name='Billy'
df.junk='in the trunk'
print_customattr(df)
### Serialize into memory
stream=df_dumps(df)
print 'wrote dataframe to memory'
### Restore from memory
dfnew=df_loads(stream)
print 'restored from memory'
print_customattr(dfnew)
### Serialize into file
outfile='dftest.df' #What file extension is commonly used for this?
df_dump(df, outfile)
print 'wrote dataframe to file %s'%outfile
### Restore from file
dfnewnew=df_load(outfile)
print 'Restored from file%s'%outfile
print_customattr(dfnewnew)
| [
7061,
6,
23283,
1634,
7071,
329,
2183,
6060,
19778,
5563,
13,
220,
40402,
284,
3613,
14,
2220,
220,
198,
329,
4088,
15190,
393,
3696,
13,
220,
4362,
530,
2314,
11389,
1096,
6060,
35439,
351,
198,
2183,
12608,
11,
428,
3544,
281,
19898... | 2.814912 | 1,529 |
import pymongo
from sshtunnel import SSHTunnelForwarder | [
11748,
279,
4948,
25162,
198,
6738,
26678,
28286,
4954,
1330,
6723,
6535,
403,
4954,
39746,
263
] | 3.4375 | 16 |
"""Meta-learners for Omniglot experiment.
Based on original implementation:
https://github.com/amzn/metalearn-leap
"""
import random
from abc import abstractmethod
from torch import nn
from torch import optim
import maml
import warpgrad
from leap import Leap
from leap.utils import clone_state_dict
from utils import Res, AggRes
class BaseWrapper(object):
"""Generic training wrapper.
Arguments:
criterion (func): loss criterion to use.
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
"""
@abstractmethod
def _partial_meta_update(self, loss, final):
"""Meta-model specific meta update rule.
Arguments:
loss (nn.Tensor): loss value for given mini-batch.
final (bool): whether iteration is the final training step.
"""
NotImplementedError('Implement in meta-learner class wrapper.')
@abstractmethod
def _final_meta_update(self):
"""Meta-model specific meta update rule."""
NotImplementedError('Implement in meta-learner class wrapper.')
def run_tasks(self, tasks, meta_train):
"""Train on a mini-batch tasks and evaluate test performance.
Arguments:
tasks (list, torch.utils.data.DataLoader): list of task-specific
dataloaders.
meta_train (bool): whether current run in during meta-training.
"""
results = []
for task in tasks:
task.dataset.train()
trainres = self.run_task(task, train=True, meta_train=meta_train)
task.dataset.eval()
valres = self.run_task(task, train=False, meta_train=False)
results.append((trainres, valres))
##
results = AggRes(results)
# Meta gradient step
if meta_train:
self._final_meta_update()
return results
def run_task(self, task, train, meta_train):
"""Run model on a given task.
Arguments:
task (torch.utils.data.DataLoader): task-specific dataloaders.
train (bool): whether to train on task.
meta_train (bool): whether to meta-train on task.
"""
optimizer = None
if train:
self.model.init_adaptation()
self.model.train()
optimizer = self.optimizer_cls(
self.model.parameters(), **self.optimizer_kwargs)
else:
self.model.eval()
return self.run_batches(
task, optimizer, train=train, meta_train=meta_train)
def run_batches(self, batches, optimizer, train=False, meta_train=False):
"""Iterate over task-specific batches.
Arguments:
batches (torch.utils.data.DataLoader): task-specific dataloaders.
optimizer (torch.nn.optim): optimizer instance if training is True.
train (bool): whether to train on task.
meta_train (bool): whether to meta-train on task.
"""
device = next(self.model.parameters()).device
res = Res()
N = len(batches)
for n, (input, target) in enumerate(batches):
input = input.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# Evaluate model
prediction = self.model(input)
loss = self.criterion(prediction, target)
res.log(loss=loss.item(), pred=prediction, target=target)
# TRAINING #
if not train:
continue
final = (n+1) == N
loss.backward()
if meta_train:
self._partial_meta_update(loss, final)
optimizer.step()
optimizer.zero_grad()
if final:
break
###
res.aggregate()
return res
class WarpGradWrapper(BaseWrapper):
"""Wrapper around WarpGrad meta-learners.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
meta_kwargs (dict): kwargs to pass to meta-learner upon construction.
criterion (func): loss criterion to use.
"""
def run_task(self, task, train, meta_train):
"""Run model on a given task, first adapting and then evaluating"""
if meta_train and train:
# Register new task in buffer.
self.model.register_task(task)
self.model.collect()
else:
# Make sure we're not collecting non-meta-train data
self.model.no_collect()
optimizer = None
if train:
# Initialize model adaptation
self.model.init_adaptation()
optimizer = self.optimizer_cls(
self.model.optimizer_parameter_groups(),
**self.optimizer_kwargs)
if self.model.collecting and self.model.learn_optimizer:
# Register optimiser to collect potential momentum buffers
self.model.register_optimizer(optimizer)
else:
self.model.eval()
return self.run_batches(
task, optimizer, train=train, meta_train=meta_train)
class LeapWrapper(BaseWrapper):
"""Wrapper around the Leap meta-learner.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
meta_kwargs (dict): kwargs to pass to meta-learner upon construction.
criterion (func): loss criterion to use.
"""
class MAMLWrapper(object):
"""Wrapper around the MAML meta-learner.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
def run_meta_batch(self, meta_batch, meta_train):
"""Run on meta-batch.
Arguments:
meta_batch (list): list of task-specific dataloaders
meta_train (bool): meta-train on batch.
"""
loss, results = self.meta(meta_batch,
return_predictions=False,
return_results=True,
create_graph=meta_train)
if meta_train:
loss.backward()
self.meta_optimizer.step()
self.meta_optimizer.zero_grad()
return results
class NoWrapper(BaseWrapper):
"""Wrapper for baseline without any meta-learning.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
criterion (func): loss criterion to use.
"""
class _FOWrapper(BaseWrapper):
"""Base wrapper for First-order MAML and Reptile.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = None
class ReptileWrapper(_FOWrapper):
"""Wrapper for Reptile.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = True
class FOMAMLWrapper(_FOWrapper):
"""Wrapper for FOMAML.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
meta_optimizer_cls: meta optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
meta_optimizer_kwargs (dict): kwargs to pass to meta optimizer upon
construction.
criterion (func): loss criterion to use.
"""
_all_grads = False
class FtWrapper(BaseWrapper):
"""Wrapper for Multi-headed finetuning.
This wrapper differs from others in that it blends batches from all tasks
into a single epoch.
Arguments:
model (nn.Module): classifier.
optimizer_cls: optimizer class.
optimizer_kwargs (dict): kwargs to pass to optimizer upon construction.
criterion (func): loss criterion to use.
"""
@staticmethod
def gen_multitask_batches(tasks, train):
"""Generates one batch iterator across all tasks."""
iterator_id = 0
all_batches = []
for task_id, iterator in tasks:
if train:
iterator.dataset.train()
else:
iterator.dataset.eval()
for batch in iterator:
all_batches.append((iterator_id, task_id, batch))
iterator_id += 1
if train:
random.shuffle(all_batches)
return all_batches
def run_multitask(self, batches, train):
"""Train on task in multi-task mode
This is equivalent to the run_task method but differs in that
batches are assumed to be mixed from different tasks.
"""
N = len(batches)
if train:
self.model.train()
else:
self.model.eval()
device = next(self.model.parameters()).device
res = {}
for n, (iterator_id, task_id, (input, target)) in enumerate(batches):
input = input.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
prediction = self.model(input, task_id)
loss = self.criterion(prediction, target)
if iterator_id not in res:
res[iterator_id] = Res()
res[iterator_id].log(loss=loss.item(),
pred=prediction,
target=target)
# TRAINING #
if not train:
continue
final = (n + 1) == N
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
if final:
break
###
res = [r[1] for r in sorted(res.items(), key=lambda r: r[0])]
for r in res:
r.aggregate()
return res
| [
37811,
48526,
12,
35720,
364,
329,
31816,
328,
26487,
6306,
13,
198,
15001,
319,
2656,
7822,
25,
198,
5450,
1378,
12567,
13,
785,
14,
321,
47347,
14,
28469,
451,
77,
12,
293,
499,
198,
37811,
198,
11748,
4738,
198,
6738,
450,
66,
13... | 2.262348 | 4,940 |
# Coinbase Pro library:
# https://github.com/danpaquin/coinbasepro-python
#curl "https://api.pro.coinbase.com/products/BTC-USD/candles?start=2021-01-01T12:00:00&end=2021-01-12T12:00:00&granularity=3600"
import cbpro
import numpy as np
import pandas as pd
import logging
from datetime import datetime, timedelta
import json
#from IPython.core.debugger import set_trace | [
2,
45802,
1041,
5888,
25,
198,
2,
3740,
1378,
12567,
13,
785,
14,
25604,
8957,
21915,
14,
3630,
12093,
538,
305,
12,
29412,
628,
198,
2,
66,
6371,
366,
5450,
1378,
15042,
13,
1676,
13,
3630,
8692,
13,
785,
14,
29498,
14,
35964,
12... | 2.729927 | 137 |
from django.db import models
from django.contrib.auth.models import User
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.utils.timezone import now
'''
def votes(self):
num_submissions = Submission.objects.filter(uploaded_by=self).count()
return num_submissions
'''
# with_votes = Link.objects.count()
# with_votes = Link.objects.filter(with_votes__gt=0).annotate(votes=Count('with_votes')).order_by('-votes')
# Signal while saving user
from django.db.models.signals import post_save
post_save.connect(create_profile, sender=User)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
2764,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
... | 2.890995 | 211 |
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
import urllib.request,re,os
import config | [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
5313,
32103,
21321,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
2956,
297,
571,
13,
25927,
11,
260,
1... | 3.434783 | 46 |
from typing import Optional, List, Set
from fastapi import FastAPI
from pydantic import BaseModel, HttpUrl
app = FastAPI()
@app.put('/items/{item_id}') | [
6738,
19720,
1330,
32233,
11,
7343,
11,
5345,
198,
198,
6738,
3049,
15042,
1330,
12549,
17614,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
367,
29281,
28165,
628,
198,
1324,
796,
12549,
17614,
3419,
198,
198,
31,
1324,
13,
1996,
... | 3.039216 | 51 |
import cv2
import os
import time
import numpy as np
from keras import backend as K
from keras.models import load_model
from yad2k.models.keras_yolo import yolo_eval, yolo_head
| [
11748,
269,
85,
17,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
1330,
30203,
355,
509,
198,
6738,
41927,
292,
13,
27530,
1330,
3440,
62,
19849,
198,
198,
6738,
331,
324,
17,
74,
13,
... | 2.920635 | 63 |
"""Describe logbook events."""
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import callback
from .const import DOMAIN, DOOR_STATION, DOOR_STATION_EVENT_ENTITY_IDS
@callback
def async_describe_events(hass, async_describe_event):
"""Describe logbook events."""
@callback
def async_describe_logbook_event(event):
"""Describe a logbook event."""
_, doorbird_event = event.event_type.split("_", 1)
return {
"name": "Doorbird",
"message": f"Event {event.event_type} was fired.",
"entity_id": hass.data[DOMAIN][DOOR_STATION_EVENT_ENTITY_IDS].get(
doorbird_event, event.data.get(ATTR_ENTITY_ID)
),
}
domain_data = hass.data[DOMAIN]
for config_entry_id in domain_data:
door_station = domain_data[config_entry_id][DOOR_STATION]
for event in door_station.doorstation_events:
async_describe_event(
DOMAIN, f"{DOMAIN}_{event}", async_describe_logbook_event
)
| [
37811,
24564,
4892,
2604,
2070,
2995,
526,
15931,
198,
198,
6738,
1363,
562,
10167,
13,
9979,
1330,
5161,
5446,
62,
3525,
9050,
62,
2389,
198,
6738,
1363,
562,
10167,
13,
7295,
1330,
23838,
198,
198,
6738,
764,
9979,
1330,
24121,
29833,... | 2.245203 | 469 |
"""
WSGI config for gigfinder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os,sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
#path a donde esta el manage.py de nuestro proyecto Django
sys.path.append('/home/ubuntu/workspace/gigfinder')
os.environ.setdefault("LANG", "en_US.UTF-8")
os.environ.setdefault("LC_ALL", "en_US.UTF-8")
activate_this = '/home/ubuntu/workspace/myvenv/bin/activate_this.py'
application = get_wsgi_application() | [
37811,
198,
19416,
18878,
4566,
329,
12526,
22805,
1628,
13,
198,
198,
1026,
32142,
262,
25290,
18878,
869,
540,
355,
257,
8265,
12,
5715,
7885,
3706,
7559,
31438,
15506,
13,
198,
198,
1890,
517,
1321,
319,
428,
2393,
11,
766,
198,
54... | 2.800847 | 236 |
"""Layer definitions.
This could also be done in the setup file itself.
"""
import os
from zope.app.testing.functional import ZCMLLayer
# We define a ZCML test layer. ZCML layers are special as they define
# some setup code for creation of empty ZODBs and more. If you only
# want some ZCML registrations to be done, you can use it like so:
FunctionalLayer1 = ZCMLLayer(
# As first argument we need the absolute path of a ZCML file
os.path.join(os.path.dirname(__file__), 'ftesting.zcml'),
# Second argument is the module, where the layer is defined.
__name__,
# This is the name of our layer. It can be an arbitrary string.
'FunctionalLayer1',
# By default ZCML layers are not torn down. You should make sure,
# that any registrations you do in your ZCML are removed in a
# tearDown method if you specify this parameter to be `True`. This
# parameter is optional.
allow_teardown=True)
class UnitLayer1(object):
"""This represents a layer.
A layer is a way to have common setup and teardown that happens
once for a whole group of tests.
It must be an object with a `setUp` and a `tearDown` method, which
are run once before or after all the tests applied to a layer
respectively.
Optionally you can additionally define `testSetUp` and
`testTearDown` methods, which are run before and after each single
test.
This class is not instantiated. Therefore we use classmethods.
"""
@classmethod
def setUp(self):
"""This gets run once for the whole test run, or at most once per
TestSuite that depends on the layer.
(The latter can happen if multiple suites depend on the layer
and the testrunner decides to tear down the layer after first
suite finishes.)
"""
@classmethod
def tearDown(self):
"""This gets run once for the whole test run, or at most
once per TestSuite that depends on the layer,
after all tests in the suite have finished.
"""
@classmethod
def testSetUp(self):
"""This method is run before each single test in the current
layer. It is optional.
"""
print " Running testSetUp of UnitLayer1"
@classmethod
def testTearDown(self):
"""This method is run before each single test in the current
layer. It is optional.
"""
print " Running testTearDown of UnitLayer1"
| [
37811,
49925,
17336,
13,
198,
198,
1212,
714,
635,
307,
1760,
287,
262,
9058,
2393,
2346,
13,
198,
37811,
198,
11748,
28686,
198,
6738,
1976,
3008,
13,
1324,
13,
33407,
13,
45124,
1330,
1168,
24187,
3069,
2794,
198,
198,
2,
775,
8160,... | 2.980559 | 823 |
from tqdm import tqdm
import time
import platform
# ops is a list of AsyncOp object
| [
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
640,
198,
11748,
3859,
198,
198,
2,
39628,
318,
257,
1351,
286,
1081,
13361,
18257,
2134,
628
] | 3.185185 | 27 |
import matplotlib.pyplot as plt
from numba import njit
import numpy as np
import pandas as pd
from scipy.special import gamma, gammainc
import takahe
from tqdm import tqdm
def MadauDickinson(Z, z):
"""Computes the Madau & Dickinson SFRD at metallicity Z and redshift z.
Implements the SFRD given by eqn(15) of [1]. Returns a value in
M_sun / yr / Mpc^3.
Assumes Z_sun = 0.020, and that input metallicity is NOT already
measured relative to this.
[1] https://www.annualreviews.org/doi/pdf/10.1146/annurev-astro-081811-125615
Arguments:
Z {float} -- The metallicity under consideration.
z {float} -- The redshift under consideration.
Returns:
{float} -- The SFRD at metallicity Z and redshift z.
"""
GAM = gammainc(0.84, (Z / 0.02)**2 * 10**(0.3*z))
NUM = 0.015 * (1+z)**2.7
DEM = (1+((1+z)/2.9)**5.6)
SFRDi = GAM * (NUM / DEM)
return SFRDi
def MilkyWay(Z, z):
"""Implements the SFR equation from (Wiktorowicz et. al. 2020) [1]
for the Milky Way Galaxy.
Piecewise function for the SFR in the Milky Way galaxy. Assumes a
four-component formalism - consisting of a thin disk, thick disk,
bulge, and halo. Precise values of the SFR come from
(Olejak et. al 2019) [2].
[1] https://arxiv.org/pdf/2006.08317.pdf
[2] https://arxiv.org/pdf/1908.08775.pdf
Arguments:
Z {float} -- The metallicity under consideration.
z {float} -- The redshift under consideration.
Returns:
{float} -- The SFRD at metallicity Z and redshift z.
"""
SFR_arr = np.zeros(len(z))
for i in range(len(z)):
zi = z[i]
tL = takahe.helpers.redshift_to_lookback(zi)
Z_sun = takahe.constants.SOLAR_METALLICITY
SFR = 0
if Z == Z_sun:
# Thin Disk
if 0 <= tL <= 10:
SFR += 4.7
# Bulge
if 0 <= tL <= 10:
SFR += 0.45
elif 10 <= tL <= 12:
SFR += 2.3
elif Z == Z_sun / 10:
# Thick Disk
if 9 <= tL <= 11:
SFR += 2.5
elif Z == 1e-4:
# Halo
if 10 <= tL <= 12:
SFR += 0.5
SFR_arr[i] += SFR
return SFR_arr
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
997,
7012,
1330,
299,
45051,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
629,
541,
88,
13,
20887,
1330,
34236,
11,
308,
6475,... | 2.065158 | 1,105 |
"""Intercept log messages from the used libraries and pass them to `loguru`.
See https://github.com/Delgan/loguru
"""
import logging
from loguru import logger
# class PropagateHandler(logging.Handler):
# """Send events from loguru to standard logging"""
# def emit(self, record):
# logging.getLogger(record.name).handle(record)
#
#
# logger.add(PropagateHandler(), format="{message}")
class InterceptHandler(logging.Handler):
"""Send events from standard logging to loguru."""
def emit(self, record: logging.LogRecord) -> None:
"""See :meth:`logging.Handler.emit`.
Args:
record: data to log
"""
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = logging.getLevelName(record.levelno)
# Find caller from where originated the logged message
frame = logging.currentframe()
depth = 2
while frame.f_code.co_filename == logging.__file__:
frame = frame.f_back # type: ignore
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level, record.getMessage()
)
log = logging.getLogger()
# log.setLevel(0)
log.addHandler(InterceptHandler())
# logging.basicConfig(handlers=[InterceptHandler()], level=0, style='{')
| [
37811,
9492,
984,
2604,
6218,
422,
262,
973,
12782,
290,
1208,
606,
284,
4600,
6404,
14717,
44646,
198,
198,
6214,
3740,
1378,
12567,
13,
785,
14,
13856,
1030,
14,
6404,
14717,
198,
198,
37811,
198,
11748,
18931,
198,
198,
6738,
2604,
... | 2.590317 | 537 |
from .board import Board, demo_board
from .ticket import Ticket, load_tickets, demo_ticket
| [
6738,
764,
3526,
1330,
5926,
11,
13605,
62,
3526,
198,
6738,
764,
43350,
1330,
24014,
11,
3440,
62,
83,
15970,
11,
13605,
62,
43350,
198
] | 3.64 | 25 |
# Example 1
# This example show how to encoding matrix in graph7 format
import graph7 as g7
import random
order = 10
# Matrix contains only 0 and 1, so for encoding a matrix on one element
# needed only 1 bit
rand_mat = [[random.randint(0, 1) for _ in range(order)] for _ in range(order)]
directed = g7.encode(rand_mat)
# We leave only the upper triangle of the matrix
for i in range(order):
for j in range(i, order):
if i == j:
rand_mat[i][j] = 0
continue
rand_mat[j][i] = rand_mat[i][j]
undirected = g7.encode(rand_mat)
# Compare
print(directed)
print(undirected)
| [
2,
17934,
352,
198,
2,
770,
1672,
905,
703,
284,
21004,
17593,
287,
4823,
22,
5794,
198,
198,
11748,
4823,
22,
355,
308,
22,
198,
11748,
4738,
198,
198,
2875,
796,
838,
198,
198,
2,
24936,
4909,
691,
657,
290,
352,
11,
523,
329,
... | 2.599156 | 237 |
# -*- coding: utf-8 -*-
# vim: set ts=4 et
from datetime import datetime, timedelta
import re
from sqlalchemy import or_, and_, func
from pybot.plugin import *
from . import models
from .models import *
RETRY_INTERVAL = 3600
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
900,
40379,
28,
19,
2123,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
302,
198,
6738,
44161,
282,
26599,
1330,
393,
62,
1... | 2.875 | 80 |
# Token types
# EOF (end-of-file) token -> indicates that no more input is left for lexical analysis.
# Lexical Analysis: Breaking input strings into tokens -> scanner, tokenizer, lexical analyzer, lexer
# Lexeme -> a sequence of characters that form a token. This is for multidigit for example. Here we implement the intger method for this reason.
# Expr method -> finds structure via the stream of tokens from get_next_token() method. Then generates results by computing.
# Parsing -> recognizing a phrase in a stream of tokens -> Parser
# Expr -> Does both parsing and interpreting.
# Here are the guidelines that we will use to convert the grammar to source code. By following them, you can literally
# translate the grammar to a working parser:
# Each rule, R, defined in the grammar, becomes a method with the same name, and references to that rule become a method call: R().
# The body of the method follows the flow of the body of the rule using the very same guidelines.
# Alternatives (a1 | a2 | aN) become an if-elif-else statement
# An optional grouping (โฆ)* becomes a while statement that can loop over zero or more times
# Each token reference T becomes a call to the method eat: eat(T). The way the eat method works is that it consumes the token T if it matches the
# current lookahead token, then it gets a new token from the lexer and assigns that token to the current_token internal variable.
INTEGER, PLUS, MINUS, MUL, DIV, LPAREN, RPAREN, EOF = (
"INTEGER",
"PLUS",
"MINUS",
"MUL",
"DIV",
"(",
")",
"EOF",
)
if __name__ == "__main__":
main()
| [
2,
29130,
3858,
198,
198,
2,
412,
19238,
357,
437,
12,
1659,
12,
7753,
8,
11241,
4613,
9217,
326,
645,
517,
5128,
318,
1364,
329,
31191,
605,
3781,
13,
198,
2,
17210,
605,
14691,
25,
24942,
5128,
13042,
656,
16326,
4613,
27474,
11,
... | 3.546256 | 454 |
#!/usr/bin/env python3
import argparse
import datetime
import re
VERSION_HEADER_RE = re.compile(r'(?P<version>\d+\.\d+\.\d+) ?- ?(?P<date>\d{4}-\d{2}-\d{2})')
CHANGE_TYPE_RE = re.compile(r'^### ?(?P<change_type>.*)')
SKIP_LINE_RE = re.compile(r'^-{2,}|^$')
LIST_LINE_RE = re.compile(r'^[-*] ?(?P<line_item>.*)')
CONTINUATION_LINE_RE = re.compile(r'^ {1,4}.*')
# This date format doesn't include the timezone, since we just hard-code that
# to +0000
PARTIAL_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S'
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
198,
43717,
62,
37682,
1137,
62,
2200,
796,
302,
13,
5589,
576,
7,
81,
6,
7,
30,
47,
27,
9641,
29,
59,
... | 2.037736 | 265 |
from django.shortcuts import render
from .models import UserProfile, Friend, RoomInstance
from django.contrib.auth.models import User, Group
from django.contrib.sessions.models import Session
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from registration.views import RegistrationView
from django.db.models import Q
from datetime import datetime,timedelta
from django.contrib.gis.geoip2 import GeoIP2
from geopy import geocoders
from registration.views import RegistrationView
from django.core.mail import send_mail
from lazysignup.utils import is_lazy_user
from lazysignup.decorators import allow_lazy_user
from django.template import RequestContext
from django.shortcuts import render_to_response
import json
import ast
import random
import string
mapapikey = ('<script src="https://maps.googleapis.com/maps/api/'
'js?key=AIzaSyAvDRB7PnQbIVNtRHf3x-MTB5y-3OXD1xg&libraries=places">async defer> </script>')
"""
HOME
"""
@login_required(login_url='/')
"""
SPLASH
"""
"""
MAP
"""
@allow_lazy_user
"""
Removes old entries
"""
"""
PREFERENCES
"""
@login_required
"""
FRIENDS
"""
@login_required
"""
CHAT
"""
"""
PROFILE
"""
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
764,
27530,
1330,
11787,
37046,
11,
9182,
11,
10096,
33384,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
11,
4912,
198,
6738,
42625,
14208,
13,
3... | 2.980392 | 408 |
import falcon
import json
from helper.log import logger
from helper.utils import Constants
| [
11748,
24215,
1102,
198,
11748,
33918,
198,
6738,
31904,
13,
6404,
1330,
49706,
198,
6738,
31904,
13,
26791,
1330,
4757,
1187,
628
] | 4.181818 | 22 |
# encoding: utf-8
# pylint: disable=C0103
"""Utility functions to deal with audio."""
import librosa
__all__ = ['load']
# simply use librosa.load (this may change in the future)
load = librosa.load
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
279,
2645,
600,
25,
15560,
28,
34,
486,
3070,
198,
37811,
18274,
879,
5499,
284,
1730,
351,
6597,
526,
15931,
198,
198,
11748,
9195,
4951,
64,
198,
198,
834,
439,
834,
796,
37250,
2220,
2052... | 2.830986 | 71 |
import uvicorn
from fastapi import FastAPI
from email_sender.service.email_sender import EmailSender
from flights_tracker.routes import router
from flights_tracker.services.weekend_flights_service import WeekendFlightsService
app = FlightsTracker()
app.include_router(router)
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=5000, log_level="info")
| [
11748,
334,
25531,
1211,
198,
6738,
3049,
15042,
1330,
12549,
17614,
198,
198,
6738,
3053,
62,
82,
2194,
13,
15271,
13,
12888,
62,
82,
2194,
1330,
9570,
50,
2194,
198,
6738,
13956,
62,
2213,
10735,
13,
81,
448,
274,
1330,
20264,
198,
... | 2.944882 | 127 |
import os
import celery
import requests
from app.logic.pipeline import process, load_model, predict
CELERY_BROKER = os.environ.get('CELERY_BROKER')
CELERY_BACKEND = os.environ.get('CELERY_BACKEND')
app = celery.Celery('tasks', broker=CELERY_BROKER, backend=CELERY_BACKEND)
@app.task(base=Model)
Model = app.register_task(Model())
| [
11748,
28686,
198,
198,
11748,
18725,
1924,
198,
11748,
7007,
198,
6738,
598,
13,
6404,
291,
13,
79,
541,
4470,
1330,
1429,
11,
3440,
62,
19849,
11,
4331,
198,
198,
34,
3698,
19664,
62,
11473,
11380,
1137,
796,
28686,
13,
268,
2268,
... | 2.541353 | 133 |
import tensorflow as tf
tf.enable_eager_execution();
| [
11748,
11192,
273,
11125,
355,
48700,
198,
27110,
13,
21633,
62,
68,
3536,
62,
18558,
1009,
9783,
628,
198
] | 2.894737 | 19 |
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
| [
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
11,
651,
62,
15252,
62,
273,
62,
26429,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198
] | 3.433333 | 30 |
import db
import mfrc522
import network
import speaker
import squeezebox
import squeezebox
import time
import ujson
from machine import Pin, PWM
if __name__ == '__main__':
main()
| [
11748,
20613,
198,
11748,
285,
69,
6015,
49542,
198,
11748,
3127,
198,
11748,
10834,
198,
11748,
13170,
38130,
1140,
198,
11748,
13170,
38130,
1140,
198,
11748,
640,
198,
11748,
334,
17752,
198,
6738,
4572,
1330,
13727,
11,
350,
22117,
62... | 3.166667 | 60 |
PARTIAL_MOCK_CONFIG = {
"api":{
"plugins":{
"lots.core": None,
"transferring":{
"plugins":{
"lots.transferring":None
}
},
}
}
}
| [
30709,
12576,
62,
44,
11290,
62,
10943,
16254,
796,
1391,
198,
220,
220,
220,
366,
15042,
1298,
90,
198,
220,
220,
220,
220,
220,
366,
37390,
1298,
90,
198,
220,
220,
220,
220,
220,
220,
220,
366,
75,
1747,
13,
7295,
1298,
6045,
1... | 1.619048 | 126 |
#coding:utf-8
'''
filename:floatrange.py
generate a sequence of parmeters with floating-point numbers.
'''
import itertools
f = frange(1.2,9)
print(list(f))
f = frange(1.2)
print(list(f))
f = frange(1.2,9,-1)
print(list(f))
f = frange(1.2,None,-1)
print(list(f))
f = frange(1.2,None,0)
print(list(f))
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
198,
7061,
6,
198,
220,
220,
220,
29472,
25,
22468,
9521,
13,
9078,
198,
220,
220,
220,
7716,
257,
8379,
286,
1582,
4164,
364,
351,
12462,
12,
4122,
3146,
13,
198,
7061,
6,
628,
198,
11748,
3... | 2.10828 | 157 |
import argparse
import os
import re
import sys
SOURCE_FMT = """// AUTOGENERATED
// Generated by spirv_num_to_cpp.py, from '{file_name}'
#include <stddef.h>
#include <stdint.h>
namespace {namespace} {{
const volatile uint32_t {var_name}[] = {{
{raw_lines}
}};
const volatile size_t {var_name}_count = {total_data_count};
}} // {namespace}
// AUTOGENERATED
"""
HEADER_VARS_FMT = "extern const uint32_t {var_name}[];\nextern const size_t {var_name}_count;"
HEADER_FMT = """// AUTOGENERATED
// Generated by spirv_num_to_cpp.py
#pragma once
#include <stddef.h>
#include <stdint.h>
namespace {namespace} {{
{var_pairs}
}} // {namespace}
// AUTOGENERATED
"""
if __name__ == '__main__':
sys.exit(main())
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
628,
198,
47690,
62,
37,
13752,
796,
37227,
1003,
47044,
7730,
1677,
1137,
11617,
198,
1003,
2980,
515,
416,
9158,
85,
62,
22510,
62,
1462,
62,
20322,
13,
9078,
... | 2.392739 | 303 |
import json
from json import JSONDecodeError
import subprocess as sp
from traceback import print_exc
from ..dbi import WeewxDB
from flask import Flask, request
def create_app(database):
"""
:param database: Name of database to save into
"""
app = Flask(__name__)
weewx_db = WeewxDB(database)
@app.route('/', methods=['POST'])
@app.route('/data', methods=['POST'])
@app.route('/')
return app
| [
11748,
33918,
198,
6738,
33918,
1330,
19449,
10707,
1098,
12331,
198,
11748,
850,
14681,
355,
599,
198,
6738,
12854,
1891,
1330,
3601,
62,
41194,
198,
198,
6738,
11485,
67,
8482,
1330,
775,
413,
87,
11012,
198,
6738,
42903,
1330,
46947,
... | 2.77707 | 157 |
"""
Modified from https://github.com/facebookresearch/DomainBed
@author: Baixu Chen
@contact: cbx_99_hasta@outlook.com
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
class InvariancePenaltyLoss(nn.Module):
r"""Invariance Penalty Loss from `Invariant Risk Minimization <https://arxiv.org/pdf/1907.02893.pdf>`_.
We adopt implementation from `DomainBed <https://github.com/facebookresearch/DomainBed>`_. Given classifier
output :math:`y` and ground truth :math:`labels`, we split :math:`y` into two parts :math:`y_1, y_2`, corresponding
labels are :math:`labels_1, labels_2`. Next we calculate cross entropy loss with respect to a dummy classifier
:math:`w`, resulting in :math:`grad_1, grad_2` . Invariance penalty is then :math:`grad_1*grad_2`.
Inputs:
- y: predictions from model
- labels: ground truth
Shape:
- y: :math:`(N, C)` where C means the number of classes.
- labels: :math:`(N, )` where N mean mini-batch size
"""
| [
37811,
198,
5841,
1431,
422,
3740,
1378,
12567,
13,
785,
14,
19024,
34033,
14,
43961,
45896,
198,
31,
9800,
25,
8999,
844,
84,
12555,
198,
31,
32057,
25,
269,
65,
87,
62,
2079,
62,
71,
40197,
31,
448,
5460,
13,
785,
198,
37811,
19... | 2.755906 | 381 |
#!/usr/bin/env python
import parallel
import unittest
import thread
import testing_lib
import time
import uuid
import config
from multiprocessing import RawValue
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
10730,
198,
11748,
555,
715,
395,
198,
11748,
4704,
198,
11748,
4856,
62,
8019,
198,
11748,
640,
198,
11748,
334,
27112,
198,
11748,
4566,
198,
6738,
18540,
305,
919,
278,
... | 3.149254 | 67 |
# Countdown exercise: read an integer from the user, print it counting
# down to 1, then announce "Now!".
print("Are you ready?")
count = int(input("Please type in a number: "))
# NOTE(review): a negative entry never reaches 0 and loops forever —
# confirm whether input is guaranteed non-negative.
while count != 0:
    print(count)
    count -= 1
print("Now!")
2,
13268,
262,
1430,
198,
4798,
7203,
8491,
345,
3492,
1701,
8,
198,
17618,
796,
493,
7,
15414,
7203,
5492,
2099,
287,
257,
1271,
25,
366,
4008,
198,
4514,
1271,
14512,
657,
25,
198,
220,
220,
220,
3601,
7,
17618,
8,
198,
220,
220... | 2.928571 | 56 |
import datetime
import os
import fitz # fitzๅฐฑๆฏpip install PyMuPDF -i https://mirrors.aliyun.com/pypi/simple (PyMuPDF-1.18.17)
if __name__ == "__main__":
path = r'D:\่ๅ็งๆ-็ ๅ\2021 ่ๅ็งๆAIๅนณๅฐ\OCR\ไธๅธๅ
ฌๅธ่ดขๆฅ.pdf'
# path = r'D:\่ๅ็งๆ-็ ๅ\2021 ่ๅ็งๆAIๅนณๅฐ\OCR\็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ.pdf'
pdf_to_img(path)
# list_img = ['็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ3.jpg', 'Inked็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ-4.jpg', '็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ6.jpg', '็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ9.jpg',
# '็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ4.jpg', ]
# list_img = ['็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ3.jpg', 'Inked็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ6.jpg', '็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ9.jpg',]
#
# path2 = r'D:\่ๅ็งๆ-็ ๅ\2021 ่ๅ็งๆAIๅนณๅฐ\OCR\็ญๅๅ
ฌๅธ2016ๅนดๅฎก่ฎกๆฅๅ222.pdf'
# list_img2 = [f'D:\่ๅ็งๆ-็ ๅ\\2021 ่ๅ็งๆAIๅนณๅฐ\\OCR\\{item}' for item in list_img]
# img_to_pdf(list_img2, path2)
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
198,
11748,
4197,
89,
220,
1303,
4197,
89,
22887,
109,
42468,
79,
541,
2721,
9485,
33239,
20456,
532,
72,
3740,
1378,
10793,
5965,
13,
7344,
88,
403,
13,
785,
14,
79,
4464,
72,
14,
36439,
3... | 1.117378 | 656 |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.b (the "License");
# you may not use this file except in compliance with the License.
#
import asyncio
from asyncio import wait
from userbot import BOTLOG_CHATID, BOTLOG, CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.spam")
@register(outgoing=True, pattern="^.tspam")
@register(outgoing=True, pattern="^.bigspam")
@register(outgoing=True, pattern="^.gangsta$")
@register(outgoing=True, pattern="^.nikal$")
@register(outgoing=True, pattern="^.repeat")
@register(outgoing=True, pattern="^.repeats")
@register(outgoing=True, pattern="^.picspam")
@register(outgoing=True, pattern="^.delayspam")
CMD_HELP.update({
"spam": ".tspam <text>\
\nUsage: Spam the text letter by letter.\
\n\n.spam <count> <text>\
\nUsage: Your regular spammer stuff :P\
\n\n.bigspam <count> <text>\
\nUsage: .spam on steroids !!\
\n\n.picspam <count> <link>\
\nUsage: As if text spam was not enough !!\
\n\n.delayspam <delay> <count> <text>\
\nUsage: .bigspam but slower.\
\n\n.gangsta\
\nUsage: Gives you Gengster Feeling, btw Spyder is real Gangsta.\
\n\n.nikal\
\nUsage: Prevents Lawda Lassun.\
\n\n\nNOTE : I am not responsible if you get banned for spamming!"
})
| [
2,
15069,
357,
34,
8,
13130,
383,
12281,
5303,
417,
6794,
5834,
11419,
13,
198,
2,
198,
2,
49962,
739,
262,
12281,
5303,
417,
6794,
5094,
13789,
11,
10628,
352,
13,
65,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428... | 2.418564 | 571 |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The IP allocator maintains the life cycle of assigned IP addresses.
The IP allocator accepts IP blocks (range of IP addresses), and supports
allocating and releasing IP addresses from the assigned IP blocks. Note
that an IP address is not immediately made available for allocation right
after release: it is "reserved" for the same client for a certain period of
time to ensure that 1) an observer, e.g. pipelined, that caches IP states has
enough time to pull the updated IP states; 2) IP packets intended for the
old client will not be unintentionally routed to a new client until the old
TCP connection expires.
To support this semantic, an IP address can have the following states
during it's life cycle in the IP allocator:
FREE: IP is available for allocation
ALLOCATED: IP is allocated for a client.
RELEASED: IP is released, but still reserved for the client
REAPED: IPs are periodically reaped from the RELEASED state to the
REAPED state, and at the same time a timer is set. All REAPED state
IPs are freed once the time goes off. The purpose of this state is
to age IPs for a certain period of time before freeing.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
from ipaddress import ip_address, ip_network
from typing import Dict, List, Set
from random import choice
from magma.mobilityd.ip_descriptor import IPDesc, IPState
DEFAULT_IP_RECYCLE_INTERVAL = 15
| [
37811,
198,
15269,
12131,
383,
2944,
2611,
46665,
13,
198,
198,
1212,
2723,
2438,
318,
11971,
739,
262,
347,
10305,
12,
7635,
5964,
1043,
287,
262,
198,
43,
2149,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
198,
... | 3.816568 | 507 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""learning rate generator"""
import numpy as np
from mindspore.common.tensor import Tensor
def get_poly_lr(global_step, lr_init, lr_end, lr_max, warmup_steps, total_steps, poly_power):
    """Build a polynomial-decay learning-rate schedule with linear warmup.

    The schedule rises linearly from ``lr_init`` to ``lr_max`` over
    ``warmup_steps`` steps, then decays polynomially (exponent
    ``poly_power``) from ``lr_max`` down to ``lr_end`` over the remaining
    steps.  Negative rates are clamped to 0.

    Args:
        global_step (int): index of the first step to keep; the leading
            ``global_step`` entries are dropped so training can resume
            mid-schedule.
        lr_init (float): learning rate at the start of warmup.
        lr_end (float): learning rate at the end of decay.
        lr_max (float): peak learning rate reached after warmup.
        warmup_steps (int): number of warmup steps (0 disables warmup).
        total_steps (int): total number of training steps.
        poly_power (int): exponent of the polynomial decay.

    Returns:
        np.ndarray: float32 array of per-step learning rates, starting
        at ``global_step``.
    """
    # Per-step linear increment during warmup; zero when warmup is disabled.
    if warmup_steps != 0:
        warmup_delta = (float(lr_max) - float(lr_init)) / float(warmup_steps)
    else:
        warmup_delta = 0
    rates = []
    for step in range(total_steps):
        if step < warmup_steps:
            rate = float(lr_init) + warmup_delta * float(step)
        else:
            # Fraction of the decay phase still remaining, in [0, 1].
            remaining = 1.0 - (float(step) - float(warmup_steps)) / (float(total_steps) - float(warmup_steps))
            rate = float(lr_max - lr_end) * remaining ** poly_power + lr_end
        # Clamp: never emit a negative learning rate.
        if rate < 0.0:
            rate = 0.0
        rates.append(rate)
    schedule = np.array(rates, dtype=np.float32)
    # Drop already-consumed steps so a resumed run continues the schedule.
    return schedule[global_step:]
# bert kfac hyperparam setting
| [
2,
15069,
12131,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 2.663144 | 757 |
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent))
from cwesecurity.cwe import Cwe | [
11748,
25064,
198,
6738,
3108,
8019,
1330,
10644,
198,
17597,
13,
6978,
13,
33295,
7,
2536,
7,
15235,
7,
834,
7753,
834,
737,
411,
6442,
22446,
8000,
4008,
198,
6738,
269,
86,
274,
721,
1684,
13,
66,
732,
1330,
327,
732
] | 2.95122 | 41 |
import logging
import multiprocessing
import os
from dataclasses import dataclass
from itertools import repeat, takewhile
from pathlib import Path
from typing import Any, List, Mapping, Optional, Union
import numpy as np
import pandas as pd
from .bed_reader import read_f32, read_f64, read_i8
# https://stackoverflow.com/questions/845058/how-to-get-line-count-of-a-large-file-cheaply-in-python
@dataclass
_delimiters = {"fam": r"\s+", "bim": "\t"}
_count_name = {"fam": "iid_count", "bim": "sid_count"}
_meta_meta = {
# https://stackoverflow.com/questions/41921255/staticmethod-object-is-not-callable
"fid": _MetaMeta("fam", 0, np.str_, "0", _all_same),
"iid": _MetaMeta("fam", 1, np.str_, None, _sequence),
"father": _MetaMeta("fam", 2, np.str_, "0", _all_same),
"mother": _MetaMeta("fam", 3, np.str_, "0", _all_same),
"sex": _MetaMeta("fam", 4, np.int32, 0, _all_same),
"pheno": _MetaMeta("fam", 5, np.str_, "0", _all_same),
"chromosome": _MetaMeta("bim", 0, np.str_, "0", _all_same),
"sid": _MetaMeta("bim", 1, np.str_, None, _sequence),
"cm_position": _MetaMeta("bim", 2, np.float32, 0, _all_same),
"bp_position": _MetaMeta("bim", 3, np.int32, 0, _all_same),
"allele_1": _MetaMeta("bim", 4, np.str_, "A1", _all_same),
"allele_2": _MetaMeta("bim", 5, np.str_, "A2", _all_same),
}
class open_bed:
"""
Open a PLINK .bed file for reading.
Parameters
----------
filepath: pathlib.Path or str
File path to the .bed file.
iid_count: None or int, optional
Number of individuals (samples) in the .bed file.
The default (``iid_count=None``) finds the number
automatically by quickly scanning the .fam file.
sid_count: None or int, optional
Number of SNPs (variants) in the .bed file.
The default (``sid_count=None``) finds the number
automatically by quickly scanning the .bim file.
properties: dict, optional
A dictionary of any replacement properties. The default is an empty dictionary.
The keys of the dictionary are the names of the properties to replace.
The possible keys are:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
The values are replacement lists or arrays. A value can also be `None`,
meaning do not read or offer this property. See examples, below.
The list or array will be converted to a :class:`numpy.ndarray`
of the appropriate dtype, if necessary. Any :class:`numpy.nan` values
will converted to the appropriate missing value. The PLINK `.fam specification
<https://www.cog-genomics.org/plink2/formats#fam>`_
and `.bim specification <https://www.cog-genomics.org/plink2/formats#bim>`_
lists the dtypes and missing values for each property.
count_A1: bool, optional
True (default) to count the number of A1 alleles (the PLINK standard).
False to count the number of A2 alleles.
num_threads: None or int, optional
The number of threads with which to read data. Defaults to all available
processors.
Can also be set with these environment variables (listed in priority order):
'PST_NUM_THREADS', 'NUM_THREADS', 'MKL_NUM_THREADS'.
skip_format_check: bool, optional
False (default) to immediately check for expected starting bytes in
the .bed file. True to delay the check until (and if) data is read.
fam_filepath: pathlib.Path or str, optional
Path to the file containing information about each individual (sample).
Defaults to replacing the .bed fileโs suffix with .fam.
bim_filepath: pathlib.Path or str, optional
Path to the file containing information about each SNP (variant).
Defaults to replacing the .bed fileโs suffix with .bim.
Returns
-------
open_bed
an open_bed object
Examples
--------
List individual (sample) :attr:`iid` and SNP (variant) :attr:`sid`, then :meth:`read`
the whole file.
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> bed = open_bed(file_name)
>>> print(bed.iid)
['iid1' 'iid2' 'iid3']
>>> print(bed.sid)
['sid1' 'sid2' 'sid3' 'sid4']
>>> print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
>>> del bed # optional: delete bed object
Open the file and read data for one SNP (variant)
at index position 2.
.. doctest::
>>> import numpy as np
>>> with open_bed(file_name) as bed:
... print(bed.read(np.s_[:,2]))
[[nan]
[nan]
[ 2.]]
Replace :attr:`iid`.
>>> bed = open_bed(file_name, properties={"iid":["sample1","sample2","sample3"]})
>>> print(bed.iid) # replaced
['sample1' 'sample2' 'sample3']
>>> print(bed.sid) # same as before
['sid1' 'sid2' 'sid3' 'sid4']
Give the number of individuals (samples) and SNPs (variants) so that the .fam and
.bim files need never be opened.
>>> with open_bed(file_name, iid_count=3, sid_count=4) as bed:
... print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
Mark some properties as "donโt read or offer".
>>> bed = open_bed(file_name, properties={
... "father" : None, "mother" : None, "sex" : None, "pheno" : None,
... "allele_1" : None, "allele_2":None })
>>> print(bed.iid) # read from file
['iid1' 'iid2' 'iid3']
>>> print(bed.allele_2) # not read and not offered
None
See the :meth:`read` for details of reading batches via slicing and fancy indexing.
"""
def read(
self,
index: Optional[Any] = None,
dtype: Optional[Union[type, str]] = "float32",
order: Optional[str] = "F",
force_python_only: Optional[bool] = False,
num_threads=None,
) -> np.ndarray:
"""
Read genotype information.
Parameters
----------
index:
An optional expression specifying the individuals (samples) and SNPs
(variants) to read. (See examples, below).
Defaults to ``None``, meaning read all.
(If index is a tuple, the first component indexes the individuals and the
second indexes
the SNPs. If it is not a tuple and not None, it indexes SNPs.)
dtype: {'float32' (default), 'float64', 'int8'}, optional
The desired data-type for the returned array.
order : {'F','C'}, optional
The desired memory layout for the returned array.
Defaults to ``F`` (Fortran order, which is SNP-major).
force_python_only: bool, optional
If False (default), uses the faster Rust code; otherwise it uses the slower
pure Python code.
num_threads: None or int, optional
The number of threads with which to read data. Defaults to all available
processors.
Can also be set with :class:`open_bed` or these
environment variables (listed in priority order):
'PST_NUM_THREADS', 'NUM_THREADS', 'MKL_NUM_THREADS'.
Returns
-------
numpy.ndarray
2-D array containing values of 0, 1, 2, or missing
Rows represent individuals (samples). Columns represent SNPs (variants).
For ``dtype`` 'float32' and 'float64', NaN indicates missing values.
For 'int8', -127 indicates missing values.
Examples
--------
To read all data in a .bed file, set ``index`` to ``None``. This is the default.
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.read())
[[ 1. 0. nan 0.]
[ 2. 0. nan 2.]
[ 0. 1. 2. 0.]]
To read selected individuals (samples) and/or SNPs (variants), set each part of
a :class:`numpy.s_` to an `int`, a list of `int`, a slice expression, or
a list of `bool`.
Negative integers count from the end of the list.
.. doctest::
>>> import numpy as np
>>> bed = open_bed(file_name)
>>> print(bed.read(np.s_[:,2])) # read the SNPs indexed by 2.
[[nan]
[nan]
[ 2.]]
>>> print(bed.read(np.s_[:,[2,3,0]])) # read the SNPs indexed by 2, 3, and 0
[[nan 0. 1.]
[nan 2. 2.]
[ 2. 0. 0.]]
>>> # read SNPs from 1 (inclusive) to 4 (exclusive)
>>> print(bed.read(np.s_[:,1:4]))
[[ 0. nan 0.]
[ 0. nan 2.]
[ 1. 2. 0.]]
>>> print(np.unique(bed.chromosome)) # print unique chrom values
['1' '5' 'Y']
>>> print(bed.read(np.s_[:,bed.chromosome=='5'])) # read all SNPs in chrom 5
[[nan]
[nan]
[ 2.]]
>>> print(bed.read(np.s_[0,:])) # Read 1st individual (across all SNPs)
[[ 1. 0. nan 0.]]
>>> print(bed.read(np.s_[::2,:])) # Read every 2nd individual
[[ 1. 0. nan 0.]
[ 0. 1. 2. 0.]]
>>> #read last and 2nd-to-last individuals and the last SNPs
>>> print(bed.read(np.s_[[-1,-2],-1]))
[[0.]
[2.]]
You can give a dtype for the output.
.. doctest::
>>> print(bed.read(dtype='int8'))
[[ 1 0 -127 0]
[ 2 0 -127 2]
[ 0 1 2 0]]
>>> del bed # optional: delete bed object
"""
iid_index_or_slice_etc, sid_index_or_slice_etc = self._split_index(index)
dtype = np.dtype(dtype)
if order not in {"F", "C"}:
raise ValueError(f"order '{order}' not known, only 'F', 'C'")
# Later happy with _iid_range and _sid_range or could it be done with
# allocation them?
if self._iid_range is None:
self._iid_range = np.arange(self.iid_count, dtype="uintp")
if self._sid_range is None:
self._sid_range = np.arange(self.sid_count, dtype="uintp")
iid_index = np.ascontiguousarray(
self._iid_range[iid_index_or_slice_etc],
dtype="uintp",
)
sid_index = np.ascontiguousarray(
self._sid_range[sid_index_or_slice_etc], dtype="uintp"
)
if not force_python_only:
num_threads = get_num_threads(
self._num_threads if num_threads is None else num_threads
)
val = np.zeros((len(iid_index), len(sid_index)), order=order, dtype=dtype)
if self.iid_count > 0 and self.sid_count > 0:
if dtype == np.int8:
reader = read_i8
elif dtype == np.float64:
reader = read_f64
elif dtype == np.float32:
reader = read_f32
else:
raise ValueError(
f"dtype '{val.dtype}' not known, only "
+ "'int8', 'float32', and 'float64' are allowed."
)
reader(
str(self.filepath),
iid_count=self.iid_count,
sid_count=self.sid_count,
count_a1=self.count_A1,
iid_index=iid_index,
sid_index=sid_index,
val=val,
num_threads=num_threads,
)
else:
if not self.count_A1:
byteZero = 0
byteThree = 2
else:
byteZero = 2
byteThree = 0
if dtype == np.int8:
missing = -127
else:
missing = np.nan
# An earlier version of this code had a way to read consecutive SNPs of code
# in one read. May want
# to add that ability back to the code.
# Also, note that reading with python will often result in
# non-contiguous memory
# logging.warn("using pure python plink parser (might be much slower!!)")
val = np.zeros(
((int(np.ceil(0.25 * self.iid_count)) * 4), len(sid_index)),
order=order,
dtype=dtype,
) # allocate it a little big
nbyte = int(np.ceil(0.25 * self.iid_count))
with open(self.filepath, "rb") as filepointer:
for SNPsIndex, bimIndex in enumerate(sid_index):
startbit = int(np.ceil(0.25 * self.iid_count) * bimIndex + 3)
filepointer.seek(startbit)
bytes = np.array(bytearray(filepointer.read(nbyte))).reshape(
(int(np.ceil(0.25 * self.iid_count)), 1), order="F"
)
val[3::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 64] = missing
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 128] = 1
val[3::4, SNPsIndex : SNPsIndex + 1][bytes >= 192] = byteThree
bytes = np.mod(bytes, 64)
val[2::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 16] = missing
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 32] = 1
val[2::4, SNPsIndex : SNPsIndex + 1][bytes >= 48] = byteThree
bytes = np.mod(bytes, 16)
val[1::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 4] = missing
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 8] = 1
val[1::4, SNPsIndex : SNPsIndex + 1][bytes >= 12] = byteThree
bytes = np.mod(bytes, 4)
val[0::4, SNPsIndex : SNPsIndex + 1] = byteZero
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 1] = missing
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 2] = 1
val[0::4, SNPsIndex : SNPsIndex + 1][bytes >= 3] = byteThree
val = val[iid_index, :] # reorder or trim any extra allocation
assert val.dtype == np.dtype(dtype) # real assert
if not open_bed._array_properties_are_ok(val, order):
val = val.copy(order=order)
return val
@property
def fid(self) -> np.ndarray:
"""
Family id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.fid)
['fid1' 'fid1' 'fid2']
"""
return self.property_item("fid")
@property
def iid(self) -> np.ndarray:
"""
Individual id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.iid)
['iid1' 'iid2' 'iid3']
"""
return self.property_item("iid")
@property
def father(self) -> np.ndarray:
"""
Father id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.father)
['iid23' 'iid23' 'iid22']
"""
return self.property_item("father")
@property
def mother(self) -> np.ndarray:
"""
Mother id of each individual (sample).
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.mother)
['iid34' 'iid34' 'iid33']
"""
return self.property_item("mother")
@property
def sex(self) -> np.ndarray:
"""
Sex of each individual (sample).
Returns
-------
numpy.ndarray
array of 0, 1, or 2
0 is unknown, 1 is male, 2 is female
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sex)
[1 2 0]
"""
return self.property_item("sex")
@property
def pheno(self) -> np.ndarray:
"""
A phenotype for each individual (sample)
(seldom used).
Returns
-------
numpy.ndarray
array of str
'0' may represent a missing value.
If needed, will cause a one-time read of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.pheno)
['red' 'red' 'blue']
"""
return self.property_item("pheno")
@property
def properties(self) -> Mapping[str, np.array]:
"""
All the properties returned as a dictionary.
Returns
-------
dict
all the properties
The keys of the dictionary are the names of the properties, namely:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
The values are :class:`numpy.ndarray`.
If needed, will cause a one-time read of the .fam and .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(len(bed.properties)) #length of dict
12
"""
for key in _meta_meta:
self.property_item(key)
return self.properties_dict
def property_item(self, name: str) -> np.ndarray:
"""
Retrieve one property by name.
Returns
-------
numpy.ndarray
a property value
The name is one of these:
"fid" (family id), "iid" (individual or sample id), "father" (father id),
"mother" (mother id), "sex", "pheno" (phenotype), "chromosome", "sid"
(SNP or variant id), "cm_position" (centimorgan position), "bp_position"
(base-pair position), "allele_1", "allele_2".
If needed, will cause a one-time read of the .fam or .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.property_item('chromosome'))
['1' '1' '5' 'Y']
"""
if name not in self.properties_dict:
mm = _meta_meta[name]
self._read_fam_or_bim(suffix=mm.suffix)
return self.properties_dict[name]
@property
def chromosome(self) -> np.ndarray:
"""
Chromosome of each SNP (variant)
Returns
-------
numpy.ndarray
array of str
'0' represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.chromosome)
['1' '1' '5' 'Y']
"""
return self.property_item("chromosome")
@property
def sid(self) -> np.ndarray:
"""
SNP id of each SNP (variant).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sid)
['sid1' 'sid2' 'sid3' 'sid4']
"""
return self.property_item("sid")
@property
def cm_position(self) -> np.ndarray:
"""
Centimorgan position of each SNP (variant).
Returns
-------
numpy.ndarray
array of float
0.0 represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.cm_position)
[ 100.4 2000.5 4000.7 7000.9]
"""
return self.property_item("cm_position")
@property
def bp_position(self) -> np.ndarray:
"""
Base-pair position of each SNP (variant).
Returns
-------
numpy.ndarray
array of int
0 represents a missing value.
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.bp_position)
[ 1 100 1000 1004]
"""
return self.property_item("bp_position")
@property
def allele_1(self) -> np.ndarray:
"""
First allele of each SNP (variant).
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.allele_1)
['A' 'T' 'A' 'T']
"""
return self.property_item("allele_1")
@property
def allele_2(self) -> np.ndarray:
"""
Second allele of each SNP (variant),
Returns
-------
numpy.ndarray
array of str
If needed, will cause a one-time read of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.allele_2)
['A' 'C' 'C' 'G']
"""
return self.property_item("allele_2")
@property
def iid_count(self) -> np.ndarray:
"""
Number of individuals (samples).
Returns
-------
int
number of individuals
If needed, will cause a fast line-count of the .fam file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.iid_count)
3
"""
return self._count("fam")
@property
def sid_count(self) -> np.ndarray:
"""
Number of SNPs (variants).
Returns
-------
int
number of SNPs
If needed, will cause a fast line-count of the .bim file.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.sid_count)
4
"""
return self._count("bim")
@staticmethod
@staticmethod
@property
def shape(self):
"""
Number of individuals (samples) and SNPs (variants).
Returns
-------
(int, int)
number of individuals, number of SNPs
If needed, will cause a fast line-count of the .fam and .bim files.
Example
-------
.. doctest::
>>> from bed_reader import open_bed, sample_file
>>>
>>> file_name = sample_file("small.bed")
>>> with open_bed(file_name) as bed:
... print(bed.shape)
(3, 4)
"""
return (len(self.iid), len(self.sid))
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# if True:
# from bed_reader import sample_file
# file_name = sample_file("small.bed")
# with open_bed(file_name) as bed:
# print(bed.iid)
# print(bed.sid)
# print(bed.read())
# if False:
# import numpy as np
# from bed_reader._open_bed import open_bed
# # Can get file from
# https://www.dropbox.com/sh/xluk9opjiaobteg/AABgEggLk0ZoO0KQq0I4CaTJa?dl=0
# bigfile = r"M:\deldir\genbgen\2\merged_487400x220000.1.bed"
# # bigfile = '/mnt/m/deldir/genbgen/2/merged_487400x220000.1.bed'
# with open_bed(bigfile, num_threads=20) as bed:
# sid_batch = 22 * 1000
# for sid_start in range(0, 10 * sid_batch, sid_batch):
# slicer = np.s_[:10000, sid_start : sid_start + sid_batch]
# print(slicer)
# val = bed.read(slicer)
# print(val.shape)
# if False:
# file = r"D:\OneDrive\programs\sgkit-plink\bed_reader\tests\data
# /plink_sim_10s_100v_10pmiss.bed"
# with open_bed(file) as bed:
# print(bed.iid)
# print(bed.shape)
# val = bed.read()
# print(val)
# if False:
# # bed_file = example_file('doc/ipynb/all.*','*.bed')
# bed_file = r"F:\backup\carlk4d\data\carlk\cachebio\genetics\onemil\
# id1000000.sid_1000000.seed0.byiid\iid990000to1000000.bed"
# bed = Bed(bed_file, count_A1=False)
# snpdata1 = bed[:, :1000].read()
# snpdata2 = bed[:, :1000].read(dtype="int8", _require_float32_64=False)
# print(snpdata2)
# snpdata3 = bed[:, :1000].read(
# dtype="int8", order="C", _require_float32_64=False
# )
# print(snpdata3)
# snpdata3.val = snpdata3.val.astype("float32")
# snpdata3.val.dtype
# if False:
# from bed_reader import Bed, SnpGen
# iid_count = 487409
# sid_count = 5000
# sid_count_max = 5765294
# sid_batch_size = 50
# sid_batch_count = -(sid_count // -sid_batch_size)
# sid_batch_count_max = -(sid_count_max // -sid_batch_size)
# snpgen = SnpGen(seed=234, iid_count=iid_count, sid_count=sid_count_max)
# for batch_index in range(sid_batch_count):
# sid_index_start = batch_index * sid_batch_size
# sid_index_end = (batch_index + 1) * sid_batch_size # what about rounding
# filename = r"d:\deldir\rand\fakeukC{0}x{1}-{2}.bed".format(
# iid_count, sid_index_start, sid_index_end
# )
# if not os.path.exists(filename):
# Bed.write(
# filename + ".temp", snpgen[:, sid_index_start:sid_index_end].read()
# )
# os.rename(filename + ".temp", filename)
# if False:
# from bed_reader import Pheno, Bed
# filename = r"m:\deldir\New folder (4)\all_chr.maf0.001.N300.bed"
# iid_count = 300
# iid = [["0", "iid_{0}".format(iid_index)] for iid_index in range(iid_count)]
# bed = Bed(filename, iid=iid, count_A1=False)
# print(bed.iid_count)
# if False:
# from pysnptools.util import example_file
# pheno_fn = example_file("pysnptools/examples/toydata.phe")
# if False:
# from bed_reader import Pheno, Bed
# print(os.getcwd())
# # Read data from Pheno format
# snpdata = Pheno("../examples/toydata.phe").read()
# # pstutil.create_directory_if_necessary("tempdir/toydata.5chrom.bed")
# Bed.write(
# "tempdir/toydata.5chrom.bed", snpdata, count_A1=False
# ) # Write data in Bed format
import pytest
pytest.main(["--doctest-modules", __file__])
| [
11748,
18931,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
340,
861,
10141,
1330,
9585,
11,
1011,
4514,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
... | 2.031957 | 15,302 |
# Author: Matthew Bowden bowdenm@spu.edu
import flask as fsk
app = fsk.Flask(__name__)
@app.route('/')
| [
2,
6434,
25,
9308,
9740,
6559,
9563,
6559,
76,
31,
2777,
84,
13,
15532,
198,
198,
11748,
42903,
355,
277,
8135,
198,
198,
1324,
796,
277,
8135,
13,
7414,
2093,
7,
834,
3672,
834,
8,
628,
198,
31,
1324,
13,
38629,
10786,
14,
11537,... | 2.377778 | 45 |
if __name__ == '__main__':
from time import perf_counter_ns
basic_tests = (
([1, 1, 1], 1),
([1, 2, 3, 4, 5, 6], 3)
)
additional_tests = (
([1, 2], 0),
([2, 3, 5, 7], 0),
([1] * 100 + [2] + [4], 4),
([10, 20, 23, 27, 45, 69, 118, 138, 161, 166, 167, 170, 174, 213, 222, 224, 250, 251, 270, 285, 291, 325, 336, 355, 360, 381, 390, 396, 403, 413, 423, 446, 488, 507, 521, 560, 570, 660, 685, 715, 758, 781, 782, 783, 829, 855, 864, 874, 897, 936, 938, 944, 965, 981, 983, 993, 998, 1038, 1039, 1044, 1072, 1133, 1155, 1156, 1178, 1184, 1188, 1223, 1229, 1247, 1249, 1292, 1295, 1406, 1413, 1430, 1446, 1470, 1485, 1525, 1538, 1572, 1575, 1656, 1665, 1713, 1744, 1756, 1757, 1759, 1809, 1823, 1834, 1852, 1860, 1884, 1893, 1923, 1989, 2000], 11),
([1, 2, 2, 3, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 12, 14, 15, 15, 15, 17, 19, 24, 25, 26, 30, 31, 31, 34, 37, 42, 44, 48, 49, 50, 50, 55, 60, 62, 66, 68, 69, 77, 78, 79, 79, 81, 83, 84, 85, 87, 88, 88, 96, 98, 102, 104, 105, 106, 109, 109, 111, 112, 121, 122, 123, 123, 128, 130, 132, 132, 135, 136, 140, 143, 144, 145, 151, 151, 154, 155, 156, 157, 163, 163, 170, 172, 174, 174, 175, 175, 176, 179, 184, 193, 196, 200, 200, 204, 206, 206, 207, 208, 210, 211, 213, 215, 217, 219, 220, 220, 225, 225, 225, 226, 227, 228, 231, 232, 232, 236, 237, 238, 240, 240, 240, 243, 250, 254, 255, 257, 258, 260, 260, 264, 266, 268, 274, 275, 275, 275, 278, 279, 279, 281, 282, 283, 284, 286, 291, 293, 293, 294, 301, 301, 302, 304, 305, 305, 306, 306, 308, 310, 311, 311, 315, 316, 316, 320, 321, 321, 322, 323, 326, 328, 329, 330, 333, 334, 338, 339, 341, 347, 348, 349, 353, 356, 357, 361, 363, 366, 366, 366, 367, 369, 372, 373, 374, 375, 383, 384, 385, 388, 390, 392, 398, 405, 406, 409, 412, 412, 414, 419, 419, 419, 424, 425, 425, 425, 426, 427, 428, 429, 432, 432, 434, 435, 436, 438, 441, 442, 445, 446, 448, 448, 452, 456, 457, 459, 463, 464, 465, 466, 467, 467, 468, 468, 468, 473, 473, 480, 484, 486, 488, 488, 489, 489, 491, 495, 496, 497, 501, 502, 505, 506, 506, 510, 512, 516, 517, 517, 518, 528, 528, 530, 534, 536, 536, 537, 539, 539, 542, 545, 549, 555, 558, 559, 562, 563, 563, 563, 563, 565, 566, 567, 571, 572, 575, 578, 579, 579, 579, 584, 584, 586, 588, 590, 591, 592, 592, 598, 601, 603, 604, 607, 609, 612, 612, 613, 613, 615, 616, 618, 619, 622, 623, 625, 626, 627, 630, 630, 631, 631, 631, 632, 635, 637, 637, 641, 643, 645, 645, 646, 647, 648, 648, 649, 650, 650, 653, 653, 655, 657, 658, 659, 661, 664, 665, 668, 669, 669, 677, 678, 684, 686, 688, 690, 698, 698, 699, 703, 703, 704, 705, 706, 706, 709, 712, 720, 722, 725, 726, 727, 727, 730, 732, 732, 733, 735, 736, 746, 750, 753, 753, 753, 753, 759, 761, 767, 772, 778, 786, 788, 788, 792, 793, 796, 797, 798, 799, 799, 801, 801, 
810, 811, 812, 813, 822, 823, 826, 828, 829, 830, 832, 833, 833, 834, 837, 838, 839, 840, 842, 843, 851, 852, 854, 859, 860, 861, 863, 866, 866, 869, 870, 873, 873, 874, 874, 877, 880, 885, 890, 893, 895, 895, 903, 907, 912, 918, 918, 919, 919, 919, 919, 922, 923, 924, 924, 924, 933, 935, 936, 936, 940, 945, 948, 949, 950, 952, 952, 954, 957, 958, 963, 966, 966, 968, 969, 971, 972, 973, 973, 973, 976, 977, 980, 981, 985, 985, 986, 987, 987, 989, 992, 993, 994, 997, 999, 999, 1004, 1004, 1006, 1008, 1008, 1009, 1009, 1012, 1015, 1017, 1021, 1022, 1024, 1024, 1027, 1027, 1035, 1039, 1039, 1040, 1042, 1043, 1046, 1048, 1052, 1053, 1058, 1060, 1066, 1067, 1067, 1067, 1070, 1071, 1072, 1076, 1081, 1082, 1083, 1087, 1091, 1091, 1094, 1094, 1095, 1096, 1102, 1103, 1103, 1103, 1105, 1107, 1107, 1113, 1114, 1114, 1114, 1115, 1115, 1117, 1117, 1119, 1125, 1126, 1127, 1127, 1127, 1131, 1131, 1132, 1145, 1146, 1146, 1148, 1149, 1150, 1150, 1151, 1151, 1155, 1155, 1160, 1163, 1165, 1165, 1167, 1168, 1172, 1173, 1173, 1174, 1177, 1181, 1183, 1184, 1189, 1192, 1192, 1197, 1197, 1202, 1209, 1212, 1215, 1216, 1217, 1220, 1220, 1222, 1222, 1222, 1222, 1226, 1227, 1231, 1232, 1239, 1240, 1243, 1244, 1245, 1250, 1255, 1258, 1258, 1259, 1264, 1271, 1271, 1272, 1272, 1274, 1276, 1277, 1279, 1280, 1283, 1284, 1285, 1288, 1291, 1296, 1298, 1299, 1300, 1302, 1302, 1306, 1311, 1315, 1315, 1316, 1321, 1321, 1325, 1325, 1327, 1329, 1329, 1330, 1332, 1333, 1338, 1339, 1340, 1345, 1347, 1347, 1350, 1353, 1357, 1359, 1360, 1360, 1360, 1363, 1369, 1370, 1370, 1370, 1371, 1374, 1376, 1378, 1379, 1380, 1381, 1382, 1385, 1388, 1388, 1390, 1395, 1398, 1402, 1403, 1403, 1405, 1406, 1408, 1412, 1414, 1419, 1424, 1424, 1427, 1428, 1430, 1430, 1432, 1435, 1439, 1439, 1440, 1442, 1442, 1450, 1454, 1455, 1456, 1457, 1458, 1459, 1461, 1462, 1463, 1463, 1465, 1465, 1466, 1472, 1474, 1476, 1477, 1477, 1477, 1480, 1482, 1483, 1485, 1487, 1488, 1490, 1491, 1493, 1494, 1495, 1496, 1498, 1498, 1501, 1505, 1505, 
1506, 1515, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1523, 1525, 1525, 1526, 1527, 1528, 1528, 1530, 1535, 1537, 1537, 1540, 1540, 1540, 1540, 1542, 1543, 1546, 1548, 1549, 1551, 1552, 1553, 1556, 1557, 1558, 1560, 1561, 1563, 1563, 1566, 1569, 1570, 1571, 1576, 1579, 1583, 1584, 1585, 1589, 1589, 1594, 1594, 1595, 1598, 1606, 1609, 1611, 1612, 1618, 1619, 1620, 1625, 1628, 1629, 1639, 1640, 1640, 1644, 1644, 1645, 1649, 1653, 1656, 1657, 1657, 1658, 1658, 1659, 1661, 1666, 1667, 1668, 1671, 1672, 1673, 1681, 1687, 1689, 1689, 1691, 1691, 1691, 1692, 1699, 1699, 1702, 1703, 1704, 1705, 1707, 1708, 1714, 1717, 1717, 1720, 1725, 1725, 1730, 1732, 1733, 1738, 1738, 1740, 1741, 1741, 1744, 1746, 1748, 1748, 1751, 1753, 1755, 1756, 1757, 1759, 1759, 1759, 1768, 1772, 1773, 1774, 1780, 1781, 1784, 1785, 1787, 1787, 1788, 1788, 1789, 1789, 1791, 1794, 1797, 1797, 1802, 1805, 1806, 1809, 1809, 1812, 1813, 1814, 1815, 1816, 1821, 1824, 1826, 1826, 1831, 1834, 1835, 1838, 1839, 1839, 1839, 1845, 1846, 1849, 1854, 1858, 1864, 1865, 1867, 1877, 1879, 1879, 1879, 1880, 1881, 1882, 1882, 1883, 1885, 1885, 1888, 1890, 1897, 1899, 1901, 1905, 1907, 1907, 1913, 1913, 1914, 1922, 1923, 1925, 1927, 1929, 1930, 1932, 1932, 1939, 1940, 1941, 1945, 1946, 1947, 1952, 1952, 1953, 1954, 1954, 1954, 1956, 1959, 1959, 1962, 1963, 1966, 1967, 1970, 1977, 1978, 1978, 1979, 1982, 1987, 1988, 1990, 1991, 1992, 1993, 1994, 1995, 1997, 2000], 16_509),
([1, 1, 2, 4, 5, 2376, 2404, 3797, 3851, 4386, 4626, 5146, 5378, 5611, 5651, 5814, 6513, 6604, 7433, 7456, 7902, 8116, 8480, 10222, 10434, 10996, 11135, 11424, 11496, 11869, 12024, 12380, 13137, 13270, 13542, 13827, 13915, 14567, 14594, 14999, 15004, 16862, 17536, 17998, 19438, 19881, 20007, 21197, 21517, 22352, 22738, 22964, 24492, 24811, 25316, 26545, 27646, 28899, 29248, 29414, 29508, 29710, 30286, 31039, 31133, 33469, 34124, 34253, 35365, 35500, 35549, 35824, 36176, 37025, 37333, 37797, 38722, 39109, 39350, 39515, 41329, 41480, 41902, 41925, 42138, 42272, 42580, 43135, 43285, 43459, 43609, 43673, 43720, 44215, 44228, 44388, 44424, 45172, 46363, 46672, 46838, 47485, 48833, 49688, 50804, 53130, 53853, 54021, 54411, 54593, 55252, 55883, 56838, 57900, 58000, 58294, 58660, 59099, 59419, 59693, 60482, 61178, 61269, 61314, 62412, 63961, 64270, 64859, 66320, 66602, 67277, 68792, 69172, 69384, 70404, 70925, 71912, 72238, 72407, 72903, 73156, 73957, 74339, 75594, 75739, 76477, 76933, 77056, 78383, 79292, 79460, 80007, 81393, 81921, 82478, 82519, 83555, 83700, 83729, 84267, 84293, 84456, 84991, 85015, 85168, 85483, 86330, 86539, 86602, 86627, 87365, 87373, 87397, 87752, 88339, 88736, 88755, 88878, 89210, 90786, 90867, 90985, 91038, 91293, 91441, 92081, 93020, 93308, 94704, 95199, 95349, 95402, 95520, 95588, 96507, 97209, 97949, 98547, 99409, 99572, 99956, 100273, 100286, 100520, 100996, 103060, 103716, 104204, 104588, 105063, 105291, 107506, 107573, 107598, 107786, 109411, 110328, 111122, 112567, 112982, 114466, 114734, 114952, 114956, 115699, 116183, 116235, 116240, 116546, 117085, 118292, 118642, 118692, 119629, 120058, 120229, 120299, 120668, 120843, 121310, 121361, 121809, 122237, 122444, 122745, 123172, 123536, 124751, 124758, 124864, 125802, 125842, 126102, 126496, 127064, 128252, 128500, 128527, 128775, 129423, 129770, 130180, 131520, 131955, 131968, 133103, 133550, 133653, 135184, 135353, 135424, 135775, 135806, 136364, 138014, 138019, 138995, 139978, 140443, 
140710, 141077, 141758, 142049, 144424, 145361, 146043, 146496, 147308, 148004, 148132, 148194, 148315, 148356, 148745, 149171, 150067, 150409, 150911, 151094, 151344, 151852, 151955, 153093, 153421, 153868, 154412, 154415, 154556, 154988, 155165, 155369, 155452, 157006, 158594, 158833, 158977, 159320, 159441, 159621, 160559, 161030, 161418, 161499, 161546, 162092, 162100, 162487, 162495, 162933, 164019, 164860, 166041, 166227, 166514, 167443, 168228, 168442, 168714, 169205, 170059, 170458, 170944, 171048, 171937, 172401, 173151, 173953, 174383, 176454, 177051, 177371, 177604, 177653, 177916, 178673, 178721, 178859, 179775, 180347, 180556, 180708, 181440, 182059, 183012, 183102, 183703, 184324, 184364, 186200, 187135, 187147, 187287, 187326, 188781, 189064, 189455, 189622, 189688, 189722, 190190, 190559, 190985, 191409, 191960, 192376, 193140, 193657, 194994, 195168, 195421, 196295, 196534, 196949, 197042, 197229, 197590, 198872, 199052, 199632, 199657, 200555, 201151, 201324, 201446, 201632, 201827, 202262, 203034, 203080, 203775, 203790, 203795, 204252, 204309, 204317, 205306, 205412, 207839, 207914, 207956, 208364, 209462, 211072, 212088, 213155, 213159, 213322, 213659, 214046, 214728, 214779, 215260, 215900, 215973, 217046, 217974, 218444, 218696, 219185, 219686, 220148, 220273, 220842, 221436, 221497, 221716, 222530, 222635, 222647, 223100, 223403, 223862, 224272, 224580, 224625, 225157, 225364, 225525, 225965, 226064, 226132, 227500, 227558, 227627, 228193, 228426, 228528, 229668, 229730, 230653, 230802, 231518, 232532, 232733, 233089, 233919, 235296, 235321, 235642, 238313, 238441, 239117, 240710, 240870, 241429, 241594, 241722, 241815, 241939, 242116, 242857, 243226, 243230, 243593, 243655, 243720, 244049, 245057, 245396, 245734, 247547, 248382, 249195, 249807, 250421, 250589, 252190, 253206, 253276, 253398, 254136, 254332, 254848, 255485, 255581, 256750, 257099, 257198, 257745, 258165, 258626, 258870, 259521, 260359, 260474, 260813, 261771, 262329, 263921, 
264230, 264378, 264631, 265056, 265143, 265391, 267191, 267653, 268623, 268624, 268988, 269234, 269742, 270090, 270570, 272591, 272688, 273856, 274040, 274529, 274873, 275226, 276389, 276403, 276635, 277403, 277409, 278268, 279490, 280155, 280876, 281309, 281621, 281760, 282060, 282282, 282594, 283735, 283852, 284328, 284590, 285020, 285298, 286064, 286072, 287060, 287761, 287839, 288425, 288602, 288875, 289531, 289736, 290635, 290896, 291107, 291206, 291672, 291846, 292053, 292771, 292786, 293642, 293928, 294476, 294496, 294643, 294693, 294944, 295285, 295430, 295463, 295664, 296142, 296337, 297621, 297872, 298045, 298057, 298149, 298577, 298699, 299572, 299648, 300637, 301226, 301632, 302001, 302023, 303323, 303576, 304150, 305089, 305425, 305950, 306972, 307464, 307700, 308344, 308490, 308593, 309417, 310113, 312420, 312454, 312472, 313194, 313356, 314130, 314332, 314461, 314582, 314872, 315209, 315285, 315334, 315498, 315773, 317746, 317917, 318182, 319378, 320172, 320448, 321163, 321909, 322979, 323203, 323526, 323794, 324611, 324678, 325446, 325462, 325635, 326641, 327200, 328873, 329951, 330151, 330447, 330516, 331125, 331548, 333377, 333662, 333976, 334641, 335104, 336391, 337062, 337460, 337571, 339236, 339329, 339480, 339705, 339765, 340482, 340605, 340793, 341016, 341729, 342315, 342338, 344123, 344776, 345140, 345586, 345825, 345937, 346608, 347127, 348265, 348378, 348706, 348754, 348796, 349200, 349851, 350914, 351323, 352159, 352348, 352561, 352776, 352991, 353107, 354069, 354498, 354910, 355844, 355965, 357028, 357341, 357722, 358812, 359449, 359597, 360115, 360332, 360459, 361637, 362126, 362210, 362254, 362533, 362708, 362838, 363078, 364395, 364762, 365521, 366124, 366219, 366891, 367246, 367608, 368364, 369011, 369044, 369737, 370433, 370510, 370547, 371477, 371560, 371749, 373421, 373608, 374140, 375112, 375157, 377419, 377582, 377669, 377968, 378340, 378421, 379710, 380238, 380601, 382147, 383396, 383398, 383411, 383475, 383486, 383783, 384718, 
385380, 386302, 386729, 386807, 387258, 389859, 389895, 390345, 391082, 391398, 391576, 392238, 392261, 392455, 392510, 393929, 394210, 394223, 394389, 394485, 394749, 394925, 395541, 396339, 396464, 397327, 397903, 398066, 398297, 398427, 398562, 399776, 400170, 400754, 400969, 401064, 401272, 401663, 401914, 402040, 402164, 402696, 403151, 403681, 404052, 405818, 406037, 406261, 406629, 407310, 409060, 409374, 409495, 409544, 410885, 412078, 412701, 412903, 413601, 414417, 415696, 415729, 415781, 415863, 417181, 417630, 417752, 418517, 419112, 419171, 419353, 419510, 419682, 420192, 420810, 421004, 421461, 421786, 422146, 422150, 423551, 425267, 425379, 425782, 425975, 426113, 426186, 426599, 426929, 427245, 427712, 428179, 428412, 428777, 429052, 429261, 429406, 429892, 430130, 431013, 431415, 431551, 432078, 432812, 433038, 433933, 434655, 434711, 434716, 434966, 435418, 435457, 435630, 435749, 436432, 437531, 437759, 438173, 438243, 438514, 439222, 439640, 440146, 440304, 440694, 441318, 442052, 442321, 442912, 443710, 443734, 444491, 444573, 444754, 445243, 445301, 445512, 445851, 445935, 446428, 446992, 447391, 447721, 449202, 449288, 450127, 451570, 453164, 453291, 453619, 454826, 456006, 456196, 456229, 456688, 456747, 456877, 457778, 457851, 457997, 458359, 458470, 458931, 459116, 459163, 459320, 459716, 459761, 461561, 462270, 462276, 462666, 463203, 465064, 466002, 466783, 466937, 468798, 468881, 471002, 471887, 472016, 472145, 472217, 473959, 474378, 475158, 475238, 475366, 475644, 475975, 476065, 476114, 476926, 477511, 478181, 478249, 478450, 479206, 479217, 479533, 481048, 483196, 483691, 484304, 484488, 484494, 485018, 485349, 486256, 486449, 486872, 487486, 487961, 488037, 488156, 489348, 489638, 489908, 491162, 492176, 492300, 492866, 493793, 493925, 494924, 495341, 495407, 495699, 496482, 497186, 497884, 498271, 498450, 498519, 498528, 498899, 499047, 499333, 500150, 501425, 502056, 502268, 502442, 502869, 502899, 503448, 503535, 504613, 504905, 
505175, 505888, 506169, 506282, 506666, 506774, 507343, 507557, 509448, 509851, 511908, 512739, 513048, 513129, 513377, 513634, 514286, 514572, 515207, 516682, 516911, 518608, 518692, 518860, 519961, 520080, 520382, 520560, 522851, 522937, 523178, 523367, 523494, 524226, 524474, 526274, 526328, 527401, 527436, 529756, 530121, 530265, 531483, 531625, 531777, 532553, 532973, 532984, 534260, 534397, 534602, 535340, 535508, 535783, 536444, 536992, 537216, 537968, 539486, 539787, 539834, 542257, 543800, 544298, 544614, 545107, 545537, 545778, 547150, 547811, 547866, 547908, 548595, 550162, 550186, 551133, 551911, 552997, 553188, 553978, 553978, 554130, 554795, 554856, 556226, 556916, 557050, 557832, 557879, 558941, 560307, 560462, 561439, 561775, 561789, 561934, 562007, 562716, 563375, 563593, 564273, 564510, 564640, 564859, 565369, 565832, 566604, 566628, 566790, 567004, 567243, 567245, 567467, 567949, 569373, 569688, 570202, 570438, 571062, 571255, 572528, 572670, 573224, 573688, 574074, 574122, 575086, 575466, 575628, 575998, 576338, 576351, 576423, 578248, 578472, 578581, 578661, 579047, 579070, 579086, 579289, 579462, 579536, 579555, 580414, 582070, 582275, 582996, 583037, 584002, 584111, 584719, 585584, 585663, 586710, 588070, 588097, 589054, 589506, 592401, 593024, 595977, 596044, 597282, 598495, 598581, 598960, 599513, 599538, 599851, 600064, 600141, 600422, 600465, 600810, 601258, 601309, 601729, 602268, 602302, 602947, 603146, 603656, 604433, 605449, 607652, 607709, 607898, 608403, 609582, 611612, 611903, 613310, 614715, 615497, 616157, 616292, 616551, 616595, 617936, 618565, 618699, 618761, 620093, 620475, 620590, 620657, 621727, 622288, 622299, 622710, 623579, 623983, 623990, 624360, 625648, 625905, 627038, 627046, 627321, 627411, 627870, 628348, 628465, 628604, 628907, 629093, 630123, 630169, 630587, 630682, 631633, 631753, 632566, 633245, 634336, 634604, 634660, 635053, 635697, 635866, 636420, 636673, 636710, 636987, 637660, 638096, 638808, 639858, 640684, 
640991, 641215, 641284, 641420, 642119, 642443, 642701, 642820, 642862, 642953, 643370, 643500, 643671, 645554, 645971, 647794, 648648, 648865, 649376, 649432, 649795, 650358, 650568, 651834, 651856, 652254, 653300, 653440, 653454, 654175, 655179, 655314, 655389, 655627, 657291, 658236, 658900, 658973, 659088, 659584, 660104, 660559, 660990, 661166, 661431, 661514, 661661, 661807, 662368, 662633, 662791, 662927, 663067, 665502, 665995, 667229, 667348, 667461, 667595, 668861, 669190, 669762, 670137, 670289, 670785, 671082, 671673, 671740, 672038, 672736, 672781, 673036, 673144, 673886, 674025, 674156, 674280, 674661, 674681, 675010, 675272, 675680, 675685, 676299, 676468, 676630, 676775, 677155, 677223, 678522, 678836, 679444, 679470, 680074, 681360, 682418, 682815, 682941, 682948, 683240, 684703, 684886, 684910, 686936, 687137, 687911, 688084, 689225, 690904, 691771, 692349, 692476, 692763, 693718, 694162, 694339, 695346, 695759, 695779, 696211, 696750, 697011, 697270, 697481, 697870, 697957, 698246, 699744, 699889, 700237, 700448, 700703, 701356, 702575, 703435, 703455, 703748, 703799, 704043, 704190, 704616, 705139, 706540, 706558, 706707, 708015, 708694, 708926, 709825, 710492, 711090, 711168, 711361, 711781, 711894, 713324, 713529, 713686, 714646, 714683, 714909, 715177, 715416, 716041, 716235, 716442, 717033, 717516, 719185, 719891, 721161, 721627, 721965, 722128, 722248, 722285, 722633, 722653, 722824, 722844, 723592, 725429, 725743, 726556, 726970, 727189, 727362, 727443, 727517, 727834, 728297, 728388, 728457, 728545, 728552, 730850, 732439, 732705, 733196, 734087, 734168, 734274, 734583, 735300, 736158, 736434, 736887, 737125, 737654, 737829, 737915, 738100, 738749, 738868, 739490, 740312, 741096, 741961, 742147, 742282, 742480, 743002, 743022, 744131, 744338, 745303, 745596, 745624, 745668, 746420, 746442, 747031, 748626, 749169, 749571, 749638, 749882, 751490, 751786, 752276, 752798, 753000, 753614, 754993, 756731, 757354, 757480, 757613, 757701, 758073, 
758559, 758645, 758689, 760270, 760274, 761576, 762247, 762673, 762794, 762795, 763258, 763649, 763731, 764087, 764418, 764791, 765065, 766545, 766624, 767867, 767868, 768262, 769370, 769625, 769727, 769764, 769806, 769890, 770042, 770888, 770939, 771303, 771704, 772691, 772819, 772852, 772991, 773256, 774325, 774756, 776239, 777138, 777220, 777350, 778003, 778047, 778267, 778856, 779024, 779239, 779918, 782130, 782264, 782336, 782490, 782530, 783304, 784670, 785546, 785788, 786413, 786976, 787344, 787444, 787580, 788023, 789280, 790678, 790879, 791556, 792022, 792549, 792679, 793021, 795676, 795807, 797302, 797557, 797566, 797623, 797879, 798439, 798850, 800365, 800495, 801142, 801767, 801826, 802426, 802759, 802982, 803285, 803760, 804229, 804881, 805481, 806355, 806412, 807131, 807155, 807344, 808725, 808985, 809392, 809648, 810667, 811253, 811526, 811756, 811965, 812124, 812251, 812853, 813200, 815272, 815744, 817021, 817128, 817503, 818154, 818170, 818944, 819568, 820404, 820705, 821494, 821946, 822287, 822294, 822342, 822798, 823066, 823287, 823302, 823715, 823786, 824195, 825090, 825643, 826223, 826473, 826799, 827386, 828174, 828603, 829122, 829284, 829806, 830026, 830622, 830945, 831387, 831905, 833516, 833563, 833708, 833886, 833953, 834054, 834260, 834314, 834650, 834749, 835908, 836018, 836966, 837330, 837645, 838957, 839309, 839577, 839861, 840024, 840136, 840182, 840967, 842003, 842414, 842452, 843463, 843899, 844144, 844260, 844689, 844835, 844881, 844953, 845450, 846379, 846589, 847023, 847704, 849207, 849977, 852621, 852888, 852925, 853944, 853952, 854185, 854562, 854629, 854651, 858294, 858306, 859025, 859621, 860103, 862058, 862305, 862477, 862811, 864637, 864959, 864965, 865802, 866147, 867167, 867201, 867652, 868060, 869453, 871559, 871577, 871926, 872212, 872497, 873052, 873056, 873119, 873131, 875113, 875271, 876161, 876519, 876938, 877547, 878046, 878472, 878503, 879047, 879575, 880701, 881652, 881833, 881919, 882061, 883577, 884403, 885023, 
885127, 885785, 886158, 886208, 888402, 889913, 890229, 891018, 891362, 892577, 892614, 892993, 895511, 896001, 896080, 896840, 897549, 897778, 898041, 898631, 898925, 899632, 899693, 900664, 900731, 900846, 901237, 902452, 902600, 903765, 903824, 904503, 904806, 905170, 905714, 905773, 906339, 907288, 907374, 907465, 907670, 908341, 910218, 911660, 912251, 912590, 913230, 913434, 913862, 914468, 914555, 916230, 916429, 916539, 916570, 916992, 918561, 918717, 919383, 919617, 920634, 921636, 922107, 923018, 924184, 924450, 924527, 924671, 925145, 925642, 925668, 926427, 927170, 928014, 928689, 928908, 929630, 929880, 929982, 930221, 930510, 930956, 931230, 931469, 931615, 931807, 931849, 932278, 932334, 933131, 934640, 936083, 936568, 936766, 937113, 938140, 938375, 939190, 939220, 939406, 940609, 940924, 942686, 942741, 943700, 944047, 945738, 946158, 946663, 946803, 947757, 947909, 948209, 948851, 949348, 950198, 951077, 951495, 951531, 951552, 951665, 952289, 952822, 952942, 953011, 953352, 953503, 953979, 955326, 955497, 955971, 957215, 957374, 957416, 957494, 957711, 957775, 958597, 958845, 959574, 961150, 961643, 961700, 963012, 963241, 964259, 965387, 965609, 965863, 966914, 969018, 969270, 969665, 969762, 971319, 971600, 972634, 972757, 973134, 973294, 973894, 973985, 974198, 974994, 975440, 975802, 975974, 976033, 976057, 976313, 977155, 977168, 977286, 978755, 979202, 979626, 981524, 981594, 981667, 982178, 982446, 982685, 983200, 983528, 983662, 983912, 984327, 984469, 985813, 986081, 986251, 986977, 987372, 987385, 987400, 988582, 988950, 989624, 989795, 989930, 990827, 991296, 991411, 991873, 991948, 992277, 993009, 993016, 993092, 993998, 994233, 994280, 994287, 994621, 995485, 995576, 995633, 996076, 996197, 996989, 999437], 6_582)
)
results = {}
num_iters = 1
for func in [func for func in dir() if func.startswith('solution')]:
results[func] = []
print(f'\n{func}() (Number of Iterations {num_iters:,})')
for test in basic_tests + additional_tests:
l, expected = test
start = perf_counter_ns()
for i in range(num_iters):
result = globals()[func](l)
end = perf_counter_ns()
results[func].append(end - start)
print(f'{func}({l if len(l) < 10 else "truncated due to length: " + str(len(l))}) returned {result} '
f'({"correct" if result == expected else f"expected: {expected}"})'
f' in {end - start:,} nanoseconds.')
| [
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
422,
640,
1330,
23035,
62,
24588,
62,
5907,
198,
220,
220,
220,
4096,
62,
41989,
796,
357,
198,
220,
220,
220,
220,
220,
220,
220,
29565,
16,
... | 2.077604 | 10,966 |
import numpy as np
import copy
import pygmo
from gips.gistmodel.mode0 import mode0
from gips.gistmodel.mode1 import mode1
from gips.gistmodel.mode3 import mode3
from gips.gistmodel.mode4 import mode4
from gips.gistmodel.mode5 import mode5
from gips.gistmodel.mode6 import mode6
from gips.gistmodel.mode7 import mode7
from gips.utils.misc import mode_error
from gips.mapout.map_processing import mapout_maps
from gips.utils.read_write import read_parmsfile
from gips.utils.read_write import write_maps
| [
11748,
299,
32152,
355,
45941,
198,
198,
11748,
4866,
198,
11748,
12972,
70,
5908,
198,
198,
6738,
308,
2419,
13,
70,
396,
19849,
13,
14171,
15,
1330,
4235,
15,
198,
6738,
308,
2419,
13,
70,
396,
19849,
13,
14171,
16,
1330,
4235,
16... | 2.924419 | 172 |
from mlflow.exceptions import MlflowException
from mlflow.models.evaluation.artifacts import (
ImageEvaluationArtifact,
JsonEvaluationArtifact,
NumpyEvaluationArtifact,
CsvEvaluationArtifact,
ParquetEvaluationArtifact,
TextEvaluationArtifact,
PickleEvaluationArtifact,
)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import json
import pathlib
import pytest
from mlflow.models.evaluation.artifacts import _infer_artifact_type_and_ext
from mlflow.models.evaluation.default_evaluator import _CustomMetric
@pytest.fixture
@pytest.mark.parametrize(
"is_file,artifact,artifact_type,ext",
[
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "png"),
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "jpg"),
(True, lambda path: plt.figure().savefig(path), ImageEvaluationArtifact, "jpeg"),
(True, __generate_dummy_json_file, JsonEvaluationArtifact, "json"),
(True, lambda path: pathlib.Path(path).write_text("test"), TextEvaluationArtifact, "txt"),
(
True,
lambda path: np.save(path, np.array([1, 2, 3]), allow_pickle=False),
NumpyEvaluationArtifact,
"npy",
),
(
True,
lambda path: pd.DataFrame({"test": [1, 2, 3]}).to_csv(path, index=False),
CsvEvaluationArtifact,
"csv",
),
(
True,
lambda path: pd.DataFrame({"test": [1, 2, 3]}).to_parquet(path),
ParquetEvaluationArtifact,
"parquet",
),
(False, pd.DataFrame({"test": [1, 2, 3]}), CsvEvaluationArtifact, "csv"),
(False, np.array([1, 2, 3]), NumpyEvaluationArtifact, "npy"),
(False, plt.figure(), ImageEvaluationArtifact, "png"),
(False, {"a": 1, "b": "e", "c": 1.2, "d": [1, 2]}, JsonEvaluationArtifact, "json"),
(False, [1, 2, 3, "test"], JsonEvaluationArtifact, "json"),
(False, '{"a": 1, "b": [1.2, 3]}', JsonEvaluationArtifact, "json"),
(False, '[1, 2, 3, "test"]', JsonEvaluationArtifact, "json"),
(False, __DummyClass(), PickleEvaluationArtifact, "pickle"),
],
)
| [
6738,
285,
1652,
9319,
13,
1069,
11755,
1330,
337,
1652,
9319,
16922,
198,
6738,
285,
1652,
9319,
13,
27530,
13,
18206,
2288,
13,
50179,
1330,
357,
198,
220,
220,
220,
7412,
36,
2100,
2288,
8001,
29660,
11,
198,
220,
220,
220,
449,
... | 2.182796 | 1,023 |
# pylint: disable=invalid-name
"""Hyvรคksymistestausluokka
"""
#Pylint disablettu toistaiseksi
from stub_io import StubIO # pylint: disable=import-error
class ReadingtipLibrary: # pylint: disable=invalid-name
"""Luokka joka vastaa vaatimusten testaamisesta
"""
def __init__(self):
"""Luokan konstruktori
"""
self._io = StubIO()
def input(self, value):
"""Luo syรถtteen
"""
self._io.initial_add(value)
def output_should_contain(self, value):
"""Tarkistaa tulosteen
"""
outputs = self._io.output
if not value in outputs:
raise AssertionError(
f"Output \"{value}\" is not in {str(outputs)}"
)
def run_application(self):
"""Kรคynnistรครค sovelluksen
"""
self._io.start()
def last_output_should_contain(self, value):
"""Tarkistaa viimeisen tulosteen
"""
if len(self._io.output) > 0:
last_output = self._io.output.pop()
else:
last_output = ""
if last_output != value:
raise AssertionError(
f"{value} is not in {last_output}"
)
def database_must_be_empty(self):
"""Tarkistaa onko tietokanta tyhjรค
"""
if len(self._io.database) > 0:
raise AssertionError(
"Database is not empty"
)
| [
2,
279,
2645,
600,
25,
15560,
28,
259,
12102,
12,
3672,
198,
37811,
21217,
85,
11033,
591,
4948,
396,
395,
8717,
2290,
482,
4914,
198,
37811,
198,
198,
2,
47,
2645,
600,
15560,
926,
84,
284,
12523,
786,
591,
72,
198,
6738,
17071,
... | 1.994398 | 714 |
import frozendict
from loguru import logger
from omni_converter import AutoDataFactory
from omni_converter.solver.astar import AstarSolver
from omni_cv_rules.coconut.omni_converter import AutoList
from omni_cv_rules.rulebook import CV_RULEBOOK
target_conversions=[
("[image_path]","numpy_rgb"),
("pix2pix_batch,nc=3","image,RGB,RGB"),
("torch,float32,CHW,RGB,0_1","base64"),
("torch,float32,CHW,RGB,0_1","widget"),
("numpy,float32,CHW,RGB,0_1","[image,L,L]"),
("numpy,float32,BCHW,RGB,0_1","[image,L,L]"),
("[numpy,float32,CHW,RGB,0_1]","[image,L,L]"),
("numpy,float32,BHW,L,None","numpy,float32,BHWC,RGB,None"),
(AutoList(frozendict.frozendict({'arrange': 'HWC', 'meta': frozendict.frozendict({'shape': (None, None, 1)}), 'type': 'numpy', 'dtype': 'float32', 'ch_rpr': 'L', 'v_range': 'None'})),
AutoList(frozendict.frozendict({'type': 'numpy', 'arrange': 'HWC', 'ch_rpr': 'LLL', 'meta': frozendict.frozendict({'shape': (None, None, 3)}), 'dtype': 'float32', 'v_range': 'None'})))
]
| [
11748,
8400,
89,
437,
713,
198,
6738,
2604,
14717,
1330,
49706,
198,
198,
6738,
39030,
8461,
62,
1102,
332,
353,
1330,
11160,
6601,
22810,
198,
6738,
39030,
8461,
62,
1102,
332,
353,
13,
82,
14375,
13,
459,
283,
1330,
317,
7364,
50,
... | 2.177966 | 472 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
train triplet net and get feature vectors
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import time
import argparse
# from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
import numpy as np
import chainer
from chainer import cuda, optimizers, serializers
import dump_vec
import triplet_net
if __name__ == '__main__':
st = time.clock()
s_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=-1, type=int)
parser.add_argument('--epoch', default=40, type=int)
parser.add_argument('--batchsize', default=100, type=int)
parser.add_argument('--initmodel', default=0, type=int)
parser.add_argument('--resume', default=0, type=int)
parser.add_argument('--outputdim', default=2, type=int)
parser.add_argument('--n_train', default=1000, type=int)
parser.add_argument('--plot_dim', default=100, type=int)
parser.add_argument('--d_name', default='hoge', type=str)
args = parser.parse_args()
print('Create model')
model = triplet_net.Triplet_net(args.outputdim)
print('Check gpu')
if args.gpu >= 0:
cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
print('Load dataset')
ld_dict, unld_dict = get_mnist(args.n_train)
print('Setup optimizer')
optimizer = optimizers.Adam(alpha=0.0002)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001))
if args.initmodel:
model_dir = '../dump/{}_triplet.model'.format(args.d_name)
serializers.load_hdf5(model_dir, model)
if args.resume:
state_dir = '../dump/{}_triplet.state'.format(args.d_name)
serializers.load_hdf5(state_dir, optimizer)
print('training and test')
train_and_dump(model, optimizer, ld_dict, unld_dict, xp, args.batchsize,\
args.epoch, args.plot_dim, args.gpu, args.outputdim, args.d_name)
print('end')
print('elapsed time[m]:', (time.clock() - st)/60.0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
27432,
15055,
83,
2010,
290,
651,
3895,
30104,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
41... | 2.49835 | 909 |
#!/usr/bin/env python3
# coding=utf-8
import sys
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSlot,QTimer
from PyQt5 import uic,QtGui
import random
import math
if __name__=='__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
25064,
198,
11748,
25064,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1635,
198,
6738,
9485,
48,
83,
20,
13,
... | 2.345588 | 136 |
# Copyright (c) 2015 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from __future__ import absolute_import
from cef_parser import *
# test the module
if __name__ == "__main__":
import sys
# verify that the correct number of command-line arguments are provided
if len(sys.argv) < 2:
sys.stderr.write('Usage: ' + sys.argv[0] + ' <include_dir>\n')
sys.exit()
# create the header object
header = obj_header()
excluded_files = ['cef_api_hash.h', 'cef_application_mac.h', 'cef_version.h']
header.add_directory(sys.argv[1], excluded_files)
# dump the result to stdout
sys.stdout.write(make_wrapper_types_header(header))
| [
2,
15069,
357,
66,
8,
1853,
383,
18255,
1505,
13302,
47238,
25161,
46665,
13,
1439,
2489,
198,
2,
10395,
13,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
198,
2,
460,
307,
1043,
287,
262,
38559,... | 3.031746 | 252 |
from os import path
from django.conf import settings
from rest_framework import exceptions, authentication
import jwt
from .models import CustomUser
| [
198,
6738,
28686,
1330,
3108,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
1334,
62,
30604,
1330,
13269,
11,
18239,
198,
11748,
474,
46569,
198,
6738,
764,
27530,
1330,
8562,
12982,
198,
220,
220,
220,
220,
220,
220,
220,
... | 3.34 | 50 |
import time,machine
BUTTONPIN = 0
buttonpin = machine.Pin(BUTTONPIN, machine.Pin.IN, machine.Pin.PULL_UP)
buttonlast = 0
print("wait_until_pressed: Waits until the button is pressed.")
while buttonpin.value()==1:
print("push the button")
time.sleep_ms(100)
print("button is pressed")
| [
11748,
640,
11,
30243,
198,
47526,
11357,
44032,
796,
657,
198,
16539,
11635,
796,
4572,
13,
28348,
7,
47526,
11357,
44032,
11,
4572,
13,
28348,
13,
1268,
11,
4572,
13,
28348,
13,
5105,
3069,
62,
8577,
8,
198,
16539,
12957,
796,
657,
... | 2.910891 | 101 |
'''Word2Vec Baseline
python3 baseline_embeddings.py path_to_embedding'''
import logging
import logging.config
import configparser as cp
#import args
import sys
import pickle
import numpy as np
import vectorizer
import load
import sample
#configurations
config = cp.ConfigParser(strict=False)
config.read('defaults.cfg')
#argparser
#args = args.get_args()
'''
>>> args.train
False
'''
#logging
logger = logging.getLogger(__name__)
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level':'INFO',
'formatter': 'standard',
'class':'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
}
}
})
# word embedding
from gensim.models import KeyedVectors
def prepare_embedding_vocab(filename, binary = True, limit = 1000000):
'''filename: '~/disease-normalization/data/embeddings/wvec_50_haodi-li-et-al.bin'
1. Use gensim for reading in embedding model
2. Sort based on the index to make sure that they are in the correct order
3. Normalize the vectors
4. Build vocabulary mappings, zero for padding
5. Create an inverse dictionary
'''
vector_model = KeyedVectors.load_word2vec_format(filename, binary = binary, limit = limit)
#vector_model=KeyedVectors.load_word2vec_format(config['embedding']['emb_file'], binary=True, limit=50000)
words = [k for k,v in sorted(vector_model.vocab.items(),key = lambda x:x[1].index)]
vector_model.init_sims(replace = True)
vocabulary={"<SPECIAL>": 0, "<OOV>": 1}
for word in words:
vocabulary.setdefault(word, len(vocabulary))
inversed_vocabulary={value:key for key, value in vocabulary.items()}
return vector_model, vocabulary, inversed_vocabulary
def load_pretrained_word_embeddings(vocab,embedding_model):
"""vocab: vocabulary from data vectorizer
embedding_model: model loaded with gensim"""
pretrained_embeddings = np.random.uniform(low=-0.05, high=0.05, size=(len(vocab)-1,embedding_model.vectors.shape[1]))
pretrained_embeddings = np.vstack((np.zeros(shape=(1,embedding_model.vectors.shape[1])), pretrained_embeddings))
found=0
for word,idx in vocab.items():
if word in embedding_model.vocab:
pretrained_embeddings[idx]=embedding_model.get_vector(word)
found+=1
logger.info("Found pretrained vectors for {found} words.".format(found=found))
return pretrained_embeddings
if __name__ == '__main__':
emb_baseline(sys.argv[1])
#normalize(sys.argv[1], sys.argv[2]) | [
7061,
6,
26449,
17,
53,
721,
6455,
4470,
198,
29412,
18,
14805,
62,
20521,
67,
654,
13,
9078,
3108,
62,
1462,
62,
20521,
12083,
7061,
6,
198,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
198,
11748,
4566,
48610,
355,
31396,
... | 2.431624 | 1,170 |
import argparse
import random
import time
import torch
import numpy as np
from network import GNet
from trainer import Trainer
from utils.data_loader import FileLoader
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
11748,
4738,
198,
11748,
640,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3127,
1330,
402,
7934,
198,
6738,
21997,
1330,
31924,
198,
6738,
3384,
4487,
13,
7890,
62,
29356,
1330,
9220,
17401,
... | 3.47541 | 61 |
from src.pysource import get_callable, print_source
| [
6738,
12351,
13,
79,
893,
1668,
1330,
651,
62,
13345,
540,
11,
3601,
62,
10459,
198
] | 3.25 | 16 |
"""
The boolean module lets you create complicated boolean expressions by composing
objects. The compositions can be evaluated against multiple values.
"""
import logging
import operator
import re
from functools import partial, wraps
from itertools import count
log = logging.getLogger(__name__)
__all__ = [
"pred",
"pred2",
"flip",
"TRUE",
"FALSE",
"flip",
"pred",
"pred2",
"lt",
"le",
"eq",
"ge",
"gt",
"isin",
"contains",
"search",
"matches",
"startswith",
"endswith",
]
# Optimization: generate regular python functions from the AST.
# This "compilation" takes microseconds.
class Predicate(Boolean):
""" Calls a function to determine truth value. """
pred = Predicate
def flip(f):
"""
Switches position of the first two arguments to f and ensures
its result is a bool.
"""
@wraps(f)
return inner
TRUE = TRUE()
FALSE = FALSE()
lt = pred2(operator.lt)
le = pred2(operator.le)
eq = pred2(operator.eq)
ge = pred2(operator.ge)
gt = pred2(operator.gt)
isin = pred2(flip(operator.contains))
contains = pred2(operator.contains)
search = pred2(flip(re.search))
matches = search
startswith = pred2(str.startswith)
endswith = pred2(str.endswith)
| [
37811,
198,
464,
25131,
8265,
8781,
345,
2251,
8253,
25131,
14700,
416,
49760,
198,
48205,
13,
383,
33543,
460,
307,
16726,
1028,
3294,
3815,
13,
198,
37811,
198,
11748,
18931,
198,
11748,
10088,
198,
11748,
302,
198,
198,
6738,
1257,
3... | 2.641822 | 483 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/8/4 14:15
# @Author : Marrion
#
import functools,inspect
@check
print(add(3,4)) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
2177,
14,
23,
14,
19,
1478,
25,
1314,
198,
2,
2488,
13838,
220,
1058,
1526,
81,... | 2.086957 | 69 |
# -*- coding:utf-8 -*-
'''
Model for classify.
'''
from config import CMS_CFG
from torcms.model.core_tab import TabPost, TabPost2Tag, TabTag
class MClassify():
'''
Model for classify.
'''
@staticmethod
@staticmethod
@staticmethod
@staticmethod
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
17633,
329,
36509,
13,
198,
7061,
6,
198,
198,
6738,
4566,
1330,
40773,
62,
22495,
38,
198,
6738,
7332,
46406,
13,
19849,
13,
7295,
62,
8658,
1330,
16904,
6... | 2.559633 | 109 |
# -*- coding: utf-8 -*-
"""Format adapter for the terminaltables module."""
import terminaltables
import itertools
from cli_helpers.utils import filter_dict_by_key
from .preprocessors import (convert_to_string, override_missing_value,
style_output)
supported_formats = ('ascii', 'double', 'github')
preprocessors = (override_missing_value, convert_to_string, style_output)
def adapter(data, headers, table_format=None, **kwargs):
"""Wrap terminaltables inside a function for TabularOutputFormatter."""
keys = ('title', )
table_format_handler = {
'ascii': terminaltables.AsciiTable,
'double': terminaltables.DoubleTable,
'github': terminaltables.GithubFlavoredMarkdownTable,
}
table = table_format_handler[table_format]
t = table([headers] + list(data), **filter_dict_by_key(kwargs, keys))
dimensions = terminaltables.width_and_alignment.max_dimensions(
t.table_data,
t.padding_left,
t.padding_right)[:3]
for r in t.gen_table(*dimensions):
yield u''.join(r)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
26227,
21302,
329,
262,
5651,
2501,
2977,
8265,
526,
15931,
198,
198,
11748,
5651,
2501,
2977,
198,
11748,
340,
861,
10141,
198,
198,
6738,
537,
72,
62,
16794,
36... | 2.570071 | 421 |
#!/usr/bin/env python3
import sys
import requests
from google.cloud import translate
REGIST_URL = r"https://www.berlin.de/daten/liste-der-kfz-kennzeichen/kfz-kennz-d.csv"
NUMBERS = (
"null",
"eins",
"zwei",
"drei",
"vier",
"fรผnf",
"sechs",
"sieben",
"acht",
"neun"
)
csvData = requests.get(REGIST_URL)
csvData.encoding = "utf-8"
PREFIXES = {k.lower(): v for k, v in (
line.split(",")[0:2] for line in csvData.text.split("\r\n")[1:-1])} # first and last line contain garbage
translateClient = translate.Client()
languages = translateClient.get_languages()
result = {}
for lang in languages:
langCode = lang["language"]
if langCode == "de":
continue
translated = [i["translatedText"].lower() for i in translateClient.translate(
NUMBERS[1:], target_language=langCode, source_language="de")]
findMatches(lang["name"], translated)
findMatches("German", NUMBERS[1:])
print("%d results" % len(result))
for lang in result:
print("\n##", lang, "\n")
for res in result[lang]:
print(" - %s - %s %d* %s" % res)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
198,
11748,
7007,
198,
6738,
23645,
13,
17721,
1330,
15772,
198,
198,
31553,
8808,
62,
21886,
796,
374,
1,
5450,
1378,
2503,
13,
527,
2815,
13,
2934,
14,
... | 2.353945 | 469 |
# -*- coding: utf-8 -*-
"""Heartbeat service.
This is the internal thread responsible for sending heartbeat events
at regular intervals (may not be an actual thread).
"""
from __future__ import absolute_import, unicode_literals
from celery.signals import heartbeat_sent
from celery.utils.sysinfo import load_average
from .state import SOFTWARE_INFO, active_requests, all_total_count
__all__ = ('Heart',)
class Heart(object):
"""Timer sending heartbeats at regular intervals.
Arguments:
timer (kombu.asynchronous.timer.Timer): Timer to use.
eventer (celery.events.EventDispatcher): Event dispatcher
to use.
interval (float): Time in seconds between sending
heartbeats. Default is 2 seconds.
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
28541,
12945,
2139,
13,
198,
198,
1212,
318,
262,
5387,
4704,
4497,
329,
7216,
36051,
2995,
198,
265,
3218,
20016,
357,
11261,
407,
307,
281,
4036,
4704,
737,
198... | 3.02381 | 252 |
from toolbox.geohash.lib import ray as _uut
from nose import tools as _tools
from toolbox.geohash.lib import point as _point
| [
6738,
2891,
3524,
13,
469,
1219,
1077,
13,
8019,
1330,
26842,
355,
4808,
84,
315,
198,
198,
6738,
9686,
1330,
4899,
355,
4808,
31391,
198,
198,
6738,
2891,
3524,
13,
469,
1219,
1077,
13,
8019,
1330,
966,
355,
4808,
4122,
628,
198
] | 3.071429 | 42 |
import numpy as np
import os
import imageio
mnist = np.load('mnist.npy')
num_imgs = mnist.shape[0]
for i in range(num_imgs):
img = mnist[i,:,:]
name = 'img_%s.jpg'%(i)
file_path = os.path.join('mnist_images', name)
imageio.imwrite(file_path, (img*255).astype(np.uint8)) | [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
2939,
952,
198,
198,
10295,
396,
796,
45941,
13,
2220,
10786,
10295,
396,
13,
77,
9078,
11537,
198,
22510,
62,
9600,
82,
796,
285,
77,
396,
13,
43358,
58,
15,
60,
198,
16... | 2.166667 | 132 |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""REST facade for roles."""
from lib import users
from lib.constants import roles, objects, object_states
from lib.decorator import memoize
from lib.entities import entities_factory
from lib.entities.entity import AccessControlRoleEntity
from lib.service import rest_facade
def get_role_name_and_id(object_type, role):
"""Returns role name and id as dict according to passed role entity or
name and object type."""
if isinstance(role, AccessControlRoleEntity):
return {"role_name": role.name, "role_id": role.id}
return {"role_name": role, "role_id": roles.ACLRolesIDs.id_of_role(
object_type, role)}
def custom_read_role(object_type):
"""Creates and returns custom access control role for object with 'Read'
rights."""
current_user = users.current_user()
users.set_current_user(entities_factory.PeopleFactory.superuser)
role = rest_facade.create_access_control_role(
object_type=object_type, read=True, update=False, delete=False)
users.set_current_user(current_user)
return role
@memoize
def custom_audit_read_role():
"""Returns custom access control role with 'Read' rights for Audit."""
return custom_read_role(objects.get_singular(objects.AUDITS, title=True))
@memoize
def custom_asmt_read_role():
"""Returns custom access control role with 'Read' rights for Assessment."""
return custom_read_role(objects.get_singular(objects.ASSESSMENTS,
title=True))
def add_verifier_to_set_obj_state(obj, state, person):
"""Assign a person as verifier if verifier presence is necessary for
setting an object into specific state and obj has no verifiers assigned."""
if state in object_states.VERIFIER_REQUIRING_STATES and not obj.verifiers:
rest_facade.update_acl(
objs=[obj], people=person,
**get_role_name_and_id(obj.type, roles.VERIFIERS))
| [
2,
15069,
357,
34,
8,
12131,
3012,
3457,
13,
198,
2,
49962,
739,
2638,
1378,
2503,
13,
43073,
13,
2398,
14,
677,
4541,
14,
43,
2149,
24290,
12,
17,
13,
15,
1279,
3826,
38559,
24290,
2393,
29,
198,
37811,
49,
6465,
43562,
329,
9176... | 2.959581 | 668 |
import pytest
from fastapi_iam import auth
pytestmark = pytest.mark.asyncio
| [
11748,
12972,
9288,
198,
6738,
3049,
15042,
62,
1789,
1330,
6284,
198,
198,
9078,
9288,
4102,
796,
12972,
9288,
13,
4102,
13,
292,
13361,
952,
628
] | 3 | 26 |
# Copyright 2020 Jiang Shenghu
# SPDX-License-Identifier: Apache-2.0
from tvm import topi
from ..poly import TensorTable, Statement, ScheduleTree
from .conv import PlainConv2d, Conv2d
| [
2,
15069,
12131,
32294,
22323,
456,
84,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
6738,
256,
14761,
1330,
1353,
72,
198,
198,
6738,
11485,
35428,
1330,
309,
22854,
10962,
11,
21983,
11,
19281,
27... | 3.166667 | 60 |
"""Main file for DETR."""
from typing import Any
from absl import flags
from clu import metric_writers
import jax
import jax.numpy as jnp
import ml_collections
from scenic import app
from scenic.projects.baselines.detr import model as detr_model
from scenic.projects.baselines.detr import trainer
from scenic.train_lib import train_utils
FLAGS = flags.FLAGS
def get_model_cls(model_name: str) -> Any:
"""Returns model class given its name."""
if model_name == 'detr':
return detr_model.DETRModel
else:
raise ValueError(f'Unrecognized model: {model_name}.')
def main(rng: jnp.ndarray, config: ml_collections.ConfigDict, workdir: str,
writer: metric_writers.MetricWriter):
"""Main function for the DETR project."""
model_cls = get_model_cls(config.model_name)
data_rng, rng = jax.random.split(rng)
dataset = train_utils.get_dataset(
config, data_rng, dataset_service_address=FLAGS.dataset_service_address)
trainer.train_and_evaluate(
rng=rng,
config=config,
model_cls=model_cls,
dataset=dataset,
workdir=workdir,
writer=writer)
if __name__ == '__main__':
app.run(main=main)
| [
37811,
13383,
2393,
329,
38267,
49,
526,
15931,
198,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
2352,
75,
1330,
9701,
198,
6738,
537,
84,
1330,
18663,
62,
34422,
198,
11748,
474,
897,
198,
11748,
474,
897,
13,
77,
32152,
355,
474,
... | 2.655963 | 436 |
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from main.serializers import BookReplicaSerializer, BookSerializer, LibrarianSerializer, LibraryHallSerializer, ReaderSerializer
from main.models import Book, BookReplica, CustomUser, Librarian, LibraryHall, Reader
from rest_framework import generics
from django.shortcuts import render
# Create your views here.
| [
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
1330,
3722,
220,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1388,
13,
46911,
11341,
1330,
4897,
39232,
3970,
32634,
7509,
11,
4897,... | 4.017857 | 112 |
"""Loads a trained model checkpoint and makes predictions on a dataset."""
from chemprop_solvation.parsing import parse_predict_args
from chemprop_solvation.train import make_predictions
if __name__ == '__main__':
args = parse_predict_args()
make_predictions(args)
| [
37811,
8912,
82,
257,
8776,
2746,
26954,
290,
1838,
16277,
319,
257,
27039,
526,
15931,
198,
198,
6738,
4607,
22930,
62,
34453,
10473,
13,
79,
945,
278,
1330,
21136,
62,
79,
17407,
62,
22046,
198,
6738,
4607,
22930,
62,
34453,
10473,
... | 3.235294 | 85 |
class NoSolutionPossible(Exception):
"""A simple class used to explicitly let a user know that a solution is not
possible given the current inputs.
"""
pass
| [
198,
4871,
1400,
46344,
47,
4733,
7,
16922,
2599,
198,
220,
220,
220,
37227,
32,
2829,
1398,
973,
284,
11777,
1309,
257,
2836,
760,
326,
257,
4610,
318,
407,
198,
220,
220,
220,
1744,
1813,
262,
1459,
17311,
13,
198,
220,
220,
220,
... | 3.5 | 50 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['BackupArgs', 'Backup']
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 3.504673 | 107 |
import math
from rpython.rlib.rfloat import isnan, isinf, copysign
# code to deal with special values (infinities, NaNs, ...)
#
# The special types can be:
ST_NINF = 0 # negative infinity
ST_NEG = 1 # negative finite number (nonzero)
ST_NZERO = 2 # -0.
ST_PZERO = 3 # +0.
ST_POS = 4 # positive finite number (nonzero)
ST_PINF = 5 # positive infinity
ST_NAN = 6 # Not a Number
P = math.pi
P14 = 0.25 * math.pi
P12 = 0.5 * math.pi
P34 = 0.75 * math.pi
INF = 1e200 * 1e200
N = INF / INF
U = -9.5426319407711027e33 # unlikely value, used as placeholder
Z = 0.0 # defined here instead of in the tuples below, because of a bug
# in pypy releases < 1.5
NAN = N
acos_special_values = build_table([
(P34,INF), (P,INF), (P,INF), (P,-INF), (P,-INF), (P34,-INF), (N,INF),
(P12,INF), (U,U), (U,U), (U,U), (U,U), (P12,-INF), (N,N),
(P12,INF), (U,U), (P12,Z), (P12,-Z), (U,U), (P12,-INF), (P12,N),
(P12,INF), (U,U), (P12,Z), (P12,-Z), (U,U), (P12,-INF), (P12,N),
(P12,INF), (U,U), (U,U), (U,U), (U,U), (P12,-INF), (N,N),
(P14,INF), (Z,INF), (Z,INF), (Z,-INF), (Z,-INF), (P14,-INF), (N,INF),
(N,INF), (N,N), (N,N), (N,N), (N,N), (N,-INF), (N,N),
])
acosh_special_values = build_table([
(INF,-P34), (INF,-P), (INF,-P), (INF,P), (INF,P), (INF,P34), (INF,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-P12), (Z,P12), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-P12), (Z,P12), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,N), (N,N), (N,N), (INF,N), (N,N),
])
asinh_special_values = build_table([
(-INF,-P14), (-INF,-Z), (-INF,-Z),(-INF,Z), (-INF,Z), (-INF,P14), (-INF,N),
(-INF,-P12), (U,U), (U,U), (U,U), (U,U), (-INF,P12), (N,N),
(-INF,-P12), (U,U), (-Z,-Z), (-Z,Z), (U,U), (-INF,P12), (N,N),
(INF,-P12), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,-Z), (N,Z), (N,N), (INF,N), (N,N),
])
atanh_special_values = build_table([
(-Z,-P12), (-Z,-P12), (-Z,-P12), (-Z,P12), (-Z,P12), (-Z,P12), (-Z,N),
(-Z,-P12), (U,U), (U,U), (U,U), (U,U), (-Z,P12), (N,N),
(-Z,-P12), (U,U), (-Z,-Z), (-Z,Z), (U,U), (-Z,P12), (-Z,N),
(Z,-P12), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,P12), (Z,N),
(Z,-P12), (U,U), (U,U), (U,U), (U,U), (Z,P12), (N,N),
(Z,-P12), (Z,-P12), (Z,-P12), (Z,P12), (Z,P12), (Z,P12), (Z,N),
(Z,-P12), (N,N), (N,N), (N,N), (N,N), (Z,P12), (N,N),
])
log_special_values = build_table([
(INF,-P34), (INF,-P), (INF,-P), (INF,P), (INF,P), (INF,P34), (INF,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (-INF,-P), (-INF,P), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (-INF,-Z), (-INF,Z), (U,U), (INF,P12), (N,N),
(INF,-P12), (U,U), (U,U), (U,U), (U,U), (INF,P12), (N,N),
(INF,-P14), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,P14), (INF,N),
(INF,N), (N,N), (N,N), (N,N), (N,N), (INF,N), (N,N),
])
sqrt_special_values = build_table([
(INF,-INF), (Z,-INF), (Z,-INF), (Z,INF), (Z,INF), (INF,INF), (N,INF),
(INF,-INF), (U,U), (U,U), (U,U), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (Z,-Z), (Z,Z), (U,U), (INF,INF), (N,N),
(INF,-INF), (U,U), (U,U), (U,U), (U,U), (INF,INF), (N,N),
(INF,-INF), (INF,-Z), (INF,-Z), (INF,Z), (INF,Z), (INF,INF), (INF,N),
(INF,-INF), (N,N), (N,N), (N,N), (N,N), (INF,INF), (N,N),
])
exp_special_values = build_table([
(Z,Z), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,Z), (Z,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,N), (U,U), (1.,-Z), (1.,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (1.,-Z), (1.,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
cosh_special_values = build_table([
(INF,N), (U,U), (INF,Z), (INF,-Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,Z), (U,U), (1.,Z), (1.,-Z), (U,U), (N,Z), (N,Z),
(N,Z), (U,U), (1.,-Z), (1.,Z), (U,U), (N,Z), (N,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,Z), (N,Z), (N,N), (N,N), (N,N),
])
sinh_special_values = build_table([
(INF,N), (U,U), (-INF,-Z), (-INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(Z,N), (U,U), (-Z,-Z), (-Z,Z), (U,U), (Z,N), (Z,N),
(Z,N), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,N), (Z,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
tanh_special_values = build_table([
(-1.,Z), (U,U), (-1.,-Z), (-1.,Z), (U,U), (-1.,Z), (-1.,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(N,N), (U,U), (-Z,-Z), (-Z,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (Z,-Z), (Z,Z), (U,U), (N,N), (N,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(1.,Z), (U,U), (1.,-Z), (1.,Z), (U,U), (1.,Z), (1.,Z),
(N,N), (N,N), (N,-Z), (N,Z), (N,N), (N,N), (N,N),
])
rect_special_values = build_table([
(INF,N), (U,U), (-INF,Z), (-INF,-Z), (U,U), (INF,N), (INF,N),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(Z,Z), (U,U), (-Z,Z), (-Z,-Z), (U,U), (Z,Z), (Z,Z),
(Z,Z), (U,U), (Z,-Z), (Z,Z), (U,U), (Z,Z), (Z,Z),
(N,N), (U,U), (U,U), (U,U), (U,U), (N,N), (N,N),
(INF,N), (U,U), (INF,-Z), (INF,Z), (U,U), (INF,N), (INF,N),
(N,N), (N,N), (N,Z), (N,Z), (N,N), (N,N), (N,N),
])
assert copysign(1., acosh_special_values[5][2][1]) == -1.
| [
11748,
10688,
198,
6738,
374,
29412,
13,
81,
8019,
13,
81,
22468,
1330,
2125,
272,
11,
318,
10745,
11,
2243,
893,
570,
198,
198,
2,
2438,
284,
1730,
351,
2041,
3815,
357,
10745,
259,
871,
11,
11013,
47503,
11,
2644,
8,
198,
2,
198... | 1.500791 | 4,425 |
# -*- coding: utf-8 -*-
'''
torstack.library.compat.py
compat definition.
:copyright: (c) 2018 by longniao <longniao@gmail.com>
:license: MIT, see LICENSE for more details.
'''
import sys
# __all__ = (
# 'text_type', 'string_types', 'izip', 'iteritems', 'itervalues',
# 'with_metaclass',
# )
PY3 = sys.version_info >= (3,)
if PY3:
text_type = str
string_types = (str, )
integer_types = int
izip = zip
_xrange = range
MAXSIZE = sys.maxsize
else:
text_type = unicode
string_types = (basestring, )
integer_types = (int, long)
from itertools import izip
_xrange = xrange
MAXSIZE = sys.maxint
# "raise x, y, z" raises SyntaxError in Python 3
exec("""def reraise(exctype, value, trace=None):
raise exctype, str(value), trace
""")
_unicode = unicode
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
13165,
25558,
13,
32016,
13,
5589,
265,
13,
9078,
198,
5589,
265,
6770,
13,
198,
198,
25,
22163,
4766,
25,
357,
66,
8,
2864,
416,
890,
77,
13481,
... | 2.338068 | 352 |