| content (string, lengths 1–1.04M) | input_ids (list, lengths 1–774k) | ratio_char_token (float64, 0.38–22.9) | token_count (int64, 1–774k) |
|---|---|---|---|
import breve
class Test( breve.Control ):
'''def iterate( self ):
self.object.energy -= self.increm
if 0 >= self.object.energy or self.object.energy >= 1:
self.increm = 0
self.object.adjustSize()
if self.increm != 0:
print self.object.temp'''
breve.Test = Test
# NOTE: CustomObject and myCustomShape are not defined in this snippet;
# the original breve file defines them before these registrations run.
breve.CustomObject = CustomObject
breve.myCustomShape = myCustomShape
Test()
| input_ids: [11748, 1449, 303, …] (truncated) | ratio_char_token: 2.382716 | token_count: 162 |
#!/usr/bin/python
"""
Tool to analyze some datalogger raw data
"""
from __future__ import print_function
import os
import sys
import argparse
import json
parser = argparse.ArgumentParser(description="Tool to analyze some datalogger raw data")
parser.add_argument("-i", "--input-file", help="file to read from", required=True)
# NOTE: the arguments are hard-coded here for testing; call parser.parse_args()
# without arguments to read them from the real command line.
options = parser.parse_args("-i /var/rrd/snmp/raw/ifTable_2017-11-15.csv".split())
if not os.path.isfile(options.input_file):
print("file %s does not exist" % options.input_file)
sys.exit(1)
data = {}
meta = {}
meta["delimiter"] = "\t"
meta["index_keynames"] = ("hostname", "ifDescr")
meta["ts_keyname"] = "ts"
meta["interval"] = 300
headers = None
with open(options.input_file, "rt") as infile:
for line in infile.read().split("\n"):
if line == "" or line == "\n":
continue
if headers is None:
headers = line.split(meta["delimiter"])
meta["headers"] = headers
data["length"] = len(headers)
for header in headers:
data[header] = {
"isnumeric" : True,
"interval" : 0
}
assert meta["ts_keyname"] in headers
assert all((index_key in headers for index_key in meta["index_keynames"]))
else:
columns = line.split(meta["delimiter"])
assert len(columns) == data["length"]
for index, column in enumerate(columns):
data[headers[index]]["isnumeric"] = all((data[headers[index]]["isnumeric"], column.isnumeric()))
print(line)
meta["value_keynames"] = dict([(header, "asis") for header in headers if data[header]["isnumeric"] == True])
meta["blacklist"] = [header for header in headers if (data[header]["isnumeric"] == False) and (header not in meta["index_keynames"]) and (header != meta["ts_keyname"])]
print(json.dumps(meta, indent=4, sort_keys=True))
| input_ids: [2, 48443, 14629, …] (truncated) | ratio_char_token: 2.394279 | token_count: 804 |
from os.path import *
REPO_DIR = abspath(join(dirname(realpath(__file__)), pardir, pardir))
TOKEN_PATH = join(REPO_DIR, '.hooks', '.token')
VERSION_PATH = join(REPO_DIR, 'res', 'version.txt')
| input_ids: [6738, 28686, 13, …] (truncated) | ratio_char_token: 2.573333 | token_count: 75 |
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/80C4285C-779E-DD11-9889-001617E30CA4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/A83BF5EE-6E9E-DD11-8082-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8266853E-999E-DD11-8B73-001D09F2432B.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2AAFE9A9-A19E-DD11-821B-000423D99F3E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/067E98F3-489F-DD11-B309-000423D996B4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/64C1D6F5-489F-DD11-90B7-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/3C084F93-679C-DD11-A361-000423D9989E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9C14E69F-069D-DD11-AC41-001617DBCF1E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5439B4CA-309D-DD11-84E5-000423D944F8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/D20BB375-AE9D-DD11-BF49-000423D944FC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/E2E8FE03-A69D-DD11-8699-000423D98750.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/B40C29EC-B69D-DD11-A665-000423D6A6F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/843C7874-1F9F-DD11-8E03-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7EB3DF8E-0E9F-DD11-A451-001D09F29146.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CEB001F9-169F-DD11-A5E6-000423D94494.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/382BAEE2-D39E-DD11-A0A4-000423D98EC8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CC5B37A1-A99E-DD11-816F-001617DBD230.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/6EDD168B-2F9F-DD11-ADF5-001617C3B79A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EE4B4C82-999C-DD11-86EC-000423D99F3E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/1CC8332F-459E-DD11-BFE1-001617C3B65A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7A6A133C-999E-DD11-9155-001D09F2462D.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F292BE7F-409F-DD11-883A-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B870AA81-409F-DD11-B549-001617C3B78C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9003F328-899C-DD11-83D7-000423D986C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/500B13D3-6F9C-DD11-8745-001617DC1F70.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/4CBAEDCC-309D-DD11-A617-001617E30D06.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/AED19458-399D-DD11-B9AC-000423D9A2AE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/A6688D1F-959D-DD11-B5B7-000423D6A6F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F0076F20-F59E-DD11-8B57-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EC6C6EA4-499D-DD11-AC7D-000423D98DB4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/DA099105-639D-DD11-9C3E-001617E30F50.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/2E40EDED-1A9E-DD11-9014-001617DBD5AC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/7647004C-F19D-DD11-8BAA-001617DBD224.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/38881706-5E9E-DD11-B487-000423D98868.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/1098901B-569E-DD11-BE60-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5E4E7508-919C-DD11-AEB1-000423D9853C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/060DD475-179D-DD11-A003-000423D94908.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/8A563E55-289D-DD11-BA24-000423D6BA18.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/545F9B54-D09D-DD11-A58B-000423D6B5C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/68795DEE-D79D-DD11-ADB7-000423D98DB4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3AD49E1B-F59E-DD11-81C4-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/548891AB-8C9D-DD11-8989-001617C3B69C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/745CD91D-529D-DD11-8908-000423D6B48C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3EF1CC87-2F9F-DD11-9EFC-001617DF785A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FCB4F2BA-3C9E-DD11-82C7-000423D99160.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/ECC4D018-569E-DD11-80C4-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/20C97175-669E-DD11-8ADD-00161757BF42.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/52683098-A99E-DD11-BCD0-000423D94AA8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/F6C17BA7-A19E-DD11-B57C-000423D98634.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/D844B765-519F-DD11-96F9-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/02EB3FD3-6F9C-DD11-8C35-001617C3B6FE.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0EB355C8-309D-DD11-85B7-001617C3B64C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8E478481-BA9E-DD11-9573-000423D6B358.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/A4775BE3-739D-DD11-843D-001617C3B778.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/8E8B21C6-F99D-DD11-BF05-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0EF20D52-139E-DD11-9473-000423D6B5C4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7C00B404-389F-DD11-AB81-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/AE67CFF1-279F-DD11-B6DC-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7400A101-389F-DD11-B540-000423D60FF6.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2A630CF2-279F-DD11-942A-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/1CD3DEA6-F59C-DD11-986D-000423D98BC4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/D809EECD-7F9E-DD11-B4D7-00161757BF42.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/64451F65-779E-DD11-869D-001617E30D40.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/BA532E6C-519F-DD11-8DE7-000423D98FBC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/021AEBFE-7A9F-DD11-863E-0019DB29C620.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CA5A90F6-489F-DD11-8F60-000423D6B2D8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/028578E0-809C-DD11-AF7D-001617C3B6E8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/08A6038E-679C-DD11-A4B9-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0012/18BA3290-679C-DD11-B9A1-001617C3B77C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B0CD45DB-D39E-DD11-BC03-000423D985E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/125FE86B-CB9E-DD11-B054-000423DD2F34.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/6E783236-849D-DD11-A9FF-001617C3B654.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/800FA4BD-5A9D-DD11-ACBB-001617DBD5AC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/760FB963-7C9D-DD11-B812-001D09F231C9.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/52CBD0DE-0A9E-DD11-B583-000423D6B358.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/905B1953-349E-DD11-8022-001D09F2AD7F.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/7A7A6D05-389F-DD11-9D08-000423D98804.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/C223029D-E59C-DD11-A125-001617E30D40.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3293D2A6-4D9E-DD11-81D1-000423D98B5C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4E5AEDFC-5D9E-DD11-BD7D-001617C3B5F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/2A9CA4B8-909E-DD11-857B-001617E30D38.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/9E30F47D-409F-DD11-A947-001617C3B6E8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/745BB069-519F-DD11-A8F9-000423D94700.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/4493CD28-899C-DD11-AF14-000423D6CA02.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/0085D14F-289D-DD11-862E-000423D6006E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/841A2D63-E09D-DD11-BDA5-001617DF785A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/5658C98B-9D9D-DD11-9B46-000423D99F1E.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/3ABEFDFB-169F-DD11-94E3-000423D98BC4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FC20EEFC-059F-DD11-A7CA-001617C3B5F4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/CE7B883A-ED9E-DD11-A737-0019DB29C614.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4261BA6C-CB9E-DD11-AE94-000423D986A8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/9A134C3C-849D-DD11-8A1C-000423D98C20.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FE7A7A73-1F9F-DD11-A841-001617DBD230.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/6097BB22-FE9C-DD11-AA3C-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/F4AE9DE3-DC9C-DD11-9223-000423D6B42C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/3CA664CA-309D-DD11-A642-000423D951D4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/2C9D3EE0-C79D-DD11-AAF0-000423D94534.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/7E81C76A-BF9D-DD11-9970-001617E30F50.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4EA98C8F-0E9F-DD11-A48E-001D09F253FC.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/0A4BBAF5-C29E-DD11-967D-0016177CA778.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/4253601B-B29E-DD11-9725-001617DBD224.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/E00BC4D5-D39E-DD11-861A-001617C3B5E4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/EA733333-419D-DD11-9B49-000423D99660.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/40A87664-239E-DD11-8ABC-000423D944F8.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/DA448F60-239E-DD11-8347-000423D98DD4.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/0E9D6303-389F-DD11-8C22-001617E30D0A.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/8A40053E-889E-DD11-9442-000423D944F0.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/082ED767-999E-DD11-962C-0019B9F70607.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/205DAE07-0F9D-DD11-9FD4-000423D9890C.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/041B05FD-059F-DD11-871E-001617E30D52.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/FE7823F2-C29E-DD11-81F1-0019DB29C614.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/BC834BD5-2B9E-DD11-A8D9-001617C3B706.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0013/966DFADC-E89D-DD11-A90E-000423D99264.root',
'rfio:///?svcClass=cmscaf&path=/castor/cern.ch/cms/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_V2P_StreamALCARECOTkAlCosmics0T_v7/0014/B4FD3F7C-409F-DD11-8F2B-001617DBCF90.root'
| input_ids: [6, 41871, 952, …] (truncated) | ratio_char_token: 1.964692 | token_count: 10,734 |
# vim: ai:sw=4:ts=4:sta:et:fo=croql
# coding=utf-8
| input_ids: [2, 43907, 25, 257, 72, 25, 2032, 28, 19, 25, 912, 28, 19, 25, 38031, 25, 316, 25, 6513, 28, 19915, 13976, 198, 2, 19617, 28, 40477, 12, 23, 198] | ratio_char_token: 1.7 | token_count: 30 |
from typing import Type
from pyspark.sql.functions import col, lit, when
from impc_etl.shared import utils
from impc_etl.workflow.config import ImpcConfig
from pyspark.sql.session import SparkSession
import luigi
from luigi.contrib.spark import PySparkTask
from pyspark.sql.types import StringType
| input_ids: [6738, 19720, 1330, …] (truncated) | ratio_char_token: 3.180851 | token_count: 94 |
from collections import deque

def main():
    # stub entry point: the original body of main() is not present in this snippet
    pass

if __name__ == "__main__":
    main()
| input_ids: [6738, 17268, 1330, 390, 4188, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419] | ratio_char_token: 2.956522 | token_count: 23 |
from setuptools import setup

from likeasrt import VERSION

def readme():
    # assumption: the long description is read from a README.md next to setup.py
    with open("README.md", encoding="utf-8") as f:
        return f.read()

setup(
name="like-a-srt",
version=VERSION,
description=(
"CLI to generate SRT subtitles automatically from audio files, "
"using Azure Speech"
),
long_description=readme(),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
],
url="https://github.com/RobertoPrevato/Like-a-srt",
author="RobertoPrevato",
author_email="roberto.prevato@gmail.com",
keywords="azure speech srt subtitles automatic generation",
license="MIT",
packages=[
"likeasrt",
"likeasrt.commands",
"likeasrt.domain",
],
entry_points={
"console_scripts": ["like-a-srt=likeasrt.main:main", "las=likeasrt.main:main"]
},
install_requires=[
"click==8.0.3",
"essentials==1.1.4",
"azure-cognitiveservices-speech==1.19.0",
"python-dotenv==0.19.2",
],
include_package_data=True,
)
| input_ids: [6738, 900, 37623, …] (truncated) | ratio_char_token: 2.376726 | token_count: 507 |
from .testapp.models import Thing
| input_ids: [6738, 764, 9288, 1324, 13, 27530, 1330, 21561, 198] | ratio_char_token: 3.777778 | token_count: 9 |
import click
from .config import PyjjConfig
from .database import Database as Db
from .messages import msg, header, content, division
from .utils import validate_url
pass_config = click.make_pass_decorator(PyjjConfig, ensure=True)
@click.group(help="A CLI tool for bookmark management")
@pass_config
def pyjj(config):
"""A CLI tool for bookmark management
:param object config: an object with the current context
"""
config.parse()
config.db = Db(division=config.division)
config.db.setup()
click.echo(division(config.division))
@pyjj.command(help="Switch to a different table")
@click.argument("division")
@pass_config
def use(config, division: str):
"""Switch to a different table
:param object config: an object with the current context
:param str division: a name of the division
"""
config.update(division=division)
click.echo(f"Switched to {division}")
@pyjj.command(help="Show a list of bookmarks")
@click.option("--tag", "-t")
@pass_config
def list(config, tag: str):
"""Show a list of bookmarks
:param object config: an object with the current context
:param str tag: a tag of urls
"""
status, urls = config.db.list_urls(tag=tag)
if not status:
click.echo(msg(status, urls))
else:
click.echo(header("Bookmarks", f"{'ID':^7} {'URL':60} {'TAGS':20} DATE"))
for url, tags in urls:
click.echo(content(f"{url[0]:^7} {url[1]:60} {','.join(tags):20} {url[2]}"))
# TODO: Pagination
@pyjj.command(help="Add a new bookmark")
@click.argument("url")
@click.option("--tags", "-t")
@pass_config
def add(config, tags: str, url: str):
"""Add a new bookmark
:param object config: an object with the current context
:param str url: an url to add to the database
"""
try:
_url = validate_url(url)
if tags:
result = config.db.add_url(_url, tags=tags.split(","))
else:
result = config.db.add_url(_url)
click.echo(msg(*result))
except Exception as e:
click.echo(msg(False, str(e)))
@pyjj.command(help="Edit a bookmark")
@click.argument("id")
@click.argument("url")
@pass_config
def edit(config, id: int, url: str):
"""Edit a bookmark
:param object config: an object with the current context
:param int id: an id of url to edit
:param str url: an url to add to the database
"""
try:
_url = validate_url(url)
result = config.db.get_url(id)
if result[0]: # Edit url as id exists
result = config.db.edit_url(id, _url)
click.echo(msg(*result))
except Exception as e:
click.echo(msg(False, str(e)))
@pyjj.command(help="Remove a bookmark")
@click.argument("id")
@click.option("--tag", "-t")
@pass_config
def remove(config, id, tag):
"""Remove a bookmark. When given option `-t`, only the tag
associated with the url gets removed.
:param object config: an object with the current context
:param int id: an id of url to delete
:param str tag: a tag of url to delete
"""
result = config.db.get_url(id)
if result[0]: # Remove url as id exists
if tag:
result = config.db.remove_url_tag(id, tag)
else:
is_confirmed = click.confirm(f"Wish to delete {result[1]} ?")
if is_confirmed:
result = config.db.remove_url(id)
else:
result = (False, "aborted.")
click.echo(msg(*result))
@pyjj.command(help="Get a random bookmark")
@click.option("--tag", "-t")
@pass_config
def eureka(config, tag=None):
"""Get a random bookmark. When given option `-t`, returns
a randome bookmark with the given tag.
:param object config: an object with the current context
:param str tag: a tag of a random url
"""
_, url_tags = config.db.get_random_url(tag)
url, tags = url_tags
click.echo(header("Eureka!", f"{'ID':^7} {'URL':60} {'TAGS':20} DATE"))
click.echo(content(f"{url[0]:^7} {url[1]:60} {','.join(tags):20} {url[2]}"))
@pyjj.command(help="Show a list of tags")
@pass_config
def tags(config):
"""Show a list of tags.
:param object config: an object with the current context
"""
status, tags = config.db.list_tags()
click.echo(header("Tags", f"{'ID':^7} {'TAGS':20} DATE"))
if status:
for index, tag in tags:
click.echo(content(f"{index:^7} {tag[0]:20} {tag[1]}"))
if __name__ == "__main__":
pyjj()
| input_ids: [11748, 3904, 198, …] (truncated) | ratio_char_token: 2.510626 | token_count: 1,788 |
from bs4 import BeautifulSoup
from .utils.request import get
from typing import Optional
class User:
"""
Represents a Roblox user.
"""
def __init__(self, user_id: int):
"""
Construct a new user class.
:param user_id: The User's ID.
"""
response = get(f"https://users.roblox.com/v1/users/{user_id}").json()
status = get(f"https://users.roblox.com/v1/users/{user_id}/status").json()["status"]
self.name: str = response["name"]
self.display_name: str = response["displayName"]
self.id: int = response["id"]
self.is_banned: bool = response["isBanned"]
self.created: str = response["created"]
        self.description: Optional[str] = response["description"] if response["description"] else None
        self.status: Optional[str] = status if status else None
| input_ids: [6738, 275, 82, …] (truncated) | ratio_char_token: 2.36413 | token_count: 368 |
import unittest
# def test___init__(self):
# # open_vas_override = OpenVASOverride()
# assert False # TODO: implement your test here
#
# def test_make_object(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.make_object(oid, name, text, text_is_excerpt, threat, new_threat, orphan))
# assert False # TODO: implement your test here
#
# def test_name(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.name())
# assert False # TODO: implement your test here
#
# def test_name_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.name(val))
# assert False # TODO: implement your test here
#
# def test_new_threat(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.new_threat())
# assert False # TODO: implement your test here
#
# def test_new_threat_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.new_threat(val))
# assert False # TODO: implement your test here
#
# def test_oid(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.oid())
# assert False # TODO: implement your test here
#
# def test_oid_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.oid(val))
# assert False # TODO: implement your test here
#
# def test_orphan(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.orphan())
# assert False # TODO: implement your test here
#
# def test_orphan_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.orphan(val))
# assert False # TODO: implement your test here
#
# def test_text(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.text())
# assert False # TODO: implement your test here
#
# def test_text_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.text(val))
# assert False # TODO: implement your test here
#
# def test_text_is_excerpt(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.text_is_excerpt())
# assert False # TODO: implement your test here
#
# def test_text_is_excerpt_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.text_is_excerpt(val))
# assert False # TODO: implement your test here
#
# def test_threat(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.threat())
# assert False # TODO: implement your test here
#
# def test_threat_case_2(self):
# # open_vas_override = OpenVASOverride()
# # self.assertEqual(expected, open_vas_override.threat(val))
# assert False # TODO: implement your test here
# def test___init__(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# assert False # TODO: implement your test here
#
# def test_name(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.name())
# assert False # TODO: implement your test here
#
# def test_oid(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.oid())
# assert False # TODO: implement your test here
#
# def test_orphan(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.orphan())
# assert False # TODO: implement your test here
#
# def test_text(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.text())
# assert False # TODO: implement your test here
#
# def test_text_is_excerpt(self):
# # open_vas_notes = OpenVASNotes(oid, name, text, text_is_excerpt, orphan)
# # self.assertEqual(expected, open_vas_notes.text_is_excerpt())
# assert False # TODO: implement your test here
if __name__ == '__main__':
unittest.main()
| input_ids: [11748, 555, 715, …] (truncated) | ratio_char_token: 2.327813 | token_count: 2,053 |
"""Create a new tournament."""
from scorecard import db
from scorecard.models.tournament import Tournament
def create(name, start_date, end_date):
"""Create a tournament."""
tournament = Tournament.query.filter_by(name=name, start_date=start_date, end_date=end_date).first()
if tournament is None:
tournament = Tournament(name, start_date, end_date)
tournament.save()
db.session.commit()
return tournament
| input_ids: [37811, 16447, 257, …] (truncated) | ratio_char_token: 3.013333 | token_count: 150 |
from operator import attrgetter
import torch
from continual import CoModule
from pytorch_lightning.utilities.parsing import AttributeDict
from ride.core import Configs, RideMixin
from ride.utils.logging import getLogger
from ride.utils.utils import name
logger = getLogger("co3d")
| input_ids: [6738, 10088, 1330, …] (truncated) | ratio_char_token: 3.380952 | token_count: 84 |
# Generated by Django 3.1.7 on 2021-04-13 20:33
from django.db import migrations, models
| input_ids: [2, 2980, 515, 416, 37770, 513, 13, 16, 13, 22, 319, 33448, 12, 3023, 12, 1485, 1160, 25, 2091, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628] | ratio_char_token: 2.84375 | token_count: 32 |
import cv2
import numpy as np

def nothing(x):
    # no-op callback: cv2.createTrackbar requires an on-change callback
    pass

cap = cv2.VideoCapture(0)
cv2.namedWindow("frame")
cv2.createTrackbar("test", "frame", 50, 500, nothing)
cv2.createTrackbar("color/gray", "frame", 0, 1, nothing)
while True:
ret, frame = cap.read()
if not ret:
break
test = cv2.getTrackbarPos("test", "frame")
font = cv2.FONT_HERSHEY_COMPLEX
cv2.putText(frame, str(test), (50, 150), font, 4, (0, 0, 255))
s = cv2.getTrackbarPos("color/gray", "frame")
    if s == 0:
        pass
    else:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):  # press q to quit
        break

cap.release()
cv2.destroyAllWindows()
| input_ids: [11748, 269, 85, …] (truncated) | ratio_char_token: 2.169884 | token_count: 259 |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016-present Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import subprocess
from docker import Client
from itertools import chain
from nose.tools import *
from CordContainer import *
from CordTestUtils import log_test as log
import threading
import time
import os
import json
import pexpect
import urllib
log.setLevel('INFO')
flatten = lambda l: chain.from_iterable(l)
| input_ids: [198, 2, 15069, …] (truncated) | ratio_char_token: 3.823232 | token_count: 396 |
#!/usr/bin/env python#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Madpack utilities
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from itertools import izip_longest
import re
import unittest

def all_numeric(l):
    # helper used by is_rev_gte: True when every element of the list is an int
    return all(isinstance(x, int) for x in l)

def is_rev_gte(left, right):
""" Return if left >= right
Args:
@param left: list. Revision numbers in a list form (as returned by
_get_rev_num).
@param right: list. Revision numbers in a list form (as returned by
_get_rev_num).
Returns:
Boolean
If left and right are all numeric then regular list comparison occurs.
If either one contains a string, then comparison occurs till both have int.
First list to have a string is considered smaller
(including if the other does not have an element in corresponding index)
Examples:
[1, 9, 0] >= [1, 9, 0]
[1, 9, 1] >= [1, 9, 0]
[1, 9, 1] >= [1, 9]
[1, 10] >= [1, 9, 1]
[1, 9, 0] >= [1, 9, 0, 'dev']
[1, 9, 1] >= [1, 9, 0, 'dev']
[1, 9, 0] >= [1, 9, 'dev']
[1, 9, 'rc'] >= [1, 9, 'dev']
[1, 9, 'rc', 0] >= [1, 9, 'dev', 1]
[1, 9, 'rc', '1'] >= [1, 9, 'rc', '1']
"""
if all_numeric(left) and all_numeric(right):
return left >= right
else:
for i, (l_e, r_e) in enumerate(izip_longest(left, right)):
if isinstance(l_e, int) and isinstance(r_e, int):
if l_e == r_e:
continue
else:
return l_e > r_e
elif isinstance(l_e, int) or isinstance(r_e, int):
# [1, 9, 0] > [1, 9, 'dev']
# [1, 9, 0] > [1, 9]
return isinstance(l_e, int)
else:
# both are not int
if r_e is None:
# [1, 9, 'dev'] < [1, 9]
return False
else:
return l_e is None or left[i:] >= right[i:]
return True
# ----------------------------------------------------------------------
def get_rev_num(rev):
"""
Convert version string into number for comparison
@param rev version text
It is expected to follow Semantic Versioning (semver.org)
Valid inputs:
1.9.0, 1.10.0, 2.5.0
1.0.0-alpha, 1.0.0-alpha.1, 1.0.0-0.3.7, 1.0.0-x.7.z.92
1.0.0+20130313144700, 1.0.0-beta+exp.sha.5114f85
Returns:
List. The numeric parts of version string are converted to int and
non-numeric parts are returned as is.
Invalid versions strings returned as [0]
Examples:
'1.9.0' -> [1, 9, 0]
'1.9' -> [1, 9, 0]
        '1.9-alpha' -> [1, 9, 0, 'alpha']
        '1.9-alpha+dc65ab' -> [1, 9, 0, 'alpha', 'dc65ab']
'a.123' -> [0]
"""
try:
rev_parts = re.split('[-+_]', rev)
# get numeric part of the version string
num = [int(i) for i in rev_parts[0].split('.')]
num += [0] * (3 - len(num)) # normalize num to be of length 3
# get identifier part of the version string
if len(rev_parts) > 1:
num.extend(map(str, rev_parts[1:]))
if not num:
num = [0]
return num
except (ValueError, TypeError):
# invalid revision
return [0]
# ------------------------------------------------------------------------------
# -----------------------------------------------------------------------
# Unit tests
# -----------------------------------------------------------------------
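# Illustrative unit tests (not part of the original file): they exercise the
# docstring examples of get_rev_num and is_rev_gte above; the class name is
# an arbitrary choice.
class RevUtilsTestCase(unittest.TestCase):

    def test_get_rev_num(self):
        self.assertEqual(get_rev_num('1.9.0'), [1, 9, 0])
        self.assertEqual(get_rev_num('1.9'), [1, 9, 0])
        self.assertEqual(get_rev_num('a.123'), [0])

    def test_is_rev_gte(self):
        self.assertTrue(is_rev_gte([1, 10], [1, 9, 1]))
        self.assertTrue(is_rev_gte([1, 9, 0], [1, 9, 'dev']))
        self.assertFalse(is_rev_gte([1, 9, 'dev'], [1, 9, 0]))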
if __name__ == "__main__":
unittest.main()
| input_ids: [2, 48443, 14629, …] (truncated) | ratio_char_token: 2.238262 | token_count: 2,002 |
"""
The following simple example demonstarates how a module can initialize a counter from a file
when it is imported and save the counter's updated value automatically when the program terminates
without relying on the application making an explicit call into this module at termination
"""
try:
with open("counterfile") as infile:
_count = int(infile.read())
except FileNotFoundError:
_count = 0
import atexit

def savecounter():
    # persist the counter so the next run resumes from the saved value
    with open("counterfile", "w") as outfile:
        outfile.write(str(_count))

atexit.register(savecounter)
"""
Positional and keyword arguments may also be passwd to `register()`
"""
# Positinoal arguments
atexit.register(goodbye, 'Denny', 'nice')
# Keyword arguments
atexit.register(goodbye, adjective='nice', name='Donny')
"""
Usage as a decorator
"""
@atexit.register
def farewell():
    # illustrative completion: register() used as a decorator takes a no-argument function
    print('Goodbye!')
| input_ids: [37811, 198, 464, …] (truncated) | ratio_char_token: 3.576355 | token_count: 203 |
# being a bit too dynamic
# pylint: disable=E1101
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import numpy as np
def scatter_matrix(frame, alpha=0.5, figsize=None, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
fig, axes = _subplots(nrows=n, ncols=n, figsize=figsize)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
for i, a in zip(range(n), df.columns):
for j, b in zip(range(n), df.columns):
axes[i, j].scatter(df[b], df[a], alpha=alpha, **kwds)
axes[i, j].yaxis.set_visible(False)
axes[i, j].xaxis.set_visible(False)
# setup labels
if i == 0 and j % 2 == 1:
axes[i, j].set_xlabel(b, visible=True)
axes[i, j].xaxis.set_visible(True)
axes[i, j].xaxis.set_ticks_position('top')
axes[i, j].xaxis.set_label_position('top')
if i == n - 1 and j % 2 == 0:
axes[i, j].set_xlabel(b, visible=True)
axes[i, j].xaxis.set_visible(True)
axes[i, j].xaxis.set_ticks_position('bottom')
axes[i, j].xaxis.set_label_position('bottom')
if j == 0 and i % 2 == 0:
axes[i, j].set_ylabel(a, visible=True)
axes[i, j].yaxis.set_visible(True)
axes[i, j].yaxis.set_ticks_position('left')
axes[i, j].yaxis.set_label_position('left')
if j == n - 1 and i % 2 == 1:
axes[i, j].set_ylabel(a, visible=True)
axes[i, j].yaxis.set_visible(True)
axes[i, j].yaxis.set_ticks_position('right')
axes[i, j].yaxis.set_label_position('right')
# ensure {x,y}lim off diagonal are the same as diagonal
for i in range(n):
for j in range(n):
if i != j:
axes[i, j].set_xlim(axes[j, j].get_xlim())
axes[i, j].set_ylim(axes[i, i].get_ylim())
return axes
def grouped_hist(data, column=None, by=None, ax=None, bins=50, log=False,
figsize=None, layout=None, sharex=False, sharey=False,
rot=90):
"""
Returns
-------
fig : matplotlib.Figure
"""
# if isinstance(data, DataFrame):
# data = data[column]
fig, axes = _grouped_plot(plot_group, data, column=column,
by=by, sharex=sharex, sharey=sharey,
figsize=figsize, layout=layout, rot=rot)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
hspace=0.3, wspace=0.2)
return fig
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
_default_rot = 0
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False}
    _need_to_set_index = False
def plot_frame(frame=None, subplots=False, sharex=True, sharey=False,
use_index=True,
figsize=None, grid=True, legend=True, rot=None,
ax=None, title=None,
xlim=None, ylim=None, logy=False,
xticks=None, yticks=None,
kind='line',
sort_columns=True, fontsize=None, **kwds):
"""
Make line or bar plot of DataFrame's series with the index on the x-axis
using matplotlib / pylab.
Parameters
----------
subplots : boolean, default False
Make separate subplots for each time series
sharex : boolean, default True
In case subplots=True, share x axis
sharey : boolean, default False
In case subplots=True, share y axis
use_index : boolean, default True
Use index as ticks for x axis
stacked : boolean, default False
If True, create stacked bar plot. Only valid for DataFrame input
sort_columns: boolean, default True
Sort column names to determine plot ordering
title : string
Title to use for the plot
grid : boolean, default True
Axis grid lines
legend : boolean, default True
Place legend on axis subplots
ax : matplotlib axis object, default None
kind : {'line', 'bar', 'barh'}
bar : vertical bar plot
barh : horizontal bar plot
logy : boolean, default False
For line plots, use log scaling on y axis
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
ax_or_axes : matplotlib.AxesSubplot or list of them
"""
kind = kind.lower().strip()
if kind == 'line':
klass = LinePlot
elif kind in ('bar', 'barh'):
klass = BarPlot
else:
raise ValueError('Invalid chart type given %s' % kind)
plot_obj = klass(frame, kind=kind, subplots=subplots, rot=rot,
legend=legend, ax=ax, fontsize=fontsize,
use_index=use_index, sharex=sharex, sharey=sharey,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
title=title, grid=grid, figsize=figsize, logy=logy,
sort_columns=sort_columns, **kwds)
plot_obj.generate()
plot_obj.draw()
if subplots:
return plot_obj.axes
else:
return plot_obj.axes[0]
def plot_series(series, label=None, kind='line', use_index=True, rot=None,
xticks=None, yticks=None, xlim=None, ylim=None,
ax=None, style=None, grid=True, logy=False, **kwds):
"""
Plot the input series with the index on the x-axis using matplotlib
Parameters
----------
label : label argument to provide to plot
kind : {'line', 'bar'}
rot : int, default 30
Rotation for tick labels
use_index : boolean, default True
Plot index as axis tick labels
ax : matplotlib axis object
If not passed, uses gca()
style : string, default matplotlib default
matplotlib line style to use
ax : matplotlib axis object
If not passed, uses gca()
kind : {'line', 'bar', 'barh'}
bar : vertical bar plot
barh : horizontal bar plot
logy : boolean, default False
For line plots, use log scaling on y axis
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks
kwds : keywords
Options to pass to matplotlib plotting method
Notes
-----
See matplotlib documentation online for more on this subject
"""
if kind == 'line':
klass = LinePlot
elif kind in ('bar', 'barh'):
klass = BarPlot
if ax is None:
ax = _gca()
# is there harm in this?
if label is None:
label = series.name
plot_obj = klass(series, kind=kind, rot=rot, logy=logy,
ax=ax, use_index=use_index, style=style,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
legend=False, grid=grid, label=label, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.ax
# if use_index:
# # custom datetime/interval plotting
# from pandas import IntervalIndex, DatetimeIndex
# if isinstance(self.index, IntervalIndex):
# return tsp.tsplot(self)
# if isinstance(self.index, DatetimeIndex):
# offset = self.index.freq
# name = datetools._newOffsetNames.get(offset, None)
# if name is not None:
# try:
# code = datetools._interval_str_to_code(name)
# s_ = Series(self.values,
# index=self.index.to_interval(freq=code),
# name=self.name)
# tsp.tsplot(s_)
# except:
# pass
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None):
"""
    Make a box plot from DataFrame column, optionally grouped by some columns or
    other inputs
Parameters
----------
data : DataFrame
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
fontsize : int or string
Returns
-------
ax : matplotlib.axes.AxesSubplot
"""
    if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
if not isinstance(by, (list, tuple)):
by = [by]
fig, axes = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize)
# Return axes in multiplot case, maybe revisit later # 985
ret = axes
else:
if ax is None:
ax = _gca()
fig = ax.get_figure()
data = data._get_numeric_data()
if columns:
cols = columns
else:
cols = data.columns
keys = [_stringify(x) for x in cols]
# Return boxplot dict in single plot case
bp = ax.boxplot(list(data[cols].values.T))
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
ax.grid(grid)
ret = bp
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return ret
def scatter_plot(data, x, y, by=None, ax=None, figsize=None):
"""
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(str(y))
ax.set_xlabel(str(x))
return fig
def hist_frame(data, grid=True, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None, ax=None, **kwds):
"""
Draw Histogram the DataFrame's series using matplotlib / pylab.
Parameters
----------
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
kwds : other plotting keyword arguments
To be passed to hist function
"""
import matplotlib.pyplot as plt
n = len(data.columns)
k = 1
while k ** 2 < n:
k += 1
_, axes = _subplots(nrows=k, ncols=k, ax=ax)
for i, col in enumerate(com._try_sort(data.columns)):
ax = axes[i / k][i % k]
ax.hist(data[col].dropna().values, **kwds)
ax.set_title(col)
ax.grid(grid)
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
def hist_series(self, ax=None, grid=True, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None, **kwds):
"""
Draw histogram of the input series using matplotlib
Parameters
----------
ax : matplotlib axis object
If not passed, uses gca()
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
kwds : keywords
To be passed to the actual plotting function
Notes
-----
See matplotlib documentation online for more on this
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
values = self.dropna().values
ax.hist(values, **kwds)
ax.grid(grid)
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return ax
# copied from matplotlib/pyplot.py for compatibility with matplotlib < 1.0
def _subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, **fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
nrows : int
Number of rows of the subplot grid. Defaults to 1.
ncols : int
Number of columns of the subplot grid. Defaults to 1.
sharex : bool
If True, the X axis will be shared amongst all subplots.
    sharey : bool
        If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
      - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
        array of Axis objects.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing at all is done: the returned axis object is always
      a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
fig_kw : dict
Dict with keywords passed to the figure() call. Note that all keywords
not recognized above will be automatically included here.
ax : Matplotlib axis object, default None
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
      more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
# Create empty object array to hold all axes. It's easiest to make it 1-d
    # so we can just append subplots upon creation, and then reshape at the end.
nplots = nrows*ncols
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
axarr[i] = fig.add_subplot(nrows, ncols, i+1, **subplot_kw)
if squeeze:
        # Reshape the array to have the final desired dimension (nrows, ncols),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots==1:
return fig, axarr[0]
else:
return fig, axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
return fig, axarr.reshape(nrows, ncols)
if __name__ == '__main__':
# import pandas.rpy.common as com
# sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
# top10 = sales['zip'].value_counts()[:10].index
# sales2 = sales[sales.zip.isin(top10)]
# _ = scatter_plot(sales2, 'squarefeet', 'price', by='zip')
# plt.show()
import matplotlib.pyplot as plt
import pandas.tools.plotting as plots
import pandas.core.frame as fr
reload(plots)
reload(fr)
from pandas.core.frame import DataFrame
data = DataFrame([[3, 6, -5], [4, 8, 2], [4, 9, -6],
[4, 9, -3], [2, 5, -1]],
columns=['A', 'B', 'C'])
data.plot(kind='barh', stacked=True)
plt.show()
| [
2,
852,
257,
1643,
1165,
8925,
198,
2,
279,
2645,
600,
25,
15560,
28,
36,
1157,
486,
198,
198,
6738,
19798,
292,
13,
22602,
13,
12501,
273,
2024,
1330,
12940,
62,
961,
8807,
198,
11748,
19798,
292,
13,
7295,
13,
11321,
355,
401,
1... | 2.228203 | 8,063 |
from .news import news_router
| [
6738,
764,
10827,
1330,
1705,
62,
472,
353,
198
] | 3.333333 | 9 |
#!/usr/bin/env python
"""
Monitors the availability of the TCP port, runs external process if port is unavailable,
but not more frequently than cooldown timeout. Persistent information is stored in /tmp
"""
import argparse
import contextlib
import datetime
import logging.handlers
import os
import random
import shelve
import shlex
import socket
import subprocess
import sys
import tempfile
import time
logger = logging.getLogger()
# noinspection PyTypeChecker
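# The implementation below is a minimal sketch of the behaviour described in
# the module docstring; the original body of main() is elided in this excerpt,
# and the argument names and state-file location are assumptions.
def check_port(host, port, timeout=5.0):
    """Return True if a TCP connection to host:port succeeds."""
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.settimeout(timeout)
        return sock.connect_ex((host, port)) == 0


def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('host')
    parser.add_argument('port', type=int)
    parser.add_argument('command', help='external process to run when the port is down')
    parser.add_argument('--cooldown', type=float, default=300.0,
                        help='minimum seconds between command runs')
    args = parser.parse_args()

    state_path = os.path.join(tempfile.gettempdir(), 'port_monitor_state')
    with contextlib.closing(shelve.open(state_path)) as state:
        if check_port(args.host, args.port):
            return
        if time.time() - state.get('last_run', 0.0) < args.cooldown:
            logger.info('port unavailable, but still inside the cooldown window')
            return
        subprocess.call(shlex.split(args.command))
        state['last_run'] = time.time()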
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
220,
220,
220,
2892,
6742,
262,
11500,
286,
262,
23633,
2493,
11,
4539,
7097,
1429,
611,
2493,
318,
23485,
11,
198,
220,
220,
220,
475,
407,
517,
6777,
621,
20869,
26827,
... | 3.583333 | 144 |
# -*- coding: utf-8 -*-
"""
Domain model implementation
"""
__author__ = 'Samir Adrik'
__email__ = 'samir.adrik@gmail.com'
from .expenses import Expenses
from .currency import Currency
from .address import Address
from .percent import Percent
from .family import Family
from .entity import Entity
from .person import Person
from .female import Female
from .amount import Amount
from .mobile import Mobile
from .share import Share
from .money import Money
from .email import Email
from .value import Value
from .phone import Phone
from .male import Male
from .name import Name
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
43961,
2746,
7822,
198,
198,
37811,
198,
198,
834,
9800,
834,
796,
705,
16305,
343,
1215,
12602,
6,
198,
834,
12888,
834,
796,
705,
37687,
343,
13,
32... | 3.694268 | 157 |
from typing import Callable, Dict, Tuple
import numpy as np
from numpy import cumsum
from copy import deepcopy
from ..utils.utilities import calculate_best_intervention_and_effect
def get_relevant_results(results: Callable, replicates: int) -> Dict[str, tuple]:
"""
    Results returned live from a notebook are in a different format from results
    loaded from a pickle. This function converts the results into a common format
    for analysis.
Parameters
----------
results : Callable
The results from running the function 'run_methods_replicates()'
replicates : int
How many replicates we used.
Returns
-------
Dict[str, tuple]
        A dictionary keyed by method name, holding the results from each replicate.
"""
data = {m: [] for m in results}
for m in results:
for r in range(replicates):
data[m].append(
(
results[m][r].per_trial_cost,
results[m][r].optimal_outcome_values_during_trials,
results[m][r].optimal_intervention_sets,
results[m][r].assigned_blanket,
)
)
return data
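# Example usage (sketch; `results` would normally come from
# run_methods_replicates(), and the method key "BO" is hypothetical):
#
#   data = get_relevant_results(results, replicates=10)
#   costs, outcomes, intervention_sets, blanket = data["BO"][0]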
| [
6738,
19720,
1330,
4889,
540,
11,
360,
713,
11,
309,
29291,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
269,
5700,
388,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
11485,
26791,
13,
315,
2410,
1330,
15284,
62,
... | 2.491903 | 494 |
#
# VUsbTools.Log
# Micah Elizabeth Scott <micah@vmware.com>
#
# Implements parsers for USB log files. Currently
# this includes slurping usbAnalyzer data out of the
# VMX log, and parsing the XML logs exported by
# Ellisys Visual USB.
#
# Copyright (C) 2005-2010 VMware, Inc. Licensed under the MIT
# License, please see the README.txt. All rights reserved.
#
from __future__ import division
import sys, time, re, os, string, atexit
import xml.sax, Queue, threading, difflib
import gtk, gobject
import traceback, gzip, struct
from VUsbTools import Types
class UsbIOParser(Types.psyobj):
"""Parses USBIO log lines and generates Transaction objects appropriately.
Finished transactions are pushed into the supplied queue.
"""
lineOriented = True
def flush(self):
"""Force any in-progress transactions to be completed. This should be
called when you know the USB analyzer is finished outputting
data, such as when a non-USBIO line appears in the log.
"""
if self.current.dir:
self.eventQueue.put(self.current)
self.current = Types.Transaction()
class TimestampLogParser:
"""Parse a simple format which logs timestamps in nanosecond resolution.
Lines are of the form:
<timestamp> <name> args...
The event name may be 'begin-foo' or 'end-foo' to indicate an event
which executes over a span of time, or simply 'foo' to mark a single
point.
"""
lineOriented = True
class VmxLogParser(UsbIOParser):
"""Read the VMX log, looking for new USBIO lines and parsing them.
"""
frame = None
epoch = None
lineNumber = 0
def parseRelativeTime(self, line):
"""Start the clock when we see our first USB log line"""
t = self.parseTime(line)
if self.epoch is None:
self.epoch = t
return t - self.epoch
_timeCache = (None, None)
def parseTime(self, line):
"""Return a unix-style timestamp for the given line."""
if line[10] != "T":
"""XXX: This assumes the current year, so logs that straddle
years will have a giant discontinuity in timestamps.
"""
timefmt = "%b %d %H:%M:%S"
stamp = line[:15]
usec = line[16:19]
else:
timefmt = "%Y-%m-%dT%H:%M:%S"
stamp = line[:19]
usec = line[20:23]
# Cache the results of strptime. It only changes every
# second, and this was taking more than 50% of our parsing time!
savedStamp, parsed = self._timeCache
if savedStamp != stamp:
parsed = time.strptime(stamp, timefmt)
self._timeCache = stamp, parsed
now = time.localtime()
try:
usec = int(usec)
except ValueError:
usec = 0
return usec / 1000.0 + time.mktime((
now.tm_year, parsed.tm_mon, parsed.tm_mday,
parsed.tm_hour, parsed.tm_min, parsed.tm_sec,
parsed.tm_wday, parsed.tm_yday, parsed.tm_isdst))
def parseInt(attrs, name, default=None):
"""The Ellisys logs include commas in their integers"""
try:
return int(attrs[name].replace(",", ""))
except (KeyError, ValueError):
return default
def parseFloat(attrs, name, default=None):
"""The Ellisys logs include commas and spaces in their floating point numbers"""
try:
return float(attrs[name].replace(",", "").replace(" ", ""))
except (KeyError, ValueError):
return default
class EllisysXmlHandler(xml.sax.handler.ContentHandler):
"""Handles SAX events from an XML log exported by Ellisys
Visual USB. The completed USB transactions are pushed into
the provided completion queue.
"""
frameNumber = None
device = None
endpoint = None
current = None
characterHandler = None
def beginUrb(self, pipe):
"""Simulate a new URB being created on the supplied pipe. This
begins a Down transaction and makes it pending and current.
"""
t = Types.Transaction()
t.dir = 'Down'
t.dev, t.endpt = pipe
t.timestamp = self.timestamp
t.frame = parseInt(self._frameAttrs, 'frameNumber')
t.status = 0
self.pipes[pipe] = t
self.pending[pipe] = t
self.current = t
def flipUrb(self, pipe):
"""Begin the Up phase on a particular pipe. This
completes the Down transaction, and makes an Up
current (but not pending)
"""
del self.pending[pipe]
down = self.pipes[pipe]
self.eventQueue.put(down)
up = Types.Transaction()
up.dir = 'Up'
up.dev, up.endpt = pipe
# Up and Down transactions share setup data, if applicable
if down.hasSetupData():
up.data = down.data[:8]
self.pipes[pipe] = up
self.current = up
def completeUrb(self, pipe, id):
"""Complete the Up phase on a pipe"""
if pipe in self.pending:
self.flipUrb(pipe)
assert pipe in self.pipes
t = self.pipes[pipe]
del self.pipes[pipe]
self.current = None
t.timestamp = self.timestamp
t.frame = parseInt(self._frameAttrs, 'frameNumber')
if id in ('ACK', 'NYET'):
t.status = 0
else:
t.status = id
self.eventQueue.put(t)
Types.psycoBind(EllisysXmlHandler)
class EllisysXmlParser:
"""Parses XML files exported from Ellisys Visual USB. This
is just a glue object that sets up an XML parser and
sends SAX events to the EllisysXmlHandler.
"""
lineOriented = False
class UsbmonLogParser:
"""Parses usbmon log lines and generates Transaction objects appropriately.
Finished transactions are pushed into the supplied queue.
This parser was originally contributed by Christoph Zimmermann.
"""
lineOriented = True
lineNumber = 0
class Follower(threading.Thread):
"""A thread that continuously scans a file, parsing each line"""
pollInterval = 0.1
running = True
progressInterval = 0.2
progressExpiration = 0
class QueueSink:
"""Polls a Queue for new items, via the Glib main loop.
When they're available, calls a callback with them.
"""
interval = 200
timeSlice = 0.25
maxsize = 512
batch = range(10)
def chooseParser(filename):
"""Return an appropriate log parser class for the provided filename.
This implementation does not try to inspect the file's content,
it just looks at the filename's extension.
"""
base, ext = os.path.splitext(filename)
if ext == ".gz":
return chooseParser(base)
if ext == ".xml":
return EllisysXmlParser
if ext == ".tslog":
return TimestampLogParser
if ext == ".mon":
return UsbmonLogParser
return VmxLogParser
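# Example: the extension alone selects the parser, and gzipped logs are
# unwrapped first (file names below are hypothetical).
#
#   chooseParser("capture.xml")     -> EllisysXmlParser
#   chooseParser("capture.xml.gz")  -> EllisysXmlParser
#   chooseParser("trace.tslog")     -> TimestampLogParser
#   chooseParser("vmware.log")      -> VmxLogParser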
| [
2,
198,
2,
569,
5842,
65,
33637,
13,
11187,
198,
2,
7631,
993,
10674,
4746,
1279,
9383,
993,
31,
14761,
1574,
13,
785,
29,
198,
2,
198,
2,
1846,
1154,
902,
13544,
364,
329,
8450,
2604,
3696,
13,
16888,
198,
2,
428,
3407,
40066,
... | 2.463681 | 2,836 |
#!/usr/bin/python3
from collections import Counter
from htsworkflow.submission.encoded import ENCODED
import psycopg2
import pandas
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
6738,
17268,
1330,
15034,
198,
6738,
289,
912,
1818,
11125,
13,
7266,
3411,
13,
12685,
9043,
1330,
412,
7792,
3727,
1961,
198,
198,
11748,
17331,
22163,
70,
17,
198,
11748,
19798,
29... | 2.83871 | 62 |
"""Create hash from a dictionary."""
import os
import hashlib
from .serializers import pumpJsonDump
from typing import List
def create_hash_from_dict(index_dict: dict, salt: str = "",
get_env: bool = True, keys: List[str] = None):
"""Create a hash for the index."""
# If get_env set as True and salt not set try to get from env variable
if salt == "" and get_env:
salt = os.getenv("HASH_SALT", "")
temp_dict = index_dict
    # Restrict the keys used in hashing
if keys is not None:
temp_dict = dict([(k, index_dict[k]) for k in keys])
string_dict = pumpJsonDump(temp_dict)
hash_object = hashlib.sha1(salt.encode() + str(string_dict).encode())
pbHash = hash_object.hexdigest()
return pbHash
def create_hash_from_str(index: str, salt: str = "", get_env: bool = True):
"""Create a hash for the index."""
# If get_env set as True and salt not set try to get from env variable
if salt == "" and get_env:
salt = os.getenv("HASH_SALT", "")
hash_object = hashlib.sha1(salt.encode() + index.encode())
pbHash = hash_object.hexdigest()
return pbHash
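if __name__ == "__main__":
    # Quick illustrative check (assumes HASH_SALT is unset): restricting
    # `keys` keeps the digest stable when unrelated entries change.
    d1 = {"a": 1, "b": 2}
    d2 = {"a": 1, "b": 99}
    assert (create_hash_from_dict(d1, salt="s", keys=["a"]) ==
            create_hash_from_dict(d2, salt="s", keys=["a"]))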
| [
37811,
16447,
12234,
422,
257,
22155,
526,
15931,
198,
11748,
28686,
198,
11748,
12234,
8019,
198,
6738,
764,
46911,
11341,
1330,
8901,
41,
1559,
35,
931,
198,
6738,
19720,
1330,
7343,
628,
198,
4299,
2251,
62,
17831,
62,
6738,
62,
1160... | 2.554084 | 453 |
import logging
from rich.logging import RichHandler
FORMAT = "%(message)s"
logging.basicConfig(level="INFO", format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
log = logging.getLogger("rich")
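# Any standard logging call is now rendered through RichHandler, e.g.:
# log.info("server started")
# log.error("[bold red]fatal[/bold red]")  # markup needs RichHandler(markup=True)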
| [
11748,
18931,
198,
198,
6738,
5527,
13,
6404,
2667,
1330,
3998,
25060,
198,
198,
21389,
1404,
796,
36521,
7,
20500,
8,
82,
1,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
2625,
10778,
1600,
5794,
28,
21389,
1404,
11,
3128,
69,
16762,
... | 3.030303 | 66 |
tabby_cat="\tI'm tabbed in."
persian_cat="I'm split\non a line."
backslash_cat="I'm\\a\\cat."
fat_cat="""
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat | [
8658,
1525,
62,
9246,
2625,
59,
83,
40,
1101,
7400,
3077,
287,
526,
201,
198,
19276,
666,
62,
9246,
2625,
40,
1101,
6626,
59,
13159,
257,
1627,
526,
201,
198,
1891,
6649,
1077,
62,
9246,
2625,
40,
1101,
6852,
64,
6852,
9246,
526,
... | 2.153846 | 117 |
import pandas as pd
iris = pd.read_csv('IRIS.csv')
# print(iris.head(5))
# split data attributes and label attribute
attributes = iris.drop(['species'], axis=1)
labels = iris['species']
# import and build the hierarchical clustering model
from scipy.cluster.hierarchy import linkage, dendrogram
hc = linkage(attributes, 'single')
# print(hc)
# plot the dendrogram
samplelist = list(range(1, 151))  # labels for the 150 data samples
# import the pyplot library
from matplotlib import pyplot as plt
plt.figure(figsize=(30, 15))
dendrogram(hc,
orientation='top',
labels=samplelist,
distance_sort='descending',
show_leaf_counts='true')
plt.show() | [
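# Optionally cut the tree into a fixed number of flat clusters (e.g. the
# three iris species) and inspect the assigned labels
from scipy.cluster.hierarchy import fcluster
flat_labels = fcluster(hc, t=3, criterion='maxclust')
print(flat_labels[:10])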
11748,
19798,
292,
355,
279,
67,
198,
198,
29616,
796,
279,
67,
13,
961,
62,
40664,
10786,
4663,
1797,
13,
40664,
11537,
198,
2,
3601,
7,
29616,
13,
2256,
7,
20,
4008,
198,
198,
2,
6626,
1366,
12608,
290,
6167,
11688,
198,
1078,
7... | 2.583012 | 259 |
import argparse
import logging
import sys
from aiohttp import web
from prometheus_client.core import REGISTRY
from cloudflare_exporter.collector import CloudflareCollector
from cloudflare_exporter.config import (DEFAULT_HOST,
DEFAULT_LOGS_FETCH,
DEFAULT_LOGS_COUNT,
DEFAULT_LOGS_RANGE,
DEFAULT_LOGS_SAMPLE,
DEFAULT_PORT, LOG_FORMAT)
from cloudflare_exporter.handlers import handle_health, handle_metrics
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
25064,
198,
198,
6738,
257,
952,
4023,
1330,
3992,
198,
6738,
1552,
36916,
62,
16366,
13,
7295,
1330,
23337,
1797,
40405,
198,
198,
6738,
6279,
2704,
533,
62,
1069,
26634,
13,
33327,
27... | 1.840909 | 352 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
from zoil.well import get_production_data | [
6738,
1976,
9437,
13,
4053,
1330,
651,
62,
25493,
62,
7890
] | 3.727273 | 11 |
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
import datetime
import time
from extension.src.Constants import Constants
| [
2,
15069,
12131,
5413,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.875706 | 177 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cifar10 data module."""
import os
import pytorch_lightning as pl
import webdataset as wds
from torch.utils.data import DataLoader
from torchvision import transforms
class CIFAR10DataModule(pl.LightningDataModule): # pylint: disable=too-many-instance-attributes
"""Data module class."""
def __init__(self, **kwargs):
"""Initialization of inherited lightning data module."""
super(CIFAR10DataModule, self).__init__() # pylint: disable=super-with-arguments
self.train_dataset = None
self.valid_dataset = None
self.test_dataset = None
self.train_data_loader = None
self.val_data_loader = None
self.test_data_loader = None
self.normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
self.valid_transform = transforms.Compose([
transforms.ToTensor(),
self.normalize,
])
self.train_transform = transforms.Compose([
transforms.RandomResizedCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize,
])
self.args = kwargs
def prepare_data(self):
"""Implementation of abstract class."""
@staticmethod
def get_num_files(input_path):
"""Gets num files.
Args:
input_path : path to input
"""
return len(os.listdir(input_path)) - 1
def setup(self, stage=None):
"""Downloads the data, parse it and split the data into train, test,
validation data.
Args:
stage: Stage - training or testing
"""
data_path = self.args.get("train_glob", "/pvc/output/processing")
train_base_url = data_path + "/train"
val_base_url = data_path + "/val"
test_base_url = data_path + "/test"
train_count = self.get_num_files(train_base_url)
val_count = self.get_num_files(val_base_url)
test_count = self.get_num_files(test_base_url)
train_url = "{}/{}-{}".format(train_base_url, "train",
"{0.." + str(train_count) + "}.tar")
valid_url = "{}/{}-{}".format(val_base_url, "val",
"{0.." + str(val_count) + "}.tar")
test_url = "{}/{}-{}".format(test_base_url, "test",
"{0.." + str(test_count) + "}.tar")
self.train_dataset = (wds.WebDataset(
train_url,
handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
image="ppm;jpg;jpeg;png",
info="cls").map_dict(image=self.train_transform).to_tuple(
"image", "info").batched(40))
self.valid_dataset = (wds.WebDataset(
valid_url,
handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
image="ppm",
info="cls").map_dict(image=self.valid_transform).to_tuple(
"image", "info").batched(20))
self.test_dataset = (wds.WebDataset(
test_url,
handler=wds.warn_and_continue).shuffle(100).decode("pil").rename(
image="ppm",
info="cls").map_dict(image=self.valid_transform).to_tuple(
"image", "info").batched(20))
def create_data_loader(self, dataset, batch_size, num_workers): # pylint: disable=no-self-use
"""Creates data loader."""
return DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers)
def train_dataloader(self):
"""Train Data loader.
Returns:
output - Train data loader for the given input
"""
self.train_data_loader = self.create_data_loader(
self.train_dataset,
self.args.get("train_batch_size", None),
self.args.get("train_num_workers", 4),
)
return self.train_data_loader
def val_dataloader(self):
"""Validation Data Loader.
Returns:
output - Validation data loader for the given input
"""
self.val_data_loader = self.create_data_loader(
self.valid_dataset,
self.args.get("val_batch_size", None),
self.args.get("val_num_workers", 4),
)
return self.val_data_loader
def test_dataloader(self):
"""Test Data Loader.
Returns:
output - Test data loader for the given input
"""
self.test_data_loader = self.create_data_loader(
self.test_dataset,
self.args.get("val_batch_size", None),
self.args.get("val_num_workers", 4),
)
return self.test_data_loader
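# Minimal usage sketch (hypothetical model object and argument values; the
# entry point that wires this module into a trainer is not part of this
# excerpt):
#
#   dm = CIFAR10DataModule(train_glob="/pvc/output/processing",
#                          train_batch_size=40, val_batch_size=20)
#   trainer = pl.Trainer(max_epochs=1)
#   trainer.fit(model, datamodule=dm)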
| [
2,
5145,
14,
14629,
14,
8800,
14,
24330,
14,
29412,
18,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345... | 2.124073 | 2,563 |
import os
from pkgutil import get_data
import torch
import dgl
import numpy as np
import requests
import zipfile
from scipy.spatial.distance import pdist, squareform
from data.tsp import distance_matrix_tensor_representation
import tqdm
from toolbox import utils
| [
11748,
28686,
198,
6738,
279,
10025,
22602,
1330,
651,
62,
7890,
198,
11748,
28034,
198,
11748,
288,
4743,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
7007,
198,
11748,
19974,
7753,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
3024... | 3.428571 | 77 |
"""Segmentor library designed to learn how to segment images using GAs.
This libary actually does not incode the GA itself, instead it just defines
the search parameters the evaluation funtions and the fitness function (comming soon)."""
# DO: Research project-clean up the parameters class to reduce the search space
# DO: Change the seed from a number to a fraction 0-1 which is scaled to image rows and columns
# DO: Enumerate teh word based measures.
from collections import OrderedDict
import sys
import logging
import numpy as np
import skimage
from skimage import segmentation
from skimage import color
from see.Segment_Similarity_Measure import FF_ML2DHD_V2
# List of all algorithms
algorithmspace = dict()
def runAlgo(img, ground_img, individual, return_mask=False):
"""Run and evaluate the performance of an individual.
Keyword arguments:
img -- training image
ground_img -- the ground truth for the image mask
individual -- the list representing an individual in our population
return_mask -- Boolean value indicating whether to return resulting
mask for the individual or not (default False)
Output:
fitness -- resulting fitness value for the individual
mask -- resulting image mask associated with the individual (if return_mask=True)
"""
logging.getLogger().info(f"Running Algorithm {individual[0]}")
# img = copy.deepcopy(copyImg)
seg = algoFromParams(individual)
mask = seg.evaluate(img)
logging.getLogger().info("Calculating Fitness")
fitness = FitnessFunction(mask, ground_img)
if return_mask:
return [fitness, mask]
else:
return fitness
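# Example call (sketch): an individual is a plain list whose first entry is
# the algorithm code and whose remaining entries follow the ordering of
# parameters.pkeys defined below, e.g.
#
#   fitness = runAlgo(img, ground_img, individual)
#   fitness, mask = runAlgo(img, ground_img, individual, return_mask=True)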
def algoFromParams(individual):
"""Convert an individual's param list to an algorithm. Assumes order
defined in the parameters class.
Keyword arguments:
individual -- the list representing an individual in our population
Output:
algorithm(individual) -- algorithm associated with the individual
"""
if individual[0] in algorithmspace:
algorithm = algorithmspace[individual[0]]
return algorithm(individual)
else:
        raise ValueError("Algorithm not available")
def popCounts(pop):
"""Count the number of each algorihtm in a population"""
algorithms = eval(parameters.ranges["algorithm"])
counts = {a:0 for a in algorithms}
for p in pop:
print(p[0])
counts[p[0]] += 1
return counts
class parameters(OrderedDict):
"""Construct an ordered dictionary that represents the search space.
Functions:
printparam -- returns description for each parameter
tolist -- converts dictionary of params into list
fromlist -- converts individual into dictionary of params
"""
descriptions = dict()
ranges = dict()
pkeys = []
ranges["algorithm"] = "['CT','FB','SC','WS','CV','MCV','AC']"
descriptions["algorithm"] = "string code for the algorithm"
descriptions["beta"] = "A parameter for randomWalker So, I should take this out"
ranges["beta"] = "[i for i in range(0,10000)]"
descriptions["tolerance"] = "A parameter for flood and flood_fill"
ranges["tolerance"] = "[float(i)/1000 for i in range(0,1000,1)]"
descriptions["scale"] = "A parameter for felzenszwalb"
ranges["scale"] = "[i for i in range(0,10000)]"
descriptions["sigma"] = "sigma value. A parameter for felzenswalb, inverse_guassian_gradient, slic, and quickshift"
ranges["sigma"] = "[float(i)/100 for i in range(0,100)]"
descriptions["min_size"] = "parameter for felzenszwalb"
ranges["min_size"] = "[i for i in range(0,10000)]"
descriptions["n_segments"] = "A parameter for slic"
ranges["n_segments"] = "[i for i in range(2,10000)]"
descriptions["iterations"] = "A parameter for both morphological algorithms"
ranges["iterations"] = "[10, 10]"
descriptions["ratio"] = "A parameter for ratio"
ranges["ratio"] = "[float(i)/100 for i in range(0,100)]"
descriptions["kernel_size"] = "A parameter for kernel_size"
ranges["kernel_size"] = "[i for i in range(0,10000)]"
descriptions["max_dist"] = "A parameter for quickshift"
ranges["max_dist"] = "[i for i in range(0,10000)]"
descriptions["Channel"] = "A parameter for Picking the Channel R,G,B,H,S,V"
ranges["Channel"] = "[0,1,2,3,4,5]"
descriptions["connectivity"] = "A parameter for flood and floodfill"
ranges["connectivity"] = "[i for i in range(0, 9)]"
descriptions["compactness"] = "A parameter for slic and watershed"
ranges["compactness"] = "[0.0001,0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]"
descriptions["mu"] = "A parameter for chan_vese"
ranges["mu"] = "[float(i)/100 for i in range(0,100)]"
descriptions["lambda"] = "A parameter for chan_vese and morphological_chan_vese"
ranges["lambda"] = "[(1,1), (1,2), (2,1)]"
descriptions["dt"] = "#An algorithm for chan_vese May want to make seperate level sets for different functions e.g. Morph_chan_vese vs morph_geo_active_contour"
ranges["dt"] = "[float(i)/10 for i in range(0,100)]"
descriptions["init_level_set_chan"] = "A parameter for chan_vese and morphological_chan_vese"
ranges["init_level_set_chan"] = "['checkerboard', 'disk', 'small disk']"
descriptions["init_level_set_morph"] = "A parameter for morphological_chan_vese"
ranges["init_level_set_morph"] = "['checkerboard', 'circle']"
descriptions["smoothing"] = "A parameter used in morphological_geodesic_active_contour"
ranges["smoothing"] = "[i for i in range(1, 10)]"
descriptions["alpha"] = "A parameter for inverse_guassian_gradient"
ranges["alpha"] = "[i for i in range(0,10000)]"
descriptions["balloon"] = "A parameter for morphological_geodesic_active_contour"
ranges["balloon"] = "[i for i in range(-50,50)]"
descriptions["seed_pointX"] = "A parameter for flood and flood_fill"
ranges["seed_pointX"] = "[0.0]"
descriptions["seed_pointY"] = "??"
ranges["seed_pointY"] = "[0.0]"
descriptions["seed_pointZ"] = "??"
ranges["seed_pointZ"] = "[0.0]"
# Try to set defaults only once.
# Current method may cause all kinds of weird problems.
# @staticmethod
# def __Set_Defaults__()
def __init__(self):
"""Set default values for each param in the dictionary."""
self["algorithm"] = "None"
self["beta"] = 0.0
self["tolerance"] = 0.0
self["scale"] = 0.0
self["sigma"] = 0.0
self["min_size"] = 0.0
self["n_segments"] = 0.0
self["iterations"] = 10
self["ratio"] = 0.0
self["kernel_size"] = 0.0
self["max_dist"] = 0.0
self["Channel"] = 0.0
self["connectivity"] = 0.0
self["compactness"] = 0.0
self["mu"] = 0.0
self["lambda"] = (1, 1)
self["dt"] = 0.0
self["init_level_set_chan"] = "disk"
self["init_level_set_morph"] = "checkerboard"
self["smoothing"] = 0.0
self["alpha"] = 0.0
self["balloon"] = 0.0
self["seed_pointX"] = 0.0
self["seed_pointY"] = 0.0
self["seed_pointZ"] = 0.0
self.pkeys = list(self.keys())
def printparam(self, key):
"""Return description of parameter from param list."""
return f"{key}={self[key]}\n\t{self.descriptions[key]}\n\t{self.ranges[key]}\n"
def __str__(self):
"""Return descriptions of all parameters in param list."""
out = ""
for index, k in enumerate(self.pkeys):
out += f"{index} " + self.printparam(k)
return out
def tolist(self):
"""Convert dictionary of params into list of parameters."""
plist = []
for key in self.pkeys:
plist.append(self.params[key])
return plist
def fromlist(self, individual):
"""Convert individual's list into dictionary of params."""
logging.getLogger().info(f"Parsing Parameter List for {len(individual)} parameters")
for index, key in enumerate(self.pkeys):
self[key] = individual[index]
class segmentor(object):
"""Base class for segmentor classes defined below.
Functions:
evaluate -- Run segmentation algorithm to get inferred mask.
"""
algorithm = ""
def __init__(self, paramlist=None):
"""Generate algorithm params from parameter list."""
self.params = parameters()
if paramlist:
self.params.fromlist(paramlist)
def evaluate(self, img):
"""Run segmentation algorithm to get inferred mask."""
return np.zeros(img.shape[0:1])
def __str__(self):
"""Return params for algorithm."""
mystring = f"{self.params['algorithm']} -- \n"
for p in self.paramindexes:
mystring += f"\t{p} = {self.params[p]}\n"
return mystring
class ColorThreshold(segmentor):
"""Peform Color Thresholding segmentation algorithm. Segments parts of the image
based on the numerical values for the respective channel.
Parameters:
my_mx -- maximum thresholding value
my_mn -- minimum thresholding value
"""
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(ColorThreshold, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "CT"
self.params["Channel"] = 5
self.params["mu"] = 0.4
self.params["sigma"] = 0.6
self.paramindexes = ["Channel", "sigma", "mu"]
    def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
channel_num = self.params["Channel"]
if len(img.shape) > 2:
num_channels = img.shape[2]
if channel_num < num_channels:
channel = img[:, :, int(channel_num)]
else:
hsv = skimage.color.rgb2hsv(img)
print(f"working with hsv channel {channel_num-3}")
channel = hsv[:, :, int(channel_num)-3]
else:
channel = img
pscale = np.max(channel)
my_mx = self.params["sigma"] * pscale
my_mn = self.params["mu"] * pscale
output = None
if my_mn < my_mx:
output = np.ones(channel.shape)
output[channel < my_mn] = 0
output[channel > my_mx] = 0
else:
output = np.zeros(channel.shape)
output[channel > my_mn] = 1
output[channel < my_mx] = 1
return output
algorithmspace['CT'] = ColorThreshold
algorithmspace["AAA"] = TripleA
class Felzenszwalb(segmentor):
"""Perform Felzenszwalb segmentation algorithm. ONLY WORKS FOR RGB. The felzenszwalb
algorithms computes a graph based on the segmentation. Produces an oversegmentation
of the multichannel using min-span tree. Returns an integer mask indicating the segment labels.
Parameters:
scale -- float, higher meanse larger clusters
sigma -- float, std. dev of Gaussian kernel for preprocessing
min_size -- int, minimum component size. For postprocessing
    multichannel -- bool, Whether the image is 2D or 3D
"""
def __doc__(self):
"""Return help string for function."""
myhelp = "Wrapper function for the scikit-image Felzenszwalb segmentor:"
myhelp += f" xx {skimage.segmentation.random_walker.__doc__}"
return myhelp
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Felzenszwalb, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "FB"
self.params["scale"] = 984
self.params["sigma"] = 0.09
self.params["min_size"] = 92
self.paramindexes = ["scale", "sigma", "min_size"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
multichannel = False
if len(img.shape) > 2:
multichannel = True
output = skimage.segmentation.felzenszwalb(
img,
self.params["scale"],
self.params["sigma"],
self.params["min_size"],
multichannel=multichannel,
)
return output
algorithmspace["FB"] = Felzenszwalb
class Slic(segmentor):
"""Perform the Slic segmentation algorithm. Segments k-means clustering in Color space
(x, y, z). Returns a 2D or 3D array of labels.
Parameters:
image -- ndarray, input image
n_segments -- int, approximate number of labels in segmented output image
compactness -- float, Balances color proximity and space proximity.
Higher values mean more weight to space proximity (superpixels
        become more square/cubic). Recommended log scale values (0.01,
0.1, 1, 10, 100, etc)
max_iter -- int, max number of iterations of k-means
    sigma -- float or (3,) shape array of floats, width of Gaussian
        smoothing kernel. For pre-processing for each dimension of the
image. Zero means no smoothing.
spacing -- (3,) shape float array. Voxel spacing along each image
        dimension. Default is uniform spacing
multichannel -- bool, multichannel (True) vs grayscale (False)
"""
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Slic, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "SC"
self.params["n_segments"] = 5
self.params["compactness"] = 5
self.params["iterations"] = 3
self.params["sigma"] = 5
self.paramindexes = ["n_segments", "compactness", "iterations", "sigma"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
multichannel = False
if len(img.shape) > 2:
multichannel = True
output = skimage.segmentation.slic(
img,
n_segments=self.params["n_segments"],
compactness=self.params["compactness"],
max_iter=self.params["iterations"],
sigma=self.params["sigma"],
convert2lab=True,
multichannel=multichannel,
)
return output
algorithmspace["SC"] = Slic
class QuickShift(segmentor):
"""Perform the Quick Shift segmentation algorithm. Segments images with quickshift
clustering in Color (x,y) space. Returns ndarray segmentation mask of the labels.
Parameters:
image -- ndarray, input image
ratio -- float, balances color-space proximity & image-space
proximity. Higher vals give more weight to color-space
    kernel_size -- float, Width of Gaussian kernel used for smoothing.
        Higher means fewer clusters
    max_dist -- float, Cut-off point for data distances. Higher means fewer clusters
    sigma -- float, Width of Gaussian smoothing applied as preprocessing.
        Zero means no smoothing
    random_seed -- int, Random seed used for breaking ties.
"""
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(QuickShift, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "QS"
self.params["kernel_size"] = 5
self.params["max_dist"] = 60
self.params["sigma"] = 5
self.params["Channel"] = 1
self.params["ratio"] = 2
self.paramindexes = ["kernel_size", "max_dist", "sigma", "Channel", "ratio"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
output = skimage.segmentation.quickshift(
color.gray2rgb(img),
ratio=self.params["ratio"],
kernel_size=self.params["kernel_size"],
max_dist=self.params["max_dist"],
sigma=self.params["sigma"],
random_seed=self.params["Channel"],
)
return output
algorithmspace["QS"] = QuickShift
#DO: This algorithm seems to need a channel input. We should fix that.
class Watershed(segmentor):
"""Perform the Watershed segmentation algorithm. Uses user-markers.
treats markers as basins and 'floods' them. Especially good if overlapping objects.
Returns a labeled image ndarray.
Parameters:
image -- ndarray, input array
compactness -- float, compactness of the basins. Higher values
make more regularly-shaped basin.
"""
# Not using connectivity, markers, or offset params as arrays would
# expand the search space too much.
# abbreviation for algorithm = WS
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Watershed, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "WS"
self.params["compactness"] = 2.0
self.paramindexes = ["compactness"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
channel = 0
channel_img = img[:, :, channel]
output = skimage.segmentation.watershed(
channel_img, markers=None, compactness=self.params["compactness"]
)
return output
algorithmspace["WS"] = Watershed
class Chan_Vese(segmentor):
"""Peform Chan Vese segmentation algorithm. ONLY GRAYSCALE. Segments objects
without clear boundaries. Returns segmentation array of algorithm.
Parameters:
image -- ndarray grayscale image to be segmented
    mu -- float, 'edge length' weight parameter. Higher mu values make a
        'rounder' edge; values closer to zero detect smaller objects. Typical
        values are from 0 - 1.
    lambda1 -- float 'diff from average' weight param to determine if
        output region is True. If lower than lambda2, the region has a
        larger range of values than the other
lambda2 -- float 'diff from average' weight param to determine if
output region is False. If lower than lambda1, the region will
have a larger range of values
tol -- positive float, typically (0-1), very low level set variation
tolerance between iterations.
max_iter -- uint, max number of iterations before algorithms stops
dt -- float, Multiplication factor applied at the calculations step
"""
# Abbreviation for Algorithm = CV
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Chan_Vese, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "CV"
self.params["mu"] = 2.0
self.params["lambda"] = (10, 20)
self.params["iterations"] = 10
self.params["dt"] = 0.10
self.params["tolerance"] = 0.001
self.params["init_level_set_chan"] = "small disk"
self.paramindexes = ["mu", "lambda", "iterations", "dt", "init_level_set_chan"]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
if len(img.shape) == 3:
img = skimage.color.rgb2gray(img)
output = skimage.segmentation.chan_vese(
img,
mu=self.params["mu"],
lambda1=self.params["lambda"][0],
lambda2=self.params["lambda"][1],
tol=self.params["tolerance"],
max_iter=self.params["iterations"],
dt=self.params["dt"],
)
return output
algorithmspace["CV"] = Chan_Vese
class Morphological_Chan_Vese(segmentor):
"""Peform Morphological Chan Vese segmentation algorithm.
ONLY WORKS ON GRAYSCALE. Active contours without edges. Can be used to
    segment images/volumes without good borders. Requires that the inside of
    the object looks different than the outside (color, shade, darker).
Parameters:
image -- ndarray of grayscale image
iterations -- uint, number of iterations to run
init_level_set -- str, or array same shape as image. Accepted string
values are:
'checkerboard': Uses checkerboard_level_set. Returns a binary level set of a checkerboard
'circle': Uses circle_level_set. Creates a binary level set of a circle, given radius and a
center
smoothing -- uint, number of times the smoothing operator is applied
per iteration. Usually around 1-4. Larger values make it smoother
lambda1 -- Weight param for outer region. If larger than lambda2,
outer region will give larger range of values than inner value.
    lambda2 -- Weight param for inner region. If larger than lambda1,
inner region will have a larger range of values than outer region.
"""
# Abbreviation for algorithm = MCV
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(Morphological_Chan_Vese, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "MCV"
self.params["iterations"] = 10
self.params["init_level_set_morph"] = "checkerboard"
self.params["smoothing"] = 10
self.params["lambda"] = (10, 20)
self.paramindexes = [
"iterations",
"init_level_set_morph",
"smoothing",
"lambda",
]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
if len(img.shape) == 3:
img = skimage.color.rgb2gray(img)
output = skimage.segmentation.morphological_chan_vese(
img,
iterations=self.params["iterations"],
init_level_set=self.params["init_level_set_morph"],
smoothing=self.params["smoothing"],
lambda1=self.params["lambda"][0],
lambda2=self.params["lambda"][1],
)
return output
algorithmspace["MCV"] = Morphological_Chan_Vese
class MorphGeodesicActiveContour(segmentor):
"""Peform Morphological Geodesic Active Contour segmentation algorithm. Uses
an image from inverse_gaussian_gradient in order to segment object with visible,
but noisy/broken borders. inverse_gaussian_gradient computes the magnitude of
the gradients in an image. Returns a preprocessed image suitable for above function.
Returns ndarray of segmented image.
Parameters:
gimage -- array, preprocessed image to be segmented.
iterations -- uint, number of iterations to run.
init_level_set -- str, array same shape as gimage. If string, possible
values are:
'checkerboard': Uses checkerboard_level_set. Returns a binary level set of a checkerboard
'circle': Uses circle_level_set. Creates a binary level set of a circle, given radius and a
center
smoothing -- uint, number of times the smoothing operator is applied
per iteration. Usually 1-4, larger values have smoother segmentation.
threshold -- Areas of image with a smaller value than the threshold are borders.
balloon -- float, guides contour of low-information parts of image.
"""
    # Abbreviation for algorithm = AC
def __init__(self, paramlist=None):
"""Get parameters from parameter list that are used in segmentation algorithm.
Assign default values to these parameters."""
super(MorphGeodesicActiveContour, self).__init__(paramlist)
if not paramlist:
self.params["algorithm"] = "AC"
self.params["alpha"] = 0.2
self.params["sigma"] = 0.3
self.params["iterations"] = 10
self.params["init_level_set_morph"] = "checkerboard"
self.params["smoothing"] = 5
self.params["balloon"] = 10
self.paramindexes = [
"alpha",
"sigma",
"iterations",
"init_level_set_morph",
"smoothing",
"balloon",
]
def evaluate(self, img):
"""Evaluate segmentation algorithm on training image.
Keyword arguments:
img -- Original training image.
Output:
output -- resulting segmentation mask from algorithm.
"""
# We run the inverse_gaussian_gradient to get the image to use
gimage = skimage.segmentation.inverse_gaussian_gradient(
color.rgb2gray(img), self.params["alpha"], self.params["sigma"]
)
# zeros = 0
output = skimage.segmentation.morphological_geodesic_active_contour(
gimage,
self.params["iterations"],
self.params["init_level_set_morph"],
smoothing=self.params["smoothing"],
threshold="auto",
balloon=self.params["balloon"],
)
return output
algorithmspace["AC"] = MorphGeodesicActiveContour
| [
37811,
41030,
434,
273,
5888,
3562,
284,
2193,
703,
284,
10618,
4263,
1262,
402,
1722,
13,
198,
770,
9195,
560,
1682,
857,
407,
753,
1098,
262,
14545,
2346,
11,
2427,
340,
655,
15738,
198,
220,
262,
2989,
10007,
262,
12660,
1257,
4524... | 2.593997 | 10,229 |
# PLUS ONE LEETCODE SOLUTION:
# creating a class.
# creating a function to solve the problem.
| [
2,
48635,
16329,
12509,
2767,
34,
16820,
36817,
35354,
25,
201,
198,
201,
198,
2,
4441,
257,
1398,
13,
201,
198,
201,
198,
220,
220,
220,
1303,
4441,
257,
2163,
284,
8494,
262,
1917,
13,
201
] | 2.888889 | 36 |
# Create structure with mavic 2 pro parameters
params = {'wingtype' : 'rotary',
'TOW' : 0.907, #kg
'max_speed' : 20, #m/s
'max_alt' : 6000, #m above sea level
'max_t' : 31, #min, no wind
'max_t_hover' : 29, #min, no wind
'max_tilt' : 35, #deg
'min_temp' : -10, #deg C
'max_temp' : 40, #deg C
'power_rating' : 60,
'batt_type' : 'LiPo',
'batt_capacity' : 3850, #mAh
'batt_voltage' : 15.4, #V
'batt_cells' : 4,
'batt_energy' : 59.29,
'batt_mass' : 0.297 #kg
}
#test change | [
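# Sanity check: nominal pack energy in Wh follows from capacity and voltage,
# 3850 mAh * 15.4 V / 1000 = 59.29 Wh, matching 'batt_energy' above.
print(params['batt_capacity'] * params['batt_voltage'] / 1000)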
2,
13610,
4645,
351,
285,
615,
291,
362,
386,
10007,
198,
198,
37266,
796,
1391,
6,
5469,
4906,
6,
220,
220,
220,
220,
220,
220,
220,
1058,
220,
220,
705,
10599,
560,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
... | 1.535354 | 495 |
from __future__ import division
import os
import numpy as np
import math
import torch
import torchvision.transforms as transforms
from PIL import Image
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
#plot_losses()
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
28034,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
2603,
2... | 3.25 | 76 |
import sys
from base import DP, least_common
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Expected exactly one argument - the .cnf file name")
exit(1)
DP(least_common)(sys.argv[1])
| [
11748,
25064,
198,
6738,
2779,
1330,
27704,
11,
1551,
62,
11321,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
611,
18896,
7,
17597,
13,
853,
85,
8,
14512,
362,
25,
198,
220,
220,
220,
220,
... | 2.369565 | 92 |
import numpy as np
import hmm
import utils
from cnn import CNNModel3 as CNNModel
from discriminator import Discriminator
from ea import EA
POOL_SIZE = 4
if __name__ == "__main__":
main()
| [
11748,
299,
32152,
355,
45941,
198,
198,
11748,
289,
3020,
198,
11748,
3384,
4487,
198,
6738,
269,
20471,
1330,
8100,
17633,
18,
355,
8100,
17633,
198,
6738,
6534,
20900,
1330,
8444,
3036,
20900,
198,
6738,
304,
64,
1330,
19814,
198,
19... | 2.911765 | 68 |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from guillotina.schema._bootstrapinterfaces import IContextAwareDefaultFactory
from guillotina.schema._bootstrapinterfaces import IFromUnicode
from guillotina.schema._schema import get_fields
from guillotina.schema.exceptions import ConstraintNotSatisfied
from guillotina.schema.exceptions import NotAContainer
from guillotina.schema.exceptions import NotAnIterator
from guillotina.schema.exceptions import RequiredMissing
from guillotina.schema.exceptions import StopValidation
from guillotina.schema.exceptions import TooBig
from guillotina.schema.exceptions import TooLong
from guillotina.schema.exceptions import TooShort
from guillotina.schema.exceptions import TooSmall
from guillotina.schema.exceptions import WrongType
from typing import Any
from zope.interface import Attribute
from zope.interface import implementer
from zope.interface import providedBy
__docformat__ = 'restructuredtext'
# XXX This class violates the Liskov Substitution Principle: it
# is derived from Container, but cannot be used everywhere an instance
# of Container could be, because it's '_validate' is more restrictive.
class Orderable(object):
"""Values of ordered fields can be sorted.
They can be restricted to a range of values.
Orderable is a mixin used in combination with Field.
"""
min = ValidatedProperty('min')
max = ValidatedProperty('max')
class MinMaxLen(object):
"""Expresses constraints on the length of a field.
MinMaxLen is a mixin used in combination with Field.
"""
min_length = 0
max_length = None
@implementer(IFromUnicode)
class Text(MinMaxLen, Field):
"""A field containing text used for human discourse."""
_type = str
def from_unicode(self, str):
"""
>>> t = Text(constraint=lambda v: 'x' in v)
>>> t.from_unicode(b"foo x spam")
Traceback (most recent call last):
...
WrongType: ('foo x spam', <type 'unicode'>, '')
>>> t.from_unicode("foo x spam")
u'foo x spam'
>>> t.from_unicode("foo spam")
Traceback (most recent call last):
...
ConstraintNotSatisfied: ('foo spam', '')
"""
self.validate(str)
return str
class TextLine(Text):
"""A text field with no newlines."""
class Password(TextLine):
"""A text field containing a text used as a password."""
UNCHANGED_PASSWORD = object()
def set(self, context, value):
"""Update the password.
We use a special marker value that a widget can use
to tell us that the password didn't change. This is
needed to support edit forms that don't display the
existing password and want to work together with
encryption.
"""
if value is self.UNCHANGED_PASSWORD:
return
super(Password, self).set(context, value)
class Bool(Field):
"""A field representing a Bool."""
_type = bool
def from_unicode(self, str):
"""
>>> b = Bool()
>>> IFromUnicode.providedBy(b)
True
>>> b.from_unicode('True')
True
>>> b.from_unicode('')
False
>>> b.from_unicode('true')
True
>>> b.from_unicode('false') or b.from_unicode('False')
False
"""
v = str == 'True' or str == 'true'
self.validate(v)
return v
@implementer(IFromUnicode)
class Int(Orderable, Field):
"""A field representing an Integer."""
_type = int
def from_unicode(self, str):
"""
>>> f = Int()
>>> f.from_unicode("125")
125
>>> f.from_unicode("125.6") #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: invalid literal for int(): 125.6
"""
v = int(str)
self.validate(v)
return v
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
6244,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789... | 2.725182 | 1,652 |
# A straightforward implementation
# of the particle filter idea
#
# A particle filter is a sample based approach
# for recursive Bayesian filtering
# The particles are a population based discrete
# representation of a probability density function.
#
# The filter recursively updates
#
# - the particle locations according to a
# probabilistic motion model
# (prediction update step)
#
# - recomputes importance weights for each particle
# (measurement update step)
#
# - resamples the particles according to the current
# pdf represented by the importance weights
#
# ---
# by Prof. Dr. Juergen Brauer, www.juergenbrauer.org
# ported from C++ to Python by Markus Waldmann.
from dataclasses import dataclass
import numpy as np
from params import *
#
# for each particle we store its location in state space
# and an importance weight
@dataclass
class Particle:
    # Class body reconstructed from the comment above (elided in this
    # excerpt): each particle stores its location in state space and an
    # importance weight.
    location: np.ndarray
    weight: float


all_particles = list()  # list of all particles
particle_with_highest_weight = Particle(np.zeros(1), 0)
#
# base class for motion & measurement update models
#
#
# update location or important weight of the specified particle
#
#
# represents a probability distribution using
# a set of discrete particles
#
#
# method to set pointer to user data
# needed to access in motion or perception model
#
#
# reset positions & weights of all particles
# to start conditions
#
#
# should be used by the user to specify in which range [min_value,max_value]
# the <param_nr>-th parameter of the state space lives
#
#
# for the specified particle we guess a random start location
#
#
# set initial location in state space for all particles
#
#
# returns a copy of an existing particle
# the particle to be copied is chosen according to
# a probability that is proportional to its importance weight
#
#
# one particle filter update step
#
| [
2,
317,
15836,
7822,
201,
198,
2,
286,
262,
18758,
8106,
2126,
201,
198,
2,
201,
198,
2,
317,
18758,
8106,
318,
257,
6291,
1912,
3164,
201,
198,
2,
329,
45115,
4696,
35610,
25431,
201,
198,
2,
383,
13166,
389,
257,
3265,
1912,
288... | 3.01497 | 668 |
import os
import time
import sys
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from configurator import read_config, select_data, select_model, select_loss, select_optimizer
from src.pipeline import NLUDataset
from src.utils import make_dir, save_train_log, save_ckpt, load_ckpt
from src.train import train_1epoch
from src.evaluate import evaluate
#%% Input args
# Run this file in background: nohup python main.py --config_fname=sample > log_sample.txt &
parser = argparse.ArgumentParser()
parser.add_argument('--config_fname', type=str, default='sample')
parser.add_argument('--overwrite', type=bool, default=True)
args = parser.parse_args()
#%%
CONFIG_FNAME = args.config_fname
try:
experiment_configs = read_config(CONFIG_FNAME)
except ValueError as e:
    print('There is no such config file!')
    sys.exit(1)
RESULT_ROOT_DIR = make_dir('../results', CONFIG_FNAME, overwrite=args.overwrite)
#%%
#%% =============================================== main
if __name__ == "__main__":
result = []
num_exps = len(experiment_configs)
for i in range(num_exps):
config = experiment_configs[i]
print('########################################################')
print('# Config: %s, Case: %d'\
%(CONFIG_FNAME,config['case_num']))
test_acc = main(config)
print('# Config: %s, Case: %d, Acc: %.4f'\
%(CONFIG_FNAME,experiment_configs[i]['case_num'],test_acc))
print('########################################################')
result.append([config['case_num'], test_acc])
test_results = np.array(result)
np.savetxt('%s/test_results.txt'%RESULT_ROOT_DIR, test_results, delimiter=',')
| [
11748,
28686,
201,
198,
11748,
640,
201,
198,
11748,
25064,
201,
198,
11748,
1822,
29572,
201,
198,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
40085,
355,
6436,
201,
198,
6738,
... | 2.608755 | 731 |
import unittest
import os
import json
from unittest.mock import patch
import threading
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from google.cloud import bigquery
from google.auth.exceptions import DefaultCredentialsError
from google.cloud.bigquery._http import Connection
from kaggle_gcp import KaggleKernelCredentials, PublicBigqueryClient, _DataProxyConnection
import kaggle_secrets
| [
11748,
555,
715,
395,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
11748,
4704,
278,
198,
6738,
1332,
13,
11284,
1330,
9344,
19852,
24502,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19... | 3.789063 | 128 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cve.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cve.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b(
'\n\tcve.proto\"G\n\treference\x12\x0b\n\x03url\x18\x01 \x02(\t\x12\x0c\n\x04name\x18\x02 \x02(\t\x12\x11\n\trefsource\x18\x03 \x02(\t\x12\x0c\n\x04tags\x18\x04 \x03(\t\"W\n\rversion_match\x12\r\n\x05start\x18\x01 \x02(\t\x12\x15\n\rstart_include\x18\x02 \x02(\x08\x12\x0b\n\x03\x65nd\x18\x03 \x02(\t\x12\x13\n\x0b\x65nd_include\x18\x04 \x02(\x08\"\xdd\x01\n\tcpe_match\x12\x0c\n\x04part\x18\x01 \x02(\t\x12\x0e\n\x06vendor\x18\x02 \x01(\t\x12\x0f\n\x07product\x18\x03 \x01(\t\x12\x1f\n\x07version\x18\x04 \x01(\x0b\x32\x0e.version_match\x12\x0e\n\x06update\x18\x05 \x01(\t\x12\x0f\n\x07\x65\x64ition\x18\x06 \x01(\t\x12\x10\n\x08language\x18\x07 \x01(\t\x12\x12\n\nsw_edition\x18\x08 \x01(\t\x12\n\n\x02sw\x18\t \x01(\t\x12\n\n\x02hw\x18\n \x01(\t\x12\r\n\x05other\x18\x0b \x01(\t\x12\x12\n\nvulnerable\x18\x0c \x02(\x08\")\n\x06target\x12\x1f\n\x0b\x63pe_matches\x18\x01 \x03(\x0b\x32\n.cpe_match\"\xe1\x01\n\x03\x63ve\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05\x12\x0e\n\x06\x63ve_id\x18\x02 \x02(\t\x12\x18\n\x07targets\x18\x03 \x03(\x0b\x32\x07.target\x12\x0b\n\x03\x63we\x18\x04 \x01(\x05\x12\x13\n\x0b\x64\x65scription\x18\x05 \x02(\t\x12\x1e\n\nreferences\x18\x06 \x03(\x0b\x32\n.reference\x12\x16\n\x0e\x63vss_v3_vector\x18\x07 \x01(\t\x12\x16\n\x0e\x63vss_v2_vector\x18\x08 \x01(\t\x12\x16\n\x0epublished_date\x18\t \x02(\t\x12\x15\n\rmodified_date\x18\x13 \x02(\t')
)
_REFERENCE = _descriptor.Descriptor(
name='reference',
full_name='reference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='reference.url', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='reference.name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='refsource', full_name='reference.refsource', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='reference.tags', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=13,
serialized_end=84,
)
_VERSION_MATCH = _descriptor.Descriptor(
name='version_match',
full_name='version_match',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='version_match.start', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_include', full_name='version_match.start_include', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='version_match.end', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end_include', full_name='version_match.end_include', index=3,
number=4, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=173,
)
_CPE_MATCH = _descriptor.Descriptor(
name='cpe_match',
full_name='cpe_match',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='part', full_name='cpe_match.part', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vendor', full_name='cpe_match.vendor', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='product', full_name='cpe_match.product', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='cpe_match.version', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='cpe_match.update', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='edition', full_name='cpe_match.edition', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language', full_name='cpe_match.language', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sw_edition', full_name='cpe_match.sw_edition', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sw', full_name='cpe_match.sw', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hw', full_name='cpe_match.hw', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='other', full_name='cpe_match.other', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vulnerable', full_name='cpe_match.vulnerable', index=11,
number=12, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=397,
)
_TARGET = _descriptor.Descriptor(
name='target',
full_name='target',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cpe_matches', full_name='target.cpe_matches', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=399,
serialized_end=440,
)
_CVE = _descriptor.Descriptor(
name='cve',
full_name='cve',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scan_id', full_name='cve.scan_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cve_id', full_name='cve.cve_id', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targets', full_name='cve.targets', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cwe', full_name='cve.cwe', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='cve.description', index=4,
number=5, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='references', full_name='cve.references', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cvss_v3_vector', full_name='cve.cvss_v3_vector', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cvss_v2_vector', full_name='cve.cvss_v2_vector', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='published_date', full_name='cve.published_date', index=8,
number=9, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='modified_date', full_name='cve.modified_date', index=9,
number=19, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=443,
serialized_end=668,
)
_CPE_MATCH.fields_by_name['version'].message_type = _VERSION_MATCH
_TARGET.fields_by_name['cpe_matches'].message_type = _CPE_MATCH
_CVE.fields_by_name['targets'].message_type = _TARGET
_CVE.fields_by_name['references'].message_type = _REFERENCE
DESCRIPTOR.message_types_by_name['reference'] = _REFERENCE
DESCRIPTOR.message_types_by_name['version_match'] = _VERSION_MATCH
DESCRIPTOR.message_types_by_name['cpe_match'] = _CPE_MATCH
DESCRIPTOR.message_types_by_name['target'] = _TARGET
DESCRIPTOR.message_types_by_name['cve'] = _CVE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
reference = _reflection.GeneratedProtocolMessageType('reference', (_message.Message,), dict(
DESCRIPTOR=_REFERENCE,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:reference)
))
_sym_db.RegisterMessage(reference)
version_match = _reflection.GeneratedProtocolMessageType('version_match', (_message.Message,), dict(
DESCRIPTOR=_VERSION_MATCH,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:version_match)
))
_sym_db.RegisterMessage(version_match)
cpe_match = _reflection.GeneratedProtocolMessageType('cpe_match', (_message.Message,), dict(
DESCRIPTOR=_CPE_MATCH,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:cpe_match)
))
_sym_db.RegisterMessage(cpe_match)
target = _reflection.GeneratedProtocolMessageType('target', (_message.Message,), dict(
DESCRIPTOR=_TARGET,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:target)
))
_sym_db.RegisterMessage(target)
cve = _reflection.GeneratedProtocolMessageType('cve', (_message.Message,), dict(
DESCRIPTOR=_CVE,
__module__='cve_pb2'
# @@protoc_insertion_point(class_scope:cve)
))
_sym_db.RegisterMessage(cve)
# @@protoc_insertion_point(module_scope)
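
# Example usage of the generated `cve` message (an added illustration, not
# part of the protoc output above; all five required fields are set):
if __name__ == '__main__':
    c = cve(scan_id=1, cve_id='CVE-2019-0001', description='example entry',
            published_date='2019-01-01', modified_date='2019-01-02')
    payload = c.SerializeToString()
    assert cve.FromString(payload).cve_id == 'CVE-2019-0001'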
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
269,
303,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
198,
62,
65,
796,
25064,
13,
9641,
62,
10951,
58,
15,
60,
1279,
513,
290,
357,
5... | 2.095222 | 8,748 |
from TextString import TextString
##
# The abstract class that is the basic plugin
##
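# The class itself was stripped from this snippet; a minimal sketch
# (assumption) of what the abstract base plugin could look like:
class Plugin:
    def __init__(self, name):
        self.name = name

    def process(self, text):
        """Transform the given TextString; concrete plugins must override."""
        raise NotImplementedError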
| [
6738,
8255,
10100,
1330,
8255,
10100,
198,
2235,
198,
2,
383,
12531,
1398,
326,
318,
262,
4096,
13877,
198,
2235,
198
] | 4.095238 | 21 |
import calendar
from umalqurra.hijri_date import HijriDate
import jdatetime
from datetime import datetime
import re
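

# `split_non_alpha` is used below but not defined in this snippet; a minimal
# sketch (assumption) built on the `re` import above:
def split_non_alpha(text):
    """Split a date string on any run of non-alphanumeric characters."""
    return [part for part in re.split(r'[^0-9A-Za-z]+', text) if part]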
def middle_east_parsed_date(text_date, kwargs):
"""
:param text_date:
:param kwargs: format : %d-%m-%Y for 12-7-1397.
:return:
"""
dict_month_numeric = dict((v, k) for k, v in enumerate(calendar.month_name))
dict_month_abbr_numeric = dict((v, k) for k, v in enumerate(calendar.month_abbr))
day = -1
month = -1
year = -1
default_format = ["%d","%m","%Y"]
tsplit = split_non_alpha(text_date)
if "format" in kwargs:
format = kwargs["format"]
else:
format = default_format
if len(tsplit) != len(default_format):
#TODO: likely split characters next to each other 29101394
return None
for idx in range(0, len(tsplit)):
item = tsplit[idx]
if not isinstance(item, int) and not isinstance(item, float):
item = item.capitalize().strip()
if item in dict_month_numeric:
item = dict_month_numeric[item]
elif item in dict_month_abbr_numeric:
item = dict_month_abbr_numeric[item]
f_value = format[idx]
if f_value == "%d":
day = int(item)
elif f_value == "%m":
month = int(item)
elif f_value == "%Y":
year = int(item)
if month > 0 and day > 0 and year > 0:
if year < 1410:
jd = jdatetime.datetime(year, month, day)
return jd.togregorian()
if year < 1500:
um = HijriDate(year, month, day)
return datetime(um.year_gr, um.month_gr, um.day_gr)
return None | [
11748,
11845,
198,
6738,
334,
7617,
80,
333,
430,
13,
71,
2926,
380,
62,
4475,
1330,
367,
2926,
380,
10430,
198,
11748,
474,
19608,
8079,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
302,
628,
198,
198,
4299,
3504,
62,
23316,
... | 2.084577 | 804 |
#!/usr/bin/env python3
import math
from tuw_trinamic_iwos_revolute_controller.device.motor import Motor
from tuw_trinamic_iwos_revolute_controller.device.configuration_tool import ConfigurationTool
class Wheel:
"""
class representing a wheel controlled with Trinamic TMCM-1640
"""
def set_velocity(self, velocity):
"""
set the rounded target velocity (m/s) for the wheel
:param velocity: target velocity (m/s)
:return:
"""
# velocity needs to be multiplied by negative one to change direction (since wheels are mounted in reverse)
velocity_ms = velocity * -1
velocity_rps = velocity_ms / self._perimeter
velocity_rpm = velocity_rps * 60
self._motor.set_target_velocity_rpm(velocity=round(velocity_rpm))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
10688,
198,
198,
6738,
12777,
86,
62,
2213,
259,
18127,
62,
14246,
418,
62,
18218,
3552,
62,
36500,
13,
25202,
13,
76,
20965,
1330,
12533,
198,
6738,
12777,
86,
62,
2... | 2.725424 | 295 |
import os, numpy as np
from pymicro.file.file_utils import HST_read
from skimage.transform import radon
from matplotlib import pyplot as plt
'''
Example of use of the radon transform.
'''
data = HST_read('../data/mousse_250x250x250_uint8.raw', autoparse_filename=True, zrange=range(1))[:, :, 0]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5))
ax1.set_title('Original data')
ax1.imshow(data.T, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(data.shape), endpoint=False)
sinogram = radon(data, theta=theta, circle=False)
ax2.set_title('Radon transform (Sinogram)')
ax2.set_xlabel('Projection angle (deg)')
ax2.set_ylabel('Projection position (pixels)')
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.subplots_adjust(left=0.05, right=0.95)
image_name = os.path.splitext(__file__)[0] + '.png'
print('writing %s' % image_name)
plt.savefig(image_name, format='png')
from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| [
11748,
28686,
11,
299,
32152,
355,
45941,
198,
6738,
279,
4948,
2500,
13,
7753,
13,
7753,
62,
26791,
1330,
367,
2257,
62,
961,
198,
6738,
1341,
9060,
13,
35636,
1330,
2511,
261,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
4... | 2.431604 | 424 |
import argparse
import math
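
# `perfect_square` is referenced as an argparse type below but not defined in
# this snippet; a minimal sketch (assumption) using the imports above:
def perfect_square(string):
    value = int(string)
    root = int(math.sqrt(value))
    if root * root != value:
        raise argparse.ArgumentTypeError('%r is not a perfect square' % string)
    return value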
parser = argparse.ArgumentParser(prog='PROG')
parser.add_argument('foo', type=perfect_square)
args = parser.parse_args()
print(args.foo) | [
11748,
1822,
29572,
198,
11748,
10688,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
1676,
70,
11639,
4805,
7730,
11537,
198,
48610,
13,
2860,
62,
49140,
10786,
21943,
3256,
2099,
28,
25833,
62,
23415,
8,
198,
22046,
796... | 3.055556 | 54 |
# -*- coding: utf-8 -*-
# Copyright 2019 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the level, version and FBC usage of the loaded SBML file."""
from __future__ import absolute_import
from memote.utils import annotate, wrapper
@annotate(title="SBML Level and Version", format_type="raw")
def test_sbml_level(sbml_version):
"""
    Expect the SBML to be at least level 3 version 1.
This test reports if the model file is represented in the latest edition
(level) of the Systems Biology Markup Language (SBML) which is Level 3,
and at least version 1.
Implementation:
The level and version are parsed directly from the SBML document.
"""
version_tag = 'SBML Level {} Version {}'.format(
sbml_version[0], sbml_version[1])
ann = test_sbml_level.annotation
ann["data"] = version_tag
outcome = sbml_version[:2] >= (3, 1)
ann["metric"] = 1.0 - float(outcome)
ann["message"] = wrapper.fill(
"""The SBML file uses: {}""".format(ann["data"]))
assert sbml_version[:2] >= (3, 1), ann["message"]
@annotate(title="FBC enabled", format_type="raw")
def test_fbc_presence(sbml_version):
"""
Expect the FBC plugin to be present.
The Flux Balance Constraints (FBC) Package extends SBML with structured
and semantic descriptions for domain-specific model components such as
flux bounds, multiple linear objective functions, gene-protein-reaction
associations, metabolite chemical formulas, charge and related annotations
which are relevant for parameterized GEMs and FBA models. The SBML and
constraint-based modeling communities collaboratively develop this package
and update it based on user input.
Implementation:
Parse the state of the FBC plugin from the SBML document.
"""
fbc_present = sbml_version[2] is not None
ann = test_fbc_presence.annotation
ann["data"] = fbc_present
ann["metric"] = 1.0 - float(fbc_present)
if fbc_present:
ann["message"] = wrapper.fill("The FBC package *is* used.")
else:
ann["message"] = wrapper.fill("The FBC package is *not* used.")
assert fbc_present, ann["message"]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
13130,
5267,
78,
18687,
1984,
5693,
3337,
329,
347,
4267,
19542,
1799,
11,
198,
2,
20671,
2059,
286,
16490,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
... | 3.144482 | 879 |
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules import SubtractMean, LCN
from PuzzleLib.Visual import loadImage, showImage
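

# `main` is called below but was stripped from this snippet; a rough sketch
# (assumption -- the module parameters, file paths and exact PuzzleLib call
# signatures here are placeholders, not verified against the library):
def main():
    data = gpuarray.to_gpu(loadImage("../TestData/lena.jpg"))
    subtractMean = SubtractMean(size=7)
    lcn = LCN(N=7)
    result = lcn(subtractMean(data))
    showImage(result.get(), "result.png")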
if __name__ == "__main__":
main()
| [
6738,
23966,
25835,
13,
7282,
437,
1330,
308,
19944,
18747,
198,
198,
6738,
23966,
25835,
13,
5841,
5028,
1330,
3834,
83,
974,
5308,
272,
11,
22228,
45,
198,
6738,
23966,
25835,
13,
36259,
1330,
3440,
5159,
11,
905,
5159,
628,
198,
19... | 3.087719 | 57 |
import decimal
import numpy as np
from collections import deque
import torch
from config import cfg
from utils.timer import Timer
from utils.logger import logger_info
import utils.distributed as dist
from utils.distributed import sum_tensor
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 1.0 for k in topk]
class AverageMeter:
"""Computes and stores the average and current value"""
def time_string(seconds):
"""Converts time in seconds to a fixed-width string format."""
days, rem = divmod(int(seconds), 24 * 3600)
hrs, rem = divmod(rem, 3600)
mins, secs = divmod(rem, 60)
return "{0:02},{1:02}:{2:02}:{3:02}".format(days, hrs, mins, secs)
def gpu_mem_usage():
"""Computes the GPU memory usage for the current device (MB)."""
mem_usage_bytes = torch.cuda.max_memory_allocated()
return mem_usage_bytes / 1024 / 1024
def float_to_decimal(data, prec=4):
"""Convert floats to decimals which allows for fixed width json."""
if isinstance(data, dict):
return {k: float_to_decimal(v, prec) for k, v in data.items()}
if isinstance(data, float):
return decimal.Decimal(("{:." + str(prec) + "f}").format(data))
else:
return data
class ScalarMeter(object):
"""Measures a scalar value (adapted from Detectron)."""
class TrainMeter(object):
"""Measures training stats."""
| [
11748,
32465,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
390,
4188,
198,
198,
11748,
28034,
198,
6738,
4566,
1330,
30218,
70,
198,
6738,
3384,
4487,
13,
45016,
1330,
5045,
263,
198,
6738,
3384,
4487,
13,
6404,
1362,
133... | 2.70712 | 618 |
print(solve(int(input()))) | [
628,
198,
198,
4798,
7,
82,
6442,
7,
600,
7,
15414,
3419,
22305
] | 2.307692 | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ResophNotes.py
Convert to JSON and query ResophNotes notes from shell.
Copyright 2017 by Reiner Rottmann <reiner@rottmann.it>
Released under the BSD License.
"""
import os
import sys
import base64
import uuid
import json
import argparse
import subprocess
import collections
import xmltodict
def convert(path):
"""Convert the ResophNotes data file (resophnotesdata.xml) at given path. Returns db as dict."""
tags = {}
db = {}
fd = open(path, 'r')
content = fd.read()
fd.close()
data = xmltodict.parse(content)
tags['none'] = []
for tag in data['database']['tag']:
try:
tags[base64.b64decode(str(tag))] = []
except KeyError:
print tag
for obj in data['database']['object']:
uid = str(uuid.uuid4())
try:
if 'tags' in obj:
objtags = base64.b64decode(str(obj['tags'])).split(',')
else:
objtags = ['none']
for tag in objtags:
tags[tag].append(uid)
db[uid] = {}
for key in obj.keys():
if key in ['content', 'tags']:
value = base64.b64decode(str(obj[key]))
else:
value = str(obj[key])
db[uid][key] = value
except:
pass
return db
def save_json(path, db):
"""Save the db as JSON dump to given path."""
fd = open(path, 'w')
json.dump(db, fd)
fd.close()
def open_json(path):
"""Open the db from JSON file previously saved at given path."""
fd = open(path, 'r')
return json.load(fd)
def count_tags(db):
"""Count tag statistics for some awesomeness."""
all_tags = []
for key in db.keys():
obj = db[key]
if 'tags' in obj.keys():
all_tags = all_tags + obj['tags'].split(',')
stats = collections.Counter(all_tags)
return stats.most_common(10)
def cli(db, internal=False):
"""Query the database via CLI. May use internal viewer instead of less."""
count_tags(db)
print """
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|R|e|s|o|p|h|N|o|t|e|s|.|p|y|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
print 'Total number of notes:', len(db)
print 'Most common tags:', ', '.join(['@' + x[0] + ':' + str(x[1]) for x in count_tags(db)])
print ''
while True:
query = raw_input('Query (q to quit)? ').lower()
if query == 'q':
sys.exit(0)
results = []
for key in db.keys():
obj = db[key]
match_tag = True
for q in query.split():
if 'tags' in obj.keys():
if not q in str(obj['tags']).lower().split(','):
match_tag = False
break
else:
match_tag = False
break
if match_tag:
if not obj in results:
results.append(obj)
continue
if 'content' in obj.keys():
                if obj['content'].encode('utf-8').lower().find(query) != -1:
if not obj in results:
results.append(obj)
continue
i = 0
for result in results[:36]:
if not 'tags' in result.keys():
result['tags'] = 'none'
print str(i), '|', result['modify'], '|', result['content'].splitlines()[0], '|', 'tags:', ' '.join(
['@' + x for x in result['tags'].split(',')])
i += 1
print 'Results:', len(results)
if len(results) > 0:
while True:
show = str(raw_input('Read which note (q to return)? '))
if show == 'q':
break
if show.isdigit():
show = int(show)
                    if show >= 0 and show < len(results):
if internal:
sys.stdout.write(str(results[show]['content']))
else:
p = subprocess.Popen('/usr/bin/less', stdin=subprocess.PIPE, shell=True)
p.communicate(results[show]['content'].encode('utf-8').strip())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert to JSON and query ResophNotes notes from shell.')
parser.add_argument('--data', help='Import from this ResophNotes data file. Default: resophnotesdata.xml',
default='resophnotesdata.xml')
parser.add_argument('--json', help='JSON file with converted ResophNotes data. Default: resophnotesdata.json',
default='resophnotesdata.json')
parser.add_argument('--cli', help='Open an interactive cli to query ResophNotes data.', action='store_true')
parser.add_argument('--internal', help='Use internal viewer instead of less.', action='store_true')
args = parser.parse_args()
db = None
if not os.path.exists(args.json) and os.path.exists(args.data):
db = convert(args.data)
save_json(args.json, db)
else:
if os.path.exists(args.json):
db = open_json(args.json)
if db is None and os.path.exists(args.json):
db = open_json(args.json)
else:
if db:
if args.cli:
cli(db, args.internal)
sys.exit(0)
else:
print "Error: No ResophNotes available."
sys.exit(1)
parser.print_help()
sys.exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
4965,
2522,
16130,
13,
9078,
198,
198,
3103,
1851,
284,
19449,
290,
12405,
1874,
2522,
16130,
4710,
422,
75... | 1.997505 | 2,806 |
# Create a task list. A user is presented with the text below.
# Let them select an option to list all of their tasks, add a task to their list, delete a task, or quit the program.
# Make each option a different function in your program.
# Do NOT use Google. Do NOT use other students. Try to do this on your own.
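
# One possible minimal solution (a sketch -- the exercise intends you to
# write your own):
tasks = []

def list_tasks():
    for number, task in enumerate(tasks, start=1):
        print(number, task)

def add_task():
    tasks.append(input('Task to add: '))

def delete_task():
    list_tasks()
    choice = int(input('Number of the task to delete: '))
    if 1 <= choice <= len(tasks):
        tasks.pop(choice - 1)

def main():
    while True:
        option = input('(L)ist, (A)dd, (D)elete or (Q)uit? ').lower()
        if option == 'l':
            list_tasks()
        elif option == 'a':
            add_task()
        elif option == 'd':
            delete_task()
        elif option == 'q':
            break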
if __name__ == '__main__':
main()
| [
198,
198,
2,
13610,
257,
4876,
1351,
13,
317,
2836,
318,
5545,
351,
262,
2420,
2174,
13,
198,
2,
3914,
606,
2922,
281,
3038,
284,
1351,
477,
286,
511,
8861,
11,
751,
257,
4876,
284,
511,
1351,
11,
12233,
257,
4876,
11,
393,
11238,... | 3.54902 | 102 |
import string
size = 10
mid_line = '-'.join([string.ascii_letters[size - x] for x in range(1, size)] + [string.ascii_letters[x] for x in range(size)])
lines = []
for x in range(2,size+1):
    main = ''.join(string.ascii_letters[size - i] for i in range(1, x))
*main_list,_ = list(main)
reverse = ''.join(x for x in reversed(main_list))
line = '-'.join(main+reverse)
num = (len(mid_line)-len(line)) // 2
output_line = '-' * num + line + '-' * num
lines.append(output_line)
[print(x) for x in lines]
print(mid_line)
[print(x) for x in reversed(lines)] | [
11748,
4731,
198,
7857,
796,
838,
198,
13602,
62,
1370,
796,
705,
12,
4458,
22179,
26933,
8841,
13,
292,
979,
72,
62,
15653,
58,
7857,
532,
2124,
60,
329,
2124,
287,
2837,
7,
16,
11,
2546,
15437,
1343,
685,
8841,
13,
292,
979,
72,... | 2.40249 | 241 |
from flask import Blueprint
from flask_restful import Api
from .. import models, request
from ..resource import Resource
from ..response import Response, no_content
from ..validator import matcher, schemas, validate_input, validate_schema
module = Blueprint('users', __name__, url_prefix='/users')
api = Api(module)
@api.resource('')
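# The decorated resource class was stripped from this snippet; a minimal
# sketch (assumption -- the project's own Resource/Response/models APIs are
# guessed, not verified):
class Users(Resource):
    def get(self):
        """Return all users."""
        return Response(models.User.query.all())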
| [
6738,
42903,
1330,
39932,
198,
6738,
42903,
62,
2118,
913,
1330,
5949,
72,
198,
198,
6738,
11485,
1330,
4981,
11,
2581,
198,
6738,
11485,
31092,
1330,
20857,
198,
6738,
11485,
26209,
1330,
18261,
11,
645,
62,
11299,
198,
6738,
11485,
12... | 3.595745 | 94 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['VirtualMachineResource']
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 3.747664 | 107 |
import os
import asyncio
from datetime import datetime, timedelta
from influxdb import InfluxDBClient
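

# `watch` is awaited below but not defined in this snippet; a minimal sketch
# (assumption -- host, database and measurement names are placeholders):
async def watch():
    client = InfluxDBClient(host=os.environ.get('INFLUX_HOST', 'localhost'),
                            port=8086, database='telemetry')
    while True:
        cutoff = datetime.utcnow() - timedelta(minutes=5)
        result = client.query("SELECT * FROM metrics WHERE time > '%s'"
                              % cutoff.strftime('%Y-%m-%dT%H:%M:%SZ'))
        print(list(result.get_points()))
        await asyncio.sleep(60)
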
asyncio.run(watch())
| [
11748,
28686,
198,
11748,
30351,
952,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
25065,
9945,
1330,
4806,
22564,
11012,
11792,
628,
198,
292,
13361,
952,
13,
5143,
7,
8340,
28955,
198
] | 3.472222 | 36 |
#Sort list, and sort list in reverse order
a = [5, 2, 9, 1, 7, 6, 3, 8, 4]
b = sorted(a)
c = sorted(a, reverse=True)
print(b)
print(c)
| [
2,
42758,
1351,
11,
290,
3297,
1351,
287,
9575,
1502,
201,
198,
201,
198,
64,
796,
1351,
3419,
201,
198,
64,
796,
685,
20,
11,
362,
11,
860,
11,
352,
11,
767,
11,
718,
11,
513,
11,
807,
11,
604,
60,
201,
198,
65,
796,
1351,
... | 2 | 91 |
import matplotlib as mt
mt.use('Agg')
import caffe
import numpy as np
import sys
if __name__ == "__main__":
# sys.argv[1] = net prefix
# sys.argv[2] = model prefix
# sys.argv[3] = model _iter_
# sys.argv[4] = output directory
if len(sys.argv) != 5:
raise ValueError("4 args required")
net = caffe.Net(sys.argv[1]+".prototxt", sys.argv[2]+"_iter_"+sys.argv[3]+".caffemodel", caffe.TEST)
c1 = net.params['conv1'][0].data
c2 = net.params['conv2'][0].data
np.save(sys.argv[4]+'c1map.npy', c1)
np.save(sys.argv[4]+'c2map.npy', c2)
| [
11748,
2603,
29487,
8019,
355,
45079,
198,
16762,
13,
1904,
10786,
46384,
11537,
198,
11748,
21121,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
2,
2... | 2.185484 | 248 |
# DO NOT EDIT! This file is automatically generated
import typing
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.platform.models.store import (
Store,
StoreDraft,
StorePagedQueryResponse,
StoreUpdate,
StoreUpdateAction,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
class StoreService(abstract.AbstractService):
"""Stores let you model the context your customers shop in."""
def query(
self,
*,
expand: OptionalListStr = None,
sort: OptionalListStr = None,
limit: int = None,
offset: int = None,
with_total: bool = None,
where: OptionalListStr = None,
predicate_var: typing.Dict[str, str] = None,
) -> StorePagedQueryResponse:
"""Stores let you model the context your customers shop in."""
params = self._serialize_params(
{
"expand": expand,
"sort": sort,
"limit": limit,
"offset": offset,
"with_total": with_total,
"where": where,
"predicate_var": predicate_var,
},
_StoreQuerySchema,
)
return self._client._get(
endpoint="stores", params=params, response_class=StorePagedQueryResponse
)
def create(self, draft: StoreDraft, *, expand: OptionalListStr = None) -> Store:
"""Stores let you model the context your customers shop in."""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._post(
endpoint="stores", params=params, data_object=draft, response_class=Store
)
| [
2,
8410,
5626,
48483,
0,
770,
2393,
318,
6338,
7560,
198,
11748,
19720,
198,
198,
6738,
4412,
66,
316,
10141,
13,
16794,
364,
1330,
17220,
40613,
40161,
35608,
259,
198,
6738,
4412,
66,
316,
10141,
13,
24254,
13,
27530,
13,
8095,
1330... | 2.386831 | 729 |
import bcrypt
from fastapi import APIRouter
from fastapi import HTTPException
from peewee import IntegrityError
from db import Person as PersonORM
from db import SQLITE_DB
from schema import Person as PersonSchema
person = APIRouter()
@person.post("/person")
| [
11748,
275,
29609,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
198,
6738,
3049,
15042,
1330,
14626,
16922,
198,
6738,
613,
413,
1453,
1330,
39348,
12331,
198,
198,
6738,
20613,
1330,
7755,
355,
7755,
1581,
44,
198,
6738,
20613,
1330,... | 3.652778 | 72 |
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.contrib.auth import get_user_model
from ..comment import Comment
| [
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
3486,
2149,
75,
1153,
11,
3486,
2043,
395,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
11485,
2389... | 3.711111 | 45 |
marks = [
[1, 0, 4, 8],
[0, 2, 0, 6],
[2, 4, 5, 2],
[9, 5, 8, 3]
]
d1 = 0
d2 = 0
n = len(marks)
# one running total per row and per column (the original appended a zero to
# c on every inner iteration, producing n*n entries instead of n, and the
# final loop then indexed r out of range)
r = [0] * n
c = [0] * n
for i in range(n):
    for j in range(n):
        if i == j:
            d1 += marks[i][j]
        if i + j == n - 1:
            d2 += marks[i][j]
        r[i] += marks[i][j]
        c[j] += marks[i][j]
    print('r', i, '=', r[i])
    print('c', i, '=', c[i])
print('d1=', d1)
print('d2=', d2)
for i in range(n - 1):
    # all row sums, column sums and both diagonals must be equal
    if r[i] != r[i + 1] or c[i] != c[i + 1] or r[i] != c[i] or d1 != d2 or d1 != r[i]:
        print('not magic square')
        break
else:
    print('magic square')
| [
14306,
796,
685,
198,
220,
220,
220,
685,
16,
11,
657,
11,
604,
11,
807,
4357,
198,
220,
220,
220,
685,
15,
11,
362,
11,
657,
11,
718,
4357,
198,
220,
220,
220,
685,
17,
11,
604,
11,
642,
11,
362,
4357,
198,
220,
220,
220,
6... | 1.62614 | 329 |
from typing import List
from ...utils.byte_io_mdl import ByteIO
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
2644,
26791,
13,
26327,
62,
952,
62,
9132,
75,
1330,
30589,
9399,
628
] | 3.3 | 20 |
import cli
if __name__ == "__main__":
cli.p()
| [
11748,
537,
72,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
537,
72,
13,
79,
3419,
628
] | 2.125 | 24 |
from pathlib import Path
import os
import time
import logging
import boto3
import papermill as pm
import watchtower
from package import config, utils
if __name__ == "__main__":
run_on_start = False if config.TEST_OUTPUTS_S3_BUCKET == "" else True
if not run_on_start:
exit()
cfn_client = boto3.client('cloudformation', region_name=config.AWS_REGION)
# Set up logging through watchtower
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
log_group = "/aws/sagemaker/NotebookInstances"
stream_name = "{}/run-notebook.log".format(utils.get_notebook_name())
logger.addHandler(
watchtower.CloudWatchLogHandler(log_group=log_group, stream_name=stream_name))
# Add papermill logging to CloudWatch as well
pm_logger = logging.getLogger('papermill')
pm_logger.addHandler(
watchtower.CloudWatchLogHandler(log_group=log_group, stream_name=stream_name))
# Wait for the stack to finish launching
logger.info("Waiting for stack to finish launching...")
waiter = cfn_client.get_waiter('stack_create_complete')
waiter.wait(StackName=config.STACK_NAME)
logger.info("Starting notebook execution through papermill")
# Run the notebook
bucket = config.TEST_OUTPUTS_S3_BUCKET
solution_notebooks = [
"1.Data_Privatization",
"2.Model_Training"
]
kernel_name = 'python3'
test_prefix = "/home/ec2-user/SageMaker/test/"
notebooks_directory = '/home/ec2-user/SageMaker/sagemaker/'
for notebook_name in solution_notebooks:
start_time = time.time()
stdout_path = os.path.join(test_prefix, "{}-output_stdout.txt".format(notebook_name))
stderr_path = os.path.join(test_prefix, "{}-output_stderr.txt".format(notebook_name))
with open(stdout_path, 'w') as stdoutfile, open(stderr_path, 'w') as stderrfile:
output_notebook_path = "{}-output.ipynb".format(os.path.join(test_prefix, notebook_name))
try:
nb = pm.execute_notebook(
"{}.ipynb".format(os.path.join(notebooks_directory, notebook_name)),
output_notebook_path,
cwd=notebooks_directory,
kernel_name=kernel_name,
stdout_file=stdoutfile, stderr_file=stderrfile, log_output=True
)
except pm.PapermillExecutionError as err:
            logger.warning("Notebook {} encountered execution error: {}".format(notebook_name, err))
raise
finally:
end_time = time.time()
logger.info("Notebook {} execution time: {} sec.".format(notebook_name, end_time - start_time))
s3 = boto3.resource('s3')
# Upload notebook output file to S3
s3.meta.client.upload_file(output_notebook_path, bucket, Path(output_notebook_path).name)
s3.meta.client.upload_file(stdout_path, bucket, Path(stdout_path).name)
s3.meta.client.upload_file(stderr_path, bucket, Path(stderr_path).name)
| [
6738,
3108,
8019,
1330,
10644,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
18931,
198,
198,
11748,
275,
2069,
18,
198,
11748,
3348,
17805,
355,
9114,
198,
11748,
2342,
36170,
198,
198,
6738,
5301,
1330,
4566,
11,
3384,
4487,
628,
19... | 2.296679 | 1,355 |
from mojeid_registration.page_objects.reg_page import RegistrationPage
from mojeid_registration.config import config as c
| [
6738,
6941,
18015,
312,
62,
2301,
33397,
13,
7700,
62,
48205,
13,
2301,
62,
7700,
1330,
24610,
9876,
198,
6738,
6941,
18015,
312,
62,
2301,
33397,
13,
11250,
1330,
4566,
355,
269,
628,
220,
220,
220,
220,
220,
220,
220,
220,
198
] | 3.142857 | 42 |
# import warnings
import torch
# from .nonlinear import lbfgs
# import scipy.sparse.linalg as sla
# import numpy as np
def rotmat(a,b):
"""
Adapted from http://www.netlib.org/templates/matlab/rotmat.m
Compute the Givens rotation matrix parameters for a and b.
"""
c = torch.zeros_like(a)
s = torch.zeros_like(a)
temp = torch.zeros_like(a)
mask = (b.abs()>a.abs())
temp[mask] = a[mask] / b[mask]
s[mask] = 1.0 / torch.sqrt(1.0+temp[mask]**2)
c[mask] = temp[mask] * s[mask]
mask = (b.abs()<=a.abs())
temp[mask] = b[mask] / a[mask]
c[mask] = 1.0 / torch.sqrt(1.0+temp[mask]**2)
s[mask] = temp[mask] * c[mask]
mask = (b==0)
c[mask] = 1.0
s[mask] = 0.0
# if b==0.0:
# c = 1.0
# s = 0.0
# elif b.abs()>a.abs():
# temp = a / b
# s = 1.0 / torch.sqrt(1.0+temp**2)
# c = temp * s
# else:
# temp = b / a
# c = 1.0 / torch.sqrt(1.0+temp**2)
# s = temp * c
return c, s
def gmres( A, x, b, max_iters=None, min_iters=3, max_restarts=1, tol=None, M=None ):
"""
Adapted from http://www.netlib.org/templates/matlab/gmres.m
% -- Iterative template routine --
% Univ. of Tennessee and Oak Ridge National Laboratory
% October 1, 1993
% Details of this algorithm are described in "Templates for the
% Solution of Linear Systems: Building Blocks for Iterative
% Methods", Barrett, Berry, Chan, Demmel, Donato, Dongarra,
% Eijkhout, Pozo, Romine, and van der Vorst, SIAM Publications,
% 1993. (ftp netlib2.cs.utk.edu; cd linalg; get templates.ps).
%
% [x, error, iter, flag] = gmres( A, x, b, M, restrt, max_it, tol )
%
% gmres.m solves the linear system Ax=b
% using the Generalized Minimal residual ( GMRESm ) method with restarts .
%
% input A REAL nonsymmetric positive definite matrix
% x REAL initial guess vector
% b REAL right hand side vector
% M REAL preconditioner matrix
% max_iters INTEGER number of iterations between restarts
% max_restarts INTEGER maximum number of iterations
% tol REAL error tolerance
%
% output x REAL solution vector
% error REAL error norm
% iter INTEGER number of iterations performed
% flag INTEGER: 0 = solution found to tolerance
% 1 = no convergence given max_it
"""
# dummy preconditioner (might replace with something real later)
if M is None: M = lambda x: x
assert x.ndim==2, "x must have batch dimension, x.ndim = "+str(x.ndim)
assert b.ndim==2, "b must have batch dimension, b.ndim = "+str(b.ndim)
# dimensions, dtype and device of the problem
batch_dim = x.size(0)
n = x.size(1)
dtype = x.dtype
device = x.device
if n==1:
x = b / (A(torch.ones_like(x))+1.e-12)
r = M(b-A(x))
return x, r.norm(dim=1).amax(), 1, 0
if tol is None: tol = 1*torch.finfo(dtype).eps
tol = max(tol*b.norm(dim=1).amax(), tol)
# set max_iters if not given, and perform sanity checks
assert max_restarts>0, "max_restarts must be greater than 0, max_restarts = "+str(max_restarts)
assert max_restarts<=n, "max_restarts should not exceed size of the problem n, max_restarts = "+str(max_restarts)
if max_iters is None: max_iters = n//max_restarts
if max_iters<n:
max_restarts = n//max_iters + 1
elif max_iters>=n:
max_iters = n
max_restarts = 1
# initialization
iters = 0
flag = 0
# norm of the RHS
bnrm2 = b.norm(dim=1)
bnrm2[bnrm2==0.0] = 1.0
# terminate if tolerance achieved
# r = M(b-A(x))
# error = r.norm(dim=1) / bnrm2
# error = r.norm(dim=1)
# if error.amax()<tol: return x, error.amax(), iters, flag
# initialize workspace
# orthogonal basis matrix of the Krylov subspace
Q = torch.zeros((batch_dim,n,max_iters+1), dtype=dtype, device=device)
# H is upper Hessenberg matrix, H is A on basis Q
H = torch.zeros((batch_dim,max_iters+1,max_iters), dtype=dtype, device=device)
# cosines and sines of the rotation matrix
cs = torch.zeros((batch_dim,max_iters,), dtype=dtype, device=device)
sn = torch.zeros((batch_dim,max_iters,), dtype=dtype, device=device)
#
e1 = torch.zeros((batch_dim,n+1,), dtype=dtype, device=device)
e1[:,0] = 1.0
# perform outer iterations
for _ in range(max_restarts):
r = M(b-A(x))
rnrm2 = r.norm(dim=1,keepdim=True)
rnrm2[rnrm2==0.0] = 1.0
# first basis vector
Q[...,0] = r / rnrm2
s = rnrm2 * e1
# restart method and perform inner iterations
for i in range(max_iters):
iters += 1
################################################
# find next basis vector with Arnoldi iteration
# (i+1)-st Krylov vector
w = M(A(Q[...,i]))
# Gram-Schmidt othogonalization
for k in range(i+1):
H[:,k,i] = (w*Q[...,k]).sum(dim=1)
w -= H[:,k,i].unsqueeze(1) * Q[...,k]
w += 1.e-12 # to make possible 0/0=1 (Why can this happen?)
H[:,i+1,i] = w.norm(dim=1)
# (i+1)-st basis vector
Q[:,:,i+1] = w / H[:,i+1,i].unsqueeze(1)
################################################
# apply Givens rotation to eliminate the last element in H ith row
# rotate kth column
for k in range(i):
temp = cs[:,k]*H[:,k,i] + sn[:,k]*H[:,k+1,i]
H[:,k+1,i] = -sn[:,k]*H[:,k,i] + cs[:,k]*H[:,k+1,i]
H[:,k,i] = temp
# form i-th rotation matrix
cs[:,i], sn[:,i] = rotmat( H[:,i,i], H[:,i+1,i] )
# eliminate H[i+1,i]
H[:,i,i] = cs[:,i]*H[:,i,i] + sn[:,i]*H[:,i+1,i]
H[:,i+1,i] = 0.0
################################################
# update the residual vector
s[:,i+1] = -sn[:,i]*s[:,i]
s[:,i] = cs[:,i]*s[:,i]
error = s[:,i+1].abs().amax()
# yy, _ = torch.triangular_solve(s[:,:i+1].unsqueeze(2), H[:,:i+1,:i+1], upper=True)
# xx = torch.baddbmm(x.unsqueeze(2), Q[:,:,:i+1], yy).squeeze(2)
# error = (s[:,i+1].abs()/bnrm2).amax()
# print(i, "%.2e"%(error.item()), "%.2e"%((M(b-A(xx)).norm(dim=1)).amax().item()))
if error<tol and (i+1)>min_iters: break
# update approximation
y, _ = torch.triangular_solve(s[:,:i+1].unsqueeze(2), H[:,:i+1,:i+1], upper=True)
x = torch.baddbmm(x.unsqueeze(2), Q[:,:,:i+1], y).squeeze(2)
r = M(b-A(x))
error = r.norm(dim=1).amax()
# s[:,i+1] = r.norm(dim=1)
# error = (s[:,i+1].abs() / bnrm2).amax()
if error<tol: break
if error>tol: flag = 1
return x, error, iters, flag
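
# A quick self-check of the solver on a small symmetric positive definite
# system (an added illustration, not part of the original file):
if __name__ == '__main__':
    torch.manual_seed(0)
    n = 16
    M0 = torch.randn(n, n, dtype=torch.float64)
    A_mat = M0 @ M0.t() + n * torch.eye(n, dtype=torch.float64)
    A = lambda v: v @ A_mat.t()  # batched matvec: (batch, n) -> (batch, n)
    b = torch.randn(1, n, dtype=torch.float64)
    x0 = torch.zeros_like(b)
    x, err, iters, flag = gmres(A, x0, b, tol=1.e-10)
    print('residual', (A(x) - b).norm().item(), 'iters', iters, 'flag', flag)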
# class neumann_backprop(Function):
# @staticmethod
# def forward(ctx, y, y_fp):
# # ctx.obj = obj
# ctx.save_for_backward(y, y_fp)
# return y
# @staticmethod
# def backward(ctx, dy):
# y, y_fp, = ctx.saved_tensors
# # residual = lambda dx: (dx-A_dot(dx)-dy).flatten().norm() # \| (I-A) * dx - dy \|
# A_dot = lambda x: torch.autograd.grad(y_fp, y, grad_outputs=x, retain_graph=True, only_inputs=True)[0]
# residual = lambda Adx: (Adx-dy).reshape((dy.size()[0],-1)).norm(dim=1).max() #.flatten().norm() # \| (I-A) * dx - dy \|
# tol = atol = torch.tensor(_TOL)
# TOL = torch.max(tol*dy.norm(), atol)
# #######################################################################
# # Neumann series
# dx = dy
# Ady = A_dot(dy)
# Adx = Ady
# r1 = residual(dx-Adx)
# neu_iters = 1
# while r1>=TOL and neu_iters<_max_iters:
# r0 = r1
# dx = dx + Ady
# Ady = A_dot(Ady)
# Adx = Adx + Ady
# r1 = residual(dx-Adx)
# neu_iters += 1
# assert r1<r0, "Neumann series hasn't converged at iteration "+str(neu_iters)+" out of "+str(_max_iters)+" max iterations"
# if _collect_stat:
# global _backward_stat
# _backward_stat['steps'] = _backward_stat.get('steps',0) + 1
# _backward_stat['neu_residual'] = _backward_stat.get('neu_residual',0) + r1
# _backward_stat['neu_iters'] = _backward_stat.get('neu_iters',0) + neu_iters
# return None, dx
| [
2,
1330,
14601,
198,
11748,
28034,
198,
198,
2,
422,
764,
13159,
29127,
1330,
18360,
69,
14542,
198,
2,
1330,
629,
541,
88,
13,
82,
29572,
13,
75,
1292,
70,
355,
1017,
64,
198,
2,
1330,
299,
32152,
355,
45941,
628,
198,
4299,
5724... | 2.124246 | 3,646 |
"Endpoint for site-specific static files."
import http.client
import os.path
import flask
blueprint = flask.Blueprint('site', __name__)
@blueprint.route('/static/<filename>')
def static(filename):
"Return the given site-specific static file."
dirpath = flask.current_app.config['SITE_STATIC_DIR']
if not dirpath:
raise ValueError('misconfiguration: SITE_STATIC_DIR not set')
dirpath = os.path.expandvars(os.path.expanduser(dirpath))
if dirpath:
return flask.send_from_directory(dirpath, filename)
else:
flask.abort(http.client.NOT_FOUND)
| [
1,
12915,
4122,
329,
2524,
12,
11423,
9037,
3696,
526,
198,
198,
11748,
2638,
13,
16366,
198,
11748,
28686,
13,
6978,
198,
198,
11748,
42903,
628,
198,
17585,
4798,
796,
42903,
13,
14573,
4798,
10786,
15654,
3256,
11593,
3672,
834,
8,
... | 2.686364 | 220 |
# ISLR Ch 4 by Carol Cui
%reset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
# ----------------------------------------------------------------------------
# Q10
Weekly = pd.read_csv('C:\\Users\\Carol\\Desktop\\Weekly.csv')
# (a)
Weekly.describe()
pd.crosstab(index=Weekly["Direction"], columns="count")
Weekly.corr() # Volume increases in year.
# (b)
import statsmodels.api as sm
x01 = sm.add_constant(Weekly.iloc[:, 2:8])
y01 = np.where(Weekly['Direction']=='Up', 1, 0)
glm0 = sm.Logit(y01, x01)
print(glm0.fit().summary())
# Lag2 is statistically significant.
# (c)
x = pd.DataFrame(Weekly, columns=Weekly.columns[2:8])
y = Weekly['Direction']
glm1 = LogisticRegression()
glm1.pred = glm1.fit(x, y).predict(x)
print(pd.DataFrame(confusion_matrix(y, glm1.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('accuracy: ', accuracy_score(y, glm1.pred)) # 56%
# (d)
train = Weekly[Weekly['Year'] < 2009]
x_train = train.iloc[:,3]
x_train = x_train.reshape(len(x_train),1)
y_train = train.loc[:,'Direction']
test = Weekly[Weekly['Year'] >= 2009]
x_test = test.iloc[:,3]
x_test = x_test.reshape(len(x_test),1)
y_test = test.loc[:,'Direction']
glm2 = LogisticRegression()
glm2.pred = glm2.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm2.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('accuracy: ', accuracy_score(y_test, glm2.pred)) # 62.5%
# (e)
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('accuracy: ', accuracy_score(y_test, lda.pred)) # 62.5%
# (f)
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('accuracy: ', accuracy_score(y_test, qda.pred)) # 58.7%
# (g)
knn = KNeighborsClassifier(n_neighbors=1)
knn.pred = knn.fit(x_train, y_train).predict(x_test)
print('accuracy: ', accuracy_score(y_test, knn.pred)) # 49%
# (h): Logistic and LDA models are the best.
# (i)
# KNN
error_rate = np.array([])
k_value = np.array([])
for i in (5, 10, 20):
knn = KNeighborsClassifier(n_neighbors=i)
knn.pred = knn.fit(x_train, y_train).predict(x_test)
k_value = np.append(k_value, i)
error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# LDA
train = Weekly[Weekly['Year'] < 2009]
x_train = train.iloc[:,2:4]
x_train['Lag12'] = x_train.Lag1 * x_train.Lag2
y_train = train.loc[:,'Direction']
test = Weekly[Weekly['Year'] >= 2009]
x_test = test.iloc[:,2:4]
x_test['Lag12'] = x_test.Lag1 * x_test.Lag2
y_test = test.loc[:,'Direction']
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('accuracy: ', accuracy_score(y_test, lda.pred)) # 57.7%
# QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('accuracy: ', accuracy_score(y_test, qda.pred)) # 46.2%
# ----------------------------------------------------------------------------
# Q11
Auto = pd.read_csv('C:\\Users\\Carol\\Desktop\\Auto.csv', na_values='?').dropna()
# (a)
Auto['mpg01'] = np.where(Auto['mpg'] > Auto['mpg'].median(), 1, 0)
# (b)
pd.plotting.scatter_matrix(Auto.iloc[:,0:10], figsize=(10,10))
# select: displacement, horsepower, weight, acceleration
# (c)
x_name = ['displacement', 'horsepower', 'weight', 'acceleration']
x = pd.DataFrame(Auto, columns=x_name)
y = np.array(Auto['mpg01'])
np.random.seed(1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# (d) LDA
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, lda.pred)) # 7.6%
# (e) QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, qda.pred)) # 3.8%
# (f) Logit
glm = LogisticRegression()
glm.pred = glm.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, glm.pred)) # 7.6%
# (g) KNN
error_rate = np.array([])
k_value = np.array([])
for i in range(1, 110, 10):
knn = KNeighborsClassifier(n_neighbors=i)
knn.pred = knn.fit(x_train, y_train).predict(x_test)
k_value = np.append(k_value, i)
error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# k = 31 is the best
# ----------------------------------------------------------------------------
# Q12
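# The Power helpers called below are what this exercise asks the reader to
# write; minimal sketches (one possible answer, an assumption rather than the
# book's own code):
def Power():
    print(2 ** 3)

def Power2(x, a):
    print(x ** a)

def Power3(x, a):
    return x ** a

def PlotPower(x, a):
    plt.figure()
    plt.scatter(x, Power3(x, a))
    plt.xlabel('x')
    plt.ylabel('x^%d' % a)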
# (a)
Power()
# (b)
Power2(3,8)
# (c)
Power2(10,3) # 1000
Power2(8,17) # 2.2518e+15
Power2(131,3) # 2248091
# (d)
# (e)
x = np.arange(1, 11, 1)
y = Power3(x,2)
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.scatter(x, y)
plt.title('log(x^2) vs x')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 2)
ax.set_xscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on xlog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 3)
ax.set_yscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on ylog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
ax = fig.add_subplot(2, 2, 4)
ax.set_xscale('log')
ax.set_yscale('log')
plt.scatter(x, y)
plt.title('log(x^2) vs x on xylog-scale')
plt.xlabel('x')
plt.ylabel('log(x^2)')
# (f)
PlotPower(np.arange(1,11,1), 3)
# ----------------------------------------------------------------------------
# Q13
Boston = pd.read_csv('C:\\Users\\Carol\\Desktop\\Boston.csv')
Boston['crim01'] = np.where(Boston['crim'] > Boston['crim'].median(), 1, 0)
Boston.corr() # indus, nox, age, dis, rad, tax
pd.plotting.scatter_matrix(Boston.iloc[:,2:17]) # nox, rm, dis, tax, black, lstat, medv
# pick: indus, nox, dis, tax, lstat
# data setup
x_name = ['indus', 'nox', 'dis', 'tax', 'lstat']
x = pd.DataFrame(Boston, columns=x_name)
y = np.array(Boston['crim01'])
np.random.seed(1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# Logit
glm = LogisticRegression()
glm.pred = glm.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, glm.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, glm.pred)) # 21.7%
# LDA
lda = LinearDiscriminantAnalysis()
lda.pred = lda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, lda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, lda.pred)) # 17.1%
# QDA
qda = QuadraticDiscriminantAnalysis()
qda.pred = qda.fit(x_train, y_train).predict(x_test)
print(pd.DataFrame(confusion_matrix(y_test, qda.pred), index=['y=0', 'y=1'], columns=['y_pred=0', 'y_pred=1']))
print('error rate: ', 1-accuracy_score(y_test, qda.pred)) # 15.1%
# KNN
error_rate = np.array([])
k_value = np.array([])
for i in range(1, 110, 10):
knn = KNeighborsClassifier(n_neighbors=i)
knn.pred = knn.fit(x_train, y_train).predict(x_test)
k_value = np.append(k_value, i)
error_rate = np.append(error_rate, 1-accuracy_score(y_test, knn.pred))
best_k = k_value[error_rate.argmin()]
print('KNN best when k=%i' %best_k)
# k = 1 is the best | [...] | 2.22257 | 3,837 |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from cajas.movement.models.movement_withdraw import MovementWithdraw
from cajas.office.models.officeCountry import OfficeCountry
from cajas.users.models.partner import Partner
from cajas.webclient.views.utils import get_president_user
president = get_president_user()
class MovementWithdrawRequireList(LoginRequiredMixin, TemplateView):
"""
"""
login_url = '/accounts/login/'
redirect_field_name = 'redirect_to'
template_name = 'webclient/movement_withdraw_require_list.html'
| [...] | 3.189474 | 190 |
import json
from bs4 import BeautifulSoup
import requests
op=open('scrap.json')
data_dic,ask,check={},int(input('tell the rank of the movie you wanna see:- ')),json.load(op)
for i in check:
if i['Rank']==ask:
url=i["Link"]
page=requests.get(url)
soup=BeautifulSoup(page.text,"html.parser")
body_of_web=soup.find('div',{'id':'__next'})
structure=body_of_web.find('main')
sturct=structure.find('div', class_='ipc-page-content-container ipc-page-content-container--full BaseLayout__NextPageContentContainer-sc-180q5jf-0 fWxmdE')
section=sturct.find('section', class_='ipc-page-background ipc-page-background--base TitlePage__StyledPageBackground-wzlr49-0 dDUGgO')
sctor=section.find('section', class_='ipc-page-background ipc-page-background--baseAlt TitleMainHeroGroup__StyledPageBackground-w70azj-0 hEHQFC')
division=sctor.find('div', class_="TitleBlock__TitleContainer-sc-1nlhx7j-1 jxsVNt")
genre=sctor.find('div', class_="GenresAndPlot__ContentParent-cum89p-8 bFvaWW Hero__GenresAndPlotContainer-kvkd64-11 twqaW")
b=genre.find_all('a', class_='GenresAndPlot__GenreChip-cum89p-3 fzmeux ipc-chip ipc-chip--on-baseAlt')
genr=[]
for i in b:
genr.append(i.text)
bio=genre.find('span', class_='GenresAndPlot__TextContainerBreakpointXS_TO_M-cum89p-0 dcFkRD').text
if len(division.text[-7::])!=7:
time=division.text[-7::]
else:
time=division.text[-8::]
name=division.find('h1').text
print(time)
time2=int(time[0])*60
if 'min' in time:
    a = time.strip('min').split('h')
    run_time = str(time2 + int(a[1])) + ' minutes'
else:
    # runtime is given in whole hours only
    run_time = str(time2) + ' minutes'
director=structure.find('a',class_='ipc-metadata-list-item__list-content-item ipc-metadata-list-item__list-content-item--link').text
country_and_more = structure.find_all('li', class_='ipc-metadata-list__item')
country = language = ''  # defaults in case the page omits either field
for i in country_and_more:
    if 'Country of origin' in i.text:
        country = i.text[17:]
    if 'Language' in i.text:
        language = i.text[8:]
if 'Read all' in bio:
bio=bio[0:-8]
img=structure.find('div',class_='Media__PosterContainer-sc-1x98dcb-1 dGdktI')
link='https://www.imdb.com/'+(body_of_web.find_all("div",class_="ipc-poster ipc-poster--baseAlt ipc-poster--dynamic-width ipc-sub-grid-item ipc-sub-grid-item--span-2")[0].a['href'])
print(language,'\n',country,'\n',run_time,'\n',director,'\n',genr,'\n',bio,'\n',name,'\n',link)
| [...] | 2.414557 | 948 |
# Import the modules
import sys
import MinVel as mv
import numpy as np
# NOTES: May want to update temperature dependence of thermal expansivity using Holland and Powell's (2011)
# new revised equations (see figure 1 in that article). This will necessitate recalculating the first
# Gruneisen parameters. This could provide more realistic temperature dependence of material
# properties within the mantle.
if len(sys.argv) > 1:
if sys.argv[1] == "-h":
print('MinVel -- Program to calculate mineral aggregate moduli and density')
print('')
print(' Written by Oliver Boyd')
print('')
print(' This program calculates the velocity and density of a mineral assemblage ')
print(' at a given pressure and temperature (which may be vectors).')
print(' The velocities are expressed as Voigt, Reuss, and Voigt-Reuss-Hill averages.')
print('')
print(' The data required for this analysis is taken from Hacker and Abers (2003),')
print(' updated by Abers and Hacker in 2016, and expanded by Boyd in 2018.')
print(' The moduli at pressure and temperature are calculated based on the')
print(' procedures of Hacker and Abers (2004), Bina and Helffrich (1992) and')
print(' Holland and Powell (1998) as outlined in the supplementary section of ')
print(' Boyd et al. (2004) with updates by Abers and Hacker (2016) for quartz.')
print('')
print(' OUTPUT (SI Units)')
print(' results.npy - numpy binary file containing the following vectors:')
print(' Voigt-Reuss-Hill averages')
print(' K - Bulk modulus')
print(' G - Shear modulus')
print(' E - Youngs modulus')
print(' l - Lambda')
print(' v - Poissons ratio')
print(' Vp - P-wave velocity')
print(' Vs - S-wave velocity')
print(' p - Density')
print(' a - Thermal Expansivity')
print(' Voigt(v) and Reuss(r) bounds on velocity')
print(' Vpv - P-wave velocity')
print(' Vpr - P-wave velocity')
print(' Vsv - S-wave velocity')
print(' Vsr - S-wave velocity')
print('')
print(' INPUTS')
print(' Command line options')
print(' -h Help about this program.')
print('')
print(' -f InputFile - File containing composition, temperature, and pressure ')
print(' information with the following format')
print(' MinIndx 1, MinIndx 2, ..., MinIndx N')
print(' VolFrac 1, VolFrac 2, ..., VolFrac N')
print(' T1, P1')
print(' T2, P2')
print(' ...')
print(' TN, PN')
print('')
print(' -p Pressure - desired pressure or comma separated vector of pressures (Pa)')
print(' -t Temperature - desired temperature or comma separated vector of temperatures (K)')
print('')
print(' Composition parmeters - a composition structure with the following fields: ')
print(' -cm Min - The mineral index comma separated vector.')
print(' -cv Fr - Volume fraction for each mineral in Min (0 to 1), comma separated.')
print('')
print(' Mineral Indexes')
print(' Quartz')
print(' 1. Alpha Quartz ')
print(' 2. Beta Quartz ')
print(' 3. Coesite ')
print(' Feldspar group')
print(' Plagioclase')
print(' 4. High Albite ')
print(' 5. Low Albite ')
print(' 6. Anorthite ')
print('')
print(' 7. Orthoclase ')
print(' 8. Sanidine ')
print(' Garnet structural group')
print(' 9. Almandine ')
print(' 10. Grossular ')
print(' 11. Pyrope ')
print(' Olivine group')
print(' 12. Forsterite ')
print(' 13. Fayalite ')
print(' Pyroxene group')
print(' 14. Diopside ')
print(' 15. Enstatite ')
print(' 16. Ferrosilite ')
print(' 79. Mg-Tschermak ')
print(' 17. Jadeite ')
print(' 18. Hedenbergite ')
print(' 80. Acmite ')
print(' 81. Ca-Tschermak ')
print(' Amphibole supergroup')
print(' 19. Glaucophane ')
print(' 20. Ferroglaucophane ')
print(' 21. Tremolite ')
print(' 22. Ferroactinolite ')
print(' 23. Tshermakite ')
print(' 24. Pargasite ')
print(' 25. Hornblende ')
print(' 26. Anthophyllite ')
print(' Mica group')
print(' 27. Phlogopite ')
print(' 28. Annite ')
print(' 29. Muscovite ')
print(' 30. Celadonite ')
print(' Other')
print(' 31. Talc ')
print(' 32. Clinochlore ')
print(' 33. Daphnite ')
print(' 34. Antigorite ')
print(' 35. Zoisite ')
print(' 36. Clinozoisite ')
print(' 37. Epidote ')
print(' 38. Lawsonite ')
print(' 39. Prehnite ')
print(' 40. Pumpellyite ')
print(' 41. Laumontite ')
print(' 42. Wairakite ')
print(' 43. Brucite ')
print(' 44. Clinohumite ')
print(' 45. Phase A ')
print(' 46. Sillimanite ')
print(' 47. Kyanite ')
print(' 48. Spinel ')
print(' 49. Hercynite ')
print(' 50. Magnetite ')
print(' 51. Calcite ')
print(' 52. Aragonite ')
print(' 82. Magnesite ')
print(' 83. En79Fs09Ts12 ')
print(' 84. Di75He9Jd3Ts12 ')
print(' 85. ilmenite ')
print(' 86. cordierite ')
print(' 87. scapolite (meionite) ')
print(' 88. rutile ')
print(' 89. sphene ')
print(' 53. Corundum ')
print(' 54. Dolomite ')
print(' 74. Halite ')
print(' 77. Pyrite ')
print(' 78. Gypsum ')
print(' 90. Anhydrite ')
print(' 0. Water ')
print(' -1. Ice ')
print(' Clays')
print(' 55. Montmorillonite (Saz-1)')
print(' 56. Montmorillonite (S Wy-2)')
print(' 57. Montmorillonite (STX-1)')
print(' 58. Montmorillonite (S Wy-1)')
print(' 59. Montmorillonite (Shca-1)')
print(' 60. Kaolinite (Kga-2)')
print(' 61. Kaolinite (Kga-1b)')
print(' 62. Illite (IMT-2)')
print(' 63. Illite (ISMT-2)')
print(' 66. Smectite (S Wa-1)')
print(' 70. Montmorillonite (S YN-1)')
print(' 71. Chrysotile ')
print(' 72. Lizardite ')
print(' 76. Dickite ')
print('')
print(' Example:');
print(' Geophysical parameters for 20% Quartz, 20% low Albite, 30% Forsterite, and 30% Fayalite at')
print(' 300, 400, and 500K and 0.1, 0.3, and 0.5 MPa')
print(' > python MinVelWrapper.py -t 300,400,500 -p 0.1e6,0.3e6,0.5e6 -cm 1,5,12,13 -cv 0.2,0.2,0.3,0.3')
print('')
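        # A hypothetical input file matching the -f format described above
        # (composition on the first two lines, then one "T,P" pair per line):
        #   1,5,12,13
        #   0.2,0.2,0.3,0.3
        #   300,0.1e6
        #   400,0.3e6
        #   500,0.5e6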
sys.exit()
nMin = 1
nPT = 1
nT = 0
nP = 0
if len(sys.argv) > 1:
for j in range(1,len(sys.argv),2):
if sys.argv[j] == "-t":
entries = sys.argv[j+1].split(",")
nT = len(entries)
T = np.zeros((nT),dtype=np.float64)
for k in range(0,nT):
T[k] = entries[k]
if sys.argv[j] == "-p":
entries = sys.argv[j+1].split(",")
nP = len(entries)
P = np.zeros((nP),dtype=np.float64)
for k in range(0,nP):
P[k] = entries[k]
if sys.argv[j] == "-cm":
entries = sys.argv[j+1].split(",")
nMin = len(entries)
Cm = np.zeros((nMin),dtype=np.int8)
for k in range(0,nMin):
Cm[k] = entries[k]
if sys.argv[j] == "-cv":
entries = sys.argv[j+1].split(",")
nFr = len(entries)
Cv = np.zeros((nFr),dtype=np.float64)
for k in range(0,nFr):
Cv[k] = entries[k]
if sys.argv[j] == "-f":
fl = sys.argv[j+1]
print('Reading {0:s}'.format(fl))
f = open(fl,"r")
if f.mode == "r":
nPT = 0
ln = 0
for line in f:
line = line.strip()
columns = line.split(",")
if ln < 2:
nMin = len(columns)
else:
nPT = nPT + 1
ln = ln + 1
nT = nPT
nP = nPT
nFr = nMin
f.close()
T = np.zeros((nPT),dtype=np.float64)
P = np.zeros((nPT),dtype=np.float64)
Cm = np.zeros((nMin),dtype=np.int8)
Cv = np.zeros((nMin),dtype=np.float64)
f = open(fl,"r")
if f.mode == "r":
ln = 0
jT = 0
for line in f:
line = line.strip()
columns = line.split(",")
if ln == 0:
for j in range(0,len(columns)):
Cm[j] = columns[j]
elif ln == 1:
for j in range(0,len(columns)):
Cv[j] = columns[j]
else:
T[jT] = columns[0]
P[jT] = columns[1]
jT = jT + 1
ln = ln + 1
f.close()
# Make sure volume fractions sum to one (within a small tolerance)
if abs(sum(Cv) - 1.0) > 1e-6:
    print('Composition does not sum to one. - Exiting')
    sys.exit()
if nT != nP:
print('Number of temperature inputs must be equal to the number of pressure inputs')
sys.exit()
else:
nPT = nT
if nMin != nFr:
print('Number of minerals types must be equal to the number of mineral fractional volumes')
sys.exit()
Par, MinNames, nPar, nAllMin = mv.loadPar('../database/MineralPhysicsDatabase.nc')
MinIndex = Par[0, :]
print('{0:21s}{1:20s}'.format('Mineral','Volume fraction'))
for j in range(0,nMin):
    k = mv.find(MinIndex, Cm[j])
print(MinNames[:,k].tobytes().decode('utf-8'),'(',Cv[j],')')
if nPT > 1:
print('There are',nPT,'temperature and pressure points')
else:
print('Temperature',T)
print('Pressure',P)
print('')
K, G, E, l, v, Vp, Vs, den, Vpv, Vpr, Vsv, Vsr, a = mv.CalcMV(Cm,Cv,T,P);
print('K ',K)
print('G ',G)
print('E ',E)
print('l ',l)
print('v ',v)
print('Vp ',Vp)
print('Vs ',Vs)
print('den',den)
print('a ',a)
print('')
print('Voigt(v) and Reuss(r) bounds on velocity')
print('Vpv',Vpv)
print('Vpr',Vpr)
print('Vsv',Vsv)
print('Vsr',Vsr)
print('')
res = np.zeros((13,nPT),dtype=np.float64)
res[0,:] = K
res[1,:] = G
res[2,:] = E
res[3,:] = l
res[4,:] = v
res[5,:] = Vp
res[6,:] = Vs
res[7,:] = den
res[8,:] = a
res[9,:] = Vpv
res[10,:] = Vpr
res[11,:] = Vsv
res[12,:] = Vsr
f = 'results.npy'
np.save(f,res)
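# The saved array can be read back later with, e.g.:
#   res = np.load('results.npy'); Vp = res[5, :]  # row order as printed above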
sys.exit()
| [...] | 1.603853 | 8,825 |
import os
import requests
from matroid import error
from matroid.src.helpers import api_call, batch_file_request
# https://staging.dev.matroid.com/docs/api/index.html#api-Images-Classify
@api_call(error.InvalidQueryError)
def classify_image(self, detectorId, file=None, url=None, **options):
"""
Classify an image with a detector
detectorId: a unique id for the detector
file: path to local image file to classify
url: internet URL for the image to classify
"""
(endpoint, method) = self.endpoints['classify_image']
if not url and not file:
raise error.InvalidQueryError(
message='Missing required parameter: file or url')
endpoint = endpoint.replace(':key', detectorId)
try:
headers = {'Authorization': self.token.authorization_header()}
data = {'detectorId': detectorId}
data.update(options)
if url:
data['url'] = url
if file:
if not isinstance(file, list):
file = [file]
return batch_file_request(file, method, endpoint, headers, data)
else:
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except IOError as e:
raise e
except error.InvalidQueryError as e:
raise e
except Exception as e:
raise error.APIConnectionError(message=e)
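
# Hypothetical usage sketch (assumes these functions are bound as methods of a
# connected Matroid client object, which this excerpt does not show):
#   res = client.classify_image('your-detector-id', file='/path/to/image.jpg')
#   res = client.classify_image('your-detector-id', url='https://example.com/img.jpg')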
# https://staging.dev.matroid.com/docs/api/index.html#api-Images-PostLocalize
@api_call(error.InvalidQueryError)
def localize_image(self, localizer, localizerLabel, **options):
"""
Note: this API is very similar to Images/Classify;
however, it can be used to update bounding boxes of existing training images
by supplying update=true, labelId, and one of imageId or imageIds, and it has
access to the internal face localizer
(localizer="DEFAULT_FACE" and localizerLabel="face").
"""
(endpoint, method) = self.endpoints['localize_image']
data = {
'localizer': localizer,
'localizerLabel': localizerLabel,
}
update = options.get('update')
if update:
image_id = options.get('imageId')
image_ids = options.get('imageIds')
if not image_id and not image_ids:
raise error.InvalidQueryError(
message='Missing required parameter for update: imageId or imageIds')
if image_id:
data['imageId'] = image_id
else:
data['imageIds'] = image_ids
else:
files = options.get('file')
urls = options.get('url')
if not files and not urls:
raise error.InvalidQueryError(
message='Missing required parameter: files or urls')
data.update({'files': files,
'urls': urls, })
try:
headers = {'Authorization': self.token.authorization_header()}
data.update({'confidence': options.get('confidence'),
'update': 'true' if update else '',
'maxFaces': options.get('maxFaces'),
'labelId': options.get('labelId')
})
if update:
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
if files:
if not isinstance(files, list):
files = [files]
return batch_file_request(files, method, endpoint, headers, data)
else:
if isinstance(urls, list):
data['urls'] = urls
else:
data['url'] = urls
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except IOError as e:
raise e
except error.InvalidQueryError as e:
raise e
except Exception as e:
raise error.APIConnectionError(message=e)
| [...] | 2.734694 | 1,274 |
#!/usr/bin/env python3
"""
Initialize tables in SQLITE db
"""
import argparse
from models import Base
from sqlalchemy import create_engine
from utils import database_string
# NOTE: the original @with_args decorator and the body of main() are not
# shown in this excerpt; a minimal sketch is assumed here (database_string
# is taken to be the SQLAlchemy connection URL from utils).
def main():
    engine = create_engine(database_string)
    Base.metadata.create_all(engine)


if __name__ == "__main__":
    main()
| [...] | 3.385965 | 57 |
#!/usr/bin/env python3
# Miscellaneous
import math
import glob
from timeit import Timer
from yvhai.demo.base import YHDemo
| [...] | 2.446809 | 47 |
from PIL import Image
import numpy
import sys
argvs = sys.argv
argc = len(argvs)
src = "src.png" if argc <= 1 else argvs[1]
dest = "res.txt" if argc <= 2 else argvs[2]
print("{} => {}".format(src, dest))
img = Image.open(src).convert('RGB')
data = numpy.array(img)
w, h = img.size
# pad the stride to the next multiple of 8 so each output row is byte-aligned
sw = w
if sw % 8 != 0:
    sw = sw + (8 - (sw % 8))
print("(Width, Height, Stride) => ({},{},{})".format(w, h, sw))
v = 0x00  # byte accumulator
pos = 0   # bit position within the current byte (packed LSB first)
ot = 0    # bytes emitted on the current output line (wrapped at 20)
f = open(dest, "w")
for y in range(h):
for x in range(sw):
if x >= w:
c = 0
else:
c = 1 if data[y,x][0] < 128 else 0
v = v | (c << pos)
pos = pos + 1
if pos >= 8:
f.write("0x{:02X},".format(v))
v = 0x00
pos = 0
ot = ot + 1
if ot == 20:
f.write("\n")
ot = 0
f.close()
| [...] | 2.010929 | 366 |
from __future__ import absolute_import, unicode_literals
from os import remove
from celery import shared_task
from VEnCode import DataTpm, Vencodes
from .models import Promoters154CellsBinarized, Enhancers154CellsBinarized
from .utils_views import *
# Create your tasks here
# The decorated task bodies were elided from this excerpt; hypothetical
# stubs are shown so each decorator has a valid target.
@shared_task
def run_vencode_analysis(*args, **kwargs):
    pass


@shared_task
def remove_temporary_files(*args, **kwargs):
    pass
| [...] | 3.344086 | 93 |
import pandas as pd
from typing import List
from pandas.core.frame import DataFrame
from src.models.base_models import Model, RandomModel
| [...] | 3.710526 | 38 |
import scipy, itertools, sys, os, time, logging
import scipy.optimize  # fmin is used below and must be imported explicitly
import scipy.constants as con
import numpy as np
import ctypes as C
from scipy.stats.mstats_basic import tmean
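
# fit_brightness_temp below minimises a chi() misfit that is not defined in
# this excerpt. A minimal sketch is assumed here: the squared residual between
# the observed flux and a Planck black body at temperature temp (wavelength in
# microns), using the same physical constants as black_body_to_temp.
def planck_flux(temp, wave):
    # hypothetical helper: Planck spectral radiance for wavelength in microns
    h = 6.62606957e-34
    c = 299792458.0
    k = 1.3806488e-23
    wl = wave * 1e-6
    return (2.0 * h * c ** 2) / (wl ** 5 * (np.exp((h * c) / (wl * k * temp)) - 1.0))

def chi(temp, flux, wave):
    # squared misfit minimised by scipy.optimize.fmin in fit_brightness_temp
    return np.sum((flux - planck_flux(temp, wave)) ** 2)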
def fit_brightness_temp(wave,flux):
'''
function fitting a black body to given flux-wavelength
value.
Input: Flux at wavelength (microns)
Output: brightness temperature.
'''
tempfit = scipy.optimize.fmin(chi,1000,args=(flux,wave),maxiter=1000,disp=0)
return tempfit
def black_body_to_temp(wave,flux):
'''
Does the opposite to black_body. Converts flux to temperature.
Input: wavelength grid, flux grid
Output: tempeatrure grid
@todo check if conversion is correct. There is probably a /m^2/micron offset
'''
h = 6.62606957e-34
c = 299792458
k = 1.3806488e-23
pi= 3.14159265359
wave *= 1e-6
logpart = np.log(((2.0*h*c**2)/(flux*wave**5))+1.0)
T = (h*c)/(wave*k) * 1.0/ logpart
return T
def iterate_TP_profile(TP_params, TP_params_std, TP_bounds, TP_function,iterate=True):
'''
function iterating through all lower and upper bounds of parameters
to determine which combination gives the lowest/highest attainable
TP profile. Returns mean TP profile with errorbars on each pressure level
'''
Tmean = TP_function(TP_params)
bounds = [] #list of lower and upper parameter bounds
lowpar = []
highpar= []
for i in range(len(TP_params)):
low = TP_params[i]-TP_params_std[i]
high = TP_params[i]+TP_params_std[i]
lowpar.append(low)
highpar.append(high)
if low < TP_bounds[i][0]:
low = TP_bounds[i][0]+1e-10
if high > TP_bounds[i][1]:
high = TP_bounds[i][1]-1e-10
bounds.append((low,high))
if iterate:
iterlist = list(itertools.product(*bounds))
iter_num = np.shape(iterlist)[0] #number of possible combinations
T_iter = np.zeros((len(Tmean), iter_num))
T_minmax = np.zeros((len(Tmean), 2))
for i in range(iter_num):
T_iter[:,i] = TP_function(iterlist[i])
Tmean = np.mean(T_iter,1)
T_minmax[:,0] = np.min(T_iter,1)
T_minmax[:,1] = np.max(T_iter,1)
T_sigma = (T_minmax[:,1] - T_minmax[:,0])/2.0
# T_sigma = np.std(T_iter,1)
else:
Tmin = TP_function(lowpar)
Tmax = TP_function(highpar)
T_sigma = Tmax-Tmin
T_sigma /= 2.0
return Tmean, T_sigma
def generate_tp_covariance(outob):
'''
Function generating TP_profile covariance matrix from previous best fit.
This can be used by _TP_rodgers200 or _TP_hybrid TP profiles in a second stage fit.
'''
# todo needs to be adapted to new output class
#translating fitting parameters to mean temperature and lower/upper bounds
fit_TPparam_bounds = outob.fitting.fit_bounds[outob.fitting.fit_X_nparams:]
if outob.NEST:
T_mean, T_sigma = iterate_TP_profile(outob.NEST_TP_params_values[0], outob.NEST_TP_params_std[0],fit_TPparam_bounds,
outob.fitting.forwardmodel.atmosphere.TP_profile)
elif outob.MCMC:
T_mean, T_sigma = iterate_TP_profile(outob.MCMC_TP_params_values[0], outob.MCMC_TP_params_std[0],fit_TPparam_bounds,
outob.fitting.forwardmodel.atmosphere.TP_profile)
elif outob.DOWN:
FIT_std = np.zeros_like(outob.DOWN_TP_params_values)
T_mean, T_sigma = iterate_TP_profile(outob.DOWN_TP_params_values, FIT_std,fit_TPparam_bounds,
outob.fitting.forwardmodel.atmosphere.TP_profile)
else:
logging.error('Cannot compute TP-covariance. No Stage 0 fit (NS/MCMC/MLE) can be found.')
exit()
#getting temperature error
nlayers = outob.fitting.forwardmodel.atmosphere.nlayers
#setting up arrays
Ent_arr = np.zeros((nlayers,nlayers))
Sig_arr = np.zeros((nlayers,nlayers))
#populating arrays
for i in range(nlayers):
Ent_arr[i,:] = np.abs((T_mean[i])-(T_mean[:]))
Sig_arr[i,:] = np.abs(T_sigma[i]+T_sigma[:])
Diff_arr = np.sqrt(Ent_arr**2+Sig_arr**2)
Diff_norm = ((Diff_arr-np.min(Diff_arr))/np.max(Diff_arr-np.min(Diff_arr)))
Cov_array = 1.0 - Diff_norm
    return Cov_array | [...] | 2.149727 | 2,017 |
"""Dalton API Wrapper for WAX
This module provides a Python wrapper for the Atomic Asset API,
with plans to expand to WAX and Atomic Market API endpoints.
"""
__version__ = "0.3.0"
| [...] | 3.031746 | 63 |
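# num_generators is not defined in this excerpt; a minimal sketch is assumed
# here: a generator that yields the integers 0..n-1 one at a time.
def num_generators(n):
    for i in range(n):
        yield i
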
my_nums = num_generators(10)
while True:
try:
print(next(my_nums))
except StopIteration:
break
| [...] | 2 | 61 |
#!/user/bin/env python
# -*-coding:utf-8 -*-
# @CreateTime : 2021/10/25 0:25
# @Author : xujiahui
# @Project : robust_python
# @File : try_wx_prc1.py
# @Version : V0.0.1
# @Desc : ?
import wx
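
# The original MyApp class is not shown in this excerpt; a minimal sketch is
# assumed below: a bare wx.App that opens one empty top-level frame.
class MyApp(wx.App):
    def OnInit(self):
        # create and show a window so MainLoop has something to run
        frame = wx.Frame(None, title="try_wx_prc1")
        frame.Show()
        return True
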
if __name__ == "__main__":
app = MyApp()
app.MainLoop()
| [...] | 1.841463 | 164 |
"""Implementation of cron API.
"""
# import fnmatch
import logging
from treadmill import authz
# from treadmill import context
# from treadmill import cron
from treadmill import schema
# from treadmill.cron import model as cron_model
_LOGGER = logging.getLogger(__name__)
class API(object):
"""Treadmill Cron REST api."""
def init(authorizer):
"""Returns module API wrapped with authorizer function."""
api = API()
return authz.wrap(api, authorizer)
| [...] | 3.35461 | 141 |
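# foo is not defined in this excerpt. A hypothetical definition consistent
# with the expected outputs in the comments below (5, 7, 8) is:
def foo(a=1, b=2):
    return a + 2 * b
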
print(foo()) # 5
print(foo(b=3)) # 7
print(foo(b=3,a=2)) # 8
| [...] | 1.666667 | 42 |
import json
from unittest.mock import patch
from boddle import boddle
from detectem.ws import do_detection
from detectem.exceptions import SplashError, NoPluginsError
"""
Tests run with `autospec` to match function signature in case of change
"""
# The decorated test functions (three in the original) were elided from this
# excerpt; a single hypothetical stub is shown so the decorator has a target.
@patch('detectem.ws.get_detection_results', autospec=True)
def test_do_detection(get_detection_results_mock):
    pass
| [...] | 3.049296 | 142 |
"""SpeedTester."""
import datetime
import json
import dateutil.parser
class SpeedTestResult(object):
"""Represents the results of a test performed by a SpeedTester."""
def __init__(self, download, startTime, ping=None, upload=None, endTime=None):
"""Results of a speedtest performed by a SpeedTester."""
super(SpeedTestResult, self).__init__()
self.download = download
self.upload = upload
self.ping = ping
self.startTime = startTime
self.endTime = endTime
if self.endTime is None:
self.endTime = datetime.datetime.now()
self.duration = round((self.endTime - self.startTime).total_seconds(), 2)
def description(self):
"""Return string describing the test result."""
durationString = "{}s".format(self.duration)
downloadSpeedString = "{} Kbps".format(self.download)
uploadSpeedString = "{} Kbps".format(self.upload)
return "\t".join((self.startTime.isoformat(), downloadSpeedString, uploadSpeedString, durationString))
    def toString(self):
        """Return JSON String."""
        return json.dumps(self.toJSON())
def toJSON(self):
"""Return JSON String of Test Result."""
return {
"start": self.startTime.isoformat(),
"end": self.endTime.isoformat(),
"duration": self.duration,
"download": self.download,
"upload": self.upload,
"ping": self.ping
}
    @staticmethod
    def fromJSON(data):
        """Instantiate a SpeedTestResult from JSON."""
        startTime = dateutil.parser.parse(data["start"])
        endTime = dateutil.parser.parse(data["end"])
        return SpeedTestResult(data["download"], startTime, ping=data["ping"], upload=data["upload"], endTime=endTime)
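
# Minimal usage sketch (assumed values):
#   result = SpeedTestResult(42000, datetime.datetime.now(), ping=12, upload=9000)
#   print(result.description())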
class SpeedTester:
"""Interface for an object that can produce SpeedTestResult(s)."""
def performTest(self):
"""Perform the speedtest and return a SpeedTestResult."""
raise NotImplementedError("performTest must be implemented.")
class SpeedTestResultArchive(object):
"""Interface for an object that can persist SpeedTestResult objects."""
def testResults(self):
"""Return all SpeedTestResultObjects."""
raise NotImplementedError("testResults must be implemented.")
    def append(self, result):
        """Append a SpeedTestResult to the persistent store."""
        raise NotImplementedError("append must be implemented.")
| [...] | 2.696937 | 914 |
import torch.nn
from torch import nn
from transformers import BertModel
import numpy as np
| [...] | 3.833333 | 24 |
"""Catalog of user objects"""
from .CatalogMenuWdg import *
| [...] | 3.529412 | 17 |
#You will receive a journal with some Collecting items, separated with ', ' (comma and space). After that, until receiving "Craft!" you will be receiving different commands.
#Commands (split by " - "):
#"Collect - {item}" – Receiving this command, you should add the given item in your inventory. If the item already exists, you should skip this line.
#"Drop - {item}" – You should remove the item from your inventory, if it exists.
#"Combine Items - {oldItem}:{newItem}" – You should check if the old item exists, if so, add the new item after the old one. Otherwise, ignore the command.
#"Renew – {item}" – If the given item exists, you should change its position and put it last in your inventory.
items = input().split(", ")
command = input()
while not command == "Craft!":
    type_command, item = command.split(" - ")
    if type_command == "Collect":
        if item not in items:
            items.append(item)
    elif type_command == "Drop":
        if item in items:
            items.remove(item)
    elif type_command == "Combine Items":
        old_item, new_item = item.split(":")
        if old_item in items:
            items.insert(items.index(old_item) + 1, new_item)
    elif type_command == "Renew":
        if item in items:
            items.remove(item)
            items.append(item)
    command = input()

# after "Craft!" print the final inventory (assumed output format)
print(", ".join(items))
| [...] | 3.178082 | 365 |
from typing import List
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from tmcp import TMCPHandler, TMCPMessage
from util.vec import Vec3
from util.utilities import physics_object, Vector
from policy import base_policy, marujo_strategy
from tools.drawing import DrawingTool
from util.game_info import GameInfo
from policy.macros import ACK, KICKOFF, CLEAR, DEFENSE, UNDEFINED
try:
from rlutilities.linear_algebra import *
from rlutilities.mechanics import Aerial, AerialTurn, Dodge, Wavedash, Boostdash
from rlutilities.simulation import Game, Ball, Car, Input
except:
print("==========================================")
print("\nrlutilities import failed.")
print("\n==========================================")
quit()
RENDERING = True
| [...] | 3.29588 | 267 |
# -*- coding: utf-8 -*-
"""
Leetcode - Two Sum II
https://leetcode.com/problems/two-sum-ii-input-array-is-sorted
Created on Sun Nov 18 20:39:12 2018
@author: Arthur Dysart
"""
## REQUIRED MODULES
import sys
## MODULE DEFINITIONS
class Solution:
"""
One-pointer with binary search of sorted array.
    Time complexity: O(n log n)
    - Iterate over all elements, with a binary search for each
Space complexity: O(1)
- Constant pointer evaluation
"""
def two_sum(self, a, x):
"""
Determines indicies of elements whose sum equals target "x".
:param list[int] a: sorted array of integers
:type int x: target integer sum
:return: list of indicies (base index 1) of target values
:rtype: list[int]
"""
if not a:
return [-1, -1]
n = len(a)
for i in range(n):
t = x - a[i]
j = self.find_target(t, i, n, a)
if j != 0:
return [i + 1, j + 1]
return [-1, -1]
def find_target(self, t, i, n, a):
"""
Searches for target "t" by binary search of right-section array.
:param int t: target integer for two sum
:param int i: lower limit of binary search range
:param int n: length of input array
:param list[int] a: sorted array of integers
:return: integer representing index of target element
:rtype: int
"""
if (not a or
not n):
return 0
# Execute binary search
l = i + 1
r = n - 1
while l <= r:
m = l + (r - l) // 2
if a[m] == t:
# Target element found
return m
elif a[m] < t:
# Target element in right-half
l = m + 1
else:
# Target element in left-half
r = m - 1
return 0
class Solution2:
"""
    Single-pass hash-map search of the array (with memoization).
Time complexity: O(n)
- Amortized iterate over all elements in array
Space complexity: O(1)
- Constant pointer evaluation
"""
def two_sum(self, a, x):
"""
Determines indicies of elements whose sum equals target "x".
:param list[int] a: sorted array of integers
:type int x: target integer sum
:return: list of indicies (base index 1) of target values
:rtype: list[int]
"""
if not a:
return [-1, -1]
c = {}
n = len(a)
for j in range(n):
# Set target element
t = x - a[j]
if t in c:
# Complimentary element found
i = c[t]
return [i + 1, j + 1]
else:
# Add visited integer to dictionary
c[a[j]] = j
return [-1, -1]
class Solution3:
"""
    Two-pointer search of the sorted array.
Time complexity: O(n)
- Amortized iterate over all elements in array
Space complexity: O(1)
- Constant pointer evaluation
"""
def two_sum(self, a, x):
"""
Determines indicies of elements whose sum equals target "x".
:param list[int] a: sorted array of integers
:type int x: target integer sum
:return: list of indicies (base index 1) of target values
:rtype: list[int]
"""
if not a:
return [-1, -1]
n = len(a)
l = 0
r = n - 1
# Perform two-pointer search
while l < r:
# Set current sum
t = a[l] + a[r]
if t == x:
# Target indicies found
return [l + 1, r + 1]
if t > x:
r -= 1
else:
l += 1
# Target indicies not found
return [-1, -1]
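
## HELPER MODULE
class Input:
    """
    Hypothetical stdin helper assumed by the main module below: reads the
    sorted integer array from the first input line and the target integer
    from the second.
    """
    def stdin(self, stream):
        a = [int(s) for s in stream.readline().split()]
        x = int(stream.readline())
        return a, x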
## MAIN MODULE
if __name__ == "__main__":
# Import exercise parameters
a, x = Input()\
.stdin(sys.stdin)
# Evaluate solution
z = Solution()\
.two_sum(a, x)
print(z)
## END OF FILE | [...] | 1.915412 | 2,258 |
# Generated by Django 2.2.3 on 2019-07-19 09:00
from django.db import migrations
| [...] | 2.766667 | 30 |
import pyximport; pyximport.install()
from gryphon.lib.exchange.coinbase_btc_usd import CoinbaseBTCUSDExchange
from gryphon.tests.environment.exchange_wrappers.live_orders import LiveOrdersTest
| [...] | 3.0625 | 64 |
# -*- coding: utf-8 -*-
import logging
from .test_project_base import TestProjectCommon
_logger = logging.getLogger(__name__)
class TestProjectConfig(TestProjectCommon):
"""Test module configuration and its effects on projects."""
    @classmethod
def _set_feature_status(cls, is_enabled):
"""Set enabled/disabled status of all optional features in the
project app config to is_enabled (boolean).
"""
features_config = cls.Settings.create(
{feature[0]: is_enabled for feature in cls.features})
features_config.execute()
def test_existing_projects_enable_features(self):
"""Check that *existing* projects have features enabled when
the user enables them in the module configuration.
"""
self._set_feature_status(is_enabled=True)
for config_flag, project_flag in self.features:
self.assertTrue(
self.project_pigs[project_flag],
"Existing project failed to adopt activation of "
f"{config_flag}/{project_flag} feature")
def test_new_projects_enable_features(self):
"""Check that after the user enables features in the module
configuration, *newly created* projects have those features
enabled as well.
"""
self._set_feature_status(is_enabled=True)
project_cows = self.Project.create({
"name": "Cows",
"partner_id": self.partner_1.id})
for config_flag, project_flag in self.features:
self.assertTrue(
project_cows[project_flag],
f"Newly created project failed to adopt activation of "
f"{config_flag}/{project_flag} feature")
| [...]